Lines Matching refs:interval

50 struct ir3_reg_interval interval;
71 * insert the reload code for them last. Keep track of whether this interval
76 /* Keep track of whether this interval currently can't be spilled because:
83 /* Whether this interval can be rematerialized. */
166 /* We don't create an interval, etc. for the base reg, so just lower the
378 ra_spill_interval_init(struct ra_spill_interval *interval,
381 ir3_reg_interval_init(&interval->interval, reg);
382 interval->dst.flags = reg->flags;
383 interval->dst.def = reg;
384 interval->already_spilled = false;
385 interval->needs_reload = false;
386 interval->cant_spill = false;
387 interval->can_rematerialize = can_rematerialize(reg);
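
The comment fragments above (lines 71-83) and the assignments in ra_spill_interval_init (lines 378-387) together pin down the per-value state this pass tracks; the listing appears to come from Mesa's ir3 spilling pass. Below is a sketch of the wrapper struct they imply. The rb_node members are inferred from the tree operations later in the listing (lines 457, 464), and the name and exact type of dst (a register-or-immediate value) are assumptions.

/* Sketch, assembled from the fragments in this listing; exact layout
 * and member order are assumptions. */
struct ra_spill_interval {
   struct ir3_reg_interval interval; /* embedded base interval (line 50) */

   struct rb_node node;      /* linkage in full_live_intervals (line 464) */
   struct rb_node half_node; /* linkage in half_live_intervals (line 457) */

   /* Where the value currently lives; rewritten as it is spilled,
    * reloaded, or rematerialized (lines 382-383). */
   struct reg_or_immed dst;

   unsigned next_use_distance; /* Belady-style key (line 546) */

   bool cant_spill;        /* pinned for the current instruction (386) */
   bool already_spilled;   /* slot already holds the value; skip store (384) */
   bool needs_reload;      /* must reload before the current instr (385) */
   bool can_rematerialize; /* recompute rather than load (387) */
};
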
391 ir3_reg_interval_to_interval(struct ir3_reg_interval *interval)
393 return rb_node_data(struct ra_spill_interval, interval, interval);
397 ra_spill_interval_root(struct ra_spill_interval *interval)
399 struct ir3_reg_interval *ir3_interval = &interval->interval;
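
rb_node_data() at line 393 is the container-of idiom: it steps from a pointer to the embedded interval member back to the enclosing ra_spill_interval. The body of ra_spill_interval_root is not shown here, but the parent pointer used at line 607 suggests a simple parent walk. A minimal sketch of both, assuming offsetof semantics for rb_node_data:

#include <stddef.h>

/* container-of: recover the enclosing struct from a member pointer. */
#define rb_node_data(type, ptr, field) \
   ((type *)((char *)(ptr) - offsetof(type, field)))

static inline struct ra_spill_interval *
ir3_reg_interval_to_interval(struct ir3_reg_interval *interval)
{
   return rb_node_data(struct ra_spill_interval, interval, interval);
}

/* Sketch: climb to the outermost interval, so that pinning or spilling
 * a child of a merged group acts on the whole group. */
static inline struct ra_spill_interval *
ra_spill_interval_root(struct ra_spill_interval *interval)
{
   struct ir3_reg_interval *ir3_interval = &interval->interval;
   while (ir3_interval->parent)
      ir3_interval = ir3_interval->parent;
   return ir3_reg_interval_to_interval(ir3_interval);
}
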
447 struct ra_spill_interval *interval = ir3_reg_interval_to_interval(_interval);
450 unsigned size = reg_size(interval->interval.reg);
451 if (interval->interval.reg->flags & IR3_REG_SHARED) {
454 if (interval->interval.reg->flags & IR3_REG_HALF) {
457 rb_tree_insert(&ctx->half_live_intervals, &interval->half_node,
461 if (ctx->merged_regs || !(interval->interval.reg->flags & IR3_REG_HALF)) {
464 rb_tree_insert(&ctx->full_live_intervals, &interval->node,
474 struct ra_spill_interval *interval = ir3_reg_interval_to_interval(_interval);
477 unsigned size = reg_size(interval->interval.reg);
478 if (interval->interval.reg->flags & IR3_REG_SHARED) {
481 if (interval->interval.reg->flags & IR3_REG_HALF) {
484 rb_tree_remove(&ctx->half_live_intervals, &interval->half_node);
487 if (ctx->merged_regs || !(interval->interval.reg->flags & IR3_REG_HALF)) {
490 rb_tree_remove(&ctx->full_live_intervals, &interval->node);
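
Lines 447-490 are the two halves of one bookkeeping scheme: when an interval becomes live it is charged to a per-class pressure counter and, if it is spillable, inserted into an rb-tree from which spill candidates are later drawn; deletion (lines 474-490) undoes both. A sketch of the insert side; the cur_pressure fields and the comparator names are assumptions (the real comparator arguments are elided in the fragments above):

static void
interval_add(struct ra_spill_ctx *ctx, struct ra_spill_interval *interval)
{
   unsigned size = reg_size(interval->interval.reg);

   if (interval->interval.reg->flags & IR3_REG_SHARED) {
      /* Shared regs are tracked for pressure but not spilled here. */
      ctx->cur_pressure.shared += size;
   } else {
      if (interval->interval.reg->flags & IR3_REG_HALF) {
         ctx->cur_pressure.half += size;
         rb_tree_insert(&ctx->half_live_intervals, &interval->half_node,
                        half_next_use_cmp); /* hypothetical comparator */
      }
      /* With a merged half/full register file (line 461), a half reg
       * occupies full-pressure space as well. */
      if (ctx->merged_regs ||
          !(interval->interval.reg->flags & IR3_REG_HALF)) {
         ctx->cur_pressure.full += size;
         rb_tree_insert(&ctx->full_live_intervals, &interval->node,
                        next_use_cmp); /* hypothetical comparator */
      }
   }
}
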
528 struct ra_spill_interval *interval)
530 ir3_reg_interval_insert(&ctx->reg_ctx, &interval->interval);
535 struct ra_spill_interval *interval)
537 ir3_reg_interval_remove(&ctx->reg_ctx, &interval->interval);
543 struct ra_spill_interval *interval = ctx->intervals[dst->name];
544 ra_spill_interval_init(interval, dst);
546 interval->next_use_distance = dst->next_use;
552 if (interval->can_rematerialize)
560 struct ra_spill_interval *interval = ctx->intervals[dst->name];
561 if (interval->interval.inserted)
564 ra_spill_ctx_insert(ctx, interval);
565 interval->cant_spill = true;
577 if (interval->interval.reg->flags & IR3_REG_SHARED)
579 else if (interval->interval.reg->flags & IR3_REG_HALF)
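
Lines 543-579 handle a freshly defined destination: its interval is (re)initialized, keyed by the next-use distance computed by a prior liveness pass, inserted, and pinned with cant_spill so it cannot be evicted while its defining instruction is still being processed (line 630 later clears the pin). A sketch; the function names are placeholders since the signatures are not in the listing:

static void
init_dst(struct ra_spill_ctx *ctx, struct ir3_register *dst)
{
   struct ra_spill_interval *interval = ctx->intervals[dst->name];
   ra_spill_interval_init(interval, dst);
   interval->next_use_distance = dst->next_use;
   /* Line 552: rematerializable defs get extra setup here (elided). */
}

static void
insert_dst(struct ra_spill_ctx *ctx, struct ir3_register *dst)
{
   struct ra_spill_interval *interval = ctx->intervals[dst->name];
   if (interval->interval.inserted)
      return;
   ra_spill_ctx_insert(ctx, interval);
   /* Pin the new value until the instruction is finished (line 565). */
   interval->cant_spill = true;
}
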
589 struct ra_spill_interval *interval = ctx->intervals[src->def->name];
591 if (!interval->interval.inserted) {
592 ra_spill_ctx_insert(ctx, interval);
593 interval->needs_reload = true;
594 interval->already_spilled = true;
597 ra_spill_interval_root(interval)->cant_spill = true;
605 struct ra_spill_interval *interval = ctx->intervals[src->def->name];
607 if (!interval->interval.inserted || interval->interval.parent ||
608 !rb_tree_is_empty(&interval->interval.children))
611 ra_spill_ctx_remove(ctx, interval);
618 struct ra_spill_interval *interval = ctx->intervals[src->def->name];
620 if (!interval->interval.inserted)
623 ra_spill_ctx_remove(ctx, interval);
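
Lines 589-623 prepare and retire sources. A source whose interval is no longer live must have been spilled earlier, so it is re-inserted with needs_reload set; already_spilled prevents emitting a second store if it is evicted again, and the pin is applied to the interval's root so the whole merge group survives the instruction. Afterwards, an interval is only removed early if it stands alone (no parent, no children). A simplified sketch (placeholder names; kill-flag checks omitted):

static void
ensure_src_live(struct ra_spill_ctx *ctx, struct ir3_register *src)
{
   struct ra_spill_interval *interval = ctx->intervals[src->def->name];

   if (!interval->interval.inserted) {
      ra_spill_ctx_insert(ctx, interval);
      interval->needs_reload = true;    /* reload before the instruction */
      interval->already_spilled = true; /* its slot already holds the value */
   }
   ra_spill_interval_root(interval)->cant_spill = true;
}

static void
remove_src_early(struct ra_spill_ctx *ctx, struct ir3_register *src)
{
   struct ra_spill_interval *interval = ctx->intervals[src->def->name];

   if (!interval->interval.inserted || interval->interval.parent ||
       !rb_tree_is_empty(&interval->interval.children))
      return;

   ra_spill_ctx_remove(ctx, interval);
}
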
629 struct ra_spill_interval *interval = ctx->intervals[dst->name];
630 interval->cant_spill = false;
636 struct ra_spill_interval *interval = ctx->intervals[dst->name];
638 if (!interval->interval.inserted)
641 ra_spill_ctx_remove(ctx, interval);
647 struct ra_spill_interval *interval = ctx->intervals[src->def->name];
649 assert(interval->interval.inserted);
651 interval->next_use_distance = src->next_use;
656 if (!interval->interval.parent && !(src->flags & IR3_REG_SHARED)) {
658 rb_tree_remove(&ctx->half_live_intervals, &interval->half_node);
659 rb_tree_insert(&ctx->half_live_intervals, &interval->half_node,
663 rb_tree_remove(&ctx->full_live_intervals, &interval->node);
664 rb_tree_insert(&ctx->full_live_intervals, &interval->node,
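
Lines 647-664 refresh a live source's next-use distance. Because the candidate trees are sorted by that distance, the key cannot be changed in place: the node is removed and re-inserted, the standard way to re-key an rb-tree element. The half/full split is inferred from lines 457-464; the comparators are the same hypothetical ones as above:

static void
update_src_next_use(struct ra_spill_ctx *ctx, struct ir3_register *src)
{
   struct ra_spill_interval *interval = ctx->intervals[src->def->name];
   assert(interval->interval.inserted);

   interval->next_use_distance = src->next_use;

   /* Only top-level, spillable intervals sit in the sorted trees. */
   if (!interval->interval.parent && !(src->flags & IR3_REG_SHARED)) {
      if (src->flags & IR3_REG_HALF) {
         rb_tree_remove(&ctx->half_live_intervals, &interval->half_node);
         rb_tree_insert(&ctx->half_live_intervals, &interval->half_node,
                        half_next_use_cmp);
      }
      if (ctx->merged_regs || !(src->flags & IR3_REG_HALF)) {
         rb_tree_remove(&ctx->full_live_intervals, &interval->node);
         rb_tree_insert(&ctx->full_live_intervals, &interval->node,
                        next_use_cmp);
      }
   }
}
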
769 spill_interval(struct ra_spill_ctx *ctx, struct ra_spill_interval *interval,
772 if (interval->can_rematerialize && !interval->interval.reg->merge_set)
775 spill(ctx, &interval->dst, get_spill_slot(ctx, interval->interval.reg),
786 rb_tree_foreach_safe (struct ra_spill_interval, interval,
788 d("trying ssa_%u:%u", interval->interval.reg->instr->serialno,
789 interval->interval.reg->name);
790 if (!interval->cant_spill) {
791 if (!interval->already_spilled)
792 spill_interval(ctx, interval, instr, instr->block);
793 ir3_reg_interval_remove_all(&ctx->reg_ctx, &interval->interval);
805 rb_tree_foreach_safe (struct ra_spill_interval, interval,
807 d("trying ssa_%u:%u", interval->interval.reg->instr->serialno,
808 interval->interval.reg->name);
809 if (!interval->cant_spill) {
810 if (!interval->already_spilled)
811 spill_interval(ctx, interval, instr, instr->block);
812 ir3_reg_interval_remove_all(&ctx->reg_ctx, &interval->interval);
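
Lines 769-812 are the eviction loop, run once per register class (hence the two near-identical foreach blocks). Intervals are visited in tree order, presumably furthest-next-use-first, i.e. Belady's heuristic; pinned (cant_spill) intervals are skipped, already-spilled ones skip the store, and per line 772 a rematerializable value outside any merge set never needs a store. A sketch of one class's loop; the limit_pressure field, the early break, and the function name are assumptions:

static void
limit(struct ra_spill_ctx *ctx, struct ir3_instruction *instr)
{
   if (ctx->cur_pressure.full <= ctx->limit_pressure.full)
      return;

   rb_tree_foreach_safe (struct ra_spill_interval, interval,
                         &ctx->full_live_intervals, node) {
      d("trying ssa_%u:%u", interval->interval.reg->instr->serialno,
        interval->interval.reg->name);
      if (!interval->cant_spill) {
         if (!interval->already_spilled)
            spill_interval(ctx, interval, instr, instr->block);
         ir3_reg_interval_remove_all(&ctx->reg_ctx, &interval->interval);
         if (ctx->cur_pressure.full <= ctx->limit_pressure.full)
            break;
      }
   }
}
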
825 * values already reloaded, either because it's the child of some other interval
948 struct ra_spill_interval *interval,
953 interval->dst.flags = def->flags;
954 interval->dst.def = def;
955 interval->needs_reload = false;
958 &interval->interval.children, interval.node) {
959 struct ir3_register *child_reg = child->interval.reg;
962 interval->interval.reg->interval_start) / reg_elem_size(def),
973 struct ra_spill_interval *interval = ctx->intervals[def->name];
975 struct ir3_reg_interval *ir3_parent = interval->interval.parent;
981 interval->dst.flags = def->flags;
982 interval->dst.def = extract(
990 if (interval->can_rematerialize)
995 rewrite_src_interval(ctx, interval, dst, instr, block);
1002 struct ra_spill_interval *interval = ctx->intervals[src->def->name];
1004 if (interval->needs_reload) {
1008 ra_spill_interval_root(interval)->cant_spill = false;
1015 struct ra_spill_interval *interval = ctx->intervals[src->def->name];
1017 set_src_val(src, &interval->dst);
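
Lines 948-1017 run just before each instruction uses its sources: an interval flagged needs_reload gets its value back, by rematerializing when possible (line 990) or loading from its slot, extracting a component when only a child of a merged interval is needed (line 982); the root's pin is then dropped and the source operand is rewritten to the value's current home. A sketch (reload_def is a hypothetical helper; set_src_val is from line 1017):

static void
reload_src(struct ra_spill_ctx *ctx, struct ir3_instruction *instr,
           struct ir3_register *src)
{
   struct ra_spill_interval *interval = ctx->intervals[src->def->name];

   if (interval->needs_reload) {
      /* Rematerialize or reload, then rewrite this interval (and its
       * children, line 958) to point at the new def. */
      reload_def(ctx, src->def, instr, instr->block);
   }

   ra_spill_interval_root(interval)->cant_spill = false;
}

static void
rewrite_src(struct ra_spill_ctx *ctx, struct ir3_instruction *instr,
            struct ir3_register *src)
{
   struct ra_spill_interval *interval = ctx->intervals[src->def->name];
   set_src_val(src, &interval->dst); /* operand now reads the live copy */
}
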
1089 * interval to remove before the source itself is changed.
1131 struct ra_spill_interval *interval = rzalloc(ctx, struct ra_spill_interval);
1132 ra_spill_interval_init(interval, reg);
1133 ctx->intervals[name] = interval;
1135 return interval;
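
Lines 1131-1135 allocate interval state lazily for registers created by the spiller itself (reload and rematerialization temps), which did not exist when ctx->intervals was first populated. A sketch of such an accessor; the wrapping function is hypothetical, the rzalloc call is from the fragment:

static struct ra_spill_interval *
get_interval(struct ra_spill_ctx *ctx, struct ir3_register *reg,
             unsigned name)
{
   if (ctx->intervals[name])
      return ctx->intervals[name];

   /* New temp: zero-allocate bookkeeping on the ctx ralloc context. */
   struct ra_spill_interval *interval =
      rzalloc(ctx, struct ra_spill_interval);
   ra_spill_interval_init(interval, reg);
   ctx->intervals[name] = interval;
   return interval;
}
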
1214 if (src_interval->interval.inserted) {
1227 struct ir3_register *temp = temp_interval->interval.reg;
1315 struct ra_spill_interval *interval = ctx->intervals[def->name];
1316 ra_spill_interval_init(interval, def);
1318 interval->next_use_distance =
1322 ra_spill_ctx_insert(ctx, interval);
1429 rb_tree_foreach_safe (struct ra_spill_interval, interval,
1432 is_live_in_all_preds(ctx, interval->interval.reg, block))
1434 if (interval->interval.reg->merge_set ||
1435 !interval->can_rematerialize)
1436 spill_live_in(ctx, interval->interval.reg, block);
1437 ir3_reg_interval_remove_all(&ctx->reg_ctx, &interval->interval);
1444 rb_tree_foreach_safe (struct ra_spill_interval, interval,
1447 is_live_in_all_preds(ctx, interval->interval.reg, block))
1449 spill_live_in(ctx, interval->interval.reg, block);
1450 ir3_reg_interval_remove_all(&ctx->reg_ctx, &interval->interval);
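
Lines 1429-1450 handle block entry: a value that is not live in a register in every predecessor cannot simply flow across the edge, so it is spilled as a live-in; as in the eviction loop, the first pass (lines 1434-1435) skips the store for rematerializable values outside a merge set. A sketch of the predicate driving both loops; the signature follows the call at line 1432, while the predecessor walk and helper are assumptions:

static bool
is_live_in_all_preds(struct ra_spill_ctx *ctx, struct ir3_register *def,
                     struct ir3_block *block)
{
   for (unsigned i = 0; i < block->predecessors_count; i++) {
      /* Hypothetical helper: is the value still in a register in the
       * i'th predecessor's recorded live-out remap? */
      if (!is_live_in_pred(ctx, def, block, i))
         return false;
   }
   return true;
}
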
1459 struct ra_spill_interval *interval,
1465 struct ir3_register *def = interval->interval.reg;
1474 &interval->interval.children, interval.node) {
1478 (child->interval.reg->interval_start - def->interval_start) /
1479 reg_elem_size(def), reg_elems(child->interval.reg),
1492 struct ra_spill_interval *interval = ctx->intervals[def->name];
1506 if (interval->can_rematerialize)
1512 live_in_rewrite(ctx, interval, new_val, block, i);
1519 rb_tree_foreach (struct ra_spill_interval, interval, &ctx->reg_ctx.intervals,
1520 interval.node) {
1521 reload_live_in(ctx, interval->interval.reg, block);
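
Lines 1492-1521 do the inverse at block entry: every interval that should be live in is given a register value again, recomputed if it can be rematerialized (line 1506) or reloaded from its slot, and the result is wired into each incoming edge via live_in_rewrite (line 1512). A sketch; rematerialize and reload are hypothetical helper names, and new_val's type is assumed:

static void
reload_live_in(struct ra_spill_ctx *ctx, struct ir3_register *def,
               struct ir3_block *block)
{
   struct ra_spill_interval *interval = ctx->intervals[def->name];

   struct ir3_register *new_val;
   if (interval->can_rematerialize)
      new_val = rematerialize(ctx, def, block);
   else
      new_val = reload(ctx, def, block);

   /* Make every incoming edge feed the reloaded value (line 1512). */
   for (unsigned i = 0; i < block->predecessors_count; i++)
      live_in_rewrite(ctx, interval, new_val, block, i);
}
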
1529 struct ra_spill_interval *interval = ctx->intervals[def->name];
1530 if (!interval->interval.inserted)
1558 interval->dst.def = cur_def;
1559 interval->dst.flags = cur_def->flags;
1593 interval->dst.def = dst;
1594 interval->dst.flags = dst->flags;
1613 struct ra_spill_interval *interval = ctx->intervals[reg->name];
1616 interval->dst = *val;
1618 ra_spill_ctx_remove(ctx, interval);
1626 if (!ctx->intervals[phi->dsts[0]->name]->interval.inserted) {
1651 spill_live_out(struct ra_spill_ctx *ctx, struct ra_spill_interval *interval,
1654 struct ir3_register *def = interval->interval.reg;
1656 if (interval->interval.reg->merge_set ||
1657 !interval->can_rematerialize)
1658 spill(ctx, &interval->dst, get_spill_slot(ctx, def), NULL, block);
1659 ir3_reg_interval_remove_all(&ctx->reg_ctx, &interval->interval);
1666 rb_tree_foreach_safe (struct ra_spill_interval, interval,
1667 &ctx->reg_ctx.intervals, interval.node) {
1668 if (!BITSET_TEST(state->live_out, interval->interval.reg->name)) {
1669 spill_live_out(ctx, interval, block);
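
Lines 1651-1669 are the mirror image at block exit: after the last instruction, any interval whose register is absent from the block's live_out bitset is spilled and dropped. Assembled from the fragments, with the signature's block parameter assumed:

static void
spill_live_out(struct ra_spill_ctx *ctx, struct ra_spill_interval *interval,
               struct ir3_block *block)
{
   struct ir3_register *def = interval->interval.reg;

   /* As elsewhere, a rematerializable value outside a merge set needs
    * no store; it is simply recomputed where next needed. */
   if (interval->interval.reg->merge_set || !interval->can_rematerialize)
      spill(ctx, &interval->dst, get_spill_slot(ctx, def), NULL, block);

   ir3_reg_interval_remove_all(&ctx->reg_ctx, &interval->interval);
}
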
1678 struct ra_spill_interval *interval = ctx->intervals[def->name];
1679 ir3_reg_interval_insert(&ctx->reg_ctx, &interval->interval);
1691 struct ra_spill_interval *interval = ctx->intervals[name];
1692 if (!interval->interval.inserted)
1712 struct ra_spill_interval *interval = ctx->intervals[def->name];
1713 if (!interval->interval.inserted)
1715 set_src_val(instr->srcs[pred_idx], &interval->dst);
1721 struct ra_spill_interval *interval,
1727 struct ir3_register *def = interval->interval.reg;
1734 &interval->interval.children, interval.node) {
1752 rb_tree_foreach (struct ra_spill_interval, interval,
1753 &ctx->reg_ctx.intervals, interval.node) {
1754 record_pred_live_out(ctx, interval, block, i);
1762 struct ra_spill_interval *interval)
1764 if (!(interval->dst.flags & IR3_REG_SSA) ||
1765 interval->dst.def) {
1767 *val = interval->dst;
1768 _mesa_hash_table_insert(state->remap, interval->interval.reg, val);
1771 &interval->interval.children, interval.node) {
1782 rb_tree_foreach (struct ra_spill_interval, interval, &ctx->reg_ctx.intervals,
1783 interval.node) {
1784 record_live_out(ctx, state, interval);
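
Lines 1762-1784 snapshot, per block, where each still-live value ended up: the interval's dst (a register, or an immediate produced by rematerialization) is stored in a remap hash table keyed by the original def, and children of merged intervals are recorded recursively; successors consult this table when rewriting phis and live-ins (line 1715). The per-predecessor variant at lines 1721-1754 follows the same pattern. Assembled from the fragments; the block-state type name, the ralloc call, and the reg_or_immed name (as assumed earlier) are assumptions:

static void
record_live_out(struct ra_spill_ctx *ctx,
                struct ra_spill_block_state *state,
                struct ra_spill_interval *interval)
{
   /* Record anything with a concrete home: either a non-SSA value
    * (e.g. an immediate) or a rewritten def (lines 1764-1768). */
   if (!(interval->dst.flags & IR3_REG_SSA) || interval->dst.def) {
      struct reg_or_immed *val = ralloc(ctx, struct reg_or_immed);
      *val = interval->dst;
      _mesa_hash_table_insert(state->remap, interval->interval.reg, val);
   }

   rb_tree_foreach (struct ra_spill_interval, child,
                    &interval->interval.children, interval.node) {
      record_live_out(ctx, state, child);
   }
}
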
2022 * interval information which isn't trashed by spilling, and forcibly merge