Lines Matching refs:tuple

50 /* State of a single tuple and clause under construction */
58 * "forced" for the next scheduled tuple, provided such a tuple
63 /* The previous tuple scheduled (= the next tuple executed in the
72 /* Is this the last tuple in the clause */
78 /* Reads for previous (succeeding) tuple */
83 /* Register slot state for current tuple */
86 /* Constants are shared in the tuple. If constant_count is nonzero, it
89 * but within a tuple, that should be encoded as constant_count != 0
322 struct bi_clause_state *clause, struct bi_tuple_state *tuple)
324 bi_instr *pinstr = tuple->add;
345 bi_tuple_state *tuple)
347 bi_instr *pinstr = tuple->add;
364 bi_tuple_state *tuple)
366 bi_instr *pinstr = tuple->add;
385 struct bi_clause_state *clause, struct bi_tuple_state *tuple)
387 bi_instr *pinstr = tuple->add;
405 struct bi_clause_state *clause, struct bi_tuple_state *tuple)
407 bi_instr *add = tuple->add;
772 /* Would there be space for constants if we added one tuple? */
780 /* Updates the FAU assignment for a tuple. A valid FAU assignment must be
782 * this is guaranteed per-instruction by bi_lower_fau and per-tuple by
787 struct bi_tuple_state *tuple,
792 unsigned *constant_count = &tuple->constant_count;
793 uint32_t *constants = tuple->constants;
794 enum bir_fau fau = tuple->fau;
797 memcpy(copied_constants, tuple->constants,
799 copied_count = tuple->constant_count;
815 tuple->fau = src.value;
833 (i != tuple->pcrel_idx);
847 tuple->pcrel_idx = *constant_count;
855 /* Constants per clause may be limited by tuple count */
867 /* Given an in-progress tuple, a candidate new instruction to add to the tuple,
887 /* If a source is already read in the tuple, it is already counted */
960 /* Likewise for cross-tuple passthrough (reads_temps) */
980 * for a write that will be discarded after one tuple. */
1037 struct bi_tuple_state *tuple,
1050 if (bi_opcode_props[instr->op].last && !tuple->last)
1053 if (bi_must_not_last(instr) && tuple->last)
1099 if (!bi_update_fau(clause, tuple, instr, fma, false))
1104 if (tuple->add && bi_has_staging_passthrough_hazard(instr->dest[0], tuple->add))
1107 /* If this choice of destination would force a cross-tuple passthrough, the next tuple must support that */
1108 if (tuple->prev && bi_has_cross_passthrough_hazard(tuple->prev, instr))
1112 unsigned total_writes = tuple->reg.nr_writes;
1115 /* Last tuple in a clause can only write a single value */
1116 if (tuple->last && total_writes > 1)
1124 if (bi_tuple_is_new_src(instr, &tuple->reg, s))
1128 unsigned total_srcs = tuple->reg.nr_reads + unique_new_srcs;
1130 bool can_spill_to_moves = (!tuple->add);
1140 tuple->add ? tuple->add->dest[0] : bi_null(),
1141 tuple->prev_reads, tuple->nr_prev_reads);
1151 bi_instr_cost(bi_instr *instr, struct bi_tuple_state *tuple)
1177 struct bi_tuple_state *tuple,
1187 if (!bi_instr_schedulable(instr, clause, tuple, live_after_temp, fma))
1190 signed cost = bi_instr_cost(instr, tuple);
1207 bi_pop_instr(struct bi_clause_state *clause, struct bi_tuple_state *tuple,
1210 bi_update_fau(clause, tuple, instr, fma, true);
1218 tuple->reg.nr_writes += bi_write_count(instr, live_after_temp);
1221 if (bi_tuple_is_new_src(instr, &tuple->reg, s))
1222 tuple->reg.reads[tuple->reg.nr_reads++] = instr->src[s];
1239 struct bi_tuple_state *tuple,
1243 if (tuple->add && tuple->add->op == BI_OPCODE_CUBEFACE)
1244 return bi_lower_cubeface(ctx, clause, tuple);
1245 else if (tuple->add && tuple->add->op == BI_OPCODE_ATOM_RETURN_I32)
1246 return bi_lower_atom_c(ctx, clause, tuple);
1247 else if (tuple->add && tuple->add->op == BI_OPCODE_ATOM1_RETURN_I32)
1248 return bi_lower_atom_c1(ctx, clause, tuple);
1249 else if (tuple->add && tuple->add->op == BI_OPCODE_SEG_ADD_I64)
1250 return bi_lower_seg_add(ctx, clause, tuple);
1251 else if (tuple->add && tuple->add->table)
1252 return bi_lower_dtsel(ctx, clause, tuple);
1255 if (!fma && tuple->nr_prev_reads > 3) {
1257 assert(tuple->nr_prev_reads == 4);
1260 bi_index src = tuple->prev_reads[0];
1263 bi_builder b = bi_init_builder(ctx, bi_before_tuple(tuple->prev));
1265 bi_pop_instr(clause, tuple, mov, live_after_temp, fma);
1271 if ((bifrost_debug & BIFROST_DBG_NOSCHED) && tuple->add)
1275 unsigned idx = bi_choose_index(st, clause, tuple, live_after_temp, fma);
1285 bi_pop_instr(clause, tuple, instr, live_after_temp, fma);
1330 * write to the same logical register, the next executed tuple will get the
1351 bi_rewrite_fau_to_pass(bi_tuple *tuple)
1353 bi_foreach_instr_and_src_in_tuple(tuple, ins, s) {
1379 bi_rewrite_constants_to_pass(bi_tuple *tuple, uint64_t constant, bool pcrel)
1381 bi_foreach_instr_and_src_in_tuple(tuple, ins, s) {
1409 /* Constructs a constant state given a tuple state. This has the
1415 bi_get_const_state(struct bi_tuple_state *tuple)
1418 .constant_count = tuple->constant_count,
1419 .constants[0] = tuple->constants[0],
1420 .constants[1] = tuple->constants[1],
1421 .pcrel = tuple->add && tuple->add->branch_target,
1439 * more than one tuple has pcrel:
1441 * 1. If a tuple has two constants, they must be packed together. If one is
1445 * 4. If a tuple has one constant, it may be shared with an existing
1447 * (distinct) tuple of a single constant.
1451 * tuple count 5/8 in EC0.
1601 /* Clauses with these tuple counts lack an M field for the packed EC0,
1668 bi_tuple *tuple = NULL;
1683 * the next tuple in the program. If you scheduled forwards, you'd need
1685 * delay updates to the live_after_temp by an extra tuple. */
1694 .prev = tuple,
1703 tuple = &clause->tuples[idx];
1712 tuple->fma = bi_take_instr(ctx, st, &clause_state, &tuple_state, live_after_temp, true);
1713 tuple->add = tuple_state.add;
1716 if (tuple->add)
1717 *live = bi_postra_liveness_ins(*live, tuple->add);
1719 if (tuple->fma)
1720 *live = bi_postra_liveness_ins(*live, tuple->fma);
1722 /* Rotate in the new per-tuple liveness */
1727 if (tuple->add && bi_must_message(tuple->add)) {
1732 bi_message_type_for_instr(tuple->add);
1733 clause->message = tuple->add;
1739 switch (tuple->add->op) {
1762 if (tuple->fma && bi_reads_zero(tuple->fma))
1763 bi_rewrite_zero(tuple->fma, true);
1767 tuple->fau_idx = tuple_state.fau;
1768 bi_rewrite_fau_to_pass(tuple);
1776 if (tuple->fma) {
1777 bi_use_passthrough(tuple->add, tuple->fma->dest[0],
1781 /* Don't add an empty tuple, unless the worklist has nothing
1791 if (!(tuple->fma || tuple->add || insert_empty))
1796 /* Adding enough tuple might overflow constants */
1801 /* Don't schedule more than 1 tuple if debugging */
1819 bi_tuple *tuple = &clause->tuples[i];
1825 bi_foreach_instr_in_tuple(tuple, ins)
1832 assert(tuple->fau_idx == BIR_FAU_ZERO);
1847 bi_tuple *tuple = &clause->tuples[i];
1854 assert(!tuple->fau_idx);
1864 tuple->fau_idx = bi_constant_field(word_idx) | lo;
1865 bi_rewrite_constants_to_pass(tuple, pair, word_idx == pcrel_idx);
1884 /* Use passthrough register for cross-tuple accesses. Note this is
1885 * after the memmove, so this is forwards. Skip the first tuple since