Lines Matching defs:inst — definition/use matches for the identifier `inst`. The number at the start of each line is that line's number in the source file (by the identifiers, Mesa's v3d compiler register spilling/allocation pass, apparently vir_register_allocate.c). Non-matching lines are omitted, so the fragments below are discontiguous.
92 struct qinst *inst)
94 return (inst->dst.file == QFILE_MAGIC &&
95 v3d_qpu_magic_waddr_is_tmu(devinfo, inst->dst.index)) ||
96 inst->qpu.sig.wrtmuc;
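
Lines 92-96 form the body of qinst_writes_tmu(): an instruction writes the TMU either by targeting a magic TMU write address or by raising the wrtmuc (TMU config) signal. Below is a minimal self-contained sketch of the same predicate; the stand-in types and the is_tmu_waddr() stub are assumptions for illustration, not the real definitions from the v3d compiler headers.

    #include <stdbool.h>
    #include <stdint.h>

    enum qfile { QFILE_TEMP, QFILE_MAGIC, QFILE_REG };

    struct qreg {
            enum qfile file;
            uint32_t index;
    };

    struct qinst {
            struct qreg dst;
            struct {
                    struct { bool wrtmuc; } sig; /* only the field used here */
            } qpu;
    };

    /* Stub for v3d_qpu_magic_waddr_is_tmu(); the real check is a
     * device-generation-dependent range test on the waddr. */
    static bool
    is_tmu_waddr(uint32_t index)
    {
            (void)index;
            return false; /* placeholder */
    }

    static bool
    qinst_writes_tmu(const struct qinst *inst)
    {
            return (inst->dst.file == QFILE_MAGIC &&
                    is_tmu_waddr(inst->dst.index)) ||
                   inst->qpu.sig.wrtmuc;
    }
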
101 struct qinst *inst, struct qblock *block)
104 bool is_tmuwt = inst->qpu.type == V3D_QPU_INSTR_TYPE_ALU &&
105 inst->qpu.alu.add.op == V3D_QPU_A_TMUWT;
106 bool is_ldtmu = inst->qpu.sig.ldtmu;
111 list_for_each_entry_from(struct qinst, scan_inst, inst->link.next,
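
Lines 101-111 are from is_end_of_tmu_sequence(): an instruction closes a TMU sequence only if it is a TMUWT or carries the ldtmu signal, and no further TMUWT/ldtmu follows in the block before the next TMU write starts. The listing's scan uses list_for_each_entry_from() over the block's instruction list; the self-contained sketch below models each instruction by nothing but its TMU role, which is enough to show the scan's logic.

    /* Sketch: TMU_END stands for an ldtmu or TMUWT (lines 104-106),
     * TMU_WRITE for anything qinst_writes_tmu() accepts (lines 92-96). */
    enum tmu_role { TMU_NONE, TMU_WRITE, TMU_END };

    static bool
    is_end_of_tmu_sequence(const enum tmu_role *insts, int count, int i)
    {
            if (insts[i] != TMU_END)
                    return false;

            /* Scan forward: another ldtmu/TMUWT first means this was not
             * the last one; a fresh TMU write first (or running off the
             * end of the block) means the sequence ended here. */
            for (int s = i + 1; s < count; s++) {
                    if (insts[s] == TMU_END)
                            return false;
                    if (insts[s] == TMU_WRITE)
                            return true;
            }
            return true;
    }
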
136 can_reconstruct_inst(struct qinst *inst)
138 assert(inst);
140 if (vir_is_add(inst)) {
141 switch (inst->qpu.alu.add.op) {
153 return inst->qpu.flags.ac == V3D_QPU_COND_NONE &&
154 inst->qpu.flags.auf == V3D_QPU_UF_NONE &&
155 inst->qpu.flags.apf == V3D_QPU_PF_NONE &&
156 inst->qpu.alu.add.output_pack == V3D_QPU_PACK_NONE;
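
Lines 136-156 are from can_reconstruct_inst(), the rematerialization test: some values are cheaper to recompute at each use than to spill and refill. The case labels at lines 142-152 contain no `inst` and are therefore absent from this listing; they enumerate the ADD-ALU opcodes whose results depend only on invariant state. The surviving lines add the remaining conditions: no conditional execution, no flag updates, no output packing. A condensed sketch, assuming the v3d declarations are in scope; op_is_rematerializable() is a hypothetical stand-in for the elided case labels.

    static bool
    can_reconstruct_inst(struct qinst *inst)
    {
            assert(inst);

            if (!vir_is_add(inst))
                    return false;

            /* Hypothetical: true only for the opcode whitelist elided
             * at lines 142-152. */
            if (!op_is_rematerializable(inst->qpu.alu.add.op))
                    return false;

            /* Conditions, flag writes, or packing make the result depend
             * on more than the opcode alone, so refuse those. */
            return inst->qpu.flags.ac == V3D_QPU_COND_NONE &&
                   inst->qpu.flags.auf == V3D_QPU_UF_NONE &&
                   inst->qpu.flags.apf == V3D_QPU_PF_NONE &&
                   inst->qpu.alu.add.output_pack == V3D_QPU_PACK_NONE;
    }
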
240 vir_for_each_inst(inst, block) {
249 for (int i = 0; i < vir_get_nsrc(inst); i++) {
250 if (inst->src[i].file != QFILE_TEMP)
253 int temp = inst->src[i].index;
270 if (inst->dst.file == QFILE_TEMP) {
271 int temp = inst->dst.index;
289 if (inst->qpu.sig.ldvary) {
290 assert(inst->dst.file == QFILE_TEMP);
291 BITSET_CLEAR(c->spillable, inst->dst.index);
294 if (inst->is_last_thrsw)
297 if (v3d_qpu_writes_vpm(&inst->qpu) ||
298 v3d_qpu_uses_tlb(&inst->qpu))
305 if (is_end_of_tmu_sequence(c->devinfo, inst, block))
308 if (qinst_writes_tmu(c->devinfo, inst))
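
Lines 240-308 come from the walk that picks a spill candidate: every read or write of a temp raises that temp's spill cost, while temps that must never be spilled are struck from the spillable bitset, e.g. ldvary destinations (line 291); the walk also notes the last thread switch, VPM/TLB accesses, and TMU sequence boundaries so spill code is not placed where a TMU operation is in flight. A compressed sketch of the accounting; spill_costs[] and block_scale are assumptions standing in for the pass's real bookkeeping.

    vir_for_each_inst(inst, block) {
            /* Each read of a temp makes it more expensive to spill... */
            for (int i = 0; i < vir_get_nsrc(inst); i++) {
                    if (inst->src[i].file != QFILE_TEMP)
                            continue;
                    spill_costs[inst->src[i].index] += block_scale;
            }

            /* ...and so does each write. */
            if (inst->dst.file == QFILE_TEMP)
                    spill_costs[inst->dst.index] += block_scale;

            /* ldvary results are consumed in place by the varying
             * input sequence and cannot be spilled at all. */
            if (inst->qpu.sig.ldvary) {
                    assert(inst->dst.file == QFILE_TEMP);
                    BITSET_CLEAR(c->spillable, inst->dst.index);
            }
    }
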
444 struct qinst *inst = vir_ADD_dest(c, tmua, c->spill_base, offset);
445 inst->qpu.flags.ac = cond;
446 inst->uniform = vir_get_uniform_index(c, QUNIFORM_CONSTANT,
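
Lines 444-446 show how a spill/fill address is emitted: an ADD of c->spill_base plus a per-channel offset is written to the tmua magic register, which is what actually launches the TMU memory operation; the predicate of the spilled instruction is copied onto the address write, and the constant spill offset is attached as a uniform. Slightly expanded, assuming the vir_reg() builder and the V3D_QPU_WADDR_TMUA enum from the compiler headers:

    /* Writing the address to TMUA kicks off the TMU op; the data (TMUD)
     * write and the closing LDTMU/TMUWT are emitted around it. */
    struct qreg tmua = vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_TMUA);
    struct qinst *inst = vir_ADD_dest(c, tmua, c->spill_base, offset);
    inst->qpu.flags.ac = cond;   /* only spill on the active channels */
    inst->uniform = vir_get_uniform_index(c, QUNIFORM_CONSTANT,
                                          spill_offset);
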
491 struct qinst *inst,
497 assert(inst->qpu.type == V3D_QPU_INSTR_TYPE_ALU);
498 assert(inst->dst.file == QFILE_TEMP);
502 enum v3d_qpu_cond cond = vir_get_cond(inst);
504 /* If inst and position don't match, this is a postponed spill,
509 if (inst == position) {
511 inst->dst.index);
512 inst->dst = vir_get_temp(c);
513 add_node(c, inst->dst.index, class_bits);
515 inst->dst = spill_temp;
527 inst->dst);
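
Lines 491-527 are from v3d_emit_tmu_spill(). The subtlety is the inst == position test at line 509: a def that needs spilling may sit inside an in-flight TMU sequence, where emitting another TMU op would corrupt it, so the spill is postponed until the sequence completes. By that point the insertion position has moved past the defining instruction, and the pre-allocated spill_temp (handed out when the spill was deferred) is used instead of a fresh temp. A sketch of the two branches, assuming the surrounding helpers shown elsewhere in the listing (add_node at line 513, get_temp_class_bits at line 1044):

    c->cursor = vir_after_inst(position);
    enum v3d_qpu_cond cond = vir_get_cond(inst);

    if (inst == position) {
            /* Spilling right at the def: redirect the result into a
             * fresh temp with the original's register-class bits. */
            uint8_t class_bits =
                    get_temp_class_bits(&c->nodes, inst->dst.index);
            inst->dst = vir_get_temp(c);
            add_node(c, inst->dst.index, class_bits);
    } else {
            /* Postponed spill: reuse the temp reserved for it. */
            inst->dst = spill_temp;
    }

    /* Store the def through the TMU: the data goes to TMUD, then the
     * TMUA address write (lines 444-446) starts the store. */
    vir_MOV_dest(c, vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_TMUD), inst->dst);
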
585 vir_for_each_inst_safe(inst, block) {
586 int32_t ip = inst->ip;
595 if (is_end_of_tmu_sequence(c->devinfo, inst, block)) {
599 inst, ip, spill_offset);
607 qinst_writes_tmu(c->devinfo, inst)) {
608 start_of_tmu_sequence = inst;
613 for (int i = 0; i < vir_get_nsrc(inst); i++) {
614 if (inst->src[i].file != QFILE_TEMP ||
615 inst->src[i].index != spill_temp) {
620 inst->src[i] = inst->src[filled_src];
624 c->cursor = vir_before_inst(inst);
631 inst->src[i] = unif;
640 inst->src[i] = temp;
654 inst->src[i] =
665 fill_ip, &inst->src[i]);
674 if (inst->dst.file == QFILE_TEMP &&
675 inst->dst.index == spill_temp) {
678 vir_remove_instruction(c, inst);
692 vir_get_cond(inst) == V3D_QPU_COND_NONE) {
699 postponed_spill = inst;
701 v3d_emit_tmu_spill(c, inst,
703 inst, ip,
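
Lines 585-703 are the main rewrite loop of v3d_spill_reg(). Reads of the spilled temp are filled at most once per instruction: a second read reuses the first fill (filled_src, line 620); otherwise the loop reloads a uniform, rematerializes the def via can_reconstruct_inst(), or emits a TMU fill, placed before the enclosing TMU sequence if there is one. Defs of the spilled temp are removed outright when the value is refetchable (is_uniform below flags a temp defined by a uniform load), postponed when inside a TMU sequence, and spilled on the spot otherwise. The control skeleton, heavily condensed; fill_one_src() and spill_def() are hypothetical condensations of the branches in the listing.

    vir_for_each_inst_safe(inst, block) {
            int32_t ip = inst->ip;

            /* A completed TMU sequence releases any postponed spill. */
            if (is_end_of_tmu_sequence(c->devinfo, inst, block)) {
                    if (postponed_spill)
                            spill_def(c, postponed_spill, inst, ip);
                    start_of_tmu_sequence = NULL;
                    postponed_spill = NULL;
            }
            if (!start_of_tmu_sequence &&
                qinst_writes_tmu(c->devinfo, inst))
                    start_of_tmu_sequence = inst;

            /* Fill reads of the spilled temp, at most once per inst. */
            int filled_src = -1;
            for (int i = 0; i < vir_get_nsrc(inst); i++) {
                    if (inst->src[i].file != QFILE_TEMP ||
                        inst->src[i].index != spill_temp)
                            continue;
                    if (filled_src >= 0) {
                            inst->src[i] = inst->src[filled_src];
                            continue;
                    }
                    c->cursor = vir_before_inst(inst);
                    inst->src[i] = fill_one_src(c, inst, ip);
                    filled_src = i;
            }

            /* Handle a def of the spilled temp. */
            if (inst->dst.file == QFILE_TEMP &&
                inst->dst.index == spill_temp) {
                    if (is_uniform || can_reconstruct_inst(inst))
                            vir_remove_instruction(c, inst);
                    else if (start_of_tmu_sequence)
                            postponed_spill = inst;  /* defer to seq end */
                    else
                            spill_def(c, inst, inst, ip);
            }
    }
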
729 vir_for_each_inst_inorder(inst, c)
730 inst->ip = ip++;
946 struct qinst *inst)
948 int32_t ip = inst->ip;
955 if (vir_writes_r3(c->devinfo, inst)) {
965 if (vir_writes_r4(c->devinfo, inst)) {
975 if (inst->qpu.type == V3D_QPU_INSTR_TYPE_ALU) {
976 switch (inst->qpu.alu.add.op) {
987 assert(inst->dst.file == QFILE_TEMP);
988 set_temp_class_bits(&c->nodes, inst->dst.index,
1002 assert(inst->dst.file == QFILE_TEMP);
1003 set_temp_class_bits(&c->nodes, inst->dst.index,
1013 if (inst->src[0].file == QFILE_REG) {
1014 switch (inst->src[0].index) {
1023 assert(inst->qpu.alu.mul.op == V3D_QPU_M_MOV);
1024 assert(inst->dst.file == QFILE_TEMP);
1025 uint32_t node = temp_to_node(inst->dst.index);
1027 PHYS_INDEX + inst->src[0].index);
1033 if (inst->dst.file == QFILE_TEMP) {
1042 if (!inst->qpu.sig.ldunif) {
1044 get_temp_class_bits(&c->nodes, inst->dst.index) &
1046 set_temp_class_bits(&c->nodes, inst->dst.index,
1055 set_temp_class_bits(&c->nodes, inst->dst.index,
1062 if (inst->qpu.sig.thrsw) {
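
Lines 946-1062 are from update_graph_and_reg_classes_for_inst(), which builds interference and narrows register classes while walking the program: instructions that implicitly write r3 or r4 (lines 955/965) make values live there interfere with the corresponding accumulator node; the ADD-opcode switch at line 976 (cases elided from the listing) pins destinations such as VPM reads to the physical register file; a MOV from a hardcoded QFILE_REG source (lines 1013-1027) force-assigns the node; ordinary (non-ldunif) writes lose the r5 class; and a thrsw (line 1062) invalidates accumulators across the thread switch. The class-narrowing idiom, sketched with the helpers visible in the listing; op_pins_to_phys() is a hypothetical stand-in for the elided opcode cases, and the exact masks are assumptions.

    /* Some ops must not land in an accumulator: restrict the dst temp
     * to the physical register file (pattern at lines 987-1003). */
    if (inst->qpu.type == V3D_QPU_INSTR_TYPE_ALU &&
        op_pins_to_phys(inst->qpu.alu.add.op)) {  /* hypothetical */
            assert(inst->dst.file == QFILE_TEMP);
            set_temp_class_bits(&c->nodes, inst->dst.index,
                                CLASS_BITS_PHYS);
    }

    /* Only ldunif/ldvary results may live in r5, so an ordinary write
     * strips that class while keeping whatever the temp still had
     * (pattern at lines 1042-1046). */
    if (inst->dst.file == QFILE_TEMP && !inst->qpu.sig.ldunif) {
            uint8_t class_bits =
                    get_temp_class_bits(&c->nodes, inst->dst.index) &
                    ~CLASS_BITS_R5;
            set_temp_class_bits(&c->nodes, inst->dst.index, class_bits);
    }
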
1137 vir_for_each_inst_inorder(inst, c) {
1138 inst->ip = ip++;
1139 update_graph_and_reg_classes_for_inst(c, acc_nodes, inst);
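
Lines 1137-1139 close the loop: register allocation starts by numbering every instruction in order and feeding each one through the function above, before liveness-based interference and graph coloring are computed. Assembled (these three matched lines are contiguous in the source):

    uint32_t ip = 0;
    vir_for_each_inst_inorder(inst, c) {
            inst->ip = ip++;
            update_graph_and_reg_classes_for_inst(c, acc_nodes, inst);
    }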