/third_party/mesa3d/src/amd/compiler/
aco_assembler.cpp
    69    if (instr->operands[3 + i].physReg() != instr->operands[3].physReg().advance(i * 4))  in get_mimg_nsa_dwords()
    116   encoding |= !instr->definitions.empty() ? instr->definitions[0].physReg() << 16 : 0;  in emit_instruction()
    117   encoding |= instr->operands.size() >= 2 ? instr->operands[1].physReg() << 8 : 0;  in emit_instruction()
    118   encoding |= !instr->operands.empty() ? instr->operands[0].physReg() : 0;  in emit_instruction()
    141   encoding |= !instr->definitions.empty() && !(instr->definitions[0].physReg() == scc)  in emit_instruction()
    142   ? instr->definitions[0].physReg() << 16  in emit_instruction()
    143   : !instr->operands.empty() && instr->operands[0].physReg() <= 127  in emit_instruction()
    144   ? instr->operands[0].physReg() << 16  in emit_instruction()
    152   encoding |= !instr->definitions.empty() ? instr->definitions[0].physReg() << 1 …  in emit_instruction()
    [all ...]
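The aco_assembler.cpp hits show physical register numbers being masked and shifted into fixed bit positions of a 32-bit instruction word (destination around bit 16, sources in the low bytes). A minimal standalone sketch of that packing idiom, assuming a made-up field layout; pack_salu and the field widths are illustrative, not the real GFX encodings:

    #include <cinttypes>
    #include <cstdint>
    #include <cstdio>

    // Hypothetical layout: dst in bits [22:16], src1 in [15:8], src0 in [7:0].
    // Real GFX instruction formats differ; this only mirrors the packing pattern
    // visible in emit_instruction().
    uint32_t pack_salu(unsigned dst_reg, unsigned src0_reg, unsigned src1_reg)
    {
        uint32_t encoding = 0;
        encoding |= (dst_reg & 0x7fu) << 16;  /* destination physical register */
        encoding |= (src1_reg & 0xffu) << 8;  /* second source register */
        encoding |= (src0_reg & 0xffu);       /* first source register */
        return encoding;
    }

    int main()
    {
        /* e.g. "s2 = op s0, s1" with register numbers 2, 0, 1 */
        printf("0x%08" PRIx32 "\n", pack_salu(2, 0, 1));
        return 0;
    }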
aco_lower_to_hw_instr.cpp
    509   vcndmask_identity[i], Operand(PhysReg{src.physReg() + i}, v1),  in emit_reduction()
    589   bld.readlane(Definition(PhysReg{dst.physReg() + i}, s1), Operand(PhysReg{tmp + i}, v1),  in emit_reduction()
    592   emit_op(ctx, tmp, dst.physReg(), tmp, vtmp, reduce_op, src.size());  in emit_reduction()
    626   bld.readlane(Definition(PhysReg{dst.physReg() + i}, s1), Operand(PhysReg{tmp + i}, v1),  in emit_reduction()
    629   emit_op(ctx, tmp, dst.physReg(), tmp, vtmp, reduce_op, src.size());  in emit_reduction()
    724   identity[i].physReg() == PhysReg{sitmp + i});  in emit_reduction()
    817   emit_op(ctx, dst.physReg(), tmp, vtmp, PhysReg{0}, reduce_op, src.size());  in emit_reduction()
    830   bld.readlane(Definition(PhysReg{dst.physReg() + k}, s1), Operand(PhysReg{tmp + k}, v1),  in emit_reduction()
    833   } else if (dst.physReg() != tmp) {  in emit_reduction()
    835   bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{dst.physReg() …  in emit_reduction()
    [all ...]
aco_optimizer_postRA.cpp
    123   assert(def.regClass().type() != RegType::sgpr || def.physReg().reg() <= 255);  in save_reg_writes()
    124   assert(def.regClass().type() != RegType::vgpr || def.physReg().reg() >= 256);  in save_reg_writes()
    127   unsigned r = def.physReg().reg();  in save_reg_writes()
    141   last_writer_idx(pr_opt_ctx& ctx, PhysReg physReg, RegClass rc)  in last_writer_idx() argument
    144   assert(physReg.reg() < max_reg_cnt);  in last_writer_idx()
    145   Idx instr_idx = ctx.instr_idx_by_regs[ctx.current_block->index][physReg.reg()];  in last_writer_idx()
    147   unsigned r = physReg.reg();  in last_writer_idx()
    162   return last_writer_idx(ctx, op.physReg(), op.regClass());  in last_writer_idx()
    200   return is_clobbered_since(ctx, t.physReg(), t.regClass(), idx);  in is_clobbered_since()
    224   instr->operands[0].physReg() ! …  in try_apply_branch_vcc()
    [all ...]
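The aco_optimizer_postRA.cpp hits revolve around a per-block table indexed by physical register number that remembers which instruction last wrote each register (save_reg_writes / last_writer_idx). A simplified standalone sketch of that bookkeeping, assuming plain unsigned register numbers and int instruction indices in place of ACO's PhysReg/Idx types; pr_opt_ctx_sketch and the table size are hypothetical:

    #include <array>
    #include <cassert>

    constexpr unsigned max_reg_cnt = 512; // simplified combined SGPR + VGPR space

    struct pr_opt_ctx_sketch {
        // Last writer per 32-bit register slot, -1 = unknown/clobbered.
        std::array<int, max_reg_cnt> instr_idx_by_reg;
        pr_opt_ctx_sketch() { instr_idx_by_reg.fill(-1); }
    };

    // Record that instruction `instr_idx` wrote registers [reg, reg + size).
    void save_reg_writes(pr_opt_ctx_sketch& ctx, unsigned reg, unsigned size, int instr_idx)
    {
        assert(reg + size <= max_reg_cnt);
        for (unsigned i = 0; i < size; i++)
            ctx.instr_idx_by_reg[reg + i] = instr_idx;
    }

    // Return the last writer of the whole range, or -1 if its parts were
    // written by different (or unknown) instructions.
    int last_writer_idx(const pr_opt_ctx_sketch& ctx, unsigned reg, unsigned size)
    {
        assert(reg < max_reg_cnt && size >= 1);
        int idx = ctx.instr_idx_by_reg[reg];
        for (unsigned i = 1; i < size; i++) {
            if (ctx.instr_idx_by_reg[reg + i] != idx)
                return -1;
        }
        return idx;
    }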
aco_insert_NOPs.cpp
    220   if (regs_intersect(reg, mask_size, def.physReg(), def.size())) {  in handle_raw_hazard_instr()
    221   unsigned start = def.physReg() > reg ? def.physReg() - reg : 0;  in handle_raw_hazard_instr()
    282   state, state.block, min_states, op.physReg(), u_bit_consecutive(0, op.size()), false);  in handle_raw_hazard()
    343   test_bitset_range(ctx.smem_clause_write, op.physReg(), op.size())) {  in handle_smem_clause_hazards()
    350   if (!*NOPs && test_bitset_range(ctx.smem_clause_read_write, def.physReg(), def.size()))  in handle_smem_clause_hazards()
    406   if (op.physReg() == vccz)  in handle_instruction_gfx6()
    408   if (op.physReg() == execz)  in handle_instruction_gfx6()
    420   NOPs = MAX2(NOPs, ctx.vmem_store_then_wr_data[(def.physReg() & 0xff) + i]);  in handle_instruction_gfx6()
    499   set_bitset_range(ctx.smem_clause_read_write, op.physReg(), o…  in handle_instruction_gfx6()
    [all ...]
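Several of the aco_insert_NOPs.cpp checks reduce to an overlap test between two register ranges (regs_intersect, test_bitset_range on a physReg/size pair). The half-open-interval form of that test, as a sketch; the signature below is illustrative, not copied from the pass:

    // Ranges [a_reg, a_reg + a_size) and [b_reg, b_reg + b_size) overlap
    // iff each one starts before the other one ends.
    inline bool regs_intersect(unsigned a_reg, unsigned a_size,
                               unsigned b_reg, unsigned b_size)
    {
        return a_reg < b_reg + b_size && b_reg < a_reg + a_size;
    }

A hazard pass only needs this cheap test per operand/definition pair, which is why the registers are compared as plain ranges rather than through any per-register data structure.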
aco_ssa_elimination.cpp
    66    if (phi->operands[i].physReg() == phi->definitions[0].physReg())  in collect_phi_info()
    124   PhysReg scratch_sgpr = (*it)->definitions[0].physReg();  in insert_parallelcopies()
    153   if (ignore_exec_writes && instr->definitions[i].physReg() == exec)  in is_empty_block()
    155   if (instr->definitions[i].physReg() != instr->operands[i].physReg())  in is_empty_block()
    161   if (ignore_exec_writes && instr->definitions[0].physReg() == exec)  in is_empty_block()
    293   if (def.physReg() == exec || def.physReg() == exec_hi)  in instr_writes_exec()
    313   copy_to_exec |= successor_phi_info.def.physReg() …  in eliminate_useless_exec_writes_in_block()
    [all ...]
aco_register_allocation.cpp
    64    reg = def.physReg();  in set()
    328   fill_subdword(op.physReg(), op.bytes(), op.tempId());  in fill()
    330   fill(op.physReg(), op.size(), op.tempId());  in fill()
    333   void clear(Operand op) { clear(op.physReg(), op.regClass()); }  in clear()
    338   fill_subdword(def.physReg(), def.bytes(), def.tempId());  in fill()
    340   fill(def.physReg(), def.size(), def.tempId());  in fill()
    343   void clear(Definition def) { clear(def.physReg(), def.regClass()); }  in clear()
    791   def.setFixed(it->second.physReg());  in update_renames()
    793   ctx.assignments[def.tempId()].reg = def.physReg();  in update_renames()
    807   other.second.setFixed(it->second.physReg());  in update_renames()
    [all ...]
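The aco_register_allocation.cpp fill()/clear() hits track which temporary currently occupies each physical register. A reduced sketch of that occupancy map, assuming one 32-bit slot per register and ignoring the subdword (per-byte) case handled by fill_subdword(); RegisterFileSketch and the array size are stand-ins, not the real RegisterFile class:

    #include <array>
    #include <cstdint>

    struct RegisterFileSketch {
        // One entry per 32-bit register: the id of the temporary assigned
        // there, or 0 when the slot is free.
        std::array<uint32_t, 512> regs{}; // hypothetical combined SGPR + VGPR space

        void fill(unsigned reg, unsigned size, uint32_t temp_id) {
            for (unsigned i = 0; i < size; i++)
                regs[reg + i] = temp_id;
        }
        void clear(unsigned reg, unsigned size) { fill(reg, size, 0); }
        bool is_free(unsigned reg, unsigned size) const {
            for (unsigned i = 0; i < size; i++)
                if (regs[reg + i] != 0)
                    return false;
            return true;
        }
    };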
aco_validate.cpp
    165   check((instr->definitions[0].isFixed() && instr->definitions[0].physReg() == vcc) ||  in validate_ir()
    197   check(instr->operands[2].isFixed() && instr->operands[2].physReg() == vcc,  in validate_ir()
    201   check(instr->definitions[1].isFixed() && instr->definitions[1].physReg() == vcc,  in validate_ir()
    496   instr->definitions[1].physReg() == scc,  in validate_ir()
    655   check((op.isTemp() && op.regClass().type() == RegType::vgpr) || op.physReg() == m0,  in validate_ir()
    792   unsigned byte = op.physReg().byte();  in validate_subdword_operand()
    854   unsigned byte = def.physReg().byte();  in validate_subdword_definition()
    981   regs[def.physReg().reg_b + j] = 0;  in validate_instr_defs()
    1022  if (assignments[op.tempId()].valid && assignments[op.tempId()].reg != op.physReg())  in validate_ra()
    1027  op.physReg() …  in validate_ra()
    [all ...]
aco_live_var_analysis.cpp
    137   if (definition.isFixed() && definition.physReg() == vcc)  in process_live_temps_per_block()
    166   if (operand.isFixed() && operand.physReg() == vcc)  in process_live_temps_per_block()
    203   assert(insn->definitions[0].isFixed() && insn->definitions[0].physReg() == exec);  in process_live_temps_per_block()
    208   if (definition.isFixed() && definition.physReg() == vcc)  in process_live_temps_per_block()
    259   if (operand.isFixed() && operand.physReg() == vcc)  in process_live_temps_per_block()
aco_opt_value_numbering.cpp
    143   if (a->operands[i].physReg() != b->operands[i].physReg())  in operator ()()
    145   if (a->operands[i].physReg() == exec && a->pass_flags != b->pass_flags)  in operator ()()
    159   if (a->definitions[i].physReg() != b->definitions[i].physReg())  in operator ()()
    161   if (a->definitions[i].physReg() == exec)  in operator ()()
aco_statistics.cpp
    314   deps_available = MAX2(deps_available, reg_available[op.physReg().reg() + i]);  in get_dependency_cost()
    399   int32_t* available = &reg_available[def.physReg().reg()];  in add()
    482   blocks[0].reg_available[def.physReg().reg() + i] = vs_input_latency;  in collect_preasm_stats()
aco_insert_waitcnt.cpp
    267   PhysReg reg{op.physReg() + j};  in check_instr()
    279   PhysReg reg{def.physReg() + j};  in check_instr()
    305   instr->definitions[0].physReg() == sgpr_null) {  in parse_wait_instr()
    610   insert_wait_entry(ctx, op.physReg(), op.regClass(), event, false, vmem_types);  in insert_wait_entry()
    616   insert_wait_entry(ctx, def.physReg(), def.regClass(), event, true, vmem_types);  in insert_wait_entry()
aco_print_ir.cpp
    167   print_constant(operand->physReg().reg(), output);  in aco_print_operand()
    185   print_physReg(operand->physReg(), operand->bytes(), output, flags);  in aco_print_operand()
    206   print_physReg(definition->physReg(), definition->bytes(), output, flags);  in print_definition()
    627   offset += instr->definitions[0].physReg().byte();  in print_instr_format_specific()
    641   offset += instr->operands[i].physReg().byte();  in print_instr_format_specific()
aco_ir.h
    764   constexpr PhysReg physReg() const noexcept { return reg_; }
    866   if (isFixed() && other.isFixed() && physReg() != other.physReg())
    871   return other.isConstant() && other.physReg() == physReg();
    951   constexpr PhysReg physReg() const noexcept { return reg_; }
    1033  if (op.isFixed() && op.physReg() == exec)
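The aco_ir.h hits show both Operand and Definition exposing their assigned register through `constexpr PhysReg physReg() const noexcept { return reg_; }` and comparing fixed assignments via it. A trimmed-down sketch of that interface; PhysRegSketch and OperandSketch are simplified stand-ins, and the real classes also carry register class, constant and kill information:

    #include <cstdint>

    struct PhysRegSketch {
        uint16_t reg_b = 0; // register index in bytes, mirroring ACO's byte-granular PhysReg
        constexpr unsigned reg() const noexcept { return reg_b >> 2; }   // 32-bit register index
        constexpr unsigned byte() const noexcept { return reg_b & 0x3; } // byte offset within it
        constexpr bool operator==(PhysRegSketch o) const noexcept { return reg_b == o.reg_b; }
        constexpr bool operator!=(PhysRegSketch o) const noexcept { return reg_b != o.reg_b; }
    };

    class OperandSketch {
        PhysRegSketch reg_;
        bool is_fixed_ = false;
    public:
        constexpr void setFixed(PhysRegSketch reg) noexcept { reg_ = reg; is_fixed_ = true; }
        constexpr bool isFixed() const noexcept { return is_fixed_; }
        constexpr PhysRegSketch physReg() const noexcept { return reg_; }
    };

Keeping the accessor constexpr and noexcept lets passes compare fixed registers in tight loops with no call overhead, which is the pattern visible in the exec/vcc checks throughout the listing.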
aco_ir.cpp
    329   instr->definitions.back().physReg() != vcc)  in can_use_DPP()
    332   if (!pre_ra && instr->operands.size() >= 3 && instr->operands[2].physReg() != vcc)  in can_use_DPP()
aco_lower_to_cssa.cpp
    112   assert(op.isFixed() && op.physReg() == exec);  in collect_parallelcopies()
aco_spill.cpp
    203   if (op.isFixed() && op.physReg() == exec)  in next_uses_per_block()
    413   if (op.isFixed() && op.physReg() == exec)  in update_local_next_uses()
    1031  assert(phi->definitions[0].isFixed() && phi->definitions[0].physReg() == exec);  in add_coupling_code()
aco_scheduler.cpp
    565   if (def.isFixed() && def.physReg() == exec)
aco_optimizer.cpp
    1021  return op.isFixed() && op.physReg() == exec;  in fixed_to_exec()
    2395  if (!instr->operands[0].isFixed() || instr->operands[0].physReg() != exec)  in combine_inverse_comparison()
    4281  pred_instr->definitions[1].physReg() == scc);  in to_uniform_bool_instr()
    4455  instr->operands[0].isFixed() && instr->operands[0].physReg() == scc) {  in select_instruction()
    4679  if (instr->operands[0].isFixed() && instr->operands[0].physReg() >= 128)  in try_convert_sopc_to_sopk()
aco_instruction_selection.cpp
    139   assert(mask.isUndefined() || mask.isTemp() || (mask.isFixed() && mask.physReg() == exec));  in emit_mbcnt()
    156   } else if (mask.physReg() == exec) {  in emit_mbcnt()
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/CodeGen/
VirtRegMap.cpp
    83    void VirtRegMap::assignVirt2Phys(Register virtReg, MCPhysReg physReg) {  in assignVirt2Phys() argument
    84    assert(virtReg.isVirtual() && Register::isPhysicalRegister(physReg));  in assignVirt2Phys()
    88    assert(!getRegInfo().isReserved(physReg) &&  in assignVirt2Phys()
    89    "Attempt to map virtReg to a reserved physReg");  in assignVirt2Phys()
    90    Virt2PhysMap[virtReg.id()] = physReg;  in assignVirt2Phys()
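The VirtRegMap.cpp hit records a virtual-to-physical register assignment after asserting that the key is virtual, the value is physical, and the physical register is not reserved. A self-contained sketch of that mapping with plain integers in place of LLVM's Register/MCPhysReg types; VirtRegMapSketch is hypothetical and the reserved-register policy below is a stub:

    #include <cassert>
    #include <unordered_map>

    class VirtRegMapSketch {
        std::unordered_map<unsigned, unsigned> Virt2PhysMap;
        bool isReserved(unsigned physReg) const { return physReg == 0; } // hypothetical policy

    public:
        // LLVM marks virtual registers with the top bit of the register number;
        // this helper mirrors that convention for the sketch.
        static bool isVirtual(unsigned reg) { return (reg & 0x80000000u) != 0; }

        void assignVirt2Phys(unsigned virtReg, unsigned physReg) {
            assert(isVirtual(virtReg) && !isVirtual(physReg));
            assert(!isReserved(physReg) && "Attempt to map virtReg to a reserved physReg");
            assert(!Virt2PhysMap.count(virtReg) && "virtReg already mapped");
            Virt2PhysMap[virtReg] = physReg;
        }
        bool hasPhys(unsigned virtReg) const { return Virt2PhysMap.count(virtReg) != 0; }
        unsigned getPhys(unsigned virtReg) const { return Virt2PhysMap.at(virtReg); }
    };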
|
InterferenceCache.cpp
    104   void InterferenceCache::Entry::reset(unsigned physReg,  in reset() argument
    111   PhysReg = physReg;  in reset()
InterferenceCache.h
    119   /// valid - Return true if this is a valid entry for physReg.
    122   /// reset - Initialize entry to represent physReg's aliases.
    123   void reset(unsigned physReg,
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/include/llvm/CodeGen/
VirtRegMap.h
    109   void assignVirt2Phys(Register virtReg, MCPhysReg physReg);