/third_party/mesa3d/src/gallium/drivers/freedreno/a2xx/
  ir2_assemble.c: matches at 50 (comment on how ALU output components are allocated) and in
    alu_swizzle() (62 arg, 65, 66, 70, 82, 83), alu_write_mask() (103 arg, 105, 109),
    fetch_dst_swiz() (129 arg), dst_to_reg() (142 arg), fill_instr() (172 local),
    assemble() (391 local); further matches not shown.
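The alu_swizzle()/alu_write_mask() snippets in ir2_assemble.c revolve around one idea: the channels named by the write mask have to be repacked into whatever component slots the register allocator handed out. Below is a standalone illustration of that repacking loop; the function name, the 2-bit-per-channel encoding, and the alloc_comp array are hypothetical stand-ins, not the real ir2 data structures.

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical illustration: scatter the written channels of a destination
     * into the component slots the register allocator actually assigned,
     * mirroring the "for (i, j) ... if (write_mask & 1 << j)" loop seen in
     * alu_swizzle(). Two bits per channel, as in a2xx-style swizzle fields. */
    static uint8_t
    pack_dst_swizzle(unsigned write_mask, const uint8_t *alloc_comp)
    {
       uint8_t swiz = 0;
       for (int i = 0, j = 0; j < 4; j++) {
          if (!(write_mask & (1u << j)))
             continue;
          /* written channel j lands in allocated slot alloc_comp[i] */
          swiz |= (uint8_t)((alloc_comp[i] & 0x3) << (2 * j));
          i++;
       }
       return swiz;
    }

    int main(void)
    {
       const uint8_t alloc[] = { 1, 3 };               /* .y and .w were allocated */
       printf("%02x\n", pack_dst_swizzle(0x5, alloc)); /* mask writes .x and .z */
       return 0;
    }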
/third_party/mesa3d/src/broadcom/compiler/
  v3d_nir_lower_image_load_store.c: matches in v3d_nir_lower_image_store() (88 arg, 90, 97,
    100, 160, 162), v3d_nir_lower_image_load() (168 arg),
    v3d_nir_lower_image_load_store_cb() (201 arg); further matches not shown.
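The v3d_nir_lower_image_store() matches show the usual shape of a NIR intrinsic rewrite: park the builder cursor in front of the instruction, build replacement SSA values, then point the store's value source at the new def. A minimal sketch of that shape, assuming Mesa's NIR headers of this era; format_for_store() is a hypothetical placeholder for the real per-format packing done by the v3d pass.

    #include "nir.h"
    #include "nir_builder.h"

    /* Hypothetical placeholder for the per-format packing; here it only
     * saturates the color so the sketch stays self-contained. */
    static nir_ssa_def *
    format_for_store(nir_builder *b, nir_ssa_def *color)
    {
       return nir_fsat(b, color);
    }

    /* Sketch of the cursor/rewrite pattern visible in
     * v3d_nir_lower_image_store(): build new SSA defs in front of the
     * intrinsic, then make src[3] (the store value) use the reworked def. */
    static bool
    lower_image_store_value(nir_builder *b, nir_intrinsic_instr *intr)
    {
       if (intr->intrinsic != nir_intrinsic_image_store)
          return false;

       b->cursor = nir_before_instr(&intr->instr);

       nir_ssa_def *color = nir_ssa_for_src(b, intr->src[3], 4);
       nir_ssa_def *formatted = format_for_store(b, color);

       nir_instr_rewrite_src(&intr->instr, &intr->src[3],
                             nir_src_for_ssa(formatted));
       intr->num_components = formatted->num_components;
       return true;
    }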
  nir_to_vir.c: matches in v3d_get_op_for_atomic_add() (176 arg, 178, 179),
    v3d_general_tmu_op() (190 arg, 192, 205, 208), emit_tmu_general_store_writes() (351 arg,
    373, 376), emit_tmu_general_atomic_writes() (409 arg), emit_tmu_general_address_write()
    (443 arg), ntq_emit_tmu_general() (503 arg), ntq_get_alu_src() (876 arg), ntq_emit_txs()
    (897 arg), ntq_emit_tex() (944 arg), ntq_emit_alu() (1423 arg), ntq_emit_load_const()
    (2511 arg), ntq_emit_image_size() (2525 arg), vir_emit_tlb_color_read() (2553 arg),
    ntq_emit_load_uniform() (2723 arg), ntq_emit_inline_ubo_load() (2747 arg),
    ntq_emit_load_input() (2782 arg), ntq_emit_per_sample_color_write() (2844 arg),
    ntq_emit_color_write() (2863 arg), emit_store_output_gs() (2876 arg),
    emit_store_output_vs() (2917 arg), ntq_emit_store_output() (2940 arg),
    ntq_emit_load_unifa() (3102 arg), ntq_emit_intrinsic() (3312 arg),
    ntq_emit_instr() (4126 arg); further matches not shown.
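v3d_get_op_for_atomic_add() peeks at the operand of an atomic add: when it is a literal 1 or -1 the backend can pick a cheaper TMU increment/decrement op. A sketch of that check follows; the TMU_OP_* names are placeholders rather than the real v3d enum values.

    #include "nir.h"

    /* Placeholder op names; the real driver returns V3D TMU opcodes. */
    enum fake_tmu_op { TMU_OP_ADD, TMU_OP_INC, TMU_OP_DEC };

    /* Sketch of the constant-folding check in v3d_get_op_for_atomic_add():
     * a literal +1/-1 addend lets the backend select increment/decrement. */
    static enum fake_tmu_op
    op_for_atomic_add(nir_intrinsic_instr *instr, unsigned src)
    {
       if (nir_src_is_const(instr->src[src])) {
          int64_t add_val = nir_src_as_int(instr->src[src]);
          if (add_val == 1)
             return TMU_OP_INC;
          if (add_val == -1)
             return TMU_OP_DEC;
       }
       return TMU_OP_ADD;
    }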
/third_party/mesa3d/src/gallium/drivers/radeonsi/
  si_nir_optim.c: matches in check_instr_depends_on_tex() (60, 63, 65, 68, 70, 71, 73, 76,
    82), get_output_as_const_value() (100), store_instr_depends_on_tex() (129 arg),
    replace_tex_by_imm() (146 arg); further matches not shown.
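check_instr_depends_on_tex() is a backwards reachability walk: seed a worklist with the store, pop instructions, and push each one's source instructions until a texture fetch turns up. A simplified sketch of that worklist pattern, assuming Mesa's nir_worklist.h and util/set.h helpers; the real pass additionally records which texture unit it found.

    #include "nir.h"
    #include "nir_worklist.h"
    #include "util/set.h"

    /* Push the instruction that produces this source onto the worklist. */
    static bool
    push_src_instr(nir_src *src, void *wl)
    {
       if (!src->is_ssa)
          return false;
       nir_instr_worklist_push_tail(wl, src->ssa->parent_instr);
       return true;
    }

    /* Walk backwards from a store and report whether a texture fetch feeds it. */
    static bool
    store_depends_on_tex(nir_intrinsic_instr *store)
    {
       bool found_tex = false;
       struct set *seen = _mesa_pointer_set_create(NULL);
       nir_instr_worklist *work = nir_instr_worklist_create();

       nir_instr_worklist_push_tail(work, &store->instr);

       nir_foreach_instr_in_worklist(instr, work) {
          if (_mesa_set_search(seen, instr))
             continue;
          _mesa_set_add(seen, instr);

          if (instr->type == nir_instr_type_tex) {
             found_tex = true;
          } else if (instr->type == nir_instr_type_alu ||
                     instr->type == nir_instr_type_load_const ||
                     instr->type == nir_instr_type_intrinsic) {
             nir_foreach_src(instr, push_src_instr, work);
          }
       }

       nir_instr_worklist_destroy(work);
       _mesa_set_destroy(seen, NULL);
       return found_tex;
    }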
/third_party/mesa3d/src/compiler/nir/
  nir_gather_info.c: matches in nir_intrinsic_writes_external_memory() (318 arg, 320, 458),
    gather_intrinsic_info() (467 arg, 473, 474, 480, 482, 483, 500), gather_tex_info()
    (844 arg), gather_alu_info() (860 arg); further matches not shown.
  nir_lower_clip_disable.c: matches in lower_clip_plane_store() (64 arg, 69, 72, 80, 82, 92,
    96, 104); further matches not shown.
  nir_lower_point_size.c: matches in lower_point_size_instr() (42 local, 44, 45, 46, 54),
    instr_is_point_size() (58 arg, 60, 63); further matches not shown.
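lower_point_size_instr() clamps the scalar being written to gl_PointSize and makes the store use the clamped value. A minimal sketch of that clamp, assuming Mesa's NIR builder helpers; the min/max bounds are caller-supplied here, whereas the real pass reads them from its options.

    #include <assert.h>
    #include "nir.h"
    #include "nir_builder.h"

    /* Clamp the value of a gl_PointSize store to [min_size, max_size] and
     * rewrite the store's value source to the clamped def. */
    static void
    clamp_point_size_store(nir_builder *b, nir_intrinsic_instr *store,
                           float min_size, float max_size)
    {
       assert(store->src[1].is_ssa);
       assert(store->src[1].ssa->num_components == 1);

       b->cursor = nir_before_instr(&store->instr);

       nir_ssa_def *psiz = store->src[1].ssa;
       if (min_size > 0.0f)
          psiz = nir_fmax(b, psiz, nir_imm_float(b, min_size));
       if (max_size > 0.0f)
          psiz = nir_fmin(b, psiz, nir_imm_float(b, max_size));

       nir_instr_rewrite_src(&store->instr, &store->src[1],
                             nir_src_for_ssa(psiz));
    }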
  nir_opt_sink.c: matches in nir_can_move_instr() (40 arg, 42, 48, 49, 51, 56),
    get_preferred_block() (144 local, 145, 154, 155), nir_opt_sink() (229 local); further
    matches not shown.
  nir_opt_phi_precision.c: matches in narrowing_conversion_op() (110 arg, 112, 115),
    widening_conversion_op() (156 arg, 158, 161), try_move_narrowing_dst() (228, 237, 260,
    261), find_widening_op() (312, 350 local), try_move_widening_src() (389 local); further
    matches not shown.
  nir_opt_find_array_copies.c: matches in node_for_deref() (88 arg, 92, 95, 99, 100, 107,
    111, 112, 122, 123); further matches not shown.
  nir_opt_peephole_select.c: matches in block_check_for_allowed_instrs() (71, 72, 82, 95,
    96, 98, 166), nir_opt_collapse_if() (307, 311, 342); further matches not shown.
/third_party/mesa3d/src/gallium/drivers/r600/sfn/
  sfn_instr_alu.h: matches in the visit() overrides at 199-206 (AluGroup, Block, IfInstr,
    and the empty TexInstr, ExportInstr, FetchInstr, ControlFlowInstr overrides); further
    matches not shown.
  sfn_nir_lower_64bit.cpp: matches in the filter()/lower() declarations at 102-103 and
    112-113, LowerLoad64Uniform::filter() (116, 118, 121), LowerLoad64Uniform::lower()
    (131 arg, 133), filter() (164), lower() (720 arg, 855 arg), r600_lower_64bit_intrinsic()
    (1247 arg), r600_lower_64bit_load_const() (1385 arg), r600_lower_64bit_to_vec2_instr()
    (1422 arg); further matches not shown.
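The LowerLoad64Uniform filter()/lower() pair above is the r600 C++ wrapper around NIR's standard callback-pair API; in plain C the same structure is nir_shader_lower_instructions(), which takes a predicate and a rewrite callback. A toy pass showing only that structure (it lowers fdiv to a multiply by a reciprocal, not the r600 64-bit lowering):

    #include "nir.h"
    #include "nir_builder.h"

    /* Predicate: only ALU fdiv instructions are handled. */
    static bool
    filter_fdiv(const nir_instr *instr, const void *data)
    {
       return instr->type == nir_instr_type_alu &&
              nir_instr_as_alu(instr)->op == nir_op_fdiv;
    }

    /* Rewrite: a / b becomes a * rcp(b); the returned def replaces the
     * original instruction's result. */
    static nir_ssa_def *
    lower_fdiv(nir_builder *b, nir_instr *instr, void *data)
    {
       nir_alu_instr *alu = nir_instr_as_alu(instr);
       nir_ssa_def *num = nir_ssa_for_alu_src(b, alu, 0);
       nir_ssa_def *den = nir_ssa_for_alu_src(b, alu, 1);
       return nir_fmul(b, num, nir_frcp(b, den));
    }

    bool
    lower_fdiv_pass(nir_shader *shader)
    {
       return nir_shader_lower_instructions(shader, filter_fdiv, lower_fdiv, NULL);
    }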
/third_party/mesa3d/src/freedreno/ir3/
  ir3_cf.c: matches in is_safe_conv() (29 arg, 31, 37, 38, 45, 48, 49, 54); further matches
    not shown.
  ir3_spill.c: matches in add_base_reg() (151-154), instr_cycles() (178 arg, 180, 182-184),
    remove_src_early() (602 arg), remove_src() (615 arg), materialize_pcopy_src() (707 arg),
    spill() (725 arg), spill_interval() (769 arg), limit() (781 arg), rewrite_src_interval()
    (947 arg), reload_def() (969 arg), reload_src() (999 arg), rewrite_src() (1012 arg),
    handle_instr() (1037 arg), handle_input_phi() (1293 arg), remove_input_phi() (1301 arg),
    simplify_phi_srcs() (1904 arg); further matches not shown.
  ir3_array_to_ssa.c: matches in the header comment (27-28, reading the array defined in
    instr->dsts[0]->def and storing to instr->dsts[0]), read_value_beginning() (124),
    remove_trivial_phi() (159, 168), lookup_value() (192, 193), ir3_array_to_ssa() (228,
    229, 240); further matches not shown.
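remove_trivial_phi() in ir3_array_to_ssa.c is the classic step from on-the-fly SSA construction: a phi whose operands are all either the phi itself or one other value collapses to that value. Since the ir3 IR types are not shown here, the sketch below uses a hypothetical value struct and skips the recursive rewrite of the phi's users.

    #include <stddef.h>

    /* Hypothetical stand-in for an SSA value; when it is a phi, its operands
     * live in phi_srcs[0..num_srcs-1]. Not the ir3 representation. */
    struct value {
       struct value *phi_srcs[4];
       unsigned num_srcs;
    };

    /* If every operand is either the phi itself or one other value,
     * return that value; otherwise the phi is genuinely needed. */
    static struct value *
    remove_trivial_phi(struct value *phi)
    {
       struct value *same = NULL;

       for (unsigned i = 0; i < phi->num_srcs; i++) {
          struct value *src = phi->phi_srcs[i];
          if (src == phi || src == same)
             continue;            /* self-reference or value already seen */
          if (same)
             return phi;          /* two distinct operands: keep the phi */
          same = src;
       }
       return same ? same : phi;  /* collapse to the single real operand */
    }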
/third_party/vixl/examples/aarch64/
  custom-disassembler.cc: matches in CustomDisassembler::AppendRegisterNameToOutput()
    (40 arg, 42, 66), AppendCodeRelativeCodeAddressToOutput() (88 arg, 90), Visit()
    (111 arg, 112, 119, 123), TestCustomDisassembler() (176 local); further matches not
    shown.
/third_party/mesa3d/src/freedreno/computerator/examples/
  test-opcodes.sh: the shell variable instr is assigned and expanded at 99, 104, 108, 111,
    113, 116, 118, 184, 188, 203; further matches not shown.
/third_party/mesa3d/src/gallium/drivers/lima/ir/gp/
  codegen.c: matches in gpir_get_alu_input() (77), gpir_codegen_mul0_slot() (86 arg, 88),
    gpir_codegen_mul1_slot() (146 arg, 148), gpir_codegen_add0_slot() (198 arg, 200),
    gpir_codegen_add1_slot() (283 arg, 285), gpir_codegen_complex_slot() (368 arg),
    gpir_codegen_pass_slot() (413 arg), gpir_codegen_reg0_slot() (458 arg),
    gpir_codegen_reg1_slot() (467 arg), gpir_codegen_mem_slot() (475 arg),
    gpir_codegen_store_slot() (502 arg), gpir_codegen() (548 arg); further matches not
    shown.
/third_party/mesa3d/src/gallium/drivers/etnaviv/
  etnaviv_nir.c: matches in etna_lower_io() (40, 41, 42, 51, 72, 78, 90, 93, 137, 140);
    further matches not shown.
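etna_lower_io() walks every instruction of every block and rewrites intrinsics in place, which is why it uses the _safe iterator variant. A skeleton of that traversal, assuming Mesa's NIR headers of this era, with the per-intrinsic rewriting left as a no-op placeholder.

    #include "nir.h"
    #include "nir_builder.h"

    /* Traversal skeleton: visit every intrinsic in the shader, positioning a
     * builder in front of it. The _safe variant is required because the loop
     * body may insert and rewrite instructions while walking. */
    static void
    lower_io_skeleton(nir_shader *shader)
    {
       nir_foreach_function(function, shader) {
          if (!function->impl)
             continue;

          nir_builder b;
          nir_builder_init(&b, function->impl);

          nir_foreach_block(block, function->impl) {
             nir_foreach_instr_safe(instr, block) {
                if (instr->type != nir_instr_type_intrinsic)
                   continue;

                nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
                b.cursor = nir_before_instr(instr);
                /* per-intrinsic rewriting would go here */
                (void)intr;
             }
          }
       }
    }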
/third_party/mesa3d/src/imagination/rogue/nir/
  rogue_nir_lower_io.c: matches in lower_vulkan_resource_index() (45),
    lower_load_vulkan_descriptor() (56), lower_load_ubo_to_scalar() (62, 73, 95, 102),
    lower_intrinsic() (106 arg, 108, 110, 114); further matches not shown.
/third_party/node/deps/v8/src/codegen/arm64/
  decoder-arm64.h: matches in the DECLARE(A) visitor macros (92, 134),
    VisitNEONShiftImmediate() (132), Decode() (151), DecodePCRelAddressing() (157),
    DecodeAddSubImmediate() (162), DecodeBranchSystemException() (167), DecodeLoadStore()
    (172), DecodeLogical() (177), DecodeBitfieldExtract() (182); further matches not shown.
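The DECLARE(A) lines in decoder-arm64.h are the X-macro idiom: a single visitor-list macro is expanded with different per-item macros to generate the Visit* declarations and, elsewhere, the dispatch code. A self-contained C illustration with a hypothetical three-entry list:

    #include <stdio.h>

    /* Hypothetical visitor list; the real header enumerates all A64 groups. */
    #define VISITOR_LIST(V) \
       V(AddSubImmediate)   \
       V(LoadStore)         \
       V(Logical)

    /* Expand once to declare one handler per list entry. */
    #define DECLARE(A) static void Visit##A(const char *encoding);
    VISITOR_LIST(DECLARE)
    #undef DECLARE

    /* Expand again to generate matching definitions. */
    #define DEFINE(A) \
       static void Visit##A(const char *encoding) \
       { printf(#A ": %s\n", encoding); }
    VISITOR_LIST(DEFINE)
    #undef DEFINE

    int main(void)
    {
       VisitAddSubImmediate("add x0, x1, #1");
       VisitLoadStore("ldr x0, [x1]");
       VisitLogical("and x0, x1, x2");
       return 0;
    }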
/third_party/skia/third_party/externals/spirv-tools/source/fuzz/
  transformation_duplicate_region_with_selection.cpp: matches in IsApplicable() (219, 220,
    225, 230, 246, 247, 253, 267, 273), Apply() (336), AvailableAfterRegion() (687 arg),
    ValidOpPhiArgument() (696 arg); further matches not shown.
/third_party/skia/third_party/externals/swiftshader/third_party/SPIRV-Tools/source/fuzz/
  transformation_duplicate_region_with_selection.cpp: matches in IsApplicable() (219, 220,
    225, 230, 246, 247, 253, 267, 273), Apply() (336), AvailableAfterRegion() (687 arg),
    ValidOpPhiArgument() (696 arg); further matches not shown.
/third_party/spirv-tools/source/fuzz/
  transformation_duplicate_region_with_selection.cpp: matches in IsApplicable() (219, 220,
    225, 230, 246, 247, 253, 267, 273), Apply() (336), AvailableAfterRegion() (687 arg),
    ValidOpPhiArgument() (696 arg); further matches not shown.