Lines matching defs:instr (declarations and uses of the instr parameter in the v3d40 NIR-to-VIR texture and image lowering code; the leading number on each line is the line number in the source file)
84 nir_tex_instr *instr,
97 switch (instr->src[src_idx].src_type) {
100 s = ntq_get_src(c, instr->src[src_idx].src, 0);
108 ntq_get_src(c, instr->src[src_idx].src, 1);
115 ntq_get_src(c, instr->src[src_idx].src, 2);
120 if (instr->is_array) {
122 ntq_get_src(c, instr->src[src_idx].src,
123 instr->coord_components - 1);
130 struct qreg src = ntq_get_src(c, instr->src[src_idx].src, 0);
136 struct qreg src = ntq_get_src(c, instr->src[src_idx].src, 0);
145 if (instr->op != nir_texop_txf &&
146 instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) {
154 struct qreg src = ntq_get_src(c, instr->src[src_idx].src, 0);
160 bool is_const_offset = nir_src_is_const(instr->src[src_idx].src);
164 nir_src_comp_as_int(instr->src[src_idx].src, 0);
167 nir_src_comp_as_int(instr->src[src_idx].src, 1);
170 nir_src_comp_as_int(instr->src[src_idx].src, 2);
174 ntq_get_src(c, instr->src[src_idx].src, 0);
176 ntq_get_src(c, instr->src[src_idx].src, 1);
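
The coordinate handling in handle_tex_src() above splits off the array layer: for an arrayed texture, NIR stores the layer index in the last coordinate component, which is why the listing reads component coord_components - 1 when instr->is_array is set. Below is a minimal, self-contained sketch of that split; tex_coord_layout and its fields are illustrative stand-ins, not the real nir_tex_instr.

/* Stand-alone model of the coordinate/array split visible in
 * handle_tex_src(); the struct and helper are illustrative only. */
#include <stdbool.h>
#include <stdio.h>

struct tex_coord_layout {
        unsigned lookup_components;  /* s/t/r components used for the lookup */
        int array_index_component;   /* -1 when the texture is not arrayed */
};

static struct tex_coord_layout
tex_coord_layout_get(unsigned coord_components, bool is_array)
{
        struct tex_coord_layout l;

        /* NIR appends the layer index as the last coordinate component of an
         * arrayed texture, hence coord_components - 1 in the listing above. */
        l.lookup_components = is_array ? coord_components - 1
                                       : coord_components;
        l.array_index_component = is_array ? (int)coord_components - 1 : -1;
        return l;
}

int
main(void)
{
        /* A 2D array texture has 3 coordinate components; the last one is
         * the layer index. */
        struct tex_coord_layout l = tex_coord_layout_get(3, true);
        printf("lookup components: %u, layer component: %d\n",
               l.lookup_components, l.array_index_component);
        return 0;
}
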
201 nir_tex_instr *instr,
206 unsigned non_array_components = instr->op != nir_texop_lod ?
207 instr->coord_components - instr->is_array :
208 instr->coord_components;
210 for (unsigned i = 0; i < instr->num_srcs; i++) {
211 handle_tex_src(c, instr, i, non_array_components,
217 get_required_tex_tmu_writes(struct v3d_compile *c, nir_tex_instr *instr)
220 vir_tex_handle_srcs(c, instr, NULL, NULL, &tmu_writes);
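
The NULL vs. &tmu_writes arguments in get_required_tex_tmu_writes() and in the later call from v3d40_vir_emit_tex() suggest a two-pass scheme: the same source walk first runs as a dry run that only counts the TMU register writes, then runs again to actually emit them. The sketch below models that pattern; handle_srcs() and emit_tmu_write() are made-up stand-ins for the driver helpers.

/* Self-contained sketch of the count-then-emit pattern implied by
 * get_required_tex_tmu_writes(); not the driver's actual code. */
#include <stddef.h>
#include <stdio.h>

static void
emit_tmu_write(unsigned value)
{
        printf("TMU write: %u\n", value);
}

/* One walk over the instruction sources.  If tmu_writes is non-NULL we only
 * count the writes; otherwise we emit them. */
static void
handle_srcs(const unsigned *srcs, unsigned num_srcs, unsigned *tmu_writes)
{
        for (unsigned i = 0; i < num_srcs; i++) {
                if (tmu_writes)
                        (*tmu_writes)++;
                else
                        emit_tmu_write(srcs[i]);
        }
}

int
main(void)
{
        unsigned srcs[] = { 10, 20, 30 };

        /* Pass 1: dry run to learn how many TMU writes are needed. */
        unsigned tmu_writes = 0;
        handle_srcs(srcs, 3, &tmu_writes);
        printf("required TMU writes: %u\n", tmu_writes);

        /* Pass 2: the real emission. */
        handle_srcs(srcs, 3, NULL);
        return 0;
}
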
225 v3d40_vir_emit_tex(struct v3d_compile *c, nir_tex_instr *instr)
227 assert(instr->op != nir_texop_lod || c->devinfo->ver >= 42);
229 unsigned texture_idx = instr->texture_index;
230 unsigned sampler_idx = instr->sampler_index;
239 instr->dest.is_ssa ?
240 nir_ssa_def_components_read(&instr->dest.ssa) :
241 (1 << instr->dest.reg.reg->num_components) - 1;
246 .gather_mode = instr->op == nir_texop_tg4,
247 .gather_component = instr->component,
248 .coefficient_mode = instr->op == nir_texop_txd,
249 .disable_autolod = instr->op == nir_texop_tg4
252 const unsigned tmu_writes = get_required_tex_tmu_writes(c, instr);
270 vir_tex_handle_srcs(c, instr, &p2_unpacked, &s, NULL);
286 if (instr->op == nir_texop_lod)
303 !instr->is_shadow;
307 (instr->op == nir_texop_lod ||
312 bool non_default_p1_config = nir_tex_instr_need_sampler(instr) ||
319 .unnormalized_coordinates = (instr->sampler_dim ==
337 if (nir_tex_instr_need_sampler(instr)) {
371 if (instr->op == nir_texop_txf) {
372 assert(instr->sampler_dim != GLSL_SAMPLER_DIM_CUBE);
374 } else if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) {
376 } else if (instr->op == nir_texop_txl) {
382 ntq_add_pending_tmu_flush(c, &instr->dest,
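
v3d40_vir_emit_tex() works out which destination components are actually consumed so the TMU only returns those values: SSA destinations use nir_ssa_def_components_read(), while a non-SSA destination falls back to an all-ones mask of (1 << num_components) - 1, as the listing shows. A small sketch of that fallback mask follows; the helper name is invented.

#include <assert.h>
#include <stdio.h>

/* All-components-read fallback mask, mirroring
 * (1 << instr->dest.reg.reg->num_components) - 1 in the listing.  SSA
 * destinations can instead shrink the mask to the components really read. */
static unsigned
all_components_mask(unsigned num_components)
{
        assert(num_components >= 1 && num_components <= 4);
        return (1u << num_components) - 1;
}

int
main(void)
{
        printf("vec4 mask: 0x%x\n", all_components_mask(4)); /* 0xf */
        printf("vec2 mask: 0x%x\n", all_components_mask(2)); /* 0x3 */
        return 0;
}
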
387 v3d40_image_load_store_tmu_op(nir_intrinsic_instr *instr)
389 switch (instr->intrinsic) {
394 return v3d_get_op_for_atomic_add(instr, 3);
432 nir_intrinsic_instr *instr,
440 switch (nir_intrinsic_image_dim(instr)) {
449 struct qreg src = ntq_get_src(c, instr->src[1], 1);
454 struct qreg src_1_1 = ntq_get_src(c, instr->src[1], 1);
455 struct qreg src_1_2 = ntq_get_src(c, instr->src[1], 2);
467 if (nir_intrinsic_image_dim(instr) == GLSL_SAMPLER_DIM_CUBE ||
468 nir_intrinsic_image_array(instr)) {
469 struct qreg src = ntq_get_src(c, instr->src[1], is_1d ? 1 : 2);
474 if (instr->intrinsic != nir_intrinsic_image_load &&
476 for (int i = 0; i < nir_intrinsic_src_components(instr, 3); i++) {
477 struct qreg src_3_i = ntq_get_src(c, instr->src[3], i);
483 if (instr->intrinsic == nir_intrinsic_image_atomic_comp_swap) {
484 struct qreg src_4_0 = ntq_get_src(c, instr->src[4], 0);
490 struct qreg src_1_0 = ntq_get_src(c, instr->src[1], 0);
492 instr->intrinsic != nir_intrinsic_image_load) {
500 instr->intrinsic != nir_intrinsic_image_load) {
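
In vir_image_emit_register_writes() the image coordinate in src[1] is unpacked component by component, and for cube or arrayed images the layer index is taken from component 1 of a 1D image and component 2 otherwise (the is_1d ? 1 : 2 selection above). Here is a stand-alone sketch of that selection; the helper is illustrative, not part of the driver.

#include <stdbool.h>
#include <stdio.h>

/* Which coordinate component of src[1] carries the layer index, matching the
 * "is_1d ? 1 : 2" selection in the listing.  Returns -1 when the image is
 * neither arrayed nor a cube and so has no layer coordinate. */
static int
image_layer_component(bool is_1d, bool is_array_or_cube)
{
        if (!is_array_or_cube)
                return -1;
        return is_1d ? 1 : 2;
}

int
main(void)
{
        printf("1D array layer component:   %d\n", image_layer_component(true, true));
        printf("2D array/cube layer comp.:  %d\n", image_layer_component(false, true));
        printf("plain 2D image:             %d\n", image_layer_component(false, false));
        return 0;
}
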
509 nir_intrinsic_instr *instr,
513 vir_image_emit_register_writes(c, instr, atomic_add_replaced,
520 nir_intrinsic_instr *instr)
522 unsigned format = nir_intrinsic_format(instr);
523 unsigned unit = nir_src_as_uint(instr->src[0]);
538 uint32_t instr_return_channels = nir_intrinsic_dest_components(instr);
545 p2_unpacked.op = v3d40_image_load_store_tmu_op(instr);
552 (instr->intrinsic == nir_intrinsic_image_atomic_add &&
577 if (instr->intrinsic != nir_intrinsic_image_load)
582 get_required_image_tmu_writes(c, instr, atomic_add_replaced);
602 vir_image_emit_register_writes(c, instr, atomic_add_replaced, NULL);
604 ntq_add_pending_tmu_flush(c, &instr->dest,
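
Both emit paths end with ntq_add_pending_tmu_flush(c, &instr->dest, ...), which, as the name suggests, defers reading the TMU results: the destination and its channel count are queued, and the actual result reads happen later when outstanding TMU work is flushed. The sketch below models that deferral with a stand-alone queue; the structure and names are assumptions for illustration, not the compiler's real bookkeeping.

/* Stand-alone model of deferring TMU result reads until a later flush. */
#include <stdio.h>

#define MAX_PENDING 8

struct pending_tmu_read {
        int dest_index;         /* stand-in for the NIR destination */
        unsigned num_channels;  /* how many values the TMU will return */
};

static struct pending_tmu_read pending[MAX_PENDING];
static unsigned num_pending;

static void
add_pending_tmu_flush(int dest_index, unsigned num_channels)
{
        pending[num_pending++] = (struct pending_tmu_read){
                .dest_index = dest_index,
                .num_channels = num_channels,
        };
}

static void
flush_pending_tmu(void)
{
        /* Read back every queued result channel, then clear the queue. */
        for (unsigned i = 0; i < num_pending; i++) {
                for (unsigned ch = 0; ch < pending[i].num_channels; ch++)
                        printf("ldtmu -> dest %d channel %u\n",
                               pending[i].dest_index, ch);
        }
        num_pending = 0;
}

int
main(void)
{
        add_pending_tmu_flush(/*dest*/ 0, /*channels*/ 4); /* e.g. image load */
        add_pending_tmu_flush(/*dest*/ 1, /*channels*/ 1); /* e.g. atomic op  */
        flush_pending_tmu();
        return 0;
}
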