Lines matching refs:src

110                         .src = { ~0, ~0, ~0, ~0 }, \
119 i.src[0] = ssa; \
198 .src = { ~0, ~0, ~0, ~0 },
236 assert(intr->src[0].is_ssa);
237 nir_ssa_def *addr = intr->src[0].ssa;
254 shared_load->src[0] = nir_src_for_ssa(addr);
290 if (nir_src_bit_size(alu->src[0].src) == 64)
316 int src_bit_size = nir_src_bit_size(alu->src[0].src);
522 nir_is_non_scalar_swizzle(nir_alu_src *src, unsigned nr_components)
524 unsigned comp = src->swizzle[0];
527 if (src->swizzle[c] != comp)
581 nir_accepts_inot(nir_op op, unsigned src)
590 return (src == 0);
639 nir_alu_src src = instr->src[i];
642 if (pan_has_source_mod(&src, nir_op_fneg))
645 if (pan_has_source_mod(&src, nir_op_fabs))
649 if (nir_accepts_inot(instr->op, i) && pan_has_source_mod(&src, nir_op_inot))
653 if (pan_has_source_mod(&src, nir_op_fround_even))
656 if (pan_has_source_mod(&src, nir_op_ftrunc))
659 if (pan_has_source_mod(&src, nir_op_ffloor))
662 if (pan_has_source_mod(&src, nir_op_fceil))
666 unsigned bits = nir_src_bit_size(src.src);
668 ins->src[to] = nir_src_index(NULL, &src.src);
672 ins->swizzle[to][c] = src.swizzle[
705 nir_alu_src s = instr->src[i];
713 nir_alu_src s = instr->src[i];
761 ASSERTED unsigned src_bitsize = nir_src_bit_size(instr->src[0].src);
936 bool mixed = nir_is_non_scalar_swizzle(&instr->src[0], nr_components);
960 if (op == midgard_alu_op_imov && nir_src_is_const(instr->src[0].src)
961 && nir_src_bit_size(instr->src[0].src) == 32
962 && nir_is_same_comp_swizzle(instr->src[0].swizzle,
963 nir_src_num_components(instr->src[0].src))) {
1003 for (unsigned i = nr_inputs; i < ARRAY_SIZE(ins.src); ++i)
1004 ins.src[i] = ~0;
1007 ins.src[0] = ~0;
1069 ins.src[1] = SSA_FIXED_REGISTER(REGISTER_CONSTANT);
1081 ins.src[1] = SSA_FIXED_REGISTER(REGISTER_CONSTANT);
1091 ins.src[1] = SSA_FIXED_REGISTER(REGISTER_CONSTANT);
1180 nir_src_bit_size(intr->src[0]) : 32;
1230 ins.src[2] = nir_src_index(ctx, indirect_offset);
1310 unsigned bitsize = nir_src_bit_size(intr->src[0]) *
1311 nir_src_num_components(intr->src[0]);
1363 unsigned val = nir_src_index(ctx, &instr->src[val_src]);
1364 unsigned bitsize = nir_src_bit_size(instr->src[val_src]);
1371 .src = { ~0, ~0, ~0, val },
1380 unsigned xchg_val = nir_src_index(ctx, &instr->src[xchg_val_src]);
1383 ins.src[2] = val;
1385 ins.src[3] = xchg_val;
1395 ins.src[1] = is_image ? image_direct_address :
1403 ins.src[2] = image_direct_address;
1448 ins.src[2] = nir_src_index(ctx, indirect_offset);
1495 unsigned coord_reg = nir_src_index(ctx, &instr->src[1]);
1498 nir_src *index = &instr->src[0];
1508 unsigned val = nir_src_index(ctx, &instr->src[3]);
1514 ins.src_types[0] = base_type | nir_src_bit_size(instr->src[3]);
1527 ins.src[1] = coord_reg;
1535 ins.src[2] = nir_src_index(ctx, index);
1616 emit_fragment_store(compiler_context *ctx, unsigned src, unsigned src_z, unsigned src_s,
1626 emit_explicit_constant(ctx, src, src);
1636 ins.src[0] = src;
1649 ins.src[2] = src_z;
1655 ins.src[3] = src_s;
1724 .src = { ~0, ~0, ~0, ~0 },
1732 mir_get_branch_cond(nir_src *src, bool *invert)
1737 .src = *src
1741 return nir_src_index(NULL, &alu.src);
1780 discard.src[0] = mir_get_branch_cond(&instr->src[0],
1850 nir_src index = instr->src[0];
1902 if (nir_src_is_const(instr->src[0])) {
1903 unsigned sample = nir_src_as_uint(instr->src[0]);
1909 ld.src[1] = nir_src_index(ctx, &instr->src[0]);
1956 assert(nir_src_is_const(instr->src[1]) && "no indirect outputs");
1958 reg = nir_src_index(ctx, &instr->src[0]);
1971 reg_z = nir_src_index(ctx, &instr->src[2]);
1973 reg_s = nir_src_index(ctx, &instr->src[3]);
1975 reg_2 = nir_src_index(ctx, &instr->src[4]);
1995 if (instr->src[4].is_ssa) {
2023 offset = nir_intrinsic_base(instr) + nir_src_as_uint(instr->src[1]);
2026 unsigned nr_comp = nir_src_num_components(instr->src[0]);
2075 reg = nir_src_index(ctx, &instr->src[0]);
2085 reg = nir_src_index(ctx, &instr->src[0]);
2096 emit_global(ctx, &instr->instr, false, reg, &instr->src[1], seg);
2285 unsigned coords = nir_src_index(ctx, &instr->src[coord_idx].src);
2290 nir_src_bit_size(instr->src[coord_idx].src);
2325 ins->src[1] = make_compiler_temp_reg(ctx);
2330 midgard_instruction ld = m_ld_cubemap_coords(ins->src[1], 0);
2331 ld.src[1] = coords;
2356 nir_src_index(ctx, &instr->src[ms_or_comparator_idx].src);
2360 if (ins->src[1] == ~0)
2361 ins->src[1] = make_compiler_temp_reg(ctx);
2363 midgard_instruction mov = v_mov(sample_or_ref, ins->src[1]);
2380 if (ins->src[1] == ~0)
2381 ins->src[1] = make_compiler_temp_reg(ctx);
2385 v_mov(SSA_FIXED_REGISTER(REGISTER_CONSTANT), ins->src[1]);
2395 if (ins->src[1] == ~0) {
2396 /* No temporary reg created, use the src coords directly */
2397 ins->src[1] = coords;
2400 midgard_instruction mov = v_mov(coords, ins->src[1]);
2437 .src = { ~0, ~0, ~0, ~0 },
2455 int index = nir_src_index(ctx, &instr->src[i].src);
2456 unsigned sz = nir_src_bit_size(instr->src[i].src);
2459 switch (instr->src[i].src_type) {
2469 if (!is_txf && pan_attach_constant_bias(ctx, instr->src[i].src, &ins.texture))
2473 ins.src[2] = index;
2486 ins.src[3] = index;
2502 fprintf(stderr, "Unknown texture source type: %d\n", instr->src[i].src_type);
2594 void *entry = _mesa_hash_table_u64_search(ctx->ssa_constants, alu->src[idx] + 1); \
2597 attach_constants(ctx, alu, entry, alu->src[idx] + 1); \
2598 alu->src[idx] = SSA_FIXED_REGISTER(REGISTER_CONSTANT); \
2623 void *entry = _mesa_hash_table_u64_search(ctx->ssa_constants, alu->src[1] + 1);
2628 attach_constants(ctx, &ins, entry, alu->src[1] + 1);
2631 alu->src[1] = scratch;
2645 if (ins->src[i] == ~0) continue;
2729 if (ins->src[0] == SSA_FIXED_REGISTER(REGISTER_CONSTANT) &&
2734 if (ins->src[1] == SSA_FIXED_REGISTER(REGISTER_CONSTANT)) {
2801 ins->src[1] = ~0;
2920 then_branch->src[0] = mir_get_branch_cond(&nif->condition, &inv);
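
A convention that recurs throughout these matches is ~0 as the sentinel for an unused src slot: whole instructions are initialized with .src = { ~0, ~0, ~0, ~0 } (source lines 110, 198, 1724, 2437), individual slots are cleared with ins.src[i] = ~0 (lines 1003-1007, 2801), and passes skip or lazily allocate slots by testing src[i] == ~0 (lines 2645, 2360, 2380, 2395). The sketch below illustrates only that convention; it is not the real Mesa code. The midgard_instruction struct is reduced to a stub, and mir_init_sources / mir_src_is_used are hypothetical helper names invented for this example.

    #include <stdio.h>

    #define MIR_MAX_SRCS 4

    /* Stub standing in for the real midgard_instruction; only the
     * src array matters here. ~0 marks a slot as unused. */
    typedef struct {
        unsigned src[MIR_MAX_SRCS];
    } midgard_instruction;

    /* Mirrors the `.src = { ~0, ~0, ~0, ~0 }` initializers above:
     * every slot starts out unused. */
    static void
    mir_init_sources(midgard_instruction *ins)
    {
        for (unsigned i = 0; i < MIR_MAX_SRCS; ++i)
            ins->src[i] = ~0;
    }

    /* Mirrors the `if (ins->src[i] == ~0) continue;` pattern above. */
    static int
    mir_src_is_used(const midgard_instruction *ins, unsigned i)
    {
        return ins->src[i] != ~0u;
    }

    int
    main(void)
    {
        midgard_instruction ins;
        mir_init_sources(&ins);

        ins.src[0] = 42; /* pretend slot 0 now holds a value index */

        for (unsigned i = 0; i < MIR_MAX_SRCS; ++i)
            printf("src[%u] is %s\n", i,
                   mir_src_is_used(&ins, i) ? "used" : "unused");
        return 0;
    }

In the real compiler, the value stored in a used slot is an index produced by nir_src_index() or a fixed register such as SSA_FIXED_REGISTER(REGISTER_CONSTANT), as the matches at lines 1069-1091, 2598, and 2729-2734 show.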