Lines matching defs:dst — apparently a NIR-to-AGX compiler backend in the style of Mesa's agx_compile.c; the number leading each match is the source file's own line number.

83 agx_cache_combine(agx_builder *b, agx_index dst,
94 _mesa_hash_table_u64_insert(b->shader->allocated_vec, agx_index_to_key(dst),
105 agx_emit_combine_to(agx_builder *b, agx_index dst,
108 agx_cache_combine(b, dst, s0, s1, s2, s3);
109 return agx_p_combine_to(b, dst, s0, s1, s2, s3);
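
The combine path first caches which scalar sources built the vector destination (the allocated_vec table insert at line 94), then emits the real p_combine; later per-component reads can reuse the original scalars instead of splitting the vector again. A minimal sketch of the caching step, assuming the cached value is a heap copy of the four channels (the ralloc_array allocation is a guess at the elided body):

   static void
   agx_cache_combine(agx_builder *b, agx_index dst,
                     agx_index s0, agx_index s1, agx_index s2, agx_index s3)
   {
      /* Remember the scalars behind this vector so later reads of its
       * components can bypass the combine entirely. */
      agx_index *channels = ralloc_array(b->shader, agx_index, 4);
      channels[0] = s0;
      channels[1] = s1;
      channels[2] = s2;
      channels[3] = s3;

      _mesa_hash_table_u64_insert(b->shader->allocated_vec,
                                  agx_index_to_key(dst), channels);
   }
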
407 agx_emit_load_ubo(agx_builder *b, agx_index dst, nir_intrinsic_instr *instr)
431 agx_device_load_to(b, dst, base, agx_src_index(offset),
435 agx_emit_cached_split(b, dst, instr->num_components);
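
UBO loads become plain device loads from a driver-supplied base address, and the multi-component result is immediately re-split through the same cache. A sketch of what agx_emit_cached_split presumably does, assuming a per-channel extract helper (agx_emit_extract here is hypothetical):

   static void
   agx_emit_cached_split(agx_builder *b, agx_index vec, unsigned n)
   {
      agx_index comps[4] = { agx_null(), agx_null(), agx_null(), agx_null() };

      /* Split the vector once, then cache the channels so the combine
       * machinery above can hand them out on demand. */
      for (unsigned i = 0; i < n; ++i)
         comps[i] = agx_emit_extract(b, vec, i); /* hypothetical helper */

      agx_cache_combine(b, vec, comps[0], comps[1], comps[2], comps[3]);
   }
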
455 agx_blend_const(agx_builder *b, agx_index dst, unsigned comp)
460 return agx_mov_to(b, dst, val);
485 agx_index dst = nir_intrinsic_infos[instr->intrinsic].has_dest ?
528 return agx_emit_load_ubo(b, dst, instr);
538 return agx_get_sr_to(b, dst, AGX_SR_BACKFACING);
541 return agx_mov_to(b, dst, agx_abs(agx_register(10, AGX_SIZE_32)));
544 return agx_mov_to(b, dst, agx_abs(agx_register(12, AGX_SIZE_32)));
546 case nir_intrinsic_load_blend_const_color_r_float: return agx_blend_const(b, dst, 0);
547 case nir_intrinsic_load_blend_const_color_g_float: return agx_blend_const(b, dst, 1);
548 case nir_intrinsic_load_blend_const_color_b_float: return agx_blend_const(b, dst, 2);
549 case nir_intrinsic_load_blend_const_color_a_float: return agx_blend_const(b, dst, 3);
562 return agx_emit_combine_to(b, dst, dests[0], dests[1], dests[2], dests[3]);
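
The intrinsic dispatcher computes dst once up front, guarded by has_dest since not every intrinsic writes a value, then fans out one case per intrinsic. A condensed sketch of the switch these matches sit in (only cases grounded in the lines above; the null fallback and default arm are assumptions):

   static agx_instr *
   agx_emit_intrinsic(agx_builder *b, nir_intrinsic_instr *instr)
   {
      agx_index dst = nir_intrinsic_infos[instr->intrinsic].has_dest ?
                      agx_dest_index(&instr->dest) : agx_null();

      switch (instr->intrinsic) {
      case nir_intrinsic_load_ubo:
         return agx_emit_load_ubo(b, dst, instr);

      case nir_intrinsic_load_blend_const_color_r_float:
         return agx_blend_const(b, dst, 0);

      /* ... remaining cases per the matches above ... */

      default:
         unreachable("unhandled intrinsic");
      }
   }
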
588 agx_index dst, agx_index s0, agx_index s1, agx_index s2)
596 case nir_op_feq: return agx_fcmpsel_to(b, dst, s0, s1, t, f, AGX_FCOND_EQ);
597 case nir_op_flt: return agx_fcmpsel_to(b, dst, s0, s1, t, f, AGX_FCOND_LT);
598 case nir_op_fge: return agx_fcmpsel_to(b, dst, s0, s1, t, f, AGX_FCOND_GE);
599 case nir_op_fneu: return agx_fcmpsel_to(b, dst, s0, s1, f, t, AGX_FCOND_EQ);
601 case nir_op_ieq: return agx_icmpsel_to(b, dst, s0, s1, t, f, AGX_ICOND_UEQ);
602 case nir_op_ine: return agx_icmpsel_to(b, dst, s0, s1, f, t, AGX_ICOND_UEQ);
603 case nir_op_ilt: return agx_icmpsel_to(b, dst, s0, s1, t, f, AGX_ICOND_SLT);
604 case nir_op_ige: return agx_icmpsel_to(b, dst, s0, s1, f, t, AGX_ICOND_SLT);
605 case nir_op_ult: return agx_icmpsel_to(b, dst, s0, s1, t, f, AGX_ICOND_ULT);
606 case nir_op_uge: return agx_icmpsel_to(b, dst, s0, s1, f, t, AGX_ICOND_ULT);
608 case nir_op_mov: return agx_mov_to(b, dst, s0);
609 case nir_op_iand: return agx_and_to(b, dst, s0, s1);
610 case nir_op_ior: return agx_or_to(b, dst, s0, s1);
611 case nir_op_ixor: return agx_xor_to(b, dst, s0, s1);
612 case nir_op_inot: return agx_xor_to(b, dst, s0, t);
614 case nir_op_f2b1: return agx_fcmpsel_to(b, dst, s0, f, f, t, AGX_FCOND_EQ);
615 case nir_op_i2b1: return agx_icmpsel_to(b, dst, s0, f, f, t, AGX_ICOND_UEQ);
616 case nir_op_b2b1: return agx_icmpsel_to(b, dst, s0, f, f, t, AGX_ICOND_UEQ);
619 return agx_icmpsel_to(b, dst, s0, f, s2, s1, AGX_ICOND_UEQ);
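
agx_emit_alu_bool lowers every 1-bit boolean op onto compare-select. With t and f presumably the immediates 1 and 0, a comparison writes t where the condition holds and f where it does not; ops without a native condition (fneu, ine) reuse the equality condition with the t/f select operands swapped, and inot becomes xor with t, which is only correct because booleans are canonicalized to exactly 0/1. Sketch of the shared preamble, the immediate values being an assumption:

   agx_index f = agx_immediate(0);
   agx_index t = agx_immediate(1);

   /* fneu: same AGX_FCOND_EQ as feq, but selecting f on equal and t
    * otherwise, i.e. the comparison is inverted by swapping the select
    * operands rather than by a dedicated not-equal condition. */
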
638 agx_index dst = agx_dest_index(&instr->dest.dest);
646 return agx_emit_alu_bool(b, instr->op, dst, s0, s1, s2);
649 case nir_op_ ## nop: return agx_ ## aop ## _to(b, dst, s0);
651 case nir_op_ ## nop: return agx_ ## aop ## _to(b, dst, s0, s1);
653 case nir_op_ ## nop: return agx_ ## aop ## _to(b, dst, s0, s1, s2);
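
The UNOP/BINOP/TRIOP token-pasting macros generate the one-to-one cases, pairing a NIR opcode with the AGX emitter of matching arity. Assuming the conventional definition, for example:

   #define BINOP(nop, aop) \
      case nir_op_ ## nop: return agx_ ## aop ## _to(b, dst, s0, s1);

   BINOP(fadd, fadd);
   /* expands to:
    *    case nir_op_fadd: return agx_fadd_to(b, dst, s0, s1);
    */
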
687 case nir_op_fsqrt: return agx_fmul_to(b, dst, s0, agx_srsqrt(b, s0));
688 case nir_op_fsub: return agx_fadd_to(b, dst, s0, agx_neg(s1));
689 case nir_op_fabs: return agx_fmov_to(b, dst, agx_abs(s0));
690 case nir_op_fneg: return agx_fmov_to(b, dst, agx_neg(s0));
692 case nir_op_fmin: return agx_fcmpsel_to(b, dst, s0, s1, s0, s1, AGX_FCOND_LTN);
693 case nir_op_fmax: return agx_fcmpsel_to(b, dst, s0, s1, s0, s1, AGX_FCOND_GTN);
694 case nir_op_imin: return agx_icmpsel_to(b, dst, s0, s1, s0, s1, AGX_ICOND_SLT);
695 case nir_op_imax: return agx_icmpsel_to(b, dst, s0, s1, s0, s1, AGX_ICOND_SGT);
696 case nir_op_umin: return agx_icmpsel_to(b, dst, s0, s1, s0, s1, AGX_ICOND_ULT);
697 case nir_op_umax: return agx_icmpsel_to(b, dst, s0, s1, s0, s1, AGX_ICOND_UGT);
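
None of the min/max ops exist as dedicated opcodes here; each is a compare-select on its own operands, dst = (s0 OP s1) ? s0 : s1. The N-suffixed float conditions (LTN/GTN) presumably encode the NaN handling NIR's fmin/fmax require, preferring the non-NaN operand. In plain C the integer pattern is:

   /* umin as compare-select, mirroring AGX_ICOND_ULT above: */
   uint32_t umin(uint32_t s0, uint32_t s1)
   {
      return (s0 < s1) ? s0 : s1;
   }

fsqrt likewise has no direct opcode and is built as s0 * rsqrt(s0); agx_srsqrt is presumably a guarded reciprocal square root that keeps sqrt(0) from evaluating as 0 * inf = NaN.
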
699 case nir_op_iadd: return agx_iadd_to(b, dst, s0, s1, 0);
700 case nir_op_isub: return agx_iadd_to(b, dst, s0, agx_neg(s1), 0);
701 case nir_op_ineg: return agx_iadd_to(b, dst, agx_zero(), agx_neg(s0), 0);
702 case nir_op_imul: return agx_imad_to(b, dst, s0, s1, agx_zero(), 0);
704 case nir_op_ishl: return agx_bfi_to(b, dst, agx_zero(), s0, s1, 0);
705 case nir_op_ushr: return agx_ushr_to(b, dst, s0, s1);
706 case nir_op_ishr: return agx_asr_to(b, dst, s0, s1);
709 return agx_icmpsel_to(b, dst, s0, agx_zero(), s2, s1, AGX_ICOND_UEQ);
713 return agx_icmpsel_to(b, dst, s0, agx_zero(), agx_zero(), agx_immediate(1), AGX_ICOND_UEQ);
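
The integer cases lean on a small core: subtraction and negation reuse iadd with a negated source modifier, imul is an imad with a zero addend, and shift-left is a bitfield insert into zero. Assuming bfi(base, value, offset) computes base | (value << offset), the ishl lowering is exact:

   /* Demonstrating the bfi identity behind nir_op_ishl (assumed
    * semantics): inserting into a zero base is a plain shift. */
   #include <stdint.h>

   static uint32_t bfi(uint32_t base, uint32_t value, uint32_t offset)
   {
      return base | (value << offset);
   }
   /* bfi(0, s0, s1) == s0 << s1 */

bcsel is the same compare-select trick as the boolean path: compare s0 against zero and pick s2 (the false arm) on equality, s1 otherwise.
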
725 return agx_fcmpsel_to(b, dst, s0, zero, zero, one, AGX_FCOND_EQ);
733 return agx_iadd_to(b, dst, s0, agx_zero(), 0);
741 return agx_iadd_to(b, dst, s0, agx_zero(), 0);
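
The two iadd-with-zero cases are integer resizes written as additions: the add is size-converting, so s0 + 0 into a narrower or wider dst performs the truncation or zero-extension as a side effect of the destination size. A sketch of the pattern, with the case name a guess from its position in the switch:

   case nir_op_u2u16: /* hypothetical: any integer resize fits here */
      /* dst is 16-bit; the add's implicit size conversion does the
       * truncation, and the zero addend makes it a pure move. */
      return agx_iadd_to(b, dst, s0, agx_zero(), 0);
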
746 agx_instr *I = agx_iadd_to(b, dst, s0, s1, 0);
753 agx_instr *I = agx_iadd_to(b, dst, s0, agx_neg(s1), 0);
760 agx_instr *I = agx_iadd_to(b, dst, agx_abs(s0), agx_abs(s1), 0);
767 agx_instr *I = agx_iadd_to(b, dst, agx_abs(s0), agx_neg(agx_abs(s1)), 0);
774 agx_instr *I = agx_fadd_to(b, dst, s0, agx_negzero());
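
The agx_instr *I captures mark the ops that need a post-emit modifier: the saturating adds and fsat are ordinary iadd/fadd instructions with a saturate flag set on the returned instruction rather than distinct opcodes. fsat's fadd uses -0.0 as the addend because x + (-0.0) == x for every x, so the only visible effect is the clamp. A sketch, assuming the flag is a saturate field on agx_instr:

   case nir_op_fsat: {
      agx_instr *I = agx_fadd_to(b, dst, s0, agx_negzero());
      I->saturate = true; /* assumed field; clamps the result to [0, 1] */
      return I;
   }
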
783 return agx_fmul_to(b, dst, sinc, fixup);
787 return agx_convert_to(b, dst,
791 return agx_convert_to(b, dst,
795 return agx_convert_to(b, dst,
799 return agx_convert_to(b, dst,
813 return agx_convert_to(b, dst, agx_immediate(mode), s0, AGX_ROUND_RTE);
827 return agx_convert_to(b, dst, agx_immediate(mode), s0, AGX_ROUND_RTE);
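
Every numeric conversion funnels through the single convert instruction: its first source is an immediate selecting the source/destination type pair, with round-to-nearest-even as the default mode. The repeated shape at lines 787-827 suggests each case only computes the mode, along the lines of (the enumerant name here is hypothetical):

   case nir_op_i2f32: {
      unsigned mode = AGX_CONVERT_S32_TO_F; /* hypothetical enumerant */
      return agx_convert_to(b, dst, agx_immediate(mode), s0, AGX_ROUND_RTE);
   }
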
833 return agx_emit_combine_to(b, dst, s0, s1, s2, s3);
968 agx_index dst = agx_dest_index(&instr->dest);
969 agx_texture_sample_to(b, dst, coords, lod, texture, sampler, offset,
976 agx_emit_cached_split(b, dst, 4);
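
Texture sampling ends the same way the UBO path does: the sample writes a 4-wide vector, and agx_emit_cached_split records its channels so scalar consumers skip an explicit split. A sketch of the tail, with the wait between sample and split an assumption mirroring the load path (the dim/LOD arguments are placeholders for whatever lines 969-970 actually pass):

   agx_texture_sample_to(b, dst, coords, lod, texture, sampler, offset,
                         AGX_DIM_TEX_2D,          /* assumed argument */
                         AGX_LOD_MODE_AUTO_LOD);  /* assumed argument */
   agx_wait(b, 0);
   agx_emit_cached_split(b, dst, 4);
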