Lines Matching refs:loc0
375 __m128i loc0, loc1, loc2, loc3;
410 LSX_BUTTERFLY_4_H(reg2, reg14, reg6, reg10, loc0, loc1, reg14, reg2);
418 reg12 = __lsx_vsub_h(reg14, loc0);
419 reg14 = __lsx_vadd_h(reg14, loc0);
435 VP9_DOTP_CONST_PAIR(reg13, reg3, cospi_6_64, cospi_26_64, loc0, loc1);
436 LSX_BUTTERFLY_4_H(loc0, loc1, reg11, reg5, reg13, reg3, reg11, reg5);
445 loc0 = __lsx_vadd_h(reg0, loc1);
447 tmp6 = loc0;
455 loc0 = __lsx_vadd_h(reg9, reg5);
457 reg2 = __lsx_vadd_h(reg6, loc0);
458 reg1 = __lsx_vsub_h(reg6, loc0);
460 loc0 = __lsx_vadd_h(reg7, reg11);
462 loc1 = __lsx_vadd_h(reg4, loc0);
463 loc2 = __lsx_vsub_h(reg4, loc0);
467 LSX_BUTTERFLY_4_H(reg8, reg10, reg11, reg5, loc0, reg4, reg9, loc1);
469 reg10 = loc0;
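
The LSX_BUTTERFLY_4_H macro that dominates these hits is the add/sub butterfly at the heart of the inverse transform; the near-identical group of hits at lines 500-595 below repeats the same sequence in a second function. Assuming the conventional definition from FFmpeg's libavutil/loongarch/loongson_intrinsics.h (inferred from the naming convention, not shown in this listing), each call forms two sums and the two mirrored differences across eight int16_t lanes. A minimal scalar model of that assumed expansion:

#include <stdint.h>

/* Scalar model of the presumed LSX_BUTTERFLY_4_H expansion: each
 * __m128i holds eight int16_t lanes; outputs are the outer/inner
 * sums followed by the inner/outer differences. */
static void butterfly_4_h_model(const int16_t *in0, const int16_t *in1,
                                const int16_t *in2, const int16_t *in3,
                                int16_t *out0, int16_t *out1,
                                int16_t *out2, int16_t *out3)
{
    for (int i = 0; i < 8; i++) {
        out0[i] = in0[i] + in3[i]; /* __lsx_vadd_h(in0, in3) */
        out1[i] = in1[i] + in2[i]; /* __lsx_vadd_h(in1, in2) */
        out2[i] = in1[i] - in2[i]; /* __lsx_vsub_h(in1, in2) */
        out3[i] = in0[i] - in3[i]; /* __lsx_vsub_h(in0, in3) */
    }
}
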
500 __m128i loc0, loc1, loc2, loc3;
535 LSX_BUTTERFLY_4_H(reg2, reg14, reg6, reg10, loc0, loc1, reg14, reg2);
543 reg12 = __lsx_vsub_h(reg14, loc0);
544 reg14 = __lsx_vadd_h(reg14, loc0);
560 VP9_DOTP_CONST_PAIR(reg13, reg3, cospi_6_64, cospi_26_64, loc0, loc1);
561 LSX_BUTTERFLY_4_H(loc0, loc1, reg11, reg5, reg13, reg3, reg11, reg5);
570 loc0 = __lsx_vadd_h(reg0, loc1);
572 tmp6 = loc0;
580 loc0 = __lsx_vadd_h(reg9, reg5);
582 reg2 = __lsx_vadd_h(reg6, loc0);
583 reg1 = __lsx_vsub_h(reg6, loc0);
585 loc0 = __lsx_vadd_h(reg7, reg11);
587 loc1 = __lsx_vadd_h(reg4, loc0);
588 loc2 = __lsx_vsub_h(reg4, loc0);
593 LSX_BUTTERFLY_4_H(reg8, reg10, reg11, reg5, loc0, reg4, reg9, loc1);
595 reg10 = loc0;
741 __m128i vec0, vec1, vec2, vec3, loc0, loc1, loc2, loc3;
749 loc0 = __lsx_vld(tmp_eve_buf, 0);
754 DUP4_ARG2(__lsx_vadd_h, loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0,
759 __lsx_vst(SUB(loc0, vec3), tmp_buf, 31 * 16);
769 loc0 = __lsx_vld(tmp_eve_buf, 2 * 16);
774 DUP4_ARG2(__lsx_vadd_h, loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0,
777 __lsx_vst(SUB(loc0, vec3), tmp_buf, 29 * 16);
787 loc0 = __lsx_vld(tmp_eve_buf, 1 * 16);
792 DUP4_ARG2(__lsx_vadd_h, loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0,
795 __lsx_vst(SUB(loc0, vec3), tmp_buf, 30 * 16);
805 loc0 = __lsx_vld(tmp_eve_buf, 3 * 16);
810 DUP4_ARG2(__lsx_vadd_h, loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0,
813 __lsx_vst(SUB(loc0, vec3), tmp_buf, 28 * 16);
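
In the hits above (lines 741-813), loc0 carries an even-part row reloaded from tmp_eve_buf; each DUP4_ARG2 forms sums with the odd-part vec values, while the SUB stores write the mirrored differences to the top rows of tmp_buf (offsets 31*16 down to 28*16). Assuming DUP4_ARG2 simply fans its two-operand instruction across four operand pairs and SUB is shorthand for __lsx_vsub_h (both inferred from naming), one loc0 round reduces to a sketch like:

#include <stdint.h>
#include <lsxintrin.h>

/* Illustrative recombination for one row pair: out[k] = even + odd,
 * out[31 - k] = even - odd. The sum's destination row is an assumption;
 * the real code forwards the sums through DUP4_ARG2 output operands
 * (on continuation lines filtered out of this refs:loc0 listing). */
static void recombine_loc0_sketch(int16_t *tmp_eve_buf, int16_t *tmp_buf,
                                  __m128i vec3)
{
    __m128i loc0 = __lsx_vld(tmp_eve_buf, 0);

    __lsx_vst(__lsx_vadd_h(loc0, vec3), tmp_buf, 0);       /* row 0  */
    __lsx_vst(__lsx_vsub_h(loc0, vec3), tmp_buf, 31 * 16); /* row 31 */
}

loc1..loc3 follow the same pattern with their own load and store offsets; their lines do not match refs:loc0 and so are absent here.
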
882 __m128i vec0, vec1, vec2, vec3, loc0, loc1, loc2, loc3;
910 loc0 = vec1;
915 LSX_BUTTERFLY_4_H(vec0, vec1, loc1, loc0, stp3, stp0, stp7, stp4);
969 LSX_BUTTERFLY_4_H(stp0, stp1, reg7, reg5, loc1, loc3, loc2, loc0);
973 __lsx_vst(loc0, tmp_eve_buf, 14 * 16 + 16);
974 LSX_BUTTERFLY_4_H(stp2, stp3, reg4, reg1, loc1, loc3, loc2, loc0);
978 __lsx_vst(loc0, tmp_eve_buf, 12 * 16 + 16);
981 LSX_BUTTERFLY_4_H(stp4, stp5, reg6, reg3, loc1, loc3, loc2, loc0);
985 __lsx_vst(loc0, tmp_eve_buf, 10 * 16 + 16);
987 LSX_BUTTERFLY_4_H(stp6, stp7, reg2, reg0, loc1, loc3, loc2, loc0);
991 __lsx_vst(loc0, tmp_eve_buf, 8 * 16 + 16);
997 __m128i vec0, vec1, vec2, vec3, loc0, loc1, loc2, loc3;
1082 VP9_DOTP_CONST_PAIR(vec1, vec0, cospi_12_64, cospi_20_64, loc0, loc1);
1084 LSX_BUTTERFLY_4_H(loc2, loc3, loc1, loc0, vec0, vec1, vec3, vec2);
1111 loc0, loc1, loc2, loc3);
1112 __lsx_vst(loc0, tmp_odd_buf, 0);
1117 VP9_DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc0, loc1);
1121 __lsx_vst(loc0, tmp_odd_buf, 8 * 16);
1135 loc0, loc1, loc2, loc3);
1136 __lsx_vst(loc0, tmp_odd_buf, 4 * 16);
1142 VP9_DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc0, loc1);
1146 __lsx_vst(loc0, tmp_odd_buf, 12 * 16);
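
VP9_DOTP_CONST_PAIR (lines 435/560, 1082, 1117, 1142 above) is the rotation step of the transform: it combines two input vectors with a pair of cosine constants and round-shifts the 32-bit products back to 16 bits; the cospi_16_64/cospi_16_64 pairing is the 45-degree case. A scalar model of one lane, using VP9's 14-bit fixed-point convention; the exact sign and operand order inside the LSX macro is an assumption based on the VP9 reference code, not on the macro body:

#include <stdint.h>

#define DCT_CONST_BITS 14

/* Round-shift as in the VP9 reference implementation. */
static int16_t dct_round_shift(int32_t x)
{
    return (int16_t)((x + (1 << (DCT_CONST_BITS - 1))) >> DCT_CONST_BITS);
}

/* Scalar model of one lane of the presumed VP9_DOTP_CONST_PAIR:
 * a Givens-style rotation by the constant pair (c0, c1). */
static void dotp_const_pair_model(int16_t a, int16_t b,
                                  int16_t c0, int16_t c1,
                                  int16_t *out0, int16_t *out1)
{
    *out0 = dct_round_shift(a * c0 - b * c1);
    *out1 = dct_round_shift(a * c1 + b * c0);
}
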
1157 __m128i vec0, vec1, vec2, vec3, loc0, loc1, loc2, loc3;
1165 loc0 = __lsx_vld(tmp_eve_buf, 0);
1170 DUP4_ARG2(__lsx_vadd_h, loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0,
1175 DUP4_ARG2(__lsx_vsub_h, loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0,
1186 loc0 = __lsx_vld(tmp_eve_buf, 2 * 16);
1191 DUP4_ARG2(__lsx_vadd_h, loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0,
1197 DUP4_ARG2(__lsx_vsub_h, loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0,
1208 loc0 = __lsx_vld(tmp_eve_buf, 1 * 16);
1213 DUP4_ARG2(__lsx_vadd_h, loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0,
1218 DUP4_ARG2(__lsx_vsub_h, loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0,
1229 loc0 = __lsx_vld(tmp_eve_buf, 3 * 16);
1234 DUP4_ARG2(__lsx_vadd_h, loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0,
1239 DUP4_ARG2(__lsx_vsub_h, loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0,
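
For completeness, a tiny driver exercising the scalar models sketched above; everything here is illustrative, assumes those model functions, and uses 11585 as VP9's cospi_16_64 in 14-bit fixed point:

#include <stdio.h>

int main(void)
{
    int16_t out0, out1;

    /* 45-degree rotation, then one butterfly lane, mimicking an
     * even-part step of the transform. */
    dotp_const_pair_model(100, 50, 11585, 11585, &out0, &out1);
    printf("rotated:        %d %d\n", out0, out1);
    printf("butterfly lane: %d %d\n", out0 + out1, out0 - out1);
    return 0;
}
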