Lines Matching defs:sum0_r

108 __m128i sum0_r, sum1_r, sum2_r, sum3_r; \
122 LSX_BUTTERFLY_4_W(temp0_r, temp0_l, temp1_l, temp1_r, sum0_r, sum0_l,\
126 sum3_r = sum0_r; \
134 sum0_r = __lsx_vadd_w(sum0_r, temp2_r); \
139 in0 = __lsx_vssrarni_h_w(sum0_l, sum0_r, shift); \
161 LSX_BUTTERFLY_4_W(temp0_r, temp0_l, temp1_l, temp1_r, sum0_r, sum0_l,\
165 sum3_r = sum0_r; \
173 sum0_r = __lsx_vadd_w(sum0_r, temp2_r); \
178 in1 = __lsx_vssrarni_h_w(sum0_l, sum0_r, shift); \
204 __m128i sum0_r, sum1_r, sum2_r, sum3_r, sum0_l, sum1_l, sum2_l; \
216 src4_r, filter2, src4_l, filter2, sum0_r, sum0_l, \
220 DUP4_ARG3(__lsx_vdp2add_w_h, sum0_r, src1_r, filter1, sum0_l, \
222 src5_l, filter3, sum0_r, sum0_l, sum2_r, sum2_l); \
226 sum1_r = sum0_r; \
239 sum0_r = __lsx_vadd_w(sum0_r, temp0_r); \
252 sum0_r = __lsx_vadd_w(sum0_r, temp0_r); \
257 LSX_BUTTERFLY_4_W(sum0_r, sum0_l, sum2_l, sum2_r, res0_r, res0_l, \
277 #define HEVC_EVEN16_CALC(input, sum0_r, sum0_l, load_idx, store_idx) \
281 tmp1_r = sum0_r; \
283 sum0_r = __lsx_vadd_w(sum0_r, tmp0_r); \
285 __lsx_vst(sum0_r, (input + load_idx * 8), 0); \
548 __m128i sum0_r, sum0_l, sum1_r, sum1_l, tmp0_r, tmp0_l, tmp1_r, tmp1_l;
569 sum0_r = __lsx_vdp2_w_h(src0_r, filter0);
571 sum0_r = __lsx_vdp2add_w_h(sum0_r, src1_r, filter1);
573 __lsx_vst(sum0_r, tmp_buf_ptr, 0);
578 sum0_r = __lsx_vdp2_w_h(src0_r, filter0);
580 sum0_r = __lsx_vdp2add_w_h(sum0_r, src1_r, filter1);
582 __lsx_vst(sum0_r, tmp_buf_ptr, 32);
587 sum0_r = __lsx_vdp2_w_h(src0_r, filter0);
589 sum0_r = __lsx_vdp2add_w_h(sum0_r, src1_r, filter1);
591 __lsx_vst(sum0_r, tmp_buf_ptr, 64);
596 sum0_r = __lsx_vdp2_w_h(src0_r, filter0);
598 sum0_r = __lsx_vdp2add_w_h(sum0_r, src1_r, filter1);
600 __lsx_vst(sum0_r, tmp_buf_ptr, 96);
608 src3_r, filter1, src3_l, filter1, sum0_r, sum0_l, tmp1_r, tmp1_l);
609 sum1_r = __lsx_vsub_w(sum0_r, tmp1_r);
611 sum0_r = __lsx_vadd_w(sum0_r, tmp1_r);
614 HEVC_EVEN16_CALC(tmp_buf_ptr, sum0_r, sum0_l, 0, 7);
621 src3_r, filter1, src3_l, filter1, sum0_r, sum0_l, tmp1_r, tmp1_l);
622 sum1_r = __lsx_vsub_w(sum0_r, tmp1_r);
624 sum0_r = __lsx_vadd_w(sum0_r, tmp1_r);
627 HEVC_EVEN16_CALC(tmp_buf_ptr, sum0_r, sum0_l, 1, 6);
652 sum0_r = __lsx_vdp2_w_h(src0_r, filter0);
654 sum0_r = __lsx_vdp2add_w_h(sum0_r, src1_r, filter1);
656 sum0_r = __lsx_vdp2add_w_h(sum0_r, src2_r, filter2);
658 sum0_r = __lsx_vdp2add_w_h(sum0_r, src3_r, filter3);
665 tmp0_r = __lsx_vadd_w(tmp0_r, sum0_r);
667 tmp1_r = __lsx_vsub_w(tmp1_r, sum0_r);
713 sum0_r = __lsx_vdp2_w_h(src0_r, filter0);
715 sum0_r = __lsx_vdp2add_w_h(sum0_r, src1_r, filter1);
717 sum0_r = __lsx_vdp2add_w_h(sum0_r, src2_r, filter2);
719 sum0_r = __lsx_vdp2add_w_h(sum0_r, src3_r, filter3);
721 tmp1_r = sum0_r;
728 sum0_r = __lsx_vdp2_w_h(src4_r, filter0);
730 sum0_r = __lsx_vdp2add_w_h(sum0_r, src5_r, filter1);
732 sum0_r = __lsx_vdp2add_w_h(sum0_r, src6_r, filter2);
734 sum0_r = __lsx_vdp2add_w_h(sum0_r, src7_r, filter3);
736 sum0_r = __lsx_vadd_w(sum0_r, tmp1_r);
743 tmp0_r = __lsx_vadd_w(tmp0_r, sum0_r);
750 tmp1_r = __lsx_vsub_w(tmp1_r, sum0_r);
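
Note on the matches: every occurrence of sum0_r above follows one accumulate-and-narrow pattern from what is evidently HEVC inverse-transform code written with LoongArch LSX intrinsics. sum0_r is seeded with a widening dot product (__lsx_vdp2_w_h), further filter taps are folded in with __lsx_vdp2add_w_h, the 32-bit sums are combined with __lsx_vadd_w/__lsx_vsub_w or a butterfly, and the results are finally rounded, saturated and narrowed back to 16-bit lanes with __lsx_vssrarni_h_w (line 139). Below is a minimal sketch of that pattern, not the matched source itself: the helper name and the two-tap depth are assumptions, and a literal shift of 7 stands in for the macro parameter `shift`, since the intrinsic requires a compile-time immediate. It needs a LoongArch toolchain with LSX enabled (-mlsx).

/* Sketch of the sum0_r accumulate-and-narrow pattern (hypothetical
 * helper, not the matched FFmpeg source). */
#include <lsxintrin.h>

static inline __m128i idct_two_tap(__m128i src0_r, __m128i src0_l,
                                   __m128i src1_r, __m128i src1_l,
                                   __m128i filter0, __m128i filter1)
{
    /* Pairwise 16-bit x 16-bit products, accumulated as 32-bit words. */
    __m128i sum0_r = __lsx_vdp2_w_h(src0_r, filter0);
    __m128i sum0_l = __lsx_vdp2_w_h(src0_l, filter0);

    /* Fold the next tap into the running 32-bit sums. */
    sum0_r = __lsx_vdp2add_w_h(sum0_r, src1_r, filter1);
    sum0_l = __lsx_vdp2add_w_h(sum0_l, src1_l, filter1);

    /* Round, shift right by 7 (assumed stand-in for `shift`), saturate
     * and narrow both halves back to 16-bit lanes; argument order
     * mirrors the match at line 139. */
    return __lsx_vssrarni_h_w(sum0_l, sum0_r, 7);
}

Keeping the running sums in 32-bit precision until the single saturating narrow at the end is why every match keeps reassigning sum0_r per tap instead of narrowing as it goes: the intermediate products can exceed 16 bits, and only the final result has to fit.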