Lines Matching defs:sum0_l
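(The matches below appear to come from FFmpeg's LoongArch HEVC inverse-transform code, hevc_idct_lsx.c or similar. sum0_l holds the four high 32-bit lanes of a partial sum, produced by the vilvh "left" interleave; a parallel sum0_r on the neighbouring source lines holds the four low lanes from vilvl, which is why the _r/_l pairs store at byte offsets 0 and 16.)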
109 __m128i sum0_l, sum1_l, sum2_l, sum3_l; \
122 LSX_BUTTERFLY_4_W(temp0_r, temp0_l, temp1_l, temp1_r, sum0_r, sum0_l,\
127 sum3_l = sum0_l; \
135 sum0_l = __lsx_vadd_w(sum0_l, temp2_l); \
139 in0 = __lsx_vssrarni_h_w(sum0_l, sum0_r, shift); \
161 LSX_BUTTERFLY_4_W(temp0_r, temp0_l, temp1_l, temp1_r, sum0_r, sum0_l,\
166 sum3_l = sum0_l; \
174 sum0_l = __lsx_vadd_w(sum0_l, temp2_l); \
178 in1 = __lsx_vssrarni_h_w(sum0_l, sum0_r, shift); \
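Lines 109-178 sit inside one macro (note the trailing backslashes) that emits two packed rows. The visible pattern: a four-way butterfly forms even-part sums, sum0 is copied into sum3 before the odd term temp2 is folded in (presumably so a mirrored output can subtract the same term), and each in0/in1 is produced by __lsx_vssrarni_h_w, which rounds, shifts and saturates the 32-bit lanes down to 16 bits, packing sum0_r into the low halfwords and sum0_l into the high ones. A minimal scalar model of that combine-and-pack step (illustrative names, not the macro itself):

#include <stdint.h>

/* Per-lane model of __lsx_vssrarni_h_w: rounded arithmetic right
 * shift, then saturation to the int16_t range. */
static int16_t round_shift_sat16(int32_t v, int shift)
{
    v = (v + (1 << (shift - 1))) >> shift;
    if (v > INT16_MAX) v = INT16_MAX;
    if (v < INT16_MIN) v = INT16_MIN;
    return (int16_t)v;
}

/* Even part plus/minus odd part, then pack: the sum3 = sum0 (saved
 * copy) followed by sum0 += temp2 pattern seen above. */
static void even_odd_pack(const int32_t even[4], const int32_t odd[4],
                          int16_t out_fwd[4], int16_t out_mir[4], int shift)
{
    for (int i = 0; i < 4; i++) {
        out_fwd[i] = round_shift_sat16(even[i] + odd[i], shift);
        out_mir[i] = round_shift_sat16(even[i] - odd[i], shift);
    }
}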
204 __m128i sum0_r, sum1_r, sum2_r, sum3_r, sum0_l, sum1_l, sum2_l; \
216 src4_r, filter2, src4_l, filter2, sum0_r, sum0_l, \
220 DUP4_ARG3(__lsx_vdp2add_w_h, sum0_r, src1_r, filter1, sum0_l, \
222 src5_l, filter3, sum0_r, sum0_l, sum2_r, sum2_l); \
227 sum1_l = sum0_l; \
240 sum0_l = __lsx_vadd_w(sum0_l, temp0_l); \
253 sum0_l = __lsx_vadd_w(sum0_l, temp0_l); \
257 LSX_BUTTERFLY_4_W(sum0_r, sum0_l, sum2_l, sum2_r, res0_r, res0_l, \
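Lines 204-257 build sum0 with a chain of LSX dot-product steps (the DUP4_ARG2/DUP4_ARG3 helpers just apply one intrinsic four times) before the final butterfly into res0. A scalar model of the two intrinsics doing the work, per the documented vdp2.w.h semantics (array layout illustrative):

#include <stdint.h>

/* __lsx_vdp2_w_h: each 32-bit lane is the sum of two adjacent
 * signed 16-bit products. */
static void vdp2_w_h(int32_t dst[4], const int16_t a[8], const int16_t b[8])
{
    for (int i = 0; i < 4; i++)
        dst[i] = (int32_t)a[2 * i]     * b[2 * i] +
                 (int32_t)a[2 * i + 1] * b[2 * i + 1];
}

/* __lsx_vdp2add_w_h: the same product pair, accumulated. */
static void vdp2add_w_h(int32_t acc[4], const int16_t a[8], const int16_t b[8])
{
    for (int i = 0; i < 4; i++)
        acc[i] += (int32_t)a[2 * i]     * b[2 * i] +
                  (int32_t)a[2 * i + 1] * b[2 * i + 1];
}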
277 #define HEVC_EVEN16_CALC(input, sum0_r, sum0_l, load_idx, store_idx) \
282 tmp1_l = sum0_l; \
284 sum0_l = __lsx_vadd_w(sum0_l, tmp0_l); \
286 __lsx_vst(sum0_l, (input + load_idx * 8), 16); \
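From these fragments, HEVC_EVEN16_CALC evidently does a symmetric in-place update on a scratch buffer of eight-word rows: the running even-part sum is added to a previously stored partial row (written back at load_idx) while the difference goes to the mirrored store_idx. A hedged reconstruction of the _l half of the body (the _r half would do the same at byte offset 0; the row layout is an assumption):

#include <lsxintrin.h>
#include <stdint.h>

/* Reconstruction sketch only: `input` is assumed to point at rows of
 * eight int32 partial sums, hence the `* 8` stride and offset 16. */
static void even16_calc_l(int32_t *input, __m128i sum0_l,
                          int load_idx, int store_idx)
{
    __m128i tmp0_l = __lsx_vld(input + load_idx * 8, 16); /* stored partial */
    __m128i tmp1_l = sum0_l;                              /* keep a copy    */
    sum0_l = __lsx_vadd_w(sum0_l, tmp0_l);                /* sum + partial  */
    __lsx_vst(sum0_l, input + load_idx * 8, 16);
    tmp1_l = __lsx_vsub_w(tmp1_l, tmp0_l);                /* sum - partial  */
    __lsx_vst(tmp1_l, input + store_idx * 8, 16);         /* mirrored row   */
}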
548 __m128i sum0_r, sum0_l, sum1_r, sum1_l, tmp0_r, tmp0_l, tmp1_r, tmp1_l;
570 sum0_l = __lsx_vdp2_w_h(src0_l, filter0);
572 sum0_l = __lsx_vdp2add_w_h(sum0_l, src1_l, filter1);
574 __lsx_vst(sum0_l, tmp_buf_ptr, 16);
579 sum0_l = __lsx_vdp2_w_h(src0_l, filter0);
581 sum0_l = __lsx_vdp2add_w_h(sum0_l, src1_l, filter1);
583 __lsx_vst(sum0_l, tmp_buf_ptr, 48);
588 sum0_l = __lsx_vdp2_w_h(src0_l, filter0);
590 sum0_l = __lsx_vdp2add_w_h(sum0_l, src1_l, filter1);
592 __lsx_vst(sum0_l, tmp_buf_ptr, 80);
597 sum0_l = __lsx_vdp2_w_h(src0_l, filter0);
599 sum0_l = __lsx_vdp2add_w_h(sum0_l, src1_l, filter1);
601 __lsx_vst(sum0_l, tmp_buf_ptr, 112);
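Lines 548-601 repeat one two-tap pattern four times, storing the _l halves at byte offsets 16, 48, 80 and 112 (the _r halves presumably take 0, 32, 64 and 96), filling eight consecutive four-word rows of the scratch buffer. One repeat, wrapped as a hypothetical helper:

#include <lsxintrin.h>
#include <stdint.h>

/* Hypothetical wrapper; the real code inlines four copies and
 * re-loads the src/filter vectors between them. */
static void two_tap_store_l(int32_t *tmp_buf_ptr, __m128i src0_l,
                            __m128i src1_l, __m128i filter0, __m128i filter1)
{
    __m128i sum0_l = __lsx_vdp2_w_h(src0_l, filter0);     /* two taps  */
    sum0_l = __lsx_vdp2add_w_h(sum0_l, src1_l, filter1);  /* two more  */
    __lsx_vst(sum0_l, tmp_buf_ptr, 16);  /* 48, 80, 112 in later repeats */
}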
608 src3_r, filter1, src3_l, filter1, sum0_r, sum0_l, tmp1_r, tmp1_l);
610 sum1_l = __lsx_vsub_w(sum0_l, tmp1_l);
612 sum0_l = __lsx_vadd_w(sum0_l, tmp1_l);
614 HEVC_EVEN16_CALC(tmp_buf_ptr, sum0_r, sum0_l, 0, 7);
621 src3_r, filter1, src3_l, filter1, sum0_r, sum0_l, tmp1_r, tmp1_l);
623 sum1_l = __lsx_vsub_w(sum0_l, tmp1_l);
625 sum0_l = __lsx_vadd_w(sum0_l, tmp1_l);
627 HEVC_EVEN16_CALC(tmp_buf_ptr, sum0_r, sum0_l, 1, 6);
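HEVC_EVEN16_CALC (sketched above) is then applied with the mirrored index pairs (0, 7) and (1, 6); the unmatched lines presumably continue with (2, 5) and (3, 4) to finish all eight symmetric rows. Just before each call, sum1 = sum0 - tmp1 and sum0 = sum0 + tmp1 split the accumulated dot products into the antisymmetric and symmetric halves.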
653 sum0_l = __lsx_vdp2_w_h(src0_l, filter0);
655 sum0_l = __lsx_vdp2add_w_h(sum0_l, src1_l, filter1);
657 sum0_l = __lsx_vdp2add_w_h(sum0_l, src2_l, filter2);
659 sum0_l = __lsx_vdp2add_w_h(sum0_l, src3_l, filter3);
666 tmp0_l = __lsx_vadd_w(tmp0_l, sum0_l);
668 tmp1_l = __lsx_vsub_w(tmp1_l, sum0_l);
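Lines 653-668 chain one vdp2 and three vdp2add steps, i.e. eight signed products per 32-bit lane, then fold the result symmetrically: added into tmp0, subtracted from tmp1. A scalar sketch with illustrative names:

#include <stdint.h>

/* src[t]/filt[t] model the adjacent 16-bit pair consumed by each
 * vdp2/vdp2add step; tmp0/tmp1 are the mirrored partial outputs. */
static void four_step_fold(const int16_t src[4][2], const int16_t filt[4][2],
                           int32_t *tmp0, int32_t *tmp1)
{
    int32_t sum0 = 0;
    for (int t = 0; t < 4; t++)
        sum0 += (int32_t)src[t][0] * filt[t][0]
              + (int32_t)src[t][1] * filt[t][1];
    *tmp0 += sum0;   /* tmp0_l = __lsx_vadd_w(tmp0_l, sum0_l) */
    *tmp1 -= sum0;   /* tmp1_l = __lsx_vsub_w(tmp1_l, sum0_l) */
}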
714 sum0_l = __lsx_vdp2_w_h(src0_l, filter0);
716 sum0_l = __lsx_vdp2add_w_h(sum0_l, src1_l, filter1);
718 sum0_l = __lsx_vdp2add_w_h(sum0_l, src2_l, filter2);
720 sum0_l = __lsx_vdp2add_w_h(sum0_l, src3_l, filter3);
722 tmp1_l = sum0_l;
729 sum0_l = __lsx_vdp2_w_h(src4_l, filter0);
731 sum0_l = __lsx_vdp2add_w_h(sum0_l, src5_l, filter1);
733 sum0_l = __lsx_vdp2add_w_h(sum0_l, src6_l, filter2);
735 sum0_l = __lsx_vdp2add_w_h(sum0_l, src7_l, filter3);
737 sum0_l = __lsx_vadd_w(sum0_l, tmp1_l);
744 tmp0_l = __lsx_vadd_w(tmp0_l, sum0_l);
751 tmp1_l = __lsx_vsub_w(tmp1_l, sum0_l);
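Lines 714-751 extend the same fold to sixteen products per lane, evidently split into two four-step halves because only filter0-filter3 are chained at a time: the first half-sum is parked (tmp1_l = sum0_l, line 722) while src4-src7 are processed, then added back in (line 737) before the usual add/sub into tmp0/tmp1. A scalar sketch under those assumptions:

#include <stdint.h>

/* dot8 models one vdp2 plus three vdp2add steps: eight products
 * accumulated into one 32-bit lane. */
static int32_t dot8(const int16_t s[8], const int16_t f[8])
{
    int32_t acc = 0;
    for (int i = 0; i < 8; i++)
        acc += (int32_t)s[i] * f[i];
    return acc;
}

static void two_half_fold(const int16_t s03[8], const int16_t s47[8],
                          const int16_t f03[8], const int16_t f47[8],
                          int32_t *tmp0, int32_t *tmp1)
{
    int32_t sum0 = dot8(s03, f03)      /* first half, parked in tmp1_l  */
                 + dot8(s47, f47);     /* second half, then vadd'ed in  */
    *tmp0 += sum0;
    *tmp1 -= sum0;
}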