Lines Matching defs:vec1

33 hevc_bi_rnd_clip(__m128i in0, __m128i vec0, __m128i in1, __m128i vec1)
38 vec1 = __lsx_vsadd_h(in1, vec1);
39 out = __lsx_vssrarni_bu_h(vec1, vec0, 7);
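
The three matches above are the body of hevc_bi_rnd_clip(): the two 16-bit intermediate predictions are combined with a saturating add (__lsx_vsadd_h), then rounded, shifted right by 7 and clamped to unsigned 8-bit pixels (__lsx_vssrarni_bu_h with an immediate of 7). A minimal scalar sketch of one lane, assuming those per-lane semantics (the real helper works on 8 halfword lanes per __m128i and narrows two registers into one result):

    #include <stdint.h>

    /* Hypothetical scalar model of one 16-bit lane of hevc_bi_rnd_clip(). */
    static uint8_t bi_rnd_clip_lane(int16_t in, int16_t vec)
    {
        int32_t sum = (int32_t)in + (int32_t)vec;

        /* saturate to the int16_t range, like __lsx_vsadd_h */
        if (sum > INT16_MAX) sum = INT16_MAX;
        if (sum < INT16_MIN) sum = INT16_MIN;

        /* rounded arithmetic shift right by 7, clamp to an 8-bit pixel */
        int32_t out = (sum + 64) >> 7;
        if (out < 0)   out = 0;
        if (out > 255) out = 255;
        return (uint8_t)out;
    }
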
479 __m128i vec0, vec1, vec2, vec3;
502 vec0, vec1);
505 DUP4_ARG2(__lsx_vdp2_h_bu_b, vec0, filt0, vec1, filt0, vec2, filt0,
508 vec0, vec1);
511 DUP4_ARG3(__lsx_vdp2add_h_bu_b, dst0, vec0, filt1, dst1, vec1, filt1,
514 vec0, vec1);
517 DUP4_ARG3(__lsx_vdp2add_h_bu_b, dst0, vec0, filt2, dst1, vec1, filt2,
520 vec0, vec1);
523 DUP4_ARG3(__lsx_vdp2add_h_bu_b, dst0, vec0, filt3, dst1, vec1, filt3,
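
Lines 479-523 show the 8-tap pattern that the matches up through line 1194 keep repeating: __lsx_vshuf_b shuffles the source bytes into the pair layouts vec0.. (driven by mask0..mask3), __lsx_vdp2_h_bu_b takes the initial 2-element dot product against filt0 (unsigned byte times signed byte, widened to halfwords), and three __lsx_vdp2add_h_bu_b steps accumulate the filt1..filt3 pairs. A scalar sketch of one output lane, under the assumption that the four 2-tap groups together cover taps 0..7 of the filter window:

    #include <stdint.h>

    /* Hypothetical scalar model of one lane of the 8-tap horizontal pass.
     * filt[0..7] holds the signed 8-bit taps split into the pairs
     * filt0..filt3; the SIMD code computes 8 such lanes per register. */
    static int16_t hevc_8tap_lane(const uint8_t *src, const int8_t filt[8])
    {
        int32_t acc = 0;

        for (int pair = 0; pair < 4; pair++)          /* filt0 .. filt3 */
            acc += src[2 * pair]     * filt[2 * pair]
                 + src[2 * pair + 1] * filt[2 * pair + 1];

        return (int16_t)acc;   /* 16-bit intermediate, combined and clipped
                                  later by hevc_bi_rnd_clip() */
    }
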
543 __m128i vec0, vec1, vec2, vec3;
565 src1, mask0, src0, src0, mask1, vec0, vec1, vec2, vec3);
566 DUP2_ARG2(__lsx_vdp2_h_bu_b, vec0, filt0, vec1, filt0, dst0, dst1);
570 src0, mask2, src1, src0, mask6, vec0, vec1, vec2, vec3);
571 DUP4_ARG3(__lsx_vdp2add_h_bu_b, dst1, vec0, filt1, dst2, vec1, filt1,
574 mask7, src1, src1, mask3, vec0, vec1, vec2, vec3);
575 DUP4_ARG3(__lsx_vdp2add_h_bu_b, dst2, vec0, filt2, dst0, vec1, filt3,
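
The DUP2_/DUP4_ wrappers that dominate these matches are repetition macros, not extra intrinsics: DUPn_ARGm issues the same m-argument intrinsic n times and fills n output registers. A sketch of the convention, modeled on the helpers in FFmpeg's loongson_intrinsics.h (treat the definitions there as authoritative):

    /* Sketch of the DUPn_ARGm repetition-macro convention (assumed layout). */
    #define DUP2_ARG2(_INS, _IN0, _IN1, _IN2, _IN3, _OUT0, _OUT1)  \
    {                                                              \
        _OUT0 = _INS(_IN0, _IN1);                                  \
        _OUT1 = _INS(_IN2, _IN3);                                  \
    }

    #define DUP2_ARG3(_INS, _IN0, _IN1, _IN2, _IN3, _IN4, _IN5, _OUT0, _OUT1) \
    {                                                                          \
        _OUT0 = _INS(_IN0, _IN1, _IN2);                                        \
        _OUT1 = _INS(_IN3, _IN4, _IN5);                                        \
    }

    /* DUP4_ARG2 / DUP4_ARG3 extend the same idea to four outputs, so the
     * call at line 571 is simply four __lsx_vdp2add_h_bu_b() invocations
     * written on one line. */
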
880 __m128i vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
917 src0, mask2, src0, src0, mask3, vec0, vec1, vec2, vec3);
926 DUP4_ARG3(__lsx_vdp2add_h_bu_b, dst0, vec1, filt1, dst1, vec5, filt1,
934 src4, mask2, src4, src4, mask3, vec0, vec1, vec2, vec3);
941 DUP4_ARG3(__lsx_vdp2add_h_bu_b, dst4, vec1, filt1, dst5, vec5, filt1,
955 src7, mask2, src7, src7, mask3, vec0, vec1, vec2, vec3);
957 DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst7, vec1, filt1, dst7, vec2,
1077 __m128i vec0, vec1, vec2, vec3;
1109 src2, mask0, src3, src2, mask2, vec0, vec1, vec2, vec3);
1110 DUP4_ARG2(__lsx_vdp2_h_bu_b, vec0, filt0, vec1, filt0, vec2, filt0,
1113 src2, mask1, src3, src2, mask3, vec0, vec1, vec2, vec3);
1114 DUP4_ARG3(__lsx_vdp2add_h_bu_b, dst0, vec0, filt1, dst1, vec1, filt1,
1118 src6, mask0, src7, src6, mask2, vec0, vec1, vec2, vec3);
1119 DUP4_ARG2(__lsx_vdp2_h_bu_b, vec0, filt0, vec1, filt0, vec2, filt0,
1122 src6, mask1, src7, src6, mask3, vec0, vec1, vec2, vec3);
1123 DUP4_ARG3(__lsx_vdp2add_h_bu_b, dst4, vec0, filt1, dst5, vec1, filt1,
1143 src5, mask0, src7, src7, mask0, vec0, vec1, vec2, vec3);
1144 DUP4_ARG2(__lsx_vdp2_h_bu_b, vec0, filt0, vec1, filt0, vec2, filt0,
1147 src5, mask1, src7, src7, mask1, vec0, vec1, vec2, vec3);
1148 DUP4_ARG3(__lsx_vdp2add_h_bu_b, dst0, vec0, filt1, dst1, vec1, filt1,
1172 __m128i vec0, vec1, vec2, vec3;
1189 src1, mask0, src2, src2, mask0, vec0, vec1, vec2, vec3);
1190 DUP4_ARG2(__lsx_vdp2_h_bu_b, vec0, filt0, vec1, filt0, vec2, filt0,
1193 src1, mask1, src2, src2, mask1, vec0, vec1, vec2, vec3);
1194 DUP4_ARG3(__lsx_vdp2add_h_bu_b, dst0, vec0, filt1, dst1, vec1, filt1,
1500 __m128i vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, mask1;
1524 DUP2_ARG3(__lsx_vshuf_b, src0, src0, mask0, src0, src0, mask1, vec0, vec1);
1530 DUP2_ARG3(__lsx_vdp2add_h_bu_b, dsth0, vec1, filt1, dsth1, vec3, filt1,
1542 DUP2_ARG3(__lsx_vshuf_b, src3, src3, mask0, src3, src3, mask1, vec0, vec1);
1549 DUP4_ARG3(__lsx_vdp2add_h_bu_b, dsth3, vec1, filt1, dsth4, vec3, filt1, dsth5,
1557 DUP2_ARG3(__lsx_vshuf_b, src3, src3, mask0, src3, src3, mask1, vec0, vec1);
1564 DUP4_ARG3(__lsx_vdp2add_h_bu_b, dsth7, vec1, filt1, dsth8, vec3, filt1, dsth9,
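
From line 1500 onward only mask0/mask1 and filt0/filt1 appear per row, which matches the 4-tap paths: two __lsx_vshuf_b shuffles and two dot-product steps per row instead of four. A scalar sketch of one lane of that shorter pass, under the same assumptions as above:

    #include <stdint.h>

    /* Hypothetical scalar model of one lane of the 4-tap horizontal pass:
     * the filt0 pair plus one accumulating filt1 pair. */
    static int16_t hevc_4tap_lane(const uint8_t *src, const int8_t filt[4])
    {
        return (int16_t)(src[0] * filt[0] + src[1] * filt[1]    /* filt0 */
                       + src[2] * filt[2] + src[3] * filt[3]);  /* filt1 */
    }

In the hv functions these horizontal results presumably feed a second, vertical 4-tap pass before hevc_bi_rnd_clip() produces the final pixels; those vertical-pass lines do not reference vec1, so they do not appear in this listing.
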
1685 __m128i vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, vec8, vec9;
1709 DUP2_ARG3(__lsx_vshuf_b, src0, src0, mask0, src0, src0, mask1, vec0, vec1);
1718 DUP4_ARG3(__lsx_vdp2add_h_bu_b, dst0, vec1, filt1, dst1, vec3, filt1, dst2,
1760 __m128i vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
1796 vec0, vec1);
1804 DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst0, vec1, filt1, dst1, vec3, filt1,
1812 vec0, vec1);
1822 DUP4_ARG3(__lsx_vdp2add_h_bu_b, dst3, vec1, filt1, dst4, vec3, filt1,
1882 __m128i vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, vec8, vec9;
1920 DUP2_ARG3(__lsx_vshuf_b, src0, src0, mask0, src0, src0, mask1, vec0, vec1);
1935 DUP4_ARG3(__lsx_vdp2add_h_bu_b, dst0, vec1, filt1, dst1, vec3, filt1, dst2,
2019 __m128i vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
2049 vec0, vec1);
2057 DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst0, vec1, filt1, dst1, vec3, filt1,
2077 src4, mask0, src4, src4, mask1, vec0, vec1, vec2, vec3);
2083 DUP4_ARG3(__lsx_vdp2add_h_bu_b, dst3, vec1, filt1, dst4, vec3,