Lines Matching refs:q2

238 __m128i p3, p2, p1, p0, q3, q2, q1, q0, p1_out, p0_out, q0_out, q1_out;
243 DUP2_ARG2(__lsx_vldx, dst, stride, dst, stride2, q1, q2);
250 LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh,
272 __m128i p3, p2, p1, p0, q3, q2, q1, q0;
277 DUP2_ARG2(__lsx_vldx, dst, stride, dst, stride2, q1, q2);
292 LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit0, b_limit0, thresh0,
311 __m128i p3, p2, p1, p0, q3, q2, q1, q0;
321 DUP2_ARG2(__lsx_vldx, dst, stride, dst, stride2, q1, q2);
328 LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh,
330 VP9_FLAT4(p3, p2, p0, q0, q2, q3, flat);
345 DUP4_ARG2(__lsx_vilvl_b, zero, q0, zero, q1, zero, q2, zero, q3,
363 q2_out = __lsx_vbitsel_v(q2, q2_filter8, flat);
382 __m128i p3, p2, p1, p0, q3, q2, q1, q0;
397 DUP2_ARG2(__lsx_vldx, dst, stride, dst, stride2, q1, q2);
413 LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh,
415 VP9_FLAT4(p3, p2, p0, q0, q2, q3, flat);
428 DUP4_ARG2(__lsx_vilvl_b, zero, q0, zero, q1, zero, q2, zero, q3,
435 DUP4_ARG2(__lsx_vilvh_b, zero, q0, zero, q1, zero, q2, zero, q3,
453 q2_out = __lsx_vbitsel_v(q2, q2_filt8_l, flat);
473 __m128i p3, p2, p1, p0, q3, q2, q1, q0;
485 DUP2_ARG2(__lsx_vldx, dst, stride, dst, stride2, q1, q2);
501 LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh,
503 VP9_FLAT4(p3, p2, p0, q0, q2, q3, flat);
518 DUP4_ARG2(__lsx_vilvl_b, zero, q0, zero, q1, zero, q2, zero, q3,
536 q2_out = __lsx_vbitsel_v(q2, q2_filt8_l, flat);
555 __m128i p3, p2, p1, p0, q3, q2, q1, q0;
567 DUP2_ARG2(__lsx_vldx, dst, stride, dst, stride2, q1, q2);
583 LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh,
585 VP9_FLAT4(p3, p2, p0, q0, q2, q3, flat);
600 DUP4_ARG2(__lsx_vilvh_b, zero, q0, zero, q1, zero, q2, zero, q3,
618 q2_out = __lsx_vbitsel_v(q2, q2_filt8_h, flat);
638 __m128i p3, p2, p1, p0, q3, q2, q1, q0;
653 DUP2_ARG2(__lsx_vldx, dst, stride, dst, stride2, q1, q2);
661 LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh,
663 VP9_FLAT4(p3, p2, p0, q0, q2, q3, flat);
677 DUP4_ARG2(__lsx_vilvl_b, zero, q0, zero, q1, zero, q2, zero, q3,
684 DUP4_ARG2(__lsx_vilvh_b, zero, q0, zero, q1, zero, q2, zero, q3,
702 q2_out = __lsx_vbitsel_v(q2, q2_filt8_l, flat);
724 __m128i p7, p6, p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5, q6, q7;
747 DUP2_ARG2(__lsx_vldx, dst, stride, dst, stride2, q1, q2);
759 DUP2_ARG2(__lsx_vld, filter48, 64, filter48, 80, q1, q2);
766 __lsx_vstx(q2, dst, stride2);
846 q2_l_in = (v8u16)__lsx_vilvl_b(zero, q2);
853 q2_h_in = (v8u16)__lsx_vilvh_b(zero, q2);
988 /* q2 */
1108 __m128i p3, p2, p1, p0, q3, q2, q1, q0, p7, p6, p5, p4, q4, q5, q6, q7;
1121 DUP2_ARG2(__lsx_vldx, dst, stride, dst, stride2, q1, q2);
1128 LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh,
1130 VP9_FLAT4(p3, p2, p0, q0, q2, q3, flat);
1146 DUP4_ARG2(__lsx_vilvl_b, zero, q0, zero, q1, zero, q2, zero, q3,
1165 q2_out = __lsx_vbitsel_v(q2, q2_filter8, flat);
1288 /* calculation of q1 and q2 */
1361 __m128i p3, p2, p1, p0, q3, q2, q1, q0;
1368 DUP2_ARG2(__lsx_vldx, dst_tmp2, stride, dst_tmp2, stride2, q1, q2);
1375 LSX_TRANSPOSE8x8_B(p3, p2, p1, p0, q0, q1, q2, q3,
1376 p3, p2, p1, p0, q0, q1, q2, q3);
1377 LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh,
1407 __m128i p3, p2, p1, p0, q3, q2, q1, q0;
1430 p3, p2, p1, p0, q0, q1, q2, q3);
1444 LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit0, b_limit0, thresh0,
1485 __m128i p3, p2, p1, p0, q3, q2, q1, q0;
1500 DUP2_ARG2(__lsx_vldx, dst_tmp, stride, dst_tmp, stride2, q1, q2);
1503 LSX_TRANSPOSE8x8_B(p3, p2, p1, p0, q0, q1, q2, q3,
1504 p3, p2, p1, p0, q0, q1, q2, q3);
1511 LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh,
1514 VP9_FLAT4(p3, p2, p0, q0, q2, q3, flat);
1541 DUP4_ARG2(__lsx_vilvl_b, zero, q0, zero, q1, zero, q2, zero, q3,
1559 q2 = __lsx_vbitsel_v(q2, q2_filt8_l, flat);
1565 vec4 = __lsx_vilvl_b(q2, q1);
1603 __m128i p3, p2, p1, p0, q3, q2, q1, q0;
1625 DUP2_ARG2(__lsx_vldx, dst_tmp, stride, dst_tmp, stride2, q2, q1);
1634 q3, q2, q1, q0, row12, row13, row14, row15,
1635 p3, p2, p1, p0, q0, q1, q2, q3);
1650 LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh,
1653 VP9_FLAT4(p3, p2, p0, q0, q2, q3, flat);
1690 DUP4_ARG2(__lsx_vilvl_b, zero, q0, zero, q1, zero, q2, zero, q3,
1697 DUP4_ARG2(__lsx_vilvh_b, zero, q0, zero, q1, zero, q2, zero, q3,
1717 q2 = __lsx_vbitsel_v(q2, q2_filt8_l, flat);
1725 vec2 = __lsx_vilvl_b(q2, q1);
1726 vec5 = __lsx_vilvh_b(q2, q1);
1788 __m128i p3, p2, p1, p0, q3, q2, q1, q0;
1807 DUP2_ARG2(__lsx_vldx, dst_tmp, stride, dst_tmp, stride2, q2, q1);
1816 q3, q2, q1, q0, row12, row13, row14, row15,
1817 p3, p2, p1, p0, q0, q1, q2, q3);
1832 LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh,
1835 VP9_FLAT4(p3, p2, p0, q0, q2, q3, flat);
1874 DUP4_ARG2(__lsx_vilvl_b, zero, q0, zero, q1, zero, q2, zero, q3,
1892 q2 = __lsx_vbitsel_v(q2, q2_filt8_l, flat);
1900 vec2 = __lsx_vilvl_b(q2, q1);
1901 vec5 = __lsx_vilvh_b(q2, q1);
1963 __m128i p3, p2, p1, p0, q3, q2, q1, q0;
1982 DUP2_ARG2(__lsx_vldx, dst_tmp, stride, dst_tmp, stride2, q2, q1);
1991 q3, q2, q1, q0, row12, row13, row14, row15,
1992 p3, p2, p1, p0, q0, q1, q2, q3);
2007 LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh,
2010 VP9_FLAT4(p3, p2, p0, q0, q2, q3, flat);
2049 DUP4_ARG2(__lsx_vilvh_b, zero, q0, zero, q1, zero, q2, zero, q3,
2068 q2 = __lsx_vbitsel_v(q2, q2_filt8_h, flat);
2076 vec2 = __lsx_vilvl_b(q2, q1);
2077 vec5 = __lsx_vilvh_b(q2, q1);
2135 __m128i p7, p6, p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5, q6, q7;
2151 DUP2_ARG2(__lsx_vilvh_w, tmp6, tmp4, tmp7, tmp5, q2, q6);
2152 DUP4_ARG2(__lsx_vbsrl_v, q0, 8, q2, 8, q4, 8, q6, 8, q1, q3, q5, q7);
2164 __lsx_vst(q2, output, 160);
2176 __m128i p7, p6, p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5, q6, q7;
2186 q0, q1, q2, q3);
2189 LSX_TRANSPOSE16x8_B(p7, p6, p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5,
2202 __m128i p7, p6, p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5, q6, q7;
2227 q2 = __lsx_vpackod_d(row13, row5);
2234 DUP2_ARG2(__lsx_vpackev_b, q2, q3, q0, q1, q5, q7);
2235 DUP2_ARG2(__lsx_vpackod_b, q2, q3, q0, q1, tmp6, tmp7);
2243 q2 = __lsx_vpackev_w(tmp3, tmp2);
2258 LSX_ST_8(q0, q1, q2, q3, q4, q5, q6, q7, output, out_stride,
2268 __m128i p3, p2, p1, p0, q3, q2, q1, q0;
2280 DUP4_ARG2(__lsx_vld, src, 0, src, 16, src, 32, src, 48, q0, q1, q2, q3);
2287 LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh,
2290 VP9_FLAT4(p3, p2, p0, q0, q2, q3, flat);
2323 DUP4_ARG2(__lsx_vilvl_b, zero, q0, zero, q1, zero, q2, zero, q3,
2342 q2_out = __lsx_vbitsel_v(q2, q2_l, flat);
2362 __m128i p7, p6, p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5, q6, q7;
2376 DUP4_ARG2(__lsx_vld, dst, 0, dst, 16, dst, 32, dst, 48, q0, q1, q2, q3);
2390 DUP2_ARG2(__lsx_vld, filter48, 64, filter48, 80, q1, q2);
2395 vec2 = __lsx_vilvl_b(q2, q1);
2466 q2_l_in = (v8u16)__lsx_vilvl_b(zero, q2);
2553 /* q2 */
2646 __m128i p3, p2, p1, p0, q3, q2, q1, q0;
2661 DUP4_ARG2(__lsx_vld, dst, 0, dst, 16, dst, 32, dst, 48, q0, q1, q2, q3);
2668 LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh,
2671 VP9_FLAT4(p3, p2, p0, q0, q2, q3, flat);
2710 DUP4_ARG2(__lsx_vilvl_b, zero, q0, zero, q1, zero, q2, zero, q3,
2716 DUP4_ARG2(__lsx_vilvh_b, zero, q0, zero, q1, zero, q2, zero, q3,
2735 q2_out = __lsx_vbitsel_v(q2, q2_filt8_l, flat);
2755 __m128i p7, p6, p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5, q6, q7;
2774 DUP4_ARG2(__lsx_vld, dst, 0, dst, 16, dst, 32, dst, 48, q0, q1, q2, q3);
2785 DUP2_ARG2(__lsx_vld, filter48, 64, filter48, 80, q1, q2);
2793 vec2 = __lsx_vilvl_b(q2, q1);
2794 vec5 = __lsx_vilvh_b(q2, q1);
2917 q2_l_in = (v8u16)__lsx_vilvl_b(zero, q2);
2923 q2_h_in = (v8u16)__lsx_vilvh_b(zero, q2);
3038 /* q2 */
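
Nearly every cluster of matches above ends in the same step: after the mask/flatness tests (LPF_MASK_HEV, VP9_FLAT4), the filtered value replaces q2 only where the flat mask is set, via __lsx_vbitsel_v(q2, q2_filt8, flat). Below is a minimal scalar sketch of that select, assuming only that __lsx_vbitsel_v performs a per-bit select (second operand where the mask bit is 1, first operand where it is 0); the helper name bitsel_u8 and the sample pixel values are illustrative, not from the source.

#include <stdint.h>
#include <stdio.h>

/* Scalar equivalent of one byte lane of __lsx_vbitsel_v(a, b, mask):
 * returns, bit for bit, b where mask is 1 and a where mask is 0. */
static uint8_t bitsel_u8(uint8_t a, uint8_t b, uint8_t mask)
{
    return (uint8_t)((a & (uint8_t)~mask) | (b & mask));
}

int main(void)
{
    /* The flat mask is all-ones or all-zeros per pixel, produced by the
     * flatness test (VP9_FLAT4 in the listing).  Where the neighborhood
     * is flat, the stronger filter's output replaces q2; elsewhere the
     * original q2 passes through unchanged. */
    uint8_t q2       = 120;  /* original pixel two rows past the edge */
    uint8_t q2_filt8 = 118;  /* hypothetical strong-filter output     */
    uint8_t flat     = 0xFF; /* flatness mask for this pixel          */

    uint8_t q2_out = bitsel_u8(q2, q2_filt8, flat);
    printf("q2_out = %u\n", q2_out); /* prints 118: filtered value chosen */
    return 0;
}

In the vector code this decision happens for 16 pixels at once, which is why q2 is first widened with __lsx_vilvl_b/__lsx_vilvh_b (interleave with zero to 16-bit lanes) before filtering, then narrowed back and blended with the single bit-select shown above.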