Lines matching refs:q4 (all references to the q4 vector variable in this file):
724 __m128i p7, p6, p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5, q6, q7;
750 q4 = __lsx_vld(dst_tmp1, 0);
753 VP9_FLAT5(p7, p6, p5, p4, p0, q0, q4, q5, q6, q7, flat, flat2);
886 q4_l_in = (v8u16)__lsx_vilvl_b(zero, q4);
894 q4_h_in = (v8u16)__lsx_vilvh_b(zero, q4);
1025 /* q4 */
1039 q4 = __lsx_vbitsel_v(q4, out_l, flat2);
1040 __lsx_vst(q4, dst, 0);
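
The hits from line 886 through 1040 show the wide-filter pattern for q4: zero-extend to 16-bit lanes (lines 886/894), filter, then select the filtered bytes back only where the flat2 mask is set (lines 1039-1040). A compilable sketch of that step, assuming out_l already holds the repacked filtered bytes and that v8u16 matches the source's typedef:

    #include <stdint.h>
    #include <lsxintrin.h>

    /* 8 x u16 vector type as used by the source (assumed typedef). */
    typedef unsigned short v8u16 __attribute__((vector_size(16)));

    /* Sketch, not the exact source code: widen q4 to 16-bit lanes for the
     * wide filter, then keep the original q4 bytes wherever flat2 is zero.
     * out_l is assumed to hold the already-repacked filtered bytes. */
    static inline __m128i q4_flat2_store(__m128i q4, __m128i out_l,
                                         __m128i flat2, uint8_t *dst)
    {
        __m128i zero  = __lsx_vldi(0);
        v8u16 q4_l_in = (v8u16)__lsx_vilvl_b(zero, q4); /* low 8 px -> u16  */
        v8u16 q4_h_in = (v8u16)__lsx_vilvh_b(zero, q4); /* high 8 px -> u16 */
        (void)q4_l_in; (void)q4_h_in;   /* fed to the elided 15-tap filter  */
        q4 = __lsx_vbitsel_v(q4, out_l, flat2); /* take out_l where flat2=1 */
        __lsx_vst(q4, dst, 0);                  /* full 16-byte row store   */
        return q4;
    }
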
1108 __m128i p3, p2, p1, p0, q3, q2, q1, q0, p7, p6, p5, p4, q4, q5, q6, q7;
1171 dst_tmp1 + stride2, 0, dst_tmp1 + stride3, 0, q4, q5, q6, q7);
1173 VP9_FLAT5(p7, p6, p5, p4, p0, q0, q4, q5, q6, q7, flat, flat2);
1193 DUP4_ARG2(__lsx_vilvl_b, zero, q4, zero, q5, zero, q6, zero, q7,
1308 /* calculation of q3 and q4 */
1322 p1_filter16 = __lsx_vbitsel_v(q4, p1_filter16, flat2);
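
Line 1322 reuses the same select: the wide-filter result p1_filter16 replaces q4 only where flat2 is set. The per-lane math behind the "/* calculation of q3 and q4 */" block is, by the usual VP9 wide-filter definition (an assumption here), a sliding 15-tap weighted average with rounding; a scalar sketch of one step:

    #include <stdint.h>

    /* Scalar sketch of one filter16 step (assumed standard VP9 semantics):
     * slide the window by one pixel and emit the rounded average.
     * Total weight is 16, so the divide is a rounded shift by 4. */
    static inline uint8_t filter16_step(uint16_t *sum,
                                        uint8_t add_px, uint8_t drop_px)
    {
        *sum += add_px;                     /* pixel entering the window */
        *sum -= drop_px;                    /* pixel leaving the window  */
        return (uint8_t)((*sum + 8) >> 4);  /* round, divide by 16       */
    }
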
2135 __m128i p7, p6, p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5, q6, q7;
2150 DUP2_ARG2(__lsx_vilvl_w, tmp6, tmp4, tmp7, tmp5, q0, q4);
2152 DUP4_ARG2(__lsx_vbsrl_v, q0, 8, q2, 8, q4, 8, q6, 8, q1, q3, q5, q7);
2166 __lsx_vst(q4, output, 192);
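
In the transpose hits at lines 2150-2166, each register after the word-interleave appears to hold two 8-byte output rows; __lsx_vbsrl_v shifts the entire 128-bit register right by a byte count, so a shift of 8 brings the upper row down into the low half (line 2152). A sketch of that split:

    #include <lsxintrin.h>

    /* Sketch: split one register holding two packed 8-byte rows, as the
     * q1 = vbsrl(q0, 8) step does in the listing. */
    static inline void split_rows(__m128i pair, __m128i *lo, __m128i *hi)
    {
        *lo = pair;                   /* low 8 bytes: first row            */
        *hi = __lsx_vbsrl_v(pair, 8); /* whole-register shift by 8 bytes:
                                         second row lands in the low half  */
    }
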
2176 __m128i p7, p6, p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5, q6, q7;
2188 q4, q5, q6, q7);
2189 LSX_TRANSPOSE16x8_B(p7, p6, p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5,
2202 __m128i p7, p6, p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5, q6, q7;
2225 q4 = __lsx_vpackod_d(row11, row3);
2231 DUP2_ARG2(__lsx_vpackev_b, q6, q7, q4, q5, tmp0, tmp1);
2232 DUP2_ARG2(__lsx_vpackod_b, q6, q7, q4, q5, tmp4, tmp5);
2239 q4 = __lsx_vpackod_w(tmp3, tmp2);
2258 LSX_ST_8(q0, q1, q2, q3, q4, q5, q6, q7, output, out_stride,
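
The 16x16 transpose (lines 2202-2258) is built from pack-even/pack-odd stages: __lsx_vpackev_b interleaves the even-indexed bytes of its two operands and __lsx_vpackod_b the odd-indexed ones (lines 2231-2232). A sketch of one stage, with the resulting byte order spelled out:

    #include <lsxintrin.h>

    /* Sketch of one transpose butterfly stage. With __lsx_vpackev_b(a, b)
     * the result is b[0],a[0],b[2],a[2],...; __lsx_vpackod_b(a, b) gives
     * b[1],a[1],b[3],a[3],... */
    static inline void pack_stage(__m128i a, __m128i b,
                                  __m128i *ev, __m128i *od)
    {
        *ev = __lsx_vpackev_b(a, b); /* interleave even-indexed bytes */
        *od = __lsx_vpackod_b(a, b); /* interleave odd-indexed bytes  */
    }
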
2362 __m128i p7, p6, p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5, q6, q7;
2377 DUP4_ARG2(__lsx_vld, dst, 64, dst, 80, dst, 96, dst, 112, q4, q5, q6, q7);
2382 VP9_FLAT5(p7, p6, p5, p4, p0, q0, q4, q5, q6, q7, flat, flat2);
2490 q4_l_in = (v8u16)__lsx_vilvl_b(zero, q4);
2576 /* q4 */
2583 q4 = __lsx_vbitsel_v(q4, out_l, flat2);
2584 __lsx_vstelm_d(q4, dst, 0, 0);
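
The 8-pixel-wide vertical path writes back with __lsx_vstelm_d (line 2584) instead of the full 16-byte __lsx_vst used elsewhere: each filtered row owns only 8 bytes of the destination, so storing a single 64-bit element avoids clobbering the neighboring pixels. A sketch:

    #include <stdint.h>
    #include <lsxintrin.h>

    /* Sketch: store only the low 8 filtered bytes of q4, leaving the rest
     * of the destination row untouched. */
    static inline void store_row8(__m128i q4, uint8_t *dst)
    {
        __lsx_vstelm_d(q4, dst, 0, 0); /* doubleword 0, byte offset 0 */
    }
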
2755 __m128i p7, p6, p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5, q6, q7;
2775 DUP4_ARG2(__lsx_vld, dst, 64, dst, 80, dst, 96, dst, 112, q4, q5, q6, q7);
2777 VP9_FLAT5(p7, p6, p5, p4, p0, q0, q4, q5, q6, q7, flat, flat2);
2951 q4_l_in = (v8u16)__lsx_vilvl_b(zero, q4);
2958 q4_h_in = (v8u16)__lsx_vilvh_b(zero, q4);
3069 /* q4 */
3081 q4 = __lsx_vbitsel_v(q4, out_l, flat2);
3082 __lsx_vst(q4, dst, 16*11);
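
All of the wide-filter paths gate on VP9_FLAT5 (lines 753, 1173, 2382, 2777), which derives the flat2 mask from the outermost pixels p7..p4 and q4..q7. Judging by its arguments, this appears to match libvpx's flat_mask5 test with threshold 1 (an assumption); a scalar sketch of the per-pixel decision:

    #include <stdint.h>
    #include <stdlib.h>

    /* Scalar sketch of the flat2 decision that VP9_FLAT5 vectorizes: the
     * segment is "flat" when every outer pixel sits within 1 of the pixel
     * adjacent to the edge on its own side. */
    static int is_flat2(uint8_t p7, uint8_t p6, uint8_t p5, uint8_t p4,
                        uint8_t p0, uint8_t q0,
                        uint8_t q4, uint8_t q5, uint8_t q6, uint8_t q7)
    {
        return abs(p4 - p0) <= 1 && abs(p5 - p0) <= 1 &&
               abs(p6 - p0) <= 1 && abs(p7 - p0) <= 1 &&
               abs(q4 - q0) <= 1 && abs(q5 - q0) <= 1 &&
               abs(q6 - q0) <= 1 && abs(q7 - q0) <= 1;
    }
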