Lines Matching defs:q6

724 __m128i p7, p6, p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5, q6, q7;
751 DUP2_ARG2(__lsx_vldx, dst_tmp1, stride, dst_tmp1, stride2, q5, q6);
753 VP9_FLAT5(p7, p6, p5, p4, p0, q0, q4, q5, q6, q7, flat, flat2);
928 q6_l_in = (v8u16)__lsx_vilvl_b(zero, q6);
936 q6_h_in = (v8u16)__lsx_vilvh_b(zero, q6);
1061 /* q6 */
1075 q6 = __lsx_vbitsel_v(q6, out_l, flat2);
1076 __lsx_vst(q6, dst, 0);
1108 __m128i p3, p2, p1, p0, q3, q2, q1, q0, p7, p6, p5, p4, q4, q5, q6, q7;
1171 dst_tmp1 + stride2, 0, dst_tmp1 + stride3, 0, q4, q5, q6, q7);
1173 VP9_FLAT5(p7, p6, p5, p4, p0, q0, q4, q5, q6, q7, flat, flat2);
1193 DUP4_ARG2(__lsx_vilvl_b, zero, q4, zero, q5, zero, q6, zero, q7,
1328 /* calculation of q5 and q6 */
1342 p1_filter16 = __lsx_vbitsel_v(q6, p1_filter16, flat2);
2135 __m128i p7, p6, p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5, q6, q7;
2151 DUP2_ARG2(__lsx_vilvh_w, tmp6, tmp4, tmp7, tmp5, q2, q6);
2152 DUP4_ARG2(__lsx_vbsrl_v, q0, 8, q2, 8, q4, 8, q6, 8, q1, q3, q5, q7);
2168 __lsx_vst(q6, output, 224);
2176 __m128i p7, p6, p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5, q6, q7;
2188 q4, q5, q6, q7);
2190 q6, q7, p7_o, p6_o, p5_o, p4_o, p3_o, p2_o, p1_o, p0_o);
2202 __m128i p7, p6, p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5, q6, q7;
2223 q6 = __lsx_vpackod_d(row9, row1);
2231 DUP2_ARG2(__lsx_vpackev_b, q6, q7, q4, q5, tmp0, tmp1);
2232 DUP2_ARG2(__lsx_vpackod_b, q6, q7, q4, q5, tmp4, tmp5);
2244 q6 = __lsx_vpackod_w(tmp3, tmp2);
2258 LSX_ST_8(q0, q1, q2, q3, q4, q5, q6, q7, output, out_stride,
2362 __m128i p7, p6, p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5, q6, q7;
2377 DUP4_ARG2(__lsx_vld, dst, 64, dst, 80, dst, 96, dst, 112, q4, q5, q6, q7);
2382 VP9_FLAT5(p7, p6, p5, p4, p0, q0, q4, q5, q6, q7, flat, flat2);
2516 q6_l_in = (v8u16)__lsx_vilvl_b(zero, q6);
2598 /* q6 */
2605 q6 = __lsx_vbitsel_v(q6, out_l, flat2);
2606 __lsx_vstelm_d(q6, dst, 0, 0);
2755 __m128i p7, p6, p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5, q6, q7;
2775 DUP4_ARG2(__lsx_vld, dst, 64, dst, 80, dst, 96, dst, 112, q4, q5, q6, q7);
2777 VP9_FLAT5(p7, p6, p5, p4, p0, q0, q4, q5, q6, q7, flat, flat2);
2987 q6_l_in = (v8u16)__lsx_vilvl_b(zero, q6);
2994 q6_h_in = (v8u16)__lsx_vilvh_b(zero, q6);
3099 /* q6 */
3111 q6 = __lsx_vbitsel_v(q6, out_l, flat2);
3112 __lsx_vst(q6, dst, 16*13);
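
Each of the q6 updates in this listing (lines 1075, 2605 and 3111) has the same shape: q6 is first widened into 16-bit halves with __lsx_vilvl_b/__lsx_vilvh_b against a zero vector, the wide filter output (out_l/out_h) is computed, and __lsx_vbitsel_v merges that output back into q6 under the flat2 mask before the store. The sketch below is a minimal scalar model of only that merge step, assuming flat2 holds per-byte 0x00/0xff masks as the flatness tests produce; the helper name bitsel_byte and the sample values are illustrative and do not come from the source file.

    /*
     * Scalar model of __lsx_vbitsel_v(q6, out, flat2): for every byte,
     * keep the original q6 value where the mask byte is 0x00 and take
     * the wide-filtered value where it is 0xff.
     */
    #include <stdint.h>
    #include <stdio.h>

    /* Per-byte equivalent of the bit-select: (old & ~mask) | (new & mask). */
    static uint8_t bitsel_byte(uint8_t q6_old, uint8_t q6_filt, uint8_t flat2)
    {
        return (uint8_t)((q6_old & (uint8_t)~flat2) | (q6_filt & flat2));
    }

    int main(void)
    {
        /* 16 q6 pixels, a constant stand-in for the filtered result,
         * and a flat2 mask that enables the wide filter on half of them. */
        uint8_t q6[16]      = { 40, 41, 42, 43, 44, 45, 46, 47,
                                48, 49, 50, 51, 52, 53, 54, 55 };
        uint8_t q6_filt[16] = { 60, 60, 60, 60, 60, 60, 60, 60,
                                60, 60, 60, 60, 60, 60, 60, 60 };
        uint8_t flat2[16]   = { 0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0,
                                0xff, 0xff, 0xff, 0xff, 0, 0, 0, 0 };

        for (int i = 0; i < 16; i++)
            q6[i] = bitsel_byte(q6[i], q6_filt[i], flat2[i]);

        for (int i = 0; i < 16; i++)
            printf("%d ", q6[i]);   /* filtered where flat2 was set */
        printf("\n");
        return 0;
    }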