Lines Matching defs:q4
630 v16u8 p7, p6, p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5, q6, q7;
645 LD_UB8(src, pitch, q0, q1, q2, q3, q4, q5, q6, q7);
646 VP9_FLAT5(p7, p6, p5, p4, p0, q0, q4, q5, q6, q7, flat, flat2);
764 q4_r_in = (v8u16) __msa_ilvr_b(zero, (v16i8) q4);
772 q4_l_in = (v8u16) __msa_ilvl_b(zero, (v16i8) q4);
903 /* q4 */
917 q4 = __msa_bmnz_v(q4, (v16u8) r_out, flat2);
918 ST_UB(q4, src);
982 v16u8 p3, p2, p1, p0, q3, q2, q1, q0, p7, p6, p5, p4, q4, q5, q6, q7;
1040 LD_UB4(src + (4 * pitch), pitch, q4, q5, q6, q7);
1042 VP9_FLAT5(p7, p6, p5, p4, p0, q0, q4, q5, q6, q7, flat, flat2);
1058 ILVR_B8_UH(zero, p7, zero, p6, zero, p5, zero, p4, zero, q4,
1160 /* calculation of q3 and q4 */
1170 p1_filter16 = __msa_bmnz_v(q4, p1_filter16, flat2);
1675 v16u8 p7, p6, p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5, q6, q7;
1688 ILVR_W2_UB(tmp6, tmp4, tmp7, tmp5, q0, q4);
1690 SLDI_B4_UB(zeros, q0, zeros, q2, zeros, q4, zeros, q6, 8, q1, q3, q5, q7);
1694 ST_UB8(q0, q1, q2, q3, q4, q5, q6, q7, output, out_pitch);
1701 v16u8 p7, p6, p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5, q6, q7;
1704 LD_UB8(input + (8 * in_pitch), in_pitch, q0, q1, q2, q3, q4, q5, q6, q7);
1705 TRANSPOSE16x8_UB_UB(p7, p6, p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5,
1717 v16u8 p7, p6, p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5, q6, q7;
1733 q4 = (v16u8) __msa_ilvod_d((v2i64) row11, (v2i64) row3);
1739 ILVEV_B2_SH(q7, q6, q5, q4, tmp0, tmp1);
1741 tmp5 = (v8i16) __msa_ilvod_b((v16i8) q4, (v16i8) q5);
1749 q4 = (v16u8) __msa_ilvod_w(tmp3, tmp2);
1767 ST_UB8(q0, q1, q2, q3, q4, q5, q6, q7, output, out_pitch);
1847 v16u8 p7, p6, p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5, q6, q7;
1858 LD_UB8(src, 16, q0, q1, q2, q3, q4, q5, q6, q7);
1860 VP9_FLAT5(p7, p6, p5, p4, p0, q0, q4, q5, q6, q7, flat, flat2);
1944 q4_r_in = (v8u16) __msa_ilvr_b(zero, (v16i8) q4);
2030 /* q4 */
2037 q4 = __msa_bmnz_v(q4, (v16u8) r_out, flat2);
2038 ST_D1(q4, 0, src);
2180 v16u8 p7, p6, p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5, q6, q7;
2195 LD_UB8(src, 16, q0, q1, q2, q3, q4, q5, q6, q7);
2197 VP9_FLAT5(p7, p6, p5, p4, p0, q0, q4, q5, q6, q7, flat, flat2);
2326 q4_r_in = (v8u16) __msa_ilvr_b(zero, (v16i8) q4);
2333 q4_l_in = (v8u16) __msa_ilvl_b(zero, (v16i8) q4);
2451 /* q4 */
2463 q4 = __msa_bmnz_v(q4, (v16u8) r_out, flat2);
2464 ST_UB(q4, src);
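
Two idioms recur in the q4 matches above: the q4 bytes are zero-extended to 16-bit lanes with ilvr_b/ilvl_b against a zero vector before the wide-filter arithmetic (q4_r_in, q4_l_in), and the filtered result is written back with __msa_bmnz_v, which keeps the original q4 bits where the flat2 mask is clear and takes the r_out bits where it is set. A minimal sketch of those two steps follows, assuming generic names (widen_lo, select_filtered, orig, filtered, mask are illustrative and not taken from the file):

    #include <msa.h>   /* MIPS MSA intrinsics; build with -mmsa */

    /* Zero-extend the low 8 bytes of a pixel vector to unsigned halfwords,
     * mirroring the q4_r_in = __msa_ilvr_b(zero, q4) lines above. */
    static v8u16 widen_lo(v16u8 px)
    {
        v16i8 zero = { 0 };
        return (v8u16) __msa_ilvr_b(zero, (v16i8) px);
    }

    /* Masked write-back, as in q4 = __msa_bmnz_v(q4, r_out, flat2):
     * BMNZ.V computes wd = (ws & wt) | (wd & ~wt), i.e. take bits from
     * `filtered` where `mask` is 1 and keep `orig` where it is 0. */
    static v16u8 select_filtered(v16u8 orig, v16u8 filtered, v16u8 mask)
    {
        return __msa_bmnz_v(orig, filtered, mask);
    }

Because the flat2 mask is built as all-ones or all-zeros per byte, the bitwise BMNZ.V acts as a per-pixel select between the original q4 row and the 15-tap filter output before it is stored with ST_UB/ST_D1.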