Lines Matching defs:vec1

33 #define HEVC_PRED_PLANAR_16x2(src0_r, src0_l, tmp0, tmp1, vec0, vec1,          \
39     MUL4(mul_val_h0, vec0, mul_val_h2, vec0, mul_val_h0, vec1,                 \
40          mul_val_h2, vec1, res0_m, res1_m, res2_m, res3_m);                    \
68 v8i16 vec0, vec1, vec2;
80 vec1 = __msa_fill_h(src_top[0]);
85 vec2 += vec1;
103 v8i16 vec0, vec1, vec2;
119 vec1 = __msa_fill_h(src_top[0]);
124 vec2 += vec1;
158 v8i16 vec0, vec1, vec2, vec3;
171 vec1 = __msa_fill_h(src_top[0]);
179 ADD2(vec2, vec1, vec3, vec1, vec2, vec3);
351 v8u16 sum, vec0, vec1;
366 ILVR_B2_UH(zero, store, zero, src, vec0, vec1);
368 vec1 += vec0;
370 vec1 += vec0;
372 vec1 = (v8u16) __msa_srari_h((v8i16) vec1, 2);
373 store = (v16u8) __msa_pckev_b((v16i8) vec1, (v16i8) vec1);
412 v8u16 sum, vec0, vec1;
434 ILVR_B2_UH(zero, store, zero, src, vec0, vec1);
436 vec1 += vec0;
438 vec1 += vec0;
439 vec1 = (v8u16) __msa_srari_h((v8i16) vec1, 2);
440 store = (v16u8) __msa_pckev_b((v16i8) vec1, (v16i8) vec1);
448 vec1 = (v8u16) __msa_ilvr_b(zero, (v16i8) src);
451 vec1 += vec0;
452 vec1 = (v8u16) __msa_srari_h((v8i16) vec1, 2);
455 tmp_dst[stride * col] = vec1[col];
470 v8u16 vec0, vec1, vec2;
493 ILVRL_B2_UH(zero, src_above1, vec1, vec2);
494 ADD2(vec1, vec0, vec2, vec0, vec1, vec2);
496 ADD2(vec1, vec0, vec2, vec0, vec1, vec2);
497 SRARI_H2_UH(vec1, vec2, 2);
498 store = (v16u8) __msa_pckev_b((v16i8) vec2, (v16i8) vec1);
503 ILVRL_B2_UH(zero, src_left1, vec1, vec2);
506 ADD2(vec1, vec0, vec2, vec0, vec1, vec2);
507 SRARI_H2_UH(vec1, vec2, 2);
508 store = (v16u8) __msa_pckev_b((v16i8) vec2, (v16i8) vec1);
555 v8i16 vec0, vec1, vec2, vec3, res0, res1, res2, res3;
568 SPLATI_H4_SH(src1_r, 0, 1, 2, 3, vec0, vec1, vec2, vec3);
573 MUL4(mul_val0, vec0, mul_val0, vec1, mul_val0, vec2, mul_val0, vec3,
602 v8i16 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
616 SPLATI_H4_SH(src_vec1_r, 0, 1, 2, 3, vec0, vec1, vec2, vec3);
622 MUL4(mul_val0, vec0, mul_val0, vec1, mul_val0, vec2, mul_val0, vec3,
669 v8i16 vec0, vec1;
687 SPLATI_H2_SH(src1_r, 0, 1, vec0, vec1);
688 HEVC_PRED_PLANAR_16x2(src0_r, src0_l, tmp0, tmp1, vec0, vec1,
694 SPLATI_H2_SH(src1_r, 2, 3, vec0, vec1);
695 HEVC_PRED_PLANAR_16x2(src0_r, src0_l, tmp0, tmp1, vec0, vec1,
701 SPLATI_H2_SH(src1_r, 4, 5, vec0, vec1);
702 HEVC_PRED_PLANAR_16x2(src0_r, src0_l, tmp0, tmp1, vec0, vec1,
708 SPLATI_H2_SH(src1_r, 6, 7, vec0, vec1);
709 HEVC_PRED_PLANAR_16x2(src0_r, src0_l, tmp0, tmp1, vec0, vec1,
715 SPLATI_H2_SH(src1_l, 0, 1, vec0, vec1);
716 HEVC_PRED_PLANAR_16x2(src0_r, src0_l, tmp0, tmp1, vec0, vec1,
722 SPLATI_H2_SH(src1_l, 2, 3, vec0, vec1);
723 HEVC_PRED_PLANAR_16x2(src0_r, src0_l, tmp0, tmp1, vec0, vec1,
729 SPLATI_H2_SH(src1_l, 4, 5, vec0, vec1);
730 HEVC_PRED_PLANAR_16x2(src0_r, src0_l, tmp0, tmp1, vec0, vec1,
736 SPLATI_H2_SH(src1_l, 6, 7, vec0, vec1);
737 HEVC_PRED_PLANAR_16x2(src0_r, src0_l, tmp0, tmp1, vec0, vec1,
750 v8i16 vec0, vec1, res0, res1;
770 SPLATI_H2_SH(src1_r, 0, 1, vec0, vec1);
771 HEVC_PRED_PLANAR_16x2(src0_r, src0_l, tmp0, tmp1, vec0, vec1,
777 SPLATI_H2_SH(src1_r, 2, 3, vec0, vec1);
778 HEVC_PRED_PLANAR_16x2(src0_r, src0_l, tmp0, tmp1, vec0, vec1,
784 SPLATI_H2_SH(src1_r, 4, 5, vec0, vec1);
785 HEVC_PRED_PLANAR_16x2(src0_r, src0_l, tmp0, tmp1, vec0, vec1,
791 SPLATI_H2_SH(src1_r, 6, 7, vec0, vec1);
792 HEVC_PRED_PLANAR_16x2(src0_r, src0_l, tmp0, tmp1, vec0, vec1,
798 SPLATI_H2_SH(src1_l, 0, 1, vec0, vec1);
799 HEVC_PRED_PLANAR_16x2(src0_r, src0_l, tmp0, tmp1, vec0, vec1,
805 SPLATI_H2_SH(src1_l, 2, 3, vec0, vec1);
806 HEVC_PRED_PLANAR_16x2(src0_r, src0_l, tmp0, tmp1, vec0, vec1,
812 SPLATI_H2_SH(src1_l, 4, 5, vec0, vec1);
813 HEVC_PRED_PLANAR_16x2(src0_r, src0_l, tmp0, tmp1, vec0, vec1,
819 SPLATI_H2_SH(src1_l, 6, 7, vec0, vec1);
820 HEVC_PRED_PLANAR_16x2(src0_r, src0_l, tmp0, tmp1, vec0, vec1,
833 v8i16 vec0, vec1, res0, res1, tmp0, tmp1;
852 SPLATI_H2_SH(src1_r, 0, 1, vec0, vec1);
853 HEVC_PRED_PLANAR_16x2(src0_r, src0_l, tmp0, tmp1, vec0, vec1,
859 SPLATI_H2_SH(src1_r, 2, 3, vec0, vec1);
860 HEVC_PRED_PLANAR_16x2(src0_r, src0_l, tmp0, tmp1, vec0, vec1,
866 SPLATI_H2_SH(src1_r, 4, 5, vec0, vec1);
867 HEVC_PRED_PLANAR_16x2(src0_r, src0_l, tmp0, tmp1, vec0, vec1,
873 SPLATI_H2_SH(src1_r, 6, 7, vec0, vec1);
874 HEVC_PRED_PLANAR_16x2(src0_r, src0_l, tmp0, tmp1, vec0, vec1,
880 SPLATI_H2_SH(src1_l, 0, 1, vec0, vec1);
881 HEVC_PRED_PLANAR_16x2(src0_r, src0_l, tmp0, tmp1, vec0, vec1,
887 SPLATI_H2_SH(src1_l, 2, 3, vec0, vec1);
888 HEVC_PRED_PLANAR_16x2(src0_r, src0_l, tmp0, tmp1, vec0, vec1,
894 SPLATI_H2_SH(src1_l, 4, 5, vec0, vec1);
895 HEVC_PRED_PLANAR_16x2(src0_r, src0_l, tmp0, tmp1, vec0, vec1,
901 SPLATI_H2_SH(src1_l, 6, 7, vec0, vec1);
902 HEVC_PRED_PLANAR_16x2(src0_r, src0_l, tmp0, tmp1, vec0, vec1,
2421 v16u8 vec0, vec1;
2572 LD_UB2(src - stride, 16, vec0, vec1);
2573 ST_UB2(vec0, vec1, top, 16);
2577 LD_UB2(src - stride + 32, 16, vec0, vec1);
2578 ST_UB2(vec0, vec1, (top + 32), 16);
2950 vec1 = (v16u8) __msa_pckev_b((v16i8) res3, (v16i8) res2);
2952 ST_UB2(vec0, vec1, filtered_top, 16);
2978 vec1 = (v16u8) __msa_pckev_b((v16i8) res3, (v16i8) res2);
2980 ST_UB2(vec0, vec1, (filtered_top + 32), 16);
3009 vec1 = (v16u8) __msa_pckev_b((v16i8) res3, (v16i8) res2);
3011 ST_UB2(vec0, vec1, left, 16);
3037 vec1 = (v16u8) __msa_pckev_b((v16i8) res3, (v16i8) res2);
3039 ST_UB2(vec0, vec1, (left + 32), 16);
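
The matches above appear to come from FFmpeg's MIPS MSA HEVC intra-prediction code (hevcpred_msa.c); vec1 is a short-lived v8i16/v8u16/v16u8 scratch vector. In the DC-prediction hits (file lines 351-455 and 470-508), the ILVR_B2_UH / accumulate / __msa_srari_h / __msa_pckev_b sequence vectorizes the HEVC DC boundary smoothing; the listing only shows the lines that mention vec1 (the doubling of vec0 in between is omitted), but the net per-sample arithmetic for the edge row is (neighbour + 3 * dcVal + 2) >> 2. A minimal scalar sketch of that top-row blend follows, under the assumption above; dc_filter_top_row, dst, top and dc_val are illustrative names, not identifiers from the listing:

#include <stdint.h>

/* Scalar sketch (illustration only, not the MSA code itself) of the DC
 * boundary filtering that the vec0/vec1 lines around file lines 351-455
 * vectorize: each top-neighbour sample is blended with the DC value and
 * rounded, matching the srari-by-2 step in the vector code. */
static void dc_filter_top_row(uint8_t *dst, const uint8_t *top,
                              int width, uint8_t dc_val)
{
    int x;

    for (x = 0; x < width; x++) {
        /* vec1 ends up holding top + 3 * dc before the rounding shift */
        dst[x] = (uint8_t) ((top[x] + 3 * dc_val + 2) >> 2);
    }
}

The planar-prediction hits (file lines 555-902) follow a different pattern: SPLATI_H2_SH broadcasts two left-neighbour samples into vec0/vec1, which HEVC_PRED_PLANAR_16x2 multiplies against the precomputed weight vectors (the MUL4 at file lines 39-40) to produce two output rows per invocation.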