Lines matching refs:tmp1
Cross-reference listing: each entry below is one occurrence of tmp1, prefixed by its line number in the source file. The identifiers involved (HEVC_PRED_PLANAR_16x2, __msa_fill_h, v8i16, LW/SW) indicate FFmpeg's MIPS MSA HEVC intra-prediction code, apparently libavcodec/mips/hevcpred_msa.c.

33 #define HEVC_PRED_PLANAR_16x2(src0_r, src0_l, tmp0, tmp1, vec0, vec1,          \
52 res0_m += mul_val_b1 * tmp1; \
53 res1_m += mul_val_b1 * tmp1; \
54 res2_m += (mul_val_b1 + 1) * tmp1; \
55 res3_m += (mul_val_b1 + 1) * tmp1; \
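
The four lines above are the vertical-term accumulation inside the HEVC_PRED_PLANAR_16x2 macro: res0_m/res1_m take the weight mul_val_b1 for the first row of a row pair, and res2_m/res3_m take mul_val_b1 + 1 for the second. What the macro vectorizes is the HEVC planar formula (Rec. ITU-T H.265, 8.4.4.2.4); a scalar sketch for reference, with illustrative names (top, left, dst, size are not identifiers from the listing):

    #include <stdint.h>

    /* Scalar HEVC planar prediction: blend left column, top row,
     * top-right (top[size]) and bottom-left (left[size]) neighbours.
     * Illustrative reference code, not the FFmpeg implementation. */
    static void planar_pred_scalar(const uint8_t *top, const uint8_t *left,
                                   uint8_t *dst, int stride,
                                   int size, int log2_size)
    {
        for (int y = 0; y < size; y++) {
            for (int x = 0; x < size; x++) {
                int horz = (size - 1 - x) * left[y] + (x + 1) * top[size];
                int vert = (size - 1 - y) * top[x]  + (y + 1) * left[size];
                dst[y * stride + x] = (horz + vert + size) >> (log2_size + 1);
            }
        }
    }
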
554 v8i16 src_vec0_r, src1_r, tmp0, tmp1, mul_val1;
571 tmp1 = __msa_fill_h(src_left[4]);
584 res0 += tmp1;
585 res1 += 2 * tmp1;
586 res2 += 3 * tmp1;
587 res3 += 4 * tmp1;
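
In the 4x4 path, line 571 splats the bottom-left neighbour src_left[4] into every 16-bit lane of tmp1, and lines 584-587 add it with weights 1 through 4, i.e. (y + 1) per row. A hypothetical scalar equivalent of just this step (res is a stand-in for the four res vectors, not a name from the listing):

    #include <stdint.h>

    /* Scalar view of tmp1 = __msa_fill_h(src_left[4]) followed by
     * res0 += 1*tmp1 ... res3 += 4*tmp1. */
    static void add_bottom_left_4x4(int16_t res[4][4], const uint8_t *src_left)
    {
        const int16_t bottom_left = src_left[4];   /* the splatted sample */

        for (int y = 0; y < 4; y++)
            for (int x = 0; x < 4; x++)
                res[y][x] += (int16_t)((y + 1) * bottom_left);
    }
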
604 v8i16 tmp0, tmp1, tmp2;
620 tmp1 = __msa_fill_h(src_left[8]);
645 res0 += tmp1;
646 res1 += 2 * tmp1;
647 res2 += 3 * tmp1;
648 res3 += 4 * tmp1;
649 res4 += 5 * tmp1;
650 res5 += 6 * tmp1;
651 res6 += 7 * tmp1;
652 res7 += 8 * tmp1;
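
The 8x8 version repeats the same step fully unrolled: lines 645-652 are eight copies of "row y += (y + 1) * bottom-left". In loop form it would read as below (resv is a hypothetical array view of res0..res7; building this requires an MSA-enabled MIPS toolchain):

    #include <msa.h>
    #include <stdint.h>

    /* Loop form of the unrolled ladder; FFmpeg unrolls by hand so each
     * res vector stays pinned in an MSA register. */
    static void add_bottom_left_8x8(v8i16 resv[8], const uint8_t *src_left)
    {
        v8i16 tmp1 = __msa_fill_h(src_left[8]);     /* bottom-left, splatted */

        for (int y = 0; y < 8; y++)
            resv[y] += __msa_fill_h(y + 1) * tmp1;  /* weight (y + 1) per row */
    }
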
670 v8i16 res0, res1, tmp0, tmp1;
685 tmp1 = __msa_fill_h(src_left[16]);
688 HEVC_PRED_PLANAR_16x2(src0_r, src0_l, tmp0, tmp1, vec0, vec1,
695 HEVC_PRED_PLANAR_16x2(src0_r, src0_l, tmp0, tmp1, vec0, vec1,
702 HEVC_PRED_PLANAR_16x2(src0_r, src0_l, tmp0, tmp1, vec0, vec1,
709 HEVC_PRED_PLANAR_16x2(src0_r, src0_l, tmp0, tmp1, vec0, vec1,
716 HEVC_PRED_PLANAR_16x2(src0_r, src0_l, tmp0, tmp1, vec0, vec1,
723 HEVC_PRED_PLANAR_16x2(src0_r, src0_l, tmp0, tmp1, vec0, vec1,
730 HEVC_PRED_PLANAR_16x2(src0_r, src0_l, tmp0, tmp1, vec0, vec1,
737 HEVC_PRED_PLANAR_16x2(src0_r, src0_l, tmp0, tmp1, vec0, vec1,
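
From 16x16 upward the work is delegated to HEVC_PRED_PLANAR_16x2, which emits two 16-pixel rows per invocation, so the eight calls above cover the block; the same shape repeats for 32x32 (lines 771-820) and in the 16-wide helper at lines 853-902, so no example is repeated there. Between calls the vertical weight advances by two, and inside the macro the second row uses mul_val_b1 + 1 (lines 54-55). A sketch of that bookkeeping as a loop (hypothetical; the real code is unrolled with the weights carried in vectors):

    /* Call i covers rows 2*i and 2*i + 1; w is my name, not the code's. */
    static void planar_row_pair_weights(int w[8][2])
    {
        for (int i = 0; i < 8; i++) {
            w[i][0] = 2 * i + 1;   /* (y + 1) for y = 2*i:   mul_val_b1     */
            w[i][1] = 2 * i + 2;   /* (y + 1) for y = 2*i+1: mul_val_b1 + 1 */
        }
    }
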
751 v8i16 tmp0, tmp1;
757 tmp1 = __msa_fill_h(src_left[32]);
771 HEVC_PRED_PLANAR_16x2(src0_r, src0_l, tmp0, tmp1, vec0, vec1,
778 HEVC_PRED_PLANAR_16x2(src0_r, src0_l, tmp0, tmp1, vec0, vec1,
785 HEVC_PRED_PLANAR_16x2(src0_r, src0_l, tmp0, tmp1, vec0, vec1,
792 HEVC_PRED_PLANAR_16x2(src0_r, src0_l, tmp0, tmp1, vec0, vec1,
799 HEVC_PRED_PLANAR_16x2(src0_r, src0_l, tmp0, tmp1, vec0, vec1,
806 HEVC_PRED_PLANAR_16x2(src0_r, src0_l, tmp0, tmp1, vec0, vec1,
813 HEVC_PRED_PLANAR_16x2(src0_r, src0_l, tmp0, tmp1, vec0, vec1,
820 HEVC_PRED_PLANAR_16x2(src0_r, src0_l, tmp0, tmp1, vec0, vec1,
833 v8i16 vec0, vec1, res0, res1, tmp0, tmp1;
839 tmp1 = __msa_fill_h(src_left[16]);
853 HEVC_PRED_PLANAR_16x2(src0_r, src0_l, tmp0, tmp1, vec0, vec1,
860 HEVC_PRED_PLANAR_16x2(src0_r, src0_l, tmp0, tmp1, vec0, vec1,
867 HEVC_PRED_PLANAR_16x2(src0_r, src0_l, tmp0, tmp1, vec0, vec1,
874 HEVC_PRED_PLANAR_16x2(src0_r, src0_l, tmp0, tmp1, vec0, vec1,
881 HEVC_PRED_PLANAR_16x2(src0_r, src0_l, tmp0, tmp1, vec0, vec1,
888 HEVC_PRED_PLANAR_16x2(src0_r, src0_l, tmp0, tmp1, vec0, vec1,
895 HEVC_PRED_PLANAR_16x2(src0_r, src0_l, tmp0, tmp1, vec0, vec1,
902 HEVC_PRED_PLANAR_16x2(src0_r, src0_l, tmp0, tmp1, vec0, vec1,
1031 int32_t tmp0, tmp1, tmp2;
1047 tmp1 = LW(ref + 4);
1050 SW(tmp1, ref_tmp + 4);
1236 int32_t tmp0, tmp1, tmp2, tmp3;
1258 tmp1 = ref[33];
1264 ref_tmp[33] = tmp1;
1447 int32_t last, offset, tmp0, tmp1, tmp2;
1465 tmp1 = LW(ref + 4);
1468 SW(tmp1, ref_tmp + 4);
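
The three fragments at lines 1031-1468 come from the angular predictors, which assemble a temporary reference array before prediction. LW and SW are FFmpeg's MIPS helpers from libavutil/mips/generic_macros_msa.h, a 32-bit load and store that tolerate unaligned addresses, so each LW/SW pair moves four reference samples at once, while lines 1258/1264 copy the single sample ref[33]. A portable sketch of the word copy (lowercase names are mine, to avoid clashing with the real macros):

    #include <stdint.h>
    #include <string.h>

    static inline uint32_t lw(const void *p) { uint32_t v; memcpy(&v, p, 4); return v; }
    static inline void     sw(uint32_t v, void *p) { memcpy(p, &v, 4); }

    /* tmp1 = LW(ref + 4); SW(tmp1, ref_tmp + 4);  is effectively: */
    /* sw(lw(ref + 4), ref_tmp + 4); */
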
2422 v8i16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
2926 tmp1 = __msa_fill_h(top[63]);
2939 res0 += mul_val1 * tmp1;
2940 res1 += tmp5 * tmp1;
2941 res2 += tmp6 * tmp1;
2942 res3 += tmp7 * tmp1;
2966 res0 += res3 * tmp1;
2968 res1 += tmp5 * tmp1;
2969 res2 += tmp6 * tmp1;
2970 res3 += tmp7 * tmp1;
2985 tmp1 = __msa_fill_h(left[63]);
2998 res0 += mul_val1 * tmp1;
2999 res1 += tmp5 * tmp1;
3000 res2 += tmp6 * tmp1;
3001 res3 += tmp7 * tmp1;
3025 res0 += res3 * tmp1;
3027 res1 += tmp5 * tmp1;
3028 res2 += tmp6 * tmp1;
3029 res3 += tmp7 * tmp1;
3041 left[63] = tmp1[0];
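
The final group (lines 2926-3041) applies the same splat-and-weight pattern to 64-entry top[]/left[] boundary arrays, and line 3041 writes a result back into the boundary. Because v8i16 is a GCC vector type, tmp1[0] is ordinary lane subscripting, not a function call. A minimal illustration (function name is mine; assumes left is the 8-bit boundary array and an MSA-enabled toolchain):

    #include <msa.h>
    #include <stdint.h>

    /* Extract lane 0 of an MSA vector and store it back, the shape of
     * line 3041's left[63] = tmp1[0]; */
    static void store_lane0(uint8_t *left, v8i16 tmp1)
    {
        left[63] = (uint8_t)tmp1[0];
    }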