Lines matching refs:src — FFmpeg MSA H.264 DSP code (libavcodec/mips/h264dsp_msa.c)

230 static void avc_biwgt_4x2_msa(uint8_t *src, uint8_t *dst, ptrdiff_t stride,
249 LW2(src, stride, tp0, tp1);
263 static void avc_biwgt_4x4_msa(uint8_t *src, uint8_t *dst, ptrdiff_t stride,
282 LW4(src, stride, tp0, tp1, tp2, tp3);
297 static void avc_biwgt_4x8_msa(uint8_t *src, uint8_t *dst, ptrdiff_t stride,
315 LW4(src, stride, tp0, tp1, tp2, tp3);
316 src += 4 * stride;
318 LW4(src, stride, tp0, tp1, tp2, tp3);
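
The LW2/LW4 (and, below, LD4) helpers come from FFmpeg's generic_macros_msa.h and simply gather one 32-bit (or 64-bit) value per row so a narrow block can be packed into a vector register. A minimal sketch of their behavior, assuming LW performs an unaligned 32-bit load:

/* Hedged sketch of the row-gather helpers used by the 4-wide kernels. */
#define LW2(psrc, stride, out0, out1) \
{                                     \
    out0 = LW((psrc));                \
    out1 = LW((psrc) + stride);       \
}

#define LW4(psrc, stride, out0, out1, out2, out3) \
{                                                 \
    out0 = LW((psrc));                            \
    out1 = LW((psrc) + stride);                   \
    out2 = LW((psrc) + 2 * stride);               \
    out3 = LW((psrc) + 3 * stride);               \
}
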
337 static void avc_biwgt_8x4_msa(uint8_t *src, uint8_t *dst, ptrdiff_t stride,
356 LD4(src, stride, tp0, tp1, tp2, tp3);
375 static void avc_biwgt_8x8_msa(uint8_t *src, uint8_t *dst, ptrdiff_t stride,
393 LD4(src, stride, tp0, tp1, tp2, tp3);
396 LD4(src + 4 * stride, stride, tp0, tp1, tp2, tp3);
426 static void avc_biwgt_8x16_msa(uint8_t *src, uint8_t *dst, ptrdiff_t stride,
449 LD4(src, stride, tp0, tp1, tp2, tp3);
450 src += 4 * stride;
453 LD4(src, stride, tp0, tp1, tp2, tp3);
454 src += 4 * stride;
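
All of the avc_biwgt_WxH_msa kernels vectorize the same per-pixel bi-directional weighted-prediction step; only the load/store widths differ (LW4 word loads for the 4-wide blocks, LD4 doubleword loads for the 8-wide ones). A scalar sketch of that step, assuming it matches FFmpeg's 8-bit C reference in h264dsp_template.c (biwgt_ref and clip8 are hypothetical names):

#include <stdint.h>
#include <stddef.h>

static inline uint8_t clip8(int v)
{
    return v < 0 ? 0 : v > 255 ? 255 : (uint8_t) v;
}

static void biwgt_ref(uint8_t *src, uint8_t *dst, ptrdiff_t stride,
                      int width, int height, int log2_denom,
                      int src_weight, int dst_weight, int offset)
{
    /* Rounding term folded into the offset, as in the C template. */
    offset = ((offset + 1) | 1) << log2_denom;

    for (int y = 0; y < height; y++, src += stride, dst += stride)
        for (int x = 0; x < width; x++)
            dst[x] = clip8((dst[x] * dst_weight + src[x] * src_weight +
                            offset) >> (log2_denom + 1));
}
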
562 #define AVC_LPF_H_CHROMA_422(src, stride, tc_val, alpha, beta, res) \
576 LW4((src - 2), stride, load0, load1, load2, load3); \
628 #define AVC_LPF_H_2BYTE_CHROMA_422(src, stride, tc_val, alpha, beta, res) \
641 load0 = LW(src - 2); \
642 load1 = LW(src - 2 + stride); \
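
Both AVC_LPF_H_CHROMA_422 macros filter a vertical chroma edge: they load rows starting two pixels left of the edge (src - 2), transpose so p1/p0/q0/q1 each occupy a vector lane, and apply the normal (tc-clipped) H.264 chroma filter. Per row, the operation in scalar form (chroma_filter_row and clip3 are local helper names, not FFmpeg ones):

#include <stdint.h>
#include <stdlib.h>

static inline int clip3(int v, int lo, int hi)
{
    return v < lo ? lo : v > hi ? hi : v;
}

/* px points at q0, so p1 p0 | q0 q1 sit at px[-2] px[-1] px[0] px[1]. */
static void chroma_filter_row(uint8_t *px, int tc, int alpha, int beta)
{
    int p1 = px[-2], p0 = px[-1], q0 = px[0], q1 = px[1];

    if (abs(p0 - q0) < alpha && abs(p1 - p0) < beta && abs(q1 - q0) < beta) {
        int delta = clip3(((q0 - p0) * 4 + (p1 - q1) + 4) >> 3, -tc, tc);
        px[-1] = (uint8_t) clip3(p0 + delta, 0, 255);  /* p0' */
        px[0]  = (uint8_t) clip3(q0 - delta, 0, 255);  /* q0' */
    }
}
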
821 uint8_t *src = data - 4;
832 LD_UB8(src, img_width, row0, row1, row2, row3, row4, row5, row6, row7);
833 LD_UB8(src + (8 * img_width), img_width,
959 src = data - 3;
960 ST_W4(tmp3, 0, 1, 2, 3, src, img_width);
961 ST_H4(tmp2, 0, 1, 2, 3, src + 4, img_width);
962 src += 4 * img_width;
963 ST_W4(tmp4, 0, 1, 2, 3, src, img_width);
964 ST_H4(tmp2, 4, 5, 6, 7, src + 4, img_width);
965 src += 4 * img_width;
967 ST_W4(tmp6, 0, 1, 2, 3, src, img_width);
968 ST_H4(tmp5, 0, 1, 2, 3, src + 4, img_width);
969 src += 4 * img_width;
970 ST_W4(tmp7, 0, 1, 2, 3, src, img_width);
971 ST_H4(tmp5, 4, 5, 6, 7, src + 4, img_width);
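
This block is the standard SIMD pattern for a vertical luma edge: column pixels are not contiguous in memory, so 16 rows are loaded starting four pixels left of the edge (data - 4), transposed so each edge column becomes one vector, filtered as if the edge were horizontal, transposed back, and the six changed pixels per row (p2..q2, spanning data - 3 through data + 2) are scattered with one ST_W4 word store plus one ST_H4 halfword store. A scalar sketch of the transpose step (the MSA code uses the TRANSPOSE* macros from generic_macros_msa.h):

#include <stdint.h>
#include <stddef.h>

static void transpose_8x8_u8(const uint8_t *src, ptrdiff_t src_stride,
                             uint8_t *dst, ptrdiff_t dst_stride)
{
    for (int i = 0; i < 8; i++)
        for (int j = 0; j < 8; j++)
            dst[j * dst_stride + i] = src[i * src_stride + j];
}
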
976 static void avc_h_loop_filter_luma_mbaff_intra_msa(uint8_t *src,
1003 load0 = LD(src - 4);
1004 load1 = LD(src + stride - 4);
1008 load0 = LD(src + (2 * stride) - 4);
1009 load1 = LD(src + (3 * stride) - 4);
1013 load0 = LD(src + (4 * stride) - 4);
1014 load1 = LD(src + (5 * stride) - 4);
1018 load0 = LD(src + (6 * stride) - 4);
1019 load1 = LD(src + (7 * stride) - 4);
1133 SW(out0, (src - 3));
1134 SH(out1, (src + 1));
1135 src += stride;
1136 SW(out2, (src - 3));
1137 SH(out3, (src + 1));
1138 src += stride;
1145 SW(out0, (src - 3));
1146 SH(out1, (src + 1));
1147 src += stride;
1148 SW(out2, (src - 3));
1149 SH(out3, (src + 1));
1150 src += stride;
1157 SW(out0, (src - 3));
1158 SH(out1, (src + 1));
1159 src += stride;
1160 SW(out2, (src - 3));
1161 SH(out3, (src + 1));
1162 src += stride;
1169 SW(out0, (src - 3));
1170 SH(out1, (src + 1));
1171 src += stride;
1172 SW(out2, (src - 3));
1173 SH(out3, (src + 1));
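
The mbaff intra variant follows the same transpose idea, but loads each row with one 64-bit LD at src - 4 (capturing p3..q3) and writes back six bytes per row: a 32-bit SW at src - 3 covering p2 p1 p0 q0, and a 16-bit SH at src + 1 covering q1 q2. A hypothetical scalar equivalent of one SW/SH pair, assuming little-endian byte order as on MIPS MSA builds:

#include <stdint.h>
#include <string.h>

/* out0 holds the repacked bytes p2 p1 p0 q0; out1 holds q1 q2. */
static void store_row_6px(uint8_t *src, uint32_t out0, uint16_t out1)
{
    memcpy(src - 3, &out0, 4);   /* p2 p1 p0 q0 */
    memcpy(src + 1, &out1, 2);   /* q1 q2 */
}
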
1913 uint8_t *src;
1991 src = data - 1;
1992 ST_H4(tmp1, 0, 1, 2, 3, src, img_width);
1993 src += 4 * img_width;
1994 ST_H4(tmp1, 4, 5, 6, 7, src, img_width);
1999 static void avc_h_loop_filter_chroma422_msa(uint8_t *src, ptrdiff_t stride,
2013 src += (4 * stride);
2017 AVC_LPF_H_CHROMA_422(src, stride, tc_val, alpha, beta, res);
2018 ST_H4(res, 0, 1, 2, 3, (src - 1), stride);
2019 src += (4 * stride);
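
The chroma422 entry points walk the four tc0 entries, each guarding a group of rows; a non-positive tc disables filtering for that group, and the pointer still advances past it. A hedged reconstruction of the loop shape around the lines above (declarations and the exact tc handling in the real function may differ):

for (col = 0; col < 4; col++) {
    tc_val = tc0[col];

    if (tc_val <= 0) {
        src += 4 * stride;       /* skip this group of rows unfiltered */
        continue;
    }

    AVC_LPF_H_CHROMA_422(src, stride, tc_val, alpha, beta, res);
    ST_H4(res, 0, 1, 2, 3, src - 1, stride);  /* p0/q0 pair per row */
    src += 4 * stride;
}
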
2023 static void avc_h_loop_filter_chroma422_mbaff_msa(uint8_t *src,
2040 src += 4 * stride;
2044 AVC_LPF_H_2BYTE_CHROMA_422(src, stride, tc_val, alpha, beta, res);
2049 SH(out0, (src - 1));
2050 src += stride;
2051 SH(out1, (src - 1));
2052 src += stride;
2178 void ff_h264_h_loop_filter_chroma422_msa(uint8_t *src,
2183 avc_h_loop_filter_chroma422_msa(src, ystride, alpha, beta, tc0);
2186 void ff_h264_h_loop_filter_chroma422_mbaff_msa(uint8_t *src,
2192 avc_h_loop_filter_chroma422_mbaff_msa(src, ystride, alpha, beta, tc0);
2195 void ff_h264_h_loop_filter_luma_mbaff_msa(uint8_t *src,
2201 avc_h_loop_filter_luma_mbaff_msa(src, ystride, alpha, beta, tc0);
2204 void ff_h264_h_loop_filter_luma_mbaff_intra_msa(uint8_t *src,
2209 avc_h_loop_filter_luma_mbaff_intra_msa(src, ystride, alpha, beta);
2212 void ff_weight_h264_pixels16_8_msa(uint8_t *src, ptrdiff_t stride,
2232 LD_UB8(src, stride, src0, src1, src2, src3, src4, src5, src6, src7);
2267 ST_UB8(dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7, src, stride);
2268 src += 8 * stride;
2271 LD_UB8(src, stride, src0, src1, src2, src3, src4, src5, src6, src7);
2306 ST_UB8(dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7, src, stride);
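
ff_weight_h264_pixels16_8_msa handles the 16x16 case directly, in two 8-row batches (LD_UB8, weight, ST_UB8, advance by 8 * stride). The per-pixel operation it vectorizes is the unidirectional H.264 weighted-prediction formula; a scalar sketch, reusing clip8 from the biweight sketch above (wgt_ref is a hypothetical name):

static void wgt_ref(uint8_t *block, ptrdiff_t stride,
                    int width, int height,
                    int log2_denom, int weight, int offset)
{
    /* Round to nearest before the shift; offset is added after, per spec. */
    int rnd = log2_denom ? 1 << (log2_denom - 1) : 0;

    for (int y = 0; y < height; y++, block += stride)
        for (int x = 0; x < width; x++)
            block[x] = clip8(((block[x] * weight + rnd) >> log2_denom)
                             + offset);
}
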
2310 void ff_weight_h264_pixels8_8_msa(uint8_t *src, ptrdiff_t stride,
2315 avc_wgt_8x4_msa(src, stride, log2_denom, weight_src, offset);
2317 avc_wgt_8x8_msa(src, stride, log2_denom, weight_src, offset);
2319 avc_wgt_8x16_msa(src, stride, log2_denom, weight_src, offset);
2323 void ff_weight_h264_pixels4_8_msa(uint8_t *src, ptrdiff_t stride,
2328 avc_wgt_4x2_msa(src, stride, log2_denom, weight_src, offset);
2330 avc_wgt_4x4_msa(src, stride, log2_denom, weight_src, offset);
2332 avc_wgt_4x8_msa(src, stride, log2_denom, weight_src, offset);
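
The 8- and 4-wide entry points are thin dispatchers that pick a fixed-size kernel from the runtime height; a hedged reconstruction consistent with the calls visible above:

void ff_weight_h264_pixels8_8_msa(uint8_t *src, ptrdiff_t stride,
                                  int height, int log2_denom,
                                  int weight_src, int offset)
{
    if (4 == height)
        avc_wgt_8x4_msa(src, stride, log2_denom, weight_src, offset);
    else if (8 == height)
        avc_wgt_8x8_msa(src, stride, log2_denom, weight_src, offset);
    else
        avc_wgt_8x16_msa(src, stride, log2_denom, weight_src, offset);
}

ff_weight_h264_pixels4_8_msa dispatches the same way over heights 2/4/8, and the ff_biweight_* wrappers below repeat the pattern with the extra dst pointer and second weight.
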
2336 void ff_biweight_h264_pixels16_8_msa(uint8_t *dst, uint8_t *src,
2360 LD_UB8(src, stride, src0, src1, src2, src3, src4, src5, src6, src7);
2361 src += 8 * stride;
2403 LD_UB8(src, stride, src0, src1, src2, src3, src4, src5, src6, src7);
2445 void ff_biweight_h264_pixels8_8_msa(uint8_t *dst, uint8_t *src,
2451 avc_biwgt_8x4_msa(src, dst, stride, log2_denom, weight_src, weight_dst,
2454 avc_biwgt_8x8_msa(src, dst, stride, log2_denom, weight_src, weight_dst,
2457 avc_biwgt_8x16_msa(src, dst, stride, log2_denom, weight_src, weight_dst,
2462 void ff_biweight_h264_pixels4_8_msa(uint8_t *dst, uint8_t *src,
2468 avc_biwgt_4x2_msa(src, dst, stride, log2_denom, weight_src, weight_dst,
2471 avc_biwgt_4x4_msa(src, dst, stride, log2_denom, weight_src, weight_dst,
2474 avc_biwgt_4x8_msa(src, dst, stride, log2_denom, weight_src, weight_dst,