Lines matching refs:src in libavcodec/rv40dsp.c (FFmpeg's RV40 DSP: quarter-pel luma interpolation, bilinear chroma motion compensation, and the in-loop deblocking filters)
37 static void OPNAME ## rv40_qpel8_h_lowpass(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride,\
43 OP(dst[0], (src[-2] + src[ 3] - 5*(src[-1]+src[2]) + src[0]*C1 + src[1]*C2 + (1<<(SHIFT-1))) >> SHIFT);\
44 OP(dst[1], (src[-1] + src[ 4] - 5*(src[ 0]+src[3]) + src[1]*C1 + src[2]*C2 + (1<<(SHIFT-1))) >> SHIFT);\
45 OP(dst[2], (src[ 0] + src[ 5] - 5*(src[ 1]+src[4]) + src[2]*C1 + src[3]*C2 + (1<<(SHIFT-1))) >> SHIFT);\
46 OP(dst[3], (src[ 1] + src[ 6] - 5*(src[ 2]+src[5]) + src[3]*C1 + src[4]*C2 + (1<<(SHIFT-1))) >> SHIFT);\
47 OP(dst[4], (src[ 2] + src[ 7] - 5*(src[ 3]+src[6]) + src[4]*C1 + src[5]*C2 + (1<<(SHIFT-1))) >> SHIFT);\
48 OP(dst[5], (src[ 3] + src[ 8] - 5*(src[ 4]+src[7]) + src[5]*C1 + src[6]*C2 + (1<<(SHIFT-1))) >> SHIFT);\
49 OP(dst[6], (src[ 4] + src[ 9] - 5*(src[ 5]+src[8]) + src[6]*C1 + src[7]*C2 + (1<<(SHIFT-1))) >> SHIFT);\
50 OP(dst[7], (src[ 5] + src[10] - 5*(src[ 6]+src[9]) + src[7]*C1 + src[8]*C2 + (1<<(SHIFT-1))) >> SHIFT);\
52 src += srcStride;\
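
The eight unrolled OP() lines above are one 6-tap kernel evaluated at successive x positions. Below is a minimal self-contained sketch of the "put" case, assuming OP(a, b) stores b clipped to 8 bits (upstream does this through a crop table; clip_uint8 here is an illustrative stand-in). Note the taps sum to 2^SHIFT (1 - 5 + 52 + 20 - 5 + 1 = 64 for SHIFT 6; 1 - 5 + 20 + 20 - 5 + 1 = 32 for SHIFT 5), so flat areas pass through unchanged.

#include <stdint.h>

/* Illustrative stand-in for the crop-table clip inside OP(). */
static uint8_t clip_uint8(int v)
{
    return v < 0 ? 0 : v > 255 ? 255 : (uint8_t)v;
}

/* The same filter with the unrolled taps folded back into a loop.
 * C1/C2 select the quarter-pel phase: (52, 20) and (20, 52) with
 * SHIFT = 6 for the 1/4 and 3/4 positions, (20, 20) with SHIFT = 5
 * for the middle position (see the _mc* wrappers further down). */
static void qpel8_h_lowpass_put(uint8_t *dst, const uint8_t *src,
                                int dstStride, int srcStride, int h,
                                int C1, int C2, int SHIFT)
{
    for (int i = 0; i < h; i++) {
        for (int x = 0; x < 8; x++)
            dst[x] = clip_uint8((src[x-2] + src[x+3]
                                 - 5*(src[x-1] + src[x+2])
                                 + src[x]*C1 + src[x+1]*C2
                                 + (1 << (SHIFT-1))) >> SHIFT);
        dst += dstStride;
        src += srcStride;
    }
}
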
56 static void OPNAME ## rv40_qpel8_v_lowpass(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride,\
62 const int srcB  = src[-2*srcStride];\
63 const int srcA  = src[-1*srcStride];\
64 const int src0  = src[0 *srcStride];\
65 const int src1  = src[1 *srcStride];\
66 const int src2  = src[2 *srcStride];\
67 const int src3  = src[3 *srcStride];\
68 const int src4  = src[4 *srcStride];\
69 const int src5  = src[5 *srcStride];\
70 const int src6  = src[6 *srcStride];\
71 const int src7  = src[7 *srcStride];\
72 const int src8  = src[8 *srcStride];\
73 const int src9  = src[9 *srcStride];\
74 const int src10 = src[10*srcStride];\
84 src++;\
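
The vertical variant is the transpose: srcB..src10 are eleven samples from a single column, the kernel writes eight outputs down that column, and the outer loop then steps one pixel right (hence the src++). A sketch in the same style, reusing clip_uint8 from the block above:

/* Vertical counterpart of the sketch above: w columns, eight output
 * rows per column, same taps and rounding. */
static void qpel8_v_lowpass_put(uint8_t *dst, const uint8_t *src,
                                int dstStride, int srcStride, int w,
                                int C1, int C2, int SHIFT)
{
    for (int i = 0; i < w; i++) {
        for (int y = 0; y < 8; y++)
            dst[y*dstStride] =
                clip_uint8((src[(y-2)*srcStride] + src[(y+3)*srcStride]
                            - 5*(src[(y-1)*srcStride] + src[(y+2)*srcStride])
                            + src[y*srcStride]*C1 + src[(y+1)*srcStride]*C2
                            + (1 << (SHIFT-1))) >> SHIFT);
        dst++;
        src++;
    }
}
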
88 static void OPNAME ## rv40_qpel16_v_lowpass(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride,\
90 OPNAME ## rv40_qpel8_v_lowpass(dst , src , dstStride, srcStride, 8, C1, C2, SHIFT);\
91 OPNAME ## rv40_qpel8_v_lowpass(dst+8, src+8, dstStride, srcStride, 8, C1, C2, SHIFT);\
92 src += 8*srcStride;\
94 OPNAME ## rv40_qpel8_v_lowpass(dst , src , dstStride, srcStride, w-8, C1, C2, SHIFT);\
95 OPNAME ## rv40_qpel8_v_lowpass(dst+8, src+8, dstStride, srcStride, w-8, C1, C2, SHIFT);\
98 static void OPNAME ## rv40_qpel16_h_lowpass(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride,\
100 OPNAME ## rv40_qpel8_h_lowpass(dst , src , dstStride, srcStride, 8, C1, C2, SHIFT);\
101 OPNAME ## rv40_qpel8_h_lowpass(dst+8, src+8, dstStride, srcStride, 8, C1, C2, SHIFT);\
102 src += 8*srcStride;\
104 OPNAME ## rv40_qpel8_h_lowpass(dst , src , dstStride, srcStride, h-8, C1, C2, SHIFT);\
105 OPNAME ## rv40_qpel8_h_lowpass(dst+8, src+8, dstStride, srcStride, h-8, C1, C2, SHIFT);\
110 static void OPNAME ## rv40_qpel ## SIZE ## _mc10_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
112 OPNAME ## rv40_qpel ## SIZE ## _h_lowpass(dst, src, stride, stride, SIZE, 52, 20, 6);\
115 static void OPNAME ## rv40_qpel ## SIZE ## _mc30_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
117 OPNAME ## rv40_qpel ## SIZE ## _h_lowpass(dst, src, stride, stride, SIZE, 20, 52, 6);\
120 static void OPNAME ## rv40_qpel ## SIZE ## _mc01_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
122 OPNAME ## rv40_qpel ## SIZE ## _v_lowpass(dst, src, stride, stride, SIZE, 52, 20, 6);\
125 static void OPNAME ## rv40_qpel ## SIZE ## _mc11_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
129 put_rv40_qpel ## SIZE ## _h_lowpass(full, src - 2*stride, SIZE, stride, SIZE+5, 52, 20, 6);\
133 static void OPNAME ## rv40_qpel ## SIZE ## _mc21_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
137 put_rv40_qpel ## SIZE ## _h_lowpass(full, src - 2*stride, SIZE, stride, SIZE+5, 20, 20, 5);\
141 static void OPNAME ## rv40_qpel ## SIZE ## _mc31_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
145 put_rv40_qpel ## SIZE ## _h_lowpass(full, src - 2*stride, SIZE, stride, SIZE+5, 20, 52, 6);\
149 static void OPNAME ## rv40_qpel ## SIZE ## _mc12_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
153 put_rv40_qpel ## SIZE ## _h_lowpass(full, src - 2*stride, SIZE, stride, SIZE+5, 52, 20, 6);\
157 static void OPNAME ## rv40_qpel ## SIZE ## _mc22_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
161 put_rv40_qpel ## SIZE ## _h_lowpass(full, src - 2*stride, SIZE, stride, SIZE+5, 20, 20, 5);\
165 static void OPNAME ## rv40_qpel ## SIZE ## _mc32_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
169 put_rv40_qpel ## SIZE ## _h_lowpass(full, src - 2*stride, SIZE, stride, SIZE+5, 20, 52, 6);\
173 static void OPNAME ## rv40_qpel ## SIZE ## _mc03_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
175 OPNAME ## rv40_qpel ## SIZE ## _v_lowpass(dst, src, stride, stride, SIZE, 20, 52, 6);\
178 static void OPNAME ## rv40_qpel ## SIZE ## _mc13_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
182 put_rv40_qpel ## SIZE ## _h_lowpass(full, src - 2*stride, SIZE, stride, SIZE+5, 52, 20, 6);\
186 static void OPNAME ## rv40_qpel ## SIZE ## _mc23_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
190 put_rv40_qpel ## SIZE ## _h_lowpass(full, src - 2*stride, SIZE, stride, SIZE+5, 20, 20, 5);\
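
In the _mcXY_c wrappers the name encodes the quarter-pel position: X/4 horizontally, Y/4 vertically. Pure-horizontal and pure-vertical positions call a single lowpass directly; two-dimensional positions first run the horizontal pass into a SIZE x (SIZE+5) temporary (starting two rows above the block, since the vertical kernel reaches rows -2..+3), then the vertical pass over it. A sketch of the mc11 composition for SIZE = 8, built from the two sketch functions above; the temporary's layout is inferred from the full / src - 2*stride arguments in the fragment:

#include <stddef.h>

/* (1/4, 1/4) position: horizontal 1/4 pass into an 8 x 13 temporary,
 * then a vertical 1/4 pass reading from its third row. */
static void qpel8_mc11_put_sketch(uint8_t *dst, const uint8_t *src,
                                  ptrdiff_t stride)
{
    uint8_t full[8 * (8 + 5)];

    qpel8_h_lowpass_put(full, src - 2*stride, 8, stride, 8 + 5, 52, 20, 6);
    qpel8_v_lowpass_put(dst, full + 2*8, stride, 8, 8, 52, 20, 6);
}
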
269 static void put_rv40_qpel16_mc33_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
271 put_pixels16_xy2_8_c(dst, src, stride, 16);
273 static void avg_rv40_qpel16_mc33_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
275 avg_pixels16_xy2_8_c(dst, src, stride, 16);
277 static void put_rv40_qpel8_mc33_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
279 put_pixels8_xy2_8_c(dst, src, stride, 8);
281 static void avg_rv40_qpel8_mc33_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
283 avg_pixels8_xy2_8_c(dst, src, stride, 8);
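
The (3/4, 3/4) position is the one case that bypasses the lowpass kernels: RV40 approximates it with the diagonal half-pel average from the hpel routines. A sketch of the per-pixel operation of put_pixels*_xy2_8_c as used here, a rounding average of each 2x2 neighborhood:

/* Rounding average of the four neighbors, as used for mc33. */
static void pixels_xy2_put(uint8_t *dst, const uint8_t *src,
                           ptrdiff_t stride, int size)
{
    for (int y = 0; y < size; y++) {
        for (int x = 0; x < size; x++)
            dst[x] = (src[x] + src[x+1]
                      + src[stride+x] + src[stride+x+1] + 2) >> 2;
        dst += stride;
        src += stride;
    }
}
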
295 uint8_t *src /*align 1*/,\
309 OP(dst[0], (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1] + bias));\
310 OP(dst[1], (A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2] + bias));\
311 OP(dst[2], (A*src[2] + B*src[3] + C*src[stride+2] + D*src[stride+3] + bias));\
312 OP(dst[3], (A*src[3] + B*src[4] + C*src[stride+3] + D*src[stride+4] + bias));\
314 src += stride;\
320 OP(dst[0], (A*src[0] + E*src[step+0] + bias));\
321 OP(dst[1], (A*src[1] + E*src[step+1] + bias));\
322 OP(dst[2], (A*src[2] + E*src[step+2] + bias));\
323 OP(dst[3], (A*src[3] + E*src[step+3] + bias));\
325 src += stride;\
331 uint8_t *src /*align 1*/,\
345 OP(dst[0], (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1] + bias));\
346 OP(dst[1], (A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2] + bias));\
347 OP(dst[2], (A*src[2] + B*src[3] + C*src[stride+2] + D*src[stride+3] + bias));\
348 OP(dst[3], (A*src[3] + B*src[4] + C*src[stride+3] + D*src[stride+4] + bias));\
349 OP(dst[4], (A*src[4] + B*src[5] + C*src[stride+4] + D*src[stride+5] + bias));\
350 OP(dst[5], (A*src[5] + B*src[6] + C*src[stride+5] + D*src[stride+6] + bias));\
351 OP(dst[6], (A*src[6] + B*src[7] + C*src[stride+6] + D*src[stride+7] + bias));\
352 OP(dst[7], (A*src[7] + B*src[8] + C*src[stride+7] + D*src[stride+8] + bias));\
354 src += stride;\
360 OP(dst[0], (A*src[0] + E*src[step+0] + bias));\
361 OP(dst[1], (A*src[1] + E*src[step+1] + bias));\
362 OP(dst[2], (A*src[2] + E*src[step+2] + bias));\
363 OP(dst[3], (A*src[3] + E*src[step+3] + bias));\
364 OP(dst[4], (A*src[4] + E*src[step+4] + bias));\
365 OP(dst[5], (A*src[5] + E*src[step+5] + bias));\
366 OP(dst[6], (A*src[6] + E*src[step+6] + bias));\
367 OP(dst[7], (A*src[7] + E*src[step+7] + bias));\
369 src += stride;\
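
The chroma kernels are bilinear: for subpel offsets x, y in 0..7 the weights are A = (8-x)(8-y), B = x(8-y), C = (8-x)y, D = xy, which sum to 64, and OP divides by 64 after adding an RV40-specific bias in place of the usual +32 rounding (upstream takes the bias from a small table indexed by the subpel position). When D is zero the filter degenerates to two taps with E = B + C, and step selects the next column or the next row, which is the second loop shape above. A sketch of one 4-wide "put" row under those assumptions:

/* One row of the 4-wide bilinear chroma filter ("put" case). bias is
 * a plain parameter here; upstream reads it from a table. No clip is
 * needed: the weights sum to 64, so with 8-bit input and bias < 64
 * the result stays in 0..255. */
static void chroma_mc4_row_put(uint8_t *dst, const uint8_t *src,
                               ptrdiff_t stride, int x, int y, int bias)
{
    const int A = (8-x)*(8-y), B = x*(8-y), C = (8-x)*y, D = x*y;

    for (int i = 0; i < 4; i++)
        dst[i] = (A*src[i] + B*src[i+1]
                  + C*src[stride+i] + D*src[stride+i+1] + bias) >> 6;
}
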
429 static av_always_inline void rv40_weak_loop_filter(uint8_t *src,
443 for (i = 0; i < 4; i++, src += stride) {
444 int diff_p1p0 = src[-2*step] - src[-1*step];
445 int diff_q1q0 = src[ 1*step] - src[ 0*step];
446 int diff_p1p2 = src[-2*step] - src[-3*step];
447 int diff_q1q2 = src[ 1*step] - src[ 2*step];
449 t = src[0*step] - src[-1*step];
459 t += src[-2*step] - src[1*step];
462 src[-1*step] = cm[src[-1*step] + diff];
463 src[ 0*step] = cm[src[ 0*step] - diff];
467 src[-2*step] = cm[src[-2*step] - CLIP_SYMM(t, lim_p1)];
472 src[ 1*step] = cm[src[ 1*step] - CLIP_SYMM(t, lim_q1)];
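
As far as the matched lines show, the weak filter builds one correction diff from t = q0 - p0 (scaled, plus a p1 - q1 term when both sides are filtered), clips it symmetrically to lim_p0q0, and applies it with opposite signs to p0 and q0 through the crop table; p1 and q1 then receive half-corrections derived from the local differences, clipped by lim_p1 and lim_q1. A sketch of the central correction; the exact scale and rounding (x4, +4, >>3) are assumptions, since those lines are not in the match list:

/* CLIP_SYMM(v, lim) clamps v to [-lim, lim]. */
static inline int clip_symm(int v, int lim)
{
    return v < -lim ? -lim : v > lim ? lim : v;
}

/* Central correction of the weak filter, applied as p0 += diff,
 * q0 -= diff (through the crop table in the real code). */
static inline int weak_filter_diff(int p1, int p0, int q0, int q1,
                                   int both_sides, int lim_p0q0)
{
    int t = (q0 - p0) * 4;      /* assumed scale */
    if (both_sides)
        t += p1 - q1;           /* matches "t += src[-2*step] - src[1*step]" */
    return clip_symm((t + 4) >> 3, lim_p0q0);  /* assumed rounding */
}
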
477 static void rv40_h_weak_loop_filter(uint8_t *src, const ptrdiff_t stride,
483 rv40_weak_loop_filter(src, stride, 1, filter_p1, filter_q1,
487 static void rv40_v_weak_loop_filter(uint8_t *src, const ptrdiff_t stride,
493 rv40_weak_loop_filter(src, 1, stride, filter_p1, filter_q1,
497 static av_always_inline void rv40_strong_loop_filter(uint8_t *src,
507 for (i = 0; i < 4; i++, src += stride) {
509 int t = src[0*step] - src[-1*step];
518 p0 = (25*src[-3*step] + 26*src[-2*step] + 26*src[-1*step] +
519 26*src[ 0*step] + 25*src[ 1*step] +
522 q0 = (25*src[-2*step] + 26*src[-1*step] + 26*src[ 0*step] +
523 26*src[ 1*step] + 25*src[ 2*step] +
527 p0 = av_clip(p0, src[-1*step] - lims, src[-1*step] + lims);
528 q0 = av_clip(q0, src[ 0*step] - lims, src[ 0*step] + lims);
531 p1 = (25*src[-4*step] + 26*src[-3*step] + 26*src[-2*step] + 26*p0 +
532 25*src[ 0*step] + rv40_dither_l[dmode + i]) >> 7;
533 q1 = (25*src[-1*step] + 26*q0 + 26*src[ 1*step] + 26*src[ 2*step] +
534 25*src[ 3*step] + rv40_dither_r[dmode + i]) >> 7;
537 p1 = av_clip(p1, src[-2*step] - lims, src[-2*step] + lims);
538 q1 = av_clip(q1, src[ 1*step] - lims, src[ 1*step] + lims);
541 src[-2*step] = p1;
542 src[-1*step] = p0;
543 src[ 0*step] = q0;
544 src[ 1*step] = q1;
547 src[-3*step] = (25*src[-1*step] + 26*src[-2*step] +
548 51*src[-3*step] + 26*src[-4*step] + 64) >> 7;
549 src[ 2*step] = (25*src[ 0*step] + 26*src[ 1*step] +
550 51*src[ 2*step] + 26*src[ 3*step] + 64) >> 7;
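
The strong filter's smoothers are exact 7-bit filters: 25 + 26 + 26 + 26 + 25 = 128 for the p0/q0/p1/q1 kernels and 25 + 26 + 51 + 26 = 128 for the outermost luma pixels, so the >>7 divides exactly by the weight sum. rv40_dither_l/_r supply position-dependent rounding for the inner pixels (the p0/q0 dither terms are elided from the match list but visible for p1/q1), and the smoothed values are clipped to +/-lims around the originals before being stored. A sketch of the p0 smoothing:

/* p0 smoothing from the fragment: 25/26/26/26/25 kernel over
 * p2..q1 (sum 128), position-dependent dither as rounding. */
static inline int strong_filter_p0(const uint8_t *s, int step, int dither)
{
    return (25*s[-3*step] + 26*s[-2*step] + 26*s[-1*step] +
            26*s[ 0*step] + 25*s[ 1*step] + dither) >> 7;
}
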
555 static void rv40_h_strong_loop_filter(uint8_t *src, const ptrdiff_t stride,
559 rv40_strong_loop_filter(src, stride, 1, alpha, lims, dmode, chroma);
562 static void rv40_v_strong_loop_filter(uint8_t *src, const ptrdiff_t stride,
566 rv40_strong_loop_filter(src, 1, stride, alpha, lims, dmode, chroma);
569 static av_always_inline int rv40_loop_filter_strength(uint8_t *src,
580 for (i = 0, ptr = src; i < 4; i++, ptr += stride) {
594 for (i = 0, ptr = src; i < 4; i++, ptr += stride) {
605 static int rv40_h_loop_filter_strength(uint8_t *src, ptrdiff_t stride,
609 return rv40_loop_filter_strength(src, stride, 1, beta, beta2, edge, p1, q1);
612 static int rv40_v_loop_filter_strength(uint8_t *src, ptrdiff_t stride,
616 return rv40_loop_filter_strength(src, 1, stride, beta, beta2, edge, p1, q1);
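
The strength decision also works on four rows at a time: the first loop accumulates the inner differences p1 - p0 and q1 - q0, whose magnitudes against a beta-derived threshold decide whether each side gets filtered at all (returned through *p1 and *q1); on edges, a second loop accumulates the outer differences p1 - p2 and q1 - q2 and tests them against beta2 to select the strong filter. A sketch with assumed thresholds, since the comparisons themselves are not in the match list:

#include <stdlib.h>

static int filter_strength_sketch(const uint8_t *src, int step,
                                  ptrdiff_t stride, int beta, int beta2,
                                  int edge, int *p1, int *q1)
{
    int sum_p1p0 = 0, sum_q1q0 = 0, sum_p1p2 = 0, sum_q1q2 = 0;
    const uint8_t *ptr;
    int i;

    /* First pass: inner differences decide per-side filtering. */
    for (i = 0, ptr = src; i < 4; i++, ptr += stride) {
        sum_p1p0 += ptr[-2*step] - ptr[-1*step];
        sum_q1q0 += ptr[ 1*step] - ptr[ 0*step];
    }
    *p1 = abs(sum_p1p0) < beta * 4;   /* assumed threshold */
    *q1 = abs(sum_q1q0) < beta * 4;
    if ((!*p1 && !*q1) || !edge)
        return 0;

    /* Second pass (edges only): outer differences pick the strong filter. */
    for (i = 0, ptr = src; i < 4; i++, ptr += stride) {
        sum_p1p2 += ptr[-2*step] - ptr[-3*step];
        sum_q1q2 += ptr[ 1*step] - ptr[ 2*step];
    }
    return *p1 && abs(sum_p1p2) < beta2 &&
           *q1 && abs(sum_q1q2) < beta2;
}
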