Lines Matching refs:stride
38 ptrdiff_t stride) \
40 name ## _idct_dc_add_c(dst + stride * 0 + 0, block[0], stride); \
41 name ## _idct_dc_add_c(dst + stride * 0 + 4, block[1], stride); \
42 name ## _idct_dc_add_c(dst + stride * 4 + 0, block[2], stride); \
43 name ## _idct_dc_add_c(dst + stride * 4 + 4, block[3], stride); \
47 ptrdiff_t stride) \
49 name ## _idct_dc_add_c(dst + 0, block[0], stride); \
50 name ## _idct_dc_add_c(dst + 4, block[1], stride); \
51 name ## _idct_dc_add_c(dst + 8, block[2], stride); \
52 name ## _idct_dc_add_c(dst + 12, block[3], stride); \
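
The two add4 wrappers above differ only in where they place four 4x4 sub-blocks: the uv variant tiles a 2x2 grid over an 8x8 chroma area (hence the stride * 4 terms), while the y variant lays the four blocks side by side across a 16-pixel-wide luma strip. A minimal standalone sketch of that addressing, with a hypothetical dc_add_4x4 callback standing in for the real name ## _idct_dc_add_c:

    #include <stdint.h>
    #include <stddef.h>

    /* Hypothetical per-block routine: adds one DC value to a 4x4 area. */
    typedef void (*dc_add_4x4)(uint8_t *dst, int16_t block[16], ptrdiff_t stride);

    /* 2x2 grid of 4x4 blocks covering an 8x8 chroma area. */
    static void dc_add4uv_sketch(dc_add_4x4 add, uint8_t *dst,
                                 int16_t block[4][16], ptrdiff_t stride)
    {
        add(dst + stride * 0 + 0, block[0], stride);   /* top-left     */
        add(dst + stride * 0 + 4, block[1], stride);   /* top-right    */
        add(dst + stride * 4 + 0, block[2], stride);   /* bottom-left  */
        add(dst + stride * 4 + 4, block[3], stride);   /* bottom-right */
    }

    /* Four 4x4 blocks side by side across a 16-pixel luma strip. */
    static void dc_add4y_sketch(dc_add_4x4 add, uint8_t *dst,
                                int16_t block[4][16], ptrdiff_t stride)
    {
        add(dst +  0, block[0], stride);
        add(dst +  4, block[1], stride);
        add(dst +  8, block[2], stride);
        add(dst + 12, block[3], stride);
    }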
99 static void vp7_idct_add_c(uint8_t *dst, int16_t block[16], ptrdiff_t stride)
122 dst[0 * stride + i] = av_clip_uint8(dst[0 * stride + i] +
124 dst[3 * stride + i] = av_clip_uint8(dst[3 * stride + i] +
126 dst[1 * stride + i] = av_clip_uint8(dst[1 * stride + i] +
128 dst[2 * stride + i] = av_clip_uint8(dst[2 * stride + i] +
133 static void vp7_idct_dc_add_c(uint8_t *dst, int16_t block[16], ptrdiff_t stride)
143 dst += stride;
198 static void vp8_idct_add_c(uint8_t *dst, int16_t block[16], ptrdiff_t stride)
229 dst += stride;
233 static void vp8_idct_dc_add_c(uint8_t *dst, int16_t block[16], ptrdiff_t stride)
243 dst += stride;
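
Both the VP7 and VP8 idct_add / idct_dc_add routines above walk the 4x4 destination one row at a time with dst += stride. A sketch of the dc_add pattern, assuming VP8-style (dc + 4) >> 3 rounding and a plain saturation helper instead of FFmpeg's av_clip_uint8 / crop-table variants:

    #include <stdint.h>
    #include <stddef.h>

    static uint8_t clip_u8(int v)            /* stand-in for av_clip_uint8() */
    {
        return v < 0 ? 0 : v > 255 ? 255 : (uint8_t) v;
    }

    /* Add one rounded DC value to a 4x4 block; dst advances row by row. */
    static void idct_dc_add_sketch(uint8_t *dst, int16_t block[16], ptrdiff_t stride)
    {
        int dc = (block[0] + 4) >> 3;        /* assumed VP8-style rounding */
        block[0] = 0;                        /* coefficient is consumed    */

        for (int y = 0; y < 4; y++) {
            for (int x = 0; x < 4; x++)
                dst[x] = clip_u8(dst[x] + dc);
            dst += stride;                   /* next row of the destination */
        }
    }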
252 int av_unused p3 = p[-4 * stride]; \
253 int av_unused p2 = p[-3 * stride]; \
254 int av_unused p1 = p[-2 * stride]; \
255 int av_unused p0 = p[-1 * stride]; \
256 int av_unused q0 = p[ 0 * stride]; \
257 int av_unused q1 = p[ 1 * stride]; \
258 int av_unused q2 = p[ 2 * stride]; \
259 int av_unused q3 = p[ 3 * stride];
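
The LOAD_PIXELS macro reads four pixels on each side of an edge at multiples of stride from p: passing the picture stride gives the neighbourhood across a horizontal edge, passing 1 gives the neighbourhood across a vertical edge. A small sketch of the same idea written as a function rather than a macro:

    #include <stdint.h>
    #include <stddef.h>

    /* Gather the eight pixels p3..p0, q0..q3 straddling an edge.
     * stride == picture stride -> pixels above/below a horizontal edge,
     * stride == 1              -> pixels left/right of a vertical edge. */
    static void load_edge_pixels(const uint8_t *p, ptrdiff_t stride, int out[8])
    {
        for (int i = 0; i < 8; i++)
            out[i] = p[(i - 4) * stride];    /* out[0..3] = p3..p0, out[4..7] = q0..q3 */
    }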
263 static av_always_inline void filter_common(uint8_t *p, ptrdiff_t stride,
288 p[-1 * stride] = cm[p0 + f2];
289 p[ 0 * stride] = cm[q0 - f1];
294 p[-2 * stride] = cm[p1 + a];
295 p[ 1 * stride] = cm[q1 - a];
299 static av_always_inline void vp7_filter_common(uint8_t *p, ptrdiff_t stride,
302 filter_common(p, stride, is4tap, IS_VP7);
305 static av_always_inline void vp8_filter_common(uint8_t *p, ptrdiff_t stride,
308 filter_common(p, stride, is4tap, IS_VP8);
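
filter_common adjusts the pixel pair straddling the edge (and, in the non-4-tap case, one more pair) by filter values derived from the local gradient; the vp7/vp8 wrappers only select the codec-specific rounding. A simplified sketch, assuming libvpx-style (a + 4) >> 3 and (a + 3) >> 3 filter terms and plain clipping in place of FFmpeg's crop table:

    #include <stdint.h>
    #include <stddef.h>

    static int clip_s8(int v)  { return v < -128 ? -128 : v > 127 ? 127 : v; }
    static int clip_u8i(int v) { return v < 0 ? 0 : v > 255 ? 255 : v; }

    /* Simplified common loop-filter core (VP8-style rounding assumed). */
    static void filter_common_sketch(uint8_t *p, ptrdiff_t stride, int is4tap)
    {
        int p1 = p[-2 * stride], p0 = p[-1 * stride];
        int q0 = p[ 0 * stride], q1 = p[ 1 * stride];

        int a = 3 * (q0 - p0);
        if (is4tap)
            a += clip_s8(p1 - q1);           /* outer taps only in the 4-tap case */
        a = clip_s8(a);

        int f1 = clip_s8(a + 4) >> 3;        /* assumed rounding, as in libvpx    */
        int f2 = clip_s8(a + 3) >> 3;

        p[-1 * stride] = clip_u8i(p0 + f2);
        p[ 0 * stride] = clip_u8i(q0 - f1);

        if (!is4tap) {                       /* also touch p1/q1 on inner edges */
            int a2 = (f1 + 1) >> 1;
            p[-2 * stride] = clip_u8i(p1 + a2);
            p[ 1 * stride] = clip_u8i(q1 - a2);
        }
    }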
311 static av_always_inline int vp7_simple_limit(uint8_t *p, ptrdiff_t stride,
318 static av_always_inline int vp8_simple_limit(uint8_t *p, ptrdiff_t stride,
331 ptrdiff_t stride, \
335 return vp ## vpn ## _simple_limit(p, stride, E) && \
345 static av_always_inline int hev(uint8_t *p, ptrdiff_t stride, int thresh)
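
simple_limit decides whether the step across the edge is small enough to filter at all, and hev flags "high edge variance" so the gentler inner adjustment is skipped. Only the signatures appear in this listing, so the following sketch assumes VP8-style threshold formulas:

    #include <stdint.h>
    #include <stdlib.h>   /* abs() */
    #include <stddef.h>

    /* Assumed VP8-style simple edge limit: weighted step across the edge. */
    static int simple_limit_sketch(const uint8_t *p, ptrdiff_t stride, int flim)
    {
        int p1 = p[-2 * stride], p0 = p[-1 * stride];
        int q0 = p[ 0 * stride], q1 = p[ 1 * stride];
        return 2 * abs(p0 - q0) + (abs(p1 - q1) >> 1) <= flim;
    }

    /* High-edge-variance test: is either side of the edge itself steep? */
    static int hev_sketch(const uint8_t *p, ptrdiff_t stride, int thresh)
    {
        int p1 = p[-2 * stride], p0 = p[-1 * stride];
        int q0 = p[ 0 * stride], q1 = p[ 1 * stride];
        return abs(p1 - p0) > thresh || abs(q1 - q0) > thresh;
    }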
351 static av_always_inline void filter_mbedge(uint8_t *p, ptrdiff_t stride)
365 p[-3 * stride] = cm[p2 + a2];
366 p[-2 * stride] = cm[p1 + a1];
367 p[-1 * stride] = cm[p0 + a0];
368 p[ 0 * stride] = cm[q0 - a0];
369 p[ 1 * stride] = cm[q1 - a1];
370 p[ 2 * stride] = cm[q2 - a2];
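
filter_mbedge is the strong macroblock-edge filter: it derives one signed correction from the edge step and applies three progressively smaller fractions of it to the three pixels on each side, exactly the six stores shown above. The a0/a1/a2 derivation is not in this listing; the sketch below assumes the VP8 spec's 27/18/9 weights and plain clipping:

    #include <stdint.h>
    #include <stddef.h>

    static int clip_s8_m(int v) { return v < -128 ? -128 : v > 127 ? 127 : v; }
    static int clip_u8_m(int v) { return v < 0 ? 0 : v > 255 ? 255 : v; }

    /* Strong macroblock-edge filter: adjust three pixels on each side. */
    static void filter_mbedge_sketch(uint8_t *p, ptrdiff_t stride)
    {
        int p2 = p[-3 * stride], p1 = p[-2 * stride], p0 = p[-1 * stride];
        int q0 = p[ 0 * stride], q1 = p[ 1 * stride], q2 = p[ 2 * stride];

        int w  = clip_s8_m(p1 - q1);
        w      = clip_s8_m(w + 3 * (q0 - p0));

        int a0 = (27 * w + 63) >> 7;         /* assumed spec weights 27/18/9 */
        int a1 = (18 * w + 63) >> 7;
        int a2 = ( 9 * w + 63) >> 7;

        p[-3 * stride] = clip_u8_m(p2 + a2);
        p[-2 * stride] = clip_u8_m(p1 + a1);
        p[-1 * stride] = clip_u8_m(p0 + a0);
        p[ 0 * stride] = clip_u8_m(q0 - a0);
        p[ 1 * stride] = clip_u8_m(q1 - a1);
        p[ 2 * stride] = clip_u8_m(q2 - a2);
    }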
376 ptrdiff_t stride, \
393 ptrdiff_t stride, \
414 ptrdiff_t stride, int fE, \
417 vpn ## _ ## dir ## _loop_filter8_c(dstU, stride, fE, fI, hev_thresh); \
418 vpn ## _ ## dir ## _loop_filter8_c(dstV, stride, fE, fI, hev_thresh); \
423 ptrdiff_t stride, \
427 vpn ## _ ## dir ## _loop_filter8_inner_c(dstU, stride, fE, fI, \
429 vpn ## _ ## dir ## _loop_filter8_inner_c(dstV, stride, fE, fI, \
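
The UV wrappers above simply run the 8-pixel edge filter twice, once per chroma plane, with the same stride and thresholds. A sketch of that pattern with a hypothetical filter8 callback standing in for the *_loop_filter8_c / *_loop_filter8_inner_c routines:

    #include <stdint.h>
    #include <stddef.h>

    /* Hypothetical 8-pixel edge filter, like the *_loop_filter8_c routines. */
    typedef void (*filter8_fn)(uint8_t *dst, ptrdiff_t stride,
                               int flim_E, int flim_I, int hev_thresh);

    /* Chroma wrapper: same filter, applied to U and V with identical parameters. */
    static void uv_loop_filter_sketch(filter8_fn filter8,
                                      uint8_t *dstU, uint8_t *dstV,
                                      ptrdiff_t stride,
                                      int flim_E, int flim_I, int hev_thresh)
    {
        filter8(dstU, stride, flim_E, flim_I, hev_thresh);
        filter8(dstV, stride, flim_E, flim_I, hev_thresh);
    }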
434 static void vpn ## _v_loop_filter_simple_c(uint8_t *dst, ptrdiff_t stride, \
439 if (vpn ## _simple_limit(dst + i, stride, flim)) \
440 vpn ## _filter_common(dst + i, stride, 1); \
443 static void vpn ## _h_loop_filter_simple_c(uint8_t *dst, ptrdiff_t stride, \
448 if (vpn ## _simple_limit(dst + i * stride, 1, flim)) \
449 vpn ## _filter_common(dst + i * stride, 1, 1); \
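
The simple loop filters visit 16 positions along the edge: the v variant steps through 16 consecutive columns and filters across rows (pixel stride = picture stride), while the h variant steps through 16 consecutive rows and filters across columns (pixel stride = 1). The LOOP_FILTER / UV_LOOP_FILTER instantiations just below pass stride and 1 in the same swapped roles. A sketch parameterized by the two step values, with hypothetical callbacks standing in for the *_simple_limit / *_filter_common pair:

    #include <stdint.h>
    #include <stddef.h>

    /* Hypothetical stand-ins for the *_simple_limit / *_filter_common pair. */
    typedef int  (*limit_fn) (uint8_t *p, ptrdiff_t stride, int flim);
    typedef void (*common_fn)(uint8_t *p, ptrdiff_t stride, int is4tap);

    /* One routine covers both orientations:
     *   v (horizontal edge): step = 1,      pix_stride = stride
     *   h (vertical edge):   step = stride, pix_stride = 1        */
    static void loop_filter_simple_sketch(uint8_t *dst, ptrdiff_t step,
                                          ptrdiff_t pix_stride, int flim,
                                          limit_fn limit, common_fn common)
    {
        for (int i = 0; i < 16; i++)
            if (limit(dst + i * step, pix_stride, flim))
                common(dst + i * step, pix_stride, 1 /* 4-tap */);
    }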
453 LOOP_FILTER(vpn, v, 16, 1, stride, ) \
454 LOOP_FILTER(vpn, h, 16, stride, 1, ) \
455 UV_LOOP_FILTER(vpn, v, 1, stride) \
456 UV_LOOP_FILTER(vpn, h, stride, 1) \
483 #define FILTER_6TAP(src, F, stride) \
484 cm[(F[2] * src[x + 0 * stride] - F[1] * src[x - 1 * stride] + \
485 F[0] * src[x - 2 * stride] + F[3] * src[x + 1 * stride] - \
486 F[4] * src[x + 2 * stride] + F[5] * src[x + 3 * stride] + 64) >> 7]
488 #define FILTER_4TAP(src, F, stride) \
489 cm[(F[2] * src[x + 0 * stride] - F[1] * src[x - 1 * stride] + \
490 F[3] * src[x + 1 * stride] - F[4] * src[x + 2 * stride] + 64) >> 7]
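
FILTER_6TAP and FILTER_4TAP evaluate one output pixel of the sub-pixel interpolation filter; stride selects the axis (1 for horizontal, picture stride for vertical), and the negative taps are folded into the expression because the coefficient table stores magnitudes. A function sketch of the 6-tap case, with plain clipping instead of the crop table:

    #include <stdint.h>
    #include <stddef.h>

    static uint8_t clip_u8_f(int v) { return v < 0 ? 0 : v > 255 ? 255 : (uint8_t) v; }

    /* One output pixel of the 6-tap sub-pixel filter.
     * stride == 1              -> filter along a row (horizontal interpolation)
     * stride == picture stride -> filter along a column (vertical interpolation)
     * F[] holds tap magnitudes; the subtracted taps match the sign pattern
     * in FILTER_6TAP above. */
    static uint8_t filter_6tap_sketch(const uint8_t *src, const int F[6],
                                      ptrdiff_t stride)
    {
        int sum = F[2] * src[ 0 * stride] - F[1] * src[-1 * stride] +
                  F[0] * src[-2 * stride] + F[3] * src[ 1 * stride] -
                  F[4] * src[ 2 * stride] + F[5] * src[ 3 * stride];
        return clip_u8_f((sum + 64) >> 7);   /* round, then divide by the 128 filter gain */
    }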