Lines Matching refs:stride

228 #define VP8_ST6x1_UB(in0, in0_idx, in1, in1_idx, pdst, stride)      \
231 __lsx_vstelm_h(in1, pdst + stride, 0, in1_idx); \
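Only the '#define' line and the halfword store of VP8_ST6x1_UB are listed; a minimal sketch of the full helper, assuming the unlisted line stores a 32-bit word element of in0 at pdst so that, together with the 16-bit element of in1 at pdst + stride, six bytes are written:

    /* Hedged reconstruction: only the define line and the vstelm_h line appear
     * in the listing above; the word store and the braces are assumptions. */
    #define VP8_ST6x1_UB(in0, in0_idx, in1, in1_idx, pdst, stride)  \
    {                                                               \
        __lsx_vstelm_w(in0, pdst,          0, in0_idx);             \
        __lsx_vstelm_h(in1, pdst + stride, 0, in1_idx);             \
    }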
234 #define ST_W4(in, idx0, idx1, idx2, idx3, pdst, stride) \
237 pdst += stride; \
239 pdst += stride; \
241 pdst += stride; \
243 pdst += stride; \
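The four pdst += stride increments listed for ST_W4 imply a store between each advance; a minimal sketch, assuming __lsx_vstelm_w word stores of the requested element indices:

    /* Hedged reconstruction: the four 'pdst += stride;' lines are in the
     * listing; the interleaved word stores are assumptions. */
    #define ST_W4(in, idx0, idx1, idx2, idx3, pdst, stride)  \
    {                                                        \
        __lsx_vstelm_w(in, pdst, 0, idx0);                   \
        pdst += stride;                                      \
        __lsx_vstelm_w(in, pdst, 0, idx1);                   \
        pdst += stride;                                      \
        __lsx_vstelm_w(in, pdst, 0, idx2);                   \
        pdst += stride;                                      \
        __lsx_vstelm_w(in, pdst, 0, idx3);                   \
        pdst += stride;                                      \
    }

Note that pdst is advanced inside the macro, so the caller's pointer ends up four rows further down after each call.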
246 void ff_vp8_v_loop_filter16_lsx(uint8_t *dst, ptrdiff_t stride, int b_limit_in,
252 ptrdiff_t stride2 = stride << 1;
253 ptrdiff_t stride3 = stride2 + stride;
262 dst - stride, 0, p3, p2, p1, p0);
263 DUP4_ARG2(__lsx_vld, dst, 0, dst + stride, 0, dst + stride2, 0, dst + stride3, 0,
271 __lsx_vst(p0, dst - stride, 0);
274 __lsx_vst(q1, dst + stride, 0);
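The matches from ff_vp8_v_loop_filter16_lsx show the stride handling used by every function in this file: stride2 and stride3 are precomputed once, four rows above the edge are loaded as p3..p0 and four at/below it as q0..q3, and the filtered rows are stored back at the same offsets. A compilable skeleton of just that addressing, assuming the FFmpeg headers named in the includes, with the filter arithmetic elided:

    #include <stddef.h>
    #include <stdint.h>
    #include <lsxintrin.h>
    #include "libavutil/loongarch/loongson_intrinsics.h"   /* DUP4_ARG2 */

    /* Hedged sketch: only the load/store addressing visible in the listing is
     * kept; stride4 is an assumed helper alongside the listed stride2/stride3
     * and the actual loop-filter math is omitted. */
    static void vp8_v_lf16_skeleton(uint8_t *dst, ptrdiff_t stride)
    {
        __m128i p3, p2, p1, p0, q0, q1, q2, q3;
        ptrdiff_t stride2 = stride << 1;        /* 2 * stride */
        ptrdiff_t stride3 = stride2 + stride;   /* 3 * stride */
        ptrdiff_t stride4 = stride2 << 1;       /* 4 * stride (assumed) */

        /* four rows above the horizontal edge -> p3, p2, p1, p0 */
        DUP4_ARG2(__lsx_vld, dst - stride4, 0, dst - stride3, 0,
                  dst - stride2, 0, dst - stride, 0, p3, p2, p1, p0);
        /* four rows at and below the edge -> q0, q1, q2, q3 */
        DUP4_ARG2(__lsx_vld, dst, 0, dst + stride, 0, dst + stride2, 0,
                  dst + stride3, 0, q0, q1, q2, q3);

        /* ... loop-filter arithmetic elided ... */

        /* filtered rows go straight back to the offsets they came from */
        __lsx_vst(p0, dst - stride, 0);
        __lsx_vst(q0, dst, 0);
        __lsx_vst(q1, dst + stride, 0);
    }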
279 ptrdiff_t stride, int b_limit_in,
287 ptrdiff_t stride2 = stride << 1;
288 ptrdiff_t stride3 = stride2 + stride;
296 dst_u - stride, 0, p3_u, p2_u, p1_u, p0_u);
297 DUP4_ARG2(__lsx_vld, dst_u, 0, dst_u + stride, 0, dst_u + stride2, 0,
301 dst_v - stride, 0, p3_v, p2_v, p1_v, p0_v);
302 DUP4_ARG2(__lsx_vld, dst_v, 0, dst_v + stride, 0, dst_v + stride2, 0,
314 __lsx_vstelm_d(p0, dst_u - stride, 0, 0);
317 __lsx_vstelm_d(q1, dst_u + stride, 0, 0);
322 __lsx_vstelm_d(p0, dst_v - stride, 0, 1);
325 __lsx_vstelm_d(q1, dst_v + stride, 0, 1);
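In the U/V variant the element indices of the final stores are the telling detail: element 0 of each 128-bit result goes to the U plane and element 1 to the V plane, which implies the U and V rows were packed into the low and high 64-bit halves of one vector before filtering. A hedged restatement of that writeback with the inferred meaning spelled out in comments:

    /* Inferred from the element indices (the packing itself is an assumption):
     * double element 0 = low 64 bits = U row, element 1 = high 64 bits = V row. */
    __lsx_vstelm_d(p0, dst_u - stride, 0, 0);   /* low  half -> U plane */
    __lsx_vstelm_d(p0, dst_v - stride, 0, 1);   /* high half -> V plane */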
329 void ff_vp8_h_loop_filter16_lsx(uint8_t *dst, ptrdiff_t stride, int b_limit_in,
339 ptrdiff_t stride2 = stride << 1;
340 ptrdiff_t stride3 = stride2 + stride;
348 DUP4_ARG2(__lsx_vld, temp_src, 0, temp_src + stride, 0, temp_src + stride2, 0,
351 DUP4_ARG2(__lsx_vld, temp_src, 0, temp_src + stride, 0, temp_src + stride2, 0,
355 DUP4_ARG2(__lsx_vld, temp_src, 0, temp_src + stride, 0, temp_src + stride2, 0,
358 DUP4_ARG2(__lsx_vld, temp_src, 0, temp_src + stride, 0, temp_src + stride2, 0,
383 temp_src += stride;
385 temp_src += stride;
387 temp_src += stride;
389 temp_src += stride;
391 temp_src += stride;
393 temp_src += stride;
395 temp_src += stride;
397 temp_src += stride;
399 temp_src += stride;
401 temp_src += stride;
403 temp_src += stride;
405 temp_src += stride;
407 temp_src += stride;
409 temp_src += stride;
411 temp_src += stride;
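The long run of temp_src += stride lines in ff_vp8_h_loop_filter16_lsx suggests a per-row writeback after the vertical edge has been filtered, presumably in transposed form: each row contributes six bytes (p2..q2) stored with VP8_ST6x1_UB, then the pointer advances one row. A hedged sketch of that pattern; the tmp0/tmp1 vectors, the element indices, and the starting offset of -3 are illustrative, not taken from the listing:

    /* Hypothetical per-row store pattern matching the repeated increments. */
    temp_src = dst - 3;                           /* start 3 pixels left of the edge */
    VP8_ST6x1_UB(tmp0, 0, tmp1, 0, temp_src, 4);  /* 4 + 2 bytes = p2..q2 of row 0 */
    temp_src += stride;
    VP8_ST6x1_UB(tmp0, 1, tmp1, 1, temp_src, 4);  /* row 1 */
    temp_src += stride;
    VP8_ST6x1_UB(tmp0, 2, tmp1, 2, temp_src, 4);  /* row 2 */
    temp_src += stride;
    /* ...continues one row at a time for the rest of the 16-row edge */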
416 ptrdiff_t stride, int b_limit_in,
426 ptrdiff_t stride2 = stride << 1;
427 ptrdiff_t stride3 = stride2 + stride;
435 DUP4_ARG2(__lsx_vld, temp_src, 0, temp_src + stride, 0, temp_src + stride2, 0,
438 DUP4_ARG2(__lsx_vld, temp_src, 0, temp_src + stride, 0, temp_src + stride2, 0,
442 DUP4_ARG2(__lsx_vld, temp_src, 0, temp_src + stride, 0, temp_src + stride2, 0,
445 DUP4_ARG2(__lsx_vld, temp_src, 0, temp_src + stride, 0, temp_src + stride2, 0,
472 dst_u += stride;
474 dst_u += stride;
476 dst_u += stride;
478 dst_u += stride;
480 dst_u += stride;
482 dst_u += stride;
484 dst_u += stride;
489 dst_v += stride;
491 dst_v += stride;
493 dst_v += stride;
495 dst_v += stride;
497 dst_v += stride;
499 dst_v += stride;
501 dst_v += stride;
505 void ff_vp8_v_loop_filter16_inner_lsx(uint8_t *src, ptrdiff_t stride,
512 ptrdiff_t stride2 = stride << 1;
513 ptrdiff_t stride3 = stride2 + stride;
518 DUP4_ARG2(__lsx_vld, src, 0, src + stride, 0, src + stride2, 0,
521 DUP4_ARG2(__lsx_vld, src, 0, src + stride, 0, src + stride2, 0,
532 __lsx_vst(p0, src - stride, 0);
534 __lsx_vst(q1, src + stride, 0);
537 void ff_vp8_h_loop_filter16_inner_lsx(uint8_t *src, ptrdiff_t stride,
546 ptrdiff_t stride2 = stride << 1;
547 ptrdiff_t stride3 = stride2 + stride;
551 DUP4_ARG2(__lsx_vld, src, 0, src + stride, 0, src + stride2, 0,
554 DUP4_ARG2(__lsx_vld, src, 0, src + stride, 0, src + stride2, 0,
557 DUP4_ARG2(__lsx_vld, src, 0, src + stride, 0, src + stride2, 0,
560 DUP4_ARG2(__lsx_vld, src, 0, src + stride, 0, src + stride2, 0,
581 ST_W4(tmp2, 0, 1, 2, 3, src, stride);
582 ST_W4(tmp3, 0, 1, 2, 3, src, stride);
588 ST_W4(tmp2, 0, 1, 2, 3, src, stride);
589 ST_W4(tmp3, 0, 1, 2, 3, src, stride);
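The paired ST_W4 calls that close both halves of ff_vp8_h_loop_filter16_inner_lsx write the transposed 4-pixel-wide result back one 32-bit word per row: with the ST_W4 sketch above, each call covers four rows (and advances src by four strides as a side effect), so each tmp2/tmp3 pair covers eight rows and the two pairs cover the full 16-row vertical edge.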