Lines Matching refs:src
315 /* put_pixels8_8_inline_asm: dst = src */
317 put_pixels8_8_inline_asm(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
325 "ld.d %[tmp0], %[src], 0x0 \n\t"
326 "ldx.d %[tmp1], %[src], %[stride] \n\t"
327 "ldx.d %[tmp2], %[src], %[stride_2] \n\t"
328 "ldx.d %[tmp3], %[src], %[stride_3] \n\t"
329 "add.d %[src], %[src], %[stride_4] \n\t"
330 "ld.d %[tmp4], %[src], 0x0 \n\t"
331 "ldx.d %[tmp5], %[src], %[stride] \n\t"
332 "ldx.d %[tmp6], %[src], %[stride_2] \n\t"
333 "ldx.d %[tmp7], %[src], %[stride_3] \n\t"
350 [dst]"+&r"(dst), [src]"+&r"(src)
356 /* avg_pixels8_8_lsx   : dst = avg(src, dst)
357  * put_pixels8_l2_8_lsx: dst = avg(src, half), where half stride is 8.
358  * avg_pixels8_l2_8_lsx: dst = avg(avg(src, half), dst), where half stride is 8. */
360 avg_pixels8_8_lsx(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
369 "vld $vr0, %[src], 0 \n\t"
370 "vldx $vr1, %[src], %[stride] \n\t"
371 "vldx $vr2, %[src], %[stride_2] \n\t"
372 "vldx $vr3, %[src], %[stride_3] \n\t"
373 "add.d %[src], %[src], %[stride_4] \n\t"
374 "vld $vr4, %[src], 0 \n\t"
375 "vldx $vr5, %[src], %[stride] \n\t"
376 "vldx $vr6, %[src], %[stride_2] \n\t"
377 "vldx $vr7, %[src], %[stride_3] \n\t"
413 : [dst]"+&r"(dst), [tmp]"+&r"(tmp), [src]"+&r"(src),
421 /* avg_pixels8_8_lsx   : dst = avg(src, dst)
422  * put_pixels8_l2_8_lsx: dst = avg(src, half), where half stride is 8.
423  * avg_pixels8_l2_8_lsx: dst = avg(avg(src, half), dst), where half stride is 8. */
425 put_pixels8_l2_8_lsx(uint8_t *dst, const uint8_t *src, const uint8_t *half,
434 "vld $vr0, %[src], 0 \n\t"
435 "vldx $vr1, %[src], %[srcStride] \n\t"
436 "vldx $vr2, %[src], %[stride_2] \n\t"
437 "vldx $vr3, %[src], %[stride_3] \n\t"
438 "add.d %[src], %[src], %[stride_4] \n\t"
439 "vld $vr4, %[src], 0 \n\t"
440 "vldx $vr5, %[src], %[srcStride] \n\t"
441 "vldx $vr6, %[src], %[stride_2] \n\t"
442 "vldx $vr7, %[src], %[stride_3] \n\t"
477 : [dst]"+&r"(dst), [half]"+&r"(half), [src]"+&r"(src),
485 /* avg_pixels8_8_lsx   : dst = avg(src, dst)
486  * put_pixels8_l2_8_lsx: dst = avg(src, half), where half stride is 8.
487  * avg_pixels8_l2_8_lsx: dst = avg(avg(src, half), dst), where half stride is 8. */
489 avg_pixels8_l2_8_lsx(uint8_t *dst, const uint8_t *src, const uint8_t *half,
499 "vld $vr0, %[src], 0 \n\t"
500 "vldx $vr1, %[src], %[srcStride] \n\t"
501 "vldx $vr2, %[src], %[stride_2] \n\t"
502 "vldx $vr3, %[src], %[stride_3] \n\t"
503 "add.d %[src], %[src], %[stride_4] \n\t"
504 "vld $vr4, %[src], 0 \n\t"
505 "vldx $vr5, %[src], %[srcStride] \n\t"
506 "vldx $vr6, %[src], %[stride_2] \n\t"
507 "vldx $vr7, %[src], %[stride_3] \n\t"
565 [src]"+&r"(src), [stride_2]"=&r"(stride_2),
572 /* put_pixels16_8_lsx: dst = src */
574 put_pixels16_8_lsx(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
581 "vld $vr0, %[src], 0 \n\t"
582 "vldx $vr1, %[src], %[stride] \n\t"
583 "vldx $vr2, %[src], %[stride_2] \n\t"
584 "vldx $vr3, %[src], %[stride_3] \n\t"
585 "add.d %[src], %[src], %[stride_4] \n\t"
586 "vld $vr4, %[src], 0 \n\t"
587 "vldx $vr5, %[src], %[stride] \n\t"
588 "vldx $vr6, %[src], %[stride_2] \n\t"
589 "vldx $vr7, %[src], %[stride_3] \n\t"
590 "add.d %[src], %[src], %[stride_4] \n\t"
603 "vld $vr0, %[src], 0 \n\t"
604 "vldx $vr1, %[src], %[stride] \n\t"
605 "vldx $vr2, %[src], %[stride_2] \n\t"
606 "vldx $vr3, %[src], %[stride_3] \n\t"
607 "add.d %[src], %[src], %[stride_4] \n\t"
608 "vld $vr4, %[src], 0 \n\t"
609 "vldx $vr5, %[src], %[stride] \n\t"
610 "vldx $vr6, %[src], %[stride_2] \n\t"
611 "vldx $vr7, %[src], %[stride_3] \n\t"
622 : [dst]"+&r"(dst), [src]"+&r"(src),
630 /* avg_pixels16_8_lsx   : dst = avg(src, dst)
631  * put_pixels16_l2_8_lsx: dst = avg(src, half), where half stride is 8.
632  * avg_pixels16_l2_8_lsx: dst = avg(avg(src, half), dst), where half stride is 8. */
634 avg_pixels16_8_lsx(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
643 "vld $vr0, %[src], 0 \n\t"
644 "vldx $vr1, %[src], %[stride] \n\t"
645 "vldx $vr2, %[src], %[stride_2] \n\t"
646 "vldx $vr3, %[src], %[stride_3] \n\t"
647 "add.d %[src], %[src], %[stride_4] \n\t"
648 "vld $vr4, %[src], 0 \n\t"
649 "vldx $vr5, %[src], %[stride] \n\t"
650 "vldx $vr6, %[src], %[stride_2] \n\t"
651 "vldx $vr7, %[src], %[stride_3] \n\t"
652 "add.d %[src], %[src], %[stride_4] \n\t"
686 "vld $vr0, %[src], 0 \n\t"
687 "vldx $vr1, %[src], %[stride] \n\t"
688 "vldx $vr2, %[src], %[stride_2] \n\t"
689 "vldx $vr3, %[src], %[stride_3] \n\t"
690 "add.d %[src], %[src], %[stride_4] \n\t"
691 "vld $vr4, %[src], 0 \n\t"
692 "vldx $vr5, %[src], %[stride] \n\t"
693 "vldx $vr6, %[src], %[stride_2] \n\t"
694 "vldx $vr7, %[src], %[stride_3] \n\t"
724 : [dst]"+&r"(dst), [tmp]"+&r"(tmp), [src]"+&r"(src),
732 /* avg_pixels16_8_lsx   : dst = avg(src, dst)
733  * put_pixels16_l2_8_lsx: dst = avg(src, half), where half stride is 8.
734  * avg_pixels16_l2_8_lsx: dst = avg(avg(src, half), dst), where half stride is 8. */
736 put_pixels16_l2_8_lsx(uint8_t *dst, const uint8_t *src, const uint8_t *half,
749 "vld $vr0, %[src], 0 \n\t"
750 "vldx $vr1, %[src], %[srcStride] \n\t"
751 "vldx $vr2, %[src], %[stride_2] \n\t"
752 "vldx $vr3, %[src], %[stride_3] \n\t"
753 "add.d %[src], %[src], %[stride_4] \n\t"
754 "vld $vr4, %[src], 0 \n\t"
755 "vldx $vr5, %[src], %[srcStride] \n\t"
756 "vldx $vr6, %[src], %[stride_2] \n\t"
757 "vldx $vr7, %[src], %[stride_3] \n\t"
758 "add.d %[src], %[src], %[stride_4] \n\t"
790 "vld $vr0, %[src], 0 \n\t"
791 "vldx $vr1, %[src], %[srcStride] \n\t"
792 "vldx $vr2, %[src], %[stride_2] \n\t"
793 "vldx $vr3, %[src], %[stride_3] \n\t"
794 "add.d %[src], %[src], %[stride_4] \n\t"
795 "vld $vr4, %[src], 0 \n\t"
796 "vldx $vr5, %[src], %[srcStride] \n\t"
797 "vldx $vr6, %[src], %[stride_2] \n\t"
798 "vldx $vr7, %[src], %[stride_3] \n\t"
827 : [dst]"+&r"(dst), [half]"+&r"(half), [src]"+&r"(src),
836 /* avg_pixels16_8_lsx   : dst = avg(src, dst)
837  * put_pixels16_l2_8_lsx: dst = avg(src, half), where half stride is 8.
838  * avg_pixels16_l2_8_lsx: dst = avg(avg(src, half), dst), where half stride is 8. */
840 avg_pixels16_l2_8_lsx(uint8_t *dst, const uint8_t *src, const uint8_t *half,
854 "vld $vr0, %[src], 0 \n\t"
855 "vldx $vr1, %[src], %[srcStride] \n\t"
856 "vldx $vr2, %[src], %[stride_2] \n\t"
857 "vldx $vr3, %[src], %[stride_3] \n\t"
858 "add.d %[src], %[src], %[stride_4] \n\t"
859 "vld $vr4, %[src], 0 \n\t"
860 "vldx $vr5, %[src], %[srcStride] \n\t"
861 "vldx $vr6, %[src], %[stride_2] \n\t"
862 "vldx $vr7, %[src], %[stride_3] \n\t"
863 "add.d %[src], %[src], %[stride_4] \n\t"
915 "vld $vr0, %[src], 0 \n\t"
916 "vldx $vr1, %[src], %[srcStride] \n\t"
917 "vldx $vr2, %[src], %[stride_2] \n\t"
918 "vldx $vr3, %[src], %[stride_3] \n\t"
919 "add.d %[src], %[src], %[stride_4] \n\t"
920 "vld $vr4, %[src], 0 \n\t"
921 "vldx $vr5, %[src], %[srcStride] \n\t"
922 "vldx $vr6, %[src], %[stride_2] \n\t"
923 "vldx $vr7, %[src], %[stride_3] \n\t"
971 : [dst]"+&r"(dst), [tmp]"+&r"(tmp), [half]"+&r"(half), [src]"+&r"(src),
981 src00 = __lasx_xvld(src, -2); \
982 src += srcStride; \
983 src10 = __lasx_xvld(src, -2); \
984 src += srcStride; \
1001 put_h264_qpel8_h_lowpass_lasx(uint8_t *dst, const uint8_t *src, int dstStride,
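put_h264_qpel8_h_lowpass_lasx implements the H.264 six-tap luma half-pel kernel (1, -5, 20, 20, -5, 1); the __lasx_xvld(src, -2) loads in the macro above fetch the two columns of left context the kernel needs. A hypothetical scalar reference for one 8x8 block:

#include <stdint.h>

static uint8_t clip_u8(int v)
{
    return (uint8_t)(v < 0 ? 0 : v > 255 ? 255 : v);
}

/* Horizontal half-pel: 6-tap (1,-5,20,20,-5,1), rounded by (+16) >> 5. */
static void put_h264_qpel8_h_lowpass_ref(uint8_t *dst, const uint8_t *src,
                                         int dstStride, int srcStride)
{
    for (int y = 0; y < 8; y++) {
        for (int x = 0; x < 8; x++)
            dst[x] = clip_u8((src[x-2] - 5*src[x-1] + 20*src[x] +
                              20*src[x+1] - 5*src[x+2] + src[x+3] + 16) >> 5);
        src += srcStride;
        dst += dstStride;
    }
}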
1053 put_h264_qpel8_v_lowpass_lasx(uint8_t *dst, const uint8_t *src, int dstStride,
1067 DUP2_ARG2(__lasx_xvld, src - srcStride_2x, 0, src - srcStride, 0,
1069 src02 = __lasx_xvld(src, 0);
1070 DUP4_ARG2(__lasx_xvldx, src, srcStride, src, srcStride_2x, src,
1071 srcStride_3x, src, srcStride_4x, src03, src04, src05, src06);
1072 src += srcStride_4x;
1073 DUP4_ARG2(__lasx_xvldx, src, srcStride, src, srcStride_2x, src,
1074 srcStride_3x, src, srcStride_4x, src07, src08, src09, src10);
1075 src += srcStride_4x;
1076 DUP2_ARG2(__lasx_xvldx, src, srcStride, src, srcStride_2x, src11, src12);
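The vertical lowpass runs the same kernel down each column, which is why the fragment above preloads thirteen rows starting at src - srcStride_2x (two rows of top context, eight output rows, three rows below). A scalar sketch, reusing clip_u8 from the previous sketch:

/* Vertical half-pel: the same 6-tap kernel applied down each column. */
static void put_h264_qpel8_v_lowpass_ref(uint8_t *dst, const uint8_t *src,
                                         int dstStride, int srcStride)
{
    for (int y = 0; y < 8; y++) {
        for (int x = 0; x < 8; x++)
            dst[x] = clip_u8((src[x - 2*srcStride] - 5*src[x - srcStride] +
                              20*src[x] + 20*src[x + srcStride] -
                              5*src[x + 2*srcStride] + src[x + 3*srcStride] + 16) >> 5);
        src += srcStride;
        dst += dstStride;
    }
}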
1100 avg_h264_qpel8_v_lowpass_lasx(uint8_t *dst, const uint8_t *src, int dstStride,
1117 DUP2_ARG2(__lasx_xvld, src - srcStride_2x, 0, src - srcStride, 0,
1119 src02 = __lasx_xvld(src, 0);
1120 DUP4_ARG2(__lasx_xvldx, src, srcStride, src, srcStride_2x, src,
1121 srcStride_3x, src, srcStride_4x, src03, src04, src05, src06);
1122 src += srcStride_4x;
1123 DUP4_ARG2(__lasx_xvldx, src, srcStride, src, srcStride_2x, src,
1124 srcStride_3x, src, srcStride_4x, src07, src08, src09, src10);
1125 src += srcStride_4x;
1126 DUP2_ARG2(__lasx_xvldx, src, srcStride, src, srcStride_2x, src11, src12);
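avg_h264_qpel8_v_lowpass_lasx loads the same thirteen source rows; it differs from the put variant only in rounding the filtered result against the bytes already in dst, as in avg_pixels8_8 above.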
1170 src00 = __lasx_xvld(src, -2); \
1171 src += srcStride; \
1172 src10 = __lasx_xvld(src, -2); \
1173 src += srcStride; \
1214 put_h264_qpel8_hv_lowpass_lasx(uint8_t *dst, const uint8_t *src,
1233 src -= srcStride << 1;
1274 avg_h264_qpel8_h_lowpass_lasx(uint8_t *dst, const uint8_t *src, int dstStride,
1323 avg_h264_qpel8_hv_lowpass_lasx(uint8_t *dst, const uint8_t *src,
1345 src -= srcStride << 1;
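The hv variants filter in both directions: a horizontal six-tap pass into unclipped integer intermediates, then a vertical six-tap pass over those with the combined rounding (+512) >> 10. The src -= srcStride << 1 lines above back the source up two rows for the vertical kernel's top context. A hypothetical, self-contained scalar reference:

/* 2-D half-pel: horizontal 6-tap into unclipped int intermediates,
 * then vertical 6-tap over them, rounded by (+512) >> 10 and clipped. */
static void put_h264_qpel8_hv_lowpass_ref(uint8_t *dst, const uint8_t *src,
                                          int dstStride, int srcStride)
{
    int tmp[13][8];                /* 2 rows above + 8 output rows + 3 below */
    src -= 2 * srcStride;          /* matches "src -= srcStride << 1" above */
    for (int y = 0; y < 13; y++, src += srcStride)
        for (int x = 0; x < 8; x++)
            tmp[y][x] = src[x-2] - 5*src[x-1] + 20*src[x] +
                        20*src[x+1] - 5*src[x+2] + src[x+3];
    for (int y = 0; y < 8; y++, dst += dstStride)
        for (int x = 0; x < 8; x++) {
            int v = (tmp[y][x] - 5*tmp[y+1][x] + 20*tmp[y+2][x] +
                     20*tmp[y+3][x] - 5*tmp[y+4][x] + tmp[y+5][x] + 512) >> 10;
            dst[x] = (uint8_t)(v < 0 ? 0 : v > 255 ? 255 : v);
        }
}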
1402 put_h264_qpel16_h_lowpass_lasx(uint8_t *dst, const uint8_t *src,
1405 put_h264_qpel8_h_lowpass_lasx(dst, src, dstStride, srcStride);
1406 put_h264_qpel8_h_lowpass_lasx(dst+8, src+8, dstStride, srcStride);
1407 src += srcStride << 3;
1409 put_h264_qpel8_h_lowpass_lasx(dst, src, dstStride, srcStride);
1410 put_h264_qpel8_h_lowpass_lasx(dst+8, src+8, dstStride, srcStride);
1414 avg_h264_qpel16_h_lowpass_lasx(uint8_t *dst, const uint8_t *src,
1417 avg_h264_qpel8_h_lowpass_lasx(dst, src, dstStride, srcStride);
1418 avg_h264_qpel8_h_lowpass_lasx(dst+8, src+8, dstStride, srcStride);
1419 src += srcStride << 3;
1421 avg_h264_qpel8_h_lowpass_lasx(dst, src, dstStride, srcStride);
1422 avg_h264_qpel8_h_lowpass_lasx(dst+8, src+8, dstStride, srcStride);
1425 static void put_h264_qpel16_v_lowpass_lasx(uint8_t *dst, const uint8_t *src,
1428 put_h264_qpel8_v_lowpass_lasx(dst, src, dstStride, srcStride);
1429 put_h264_qpel8_v_lowpass_lasx(dst+8, src+8, dstStride, srcStride);
1430 src += 8*srcStride;
1432 put_h264_qpel8_v_lowpass_lasx(dst, src, dstStride, srcStride);
1433 put_h264_qpel8_v_lowpass_lasx(dst+8, src+8, dstStride, srcStride);
1436 static void avg_h264_qpel16_v_lowpass_lasx(uint8_t *dst, const uint8_t *src,
1439 avg_h264_qpel8_v_lowpass_lasx(dst, src, dstStride, srcStride);
1440 avg_h264_qpel8_v_lowpass_lasx(dst+8, src+8, dstStride, srcStride);
1441 src += 8*srcStride;
1443 avg_h264_qpel8_v_lowpass_lasx(dst, src, dstStride, srcStride);
1444 avg_h264_qpel8_v_lowpass_lasx(dst+8, src+8, dstStride, srcStride);
1447 static void put_h264_qpel16_hv_lowpass_lasx(uint8_t *dst, const uint8_t *src,
1450 put_h264_qpel8_hv_lowpass_lasx(dst, src, dstStride, srcStride);
1451 put_h264_qpel8_hv_lowpass_lasx(dst + 8, src + 8, dstStride, srcStride);
1452 src += srcStride << 3;
1454 put_h264_qpel8_hv_lowpass_lasx(dst, src, dstStride, srcStride);
1455 put_h264_qpel8_hv_lowpass_lasx(dst + 8, src + 8, dstStride, srcStride);
1458 static void avg_h264_qpel16_hv_lowpass_lasx(uint8_t *dst, const uint8_t *src,
1461 avg_h264_qpel8_hv_lowpass_lasx(dst, src, dstStride, srcStride);
1462 avg_h264_qpel8_hv_lowpass_lasx(dst + 8, src + 8, dstStride, srcStride);
1463 src += srcStride << 3;
1465 avg_h264_qpel8_hv_lowpass_lasx(dst, src, dstStride, srcStride);
1466 avg_h264_qpel8_hv_lowpass_lasx(dst + 8, src + 8, dstStride, srcStride);
1469 void ff_put_h264_qpel8_mc00_lasx(uint8_t *dst, const uint8_t *src,
1474 put_pixels8_8_inline_asm(dst, src, stride);
1477 void ff_put_h264_qpel8_mc10_lasx(uint8_t *dst, const uint8_t *src,
1482 put_h264_qpel8_h_lowpass_lasx(half, src, 8, stride);
1484 put_pixels8_l2_8_lsx(dst, src, half, stride, stride);
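In the ff_*_mcXY names, X and Y are the horizontal and vertical offsets in quarter pels: mc00 is a copy, mc20/mc02/mc22 are the pure half-pel filters, and odd offsets are rounded averages of the two nearest samples. mc10 above therefore averages the half-pel column with src, while mc30 below averages the same column with src + 1, the integer pixel on its other side. A hypothetical scalar sketch of the two horizontal quarter positions (clip_u8 as defined earlier):

/* Horizontal quarter-pel: rounded average of the H half-pel value with the
 * nearer integer pixel; right = 0 models mc10, right = 1 models mc30. */
static void put_qpel8_h_quarter_ref(uint8_t *dst, const uint8_t *src,
                                    ptrdiff_t stride, int right)
{
    for (int y = 0; y < 8; y++, src += stride, dst += stride)
        for (int x = 0; x < 8; x++) {
            int h = clip_u8((src[x-2] - 5*src[x-1] + 20*src[x] +
                             20*src[x+1] - 5*src[x+2] + src[x+3] + 16) >> 5);
            dst[x] = (uint8_t)((src[x + right] + h + 1) >> 1);
        }
}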
1487 void ff_put_h264_qpel8_mc20_lasx(uint8_t *dst, const uint8_t *src,
1490 put_h264_qpel8_h_lowpass_lasx(dst, src, stride, stride);
1493 void ff_put_h264_qpel8_mc30_lasx(uint8_t *dst, const uint8_t *src,
1498 put_h264_qpel8_h_lowpass_lasx(half, src, 8, stride);
1499 put_pixels8_l2_8_lsx(dst, src+1, half, stride, stride);
1502 void ff_put_h264_qpel8_mc01_lasx(uint8_t *dst, const uint8_t *src,
1507 put_h264_qpel8_v_lowpass_lasx(half, src, 8, stride);
1508 put_pixels8_l2_8_lsx(dst, src, half, stride, stride);
1511 void ff_put_h264_qpel8_mc11_lasx(uint8_t *dst, const uint8_t *src,
1517 put_h264_qpel8_h_lowpass_lasx(halfH, src, 8, stride);
1518 put_h264_qpel8_v_lowpass_lasx(halfV, src, 8, stride);
1522 void ff_put_h264_qpel8_mc21_lasx(uint8_t *dst, const uint8_t *src,
1529 put_h264_qpel8_h_lowpass_lasx(halfH, src, 8, stride);
1530 put_h264_qpel8_hv_lowpass_lasx(halfHV, src, 8, stride);
1534 void ff_put_h264_qpel8_mc31_lasx(uint8_t *dst, const uint8_t *src,
1540 put_h264_qpel8_h_lowpass_lasx(halfH, src, 8, stride);
1541 put_h264_qpel8_v_lowpass_lasx(halfV, src + 1, 8, stride);
1545 void ff_put_h264_qpel8_mc02_lasx(uint8_t *dst, const uint8_t *src,
1548 put_h264_qpel8_v_lowpass_lasx(dst, src, stride, stride);
1551 void ff_put_h264_qpel8_mc12_lasx(uint8_t *dst, const uint8_t *src,
1558 put_h264_qpel8_hv_lowpass_lasx(halfHV, src, 8, stride);
1559 put_h264_qpel8_v_lowpass_lasx(halfH, src, 8, stride);
1563 void ff_put_h264_qpel8_mc22_lasx(uint8_t *dst, const uint8_t *src,
1566 put_h264_qpel8_hv_lowpass_lasx(dst, src, stride, stride);
1569 void ff_put_h264_qpel8_mc32_lasx(uint8_t *dst, const uint8_t *src,
1576 put_h264_qpel8_hv_lowpass_lasx(halfHV, src, 8, stride);
1577 put_h264_qpel8_v_lowpass_lasx(halfH, src + 1, 8, stride);
1581 void ff_put_h264_qpel8_mc03_lasx(uint8_t *dst, const uint8_t *src,
1586 put_h264_qpel8_v_lowpass_lasx(half, src, 8, stride);
1587 put_pixels8_l2_8_lsx(dst, src + stride, half, stride, stride);
1590 void ff_put_h264_qpel8_mc13_lasx(uint8_t *dst, const uint8_t *src,
1596 put_h264_qpel8_h_lowpass_lasx(halfH, src + stride, 8, stride);
1597 put_h264_qpel8_v_lowpass_lasx(halfV, src, 8, stride);
1601 void ff_put_h264_qpel8_mc23_lasx(uint8_t *dst, const uint8_t *src,
1608 put_h264_qpel8_h_lowpass_lasx(halfH, src + stride, 8, stride);
1609 put_h264_qpel8_hv_lowpass_lasx(halfHV, src, 8, stride);
1613 void ff_put_h264_qpel8_mc33_lasx(uint8_t *dst, const uint8_t *src,
1619 put_h264_qpel8_h_lowpass_lasx(halfH, src + stride, 8, stride);
1620 put_h264_qpel8_v_lowpass_lasx(halfV, src + 1, 8, stride);
1624 void ff_avg_h264_qpel8_mc00_lasx(uint8_t *dst, const uint8_t *src,
1629 avg_pixels8_8_lsx(dst, src, stride);
1632 void ff_avg_h264_qpel8_mc10_lasx(uint8_t *dst, const uint8_t *src,
1637 put_h264_qpel8_h_lowpass_lasx(half, src, 8, stride);
1638 avg_pixels8_l2_8_lsx(dst, src, half, stride, stride);
1641 void ff_avg_h264_qpel8_mc20_lasx(uint8_t *dst, const uint8_t *src,
1644 avg_h264_qpel8_h_lowpass_lasx(dst, src, stride, stride);
1647 void ff_avg_h264_qpel8_mc30_lasx(uint8_t *dst, const uint8_t *src,
1652 put_h264_qpel8_h_lowpass_lasx(half, src, 8, stride);
1653 avg_pixels8_l2_8_lsx(dst, src+1, half, stride, stride);
1656 void ff_avg_h264_qpel8_mc11_lasx(uint8_t *dst, const uint8_t *src,
1662 put_h264_qpel8_h_lowpass_lasx(halfH, src, 8, stride);
1663 put_h264_qpel8_v_lowpass_lasx(halfV, src, 8, stride);
1667 void ff_avg_h264_qpel8_mc21_lasx(uint8_t *dst, const uint8_t *src,
1674 put_h264_qpel8_h_lowpass_lasx(halfH, src, 8, stride);
1675 put_h264_qpel8_hv_lowpass_lasx(halfHV, src, 8, stride);
1679 void ff_avg_h264_qpel8_mc31_lasx(uint8_t *dst, const uint8_t *src,
1685 put_h264_qpel8_h_lowpass_lasx(halfH, src, 8, stride);
1686 put_h264_qpel8_v_lowpass_lasx(halfV, src + 1, 8, stride);
1690 void ff_avg_h264_qpel8_mc02_lasx(uint8_t *dst, const uint8_t *src,
1693 avg_h264_qpel8_v_lowpass_lasx(dst, src, stride, stride);
1696 void ff_avg_h264_qpel8_mc12_lasx(uint8_t *dst, const uint8_t *src,
1703 put_h264_qpel8_hv_lowpass_lasx(halfHV, src, 8, stride);
1704 put_h264_qpel8_v_lowpass_lasx(halfH, src, 8, stride);
1708 void ff_avg_h264_qpel8_mc22_lasx(uint8_t *dst, const uint8_t *src,
1711 avg_h264_qpel8_hv_lowpass_lasx(dst, src, stride, stride);
1714 void ff_avg_h264_qpel8_mc32_lasx(uint8_t *dst, const uint8_t *src,
1721 put_h264_qpel8_hv_lowpass_lasx(halfHV, src, 8, stride);
1722 put_h264_qpel8_v_lowpass_lasx(halfH, src + 1, 8, stride);
1726 void ff_avg_h264_qpel8_mc13_lasx(uint8_t *dst, const uint8_t *src,
1732 put_h264_qpel8_h_lowpass_lasx(halfH, src + stride, 8, stride);
1733 put_h264_qpel8_v_lowpass_lasx(halfV, src, 8, stride);
1737 void ff_avg_h264_qpel8_mc23_lasx(uint8_t *dst, const uint8_t *src,
1744 put_h264_qpel8_h_lowpass_lasx(halfH, src + stride, 8, stride);
1745 put_h264_qpel8_hv_lowpass_lasx(halfHV, src, 8, stride);
1749 void ff_avg_h264_qpel8_mc33_lasx(uint8_t *dst, const uint8_t *src,
1755 put_h264_qpel8_h_lowpass_lasx(halfH, src + stride, 8, stride);
1756 put_h264_qpel8_v_lowpass_lasx(halfV, src + 1, 8, stride);
1760 void ff_put_h264_qpel16_mc00_lasx(uint8_t *dst, const uint8_t *src,
1765 put_pixels16_8_lsx(dst, src, stride);
1768 void ff_put_h264_qpel16_mc10_lasx(uint8_t *dst, const uint8_t *src,
1773 put_h264_qpel16_h_lowpass_lasx(half, src, 16, stride);
1774 put_pixels16_l2_8_lsx(dst, src, half, stride, stride);
1777 void ff_put_h264_qpel16_mc20_lasx(uint8_t *dst, const uint8_t *src,
1780 put_h264_qpel16_h_lowpass_lasx(dst, src, stride, stride);
1783 void ff_put_h264_qpel16_mc30_lasx(uint8_t *dst, const uint8_t *src,
1788 put_h264_qpel16_h_lowpass_lasx(half, src, 16, stride);
1789 put_pixels16_l2_8_lsx(dst, src+1, half, stride, stride);
1792 void ff_put_h264_qpel16_mc01_lasx(uint8_t *dst, const uint8_t *src,
1797 put_h264_qpel16_v_lowpass_lasx(half, src, 16, stride);
1798 put_pixels16_l2_8_lsx(dst, src, half, stride, stride);
1801 void ff_put_h264_qpel16_mc11_lasx(uint8_t *dst, const uint8_t *src,
1804 avc_luma_hv_qrt_16x16_lasx((uint8_t*)src - 2, (uint8_t*)src - (stride * 2),
1808 void ff_put_h264_qpel16_mc21_lasx(uint8_t *dst, const uint8_t *src,
1815 put_h264_qpel16_h_lowpass_lasx(halfH, src, 16, stride);
1816 put_h264_qpel16_hv_lowpass_lasx(halfHV, src, 16, stride);
1820 void ff_put_h264_qpel16_mc31_lasx(uint8_t *dst, const uint8_t *src,
1823 avc_luma_hv_qrt_16x16_lasx((uint8_t*)src - 2, (uint8_t*)src - (stride * 2) + 1,
1827 void ff_put_h264_qpel16_mc02_lasx(uint8_t *dst, const uint8_t *src,
1830 put_h264_qpel16_v_lowpass_lasx(dst, src, stride, stride);
1833 void ff_put_h264_qpel16_mc12_lasx(uint8_t *dst, const uint8_t *src,
1840 put_h264_qpel16_hv_lowpass_lasx(halfHV, src, 16, stride);
1841 put_h264_qpel16_v_lowpass_lasx(halfH, src, 16, stride);
1845 void ff_put_h264_qpel16_mc22_lasx(uint8_t *dst, const uint8_t *src,
1848 put_h264_qpel16_hv_lowpass_lasx(dst, src, stride, stride);
1851 void ff_put_h264_qpel16_mc32_lasx(uint8_t *dst, const uint8_t *src,
1858 put_h264_qpel16_hv_lowpass_lasx(halfHV, src, 16, stride);
1859 put_h264_qpel16_v_lowpass_lasx(halfH, src + 1, 16, stride);
1863 void ff_put_h264_qpel16_mc03_lasx(uint8_t *dst, const uint8_t *src,
1868 put_h264_qpel16_v_lowpass_lasx(half, src, 16, stride);
1869 put_pixels16_l2_8_lsx(dst, src+stride, half, stride, stride);
1872 void ff_put_h264_qpel16_mc13_lasx(uint8_t *dst, const uint8_t *src,
1875 avc_luma_hv_qrt_16x16_lasx((uint8_t*)src + stride - 2, (uint8_t*)src - (stride * 2),
1879 void ff_put_h264_qpel16_mc23_lasx(uint8_t *dst, const uint8_t *src,
1886 put_h264_qpel16_h_lowpass_lasx(halfH, src + stride, 16, stride);
1887 put_h264_qpel16_hv_lowpass_lasx(halfHV, src, 16, stride);
1891 void ff_put_h264_qpel16_mc33_lasx(uint8_t *dst, const uint8_t *src,
1894 avc_luma_hv_qrt_16x16_lasx((uint8_t*)src + stride - 2,
1895 (uint8_t*)src - (stride * 2) + 1, dst, stride);
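Note: the fused mc11/mc31/mc13/mc33 helpers take two source pointers, src - 2 for the horizontal kernel's left context and src - stride * 2 for the vertical kernel's top context; the + 1 and + stride adjustments visible above select which neighbouring quarter position each wrapper produces.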
1898 void ff_avg_h264_qpel16_mc00_lasx(uint8_t *dst, const uint8_t *src,
1903 avg_pixels16_8_lsx(dst, src, stride);
1906 void ff_avg_h264_qpel16_mc10_lasx(uint8_t *dst, const uint8_t *src,
1911 put_h264_qpel16_h_lowpass_lasx(half, src, 16, stride);
1912 avg_pixels16_l2_8_lsx(dst, src, half, stride, stride);
1915 void ff_avg_h264_qpel16_mc20_lasx(uint8_t *dst, const uint8_t *src,
1918 avg_h264_qpel16_h_lowpass_lasx(dst, src, stride, stride);
1921 void ff_avg_h264_qpel16_mc30_lasx(uint8_t *dst, const uint8_t *src,
1926 put_h264_qpel16_h_lowpass_lasx(half, src, 16, stride);
1927 avg_pixels16_l2_8_lsx(dst, src+1, half, stride, stride);
1930 void ff_avg_h264_qpel16_mc01_lasx(uint8_t *dst, const uint8_t *src,
1935 put_h264_qpel16_v_lowpass_lasx(half, src, 16, stride);
1936 avg_pixels16_l2_8_lsx(dst, src, half, stride, stride);
1939 void ff_avg_h264_qpel16_mc11_lasx(uint8_t *dst, const uint8_t *src,
1942 avc_luma_hv_qrt_and_aver_dst_16x16_lasx((uint8_t*)src - 2,
1943 (uint8_t*)src - (stride * 2),
1947 void ff_avg_h264_qpel16_mc21_lasx(uint8_t *dst, const uint8_t *src,
1954 put_h264_qpel16_h_lowpass_lasx(halfH, src, 16, stride);
1955 put_h264_qpel16_hv_lowpass_lasx(halfHV, src, 16, stride);
1959 void ff_avg_h264_qpel16_mc31_lasx(uint8_t *dst, const uint8_t *src,
1962 avc_luma_hv_qrt_and_aver_dst_16x16_lasx((uint8_t*)src - 2,
1963 (uint8_t*)src - (stride * 2) + 1,
1967 void ff_avg_h264_qpel16_mc02_lasx(uint8_t *dst, const uint8_t *src,
1970 avg_h264_qpel16_v_lowpass_lasx(dst, src, stride, stride);
1973 void ff_avg_h264_qpel16_mc12_lasx(uint8_t *dst, const uint8_t *src,
1980 put_h264_qpel16_hv_lowpass_lasx(halfHV, src, 16, stride);
1981 put_h264_qpel16_v_lowpass_lasx(halfH, src, 16, stride);
1985 void ff_avg_h264_qpel16_mc22_lasx(uint8_t *dst, const uint8_t *src,
1988 avg_h264_qpel16_hv_lowpass_lasx(dst, src, stride, stride);
1991 void ff_avg_h264_qpel16_mc32_lasx(uint8_t *dst, const uint8_t *src,
1998 put_h264_qpel16_hv_lowpass_lasx(halfHV, src, 16, stride);
1999 put_h264_qpel16_v_lowpass_lasx(halfH, src + 1, 16, stride);
2003 void ff_avg_h264_qpel16_mc03_lasx(uint8_t *dst, const uint8_t *src,
2008 put_h264_qpel16_v_lowpass_lasx(half, src, 16, stride);
2009 avg_pixels16_l2_8_lsx(dst, src + stride, half, stride, stride);
2012 void ff_avg_h264_qpel16_mc13_lasx(uint8_t *dst, const uint8_t *src,
2015 avc_luma_hv_qrt_and_aver_dst_16x16_lasx((uint8_t*)src + stride - 2,
2016 (uint8_t*)src - (stride * 2),
2020 void ff_avg_h264_qpel16_mc23_lasx(uint8_t *dst, const uint8_t *src,
2027 put_h264_qpel16_h_lowpass_lasx(halfH, src + stride, 16, stride);
2028 put_h264_qpel16_hv_lowpass_lasx(halfHV, src, 16, stride);
2032 void ff_avg_h264_qpel16_mc33_lasx(uint8_t *dst, const uint8_t *src,
2035 avc_luma_hv_qrt_and_aver_dst_16x16_lasx((uint8_t*)src + stride - 2,
2036 (uint8_t*)src - (stride * 2) + 1,