Lines matching refs:dst

29 static inline void copy_block4_mmi(uint8_t *dst, const uint8_t *src,
38 MMI_SWC1(%[ftmp0], %[dst], 0x00)
41 PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t"
44 [dst]"+&r"(dst), [src]"+&r"(src),
53 static inline void copy_block8_mmi(uint8_t *dst, const uint8_t *src,
62 MMI_SDC1(%[ftmp0], %[dst], 0x00)
65 PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t"
69 [dst]"+&r"(dst), [src]"+&r"(src),
77 static inline void copy_block16_mmi(uint8_t *dst, const uint8_t *src,
89 MMI_SDC1(%[ftmp0], %[dst], 0x00)
90 "sdl %[tmp0], 0x0f(%[dst]) \n\t"
91 "sdr %[tmp0], 0x08(%[dst]) \n\t"
94 PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t"
99 [dst]"+&r"(dst), [src]"+&r"(src),
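
The three copy_block helpers above are plain strided block copies done with FPU loads/stores (32-bit for width 4, 64-bit for width 8, 64-bit plus sdl/sdr for the unaligned upper half at width 16). As a point of reference, a scalar sketch of the same operation, with the row count h assumed from the callers:

    #include <stdint.h>
    #include <string.h>

    /* Scalar equivalent of copy_blockN_mmi: copy `width` bytes per row for
     * `h` rows, with independent destination and source strides.
     * width is 4, 8 or 16 depending on the variant. */
    static inline void copy_block_c(uint8_t *dst, const uint8_t *src,
                                    int dstStride, int srcStride,
                                    int width, int h)
    {
        while (h--) {
            memcpy(dst, src, width);
            dst += dstStride;
            src += srcStride;
        }
    }
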
109 static void put_h264_qpel4_h_lowpass_mmi(uint8_t *dst, const uint8_t *src,
143 MMI_SWC1(%[ftmp9], %[dst], 0x00)
145 PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t"
155 [dst]"+&r"(dst), [src]"+&r"(src)
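
The *_h_lowpass functions implement the H.264 horizontal half-sample six-tap filter, taps (1, -5, 20, 20, -5, 1), with +16 and a shift by 5 as the single-pass rounding. A scalar sketch of one row of the put variant (av_clip_uint8 stands in for the saturating pack the assembly performs; dst, src and width follow the surrounding code):

    #include "libavutil/common.h"   /* av_clip_uint8() */

    /* One row of the put H.264 horizontal six-tap lowpass, scalar form. */
    static void h_lowpass_row_sketch(uint8_t *dst, const uint8_t *src, int width)
    {
        for (int x = 0; x < width; x++) {
            int v =       (src[x - 2] + src[x + 3])
                    - 5 * (src[x - 1] + src[x + 2])
                   + 20 * (src[x]     + src[x + 1]);
            dst[x] = av_clip_uint8((v + 16) >> 5);  /* round, clamp to [0,255] */
        }
    }
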
164 static void put_h264_qpel8_h_lowpass_mmi(uint8_t *dst, const uint8_t *src,
212 MMI_SDC1(%[ftmp9], %[dst], 0x00)
215 PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t"
225 [dst]"+&r"(dst), [src]"+&r"(src)
234 static void put_h264_qpel16_h_lowpass_mmi(uint8_t *dst, const uint8_t *src,
237 put_h264_qpel8_h_lowpass_mmi(dst, src, dstStride, srcStride);
238 put_h264_qpel8_h_lowpass_mmi(dst+8, src+8, dstStride, srcStride);
240 dst += 8*dstStride;
241 put_h264_qpel8_h_lowpass_mmi(dst, src, dstStride, srcStride);
242 put_h264_qpel8_h_lowpass_mmi(dst+8, src+8, dstStride, srcStride);
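
All of the 16-wide wrappers in this file follow one tiling pattern, visible in the calls above: the 8-wide worker runs on the four 8x8 quadrants, with dst advanced by 8*dstStride between the top and bottom halves (src is advanced the same way on a line that does not match this dst-only listing). Schematically:

    /* Tiling used by the *_qpel16_* wrappers (worker = the wrapped 8-wide lowpass). */
    worker(dst,     src,     dstStride, srcStride);   /* top-left     */
    worker(dst + 8, src + 8, dstStride, srcStride);   /* top-right    */
    src += 8 * srcStride;
    dst += 8 * dstStride;
    worker(dst,     src,     dstStride, srcStride);   /* bottom-left  */
    worker(dst + 8, src + 8, dstStride, srcStride);   /* bottom-right */
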
245 static void avg_h264_qpel4_h_lowpass_mmi(uint8_t *dst, const uint8_t *src,
278 MMI_LWC1(%[ftmp10], %[dst], 0x00)
280 MMI_SWC1(%[ftmp9], %[dst], 0x00)
283 PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t"
293 [dst]"+&r"(dst), [src]"+&r"(src)
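
The avg_* variants above and below differ from the put_* ones only at the store: the freshly filtered value is combined with the pixel already in dst by a rounding byte average, which is why each avg loop loads from dst (MMI_LWC1/MMI_LDC1) immediately before the store. In scalar terms, assuming the usual FFmpeg rounding:

    #include <stdint.h>

    /* put store:  dst[x] = filtered;
     * avg store:  rounding average with the value already in the destination. */
    static inline uint8_t avg_store_sketch(uint8_t old, uint8_t filtered)
    {
        return (uint8_t)((old + filtered + 1) >> 1);
    }
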
302 static void avg_h264_qpel8_h_lowpass_mmi(uint8_t *dst, const uint8_t *src,
350 MMI_LDC1(%[ftmp10], %[dst], 0x00)
352 MMI_SDC1(%[ftmp9], %[dst], 0x00)
355 PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t"
365 [dst]"+&r"(dst), [src]"+&r"(src)
374 static void avg_h264_qpel16_h_lowpass_mmi(uint8_t *dst, const uint8_t *src,
377 avg_h264_qpel8_h_lowpass_mmi(dst, src, dstStride, srcStride);
378 avg_h264_qpel8_h_lowpass_mmi(dst+8, src+8, dstStride, srcStride);
380 dst += 8*dstStride;
381 avg_h264_qpel8_h_lowpass_mmi(dst, src, dstStride, srcStride);
382 avg_h264_qpel8_h_lowpass_mmi(dst+8, src+8, dstStride, srcStride);
385 static void put_h264_qpel4_v_lowpass_mmi(uint8_t *dst, const uint8_t *src,
430 MMI_SWC1(%[ftmp7], %[dst], 0x00)
431 PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t"
445 MMI_SWC1(%[ftmp7], %[dst], 0x00)
446 PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t"
460 MMI_SWC1(%[ftmp7], %[dst], 0x00)
461 PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t"
475 MMI_SWC1(%[ftmp7], %[dst], 0x00)
476 PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t"
486 [dst]"+&r"(dst), [src]"+&r"(src)
494 static void put_h264_qpel8_v_lowpass_mmi(uint8_t *dst, const uint8_t *src,
542 MMI_SWC1(%[ftmp6], %[dst], 0x00)
543 PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t"
557 MMI_SWC1(%[ftmp6], %[dst], 0x00)
558 PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t"
572 MMI_SWC1(%[ftmp6], %[dst], 0x00)
574 PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t"
587 MMI_SWC1(%[ftmp6], %[dst], 0x00)
589 PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t"
602 MMI_SWC1(%[ftmp6], %[dst], 0x00)
604 PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t"
617 MMI_SWC1(%[ftmp6], %[dst], 0x00)
619 PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t"
632 MMI_SWC1(%[ftmp6], %[dst], 0x00)
634 PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t"
647 MMI_SWC1(%[ftmp6], %[dst], 0x00)
649 PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t"
663 MMI_SWC1(%[ftmp6], %[dst], 0x00)
665 PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t"
678 MMI_SWC1(%[ftmp6], %[dst], 0x00)
680 PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t"
693 MMI_SWC1(%[ftmp6], %[dst], 0x00)
695 PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t"
708 MMI_SWC1(%[ftmp6], %[dst], 0x00)
710 PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t"
723 MMI_SWC1(%[ftmp6], %[dst], 0x00)
725 PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t"
738 MMI_SWC1(%[ftmp6], %[dst], 0x00)
740 PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t"
753 MMI_SWC1(%[ftmp6], %[dst], 0x00)
755 PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t"
768 MMI_SWC1(%[ftmp6], %[dst], 0x00)
769 PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t"
779 [src]"+&r"(src), [dst]"+&r"(dst),
788 dst += 4 - h * dstStride;
792 static void put_h264_qpel16_v_lowpass_mmi(uint8_t *dst, const uint8_t *src,
795 put_h264_qpel8_v_lowpass_mmi(dst, src, dstStride, srcStride);
796 put_h264_qpel8_v_lowpass_mmi(dst+8, src+8, dstStride, srcStride);
798 dst += 8*dstStride;
799 put_h264_qpel8_v_lowpass_mmi(dst, src, dstStride, srcStride);
800 put_h264_qpel8_v_lowpass_mmi(dst+8, src+8, dstStride, srcStride);
803 static void avg_h264_qpel4_v_lowpass_mmi(uint8_t *dst, const uint8_t *src,
847 MMI_LWC1(%[ftmp0], %[dst], 0x00)
849 MMI_SWC1(%[ftmp6], %[dst], 0x00)
850 PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t"
864 MMI_LWC1(%[ftmp1], %[dst], 0x00)
866 MMI_SWC1(%[ftmp6], %[dst], 0x00)
867 PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t"
881 MMI_LWC1(%[ftmp2], %[dst], 0x00)
883 MMI_SWC1(%[ftmp6], %[dst], 0x00)
884 PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t"
898 MMI_LWC1(%[ftmp3], %[dst], 0x00)
900 MMI_SWC1(%[ftmp6], %[dst], 0x00)
901 PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t"
909 [src]"+&r"(src), [dst]"+&r"(dst)
917 static void avg_h264_qpel8_v_lowpass_mmi(uint8_t *dst, const uint8_t *src,
965 MMI_LWC1(%[ftmp0], %[dst], 0x00)
967 MMI_SWC1(%[ftmp6], %[dst], 0x00)
968 PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t"
982 MMI_LWC1(%[ftmp1], %[dst], 0x00)
984 MMI_SWC1(%[ftmp6], %[dst], 0x00)
985 PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t"
999 MMI_LWC1(%[ftmp2], %[dst], 0x00)
1001 MMI_SWC1(%[ftmp6], %[dst], 0x00)
1002 PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t"
1016 MMI_LWC1(%[ftmp3], %[dst], 0x00)
1018 MMI_SWC1(%[ftmp6], %[dst], 0x00)
1019 PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t"
1033 MMI_LWC1(%[ftmp4], %[dst], 0x00)
1035 MMI_SWC1(%[ftmp6], %[dst], 0x00)
1036 PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t"
1050 MMI_LWC1(%[ftmp5], %[dst], 0x00)
1052 MMI_SWC1(%[ftmp6], %[dst], 0x00)
1053 PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t"
1067 MMI_LWC1(%[ftmp0], %[dst], 0x00)
1069 MMI_SWC1(%[ftmp6], %[dst], 0x00)
1070 PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t"
1084 MMI_LWC1(%[ftmp1], %[dst], 0x00)
1086 MMI_SWC1(%[ftmp6], %[dst], 0x00)
1088 PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t"
1102 MMI_LWC1(%[ftmp2], %[dst], 0x00)
1104 MMI_SWC1(%[ftmp6], %[dst], 0x00)
1105 PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t"
1119 MMI_LWC1(%[ftmp3], %[dst], 0x00)
1121 MMI_SWC1(%[ftmp6], %[dst], 0x00)
1122 PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t"
1136 MMI_LWC1(%[ftmp4], %[dst], 0x00)
1138 MMI_SWC1(%[ftmp6], %[dst], 0x00)
1139 PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t"
1153 MMI_LWC1(%[ftmp5], %[dst], 0x00)
1155 MMI_SWC1(%[ftmp6], %[dst], 0x00)
1156 PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t"
1170 MMI_LWC1(%[ftmp0], %[dst], 0x00)
1172 MMI_SWC1(%[ftmp6], %[dst], 0x00)
1173 PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t"
1187 MMI_LWC1(%[ftmp1], %[dst], 0x00)
1189 MMI_SWC1(%[ftmp6], %[dst], 0x00)
1190 PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t"
1204 MMI_LWC1(%[ftmp2], %[dst], 0x00)
1206 MMI_SWC1(%[ftmp6], %[dst], 0x00)
1207 PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t"
1221 MMI_LWC1(%[ftmp3], %[dst], 0x00)
1223 MMI_SWC1(%[ftmp6], %[dst], 0x00)
1224 PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t"
1234 [src]"+&r"(src), [dst]"+&r"(dst),
1243 dst += 4 - h * dstStride;
1247 static void avg_h264_qpel16_v_lowpass_mmi(uint8_t *dst, const uint8_t *src,
1250 avg_h264_qpel8_v_lowpass_mmi(dst, src, dstStride, srcStride);
1251 avg_h264_qpel8_v_lowpass_mmi(dst+8, src+8, dstStride, srcStride);
1253 dst += 8*dstStride;
1254 avg_h264_qpel8_v_lowpass_mmi(dst, src, dstStride, srcStride);
1255 avg_h264_qpel8_v_lowpass_mmi(dst+8, src+8, dstStride, srcStride);
1258 static void put_h264_qpel4_hv_lowpass_mmi(uint8_t *dst, const uint8_t *src,
1325 op2_put(dst[0*dstStride], (tmp0+tmp1)*20 - (tmpA+tmp2)*5 + (tmpB+tmp3));
1326 op2_put(dst[1*dstStride], (tmp1+tmp2)*20 - (tmp0+tmp3)*5 + (tmpA+tmp4));
1327 op2_put(dst[2*dstStride], (tmp2+tmp3)*20 - (tmp1+tmp4)*5 + (tmp0+tmp5));
1328 op2_put(dst[3*dstStride], (tmp3+tmp4)*20 - (tmp2+tmp5)*5 + (tmp1+tmp6));
1329 dst++;
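
The hv (centre, mc22-family) path applies the six-tap filter twice: first horizontally into 16-bit intermediates (tmpA, tmpB, tmp0..tmp6 above), then vertically over those intermediates. Since neither pass has scaled down yet, the combined gain is 32*32 = 1024, so the final rounding is +512 followed by a shift of 10; op2_put/op2_avg are assumed to do exactly that (plus clipping), as in FFmpeg's generic qpel template. Expanded for the first output line above:

    /* Second (vertical) stage of the hv lowpass, one output pixel.
     * tmp* are 16-bit horizontal-pass results that still carry a gain of 32. */
    int v = (tmp0 + tmp1) * 20 - (tmpA + tmp2) * 5 + (tmpB + tmp3);
    dst[0 * dstStride] = av_clip_uint8((v + 512) >> 10);   /* op2_put           */
    /* op2_avg additionally rounding-averages the clipped value with dst[...]. */
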
1579 static void put_h264_qpel8or16_hv2_lowpass_mmi(uint8_t *dst,
1628 MMI_SDC1(%[ftmp0], %[dst], 0x00)
1630 PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t"
1639 [tmp]"+&r"(tmp), [dst]"+&r"(dst),
1646 dst += 8 - size * dstStride;
1650 static void put_h264_qpel8or16_hv_lowpass_mmi(uint8_t *dst, int16_t *tmp,
1655 put_h264_qpel8or16_hv2_lowpass_mmi(dst, tmp, dstStride, tmpStride, size);
1658 static void put_h264_qpel8_hv_lowpass_mmi(uint8_t *dst, int16_t *tmp,
1662 put_h264_qpel8or16_hv_lowpass_mmi(dst, tmp, src, dstStride, tmpStride,
1666 static void put_h264_qpel16_hv_lowpass_mmi(uint8_t *dst, int16_t *tmp,
1670 put_h264_qpel8or16_hv_lowpass_mmi(dst, tmp, src, dstStride, tmpStride,
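
put_h264_qpel8or16_hv_lowpass_mmi splits the work into two helpers: a first pass that writes only the int16_t tmp plane (and therefore never touches dst, which is why it is absent from this listing) and the hv2 pass above that filters tmp vertically into dst. A sketch of the assumed call sequence (the hv1 name is an assumption inferred from the hv2 name that does appear):

    /* Assumed driver structure:
     *   hv1: horizontal six-tap, src -> int16_t tmp  (no rounding yet)
     *   hv2: vertical   six-tap, tmp -> uint8_t dst  (+512 >> 10, clip)      */
    put_h264_qpel8or16_hv1_lowpass_mmi(tmp, src, tmpStride, srcStride, size); /* assumed name */
    put_h264_qpel8or16_hv2_lowpass_mmi(dst, tmp, dstStride, tmpStride, size);
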
1674 static void put_h264_qpel8_h_lowpass_l2_mmi(uint8_t *dst, const uint8_t *src,
1729 MMI_SDC1(%[ftmp1], %[dst], 0x00)
1730 PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t"
1741 [src]"+&r"(src), [dst]"+&r"(dst),
1750 static void put_pixels8_l2_shift5_mmi(uint8_t *dst, int16_t *src16,
1776 MMI_SDC1(%[ftmp0], %[dst], 0x00)
1777 MMI_SDXC1(%[ftmp2], %[dst], %[dstStride], 0x00)
1786 [dst]"r"(dst),
1794 dst += 2 * dstStride;
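
put/avg_pixels*_l2_shift5 blend a 16-bit intermediate plane with an 8-bit one: each int16 sample is arithmetically shifted right by 5 (the intermediates are assumed to already include the rounding bias from the pass that produced them), clipped to 8 bits, then combined with the src8 sample by a rounding byte average (the avg variant also folds in the existing dst pixel). A rough scalar sketch under those assumptions:

    /* Assumed per-row semantics of put_pixels8_l2_shift5. */
    for (int x = 0; x < 8; x++) {
        int a  = av_clip_uint8(src16[x] >> 5);        /* scale 16-bit plane down */
        dst[x] = (uint8_t)((a + src8[x] + 1) >> 1);   /* rounding average        */
    }
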
1798 static void put_h264_qpel16_h_lowpass_l2_mmi(uint8_t *dst, const uint8_t *src,
1801 put_h264_qpel8_h_lowpass_l2_mmi(dst, src, src2, dstStride, src2Stride);
1802 put_h264_qpel8_h_lowpass_l2_mmi(dst + 8, src + 8, src2 + 8, dstStride,
1806 dst += 8 * dstStride;
1809 put_h264_qpel8_h_lowpass_l2_mmi(dst, src, src2, dstStride, src2Stride);
1810 put_h264_qpel8_h_lowpass_l2_mmi(dst + 8, src + 8, src2 + 8, dstStride,
1814 static void put_pixels16_l2_shift5_mmi(uint8_t *dst, int16_t *src16,
1817 put_pixels8_l2_shift5_mmi(dst, src16, src8, dstStride, src8Stride, h);
1818 put_pixels8_l2_shift5_mmi(dst + 8, src16 + 8, src8 + 8, dstStride,
1822 static void avg_h264_qpel4_hv_lowpass_mmi(uint8_t *dst, const uint8_t *src,
1889 op2_avg(dst[0*dstStride], (tmp0+tmp1)*20 - (tmpA+tmp2)*5 + (tmpB+tmp3));
1890 op2_avg(dst[1*dstStride], (tmp1+tmp2)*20 - (tmp0+tmp3)*5 + (tmpA+tmp4));
1891 op2_avg(dst[2*dstStride], (tmp2+tmp3)*20 - (tmp1+tmp4)*5 + (tmp0+tmp5));
1892 op2_avg(dst[3*dstStride], (tmp3+tmp4)*20 - (tmp2+tmp5)*5 + (tmp1+tmp6));
1893 dst++;
1898 static void avg_h264_qpel8or16_hv2_lowpass_mmi(uint8_t *dst,
1945 MMI_LDC1(%[ftmp6], %[dst], 0x00)
1947 MMI_SDC1(%[ftmp0], %[dst], 0x00)
1950 PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t"
1960 [tmp]"+&r"(tmp), [dst]"+&r"(dst),
1967 dst += 8 - size * dstStride;
1971 static void avg_h264_qpel8or16_hv_lowpass_mmi(uint8_t *dst, int16_t *tmp,
1976 avg_h264_qpel8or16_hv2_lowpass_mmi(dst, tmp, dstStride, tmpStride, size);
1979 static void avg_h264_qpel8_hv_lowpass_mmi(uint8_t *dst, int16_t *tmp,
1983 avg_h264_qpel8or16_hv_lowpass_mmi(dst, tmp, src, dstStride, tmpStride,
1987 static void avg_h264_qpel16_hv_lowpass_mmi(uint8_t *dst, int16_t *tmp,
1991 avg_h264_qpel8or16_hv_lowpass_mmi(dst, tmp, src, dstStride, tmpStride,
1995 static void avg_h264_qpel8_h_lowpass_l2_mmi(uint8_t *dst, const uint8_t *src,
2047 MMI_LDC1(%[ftmp9], %[dst], 0x00)
2051 MMI_SDC1(%[ftmp1], %[dst], 0x00)
2053 PTR_ADDU "%[dst], %[dst], %[dstStride] \n\t"
2064 [dst]"+&r"(dst), [src]"+&r"(src),
2073 static void avg_h264_qpel16_h_lowpass_l2_mmi(uint8_t *dst, const uint8_t *src,
2076 avg_h264_qpel8_h_lowpass_l2_mmi(dst, src, src2, dstStride, src2Stride);
2077 avg_h264_qpel8_h_lowpass_l2_mmi(dst + 8, src + 8, src2 + 8, dstStride,
2081 dst += 8 * dstStride;
2084 avg_h264_qpel8_h_lowpass_l2_mmi(dst, src, src2, dstStride, src2Stride);
2085 avg_h264_qpel8_h_lowpass_l2_mmi(dst + 8, src + 8, src2 + 8, dstStride,
2089 static void avg_pixels8_l2_shift5_mmi(uint8_t *dst, int16_t *src16,
2115 MMI_LDC1(%[ftmp7], %[dst], 0x00)
2117 MMI_SDC1(%[ftmp0], %[dst], 0x00)
2118 MMI_LDXC1(%[ftmp7], %[dst], %[dstStride], 0x00)
2120 MMI_SDXC1(%[ftmp2], %[dst], %[dstStride], 0x00)
2129 [dst]"r"(dst),
2137 dst += 2 * dstStride;
2141 static void avg_pixels16_l2_shift5_mmi(uint8_t *dst, int16_t *src16,
2144 avg_pixels8_l2_shift5_mmi(dst, src16, src8, dstStride, src8Stride, b);
2145 avg_pixels8_l2_shift5_mmi(dst + 8, src16 + 8, src8 + 8, dstStride,
2150 void ff_put_h264_qpel4_mc00_mmi(uint8_t *dst, const uint8_t *src,
2153 ff_put_pixels4_8_mmi(dst, src, stride, 4);
2156 void ff_put_h264_qpel4_mc10_mmi(uint8_t *dst, const uint8_t *src,
2161 ff_put_pixels4_l2_8_mmi(dst, src, half, stride, stride, 4, 4);
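
The ff_*_h264_qpel*_mcXY entry points that follow use the H.264 quarter-sample scheme: X and Y are the fractional offsets, in quarter-pel units, in x and y. mc10/mc30, for example, build the horizontal half-pel plane with the lowpass filter and then blend it with the integer samples (src, or src+1 for mc30) via ff_put_pixels4_l2_8_mmi, a per-byte rounding average of two planes. Sketched in scalar form with the names from the call above:

    /* Final blend for the mc10 case: average integer-pel src with the
     * half-pel plane `half` (4x4, stride 4), writing a 4x4 block to dst. */
    for (int y = 0; y < 4; y++) {
        for (int x = 0; x < 4; x++)
            dst[x] = (uint8_t)((src[x] + half[y * 4 + x] + 1) >> 1);
        dst += stride;
        src += stride;
    }
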
2164 void ff_put_h264_qpel4_mc20_mmi(uint8_t *dst, const uint8_t *src,
2167 put_h264_qpel4_h_lowpass_mmi(dst, src, stride, stride);
2170 void ff_put_h264_qpel4_mc30_mmi(uint8_t *dst, const uint8_t *src,
2175 ff_put_pixels4_l2_8_mmi(dst, src+1, half, stride, stride, 4, 4);
2178 void ff_put_h264_qpel4_mc01_mmi(uint8_t *dst, const uint8_t *src,
2186 ff_put_pixels4_l2_8_mmi(dst, full_mid, half, stride, 4, 4, 4);
2189 void ff_put_h264_qpel4_mc02_mmi(uint8_t *dst, const uint8_t *src,
2195 put_h264_qpel4_v_lowpass_mmi(dst, full_mid, stride, 4);
2198 void ff_put_h264_qpel4_mc03_mmi(uint8_t *dst, const uint8_t *src,
2206 ff_put_pixels4_l2_8_mmi(dst, full_mid+4, half, stride, 4, 4, 4);
2209 void ff_put_h264_qpel4_mc11_mmi(uint8_t *dst, const uint8_t *src,
2219 ff_put_pixels4_l2_8_mmi(dst, halfH, halfV, stride, 4, 4, 4);
2222 void ff_put_h264_qpel4_mc31_mmi(uint8_t *dst, const uint8_t *src,
2232 ff_put_pixels4_l2_8_mmi(dst, halfH, halfV, stride, 4, 4, 4);
2235 void ff_put_h264_qpel4_mc13_mmi(uint8_t *dst, const uint8_t *src,
2245 ff_put_pixels4_l2_8_mmi(dst, halfH, halfV, stride, 4, 4, 4);
2248 void ff_put_h264_qpel4_mc33_mmi(uint8_t *dst, const uint8_t *src,
2258 ff_put_pixels4_l2_8_mmi(dst, halfH, halfV, stride, 4, 4, 4);
2261 void ff_put_h264_qpel4_mc22_mmi(uint8_t *dst, const uint8_t *src,
2264 put_h264_qpel4_hv_lowpass_mmi(dst, src, stride, stride);
2267 void ff_put_h264_qpel4_mc21_mmi(uint8_t *dst, const uint8_t *src,
2274 ff_put_pixels4_l2_8_mmi(dst, halfH, halfHV, stride, 4, 4, 4);
2277 void ff_put_h264_qpel4_mc23_mmi(uint8_t *dst, const uint8_t *src,
2284 ff_put_pixels4_l2_8_mmi(dst, halfH, halfHV, stride, 4, 4, 4);
2287 void ff_put_h264_qpel4_mc12_mmi(uint8_t *dst, const uint8_t *src,
2297 ff_put_pixels4_l2_8_mmi(dst, halfV, halfHV, stride, 4, 4, 4);
2300 void ff_put_h264_qpel4_mc32_mmi(uint8_t *dst, const uint8_t *src,
2310 ff_put_pixels4_l2_8_mmi(dst, halfV, halfHV, stride, 4, 4, 4);
2314 void ff_avg_h264_qpel4_mc00_mmi(uint8_t *dst, const uint8_t *src,
2317 ff_avg_pixels4_8_mmi(dst, src, stride, 4);
2320 void ff_avg_h264_qpel4_mc10_mmi(uint8_t *dst, const uint8_t *src,
2325 ff_avg_pixels4_l2_8_mmi(dst, src, half, stride, stride, 4, 4);
2328 void ff_avg_h264_qpel4_mc20_mmi(uint8_t *dst, const uint8_t *src,
2331 avg_h264_qpel4_h_lowpass_mmi(dst, src, stride, stride);
2334 void ff_avg_h264_qpel4_mc30_mmi(uint8_t *dst, const uint8_t *src,
2339 ff_avg_pixels4_l2_8_mmi(dst, src+1, half, stride, stride, 4, 4);
2342 void ff_avg_h264_qpel4_mc01_mmi(uint8_t *dst, const uint8_t *src,
2350 ff_avg_pixels4_l2_8_mmi(dst, full_mid, half, stride, 4, 4, 4);
2353 void ff_avg_h264_qpel4_mc02_mmi(uint8_t *dst, const uint8_t *src,
2359 avg_h264_qpel4_v_lowpass_mmi(dst, full_mid, stride, 4);
2362 void ff_avg_h264_qpel4_mc03_mmi(uint8_t *dst, const uint8_t *src,
2370 ff_avg_pixels4_l2_8_mmi(dst, full_mid+4, half, stride, 4, 4, 4);
2373 void ff_avg_h264_qpel4_mc11_mmi(uint8_t *dst, const uint8_t *src,
2383 ff_avg_pixels4_l2_8_mmi(dst, halfH, halfV, stride, 4, 4, 4);
2386 void ff_avg_h264_qpel4_mc31_mmi(uint8_t *dst, const uint8_t *src,
2396 ff_avg_pixels4_l2_8_mmi(dst, halfH, halfV, stride, 4, 4, 4);
2399 void ff_avg_h264_qpel4_mc13_mmi(uint8_t *dst, const uint8_t *src,
2409 ff_avg_pixels4_l2_8_mmi(dst, halfH, halfV, stride, 4, 4, 4);
2412 void ff_avg_h264_qpel4_mc33_mmi(uint8_t *dst, const uint8_t *src,
2422 ff_avg_pixels4_l2_8_mmi(dst, halfH, halfV, stride, 4, 4, 4);
2425 void ff_avg_h264_qpel4_mc22_mmi(uint8_t *dst, const uint8_t *src,
2428 avg_h264_qpel4_hv_lowpass_mmi(dst, src, stride, stride);
2431 void ff_avg_h264_qpel4_mc21_mmi(uint8_t *dst, const uint8_t *src,
2438 ff_avg_pixels4_l2_8_mmi(dst, halfH, halfHV, stride, 4, 4, 4);
2441 void ff_avg_h264_qpel4_mc23_mmi(uint8_t *dst, const uint8_t *src,
2448 ff_avg_pixels4_l2_8_mmi(dst, halfH, halfHV, stride, 4, 4, 4);
2451 void ff_avg_h264_qpel4_mc12_mmi(uint8_t *dst, const uint8_t *src,
2461 ff_avg_pixels4_l2_8_mmi(dst, halfV, halfHV, stride, 4, 4, 4);
2464 void ff_avg_h264_qpel4_mc32_mmi(uint8_t *dst, const uint8_t *src,
2474 ff_avg_pixels4_l2_8_mmi(dst, halfV, halfHV, stride, 4, 4, 4);
2478 void ff_put_h264_qpel8_mc00_mmi(uint8_t *dst, const uint8_t *src,
2481 ff_put_pixels8_8_mmi(dst, src, stride, 8);
2484 void ff_put_h264_qpel8_mc10_mmi(uint8_t *dst, const uint8_t *src,
2489 ff_put_pixels8_l2_8_mmi(dst, src, half, stride, stride, 8, 8);
2492 void ff_put_h264_qpel8_mc20_mmi(uint8_t *dst, const uint8_t *src,
2495 put_h264_qpel8_h_lowpass_mmi(dst, src, stride, stride);
2498 void ff_put_h264_qpel8_mc30_mmi(uint8_t *dst, const uint8_t *src,
2503 ff_put_pixels8_l2_8_mmi(dst, src+1, half, stride, stride, 8, 8);
2506 void ff_put_h264_qpel8_mc01_mmi(uint8_t *dst, const uint8_t *src,
2514 ff_put_pixels8_l2_8_mmi(dst, full_mid, half, stride, 8, 8, 8);
2517 void ff_put_h264_qpel8_mc02_mmi(uint8_t *dst, const uint8_t *src,
2523 put_h264_qpel8_v_lowpass_mmi(dst, full_mid, stride, 8);
2526 void ff_put_h264_qpel8_mc03_mmi(uint8_t *dst, const uint8_t *src,
2534 ff_put_pixels8_l2_8_mmi(dst, full_mid+8, half, stride, 8, 8, 8);
2537 void ff_put_h264_qpel8_mc11_mmi(uint8_t *dst, const uint8_t *src,
2547 ff_put_pixels8_l2_8_mmi(dst, halfH, halfV, stride, 8, 8, 8);
2550 void ff_put_h264_qpel8_mc31_mmi(uint8_t *dst, const uint8_t *src,
2560 ff_put_pixels8_l2_8_mmi(dst, halfH, halfV, stride, 8, 8, 8);
2563 void ff_put_h264_qpel8_mc13_mmi(uint8_t *dst, const uint8_t *src,
2573 ff_put_pixels8_l2_8_mmi(dst, halfH, halfV, stride, 8, 8, 8);
2576 void ff_put_h264_qpel8_mc33_mmi(uint8_t *dst, const uint8_t *src,
2586 ff_put_pixels8_l2_8_mmi(dst, halfH, halfV, stride, 8, 8, 8);
2589 void ff_put_h264_qpel8_mc22_mmi(uint8_t *dst, const uint8_t *src,
2594 put_h264_qpel8_hv_lowpass_mmi(dst, temp, src, stride, 8, stride);
2597 void ff_put_h264_qpel8_mc21_mmi(uint8_t *dst, const uint8_t *src,
2605 put_h264_qpel8_h_lowpass_l2_mmi(dst, src, halfHV, stride, 8);
2608 void ff_put_h264_qpel8_mc23_mmi(uint8_t *dst, const uint8_t *src,
2616 put_h264_qpel8_h_lowpass_l2_mmi(dst, src + stride, halfHV, stride, 8);
2619 void ff_put_h264_qpel8_mc12_mmi(uint8_t *dst, const uint8_t *src,
2627 put_pixels8_l2_shift5_mmi(dst, halfV + 2, halfHV, stride, 8, 8);
2630 void ff_put_h264_qpel8_mc32_mmi(uint8_t *dst, const uint8_t *src,
2638 put_pixels8_l2_shift5_mmi(dst, halfV + 3, halfHV, stride, 8, 8);
2642 void ff_avg_h264_qpel8_mc00_mmi(uint8_t *dst, const uint8_t *src,
2645 ff_avg_pixels8_8_mmi(dst, src, stride, 8);
2648 void ff_avg_h264_qpel8_mc10_mmi(uint8_t *dst, const uint8_t *src,
2653 ff_avg_pixels8_l2_8_mmi(dst, src, half, stride, stride, 8, 8);
2656 void ff_avg_h264_qpel8_mc20_mmi(uint8_t *dst, const uint8_t *src,
2659 avg_h264_qpel8_h_lowpass_mmi(dst, src, stride, stride);
2662 void ff_avg_h264_qpel8_mc30_mmi(uint8_t *dst, const uint8_t *src,
2667 ff_avg_pixels8_l2_8_mmi(dst, src+1, half, stride, stride, 8, 8);
2670 void ff_avg_h264_qpel8_mc01_mmi(uint8_t *dst, const uint8_t *src,
2678 ff_avg_pixels8_l2_8_mmi(dst, full_mid, half, stride, 8, 8, 8);
2681 void ff_avg_h264_qpel8_mc02_mmi(uint8_t *dst, const uint8_t *src,
2687 avg_h264_qpel8_v_lowpass_mmi(dst, full_mid, stride, 8);
2690 void ff_avg_h264_qpel8_mc03_mmi(uint8_t *dst, const uint8_t *src,
2698 ff_avg_pixels8_l2_8_mmi(dst, full_mid+8, half, stride, 8, 8, 8);
2701 void ff_avg_h264_qpel8_mc11_mmi(uint8_t *dst, const uint8_t *src,
2711 ff_avg_pixels8_l2_8_mmi(dst, halfH, halfV, stride, 8, 8, 8);
2714 void ff_avg_h264_qpel8_mc31_mmi(uint8_t *dst, const uint8_t *src,
2724 ff_avg_pixels8_l2_8_mmi(dst, halfH, halfV, stride, 8, 8, 8);
2727 void ff_avg_h264_qpel8_mc13_mmi(uint8_t *dst, const uint8_t *src,
2737 ff_avg_pixels8_l2_8_mmi(dst, halfH, halfV, stride, 8, 8, 8);
2740 void ff_avg_h264_qpel8_mc33_mmi(uint8_t *dst, const uint8_t *src,
2750 ff_avg_pixels8_l2_8_mmi(dst, halfH, halfV, stride, 8, 8, 8);
2753 void ff_avg_h264_qpel8_mc22_mmi(uint8_t *dst, const uint8_t *src,
2758 avg_h264_qpel8_hv_lowpass_mmi(dst, temp, src, stride, 8, stride);
2761 void ff_avg_h264_qpel8_mc21_mmi(uint8_t *dst, const uint8_t *src,
2769 avg_h264_qpel8_h_lowpass_l2_mmi(dst, src, halfHV, stride, 8);
2772 void ff_avg_h264_qpel8_mc23_mmi(uint8_t *dst, const uint8_t *src,
2780 avg_h264_qpel8_h_lowpass_l2_mmi(dst, src + stride, halfHV, stride, 8);
2783 void ff_avg_h264_qpel8_mc12_mmi(uint8_t *dst, const uint8_t *src,
2791 avg_pixels8_l2_shift5_mmi(dst, halfV + 2, halfHV, stride, 8, 8);
2794 void ff_avg_h264_qpel8_mc32_mmi(uint8_t *dst, const uint8_t *src,
2802 avg_pixels8_l2_shift5_mmi(dst, halfV + 3, halfHV, stride, 8, 8);
2806 void ff_put_h264_qpel16_mc00_mmi(uint8_t *dst, const uint8_t *src,
2809 ff_put_pixels16_8_mmi(dst, src, stride, 16);
2812 void ff_put_h264_qpel16_mc10_mmi(uint8_t *dst, const uint8_t *src,
2817 ff_put_pixels16_l2_8_mmi(dst, src, half, stride, stride, 16, 16);
2820 void ff_put_h264_qpel16_mc20_mmi(uint8_t *dst, const uint8_t *src,
2823 put_h264_qpel16_h_lowpass_mmi(dst, src, stride, stride);
2826 void ff_put_h264_qpel16_mc30_mmi(uint8_t *dst, const uint8_t *src,
2831 ff_put_pixels16_l2_8_mmi(dst, src+1, half, stride, stride, 16, 16);
2834 void ff_put_h264_qpel16_mc01_mmi(uint8_t *dst, const uint8_t *src,
2842 ff_put_pixels16_l2_8_mmi(dst, full_mid, half, stride, 16, 16, 16);
2845 void ff_put_h264_qpel16_mc02_mmi(uint8_t *dst, const uint8_t *src,
2851 put_h264_qpel16_v_lowpass_mmi(dst, full_mid, stride, 16);
2854 void ff_put_h264_qpel16_mc03_mmi(uint8_t *dst, const uint8_t *src,
2862 ff_put_pixels16_l2_8_mmi(dst, full_mid+16, half, stride, 16, 16, 16);
2865 void ff_put_h264_qpel16_mc11_mmi(uint8_t *dst, const uint8_t *src,
2875 ff_put_pixels16_l2_8_mmi(dst, halfH, halfV, stride, 16, 16, 16);
2878 void ff_put_h264_qpel16_mc31_mmi(uint8_t *dst, const uint8_t *src,
2888 ff_put_pixels16_l2_8_mmi(dst, halfH, halfV, stride, 16, 16, 16);
2891 void ff_put_h264_qpel16_mc13_mmi(uint8_t *dst, const uint8_t *src,
2901 ff_put_pixels16_l2_8_mmi(dst, halfH, halfV, stride, 16, 16, 16);
2904 void ff_put_h264_qpel16_mc33_mmi(uint8_t *dst, const uint8_t *src,
2914 ff_put_pixels16_l2_8_mmi(dst, halfH, halfV, stride, 16, 16, 16);
2917 void ff_put_h264_qpel16_mc22_mmi(uint8_t *dst, const uint8_t *src,
2922 put_h264_qpel16_hv_lowpass_mmi(dst, temp, src, stride, 16, stride);
2925 void ff_put_h264_qpel16_mc21_mmi(uint8_t *dst, const uint8_t *src,
2933 put_h264_qpel16_h_lowpass_l2_mmi(dst, src, halfHV, stride, 16);
2936 void ff_put_h264_qpel16_mc23_mmi(uint8_t *dst, const uint8_t *src,
2944 put_h264_qpel16_h_lowpass_l2_mmi(dst, src + stride, halfHV, stride, 16);
2947 void ff_put_h264_qpel16_mc12_mmi(uint8_t *dst, const uint8_t *src,
2955 put_pixels16_l2_shift5_mmi(dst, halfV + 2, halfHV, stride, 16, 16);
2958 void ff_put_h264_qpel16_mc32_mmi(uint8_t *dst, const uint8_t *src,
2966 put_pixels16_l2_shift5_mmi(dst, halfV + 3, halfHV, stride, 16, 16);
2970 void ff_avg_h264_qpel16_mc00_mmi(uint8_t *dst, const uint8_t *src,
2973 ff_avg_pixels16_8_mmi(dst, src, stride, 16);
2976 void ff_avg_h264_qpel16_mc10_mmi(uint8_t *dst, const uint8_t *src,
2981 ff_avg_pixels16_l2_8_mmi(dst, src, half, stride, stride, 16, 16);
2984 void ff_avg_h264_qpel16_mc20_mmi(uint8_t *dst, const uint8_t *src,
2987 avg_h264_qpel16_h_lowpass_mmi(dst, src, stride, stride);
2990 void ff_avg_h264_qpel16_mc30_mmi(uint8_t *dst, const uint8_t *src,
2995 ff_avg_pixels16_l2_8_mmi(dst, src+1, half, stride, stride, 16, 16);
2998 void ff_avg_h264_qpel16_mc01_mmi(uint8_t *dst, const uint8_t *src,
3006 ff_avg_pixels16_l2_8_mmi(dst, full_mid, half, stride, 16, 16, 16);
3009 void ff_avg_h264_qpel16_mc02_mmi(uint8_t *dst, const uint8_t *src,
3015 avg_h264_qpel16_v_lowpass_mmi(dst, full_mid, stride, 16);
3018 void ff_avg_h264_qpel16_mc03_mmi(uint8_t *dst, const uint8_t *src,
3026 ff_avg_pixels16_l2_8_mmi(dst, full_mid+16, half, stride, 16, 16, 16);
3029 void ff_avg_h264_qpel16_mc11_mmi(uint8_t *dst, const uint8_t *src,
3039 ff_avg_pixels16_l2_8_mmi(dst, halfH, halfV, stride, 16, 16, 16);
3042 void ff_avg_h264_qpel16_mc31_mmi(uint8_t *dst, const uint8_t *src,
3052 ff_avg_pixels16_l2_8_mmi(dst, halfH, halfV, stride, 16, 16, 16);
3055 void ff_avg_h264_qpel16_mc13_mmi(uint8_t *dst, const uint8_t *src,
3065 ff_avg_pixels16_l2_8_mmi(dst, halfH, halfV, stride, 16, 16, 16);
3068 void ff_avg_h264_qpel16_mc33_mmi(uint8_t *dst, const uint8_t *src,
3078 ff_avg_pixels16_l2_8_mmi(dst, halfH, halfV, stride, 16, 16, 16);
3081 void ff_avg_h264_qpel16_mc22_mmi(uint8_t *dst, const uint8_t *src,
3086 avg_h264_qpel16_hv_lowpass_mmi(dst, temp, src, stride, 16, stride);
3089 void ff_avg_h264_qpel16_mc21_mmi(uint8_t *dst, const uint8_t *src,
3097 avg_h264_qpel16_h_lowpass_l2_mmi(dst, src, halfHV, stride, 16);
3100 void ff_avg_h264_qpel16_mc23_mmi(uint8_t *dst, const uint8_t *src,
3108 avg_h264_qpel16_h_lowpass_l2_mmi(dst, src + stride, halfHV, stride, 16);
3111 void ff_avg_h264_qpel16_mc12_mmi(uint8_t *dst, const uint8_t *src,
3119 avg_pixels16_l2_shift5_mmi(dst, halfV + 2, halfHV, stride, 16, 16);
3122 void ff_avg_h264_qpel16_mc32_mmi(uint8_t *dst, const uint8_t *src,
3130 avg_pixels16_l2_shift5_mmi(dst, halfV + 3, halfHV, stride, 16, 16);