Lines Matching refs:src (FFmpeg, libavcodec/mips/vc1dsp_mmi.c)
479 int16_t *src = block;
499 MMI_LDC1(%[ftmp1], %[src], 0x00)
500 MMI_LDC1(%[ftmp2], %[src], 0x08)
580 PTR_ADDIU "%[src], %[src], 0x10 \n\t"
592 [src]"+&r"(src), [dst]"+&r"(dst), [count]"+&r"(count)
597 src = block;
607 MMI_LDC1(%[ftmp1], %[src], 0x00)
608 MMI_LDC1(%[ftmp2], %[src], 0x10)
609 MMI_LDC1(%[ftmp3], %[src], 0x20)
610 MMI_LDC1(%[ftmp4], %[src], 0x30)
731 MMI_LDC1(%[ftmp1], %[src], 0x08)
732 MMI_LDC1(%[ftmp2], %[src], 0x18)
733 MMI_LDC1(%[ftmp3], %[src], 0x28)
734 MMI_LDC1(%[ftmp4], %[src], 0x38)
862 [src]"r"(src), [dest]"r"(dest), [linesize]"r"(linesize)
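The byte offsets in the loads above follow directly from the coefficient layout: the transform works on an 8x8 block of int16_t, so one row of 8 coefficients occupies 16 (0x10) bytes and each MMI_LDC1 brings in 8 bytes, i.e. half a row. The row pass therefore reads +0x00/+0x08 (the two halves of one row, advancing src by 0x10 per iteration, line 580), while the column pass reads the same half of four consecutive rows at +0x00, 0x10, 0x20, 0x30 and their right halves at +0x08, 0x18, 0x28, 0x38. A minimal sketch of that addressing arithmetic (the helper name is illustrative, not from the file):

    #include <stdint.h>

    /* one row = 8 coefficients * sizeof(int16_t) = 16 (0x10) bytes,
     * which is why the column-pass loads step by 0x10 per row */
    static const int16_t *coeff_row(const int16_t *block, int row)
    {
        return block + 8 * row;            /* byte offset: row * 0x10 */
    }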
946 int16_t *src = block;
967 MMI_LDC1(%[ftmp1], %[src], 0x00)
987 PTR_ADDIU "%[src], %[src], 0x10 \n\t"
998 [src]"+&r"(src), [dst]"+&r"(dst)
1003 src = block;
1010 MMI_LDC1(%[ftmp1], %[src], 0x00)
1011 MMI_LDC1(%[ftmp2], %[src], 0x20)
1012 MMI_LDC1(%[ftmp3], %[src], 0x40)
1013 MMI_LDC1(%[ftmp4], %[src], 0x60)
1019 MMI_LDC1(%[ftmp1], %[src], 0x10)
1020 MMI_LDC1(%[ftmp2], %[src], 0x30)
1021 MMI_LDC1(%[ftmp3], %[src], 0x50)
1022 MMI_LDC1(%[ftmp4], %[src], 0x70)
1117 [src]"r"(src), [dest]"r"(dest), [linesize]"r"(linesize)
1176 int16_t *src = block;
1195 MMI_LDC1(%[ftmp1], %[src], 0x00)
1215 PTR_ADDIU "%[src], %[src], 0x10 \n\t"
1226 [src]"+&r"(src), [dst]"+&r"(dst)
1231 src = block;
1240 MMI_LDC1(%[ftmp1], %[src], 0x00)
1241 MMI_LDC1(%[ftmp2], %[src], 0x10)
1242 MMI_LDC1(%[ftmp3], %[src], 0x20)
1243 MMI_LDC1(%[ftmp4], %[src], 0x30)
1372 [src]"r"(src), [dest]"r"(dest), [linesize]"r"(linesize)
1378 void ff_vc1_h_overlap_mmi(uint8_t *src, ptrdiff_t stride)
1385 a = src[-2];
1386 b = src[-1];
1387 c = src[0];
1388 d = src[1];
1392 src[-2] = a - d1;
1393 src[-1] = av_clip_uint8(b - d2);
1394 src[0] = av_clip_uint8(c + d2);
1395 src[1] = d + d1;
1396 src += stride;
1431 void ff_vc1_v_overlap_mmi(uint8_t *src, ptrdiff_t stride)
1438 a = src[-2 * stride];
1439 b = src[-stride];
1440 c = src[0];
1441 d = src[stride];
1445 src[-2 * stride] = a - d1;
1446 src[-stride] = av_clip_uint8(b - d2);
1447 src[0] = av_clip_uint8(c + d2);
1448 src[stride] = d + d1;
1449 src++;
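In both the horizontal (line 1378) and vertical (line 1431) overlap filters the smoothing deltas d1 and d2 are computed on lines that do not reference src and so are not matched above. A sketch of one complete smoothing step, assuming the standard VC-1 overlap-smoothing formula used by FFmpeg's C reference; the helper below is hypothetical, av_clip_uint8 comes from libavutil, and rnd alternates 1, 0, 1, ... from one row or column to the next:

    #include <stdint.h>
    #include <stddef.h>
    #include "libavutil/common.h"   /* av_clip_uint8() */

    /* p points at the pixel just past the block edge; pitch is 1 for the
     * horizontal filter and stride for the vertical one */
    static void overlap_smooth_step(uint8_t *p, ptrdiff_t pitch, int rnd)
    {
        int a = p[-2 * pitch], b = p[-1 * pitch], c = p[0], d = p[1 * pitch];
        int d1 = (a - d + 3 + rnd) >> 3;
        int d2 = (a - d + b - c + 4 - rnd) >> 3;

        p[-2 * pitch] = a - d1;                 /* outer pair moves by d1          */
        p[-1 * pitch] = av_clip_uint8(b - d2);  /* inner pair moves by d2, clamped */
        p[0]          = av_clip_uint8(c + d2);
        p[1 * pitch]  = d + d1;
    }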
1482 * @param src pointer to the pixels straddling the block edge to filter
1488 static av_always_inline int vc1_filter_line(uint8_t *src, int stride, int pq)
1490 int a0 = (2 * (src[-2 * stride] - src[1 * stride]) -
1491 5 * (src[-1 * stride] - src[0 * stride]) + 4) >> 3;
1496 int a1 = FFABS((2 * (src[-4 * stride] - src[-1 * stride]) -
1497 5 * (src[-3 * stride] - src[-2 * stride]) + 4) >> 3);
1498 int a2 = FFABS((2 * (src[ 0 * stride] - src[ 3 * stride]) -
1499 5 * (src[ 1 * stride] - src[ 2 * stride]) + 4) >> 3);
1501 int clip = src[-1 * stride] - src[0 * stride];
1518 src[-1 * stride] = av_clip_uint8(src[-1 * stride] - d);
1519 src[ 0 * stride] = av_clip_uint8(src[ 0 * stride] + d);
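Lines 1490-1501 compute the edge metrics (a0 across the edge being filtered, a1 and a2 on the two neighbouring edges, clip for the step across the edge) and lines 1518-1519 apply the final clamped correction; the decision logic in between does not mention src and so is not matched. A condensed sketch of that middle part, following FFmpeg's C reference for the VC-1 in-loop filter; the real code uses branch-free sign tricks, a0 here is the signed value from line 1490, and FFABS/FFMIN are libavutil macros:

    if (FFABS(a0) < pq && (a1 < FFABS(a0) || a2 < FFABS(a0)) && clip) {
        int a3 = FFMIN(a1, a2);
        int d  = (5 * (FFABS(a0) - a3)) >> 3;   /* correction magnitude              */
        d = FFMIN(d, FFABS(clip) >> 1);         /* never more than half the step     */
        if ((a0 ^ clip) < 0) {                  /* sign check: correction must       */
            d = clip > 0 ? d : -d;              /* reduce the step across the edge   */
            src[-1 * stride] = av_clip_uint8(src[-1 * stride] - d);
            src[ 0 * stride] = av_clip_uint8(src[ 0 * stride] + d);
        }
    }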
1530 * @param src pointer to the first pixel of the edge to filter
1537 static inline void vc1_loop_filter(uint8_t *src, int step, int stride,
1544 filt3 = vc1_filter_line(src + 2 * step, stride, pq);
1546 vc1_filter_line(src + 0 * step, stride, pq);
1547 vc1_filter_line(src + 1 * step, stride, pq);
1548 vc1_filter_line(src + 3 * step, stride, pq);
1550 src += step * 4;
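The four vc1_filter_line() calls above sit inside a short loop that walks the edge four lines at a time: the third line is filtered first, and its return value decides whether the remaining three are processed at all. A sketch of that loop, matching the structure of the C reference (the loop counter i is assumed; filt3, len, step, stride and pq appear in the listing):

    for (i = 0; i < len; i += 4) {
        filt3 = vc1_filter_line(src + 2 * step, stride, pq);
        if (filt3) {   /* the other three lines are filtered only if line 2 says so */
            vc1_filter_line(src + 0 * step, stride, pq);
            vc1_filter_line(src + 1 * step, stride, pq);
            vc1_filter_line(src + 3 * step, stride, pq);
        }
        src += step * 4;
    }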
1554 void ff_vc1_v_loop_filter4_mmi(uint8_t *src, ptrdiff_t stride, int pq)
1556 vc1_loop_filter(src, 1, stride, 4, pq);
1559 void ff_vc1_h_loop_filter4_mmi(uint8_t *src, ptrdiff_t stride, int pq)
1561 vc1_loop_filter(src, stride, 1, 4, pq);
1564 void ff_vc1_v_loop_filter8_mmi(uint8_t *src, ptrdiff_t stride, int pq)
1566 vc1_loop_filter(src, 1, stride, 8, pq);
1569 void ff_vc1_h_loop_filter8_mmi(uint8_t *src, ptrdiff_t stride, int pq)
1571 vc1_loop_filter(src, stride, 1, 8, pq);
1574 void ff_vc1_v_loop_filter16_mmi(uint8_t *src, ptrdiff_t stride, int pq)
1576 vc1_loop_filter(src, 1, stride, 16, pq);
1579 void ff_vc1_h_loop_filter16_mmi(uint8_t *src, ptrdiff_t stride, int pq)
1581 vc1_loop_filter(src, stride, 1, 16, pq);
1584 void ff_put_vc1_mspel_mc00_mmi(uint8_t *dst, const uint8_t *src,
1587 ff_put_pixels8_8_mmi(dst, src, stride, 8);
1589 void ff_put_vc1_mspel_mc00_16_mmi(uint8_t *dst, const uint8_t *src,
1592 ff_put_pixels16_8_mmi(dst, src, stride, 16);
1594 void ff_avg_vc1_mspel_mc00_mmi(uint8_t *dst, const uint8_t *src,
1597 ff_avg_pixels8_8_mmi(dst, src, stride, 8);
1599 void ff_avg_vc1_mspel_mc00_16_mmi(uint8_t *dst, const uint8_t *src,
1602 ff_avg_pixels16_8_mmi(dst, src, stride, 16);
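The mc00 entry points cover the case of zero fractional motion in both directions, so no interpolation filter is needed and they forward straight to the 8-bit copy/average helpers. A scalar sketch of what the 8x8 put variant amounts to (the real ff_put_pixels8_8_mmi is MMI-vectorised; the helper name below is illustrative):

    #include <string.h>
    #include <stdint.h>
    #include <stddef.h>

    static void put_block8(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
    {
        for (int i = 0; i < 8; i++) {     /* straight 8x8 byte copy */
            memcpy(dst, src, 8);
            dst += stride;
            src += stride;
        }
        /* the avg_ variants instead blend with what is already in dst:
         * dst[x] = (dst[x] + src[x] + 1) >> 1 */
    }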
1642 PTR_ADDU "$9, %[src], %[stride1] \n\t" \
1646 PTR_ADDU "$9, %[src], %[stride] \n\t" \
1654 PTR_ADDU "%[src], %[src], %[stride] \n\t"
1656 /** Sacrificing $f12 makes it possible to pipeline loads from src */
1658 const uint8_t *src, mips_reg stride,
1671 MMI_ULWC1($f4, %[src], 0x00)
1672 PTR_ADDU "%[src], %[src], %[stride] \n\t"
1673 MMI_ULWC1($f6, %[src], 0x00)
1684 PTR_SUBU "%[src], %[src], %[stride2] \n\t"
1689 [src]"+r"(src), [dst]"+r"(dst)
1704 const int16_t *src, int rnd) \
1710 src -= 1; \
1716 MMI_ULDC1($f2, %[src], 0x00) \
1717 MMI_ULDC1($f4, %[src], 0x08) \
1718 MMI_ULDC1($f6, %[src], 0x02) \
1719 MMI_ULDC1($f8, %[src], 0x0a) \
1720 MMI_ULDC1($f0, %[src], 0x06) \
1722 MMI_ULDC1($f0, %[src], 0x0e) \
1724 MMI_ULDC1($f0, %[src], 0x04) \
1726 MMI_ULDC1($f0, %[src], 0x0b) \
1740 PTR_ADDIU "%[src], %[src], 0x18 \n\t" \
1745 [src]"+r"(src), [dst]"+r"(dst) \
1761 static void OPNAME ## vc1_shift2_mmi(uint8_t *dst, const uint8_t *src, \
1775 MMI_ULWC1($f6, %[src], 0x00) \
1776 MMI_ULWC1($f8, %[src], 0x04) \
1777 PTR_ADDU "$9, %[src], %[offset] \n\t" \
1780 PTR_ADDU "%[src], %[src], %[offset] \n\t" \
1787 PTR_ADDU "$9, %[src], %[offset_x2n] \n\t" \
1796 PTR_ADDU "$9, %[src], %[offset] \n\t" \
1810 PTR_ADDU "%[src], %[src], %[stride1] \n\t" \
1814 [src]"+r"(src), [dst]"+r"(dst) \
1839 PTR_ADDU "$9, %[src], "#A1" \n\t" \
1846 PTR_ADDU "$9, %[src], "#A2" \n\t" \
1855 PTR_ADDU "$9, %[src], "#A4" \n\t" \
1866 PTR_ADDU "$9, %[src], "#A3" \n\t" \
1886 vc1_put_ver_16b_ ## NAME ## _mmi(int16_t *dst, const uint8_t *src, \
1896 src -= src_stride; \
1907 PTR_ADDU "$9, %[src], "#A1" \n\t" \
1913 PTR_ADDU "$9, %[src], "#A2" \n\t" \
1918 PTR_ADDU "$9, %[src], "#A3" \n\t" \
1923 PTR_ADDU "$9, %[src], "#A4" \n\t" \
1936 PTR_ADDU "%[src], %[src], %[stride_x1] \n\t" \
1941 [src]"+r"(src), [dst]"+r"(dst) \
1962 const int16_t *src, int rnd) \
1968 src -= 1; \
1985 PTR_ADDU "%[src], %[src], 0x18 \n\t" \
1990 [src]"+r"(src), [dst]"+r"(dst) \
2009 OPNAME ## vc1_## NAME ## _mmi(uint8_t *dst, const uint8_t *src, \
2016 src -= offset; \
2030 PTR_ADDU "%[src], %[src], %[stride] \n\t" \
2035 [src]"+r"(src), [dst]"+r"(dst) \
2062 (int16_t *dst, const uint8_t *src, mips_reg src_stride, int rnd,
2065 (uint8_t *dst, mips_reg dst_stride, const int16_t *src, int rnd);
2067 (uint8_t *dst, const uint8_t *src, mips_reg stride, int rnd,
2075 * @param src Source buffer.
2076 * @param stride Stride for both src and dst buffers.
2082 static void OP ## vc1_mspel_mc(uint8_t *dst, const uint8_t *src, int stride,\
2106 vc1_put_shift_ver_16bits[vmode](tmp, src-1, stride, r, shift); \
2112 vc1_put_shift_8bits[vmode](dst, src, stride, 1-rnd, stride); \
2118 vc1_put_shift_8bits[hmode](dst, src, stride, rnd, 1); \
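Lines 2106, 2112 and 2118 show the three branches of the mspel dispatcher: when both a vertical and a horizontal fractional shift are requested, the block is first filtered vertically into a 16-bit temporary and then horizontally back to 8 bits, while a single shift goes through one 8-bit pass. A sketch of that control flow, using only the calls visible in the listing (the horizontal 16-to-8-bit pass falls between the matched lines, so it is left as a placeholder rather than guessed):

    if (vmode) {                          /* vertical fractional shift requested */
        if (hmode) {                      /* both directions: two passes via tmp */
            vc1_put_shift_ver_16bits[vmode](tmp, src - 1, stride, r, shift);
            /* ... horizontal 16-bit -> 8-bit pass over tmp into dst ... */
        } else {                          /* vertical shift only                 */
            vc1_put_shift_8bits[vmode](dst, src, stride, 1 - rnd, stride);
        }
    } else if (hmode) {                   /* horizontal shift only               */
        vc1_put_shift_8bits[hmode](dst, src, stride, rnd, 1);
    }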
2120 static void OP ## vc1_mspel_mc_16(uint8_t *dst, const uint8_t *src, \
2123 OP ## vc1_mspel_mc(dst + 0, src + 0, stride, hmode, vmode, rnd); \
2124 OP ## vc1_mspel_mc(dst + 8, src + 8, stride, hmode, vmode, rnd); \
2125 dst += 8*stride; src += 8*stride; \
2126 OP ## vc1_mspel_mc(dst + 0, src + 0, stride, hmode, vmode, rnd); \
2127 OP ## vc1_mspel_mc(dst + 8, src + 8, stride, hmode, vmode, rnd); \
2136 const uint8_t *src, \
2140 put_vc1_mspel_mc(dst, src, stride, a, b, rnd); \
2143 const uint8_t *src, \
2147 avg_vc1_mspel_mc(dst, src, stride, a, b, rnd); \
2150 const uint8_t *src, \
2154 put_vc1_mspel_mc_16(dst, src, stride, a, b, rnd); \
2157 const uint8_t *src, \
2161 avg_vc1_mspel_mc_16(dst, src, stride, a, b, rnd); \
2238 uint8_t *src /* align 1 */,
2263 MMI_ULDC1(%[ftmp1], %[src], 0x00)
2264 MMI_ULDC1(%[ftmp2], %[src], 0x01)
2265 PTR_ADDU "%[src], %[src], %[stride] \n\t"
2266 MMI_ULDC1(%[ftmp3], %[src], 0x00)
2267 MMI_ULDC1(%[ftmp4], %[src], 0x01)
2283 [src]"+&r"(src), [dst]"+&r"(dst),
2294 uint8_t *src /* align 1 */,
2319 MMI_ULWC1(%[ftmp1], %[src], 0x00)
2320 MMI_ULWC1(%[ftmp2], %[src], 0x01)
2321 PTR_ADDU "%[src], %[src], %[stride] \n\t"
2322 MMI_ULWC1(%[ftmp3], %[src], 0x00)
2323 MMI_ULWC1(%[ftmp4], %[src], 0x01)
2337 [src]"+&r"(src), [dst]"+&r"(dst),
2348 uint8_t *src /* align 1 */,
2373 MMI_ULDC1(%[ftmp1], %[src], 0x00)
2374 MMI_ULDC1(%[ftmp2], %[src], 0x01)
2375 PTR_ADDU "%[src], %[src], %[stride] \n\t"
2376 MMI_ULDC1(%[ftmp3], %[src], 0x00)
2377 MMI_ULDC1(%[ftmp4], %[src], 0x01)
2396 [src]"+&r"(src), [dst]"+&r"(dst),
2407 uint8_t *src /* align 1 */,
2432 MMI_ULWC1(%[ftmp1], %[src], 0x00)
2433 MMI_ULWC1(%[ftmp2], %[src], 0x01)
2434 PTR_ADDU "%[src], %[src], %[stride] \n\t"
2435 MMI_ULWC1(%[ftmp3], %[src], 0x00)
2436 MMI_ULWC1(%[ftmp4], %[src], 0x01)
2453 [src]"+&r"(src), [dst]"+&r"(dst),