;******************************************************************************
;* SIMD-optimized lossless video encoding functions
;* Copyright (c) 2000, 2001 Fabrice Bellard
;* Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
;*
;* MMX optimization by Nick Kurshev <nickols_k@mail.ru>
;* Conversion to NASM format by Tiancheng "Timothy" Gu <timothygu99@gmail.com>
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************

%include "libavutil/x86/x86util.asm"

cextern pb_80

SECTION .text

; void ff_diff_bytes(uint8_t *dst, const uint8_t *src1, const uint8_t *src2,
;                    intptr_t w);
%macro DIFF_BYTES_PROLOGUE 0
%if ARCH_X86_32
cglobal diff_bytes, 3,5,2, dst, src1, src2
%define wq r4q
    DECLARE_REG_TMP 3
    mov     wq, r3mp
%else
cglobal diff_bytes, 4,5,2, dst, src1, src2, w
    DECLARE_REG_TMP 4
%endif ; ARCH_X86_32
%define i t0q
%endmacro

; %1: label to jump to if w < 2 * regsize (no full-width iterations needed)
; %2: label to jump to if w < 0
%macro DIFF_BYTES_LOOP_PREP 2
    mov     i, wq
    and     i, -2 * regsize
        js  %2
        jz  %1
    add     dstq, i
    add     src1q, i
    add     src2q, i
    neg     i
%endmacro

; mov type used for src1q, dstq, first reg, second reg
%macro DIFF_BYTES_LOOP_CORE 4
%if mmsize != 16
    mov%1   %3, [src1q + i]
    mov%1   %4, [src1q + i + regsize]
    psubb   %3, [src2q + i]
    psubb   %4, [src2q + i + regsize]
    mov%2   [dstq + i], %3
    mov%2   [regsize + dstq + i], %4
%else
    ; SSE enforces alignment of the psubb memory operand, so load src2
    ; into a register with movu instead
    mov%1   %3, [src1q + i]
    movu    %4, [src2q + i]
    psubb   %3, %4
    mov%2   [dstq + i], %3
    mov%1   %3, [src1q + i + regsize]
    movu    %4, [src2q + i + regsize]
    psubb   %3, %4
    mov%2   [regsize + dstq + i], %3
%endif
%endmacro

%macro DIFF_BYTES_BODY 2 ; mov type used for src1q, for dstq
    %define regsize mmsize
.loop_%1%2:
    DIFF_BYTES_LOOP_CORE %1, %2, m0, m1
    add     i, 2 * regsize
        jl  .loop_%1%2
.skip_main_%1%2:
    and     wq, 2 * regsize - 1
        jz  .end_%1%2
%if mmsize > 16
    ; fall back to narrower xmm for the remainder
    %define regsize (mmsize / 2)
    DIFF_BYTES_LOOP_PREP .setup_loop_gpr_aa, .end_aa
.loop2_%1%2:
    DIFF_BYTES_LOOP_CORE %1, %2, xm0, xm1
    add     i, 2 * regsize
        jl  .loop2_%1%2
.setup_loop_gpr_%1%2:
    and     wq, 2 * regsize - 1
        jz  .end_%1%2
%endif
    ; scalar tail: the remaining w % (2 * regsize) bytes, one at a time
    add     dstq, wq
    add     src1q, wq
    add     src2q, wq
    neg     wq
.loop_gpr_%1%2:
    mov     t0b, [src1q + wq]
    sub     t0b, [src2q + wq]
    mov     [dstq + wq], t0b
    inc     wq
        jl  .loop_gpr_%1%2
.end_%1%2:
    REP_RET
%endmacro

INIT_XMM sse2
DIFF_BYTES_PROLOGUE
    %define regsize mmsize
    DIFF_BYTES_LOOP_PREP .skip_main_aa, .end_aa
    ; dispatch on the alignment of dst and src1 (src2 is always read
    ; unaligned): aa, ua or uu loop variants
    test    dstq, regsize - 1
        jnz .loop_uu
    test    src1q, regsize - 1
        jnz .loop_ua
    DIFF_BYTES_BODY    a, a
    DIFF_BYTES_BODY    u, a
    DIFF_BYTES_BODY    u, u
%undef i
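
; For reference, the kernels above implement the same byte-wise difference as
; this minimal C sketch (diff_bytes_ref is a hypothetical name for
; illustration, mirroring the ff_diff_bytes prototype documented above):
;
;     #include <stdint.h> /* uint8_t, intptr_t */
;
;     static void diff_bytes_ref(uint8_t *dst, const uint8_t *src1,
;                                const uint8_t *src2, intptr_t w)
;     {
;         for (intptr_t i = 0; i < w; i++)
;             dst[i] = src1[i] - src2[i]; /* modulo-256 subtraction */
;     }
;
; Each vector iteration covers 2 * regsize bytes; whatever is left over is
; finished by the byte-at-a-time GPR loop in DIFF_BYTES_BODY.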

%if HAVE_AVX2_EXTERNAL
INIT_YMM avx2
DIFF_BYTES_PROLOGUE
    %define regsize mmsize
    ; Directly using the unaligned SSE2 version is marginally faster than
    ; branching based on the arguments.
    DIFF_BYTES_LOOP_PREP .skip_main_uu, .end_uu
    test    dstq, regsize - 1
        jnz .loop_uu
    test    src1q, regsize - 1
        jnz .loop_ua
    DIFF_BYTES_BODY    a, a
    DIFF_BYTES_BODY    u, a
    DIFF_BYTES_BODY    u, u
%undef i
%endif


;--------------------------------------------------------------------------------------------------
; void ff_sub_left_predict(uint8_t *dst, uint8_t *src, ptrdiff_t stride,
;                          ptrdiff_t width, int height)
;--------------------------------------------------------------------------------------------------

INIT_XMM avx
cglobal sub_left_predict, 5,6,5, dst, src, stride, width, height, x
    mova    m1, [pb_80] ; initial prev: the first pixel is predicted from 0x80
    add     dstq, widthq
    add     srcq, widthq
    lea     xd, [widthq - 1]
    neg     widthq
    and     xd, 15
    ; m4: pshufb mask that moves byte ((width - 1) & 15) of a register into
    ; byte 15 and zeroes the rest (lanes set to 0x80 select zero)
    pinsrb  m4, m1, xd, 15
    mov     xq, widthq  ; xq = -width

.loop:
    movu    m0, [srcq + widthq]
    palignr m2, m0, m1, 15          ; m2 = left neighbours of m0's pixels
    movu    m1, [srcq + widthq + 16]
    palignr m3, m1, m0, 15          ; m3 = left neighbours of m1's pixels
    psubb   m2, m0, m2
    psubb   m3, m1, m3
    movu    [dstq + widthq], m2
    movu    [dstq + widthq + 16], m3
    add     widthq, 2 * 16
        jl  .loop

    add     srcq, strideq
    sub     dstq, xq                ; dst += width (dst rows are contiguous)
    test    xd, 16                  ; did the row's last pixel land in m0 or m1?
        jz  .mod32
    mova    m1, m0

.mod32:
    pshufb  m1, m4                  ; byte 15 of m1 = last pixel of the row,
                                    ; the left predictor for the next row
    mov     widthq, xq
    dec     heightd
        jg  .loop
    RET
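
; For reference, the transform above corresponds to this scalar C sketch
; (sub_left_predict_ref is a hypothetical name for illustration): the left
; predictor starts at 0x80 (hence pb_80), dst is written contiguously, src
; advances by stride per row, and the last pixel of each row predicts the
; first pixel of the next:
;
;     #include <stddef.h> /* ptrdiff_t */
;     #include <stdint.h> /* uint8_t */
;
;     static void sub_left_predict_ref(uint8_t *dst, const uint8_t *src,
;                                      ptrdiff_t stride, ptrdiff_t width,
;                                      int height)
;     {
;         uint8_t prev = 0x80;
;         for (int y = 0; y < height; y++) {
;             for (ptrdiff_t x = 0; x < width; x++) {
;                 dst[x] = src[x] - prev; /* modulo-256 left delta */
;                 prev   = src[x];
;             }
;             dst += width;  /* dst has no row padding */
;             src += stride;
;         }
;     }
;
; The SIMD version builds the same left-neighbour vector with palignr and uses
; the m4 pshufb mask to carry each row's last pixel over to the next row.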