1;*****************************************************************************
2;* SIMD-optimized MPEG encoding functions
3;*****************************************************************************
4;* Copyright (c) 2000, 2001 Fabrice Bellard
5;* Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6;*
7;* This file is part of FFmpeg.
8;*
9;* FFmpeg is free software; you can redistribute it and/or
10;* modify it under the terms of the GNU Lesser General Public
11;* License as published by the Free Software Foundation; either
12;* version 2.1 of the License, or (at your option) any later version.
13;*
14;* FFmpeg is distributed in the hope that it will be useful,
15;* but WITHOUT ANY WARRANTY; without even the implied warranty of
16;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
17;* Lesser General Public License for more details.
18;*
19;* You should have received a copy of the GNU Lesser General Public
20;* License along with FFmpeg; if not, write to the Free Software
21;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22;*****************************************************************************
23
24%include "libavutil/x86/x86util.asm"
25
26SECTION .text
; int ff_pix_sum16(uint8_t *pix, int line_size)
; %1 = number of loop iterations (4 rows are processed per iteration)
; %2 = number of GPRs used
; %3 = number of rows advanced per iteration (pix += line_size * %3)
30%macro PIX_SUM16 3
31cglobal pix_sum16, 2, %2, 6
32    movsxdifnidn r1, r1d
33    mov          r2, %1
34    lea          r3, [r1*3]
35%if notcpuflag(xop)
36    pxor         m5, m5
37%endif
38    pxor         m4, m4
39.loop:
40%if cpuflag(xop)
41    vphaddubq    m0, [r0]
42    vphaddubq    m1, [r0+r1]
43    vphaddubq    m2, [r0+r1*2]
44    vphaddubq    m3, [r0+r3]
45%else
46    mova         m0, [r0]
47    mova         m1, [r0+r1]
48    mova         m2, [r0+r1*2]
49    mova         m3, [r0+r3]
50    psadbw       m0, m5
51    psadbw       m1, m5
52    psadbw       m2, m5
53    psadbw       m3, m5
54%endif ; cpuflag(xop)
55    paddw        m1, m0
56    paddw        m3, m2
57    paddw        m3, m1
58    paddw        m4, m3
59    lea          r0, [r0+r1*%3]
60    dec r2
61    jne .loop
62    pshufd       m0, m4, q0032
63    paddd        m4, m0
64    movd        eax, m4
65    RET
66%endmacro
67
68INIT_XMM sse2
69PIX_SUM16  4, 4, 4
70%if HAVE_XOP_EXTERNAL
71INIT_XMM xop
72PIX_SUM16  4, 4, 4
73%endif
74
75; int ff_pix_norm1(uint8_t *pix, int line_size)
76; %1 = number of xmm registers used
77; %2 = number of loops
78%macro PIX_NORM1 2
79cglobal pix_norm1, 2, 3, %1
80    movsxdifnidn r1, r1d
81    mov          r2, %2
82    pxor         m0, m0
83    pxor         m5, m5
84.loop:
85    mova         m2, [r0+0]
86    mova         m3, [r0+r1]
87    punpckhbw    m1, m2, m0
88    punpcklbw    m2, m0
89    punpckhbw    m4, m3, m0
90    punpcklbw    m3, m0
91    pmaddwd      m1, m1
92    pmaddwd      m2, m2
93    pmaddwd      m3, m3
94    pmaddwd      m4, m4
95    paddd        m2, m1
96    paddd        m4, m3
97    paddd        m5, m2
98    paddd        m5, m4
99    lea          r0, [r0+r1*2]
100    dec r2
101    jne .loop
102    HADDD        m5, m1
103    movd        eax, m5
104    RET
105%endmacro
106
107INIT_XMM sse2
108PIX_NORM1 6, 8
109
110