;******************************************************************************
;* x86-optimized input routines; does shuffling of packed
;* YUV formats into individual planes, and converts RGB
;* into YUV planes also.
;* Copyright (c) 2012 Ronald S. Bultje <rsbultje@gmail.com>
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************

%include "libavutil/x86/x86util.asm"

SECTION_RODATA

%define RY 0x20DE
%define GY 0x4087
%define BY 0x0C88
%define RU 0xECFF
%define GU 0xDAC8
%define BU 0x3838
%define RV 0x3838
%define GV 0xD0E3
%define BV 0xF6E4
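; Q15 fixed-point RGB->YUV coefficients (BT.601, limited range); negative
; coefficients are stored as two's-complement words, e.g. RU = 0xECFF = -4865.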

rgb_Yrnd:        times 4 dd 0x80100        ; (16 << 15) + (1 << 8): +16 bias plus rounding for the final >> 9
rgb_UVrnd:       times 4 dd 0x400100       ; (128 << 15) + (1 << 8): +128 bias plus rounding for the final >> 9
%define bgr_Ycoeff_12x4 16*4 + 16* 0 + tableq
%define bgr_Ycoeff_3x56 16*4 + 16* 1 + tableq
%define rgb_Ycoeff_12x4 16*4 + 16* 2 + tableq
%define rgb_Ycoeff_3x56 16*4 + 16* 3 + tableq
%define bgr_Ucoeff_12x4 16*4 + 16* 4 + tableq
%define bgr_Ucoeff_3x56 16*4 + 16* 5 + tableq
%define rgb_Ucoeff_12x4 16*4 + 16* 6 + tableq
%define rgb_Ucoeff_3x56 16*4 + 16* 7 + tableq
%define bgr_Vcoeff_12x4 16*4 + 16* 8 + tableq
%define bgr_Vcoeff_3x56 16*4 + 16* 9 + tableq
%define rgb_Vcoeff_12x4 16*4 + 16*10 + tableq
%define rgb_Vcoeff_3x56 16*4 + 16*11 + tableq

%define rgba_Ycoeff_rb 16*4 + 16*12 + tableq
%define rgba_Ycoeff_br 16*4 + 16*13 + tableq
%define rgba_Ycoeff_ga 16*4 + 16*14 + tableq
%define rgba_Ycoeff_ag 16*4 + 16*15 + tableq
%define rgba_Ucoeff_rb 16*4 + 16*16 + tableq
%define rgba_Ucoeff_br 16*4 + 16*17 + tableq
%define rgba_Ucoeff_ga 16*4 + 16*18 + tableq
%define rgba_Ucoeff_ag 16*4 + 16*19 + tableq
%define rgba_Vcoeff_rb 16*4 + 16*20 + tableq
%define rgba_Vcoeff_br 16*4 + 16*21 + tableq
%define rgba_Vcoeff_ga 16*4 + 16*22 + tableq
%define rgba_Vcoeff_ag 16*4 + 16*23 + tableq

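; The defines above are byte offsets into the coefficient table passed in
; via the table argument; the commented-out rows below document the layout
; its entries are expected to follow.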
; bgr_Ycoeff_12x4: times 2 dw BY, GY, 0, BY
; bgr_Ycoeff_3x56: times 2 dw RY, 0, GY, RY
; rgb_Ycoeff_12x4: times 2 dw RY, GY, 0, RY
; rgb_Ycoeff_3x56: times 2 dw BY, 0, GY, BY
; bgr_Ucoeff_12x4: times 2 dw BU, GU, 0, BU
; bgr_Ucoeff_3x56: times 2 dw RU, 0, GU, RU
; rgb_Ucoeff_12x4: times 2 dw RU, GU, 0, RU
; rgb_Ucoeff_3x56: times 2 dw BU, 0, GU, BU
; bgr_Vcoeff_12x4: times 2 dw BV, GV, 0, BV
; bgr_Vcoeff_3x56: times 2 dw RV, 0, GV, RV
; rgb_Vcoeff_12x4: times 2 dw RV, GV, 0, RV
; rgb_Vcoeff_3x56: times 2 dw BV, 0, GV, BV

; rgba_Ycoeff_rb:  times 4 dw RY, BY
; rgba_Ycoeff_br:  times 4 dw BY, RY
; rgba_Ycoeff_ga:  times 4 dw GY, 0
; rgba_Ycoeff_ag:  times 4 dw 0,  GY
; rgba_Ucoeff_rb:  times 4 dw RU, BU
; rgba_Ucoeff_br:  times 4 dw BU, RU
; rgba_Ucoeff_ga:  times 4 dw GU, 0
; rgba_Ucoeff_ag:  times 4 dw 0,  GU
; rgba_Vcoeff_rb:  times 4 dw RV, BV
; rgba_Vcoeff_br:  times 4 dw BV, RV
; rgba_Vcoeff_ga:  times 4 dw GV, 0
; rgba_Vcoeff_ag:  times 4 dw 0,  GV

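; pshufb masks for unpacking 24-bit RGB; an index with the high bit set
; (0x80) writes a zero byte, so each mask expands 8 packed byte samples
; into 8 zero-extended words.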
shuf_rgb_12x4:   db 0, 0x80, 1, 0x80,  2, 0x80,  3, 0x80, \
                    6, 0x80, 7, 0x80,  8, 0x80,  9, 0x80
shuf_rgb_3x56:   db 2, 0x80, 3, 0x80,  4, 0x80,  5, 0x80, \
                    8, 0x80, 9, 0x80, 10, 0x80, 11, 0x80
pd_65535f:     times 8 dd 65535.0
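; byte-shuffle masks: pb_pack_shuffle16le gathers the low word of each dword
; so 32-bit results can be stored as packed 16-bit; pb_shuffle32be and
; pb_shuffle16be byte-swap big-endian 32-/16-bit samples; each pattern is
; repeated so the masks also cover a full ymm register.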
pb_pack_shuffle16le:    db  0,  1,  4,  5, \
                            8,  9, 12, 13, \
                           -1, -1, -1, -1, \
                           -1, -1, -1, -1, \
                           -1, -1, -1, -1, \
                           -1, -1, -1, -1, \
                            0,  1,  4,  5, \
                            8,  9, 12, 13
pb_shuffle32be:         db  3,  2,  1,  0, \
                            7,  6,  5,  4, \
                           11, 10,  9,  8, \
                           15, 14, 13, 12, \
                            3,  2,  1,  0, \
                            7,  6,  5,  4, \
                           11, 10,  9,  8, \
                           15, 14, 13, 12
pb_shuffle16be:         db  1,  0,  3,  2, \
                            5,  4,  7,  6, \
                            9,  8, 11, 10, \
                           13, 12, 15, 14, \
                            1,  0,  3,  2, \
                            5,  4,  7,  6, \
                            9,  8, 11, 10, \
                           13, 12, 15, 14
SECTION .text

;-----------------------------------------------------------------------------
; RGB to Y/UV.
;
; void <fmt>ToY_<opt>(uint8_t *dst, const uint8_t *src, int w);
; and
; void <fmt>ToUV_<opt>(uint8_t *dstU, uint8_t *dstV, const uint8_t *src,
;                      const uint8_t *unused, int w);
;-----------------------------------------------------------------------------
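; Both take Q15 coefficients from the table above and compute, per pixel,
;   Y = (RY*R + GY*G + BY*B + rgb_Yrnd)  >> 9
;   U = (RU*R + GU*G + BU*B + rgb_UVrnd) >> 9   (and likewise for V)
; storing the result as 16-bit words.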

; %1 = nr. of XMM registers
; %2 = rgb or bgr
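; %3 = (optional) name of a previously declared function whose .body is
;      reused: after loading its own coefficients, this variant jumps into
;      that function's shared code (x86-64 only)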
%macro RGB24_TO_Y_FN 2-3
cglobal %2 %+ 24ToY, 6, 6, %1, dst, src, u1, u2, w, table
%if ARCH_X86_64
    mova           m8, [%2_Ycoeff_12x4]
    mova           m9, [%2_Ycoeff_3x56]
%define coeff1 m8
%define coeff2 m9
%else ; x86-32
%define coeff1 [%2_Ycoeff_12x4]
%define coeff2 [%2_Ycoeff_3x56]
%endif ; x86-32/64
%if ARCH_X86_64 && %0 == 3
    jmp mangle(private_prefix %+ _ %+ %3 %+ 24ToY %+ SUFFIX).body
%else ; ARCH_X86_64 && %0 == 3
.body:
%if cpuflag(ssse3)
    mova           m7, [shuf_rgb_12x4]
%define shuf_rgb1 m7
%if ARCH_X86_64
    mova          m10, [shuf_rgb_3x56]
%define shuf_rgb2 m10
%else ; x86-32
%define shuf_rgb2 [shuf_rgb_3x56]
%endif ; x86-32/64
%endif ; cpuflag(ssse3)
%if ARCH_X86_64
    movsxd         wq, wd
%endif
    add            wq, wq
    add          dstq, wq
    neg            wq
%if notcpuflag(ssse3)
    pxor           m7, m7
%endif ; !cpuflag(ssse3)
    mova           m4, [rgb_Yrnd]
.loop:
%if cpuflag(ssse3)
    movu           m0, [srcq+0]           ; (byte) { Bx, Gx, Rx }[0-3]
    movu           m2, [srcq+12]          ; (byte) { Bx, Gx, Rx }[4-7]
    pshufb         m1, m0, shuf_rgb2      ; (word) { R0, B1, G1, R1, R2, B3, G3, R3 }
    pshufb         m0, shuf_rgb1          ; (word) { B0, G0, R0, B1, B2, G2, R2, B3 }
    pshufb         m3, m2, shuf_rgb2      ; (word) { R4, B5, G5, R5, R6, B7, G7, R7 }
    pshufb         m2, shuf_rgb1          ; (word) { B4, G4, R4, B5, B6, G6, R6, B7 }
%else ; !cpuflag(ssse3)
    movd           m0, [srcq+0]           ; (byte) { B0, G0, R0, B1 }
    movd           m1, [srcq+2]           ; (byte) { R0, B1, G1, R1 }
    movd           m2, [srcq+6]           ; (byte) { B2, G2, R2, B3 }
    movd           m3, [srcq+8]           ; (byte) { R2, B3, G3, R3 }
    punpckldq      m0, m2                 ; (byte) { B0, G0, R0, B1, B2, G2, R2, B3 }
    punpckldq      m1, m3                 ; (byte) { R0, B1, G1, R1, R2, B3, G3, R3 }
    movd           m2, [srcq+12]          ; (byte) { B4, G4, R4, B5 }
    movd           m3, [srcq+14]          ; (byte) { R4, B5, G5, R5 }
    movd           m5, [srcq+18]          ; (byte) { B6, G6, R6, B7 }
    movd           m6, [srcq+20]          ; (byte) { R6, B7, G7, R7 }
    punpckldq      m2, m5                 ; (byte) { B4, G4, R4, B5, B6, G6, R6, B7 }
    punpckldq      m3, m6                 ; (byte) { R4, B5, G5, R5, R6, B7, G7, R7 }
    punpcklbw      m0, m7                 ; (word) { B0, G0, R0, B1, B2, G2, R2, B3 }
    punpcklbw      m1, m7                 ; (word) { R0, B1, G1, R1, R2, B3, G3, R3 }
    punpcklbw      m2, m7                 ; (word) { B4, G4, R4, B5, B6, G6, R6, B7 }
    punpcklbw      m3, m7                 ; (word) { R4, B5, G5, R5, R6, B7, G7, R7 }
%endif ; cpuflag(ssse3)
    add          srcq, 3 * mmsize / 2
    pmaddwd        m0, coeff1             ; (dword) { B0*BY + G0*GY, B1*BY, B2*BY + G2*GY, B3*BY }
    pmaddwd        m1, coeff2             ; (dword) { R0*RY, G1*GY + R1*RY, R2*RY, G3*GY + R3*RY }
    pmaddwd        m2, coeff1             ; (dword) { B4*BY + G4*GY, B5*BY, B6*BY + G6*GY, B7*BY }
    pmaddwd        m3, coeff2             ; (dword) { R4*RY, G5*GY + R5*RY, R6*RY, G7*GY + R7*RY }
    paddd          m0, m1                 ; (dword) { Bx*BY + Gx*GY + Rx*RY }[0-3]
    paddd          m2, m3                 ; (dword) { Bx*BY + Gx*GY + Rx*RY }[4-7]
    paddd          m0, m4                 ; += rgb_Yrnd, i.e. (dword) { Y[0-3] }
    paddd          m2, m4                 ; += rgb_Yrnd, i.e. (dword) { Y[4-7] }
    psrad          m0, 9
    psrad          m2, 9
    packssdw       m0, m2                 ; (word) { Y[0-7] }
    mova    [dstq+wq], m0
    add            wq, mmsize
    jl .loop
    REP_RET
%endif ; ARCH_X86_64 && %0 == 3
%endmacro

; %1 = nr. of XMM registers
; %2 = rgb or bgr
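; %3 = (optional) name of a previously declared function whose .body is
;      reused: after loading its own coefficients, this variant jumps into
;      that function's shared code (x86-64 only)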
%macro RGB24_TO_UV_FN 2-3
cglobal %2 %+ 24ToUV, 7, 7, %1, dstU, dstV, u1, src, u2, w, table
%if ARCH_X86_64
    mova           m8, [%2_Ucoeff_12x4]
    mova           m9, [%2_Ucoeff_3x56]
    mova          m10, [%2_Vcoeff_12x4]
    mova          m11, [%2_Vcoeff_3x56]
%define coeffU1 m8
%define coeffU2 m9
%define coeffV1 m10
%define coeffV2 m11
%else ; x86-32
%define coeffU1 [%2_Ucoeff_12x4]
%define coeffU2 [%2_Ucoeff_3x56]
%define coeffV1 [%2_Vcoeff_12x4]
%define coeffV2 [%2_Vcoeff_3x56]
%endif ; x86-32/64
%if ARCH_X86_64 && %0 == 3
    jmp mangle(private_prefix %+ _ %+ %3 %+ 24ToUV %+ SUFFIX).body
%else ; ARCH_X86_64 && %0 == 3
.body:
%if cpuflag(ssse3)
    mova           m7, [shuf_rgb_12x4]
%define shuf_rgb1 m7
%if ARCH_X86_64
    mova          m12, [shuf_rgb_3x56]
%define shuf_rgb2 m12
%else ; x86-32
%define shuf_rgb2 [shuf_rgb_3x56]
%endif ; x86-32/64
%endif ; cpuflag(ssse3)
%if ARCH_X86_64
    movsxd         wq, dword r5m
%else ; x86-32
    mov            wq, r5m
%endif
    add            wq, wq
    add         dstUq, wq
    add         dstVq, wq
    neg            wq
    mova           m6, [rgb_UVrnd]
%if notcpuflag(ssse3)
    pxor           m7, m7
%endif
.loop:
%if cpuflag(ssse3)
    movu           m0, [srcq+0]           ; (byte) { Bx, Gx, Rx }[0-3]
    movu           m4, [srcq+12]          ; (byte) { Bx, Gx, Rx }[4-7]
    pshufb         m1, m0, shuf_rgb2      ; (word) { R0, B1, G1, R1, R2, B3, G3, R3 }
    pshufb         m0, shuf_rgb1          ; (word) { B0, G0, R0, B1, B2, G2, R2, B3 }
%else ; !cpuflag(ssse3)
    movd           m0, [srcq+0]           ; (byte) { B0, G0, R0, B1 }
    movd           m1, [srcq+2]           ; (byte) { R0, B1, G1, R1 }
    movd           m4, [srcq+6]           ; (byte) { B2, G2, R2, B3 }
    movd           m5, [srcq+8]           ; (byte) { R2, B3, G3, R3 }
    punpckldq      m0, m4                 ; (byte) { B0, G0, R0, B1, B2, G2, R2, B3 }
    punpckldq      m1, m5                 ; (byte) { R0, B1, G1, R1, R2, B3, G3, R3 }
    movd           m4, [srcq+12]          ; (byte) { B4, G4, R4, B5 }
    movd           m5, [srcq+14]          ; (byte) { R4, B5, G5, R5 }
    punpcklbw      m0, m7                 ; (word) { B0, G0, R0, B1, B2, G2, R2, B3 }
    punpcklbw      m1, m7                 ; (word) { R0, B1, G1, R1, R2, B3, G3, R3 }
%endif ; cpuflag(ssse3)
    pmaddwd        m2, m0, coeffV1        ; (dword) { B0*BV + G0*GV, B1*BV, B2*BV + G2*GV, B3*BV }
    pmaddwd        m3, m1, coeffV2        ; (dword) { R0*RV, G1*GV + R1*RV, R2*RV, G3*GV + R3*RV }
    pmaddwd        m0, coeffU1            ; (dword) { B0*BU + G0*GU, B1*BU, B2*BU + G2*GU, B3*BU }
    pmaddwd        m1, coeffU2            ; (dword) { R0*RU, G1*GU + R1*RU, R2*RU, G3*GU + R3*RU }
    paddd          m0, m1                 ; (dword) { Bx*BU + Gx*GU + Rx*RU }[0-3]
    paddd          m2, m3                 ; (dword) { Bx*BV + Gx*GV + Rx*RV }[0-3]
%if cpuflag(ssse3)
    pshufb         m5, m4, shuf_rgb2      ; (word) { R4, B5, G5, R5, R6, B7, G7, R7 }
    pshufb         m4, shuf_rgb1          ; (word) { B4, G4, R4, B5, B6, G6, R6, B7 }
%else ; !cpuflag(ssse3)
    movd           m1, [srcq+18]          ; (byte) { B6, G6, R6, B7 }
    movd           m3, [srcq+20]          ; (byte) { R6, B7, G7, R7 }
    punpckldq      m4, m1                 ; (byte) { B4, G4, R4, B5, B6, G6, R6, B7 }
    punpckldq      m5, m3                 ; (byte) { R4, B5, G5, R5, R6, B7, G7, R7 }
    punpcklbw      m4, m7                 ; (word) { B4, G4, R4, B5, B6, G6, R6, B7 }
    punpcklbw      m5, m7                 ; (word) { R4, B5, G5, R5, R6, B7, G7, R7 }
%endif ; cpuflag(ssse3)
    add          srcq, 3 * mmsize / 2
    pmaddwd        m1, m4, coeffU1        ; (dword) { B4*BU + G4*GU, B5*BU, B6*BU + G6*GU, B7*BU }
    pmaddwd        m3, m5, coeffU2        ; (dword) { R4*RU, G5*GU + R5*RU, R6*RU, G7*GU + R7*RU }
    pmaddwd        m4, coeffV1            ; (dword) { B4*BV + G4*GV, B5*BV, B6*BV + G6*GV, B7*BV }
    pmaddwd        m5, coeffV2            ; (dword) { R4*RV, G5*GV + R5*RV, R6*RV, G7*GV + R7*RV }
    paddd          m1, m3                 ; (dword) { Bx*BU + Gx*GU + Rx*RU }[4-7]
    paddd          m4, m5                 ; (dword) { Bx*BV + Gx*GV + Rx*RV }[4-7]
    paddd          m0, m6                 ; += rgb_UVrnd, i.e. (dword) { U[0-3] }
    paddd          m2, m6                 ; += rgb_UVrnd, i.e. (dword) { V[0-3] }
    paddd          m1, m6                 ; += rgb_UVrnd, i.e. (dword) { U[4-7] }
    paddd          m4, m6                 ; += rgb_UVrnd, i.e. (dword) { V[4-7] }
    psrad          m0, 9
    psrad          m2, 9
    psrad          m1, 9
    psrad          m4, 9
    packssdw       m0, m1                 ; (word) { U[0-7] }
    packssdw       m2, m4                 ; (word) { V[0-7] }
    mova   [dstUq+wq], m0
    mova   [dstVq+wq], m2
    add            wq, mmsize
    jl .loop
    REP_RET
%endif ; ARCH_X86_64 && %0 == 3
%endmacro

; %1 = nr. of XMM registers for rgb-to-Y func
; %2 = nr. of XMM registers for rgb-to-UV func
%macro RGB24_FUNCS 2
RGB24_TO_Y_FN %1, rgb
RGB24_TO_Y_FN %1, bgr, rgb
RGB24_TO_UV_FN %2, rgb
RGB24_TO_UV_FN %2, bgr, rgb
%endmacro

INIT_XMM sse2
RGB24_FUNCS 10, 12

INIT_XMM ssse3
RGB24_FUNCS 11, 13

%if HAVE_AVX_EXTERNAL
INIT_XMM avx
RGB24_FUNCS 11, 13
%endif

; %1 = nr. of XMM registers
; %2-5 = rgba, bgra, argb or abgr (in individual characters)
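; %6 = (optional) name of a previously declared function whose .body is
;      reused: after loading its own coefficients, this variant jumps into
;      that function's shared code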
%macro RGB32_TO_Y_FN 5-6
cglobal %2%3%4%5 %+ ToY, 6, 6, %1, dst, src, u1, u2, w, table
    mova           m5, [rgba_Ycoeff_%2%4]
    mova           m6, [rgba_Ycoeff_%3%5]
%if %0 == 6
    jmp mangle(private_prefix %+ _ %+ %6 %+ ToY %+ SUFFIX).body
%else ; %0 == 6
.body:
%if ARCH_X86_64
    movsxd         wq, wd
%endif
    add            wq, wq
    sub            wq, mmsize - 1
    lea          srcq, [srcq+wq*2]
    add          dstq, wq
    neg            wq
    mova           m4, [rgb_Yrnd]
    pcmpeqb        m7, m7
    psrlw          m7, 8                  ; (word) { 0x00ff } x4
.loop:
    ; FIXME check alignment and use mova
    movu           m0, [srcq+wq*2+0]      ; (byte) { Bx, Gx, Rx, xx }[0-3]
    movu           m2, [srcq+wq*2+mmsize] ; (byte) { Bx, Gx, Rx, xx }[4-7]
    DEINTB          1,  0,  3,  2,  7     ; (word) { Gx, xx (m0/m2) or Bx, Rx (m1/m3) }[0-3]/[4-7]
    pmaddwd        m1, m5                 ; (dword) { Bx*BY + Rx*RY }[0-3]
    pmaddwd        m0, m6                 ; (dword) { Gx*GY }[0-3]
    pmaddwd        m3, m5                 ; (dword) { Bx*BY + Rx*RY }[4-7]
    pmaddwd        m2, m6                 ; (dword) { Gx*GY }[4-7]
    paddd          m0, m4                 ; += rgb_Yrnd
    paddd          m2, m4                 ; += rgb_Yrnd
    paddd          m0, m1                 ; (dword) { Y[0-3] }
    paddd          m2, m3                 ; (dword) { Y[4-7] }
    psrad          m0, 9
    psrad          m2, 9
    packssdw       m0, m2                 ; (word) { Y[0-7] }
    mova    [dstq+wq], m0
    add            wq, mmsize
    jl .loop
    sub            wq, mmsize - 1
    jz .end
    add            srcq, 2*mmsize - 2
    add            dstq, mmsize - 1
.loop2:
    movd           m0, [srcq+wq*2+0]      ; (byte) { Bx, Gx, Rx, xx }[0-3]
    DEINTB          1,  0,  3,  2,  7     ; (word) { Gx, xx (m0/m2) or Bx, Rx (m1/m3) }[0-3]/[4-7]
    pmaddwd        m1, m5                 ; (dword) { Bx*BY + Rx*RY }[0-3]
    pmaddwd        m0, m6                 ; (dword) { Gx*GY }[0-3]
    paddd          m0, m4                 ; += rgb_Yrnd
    paddd          m0, m1                 ; (dword) { Y[0-3] }
    psrad          m0, 9
    packssdw       m0, m0                 ; (word) { Y[0-7] }
    movd    [dstq+wq], m0
    add            wq, 2
    jl .loop2
.end:
    REP_RET
%endif ; %0 == 6
%endmacro

; %1 = nr. of XMM registers
; %2-5 = rgba, bgra, argb or abgr (in individual characters)
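; %6 = (optional) name of a previously declared function whose .body is
;      reused: after loading its own coefficients, this variant jumps into
;      that function's shared code (x86-64 only)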
%macro RGB32_TO_UV_FN 5-6
cglobal %2%3%4%5 %+ ToUV, 7, 7, %1, dstU, dstV, u1, src, u2, w, table
%if ARCH_X86_64
    mova           m8, [rgba_Ucoeff_%2%4]
    mova           m9, [rgba_Ucoeff_%3%5]
    mova          m10, [rgba_Vcoeff_%2%4]
    mova          m11, [rgba_Vcoeff_%3%5]
%define coeffU1 m8
%define coeffU2 m9
%define coeffV1 m10
%define coeffV2 m11
%else ; x86-32
%define coeffU1 [rgba_Ucoeff_%2%4]
%define coeffU2 [rgba_Ucoeff_%3%5]
%define coeffV1 [rgba_Vcoeff_%2%4]
%define coeffV2 [rgba_Vcoeff_%3%5]
%endif ; x86-64/32
%if ARCH_X86_64 && %0 == 6
    jmp mangle(private_prefix %+ _ %+ %6 %+ ToUV %+ SUFFIX).body
%else ; ARCH_X86_64 && %0 == 6
.body:
%if ARCH_X86_64
    movsxd         wq, dword r5m
%else ; x86-32
    mov            wq, r5m
%endif
    add            wq, wq
    sub            wq, mmsize - 1
    add         dstUq, wq
    add         dstVq, wq
    lea          srcq, [srcq+wq*2]
    neg            wq
    pcmpeqb        m7, m7
    psrlw          m7, 8                  ; (word) { 0x00ff } x4
    mova           m6, [rgb_UVrnd]
.loop:
    ; FIXME check alignment and use mova
    movu           m0, [srcq+wq*2+0]      ; (byte) { Bx, Gx, Rx, xx }[0-3]
    movu           m4, [srcq+wq*2+mmsize] ; (byte) { Bx, Gx, Rx, xx }[4-7]
    DEINTB          1,  0,  5,  4,  7     ; (word) { Gx, xx (m0/m4) or Bx, Rx (m1/m5) }[0-3]/[4-7]
    pmaddwd        m3, m1, coeffV1        ; (dword) { Bx*BV + Rx*RV }[0-3]
    pmaddwd        m2, m0, coeffV2        ; (dword) { Gx*GV }[0-3]
    pmaddwd        m1, coeffU1            ; (dword) { Bx*BU + Rx*RU }[0-3]
    pmaddwd        m0, coeffU2            ; (dword) { Gx*GU }[0-3]
    paddd          m3, m6                 ; += rgb_UVrnd
    paddd          m1, m6                 ; += rgb_UVrnd
    paddd          m2, m3                 ; (dword) { V[0-3] }
    paddd          m0, m1                 ; (dword) { U[0-3] }
    pmaddwd        m3, m5, coeffV1        ; (dword) { Bx*BV + Rx*RV }[4-7]
    pmaddwd        m1, m4, coeffV2        ; (dword) { Gx*GV }[4-7]
    pmaddwd        m5, coeffU1            ; (dword) { Bx*BU + Rx*RU }[4-7]
    pmaddwd        m4, coeffU2            ; (dword) { Gx*GU }[4-7]
    paddd          m3, m6                 ; += rgb_UVrnd
    paddd          m5, m6                 ; += rgb_UVrnd
    psrad          m0, 9
    paddd          m1, m3                 ; (dword) { V[4-7] }
    paddd          m4, m5                 ; (dword) { U[4-7] }
    psrad          m2, 9
    psrad          m4, 9
    psrad          m1, 9
    packssdw       m0, m4                 ; (word) { U[0-7] }
    packssdw       m2, m1                 ; (word) { V[0-7] }
    mova   [dstUq+wq], m0
    mova   [dstVq+wq], m2
    add            wq, mmsize
    jl .loop
    sub            wq, mmsize - 1
    jz .end
    add            srcq , 2*mmsize - 2
    add            dstUq, mmsize - 1
    add            dstVq, mmsize - 1
.loop2:
    movd           m0, [srcq+wq*2]        ; (byte) { Bx, Gx, Rx, xx }[0-3]
    DEINTB          1,  0,  5,  4,  7     ; (word) { Gx, xx (m0/m4) or Bx, Rx (m1/m5) }[0-3]/[4-7]
    pmaddwd        m3, m1, coeffV1        ; (dword) { Bx*BV + Rx*RV }[0-3]
    pmaddwd        m2, m0, coeffV2        ; (dword) { Gx*GV }[0-3]
    pmaddwd        m1, coeffU1            ; (dword) { Bx*BU + Rx*RU }[0-3]
    pmaddwd        m0, coeffU2            ; (dword) { Gx*GU }[0-3]
    paddd          m3, m6                 ; += rgb_UVrnd
    paddd          m1, m6                 ; += rgb_UVrnd
    paddd          m2, m3                 ; (dword) { V[0-3] }
    paddd          m0, m1                 ; (dword) { U[0-3] }
    psrad          m0, 9
    psrad          m2, 9
    packssdw       m0, m0                 ; (word) { U[0-7] }
    packssdw       m2, m2                 ; (word) { V[0-7] }
    movd   [dstUq+wq], m0
    movd   [dstVq+wq], m2
    add            wq, 2
    jl .loop2
.end:
    REP_RET
%endif ; ARCH_X86_64 && %0 == 6
%endmacro

; %1 = nr. of XMM registers for rgb-to-Y func
; %2 = nr. of XMM registers for rgb-to-UV func
%macro RGB32_FUNCS 2
RGB32_TO_Y_FN %1, r, g, b, a
RGB32_TO_Y_FN %1, b, g, r, a, rgba
RGB32_TO_Y_FN %1, a, r, g, b, rgba
RGB32_TO_Y_FN %1, a, b, g, r, rgba

RGB32_TO_UV_FN %2, r, g, b, a
RGB32_TO_UV_FN %2, b, g, r, a, rgba
RGB32_TO_UV_FN %2, a, r, g, b, rgba
RGB32_TO_UV_FN %2, a, b, g, r, rgba
%endmacro

INIT_XMM sse2
RGB32_FUNCS 8, 12

%if HAVE_AVX_EXTERNAL
INIT_XMM avx
RGB32_FUNCS 8, 12
%endif

;-----------------------------------------------------------------------------
; YUYV/UYVY/NV12/NV21 packed pixel shuffling.
;
; void <fmt>ToY_<opt>(uint8_t *dst, const uint8_t *src, int w);
; and
; void <fmt>ToUV_<opt>(uint8_t *dstU, uint8_t *dstV, const uint8_t *src,
;                      const uint8_t *unused, int w);
;-----------------------------------------------------------------------------
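; ToY copies the luma bytes out of the packed stream (the even bytes for
; YUYV, the odd bytes for UYVY), while ToUV deinterleaves the chroma bytes
; into separate U and V planes.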

; %1 = a (aligned) or u (unaligned)
; %2 = yuyv or uyvy
%macro LOOP_YUYV_TO_Y 2
.loop_%1:
    mov%1          m0, [srcq+wq*2]        ; (byte) { Y0, U0, Y1, V0, ... }
    mov%1          m1, [srcq+wq*2+mmsize] ; (byte) { Y8, U4, Y9, V4, ... }
%ifidn %2, yuyv
    pand           m0, m2                 ; (word) { Y0, Y1, ..., Y7 }
    pand           m1, m2                 ; (word) { Y8, Y9, ..., Y15 }
%else ; uyvy
    psrlw          m0, 8                  ; (word) { Y0, Y1, ..., Y7 }
    psrlw          m1, 8                  ; (word) { Y8, Y9, ..., Y15 }
%endif ; yuyv/uyvy
    packuswb       m0, m1                 ; (byte) { Y0, ..., Y15 }
    mova    [dstq+wq], m0
    add            wq, mmsize
    jl .loop_%1
    REP_RET
%endmacro

; %1 = nr. of XMM registers
; %2 = yuyv or uyvy
; %3 = if specified, it means that unaligned and aligned code in loop
;      will be the same (i.e. YUYV+AVX), and thus we don't need to
;      split the loop in an aligned and unaligned case
%macro YUYV_TO_Y_FN 2-3
cglobal %2ToY, 5, 5, %1, dst, unused0, unused1, src, w
%if ARCH_X86_64
    movsxd         wq, wd
%endif
    add          dstq, wq
    test         srcq, 15
    lea          srcq, [srcq+wq*2]
%ifidn %2, yuyv
    pcmpeqb        m2, m2                 ; (byte) { 0xff } x 16
    psrlw          m2, 8                  ; (word) { 0x00ff } x 8
%endif ; yuyv
    jnz .loop_u_start
    neg            wq
    LOOP_YUYV_TO_Y  a, %2
.loop_u_start:
    neg            wq
    LOOP_YUYV_TO_Y  u, %2
%endmacro

; %1 = a (aligned) or u (unaligned)
; %2 = yuyv or uyvy
%macro LOOP_YUYV_TO_UV 2
.loop_%1:
%ifidn %2, yuyv
    mov%1          m0, [srcq+wq*4]        ; (byte) { Y0, U0, Y1, V0, ... }
    mov%1          m1, [srcq+wq*4+mmsize] ; (byte) { Y8, U4, Y9, V4, ... }
    psrlw          m0, 8                  ; (word) { U0, V0, ..., U3, V3 }
    psrlw          m1, 8                  ; (word) { U4, V4, ..., U7, V7 }
%else ; uyvy
%if cpuflag(avx)
    vpand          m0, m2, [srcq+wq*4]        ; (word) { U0, V0, ..., U3, V3 }
    vpand          m1, m2, [srcq+wq*4+mmsize] ; (word) { U4, V4, ..., U7, V7 }
%else
    mov%1          m0, [srcq+wq*4]        ; (byte) { Y0, U0, Y1, V0, ... }
    mov%1          m1, [srcq+wq*4+mmsize] ; (byte) { Y8, U4, Y9, V4, ... }
    pand           m0, m2                 ; (word) { U0, V0, ..., U3, V3 }
    pand           m1, m2                 ; (word) { U4, V4, ..., U7, V7 }
%endif
%endif ; yuyv/uyvy
    packuswb       m0, m1                 ; (byte) { U0, V0, ..., U7, V7 }
    pand           m1, m0, m2             ; (word) { U0, U1, ..., U7 }
    psrlw          m0, 8                  ; (word) { V0, V1, ..., V7 }
    packuswb       m1, m0                 ; (byte) { U0, ... U7, V0, ... V7 }
    movh   [dstUq+wq], m1
    movhps [dstVq+wq], m1
    add            wq, mmsize / 2
    jl .loop_%1
    REP_RET
%endmacro

; %1 = nr. of XMM registers
; %2 = yuyv or uyvy
; %3 = if specified, it means that unaligned and aligned code in loop
;      will be the same (i.e. UYVY+AVX), and thus we don't need to
;      split the loop in an aligned and unaligned case
%macro YUYV_TO_UV_FN 2-3
cglobal %2ToUV, 4, 5, %1, dstU, dstV, unused, src, w
%if ARCH_X86_64
    movsxd         wq, dword r5m
%else ; x86-32
    mov            wq, r5m
%endif
    add         dstUq, wq
    add         dstVq, wq
%if %0 == 2
    test         srcq, 15
%endif
    lea          srcq, [srcq+wq*4]
    pcmpeqb        m2, m2                 ; (byte) { 0xff } x 16
    psrlw          m2, 8                  ; (word) { 0x00ff } x 8
    ; NOTE: if uyvy+avx, u/a are identical
%if %0 == 2
    jnz .loop_u_start
    neg            wq
    LOOP_YUYV_TO_UV a, %2
.loop_u_start:
    neg            wq
    LOOP_YUYV_TO_UV u, %2
%else
    neg            wq
    LOOP_YUYV_TO_UV a, %2
%endif
%endmacro

; %1 = a (aligned) or u (unaligned)
; %2 = nv12 or nv21
%macro LOOP_NVXX_TO_UV 2
.loop_%1:
    mov%1          m0, [srcq+wq*2]        ; (byte) { U0, V0, U1, V1, ... }
    mov%1          m1, [srcq+wq*2+mmsize] ; (byte) { U8, V8, U9, V9, ... }
    pand           m2, m0, m5             ; (word) { U0, U1, ..., U7 }
    pand           m3, m1, m5             ; (word) { U8, U9, ..., U15 }
    psrlw          m0, 8                  ; (word) { V0, V1, ..., V7 }
    psrlw          m1, 8                  ; (word) { V8, V9, ..., V15 }
    packuswb       m2, m3                 ; (byte) { U0, ..., U15 }
    packuswb       m0, m1                 ; (byte) { V0, ..., V15 }
%ifidn %2, nv12
    mova   [dstUq+wq], m2
    mova   [dstVq+wq], m0
%else ; nv21
    mova   [dstVq+wq], m2
    mova   [dstUq+wq], m0
%endif ; nv12/21
    add            wq, mmsize
    jl .loop_%1
    REP_RET
%endmacro

; %1 = nr. of XMM registers
; %2 = nv12 or nv21
%macro NVXX_TO_UV_FN 2
cglobal %2ToUV, 4, 5, %1, dstU, dstV, unused, src, w
%if ARCH_X86_64
    movsxd         wq, dword r5m
%else ; x86-32
    mov            wq, r5m
%endif
    add         dstUq, wq
    add         dstVq, wq
    test         srcq, 15
    lea          srcq, [srcq+wq*2]
    pcmpeqb        m5, m5                 ; (byte) { 0xff } x 16
    psrlw          m5, 8                  ; (word) { 0x00ff } x 8
    jnz .loop_u_start
    neg            wq
    LOOP_NVXX_TO_UV a, %2
.loop_u_start:
    neg            wq
    LOOP_NVXX_TO_UV u, %2
%endmacro

INIT_XMM sse2
YUYV_TO_Y_FN  3, yuyv
YUYV_TO_Y_FN  2, uyvy
YUYV_TO_UV_FN 3, yuyv
YUYV_TO_UV_FN 3, uyvy
NVXX_TO_UV_FN 5, nv12
NVXX_TO_UV_FN 5, nv21

%if HAVE_AVX_EXTERNAL
INIT_XMM avx
; in theory, we could write a yuy2-to-y using vpand (i.e. AVX), but
; that's not faster in practice
YUYV_TO_UV_FN 3, yuyv
YUYV_TO_UV_FN 3, uyvy, 1
NVXX_TO_UV_FN 5, nv12
NVXX_TO_UV_FN 5, nv21
%endif

%if ARCH_X86_64
%define RY_IDX 0
%define GY_IDX 1
%define BY_IDX 2
%define RU_IDX 3
%define GU_IDX 4
%define BU_IDX 5
%define RV_IDX 6
%define GV_IDX 7
%define BV_IDX 8
%define RGB2YUV_SHIFT 15

%define R m0
%define G m1
%define B m2

%macro SWAP32 1
%if mmsize > 16 || cpuflag(sse4)
    pshufb   m%1, [pb_shuffle32be]
%else
    psrlw    xm7, xm%1, 8
    psllw   xm%1, 8
    por     xm%1, xm7
    pshuflw xm%1, xm%1, (2 << 6 | 3 << 4 | 0 << 2 | 1 << 0)
    pshufhw xm%1, xm%1, (2 << 6 | 3 << 4 | 0 << 2 | 1 << 0)
%endif
%endmacro

; 1 - dest
; 2 - source
; 3 - is big endian
; 4 - load only 2 values on sse2
%macro LOADF32 4
    %if notcpuflag(sse4) && %4
        %if %3  ; big endian
            mov tmp1q, %2
            bswap tmp1q
            movq xm%1, tmp1q
        %else
            movq m%1, %2
        %endif
    %else
        movu m%1, %2
        %if %3
            SWAP32 %1
        %endif
    %endif
    maxps m%1, m9 ; 0.0 (nan, -inf) -> 0.0
    mulps m%1, m8 ; [pd_65535f]
    minps m%1, m8 ; +inf -> 65535
    ; cvtps2dq rounds to nearest int
    ; assuming mxcsr register is default rounding
    ; 0.40 -> 0.0, 0.50 -> 0.0, 0.51 -> 1.0
    cvtps2dq m%1, m%1

    %if notcpuflag(sse4) && %4
        ; line up the 2 values in lanes 0,2
        %if %3 ; big endian
            pshufd m%1, m%1, (3 << 6 | 0 << 4 | 2 << 2 | 1 << 0)
        %else
            pshufd m%1, m%1, (3 << 6 | 1 << 4 | 2 << 2 | 0 << 0)
        %endif
    %endif
%endmacro

; 1 - dest
; 2 - source
; 3 - is big endian
%macro LOAD16 3
    %if cpuflag(sse4) || mmsize > 16
        pmovzxwd  m%1, %2
        %if %3 ; bigendian
            pshufb m%1, m8 ; [pb_shuffle16be]
        %endif
    %else
        %if %3 ; bigendian
            mov     tmp1d, dword %2
            bswap   tmp1d
            movd     xm%1, tmp1d
            pshuflw   m%1, m%1, (3 << 6 | 0 << 4 | 3 << 2 | 1 << 0)
            pshufd    m%1, m%1, (3 << 6 | 1 << 4 | 2 << 2 | 0 << 0)
        %else
            movd     xm%1, %2
            punpcklwd m%1, m9 ; interleave words with zero
            pshufd    m%1, m%1, (3 << 6 | 1 << 4 | 2 << 2 | 0 << 0)
        %endif
    %endif
%endmacro

%macro LOAD8_RGB 0
    %if cpuflag(sse4) || mmsize > 16
        pmovzxbd  R, [srcRq + xq]
        pmovzxbd  G, [srcGq + xq]
        pmovzxbd  B, [srcBq + xq]
    %else
        ; thought this would be faster, but from my measurements it's not
        ; movd m0, [srcRq + xq + 0]; overreads by 2 bytes
        ; punpcklbw m0, m9 ; interleave bytes with zero
        ; punpcklwd m0, m9 ; interleave words with zero
        ; pshufd m0, m0, (3 << 6 | 1 << 4 | 2 << 2 | 0 << 0)

        movzx tmp2q, byte [srcRq + xq + 1]
        movzx tmp1q, byte [srcRq + xq + 0]
        shl   tmp2q, 32
        or    tmp1q, tmp2q
        movq    xm0, tmp1q

        movzx tmp2q, byte [srcGq + xq + 1]
        movzx tmp3q, byte [srcGq + xq + 0]
        shl   tmp2q, 32
        or    tmp3q, tmp2q
        movq    xm1, tmp3q

        movzx tmp2q, byte [srcBq + xq + 1]
        movzx tmp1q, byte [srcBq + xq + 0]
        shl   tmp2q, 32
        or    tmp1q, tmp2q
        movq    xm2, tmp1q

        pshufd   m0, m0, (3 << 6 | 1 << 4 | 2 << 2 | 0 << 0)
        pshufd   m1, m1, (3 << 6 | 1 << 4 | 2 << 2 | 0 << 0)
        pshufd   m2, m2, (3 << 6 | 1 << 4 | 2 << 2 | 0 << 0)
    %endif
%endmacro

; 1 - dest
; 2 - source
; 3 - store only 2 values on sse2
%macro STORE16 3
    %if %3 && notcpuflag(sse4)
        pshufd        m%2,  m%2, (3 << 6 | 1 << 4 | 2 << 2 | 0 << 0)
        pshuflw       m%2,  m%2, (3 << 6 | 1 << 4 | 2 << 2 | 0 << 0)
        movd           %1,  m%2
    %elif mmsize > 16
        pshufb        m%2,   m7   ; [pb_pack_shuffle16le]
        vpermq        m%2,  m%2,  (3 << 6 | 0 << 4 | 3 << 2 | 0 << 0)
        movu           %1, xm%2
    %else
        %if cpuflag(sse4)
            pshufb  m%2,  m7 ; [pb_pack_shuffle16le]
        %else
            pshuflw m%2, m%2, (1 << 6 | 1 << 4 | 2 << 2 | 0 << 0)
            pshufhw m%2, m%2, (1 << 6 | 1 << 4 | 2 << 2 | 0 << 0)
            pshufd  m%2, m%2, (3 << 6 | 3 << 4 | 2 << 2 | 0 << 0)
        %endif
        movq %1, m%2
    %endif
%endmacro

%macro PMUL 3
%if cpuflag(sse4) || mmsize > 16
    pmulld  %1, %2, %3
%else
    pmuludq %1, %2, %3
%endif
%endmacro

; 1 - name
; 2 - depth
; 3 - is big endian
; 4 - is float
; in sse2 mode only 2 values are done per loop, due to lack of pmulld instruction
%macro planar_rgb_to_y_fn 4
%if %2 == 8
    %define OFFSET (0x801<<(RGB2YUV_SHIFT-7))
    %define RSHIFT (RGB2YUV_SHIFT-6)
%else
    %if %2 < 16
        %define SHIFT %2
        %define BPC %2
    %else
        %define SHIFT 14
        %define BPC 16
    %endif
    %define OFFSET ((16 << (RGB2YUV_SHIFT + BPC - 8)) + (1 << (RGB2YUV_SHIFT + SHIFT - 15)))
    %define RSHIFT (RGB2YUV_SHIFT + SHIFT - 14)
%endif
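; e.g. for 8-bit input: OFFSET = 0x801 << 8 = 0x80100 (the same bias plus
; rounding constant as rgb_Yrnd above) and RSHIFT = 9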
cglobal planar_%1_to_y, 4, 12, 13, dst, src, w, rgb2yuv, srcR, srcG, srcB, x, tmp1, tmp2, tmp3, tmp4
    VBROADCASTSS m10, dword [rgb2yuvq + RY_IDX*4] ; ry
    VBROADCASTSS m11, dword [rgb2yuvq + GY_IDX*4] ; gy
    VBROADCASTSS m12, dword [rgb2yuvq + BY_IDX*4] ; by
    pxor m9, m9

    %if %4
        movu m8, [pd_65535f]
    %endif

    %if cpuflag(sse4) || mmsize > 16
        movu m7, [pb_pack_shuffle16le]
        %if %3 && %2 > 8 && %2 <= 16
            movu m8,  [pb_shuffle16be]
        %endif
    %endif

    mov           xq, OFFSET
    movq         xm6, xq
    VBROADCASTSS  m6, xm6

    mov srcGq, [srcq +  0]
    mov srcBq, [srcq +  8]
    mov srcRq, [srcq + 16]

    xor xq, xq
    %%loop_x:
        %if %4
            LOADF32 0, [srcRq + xq*4], %3, 1
            LOADF32 1, [srcGq + xq*4], %3, 1
            LOADF32 2, [srcBq + xq*4], %3, 1
        %elif %2 == 8
            LOAD8_RGB
        %else
            LOAD16 0, [srcRq + xq*2], %3
            LOAD16 1, [srcGq + xq*2], %3
            LOAD16 2, [srcBq + xq*2], %3
        %endif

        PMUL      R, R, m10 ; r*ry
        PMUL      G, G, m11 ; g*gy
        PMUL      B, B, m12 ; b*by
        paddd    m0, m6       ; + OFFSET
        paddd     B, G
        paddd    m0, B
        psrad    m0, RSHIFT
        STORE16 [dstq + 2*xq], 0, 1

        %if cpuflag(avx2) || cpuflag(sse4)
            add xq, mmsize/4
        %else
            add xd, 2
        %endif
        cmp xd, wd
        jl %%loop_x
RET
%endmacro

; 1 - name
; 2 - depth
; 3 - is big endian
; 4 - is float
; in sse2 mode only 2 values are done per loop, due to lack of pmulld instruction
%macro planar_rgb_to_uv_fn 4
%if %2 == 8
    %define OFFSET (0x4001<<(RGB2YUV_SHIFT-7))
    %define RSHIFT (RGB2YUV_SHIFT-6)
%else
    %if %2 < 16
        %define SHIFT %2
        %define BPC %2
    %else
        %define SHIFT 14
        %define BPC 16
    %endif
    %define OFFSET ((128 << (RGB2YUV_SHIFT + BPC - 8)) + (1 << (RGB2YUV_SHIFT + SHIFT - 15)))
    %define RSHIFT (RGB2YUV_SHIFT + SHIFT - 14)
%endif
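; e.g. for 8-bit input: OFFSET = 0x4001 << 8 = 0x400100 (the same bias plus
; rounding constant as rgb_UVrnd above) and RSHIFT = 9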
cglobal planar_%1_to_uv, 5, 12, 16, dstU, dstV, src, w, rgb2yuv, srcR, srcG, srcB, x, tmp1, tmp2, tmp3
    VBROADCASTSS m10, dword [rgb2yuvq + RU_IDX*4] ; ru
    VBROADCASTSS m11, dword [rgb2yuvq + GU_IDX*4] ; gu
    VBROADCASTSS m12, dword [rgb2yuvq + BU_IDX*4] ; bu
    VBROADCASTSS m13, dword [rgb2yuvq + RV_IDX*4] ; rv
    VBROADCASTSS m14, dword [rgb2yuvq + GV_IDX*4] ; gv
    VBROADCASTSS m15, dword [rgb2yuvq + BV_IDX*4] ; bv
    pxor m9, m9

    %if %4
        movu m8, [pd_65535f]
    %endif

    %if cpuflag(sse4) || mmsize > 16
        movu m7, [pb_pack_shuffle16le]
        %if %3 && %2 > 8 && %2 <= 16
            movu m8,  [pb_shuffle16be]
        %endif
    %endif

    mov          xq, OFFSET
    movq        xm6, xq
    VBROADCASTSS m6, xm6

    mov srcGq, [srcq +  0]
    mov srcBq, [srcq +  8]
    mov srcRq, [srcq + 16]

    xor xq, xq
    %%loop_x:
        %if %4
            LOADF32 0, [srcRq + xq*4], %3, 1
            LOADF32 1, [srcGq + xq*4], %3, 1
            LOADF32 2, [srcBq + xq*4], %3, 1
        %elif %2 == 8
            LOAD8_RGB
        %else
            LOAD16 0, [srcRq + xq*2], %3
            LOAD16 1, [srcGq + xq*2], %3
            LOAD16 2, [srcBq + xq*2], %3
        %endif

        PMUL      m5, R, m10 ; r*ru
        PMUL      m4, G, m11 ; g*gu
        paddd     m4, m5
        PMUL      m5, B, m12 ; b*bu
        paddd     m4, m6     ; + OFFSET
        paddd     m4, m5
        psrad     m4, RSHIFT
        STORE16 [dstUq + 2*xq], 4, 1

        PMUL      R, R, m13 ; r*rv
        PMUL      G, G, m14 ; g*gv
        PMUL      B, B, m15 ; b*bv
        paddd    m0, m6     ; + OFFSET
        paddd     B, G
        paddd    m0, B
        psrad    m0, RSHIFT
        STORE16 [dstVq + 2*xq], 0, 1

        %if cpuflag(avx2) || cpuflag(sse4)
            add xd, mmsize/4
        %else
            add xd, 2
        %endif
        cmp xd, wd
        jl %%loop_x
RET
%endmacro

; 1 - name
; 2 - depth
; 3 - is big endian
; 4 - is float
%macro planar_rgb_to_a_fn 4
cglobal planar_%1_to_a, 4, 6, 10, dst, src, w, rgb2yuv, srcA, x
    %if %4 && (cpuflag(sse4) || mmsize > 16)
        movu m7, [pb_pack_shuffle16le]
    %elif %3 && (cpuflag(sse4) || mmsize > 16)
        movu m7, [pb_shuffle16be]
    %endif

    %if %4
        movu m8, [pd_65535f]
    %endif

    pxor   m9, m9
    mov srcAq, [srcq +  24]
    xor    xq, xq
    %%loop_x:
        %if %4 ; float
            LOADF32 0, [srcAq + xq*4], %3, 0
            STORE16 [dstq + xq*2], 0, 0
            add xq, mmsize/4
        %elif %2 == 8
            ; only need to convert 8bit value to 16bit
            %if cpuflag(sse4) || mmsize > 16
                pmovzxbw  m0, [srcAq + xq]
            %else
                movsd     m0, [srcAq + xq]
                punpcklbw m0, m9 ; interleave bytes with zero
            %endif
            psllw m0, 6
            movu [dstq + xq*2], m0
            add xq, mmsize/2
        %else
            ; only need to convert 16bit format to 16le
            movu m0, [srcAq + xq*2]
            %if %3 ; bigendian
                %if cpuflag(sse4) || mmsize > 16
                    pshufb m0, m7 ; [pb_shuffle16be]
                %else
                    psrlw  m7, m0, 8
                    psllw  m0, 8
                    por    m0, m7
                %endif
            %endif
            %if %2 < 16
                psllw m0, (14 - %2)
            %endif
            movu [dstq + xq*2], m0
            add xq, mmsize/2
        %endif
        cmp xd, wd
        jl %%loop_x
RET
%endmacro

; 1 - name
; 2 - depth
; 3 - is float
%macro planer_rgbxx_y_fn_decl 3
planar_rgb_to_y_fn  %1le,  %2, 0, %3
planar_rgb_to_y_fn  %1be,  %2, 1, %3
%endmacro

; 1 - name
; 2 - depth
; 3 - is float
%macro planer_rgbxx_uv_fn_decl 3
planar_rgb_to_uv_fn  %1le,  %2, 0, %3
planar_rgb_to_uv_fn  %1be,  %2, 1, %3
%endmacro

; 1 - name
; 2 - depth
; 3 - is float
%macro planer_rgbxx_a_fn_decl 3
planar_rgb_to_a_fn  %1le,  %2, 0, %3
planar_rgb_to_a_fn  %1be,  %2, 1, %3
%endmacro

%macro planar_rgb_y_all_fn_decl 0
planar_rgb_to_y_fn        rgb,  8, 0, 0
planer_rgbxx_y_fn_decl   rgb9,  9, 0
planer_rgbxx_y_fn_decl  rgb10, 10, 0
planer_rgbxx_y_fn_decl  rgb12, 12, 0
planer_rgbxx_y_fn_decl  rgb14, 14, 0
planer_rgbxx_y_fn_decl  rgb16, 16, 0
planer_rgbxx_y_fn_decl rgbf32, 32, 1
%endmacro

%macro planar_rgb_uv_all_fn_decl 0
planar_rgb_to_uv_fn        rgb,  8, 0, 0
planer_rgbxx_uv_fn_decl   rgb9,  9, 0
planer_rgbxx_uv_fn_decl  rgb10, 10, 0
planer_rgbxx_uv_fn_decl  rgb12, 12, 0
planer_rgbxx_uv_fn_decl  rgb14, 14, 0
planer_rgbxx_uv_fn_decl  rgb16, 16, 0
planer_rgbxx_uv_fn_decl rgbf32, 32, 1
%endmacro

%macro planar_rgb_a_all_fn_decl 0
planar_rgb_to_a_fn        rgb,  8, 0, 0
planer_rgbxx_a_fn_decl  rgb10, 10, 0
planer_rgbxx_a_fn_decl  rgb12, 12, 0
planer_rgbxx_a_fn_decl  rgb16, 16, 0
planer_rgbxx_a_fn_decl rgbf32, 32, 1
%endmacro

; sse2 to_y only matches c speed with current implementation
; except on floating point formats
INIT_XMM sse2
planer_rgbxx_y_fn_decl rgbf32, 32, 1
planar_rgb_uv_all_fn_decl
planar_rgb_a_all_fn_decl

; sse4 to_a conversions are just the sse2 ones
; except on floating point formats
INIT_XMM sse4
planar_rgb_y_all_fn_decl
planar_rgb_uv_all_fn_decl
planer_rgbxx_a_fn_decl rgbf32, 32, 1

%if HAVE_AVX2_EXTERNAL
INIT_YMM avx2
planar_rgb_y_all_fn_decl
planar_rgb_uv_all_fn_decl
planar_rgb_a_all_fn_decl
%endif

%endif ; ARCH_X86_64