;*****************************************************************************
;* x86inc.asm: x264asm abstraction layer
;*****************************************************************************
;* Copyright (C) 2005-2018 x264 project
;*
;* Authors: Loren Merritt <lorenm@u.washington.edu>
;*          Henrik Gramner <henrik@gramner.com>
;*          Anton Mitrofanov <BugMaster@narod.ru>
;*          Fiona Glaser <fiona@x264.com>
;*
;* Permission to use, copy, modify, and/or distribute this software for any
;* purpose with or without fee is hereby granted, provided that the above
;* copyright notice and this permission notice appear in all copies.
;*
;* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
;* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
;* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
;* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
;* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
;* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
;* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
;*****************************************************************************

; This is a header file for the x264ASM assembly language, which uses
; NASM/YASM syntax combined with a large number of macros to provide easy
; abstraction between different calling conventions (x86_32, win64, linux64).
; It also has various other useful features to simplify writing the kind of
; DSP functions that are most often used in x264.

; Unlike the rest of x264, this file is available under an ISC license, as it
; has significant usefulness outside of x264 and we want it to be available
; to the largest audience possible.  Of course, if you modify it for your own
; purposes to add a new feature, we strongly encourage contributing a patch
; as this feature might be useful for others as well.  Send patches or ideas
; to x264-devel@videolan.org .

%ifndef private_prefix
    %define private_prefix x264
%endif

%ifndef public_prefix
    %define public_prefix private_prefix
%endif

%if HAVE_ALIGNED_STACK
    %define STACK_ALIGNMENT 16
%endif
%ifndef STACK_ALIGNMENT
    %if ARCH_X86_64
        %define STACK_ALIGNMENT 16
    %else
        %define STACK_ALIGNMENT 4
    %endif
%endif
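
; The build system is expected to predefine ARCH_X86_64 and HAVE_ALIGNED_STACK
; before this file is included; an illustrative (assumed, not canonical)
; invocation would be:
;     nasm -DARCH_X86_64=1 -DHAVE_ALIGNED_STACK=1 -Dprivate_prefix=mylib foo.asm
; STACK_ALIGNMENT can likewise be predefined to override the defaults above.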

%define WIN64  0
%define UNIX64 0
%if ARCH_X86_64
    %ifidn __OUTPUT_FORMAT__,win32
        %define WIN64  1
    %elifidn __OUTPUT_FORMAT__,win64
        %define WIN64  1
    %elifidn __OUTPUT_FORMAT__,x64
        %define WIN64  1
    %else
        %define UNIX64 1
    %endif
%endif

%define FORMAT_ELF 0
%ifidn __OUTPUT_FORMAT__,elf
    %define FORMAT_ELF 1
%elifidn __OUTPUT_FORMAT__,elf32
    %define FORMAT_ELF 1
%elifidn __OUTPUT_FORMAT__,elf64
    %define FORMAT_ELF 1
%endif

%ifdef PREFIX
    %define mangle(x) _ %+ x
%else
    %define mangle(x) x
%endif

; aout does not support align=
; NOTE: This section is out of sync with x264, in order to
; keep supporting OS/2.
%macro SECTION_RODATA 0-1 16
    %ifidn __OUTPUT_FORMAT__,aout
        SECTION .text
    %elifidn __OUTPUT_FORMAT__,coff
        SECTION .text
    %elifidn __OUTPUT_FORMAT__,win32
        SECTION .rdata align=%1
    %elif WIN64
        SECTION .rdata align=%1
    %else
        SECTION .rodata align=%1
    %endif
%endmacro

%if WIN64
    %define PIC
%elif ARCH_X86_64 == 0
; x86_32 doesn't require PIC.
; Some distros prefer shared objects to be PIC, but nothing breaks if
; the code contains a few textrels, so we'll skip that complexity.
    %undef PIC
%endif
%ifdef PIC
    default rel
%endif

%macro CPUNOP 1
    %if HAVE_CPUNOP
        CPU %1
    %endif
%endmacro

; Macros to eliminate most code duplication between x86_32 and x86_64:
; Currently this works only for leaf functions which load all their arguments
; into registers at the start, and make no other use of the stack. Luckily that
; covers most of x264's asm.

; PROLOGUE:
; %1 = number of arguments. loads them from stack if needed.
; %2 = number of registers used. pushes callee-saved regs if needed.
; %3 = number of xmm registers used. pushes callee-saved xmm regs if needed.
; %4 = (optional) stack size to be allocated. The stack will be aligned before
;      allocating the specified stack size. If the required stack alignment is
;      larger than the known stack alignment the stack will be manually aligned
;      and an extra register will be allocated to hold the original stack
;      pointer (to not invalidate r0m etc.). To prevent the use of an extra
;      register as stack pointer, request a negative stack size.
; %4+/%5+ = list of names to define to registers
; PROLOGUE can also be invoked by adding the same options to cglobal

; e.g.
; cglobal foo, 2,3,7,0x40, dst, src, tmp
; declares a function (foo) that automatically loads two arguments (dst and
; src) into registers, uses one additional register (tmp) plus 7 vector
; registers (m0-m6) and allocates 0x40 bytes of stack space.

; TODO Some functions can use some args directly from the stack. If they're the
; last args then you can just not declare them, but if they're in the middle
; we need a more flexible macro.

; RET:
; Pops anything that was pushed by PROLOGUE, and returns.

; REP_RET:
; Use this instead of RET if it's a branch target.

; registers:
; rN and rNq are the native-size register holding function argument N
; rNd, rNw, rNb are dword, word, and byte size
; rNh is the high 8 bits of the word size
; rNm is the original location of arg N (a register or on the stack), dword
; rNmp is native size
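
; A minimal end-to-end sketch (illustrative only; the function and argument
; names are hypothetical, not part of this header):
;     INIT_XMM sse2
;     cglobal xor_block, 2,2,2, dst, src ; 2 args, 2 gprs, 2 xmm regs
;         movu  m0, [dstq]
;         movu  m1, [srcq]
;         pxor  m0, m1
;         movu  [dstq], m0
;         RET
; On x86_32 the two arguments are loaded from the stack into r0/r1; on x86_64
; they already arrive in registers, and the same source assembles for both.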

%macro DECLARE_REG 2-3
    %define r%1q %2
    %define r%1d %2d
    %define r%1w %2w
    %define r%1b %2b
    %define r%1h %2h
    %define %2q %2
    %if %0 == 2
        %define r%1m  %2d
        %define r%1mp %2
    %elif ARCH_X86_64 ; memory
        %define r%1m [rstk + stack_offset + %3]
        %define r%1mp qword r %+ %1 %+ m
    %else
        %define r%1m [rstk + stack_offset + %3]
        %define r%1mp dword r %+ %1 %+ m
    %endif
    %define r%1  %2
%endmacro

%macro DECLARE_REG_SIZE 3
    %define r%1q r%1
    %define e%1q r%1
    %define r%1d e%1
    %define e%1d e%1
    %define r%1w %1
    %define e%1w %1
    %define r%1h %3
    %define e%1h %3
    %define r%1b %2
    %define e%1b %2
    %if ARCH_X86_64 == 0
        %define r%1 e%1
    %endif
%endmacro

DECLARE_REG_SIZE ax, al, ah
DECLARE_REG_SIZE bx, bl, bh
DECLARE_REG_SIZE cx, cl, ch
DECLARE_REG_SIZE dx, dl, dh
DECLARE_REG_SIZE si, sil, null
DECLARE_REG_SIZE di, dil, null
DECLARE_REG_SIZE bp, bpl, null

; t# defines for when per-arch register allocation is more complex than just function arguments

%macro DECLARE_REG_TMP 1-*
    %assign %%i 0
    %rep %0
        CAT_XDEFINE t, %%i, r%1
        %assign %%i %%i+1
        %rotate 1
    %endrep
%endmacro
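
; e.g. DECLARE_REG_TMP 2,3,5 makes t0 an alias of r2, t1 of r3 and t2 of r5,
; so one implementation can be written in terms of t# while each arch picks
; its own underlying registers. (The register choice here is illustrative.)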

%macro DECLARE_REG_TMP_SIZE 0-*
    %rep %0
        %define t%1q t%1 %+ q
        %define t%1d t%1 %+ d
        %define t%1w t%1 %+ w
        %define t%1h t%1 %+ h
        %define t%1b t%1 %+ b
        %rotate 1
    %endrep
%endmacro

DECLARE_REG_TMP_SIZE 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14

%if ARCH_X86_64
    %define gprsize 8
%else
    %define gprsize 4
%endif

%macro PUSH 1
    push %1
    %ifidn rstk, rsp
        %assign stack_offset stack_offset+gprsize
    %endif
%endmacro

%macro POP 1
    pop %1
    %ifidn rstk, rsp
        %assign stack_offset stack_offset-gprsize
    %endif
%endmacro

%macro PUSH_IF_USED 1-*
    %rep %0
        %if %1 < regs_used
            PUSH r%1
        %endif
        %rotate 1
    %endrep
%endmacro

%macro POP_IF_USED 1-*
    %rep %0
        %if %1 < regs_used
            pop r%1
        %endif
        %rotate 1
    %endrep
%endmacro

%macro LOAD_IF_USED 1-*
    %rep %0
        %if %1 < num_args
            mov r%1, r %+ %1 %+ mp
        %endif
        %rotate 1
    %endrep
%endmacro

%macro SUB 2
    sub %1, %2
    %ifidn %1, rstk
        %assign stack_offset stack_offset+(%2)
    %endif
%endmacro

%macro ADD 2
    add %1, %2
    %ifidn %1, rstk
        %assign stack_offset stack_offset-(%2)
    %endif
%endmacro

%macro movifnidn 2
    %ifnidn %1, %2
        mov %1, %2
    %endif
%endmacro

%macro movsxdifnidn 2
    %ifnidn %1, %2
        movsxd %1, %2
    %endif
%endmacro

%macro ASSERT 1
    %if (%1) == 0
        %error assertion ``%1'' failed
    %endif
%endmacro

%macro DEFINE_ARGS 0-*
    %ifdef n_arg_names
        %assign %%i 0
        %rep n_arg_names
            CAT_UNDEF arg_name %+ %%i, q
            CAT_UNDEF arg_name %+ %%i, d
            CAT_UNDEF arg_name %+ %%i, w
            CAT_UNDEF arg_name %+ %%i, h
            CAT_UNDEF arg_name %+ %%i, b
            CAT_UNDEF arg_name %+ %%i, m
            CAT_UNDEF arg_name %+ %%i, mp
            CAT_UNDEF arg_name, %%i
            %assign %%i %%i+1
        %endrep
    %endif

    %xdefine %%stack_offset stack_offset
    %undef stack_offset ; so that the current value of stack_offset doesn't get baked in by xdefine
    %assign %%i 0
    %rep %0
        %xdefine %1q r %+ %%i %+ q
        %xdefine %1d r %+ %%i %+ d
        %xdefine %1w r %+ %%i %+ w
        %xdefine %1h r %+ %%i %+ h
        %xdefine %1b r %+ %%i %+ b
        %xdefine %1m r %+ %%i %+ m
        %xdefine %1mp r %+ %%i %+ mp
        CAT_XDEFINE arg_name, %%i, %1
        %assign %%i %%i+1
        %rotate 1
    %endrep
    %xdefine stack_offset %%stack_offset
    %assign n_arg_names %0
%endmacro
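
; e.g. DEFINE_ARGS dst, src, len renames r0..r2 (and all their size/memory
; variants) to dstq/dstd/..., srcq/..., lenq/... mid-function, which is handy
; when a register is reused for a new purpose. (The names are illustrative.)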

%define required_stack_alignment ((mmsize + 15) & ~15)
%define vzeroupper_required (mmsize > 16 && (ARCH_X86_64 == 0 || xmm_regs_used > 16 || notcpuflag(avx512)))
%define high_mm_regs (16*cpuflag(avx512))

%macro ALLOC_STACK 1-2 0 ; stack_size, n_xmm_regs (for win64 only)
    %ifnum %1
        %if %1 != 0
            %assign %%pad 0
            %assign stack_size %1
            %if stack_size < 0
                %assign stack_size -stack_size
            %endif
            %if WIN64
                %assign %%pad %%pad + 32 ; shadow space
                %if mmsize != 8
                    %assign xmm_regs_used %2
                    %if xmm_regs_used > 8
                        %assign %%pad %%pad + (xmm_regs_used-8)*16 ; callee-saved xmm registers
                    %endif
                %endif
            %endif
            %if required_stack_alignment <= STACK_ALIGNMENT
                ; maintain the current stack alignment
                %assign stack_size_padded stack_size + %%pad + ((-%%pad-stack_offset-gprsize) & (STACK_ALIGNMENT-1))
                SUB rsp, stack_size_padded
            %else
                %assign %%reg_num (regs_used - 1)
                %xdefine rstk r %+ %%reg_num
                ; align stack, and save original stack location directly above
                ; it, i.e. in [rsp+stack_size_padded], so we can restore the
                ; stack in a single instruction (i.e. mov rsp, rstk or mov
                ; rsp, [rsp+stack_size_padded])
                %if %1 < 0 ; need to store rsp on stack
                    %xdefine rstkm [rsp + stack_size + %%pad]
                    %assign %%pad %%pad + gprsize
                %else ; can keep rsp in rstk during whole function
                    %xdefine rstkm rstk
                %endif
                %assign stack_size_padded stack_size + ((%%pad + required_stack_alignment-1) & ~(required_stack_alignment-1))
                mov rstk, rsp
                and rsp, ~(required_stack_alignment-1)
                sub rsp, stack_size_padded
                movifnidn rstkm, rstk
            %endif
            WIN64_PUSH_XMM
        %endif
    %endif
%endmacro
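
; e.g. (illustrative parameters) cglobal foo, 2,3,8,0x20, ... allocates 0x20
; bytes via ALLOC_STACK, and [rsp] through [rsp+0x1f] may then be used freely.
; With mmsize == 32 the required 32-byte alignment exceeds the default
; 16-byte STACK_ALIGNMENT, so an extra register is reserved to hold the
; original rsp as described above.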

%macro SETUP_STACK_POINTER 1
    %ifnum %1
        %if %1 != 0 && required_stack_alignment > STACK_ALIGNMENT
            %if %1 > 0
                ; Reserve an additional register for storing the original stack pointer, but avoid using
                ; eax/rax for this purpose since it can potentially get overwritten as a return value.
                %assign regs_used (regs_used + 1)
                %if ARCH_X86_64 && regs_used == 7
                    %assign regs_used 8
                %elif ARCH_X86_64 == 0 && regs_used == 1
                    %assign regs_used 2
                %endif
            %endif
            %if ARCH_X86_64 && regs_used < 5 + UNIX64 * 3
                ; Ensure that we don't clobber any registers containing arguments. For UNIX64 we also preserve r6 (rax)
                ; since it's used as a hidden argument in vararg functions to specify the number of vector registers used.
                %assign regs_used 5 + UNIX64 * 3
            %endif
        %endif
    %endif
%endmacro

%if WIN64 ; Windows x64 ;=================================================

DECLARE_REG 0,  rcx
DECLARE_REG 1,  rdx
DECLARE_REG 2,  R8
DECLARE_REG 3,  R9
DECLARE_REG 4,  R10, 40
DECLARE_REG 5,  R11, 48
DECLARE_REG 6,  rax, 56
DECLARE_REG 7,  rdi, 64
DECLARE_REG 8,  rsi, 72
DECLARE_REG 9,  rbx, 80
DECLARE_REG 10, rbp, 88
DECLARE_REG 11, R14, 96
DECLARE_REG 12, R15, 104
DECLARE_REG 13, R12, 112
DECLARE_REG 14, R13, 120

%macro PROLOGUE 2-5+ 0, 0 ; #args, #regs, #xmm_regs, [stack_size,] arg_names...
    %assign num_args %1
    %assign regs_used %2
    ASSERT regs_used >= num_args
    SETUP_STACK_POINTER %4
    ASSERT regs_used <= 15
    PUSH_IF_USED 7, 8, 9, 10, 11, 12, 13, 14
    ALLOC_STACK %4, %3
    %if mmsize != 8 && stack_size == 0
        WIN64_SPILL_XMM %3
    %endif
    LOAD_IF_USED 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
    %if %0 > 4
        %ifnum %4
            DEFINE_ARGS %5
        %else
            DEFINE_ARGS %4, %5
        %endif
    %elifnnum %4
        DEFINE_ARGS %4
    %endif
%endmacro

%macro WIN64_PUSH_XMM 0
    ; Use the shadow space to store XMM6 and XMM7, the rest needs stack space allocated.
    %if xmm_regs_used > 6 + high_mm_regs
        movaps [rstk + stack_offset +  8], xmm6
    %endif
    %if xmm_regs_used > 7 + high_mm_regs
        movaps [rstk + stack_offset + 24], xmm7
    %endif
    %assign %%xmm_regs_on_stack xmm_regs_used - high_mm_regs - 8
    %if %%xmm_regs_on_stack > 0
        %assign %%i 8
        %rep %%xmm_regs_on_stack
            movaps [rsp + (%%i-8)*16 + stack_size + 32], xmm %+ %%i
            %assign %%i %%i+1
        %endrep
    %endif
%endmacro

%macro WIN64_SPILL_XMM 1
    %assign xmm_regs_used %1
    ASSERT xmm_regs_used <= 16 + high_mm_regs
    %assign %%xmm_regs_on_stack xmm_regs_used - high_mm_regs - 8
    %if %%xmm_regs_on_stack > 0
        ; Allocate stack space for callee-saved xmm registers plus shadow space and align the stack.
        %assign %%pad %%xmm_regs_on_stack*16 + 32
        %assign stack_size_padded %%pad + ((-%%pad-stack_offset-gprsize) & (STACK_ALIGNMENT-1))
        SUB rsp, stack_size_padded
    %endif
    WIN64_PUSH_XMM
%endmacro

%macro WIN64_RESTORE_XMM_INTERNAL 0
    %assign %%pad_size 0
    %assign %%xmm_regs_on_stack xmm_regs_used - high_mm_regs - 8
    %if %%xmm_regs_on_stack > 0
        %assign %%i xmm_regs_used - high_mm_regs
        %rep %%xmm_regs_on_stack
            %assign %%i %%i-1
            movaps xmm %+ %%i, [rsp + (%%i-8)*16 + stack_size + 32]
        %endrep
    %endif
    %if stack_size_padded > 0
        %if stack_size > 0 && required_stack_alignment > STACK_ALIGNMENT
            mov rsp, rstkm
        %else
            add rsp, stack_size_padded
            %assign %%pad_size stack_size_padded
        %endif
    %endif
    %if xmm_regs_used > 7 + high_mm_regs
        movaps xmm7, [rsp + stack_offset - %%pad_size + 24]
    %endif
    %if xmm_regs_used > 6 + high_mm_regs
        movaps xmm6, [rsp + stack_offset - %%pad_size +  8]
    %endif
%endmacro

%macro WIN64_RESTORE_XMM 0
    WIN64_RESTORE_XMM_INTERNAL
    %assign stack_offset (stack_offset-stack_size_padded)
    %assign stack_size_padded 0
    %assign xmm_regs_used 0
%endmacro

%define has_epilogue regs_used > 7 || stack_size > 0 || vzeroupper_required || xmm_regs_used > 6+high_mm_regs

%macro RET 0
    WIN64_RESTORE_XMM_INTERNAL
    POP_IF_USED 14, 13, 12, 11, 10, 9, 8, 7
    %if vzeroupper_required
        vzeroupper
    %endif
    AUTO_REP_RET
%endmacro

%elif ARCH_X86_64 ; *nix x64 ;=============================================

DECLARE_REG 0,  rdi
DECLARE_REG 1,  rsi
DECLARE_REG 2,  rdx
DECLARE_REG 3,  rcx
DECLARE_REG 4,  R8
DECLARE_REG 5,  R9
DECLARE_REG 6,  rax, 8
DECLARE_REG 7,  R10, 16
DECLARE_REG 8,  R11, 24
DECLARE_REG 9,  rbx, 32
DECLARE_REG 10, rbp, 40
DECLARE_REG 11, R14, 48
DECLARE_REG 12, R15, 56
DECLARE_REG 13, R12, 64
DECLARE_REG 14, R13, 72

%macro PROLOGUE 2-5+ 0, 0 ; #args, #regs, #xmm_regs, [stack_size,] arg_names...
    %assign num_args %1
    %assign regs_used %2
    %assign xmm_regs_used %3
    ASSERT regs_used >= num_args
    SETUP_STACK_POINTER %4
    ASSERT regs_used <= 15
    PUSH_IF_USED 9, 10, 11, 12, 13, 14
    ALLOC_STACK %4
    LOAD_IF_USED 6, 7, 8, 9, 10, 11, 12, 13, 14
    %if %0 > 4
        %ifnum %4
            DEFINE_ARGS %5
        %else
            DEFINE_ARGS %4, %5
        %endif
    %elifnnum %4
        DEFINE_ARGS %4
    %endif
%endmacro

%define has_epilogue regs_used > 9 || stack_size > 0 || vzeroupper_required

%macro RET 0
    %if stack_size_padded > 0
        %if required_stack_alignment > STACK_ALIGNMENT
            mov rsp, rstkm
        %else
            add rsp, stack_size_padded
        %endif
    %endif
    POP_IF_USED 14, 13, 12, 11, 10, 9
    %if vzeroupper_required
        vzeroupper
    %endif
    AUTO_REP_RET
%endmacro

%else ; X86_32 ;==============================================================

DECLARE_REG 0, eax, 4
DECLARE_REG 1, ecx, 8
DECLARE_REG 2, edx, 12
DECLARE_REG 3, ebx, 16
DECLARE_REG 4, esi, 20
DECLARE_REG 5, edi, 24
DECLARE_REG 6, ebp, 28
%define rsp esp

%macro DECLARE_ARG 1-*
    %rep %0
        %define r%1m [rstk + stack_offset + 4*%1 + 4]
        %define r%1mp dword r%1m
        %rotate 1
    %endrep
%endmacro

DECLARE_ARG 7, 8, 9, 10, 11, 12, 13, 14

%macro PROLOGUE 2-5+ 0, 0 ; #args, #regs, #xmm_regs, [stack_size,] arg_names...
    %assign num_args %1
    %assign regs_used %2
    ASSERT regs_used >= num_args
    %if num_args > 7
        %assign num_args 7
    %endif
    %if regs_used > 7
        %assign regs_used 7
    %endif
    SETUP_STACK_POINTER %4
    ASSERT regs_used <= 7
    PUSH_IF_USED 3, 4, 5, 6
    ALLOC_STACK %4
    LOAD_IF_USED 0, 1, 2, 3, 4, 5, 6
    %if %0 > 4
        %ifnum %4
            DEFINE_ARGS %5
        %else
            DEFINE_ARGS %4, %5
        %endif
    %elifnnum %4
        DEFINE_ARGS %4
    %endif
%endmacro

%define has_epilogue regs_used > 3 || stack_size > 0 || vzeroupper_required

%macro RET 0
    %if stack_size_padded > 0
        %if required_stack_alignment > STACK_ALIGNMENT
            mov rsp, rstkm
        %else
            add rsp, stack_size_padded
        %endif
    %endif
    POP_IF_USED 6, 5, 4, 3
    %if vzeroupper_required
        vzeroupper
    %endif
    AUTO_REP_RET
%endmacro

%endif ;======================================================================

%if WIN64 == 0
    %macro WIN64_SPILL_XMM 1
    %endmacro
    %macro WIN64_RESTORE_XMM 0
    %endmacro
    %macro WIN64_PUSH_XMM 0
    %endmacro
%endif

; On AMD cpus <=K10, an ordinary ret is slow if it immediately follows either
; a branch or a branch target. So switch to a 2-byte form of ret in that case.
; We can automatically detect "follows a branch", but not a branch target.
; (SSSE3 is a sufficient condition to know that your cpu doesn't have this problem.)
%macro REP_RET 0
    %if has_epilogue || cpuflag(ssse3)
        RET
    %else
        rep ret
    %endif
    annotate_function_size
%endmacro

%define last_branch_adr $$
%macro AUTO_REP_RET 0
    %if notcpuflag(ssse3)
        times ((last_branch_adr-$)>>31)+1 rep ; times 1 iff $ == last_branch_adr.
    %endif
    ret
    annotate_function_size
%endmacro

%macro BRANCH_INSTR 0-*
    %rep %0
        %macro %1 1-2 %1
            %2 %1
            %if notcpuflag(ssse3)
                %%branch_instr equ $
                %xdefine last_branch_adr %%branch_instr
            %endif
        %endmacro
        %rotate 1
    %endrep
%endmacro

BRANCH_INSTR jz, je, jnz, jne, jl, jle, jnl, jnle, jg, jge, jng, jnge, ja, jae, jna, jnae, jb, jbe, jnb, jnbe, jc, jnc, js, jns, jo, jno, jp, jnp

%macro TAIL_CALL 2 ; callee, is_nonadjacent
    %if has_epilogue
        call %1
        RET
    %elif %2
        jmp %1
    %endif
    annotate_function_size
%endmacro
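
; e.g. a function whose slow path hands off to a separate fast path might end
; with (the callee name is purely illustrative):
;     TAIL_CALL foo_fast, 1
; which emits call+RET when PROLOGUE pushed anything, and a bare jmp
; otherwise; with is_nonadjacent == 0 and no epilogue, nothing is emitted and
; execution falls through to the adjacent callee.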

;=============================================================================
; arch-independent part
;=============================================================================

%assign function_align 16

; Begin a function.
; Applies any symbol mangling needed for C linkage, and sets up a define such that
; subsequent uses of the function name automatically refer to the mangled version.
; Appends cpuflags to the function name if cpuflags has been specified.
; The "" empty default parameter is a workaround for nasm, which fails if SUFFIX
; is empty and we call cglobal_internal with just %1 %+ SUFFIX (without %2).
%macro cglobal 1-2+ "" ; name, [PROLOGUE args]
    cglobal_internal 1, %1 %+ SUFFIX, %2
%endmacro
%macro cvisible 1-2+ "" ; name, [PROLOGUE args]
    cglobal_internal 0, %1 %+ SUFFIX, %2
%endmacro
%macro cglobal_internal 2-3+
    annotate_function_size
    %if %1
        %xdefine %%FUNCTION_PREFIX private_prefix
        %xdefine %%VISIBILITY hidden
    %else
        %xdefine %%FUNCTION_PREFIX public_prefix
        %xdefine %%VISIBILITY
    %endif
    %ifndef cglobaled_%2
        %xdefine %2 mangle(%%FUNCTION_PREFIX %+ _ %+ %2)
        %xdefine %2.skip_prologue %2 %+ .skip_prologue
        CAT_XDEFINE cglobaled_, %2, 1
    %endif
    %xdefine current_function %2
    %xdefine current_function_section __SECT__
    %if FORMAT_ELF
        global %2:function %%VISIBILITY
    %else
        global %2
    %endif
    align function_align
    %2:
    RESET_MM_PERMUTATION        ; needed for x86-64, also makes disassembly somewhat nicer
    %xdefine rstk rsp           ; copy of the original stack pointer, used when greater alignment than the known stack alignment is required
    %assign stack_offset 0      ; stack pointer offset relative to the return address
    %assign stack_size 0        ; amount of stack space that can be freely used inside a function
    %assign stack_size_padded 0 ; total amount of allocated stack space, including space for callee-saved xmm registers on WIN64 and alignment padding
    %assign xmm_regs_used 0     ; number of XMM registers requested, used for dealing with callee-saved registers on WIN64 and vzeroupper
    %ifnidn %3, ""
        PROLOGUE %3
    %endif
%endmacro

; Create a global symbol from a local label with the correct name mangling and type
%macro cglobal_label 1
    %if FORMAT_ELF
        global current_function %+ %1:function hidden
    %else
        global current_function %+ %1
    %endif
    %1:
%endmacro
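
; e.g. inside a function an alternate entry point can be exported with:
;     cglobal_label .skip_prologue
; which makes current_function.skip_prologue an external symbol while still
; behaving as a local label. (Usage sketch; the label name is illustrative.)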

%macro cextern 1
    %xdefine %1 mangle(private_prefix %+ _ %+ %1)
    CAT_XDEFINE cglobaled_, %1, 1
    extern %1
%endmacro

; like cextern, but without the prefix
%macro cextern_naked 1
    %ifdef PREFIX
        %xdefine %1 mangle(%1)
    %endif
    CAT_XDEFINE cglobaled_, %1, 1
    extern %1
%endmacro

%macro const 1-2+
    %xdefine %1 mangle(private_prefix %+ _ %+ %1)
    %if FORMAT_ELF
        global %1:data hidden
    %else
        global %1
    %endif
    %1: %2
%endmacro
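
; e.g. (assumed usage; the symbol name is illustrative) a globally visible
; constant can be declared as:
;     SECTION_RODATA
;     const pw_one, times 8 dw 1
; which mangles the symbol, marks it hidden on ELF and emits the data.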

; This is needed for ELF, otherwise the GNU linker assumes the stack is executable by default.
%if FORMAT_ELF
    [SECTION .note.GNU-stack noalloc noexec nowrite progbits]
%endif

; Tell debuggers how large the function was.
; This may be invoked multiple times per function; we rely on later instances overriding earlier ones.
; This is invoked by RET and similar macros, and also cglobal does it for the previous function,
; but if the last function in a source file doesn't use any of the standard macros for its epilogue,
; then its size might be unspecified.
%macro annotate_function_size 0
    %ifdef __YASM_VER__
        %ifdef current_function
            %if FORMAT_ELF
                current_function_section
                %%ecf equ $
                size current_function %%ecf - current_function
                __SECT__
            %endif
        %endif
    %endif
%endmacro

; cpuflags

%assign cpuflags_mmx       (1<<0)
%assign cpuflags_mmx2      (1<<1) | cpuflags_mmx
%assign cpuflags_3dnow     (1<<2) | cpuflags_mmx
%assign cpuflags_3dnowext  (1<<3) | cpuflags_3dnow
%assign cpuflags_sse       (1<<4) | cpuflags_mmx2
%assign cpuflags_sse2      (1<<5) | cpuflags_sse
%assign cpuflags_sse2slow  (1<<6) | cpuflags_sse2
%assign cpuflags_lzcnt     (1<<7) | cpuflags_sse2
%assign cpuflags_sse3      (1<<8) | cpuflags_sse2
%assign cpuflags_ssse3     (1<<9) | cpuflags_sse3
%assign cpuflags_sse4      (1<<10)| cpuflags_ssse3
%assign cpuflags_sse42     (1<<11)| cpuflags_sse4
%assign cpuflags_aesni     (1<<12)| cpuflags_sse42
%assign cpuflags_avx       (1<<13)| cpuflags_sse42
%assign cpuflags_xop       (1<<14)| cpuflags_avx
%assign cpuflags_fma4      (1<<15)| cpuflags_avx
%assign cpuflags_fma3      (1<<16)| cpuflags_avx
%assign cpuflags_bmi1      (1<<17)| cpuflags_avx|cpuflags_lzcnt
%assign cpuflags_bmi2      (1<<18)| cpuflags_bmi1
%assign cpuflags_avx2      (1<<19)| cpuflags_fma3|cpuflags_bmi2
%assign cpuflags_avx512    (1<<20)| cpuflags_avx2 ; F, CD, BW, DQ, VL
%assign cpuflags_avx512icl (1<<25)| cpuflags_avx512

%assign cpuflags_cache32   (1<<21)
%assign cpuflags_cache64   (1<<22)
%assign cpuflags_aligned   (1<<23) ; not a cpu feature, but a function variant
%assign cpuflags_atom      (1<<24)

; Returns a boolean value expressing whether or not the specified cpuflag is enabled.
%define    cpuflag(x) (((((cpuflags & (cpuflags_ %+ x)) ^ (cpuflags_ %+ x)) - 1) >> 31) & 1)
%define notcpuflag(x) (cpuflag(x) ^ 1)
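; How that works: let y = cpuflags_x. If every bit of y is set in cpuflags,
; (cpuflags & y) ^ y == 0, so subtracting 1 gives -1 (all bits set) and
; ((-1) >> 31) & 1 == 1. Otherwise the xor result is a positive value below
; 1<<31 (the flags only occupy the low bits), the subtraction stays
; non-negative, and the shifted-and-masked result is 0.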

; Takes an arbitrary number of cpuflags from the above list.
; All subsequent functions (up to the next INIT_CPUFLAGS) are built for the specified cpu.
; You shouldn't need to invoke this macro directly, it's a subroutine for INIT_MMX &co.
%macro INIT_CPUFLAGS 0-*
    %xdefine SUFFIX
    %undef cpuname
    %assign cpuflags 0

    %if %0 >= 1
        %rep %0
            %ifdef cpuname
                %xdefine cpuname cpuname %+ _%1
            %else
                %xdefine cpuname %1
            %endif
            %assign cpuflags cpuflags | cpuflags_%1
            %rotate 1
        %endrep
        %xdefine SUFFIX _ %+ cpuname

        %if cpuflag(avx)
            %assign avx_enabled 1
        %endif
        %if (mmsize == 16 && notcpuflag(sse2)) || (mmsize == 32 && notcpuflag(avx2))
            %define mova movaps
            %define movu movups
            %define movnta movntps
        %endif
        %if cpuflag(aligned)
            %define movu mova
        %elif cpuflag(sse3) && notcpuflag(ssse3)
            %define movu lddqu
        %endif
    %endif

    %if ARCH_X86_64 || cpuflag(sse2)
        CPUNOP amdnop
    %else
        CPUNOP basicnop
    %endif
%endmacro

; Merge mmx, sse*, and avx*
; m# is a simd register of the currently selected size
; xm# is the corresponding xmm register if mmsize >= 16, otherwise the same as m#
; ym# is the corresponding ymm register if mmsize >= 32, otherwise the same as m#
; zm# is the corresponding zmm register if mmsize >= 64, otherwise the same as m#
; (All 4 remain in sync through SWAP.)

%macro CAT_XDEFINE 3
    %xdefine %1%2 %3
%endmacro

%macro CAT_UNDEF 2
    %undef %1%2
%endmacro

%macro DEFINE_MMREGS 1 ; mmtype
    %assign %%prev_mmregs 0
    %ifdef num_mmregs
        %assign %%prev_mmregs num_mmregs
    %endif

    %assign num_mmregs 8
    %if ARCH_X86_64 && mmsize >= 16
        %assign num_mmregs 16
        %if cpuflag(avx512) || mmsize == 64
            %assign num_mmregs 32
        %endif
    %endif

    %assign %%i 0
    %rep num_mmregs
        CAT_XDEFINE m, %%i, %1 %+ %%i
        CAT_XDEFINE nn%1, %%i, %%i
        %assign %%i %%i+1
    %endrep
    %if %%prev_mmregs > num_mmregs
        %rep %%prev_mmregs - num_mmregs
            CAT_UNDEF m, %%i
            CAT_UNDEF nn %+ mmtype, %%i
            %assign %%i %%i+1
        %endrep
    %endif
    %xdefine mmtype %1
%endmacro

; Prefer registers 16-31 over 0-15 to avoid having to use vzeroupper
%macro AVX512_MM_PERMUTATION 0-1 0 ; start_reg
    %if ARCH_X86_64 && cpuflag(avx512)
        %assign %%i %1
        %rep 16-%1
            %assign %%i_high %%i+16
            SWAP %%i, %%i_high
            %assign %%i %%i+1
        %endrep
    %endif
%endmacro

%macro INIT_MMX 0-1+
    %assign avx_enabled 0
    %define RESET_MM_PERMUTATION INIT_MMX %1
    %define mmsize 8
    %define mova movq
    %define movu movq
    %define movh movd
    %define movnta movntq
    INIT_CPUFLAGS %1
    DEFINE_MMREGS mm
%endmacro

%macro INIT_XMM 0-1+
    %assign avx_enabled 0
    %define RESET_MM_PERMUTATION INIT_XMM %1
    %define mmsize 16
    %define mova movdqa
    %define movu movdqu
    %define movh movq
    %define movnta movntdq
    INIT_CPUFLAGS %1
    DEFINE_MMREGS xmm
    %if WIN64
        AVX512_MM_PERMUTATION 6 ; Swap callee-saved registers with volatile registers
    %endif
%endmacro

%macro INIT_YMM 0-1+
    %assign avx_enabled 1
    %define RESET_MM_PERMUTATION INIT_YMM %1
    %define mmsize 32
    %define mova movdqa
    %define movu movdqu
    %undef movh
    %define movnta movntdq
    INIT_CPUFLAGS %1
    DEFINE_MMREGS ymm
    AVX512_MM_PERMUTATION
%endmacro

%macro INIT_ZMM 0-1+
    %assign avx_enabled 1
    %define RESET_MM_PERMUTATION INIT_ZMM %1
    %define mmsize 64
    %define mova movdqa
    %define movu movdqu
    %undef movh
    %define movnta movntdq
    INIT_CPUFLAGS %1
    DEFINE_MMREGS zmm
    AVX512_MM_PERMUTATION
%endmacro

INIT_XMM

%macro DECLARE_MMCAST 1
    %define  mmmm%1   mm%1
    %define  mmxmm%1  mm%1
    %define  mmymm%1  mm%1
    %define  mmzmm%1  mm%1
    %define xmmmm%1   mm%1
    %define xmmxmm%1 xmm%1
    %define xmmymm%1 xmm%1
    %define xmmzmm%1 xmm%1
    %define ymmmm%1   mm%1
    %define ymmxmm%1 xmm%1
    %define ymmymm%1 ymm%1
    %define ymmzmm%1 ymm%1
    %define zmmmm%1   mm%1
    %define zmmxmm%1 xmm%1
    %define zmmymm%1 ymm%1
    %define zmmzmm%1 zmm%1
    %define xm%1 xmm %+ m%1
    %define ym%1 ymm %+ m%1
    %define zm%1 zmm %+ m%1
%endmacro

%assign i 0
%rep 32
    DECLARE_MMCAST i
    %assign i i+1
%endrep

; I often want to use macros that permute their arguments. e.g. there's no
; efficient way to implement butterfly or transpose or dct without swapping some
; arguments.
;
; I would like to not have to manually keep track of the permutations:
; If I insert a permutation in the middle of a function, it should automatically
; change everything that follows. For more complex macros I may also have multiple
; implementations, e.g. the SSE2 and SSSE3 versions may have different permutations.
;
; Hence these macros. Insert a PERMUTE or some SWAPs at the end of a macro that
; permutes its arguments. It's equivalent to exchanging the contents of the
; registers, except that this way you exchange the register names instead, so it
; doesn't cost any cycles.

%macro PERMUTE 2-* ; takes a list of pairs to swap
    %rep %0/2
        %xdefine %%tmp%2 m%2
        %rotate 2
    %endrep
    %rep %0/2
        %xdefine m%1 %%tmp%2
        CAT_XDEFINE nn, m%1, %1
        %rotate 2
    %endrep
%endmacro

%macro SWAP 2+ ; swaps a single chain (sometimes more concise than pairs)
    %ifnum %1 ; SWAP 0, 1, ...
        SWAP_INTERNAL_NUM %1, %2
    %else ; SWAP m0, m1, ...
        SWAP_INTERNAL_NAME %1, %2
    %endif
%endmacro
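
; e.g. after
;     SWAP 0, 1
; the name m0 refers to the physical register previously called m1 and vice
; versa; no instruction is emitted, only the name mapping changes.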

%macro SWAP_INTERNAL_NUM 2-*
    %rep %0-1
        %xdefine %%tmp m%1
        %xdefine m%1 m%2
        %xdefine m%2 %%tmp
        CAT_XDEFINE nn, m%1, %1
        CAT_XDEFINE nn, m%2, %2
        %rotate 1
    %endrep
%endmacro

%macro SWAP_INTERNAL_NAME 2-*
    %xdefine %%args nn %+ %1
    %rep %0-1
        %xdefine %%args %%args, nn %+ %2
        %rotate 1
    %endrep
    SWAP_INTERNAL_NUM %%args
%endmacro

; If SAVE_MM_PERMUTATION is placed at the end of a function, then any later
; calls to that function will automatically load the permutation, so values can
; be returned in mmregs.
%macro SAVE_MM_PERMUTATION 0-1
    %if %0
        %xdefine %%f %1_m
    %else
        %xdefine %%f current_function %+ _m
    %endif
    %assign %%i 0
    %rep num_mmregs
        CAT_XDEFINE %%f, %%i, m %+ %%i
        %assign %%i %%i+1
    %endrep
%endmacro

%macro LOAD_MM_PERMUTATION 1 ; name to load from
    %ifdef %1_m0
        %assign %%i 0
        %rep num_mmregs
            CAT_XDEFINE m, %%i, %1_m %+ %%i
            CAT_XDEFINE nn, m %+ %%i, %%i
            %assign %%i %%i+1
        %endrep
    %endif
%endmacro
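
; Usage sketch (the helper name is illustrative): a helper that leaves its
; result in a permuted register ends with
;     SAVE_MM_PERMUTATION
;     ret
; and the call macro below restores that permutation at each call site, so
; the caller's m# names line up with what the helper left behind.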

; Append cpuflags to the callee's name iff the appended name is known and the plain name isn't
%macro call 1
    %ifid %1
        call_internal %1 %+ SUFFIX, %1
    %else
        call %1
    %endif
%endmacro
%macro call_internal 2
    %xdefine %%i %2
    %ifndef cglobaled_%2
        %ifdef cglobaled_%1
            %xdefine %%i %1
        %endif
    %endif
    call %%i
    LOAD_MM_PERMUTATION %%i
%endmacro

; Substitutions that reduce instruction size but are functionally equivalent.
; add/sub with an immediate of 128 is rewritten with the opposite sign because
; -128 fits in a sign-extended 8-bit immediate while +128 needs a 32-bit one.
%macro add 2
    %ifnum %2
        %if %2==128
            sub %1, -128
        %else
            add %1, %2
        %endif
    %else
        add %1, %2
    %endif
%endmacro

%macro sub 2
    %ifnum %2
        %if %2==128
            add %1, -128
        %else
            sub %1, %2
        %endif
    %else
        sub %1, %2
    %endif
%endmacro

;=============================================================================
; AVX abstraction layer
;=============================================================================

%assign i 0
%rep 32
    %if i < 8
        CAT_XDEFINE sizeofmm, i, 8
        CAT_XDEFINE regnumofmm, i, i
    %endif
    CAT_XDEFINE sizeofxmm, i, 16
    CAT_XDEFINE sizeofymm, i, 32
    CAT_XDEFINE sizeofzmm, i, 64
    CAT_XDEFINE regnumofxmm, i, i
    CAT_XDEFINE regnumofymm, i, i
    CAT_XDEFINE regnumofzmm, i, i
    %assign i i+1
%endrep
%undef i

%macro CHECK_AVX_INSTR_EMU 3-*
    %xdefine %%opcode %1
    %xdefine %%dst %2
    %rep %0-2
        %ifidn %%dst, %3
            %error non-avx emulation of ``%%opcode'' is not supported
        %endif
        %rotate 1
    %endrep
%endmacro

;%1 == instruction
;%2 == minimal instruction set
;%3 == 1 if float, 0 if int
;%4 == 1 if 4-operand emulation, 0 if 3-operand emulation, 255 otherwise (no emulation)
;%5 == 1 if commutative (i.e. doesn't matter which src arg is which), 0 if not
;%6+: operands
%macro RUN_AVX_INSTR 6-9+
    %ifnum sizeof%7
        %assign __sizeofreg sizeof%7
    %elifnum sizeof%6
        %assign __sizeofreg sizeof%6
    %else
        %assign __sizeofreg mmsize
    %endif
    %assign __emulate_avx 0
    %if avx_enabled && __sizeofreg >= 16
        %xdefine __instr v%1
    %else
        %xdefine __instr %1
        %if %0 >= 8+%4
            %assign __emulate_avx 1
        %endif
    %endif
    %ifnidn %2, fnord
        %ifdef cpuname
            %if notcpuflag(%2)
                %error use of ``%1'' %2 instruction in cpuname function: current_function
            %elif cpuflags_%2 < cpuflags_sse && notcpuflag(sse2) && __sizeofreg > 8
                %error use of ``%1'' sse2 instruction in cpuname function: current_function
            %endif
        %endif
    %endif

    %if __emulate_avx
        %xdefine __src1 %7
        %xdefine __src2 %8
        %if %5 && %4 == 0
            %ifnidn %6, %7
                %ifidn %6, %8
                    %xdefine __src1 %8
                    %xdefine __src2 %7
                %elifnnum sizeof%8
                    ; 3-operand AVX instructions with a memory arg can only have it in src2,
                    ; whereas SSE emulation prefers to have it in src1 (i.e. the mov).
                    ; So, if the instruction is commutative with a memory arg, swap them.
                    %xdefine __src1 %8
                    %xdefine __src2 %7
                %endif
            %endif
        %endif
        %ifnidn %6, __src1
            %if %0 >= 9
                CHECK_AVX_INSTR_EMU {%1 %6, %7, %8, %9}, %6, __src2, %9
            %else
                CHECK_AVX_INSTR_EMU {%1 %6, %7, %8}, %6, __src2
            %endif
            %if __sizeofreg == 8
                MOVQ %6, __src1
            %elif %3
                MOVAPS %6, __src1
            %else
                MOVDQA %6, __src1
            %endif
        %endif
        %if %0 >= 9
            %1 %6, __src2, %9
        %else
            %1 %6, __src2
        %endif
    %elif %0 >= 9
        __instr %6, %7, %8, %9
    %elif %0 == 8
        __instr %6, %7, %8
    %elif %0 == 7
        __instr %6, %7
    %else
        __instr %6
    %endif
%endmacro

;%1 == instruction
;%2 == minimal instruction set
;%3 == 1 if float, 0 if int
;%4 == 1 if 4-operand emulation, 0 if 3-operand emulation, 255 otherwise (no emulation)
;%5 == 1 if commutative (i.e. doesn't matter which src arg is which), 0 if not
%macro AVX_INSTR 1-5 fnord, 0, 255, 0
    %macro %1 1-10 fnord, fnord, fnord, fnord, %1, %2, %3, %4, %5
        %ifidn %2, fnord
            RUN_AVX_INSTR %6, %7, %8, %9, %10, %1
        %elifidn %3, fnord
            RUN_AVX_INSTR %6, %7, %8, %9, %10, %1, %2
        %elifidn %4, fnord
            RUN_AVX_INSTR %6, %7, %8, %9, %10, %1, %2, %3
        %elifidn %5, fnord
            RUN_AVX_INSTR %6, %7, %8, %9, %10, %1, %2, %3, %4
        %else
            RUN_AVX_INSTR %6, %7, %8, %9, %10, %1, %2, %3, %4, %5
        %endif
    %endmacro
%endmacro

; Instructions with both VEX/EVEX and legacy encodings
; Non-destructive instructions are written without parameters
AVX_INSTR addpd, sse2, 1, 0, 1
AVX_INSTR addps, sse, 1, 0, 1
AVX_INSTR addsd, sse2, 1, 0, 0
AVX_INSTR addss, sse, 1, 0, 0
AVX_INSTR addsubpd, sse3, 1, 0, 0
AVX_INSTR addsubps, sse3, 1, 0, 0
AVX_INSTR aesdec, aesni, 0, 0, 0
AVX_INSTR aesdeclast, aesni, 0, 0, 0
AVX_INSTR aesenc, aesni, 0, 0, 0
AVX_INSTR aesenclast, aesni, 0, 0, 0
AVX_INSTR aesimc, aesni
AVX_INSTR aeskeygenassist, aesni
AVX_INSTR andnpd, sse2, 1, 0, 0
AVX_INSTR andnps, sse, 1, 0, 0
AVX_INSTR andpd, sse2, 1, 0, 1
AVX_INSTR andps, sse, 1, 0, 1
AVX_INSTR blendpd, sse4, 1, 1, 0
AVX_INSTR blendps, sse4, 1, 1, 0
AVX_INSTR blendvpd, sse4 ; can't be emulated
AVX_INSTR blendvps, sse4 ; can't be emulated
AVX_INSTR cmpeqpd, sse2, 1, 0, 1
AVX_INSTR cmpeqps, sse, 1, 0, 1
AVX_INSTR cmpeqsd, sse2, 1, 0, 0
AVX_INSTR cmpeqss, sse, 1, 0, 0
AVX_INSTR cmplepd, sse2, 1, 0, 0
AVX_INSTR cmpleps, sse, 1, 0, 0
AVX_INSTR cmplesd, sse2, 1, 0, 0
AVX_INSTR cmpless, sse, 1, 0, 0
AVX_INSTR cmpltpd, sse2, 1, 0, 0
AVX_INSTR cmpltps, sse, 1, 0, 0
AVX_INSTR cmpltsd, sse2, 1, 0, 0
AVX_INSTR cmpltss, sse, 1, 0, 0
AVX_INSTR cmpneqpd, sse2, 1, 0, 1
AVX_INSTR cmpneqps, sse, 1, 0, 1
AVX_INSTR cmpneqsd, sse2, 1, 0, 0
AVX_INSTR cmpneqss, sse, 1, 0, 0
AVX_INSTR cmpnlepd, sse2, 1, 0, 0
AVX_INSTR cmpnleps, sse, 1, 0, 0
AVX_INSTR cmpnlesd, sse2, 1, 0, 0
AVX_INSTR cmpnless, sse, 1, 0, 0
AVX_INSTR cmpnltpd, sse2, 1, 0, 0
AVX_INSTR cmpnltps, sse, 1, 0, 0
AVX_INSTR cmpnltsd, sse2, 1, 0, 0
AVX_INSTR cmpnltss, sse, 1, 0, 0
AVX_INSTR cmpordpd, sse2, 1, 0, 1
AVX_INSTR cmpordps, sse, 1, 0, 1
AVX_INSTR cmpordsd, sse2, 1, 0, 0
AVX_INSTR cmpordss, sse, 1, 0, 0
AVX_INSTR cmppd, sse2, 1, 1, 0
AVX_INSTR cmpps, sse, 1, 1, 0
AVX_INSTR cmpsd, sse2, 1, 1, 0
AVX_INSTR cmpss, sse, 1, 1, 0
AVX_INSTR cmpunordpd, sse2, 1, 0, 1
AVX_INSTR cmpunordps, sse, 1, 0, 1
AVX_INSTR cmpunordsd, sse2, 1, 0, 0
AVX_INSTR cmpunordss, sse, 1, 0, 0
AVX_INSTR comisd, sse2
AVX_INSTR comiss, sse
AVX_INSTR cvtdq2pd, sse2
AVX_INSTR cvtdq2ps, sse2
AVX_INSTR cvtpd2dq, sse2
AVX_INSTR cvtpd2ps, sse2
AVX_INSTR cvtps2dq, sse2
AVX_INSTR cvtps2pd, sse2
AVX_INSTR cvtsd2si, sse2
AVX_INSTR cvtsd2ss, sse2, 1, 0, 0
AVX_INSTR cvtsi2sd, sse2, 1, 0, 0
AVX_INSTR cvtsi2ss, sse, 1, 0, 0
AVX_INSTR cvtss2sd, sse2, 1, 0, 0
AVX_INSTR cvtss2si, sse
AVX_INSTR cvttpd2dq, sse2
AVX_INSTR cvttps2dq, sse2
AVX_INSTR cvttsd2si, sse2
AVX_INSTR cvttss2si, sse
AVX_INSTR divpd, sse2, 1, 0, 0
AVX_INSTR divps, sse, 1, 0, 0
AVX_INSTR divsd, sse2, 1, 0, 0
AVX_INSTR divss, sse, 1, 0, 0
AVX_INSTR dppd, sse4, 1, 1, 0
AVX_INSTR dpps, sse4, 1, 1, 0
AVX_INSTR extractps, sse4
AVX_INSTR haddpd, sse3, 1, 0, 0
AVX_INSTR haddps, sse3, 1, 0, 0
AVX_INSTR hsubpd, sse3, 1, 0, 0
AVX_INSTR hsubps, sse3, 1, 0, 0
AVX_INSTR insertps, sse4, 1, 1, 0
AVX_INSTR lddqu, sse3
AVX_INSTR ldmxcsr, sse
AVX_INSTR maskmovdqu, sse2
AVX_INSTR maxpd, sse2, 1, 0, 1
AVX_INSTR maxps, sse, 1, 0, 1
AVX_INSTR maxsd, sse2, 1, 0, 0
AVX_INSTR maxss, sse, 1, 0, 0
AVX_INSTR minpd, sse2, 1, 0, 1
AVX_INSTR minps, sse, 1, 0, 1
AVX_INSTR minsd, sse2, 1, 0, 0
AVX_INSTR minss, sse, 1, 0, 0
AVX_INSTR movapd, sse2
AVX_INSTR movaps, sse
AVX_INSTR movd, mmx
AVX_INSTR movddup, sse3
AVX_INSTR movdqa, sse2
AVX_INSTR movdqu, sse2
AVX_INSTR movhlps, sse, 1, 0, 0
AVX_INSTR movhpd, sse2, 1, 0, 0
AVX_INSTR movhps, sse, 1, 0, 0
AVX_INSTR movlhps, sse, 1, 0, 0
AVX_INSTR movlpd, sse2, 1, 0, 0
AVX_INSTR movlps, sse, 1, 0, 0
AVX_INSTR movmskpd, sse2
AVX_INSTR movmskps, sse
AVX_INSTR movntdq, sse2
AVX_INSTR movntdqa, sse4
AVX_INSTR movntpd, sse2
AVX_INSTR movntps, sse
AVX_INSTR movq, mmx
AVX_INSTR movsd, sse2, 1, 0, 0
AVX_INSTR movshdup, sse3
AVX_INSTR movsldup, sse3
AVX_INSTR movss, sse, 1, 0, 0
AVX_INSTR movupd, sse2
AVX_INSTR movups, sse
AVX_INSTR mpsadbw, sse4, 0, 1, 0
AVX_INSTR mulpd, sse2, 1, 0, 1
AVX_INSTR mulps, sse, 1, 0, 1
AVX_INSTR mulsd, sse2, 1, 0, 0
AVX_INSTR mulss, sse, 1, 0, 0
AVX_INSTR orpd, sse2, 1, 0, 1
AVX_INSTR orps, sse, 1, 0, 1
AVX_INSTR pabsb, ssse3
AVX_INSTR pabsd, ssse3
AVX_INSTR pabsw, ssse3
AVX_INSTR packsswb, mmx, 0, 0, 0
AVX_INSTR packssdw, mmx, 0, 0, 0
AVX_INSTR packuswb, mmx, 0, 0, 0
AVX_INSTR packusdw, sse4, 0, 0, 0
AVX_INSTR paddb, mmx, 0, 0, 1
AVX_INSTR paddw, mmx, 0, 0, 1
AVX_INSTR paddd, mmx, 0, 0, 1
AVX_INSTR paddq, sse2, 0, 0, 1
AVX_INSTR paddsb, mmx, 0, 0, 1
AVX_INSTR paddsw, mmx, 0, 0, 1
AVX_INSTR paddusb, mmx, 0, 0, 1
AVX_INSTR paddusw, mmx, 0, 0, 1
AVX_INSTR palignr, ssse3, 0, 1, 0
AVX_INSTR pand, mmx, 0, 0, 1
AVX_INSTR pandn, mmx, 0, 0, 0
AVX_INSTR pavgb, mmx2, 0, 0, 1
AVX_INSTR pavgw, mmx2, 0, 0, 1
AVX_INSTR pblendvb, sse4 ; can't be emulated
AVX_INSTR pblendw, sse4, 0, 1, 0
AVX_INSTR pclmulqdq, fnord, 0, 1, 0
AVX_INSTR pclmulhqhqdq, fnord, 0, 0, 0
AVX_INSTR pclmulhqlqdq, fnord, 0, 0, 0
AVX_INSTR pclmullqhqdq, fnord, 0, 0, 0
AVX_INSTR pclmullqlqdq, fnord, 0, 0, 0
AVX_INSTR pcmpestri, sse42
AVX_INSTR pcmpestrm, sse42
AVX_INSTR pcmpistri, sse42
AVX_INSTR pcmpistrm, sse42
AVX_INSTR pcmpeqb, mmx, 0, 0, 1
AVX_INSTR pcmpeqw, mmx, 0, 0, 1
AVX_INSTR pcmpeqd, mmx, 0, 0, 1
AVX_INSTR pcmpeqq, sse4, 0, 0, 1
AVX_INSTR pcmpgtb, mmx, 0, 0, 0
AVX_INSTR pcmpgtw, mmx, 0, 0, 0
AVX_INSTR pcmpgtd, mmx, 0, 0, 0
AVX_INSTR pcmpgtq, sse42, 0, 0, 0
AVX_INSTR pextrb, sse4
AVX_INSTR pextrd, sse4
AVX_INSTR pextrq, sse4
AVX_INSTR pextrw, mmx2
AVX_INSTR phaddw, ssse3, 0, 0, 0
AVX_INSTR phaddd, ssse3, 0, 0, 0
AVX_INSTR phaddsw, ssse3, 0, 0, 0
AVX_INSTR phminposuw, sse4
AVX_INSTR phsubw, ssse3, 0, 0, 0
AVX_INSTR phsubd, ssse3, 0, 0, 0
AVX_INSTR phsubsw, ssse3, 0, 0, 0
AVX_INSTR pinsrb, sse4, 0, 1, 0
AVX_INSTR pinsrd, sse4, 0, 1, 0
AVX_INSTR pinsrq, sse4, 0, 1, 0
AVX_INSTR pinsrw, mmx2, 0, 1, 0
AVX_INSTR pmaddwd, mmx, 0, 0, 1
AVX_INSTR pmaddubsw, ssse3, 0, 0, 0
AVX_INSTR pmaxsb, sse4, 0, 0, 1
AVX_INSTR pmaxsw, mmx2, 0, 0, 1
AVX_INSTR pmaxsd, sse4, 0, 0, 1
AVX_INSTR pmaxub, mmx2, 0, 0, 1
AVX_INSTR pmaxuw, sse4, 0, 0, 1
AVX_INSTR pmaxud, sse4, 0, 0, 1
AVX_INSTR pminsb, sse4, 0, 0, 1
AVX_INSTR pminsw, mmx2, 0, 0, 1
AVX_INSTR pminsd, sse4, 0, 0, 1
AVX_INSTR pminub, mmx2, 0, 0, 1
AVX_INSTR pminuw, sse4, 0, 0, 1
AVX_INSTR pminud, sse4, 0, 0, 1
AVX_INSTR pmovmskb, mmx2
AVX_INSTR pmovsxbw, sse4
AVX_INSTR pmovsxbd, sse4
AVX_INSTR pmovsxbq, sse4
AVX_INSTR pmovsxwd, sse4
AVX_INSTR pmovsxwq, sse4
AVX_INSTR pmovsxdq, sse4
AVX_INSTR pmovzxbw, sse4
AVX_INSTR pmovzxbd, sse4
AVX_INSTR pmovzxbq, sse4
AVX_INSTR pmovzxwd, sse4
AVX_INSTR pmovzxwq, sse4
AVX_INSTR pmovzxdq, sse4
AVX_INSTR pmuldq, sse4, 0, 0, 1
AVX_INSTR pmulhrsw, ssse3, 0, 0, 1
AVX_INSTR pmulhuw, mmx2, 0, 0, 1
AVX_INSTR pmulhw, mmx, 0, 0, 1
AVX_INSTR pmullw, mmx, 0, 0, 1
AVX_INSTR pmulld, sse4, 0, 0, 1
AVX_INSTR pmuludq, sse2, 0, 0, 1
AVX_INSTR por, mmx, 0, 0, 1
AVX_INSTR psadbw, mmx2, 0, 0, 1
AVX_INSTR pshufb, ssse3, 0, 0, 0
AVX_INSTR pshufd, sse2
AVX_INSTR pshufhw, sse2
AVX_INSTR pshuflw, sse2
AVX_INSTR psignb, ssse3, 0, 0, 0
AVX_INSTR psignw, ssse3, 0, 0, 0
AVX_INSTR psignd, ssse3, 0, 0, 0
AVX_INSTR psllw, mmx, 0, 0, 0
AVX_INSTR pslld, mmx, 0, 0, 0
AVX_INSTR psllq, mmx, 0, 0, 0
AVX_INSTR pslldq, sse2, 0, 0, 0
AVX_INSTR psraw, mmx, 0, 0, 0
AVX_INSTR psrad, mmx, 0, 0, 0
AVX_INSTR psrlw, mmx, 0, 0, 0
AVX_INSTR psrld, mmx, 0, 0, 0
AVX_INSTR psrlq, mmx, 0, 0, 0
AVX_INSTR psrldq, sse2, 0, 0, 0
AVX_INSTR psubb, mmx, 0, 0, 0
AVX_INSTR psubw, mmx, 0, 0, 0
AVX_INSTR psubd, mmx, 0, 0, 0
AVX_INSTR psubq, sse2, 0, 0, 0
AVX_INSTR psubsb, mmx, 0, 0, 0
AVX_INSTR psubsw, mmx, 0, 0, 0
AVX_INSTR psubusb, mmx, 0, 0, 0
AVX_INSTR psubusw, mmx, 0, 0, 0
AVX_INSTR ptest, sse4
AVX_INSTR punpckhbw, mmx, 0, 0, 0
AVX_INSTR punpckhwd, mmx, 0, 0, 0
AVX_INSTR punpckhdq, mmx, 0, 0, 0
AVX_INSTR punpckhqdq, sse2, 0, 0, 0
AVX_INSTR punpcklbw, mmx, 0, 0, 0
AVX_INSTR punpcklwd, mmx, 0, 0, 0
AVX_INSTR punpckldq, mmx, 0, 0, 0
AVX_INSTR punpcklqdq, sse2, 0, 0, 0
AVX_INSTR pxor, mmx, 0, 0, 1
AVX_INSTR rcpps, sse
AVX_INSTR rcpss, sse, 1, 0, 0
AVX_INSTR roundpd, sse4
AVX_INSTR roundps, sse4
AVX_INSTR roundsd, sse4, 1, 1, 0
AVX_INSTR roundss, sse4, 1, 1, 0
AVX_INSTR rsqrtps, sse
AVX_INSTR rsqrtss, sse, 1, 0, 0
AVX_INSTR shufpd, sse2, 1, 1, 0
AVX_INSTR shufps, sse, 1, 1, 0
AVX_INSTR sqrtpd, sse2
AVX_INSTR sqrtps, sse
AVX_INSTR sqrtsd, sse2, 1, 0, 0
AVX_INSTR sqrtss, sse, 1, 0, 0
AVX_INSTR stmxcsr, sse
AVX_INSTR subpd, sse2, 1, 0, 0
AVX_INSTR subps, sse, 1, 0, 0
AVX_INSTR subsd, sse2, 1, 0, 0
AVX_INSTR subss, sse, 1, 0, 0
AVX_INSTR ucomisd, sse2
AVX_INSTR ucomiss, sse
AVX_INSTR unpckhpd, sse2, 1, 0, 0
AVX_INSTR unpckhps, sse, 1, 0, 0
AVX_INSTR unpcklpd, sse2, 1, 0, 0
AVX_INSTR unpcklps, sse, 1, 0, 0
AVX_INSTR xorpd, sse2, 1, 0, 1
AVX_INSTR xorps, sse, 1, 0, 1

; 3DNow! instructions, for sharing code between AVX, SSE and 3DNow!
AVX_INSTR pfadd, 3dnow, 1, 0, 1
AVX_INSTR pfsub, 3dnow, 1, 0, 0
AVX_INSTR pfmul, 3dnow, 1, 0, 1

; base-4 constants for shuffles
%assign i 0
%rep 256
    %assign j ((i>>6)&3)*1000 + ((i>>4)&3)*100 + ((i>>2)&3)*10 + (i&3)
    %if j < 10
        CAT_XDEFINE q000, j, i
    %elif j < 100
        CAT_XDEFINE q00, j, i
    %elif j < 1000
        CAT_XDEFINE q0, j, i
    %else
        CAT_XDEFINE q, j, i
    %endif
    %assign i i+1
%endrep
%undef i
%undef j
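
; e.g. q1032 expands to the immediate 0x4e (binary 01 00 11 10), so
;     pshufd m0, m0, q1032
; swaps the two dword pairs; each base-4 digit selects a source element,
; listed from the most significant position to the least significant one.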

%macro FMA_INSTR 3
    %macro %1 4-7 %1, %2, %3
        %if cpuflag(xop)
            v%5 %1, %2, %3, %4
        %elifnidn %1, %4
            %6 %1, %2, %3
            %7 %1, %4
        %else
            %error non-xop emulation of ``%5 %1, %2, %3, %4'' is not supported
        %endif
    %endmacro
%endmacro

FMA_INSTR  pmacsww,  pmullw, paddw
FMA_INSTR  pmacsdd,  pmulld, paddd ; sse4 emulation
FMA_INSTR pmacsdql,  pmuldq, paddq ; sse4 emulation
FMA_INSTR pmadcswd, pmaddwd, paddd

; tzcnt is equivalent to "rep bsf" and is backwards-compatible with bsf.
; This lets us use tzcnt without bumping the yasm version requirement yet.
%define tzcnt rep bsf

; Macros for consolidating FMA3 and FMA4 using 4-operand (dst, src1, src2, src3) syntax.
; FMA3 is only possible if dst is the same as one of the src registers.
; Either src2 or src3 can be a memory operand.
%macro FMA4_INSTR 2-*
    %push fma4_instr
    %xdefine %$prefix %1
    %rep %0 - 1
        %macro %$prefix%2 4-6 %$prefix, %2
            %if notcpuflag(fma3) && notcpuflag(fma4)
                %error use of ``%5%6'' fma instruction in cpuname function: current_function
            %elif cpuflag(fma4)
                v%5%6 %1, %2, %3, %4
            %elifidn %1, %2
                ; If %3 or %4 is a memory operand it needs to be encoded as the last operand.
                %ifnum sizeof%3
                    v%{5}213%6 %2, %3, %4
                %else
                    v%{5}132%6 %2, %4, %3
                %endif
            %elifidn %1, %3
                v%{5}213%6 %3, %2, %4
            %elifidn %1, %4
                v%{5}231%6 %4, %2, %3
            %else
                %error fma3 emulation of ``%5%6 %1, %2, %3, %4'' is not supported
            %endif
        %endmacro
        %rotate 1
    %endrep
    %pop
%endmacro

FMA4_INSTR fmadd,    pd, ps, sd, ss
FMA4_INSTR fmaddsub, pd, ps
FMA4_INSTR fmsub,    pd, ps, sd, ss
FMA4_INSTR fmsubadd, pd, ps
FMA4_INSTR fnmadd,   pd, ps, sd, ss
FMA4_INSTR fnmsub,   pd, ps, sd, ss
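
; e.g. fmaddps m0, m1, m2, m0 computes m0 = m1*m2 + m0; on FMA4 cpus it emits
; vfmaddps, while on FMA3 cpus it is mapped onto vfmadd231ps because the
; destination aliases src3. (The operand choice here is illustrative.)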

; Macros for converting VEX instructions to equivalent EVEX ones.
%macro EVEX_INSTR 2-3 0 ; vex, evex, prefer_evex
    %macro %1 2-7 fnord, fnord, %1, %2, %3
        %ifidn %3, fnord
            %define %%args %1, %2
        %elifidn %4, fnord
            %define %%args %1, %2, %3
        %else
            %define %%args %1, %2, %3, %4
        %endif
        %assign %%evex_required cpuflag(avx512) & %7
        %ifnum regnumof%1
            %if regnumof%1 >= 16 || sizeof%1 > 32
                %assign %%evex_required 1
            %endif
        %endif
        %ifnum regnumof%2
            %if regnumof%2 >= 16 || sizeof%2 > 32
                %assign %%evex_required 1
            %endif
        %endif
        %if %%evex_required
            %6 %%args
        %else
            %5 %%args ; Prefer VEX over EVEX due to shorter instruction length
        %endif
    %endmacro
%endmacro

EVEX_INSTR vbroadcastf128, vbroadcastf32x4
EVEX_INSTR vbroadcasti128, vbroadcasti32x4
EVEX_INSTR vextractf128,   vextractf32x4
EVEX_INSTR vextracti128,   vextracti32x4
EVEX_INSTR vinsertf128,    vinsertf32x4
EVEX_INSTR vinserti128,    vinserti32x4
EVEX_INSTR vmovdqa,        vmovdqa32
EVEX_INSTR vmovdqu,        vmovdqu32
EVEX_INSTR vpand,          vpandd
EVEX_INSTR vpandn,         vpandnd
EVEX_INSTR vpor,           vpord
EVEX_INSTR vpxor,          vpxord
EVEX_INSTR vrcpps,         vrcp14ps,   1 ; EVEX versions have higher precision
EVEX_INSTR vrcpss,         vrcp14ss,   1
EVEX_INSTR vrsqrtps,       vrsqrt14ps, 1
EVEX_INSTR vrsqrtss,       vrsqrt14ss, 1

; workaround: vpbroadcastq is broken in x86_32 due to a yasm bug (fixed in 1.3.0)
%ifdef __YASM_VER__
    %if __YASM_VERSION_ID__ < 0x01030000 && ARCH_X86_64 == 0
        %macro vpbroadcastq 2
            %if sizeof%1 == 16
                movddup %1, %2
            %else
                vbroadcastsd %1, %2
            %endif
        %endmacro
    %endif
%endif
