/*
 * CPU detection code, extracted from mmx.h
 * (c)1997-99 by H. Dietz and R. Fisher
 * Converted to C and improved by Fabrice Bellard.
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stdlib.h>
#include <string.h>

#include "libavutil/x86/asm.h"
#include "libavutil/x86/cpu.h"
#include "libavutil/cpu.h"
#include "libavutil/cpu_internal.h"

#if HAVE_X86ASM

#define cpuid(index, eax, ebx, ecx, edx)        \
    ff_cpu_cpuid(index, &eax, &ebx, &ecx, &edx)

#define xgetbv(index, eax, edx)                 \
    ff_cpu_xgetbv(index, &eax, &edx)

#elif HAVE_INLINE_ASM

/* Saving and restoring ebx is necessary for PIC: on 32-bit PIC, ebx holds
 * the GOT pointer and gcc cannot always preserve it on its own, so stash
 * it in FF_REG_S around the CPUID. */
#define cpuid(index, eax, ebx, ecx, edx)                        \
    __asm__ volatile (                                          \
        "mov    %%"FF_REG_b", %%"FF_REG_S" \n\t"                \
        "cpuid                       \n\t"                      \
        "xchg   %%"FF_REG_b", %%"FF_REG_S                       \
        : "=a" (eax), "=S" (ebx), "=c" (ecx), "=d" (edx)        \
        : "0" (index), "2" (0))

#define xgetbv(index, eax, edx)                                 \
    __asm__ (".byte 0x0f, 0x01, 0xd0" : "=a"(eax), "=d"(edx) : "c" (index))
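
/* The ".byte 0x0f, 0x01, 0xd0" above is the raw encoding of the XGETBV
 * instruction (read the extended control register selected by ECX into
 * EDX:EAX); it is spelled out in bytes so that assemblers predating the
 * XSAVE extension still accept it. */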

#define get_eflags(x)                           \
    __asm__ volatile ("pushfl     \n"           \
                      "pop    %0  \n"           \
                      : "=r"(x))

#define set_eflags(x)                           \
    __asm__ volatile ("push    %0 \n"           \
                      "popfl      \n"           \
                      :: "r"(x))

#endif /* HAVE_INLINE_ASM */
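
/* Both branches above give cpuid() and xgetbv() the same shape: callers
 * pass plain integer lvalues and read the raw register values back, e.g.
 *
 *     int eax, ebx, ecx, edx;
 *     cpuid(1, eax, ebx, ecx, edx);
 *
 * as done throughout ff_get_cpu_flags_x86() below. */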

#if ARCH_X86_64

#define cpuid_test() 1

#elif HAVE_X86ASM

#define cpuid_test ff_cpu_cpuid_test

#elif HAVE_INLINE_ASM

static int cpuid_test(void)
{
    x86_reg a, c;

    /* Check whether CPUID is supported by attempting to toggle the ID bit
     * (bit 21, mask 0x200000) in the EFLAGS register; if the bit can be
     * flipped, the CPU implements CPUID. */
    get_eflags(a);
    set_eflags(a ^ 0x200000);
    get_eflags(c);

    return a != c;
}
#endif

/* Test which multimedia instruction sets are supported. */
int ff_get_cpu_flags_x86(void)
{
    int rval = 0;

#ifdef cpuid

    int eax, ebx, ecx, edx;
    int max_std_level, max_ext_level, std_caps = 0, ext_caps = 0;
    int family = 0, model = 0;
    union { int i[3]; char c[12]; } vendor;
    int xcr0_lo = 0, xcr0_hi = 0;

    if (!cpuid_test())
        return 0; /* CPUID not supported */

    cpuid(0, max_std_level, vendor.i[0], vendor.i[2], vendor.i[1]);
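    /* Leaf 0 returns the highest supported standard leaf in EAX and the
     * vendor string ("GenuineIntel", "AuthenticAMD", ...) split across
     * EBX, EDX and ECX, in that order, hence the i[0]/i[2]/i[1] shuffle
     * in the call above. */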

    if (max_std_level >= 1) {
        cpuid(1, eax, ebx, ecx, std_caps);
        family = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);
        model  = ((eax >> 4) & 0xf) + ((eax >> 12) & 0xf0);
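        /* family and model combine the base and extended CPUID fields.
         * Worked example: EAX = 0x000306C3 (a Haswell) yields
         * family = 0x6 + 0x0 = 6 and model = 0xC + 0x30 = 0x3C (60). */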
        if (std_caps & (1 << 15))
            rval |= AV_CPU_FLAG_CMOV;
        if (std_caps & (1 << 23))
            rval |= AV_CPU_FLAG_MMX;
        if (std_caps & (1 << 25))
            rval |= AV_CPU_FLAG_MMXEXT;
#if HAVE_SSE
        if (std_caps & (1 << 25))
            rval |= AV_CPU_FLAG_SSE;
        if (std_caps & (1 << 26))
            rval |= AV_CPU_FLAG_SSE2;
        if (ecx & 1)
            rval |= AV_CPU_FLAG_SSE3;
        if (ecx & 0x00000200)
            rval |= AV_CPU_FLAG_SSSE3;
        if (ecx & 0x00080000)
            rval |= AV_CPU_FLAG_SSE4;
        if (ecx & 0x00100000)
            rval |= AV_CPU_FLAG_SSE42;
        if (ecx & 0x02000000)
            rval |= AV_CPU_FLAG_AESNI;
#if HAVE_AVX
        /* Check the OSXSAVE and AVX bits */
        if ((ecx & 0x18000000) == 0x18000000) {
            /* Check for OS support: XCR0 bits 1 and 2 (mask 0x6) signal
             * that the OS saves and restores XMM and YMM state. */
            xgetbv(0, xcr0_lo, xcr0_hi);
            if ((xcr0_lo & 0x6) == 0x6) {
                rval |= AV_CPU_FLAG_AVX;
                if (ecx & 0x00001000)
                    rval |= AV_CPU_FLAG_FMA3;
            }
        }
#endif /* HAVE_AVX */
#endif /* HAVE_SSE */
    }
    if (max_std_level >= 7) {
        cpuid(7, eax, ebx, ecx, edx);
#if HAVE_AVX2
        if ((rval & AV_CPU_FLAG_AVX) && (ebx & 0x00000020))
            rval |= AV_CPU_FLAG_AVX2;
#if HAVE_AVX512 /* F, CD, BW, DQ, VL */
        if ((xcr0_lo & 0xe0) == 0xe0) { /* OPMASK/ZMM state */
            if ((rval & AV_CPU_FLAG_AVX2) && (ebx & 0xd0030000) == 0xd0030000) {
                rval |= AV_CPU_FLAG_AVX512;
#if HAVE_AVX512ICL
                if ((ebx & 0xd0200000) == 0xd0200000 && (ecx & 0x5f42) == 0x5f42)
                    rval |= AV_CPU_FLAG_AVX512ICL;
#endif /* HAVE_AVX512ICL */
            }
        }
#endif /* HAVE_AVX512 */
#endif /* HAVE_AVX2 */
        /* BMI1/2 don't need OS support */
        if (ebx & 0x00000008) {
            rval |= AV_CPU_FLAG_BMI1;
            if (ebx & 0x00000100)
                rval |= AV_CPU_FLAG_BMI2;
        }
    }

    cpuid(0x80000000, max_ext_level, ebx, ecx, edx);

    if (max_ext_level >= 0x80000001) {
        cpuid(0x80000001, eax, ebx, ecx, ext_caps);
        if (ext_caps & (1U << 31))
            rval |= AV_CPU_FLAG_3DNOW;
        if (ext_caps & (1 << 30))
            rval |= AV_CPU_FLAG_3DNOWEXT;
        if (ext_caps & (1 << 23))
            rval |= AV_CPU_FLAG_MMX;
        if (ext_caps & (1 << 22))
            rval |= AV_CPU_FLAG_MMXEXT;

        if (!strncmp(vendor.c, "AuthenticAMD", 12)) {
            /* Allow for selectively disabling SSE2 functions on AMD processors
             * with SSE2 support but not SSE4a. This includes Athlon64, some
             * Opteron, and some Sempron processors. MMX, SSE, or 3DNow! are
             * often enough faster than SSE2 on these to justify this
             * special-case flag. AV_CPU_FLAG_SSE2 and AV_CPU_FLAG_SSE2SLOW
             * are both set in this case so that SSE2 is used unless
             * explicitly disabled by checking AV_CPU_FLAG_SSE2SLOW. */
            if (rval & AV_CPU_FLAG_SSE2 && !(ecx & 0x00000040))
                rval |= AV_CPU_FLAG_SSE2SLOW;

            /* Similar to the above, but for AVX functions on AMD processors.
             * This is necessary only for functions using YMM registers on
             * Bulldozer and Jaguar based CPUs, as they lack 256-bit execution
             * units. SSE/AVX functions using XMM registers are always faster
             * on them. AV_CPU_FLAG_AVX and AV_CPU_FLAG_AVXSLOW are both set
             * so that AVX is used unless explicitly disabled by checking
             * AV_CPU_FLAG_AVXSLOW. */
            if ((family == 0x15 || family == 0x16) && (rval & AV_CPU_FLAG_AVX))
                rval |= AV_CPU_FLAG_AVXSLOW;

            /* Zen 3 and earlier have slow gather */
            if ((family <= 0x19) && (rval & AV_CPU_FLAG_AVX2))
                rval |= AV_CPU_FLAG_SLOW_GATHER;
        }

        /* XOP and FMA4 use the AVX instruction coding scheme, so they can't be
         * used unless the OS has AVX support. */
        if (rval & AV_CPU_FLAG_AVX) {
            if (ecx & 0x00000800)
                rval |= AV_CPU_FLAG_XOP;
            if (ecx & 0x00010000)
                rval |= AV_CPU_FLAG_FMA4;
        }
    }

    if (!strncmp(vendor.c, "GenuineIntel", 12)) {
        if (family == 6 && (model == 9 || model == 13 || model == 14)) {
            /* 6/9 (pentium-m "banias"), 6/13 (pentium-m "dothan"), and
             * 6/14 (core1 "yonah") theoretically support sse2, but it's
             * usually slower than mmx, so let's just pretend they don't.
             * AV_CPU_FLAG_SSE2 is disabled and AV_CPU_FLAG_SSE2SLOW is
             * enabled so that SSE2 is not used unless explicitly enabled
             * by checking AV_CPU_FLAG_SSE2SLOW. The same situation
             * applies for AV_CPU_FLAG_SSE3 and AV_CPU_FLAG_SSE3SLOW. */
            if (rval & AV_CPU_FLAG_SSE2)
                rval ^= AV_CPU_FLAG_SSE2SLOW | AV_CPU_FLAG_SSE2;
            if (rval & AV_CPU_FLAG_SSE3)
                rval ^= AV_CPU_FLAG_SSE3SLOW | AV_CPU_FLAG_SSE3;
        }
        /* The Atom processor has SSSE3 support, which is useful in many
         * cases, but the SSSE3 version of a function is sometimes slower
         * than its SSE2 equivalent on the Atom, while generally faster on
         * other processors supporting SSSE3. This flag allows for
         * selectively disabling certain SSSE3 functions on the Atom. */
        if (family == 6 && model == 28)
            rval |= AV_CPU_FLAG_ATOM;

        /* Conroe has a slow shuffle unit. Check the model number to avoid
         * including crippled low-end Penryns and Nehalems that lack SSE4. */
        if ((rval & AV_CPU_FLAG_SSSE3) && !(rval & AV_CPU_FLAG_SSE4) &&
            family == 6 && model < 23)
            rval |= AV_CPU_FLAG_SSSE3SLOW;

        /* Haswell has slow gather */
        if ((rval & AV_CPU_FLAG_AVX2) && family == 6 && model < 70)
            rval |= AV_CPU_FLAG_SLOW_GATHER;
    }

#endif /* cpuid */

    return rval;
}
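
/* Callers do not normally call ff_get_cpu_flags_x86() directly; they use
 * av_get_cpu_flags(), which caches the result. A minimal sketch of
 * flag-based dispatch (the function pointers and names are hypothetical):
 *
 *     int cpu_flags = av_get_cpu_flags();
 *     if (cpu_flags & AV_CPU_FLAG_AVX2)
 *         s->idct = idct_avx2;
 *     else if (cpu_flags & AV_CPU_FLAG_SSE2)
 *         s->idct = idct_sse2;
 */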

size_t ff_get_cpu_max_align_x86(void)
{
    int flags = av_get_cpu_flags();

    if (flags & AV_CPU_FLAG_AVX512)
        return 64; /* ZMM registers are 64 bytes wide */
    if (flags & (AV_CPU_FLAG_AVX2      |
                 AV_CPU_FLAG_AVX       |
                 AV_CPU_FLAG_XOP       |
                 AV_CPU_FLAG_FMA4      |
                 AV_CPU_FLAG_FMA3      |
                 AV_CPU_FLAG_AVXSLOW))
        return 32; /* YMM */
    if (flags & (AV_CPU_FLAG_AESNI     |
                 AV_CPU_FLAG_SSE42     |
                 AV_CPU_FLAG_SSE4      |
                 AV_CPU_FLAG_SSSE3     |
                 AV_CPU_FLAG_SSE3      |
                 AV_CPU_FLAG_SSE2      |
                 AV_CPU_FLAG_SSE       |
                 AV_CPU_FLAG_ATOM      |
                 AV_CPU_FLAG_SSSE3SLOW |
                 AV_CPU_FLAG_SSE3SLOW  |
                 AV_CPU_FLAG_SSE2SLOW))
        return 16; /* XMM */

    return 8; /* MMX */
}
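
/* A minimal sketch (assuming plain C and posix_memalign availability) of
 * using the result via the public av_cpu_max_align() wrapper to obtain
 * suitably aligned memory:
 *
 *     size_t align = av_cpu_max_align();
 *     void  *buf   = NULL;
 *     if (posix_memalign(&buf, align, size) != 0)
 *         return AVERROR(ENOMEM);
 */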