xref: /third_party/ffmpeg/libavutil/x86/intmath.h (revision cabdff1a)
1/*
2 * Copyright (c) 2015 James Almer
3 *
4 * This file is part of FFmpeg.
5 *
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
10 *
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19 */
20
21#ifndef AVUTIL_X86_INTMATH_H
22#define AVUTIL_X86_INTMATH_H
23
24#include <stdint.h>
25#include <stdlib.h>
26#if HAVE_FAST_CLZ
27#if defined(_MSC_VER)
28#include <intrin.h>
29#elif defined(__INTEL_COMPILER)
30#include <immintrin.h>
31#endif
32#endif
33#include "config.h"
34
#if HAVE_FAST_CLZ
#if (defined(__INTEL_COMPILER) && (__INTEL_COMPILER>=1216)) || defined(_MSC_VER)
/* Integer log2 implemented as a bit-scan-reverse (index of the highest set
 * bit). The "|1" forces bit 0 on, so the scanned value is never 0 and
 * ff_log2(0) yields 0 instead of the intrinsic's undefined result. */
#   if defined(__INTEL_COMPILER)
#       define ff_log2(x) (_bit_scan_reverse((x)|1))
#   else
#       define ff_log2 ff_log2_x86
/* MSVC path: _BitScanReverse stores the index of the highest set bit in n. */
static av_always_inline av_const int ff_log2_x86(unsigned int v)
{
    unsigned long n;
    _BitScanReverse(&n, v|1);
    return n;
}
#   endif
/* No dedicated 16-bit variant here; the full-width scan is already cheap. */
#   define ff_log2_16bit av_log2

#if defined(__INTEL_COMPILER) || (defined(_MSC_VER) && (_MSC_VER >= 1700) && \
                                  (defined(__BMI__) || !defined(__clang__)))
/* Count trailing zeros with the BMI1 tzcnt instruction. Unlike bsf,
 * tzcnt is defined for a zero input (it returns the operand width). */
#   define ff_ctz(v) _tzcnt_u32(v)

#   if ARCH_X86_64
#       define ff_ctzll(v) _tzcnt_u64(v)
#   else
#       define ff_ctzll ff_ctzll_x86
/* 32-bit builds have no 64-bit tzcnt: scan the low half first and, when it
 * is all zero, scan the high half and add 32 (so v == 0 gives 32+32 = 64). */
static av_always_inline av_const int ff_ctzll_x86(long long v)
{
    return ((uint32_t)v == 0) ? _tzcnt_u32((uint32_t)(v >> 32)) + 32 : _tzcnt_u32((uint32_t)v);
}
#   endif
#endif /* _MSC_VER */

#endif /* __INTEL_COMPILER */

#endif /* HAVE_FAST_CLZ */
68
69#if defined(__GNUC__)
70
/* Our generic version of av_popcount is faster than GCC's built-in on
 * CPUs that don't support the popcnt instruction, so only take the
 * built-in when __POPCNT__ guarantees the hardware instruction exists.
 */
#if defined(__POPCNT__)
    #define av_popcount   __builtin_popcount
/* The 64-bit built-in is only an improvement on x86-64, where a single
 * full-width popcnt instruction is available. */
#if ARCH_X86_64
    #define av_popcount64 __builtin_popcountll
#endif

#endif /* __POPCNT__ */
81
#if defined(__BMI2__)

#if AV_GCC_VERSION_AT_LEAST(5,1)
#define av_mod_uintp2 __builtin_ia32_bzhi_si
#elif HAVE_INLINE_ASM
/* GCC releases before 5.1.0 have a broken bzhi builtin, so for those we
 * implement it using inline assembly
 */
#define av_mod_uintp2 av_mod_uintp2_bmi2
/**
 * Clip a to the lowest p bits, i.e. compute a mod 2^p, using the BMI2
 * bzhi (zero high bits) instruction.
 *
 * @param a value to clip
 * @param p number of low bits to keep (expected 0..31)
 * @return a & ((1 << p) - 1)
 */
static av_always_inline av_const unsigned av_mod_uintp2_bmi2(unsigned a, unsigned p)
{
    if (av_builtin_constant_p(p))
        /* 1U, not 1: "1 << 31" would shift into the sign bit of a signed
         * int, which is undefined behavior in C. */
        return a & ((1U << p) - 1);
    else {
        unsigned x;
        __asm__ ("bzhi %2, %1, %0 \n\t" : "=r"(x) : "rm"(a), "r"(p));
        return x;
    }
}
#endif /* AV_GCC_VERSION_AT_LEAST */

#endif /* __BMI2__ */
104
#if defined(__SSE2__) && !defined(__INTEL_COMPILER)

#define av_clipd av_clipd_sse2
/**
 * Clip a double value into the amin-amax range, using SSE2 scalar
 * max/min instructions instead of branches.
 *
 * @param a    value to clip
 * @param amin minimum value of the clip range
 * @param amax maximum value of the clip range
 * @return clipped value
 */
static av_always_inline av_const double av_clipd_sse2(double a, double amin, double amax)
{
#if defined(ASSERT_LEVEL) && ASSERT_LEVEL >= 2
    /* Debug-only sanity check: an inverted range is a caller bug. */
    if (amin > amax) abort();
#endif
    /* maxsd raises a to at least amin, then minsd lowers it to at most amax.
     * "+&x": a lives in an SSE register, read-write, early-clobbered so it
     * cannot share a register with amin/amax; "xm": register or memory. */
    __asm__ ("maxsd %1, %0 \n\t"
             "minsd %2, %0 \n\t"
             : "+&x"(a) : "xm"(amin), "xm"(amax));
    return a;
}

#endif /* __SSE2__ */
120
#if defined(__SSE__) && !defined(__INTEL_COMPILER)

#define av_clipf av_clipf_sse
/**
 * Clip a float value into the amin-amax range, using SSE scalar
 * max/min instructions instead of branches.
 *
 * @param a    value to clip
 * @param amin minimum value of the clip range
 * @param amax maximum value of the clip range
 * @return clipped value
 */
static av_always_inline av_const float av_clipf_sse(float a, float amin, float amax)
{
#if defined(ASSERT_LEVEL) && ASSERT_LEVEL >= 2
    /* Debug-only sanity check: an inverted range is a caller bug. */
    if (amin > amax) abort();
#endif
    /* maxss raises a to at least amin, then minss lowers it to at most amax.
     * Constraints as in av_clipd_sse2: read-write early-clobber SSE register
     * for a; register-or-memory for the bounds. */
    __asm__ ("maxss %1, %0 \n\t"
             "minss %2, %0 \n\t"
             : "+&x"(a) : "xm"(amin), "xm"(amax));
    return a;
}

#endif /* __SSE__ */
136
#if defined(__AVX__) && !defined(__INTEL_COMPILER)

/* On AVX builds, override the SSE clip helpers above with the VEX-encoded,
 * three-operand forms of the same instructions. */
#undef av_clipd
#define av_clipd av_clipd_avx
/**
 * Clip a double value into the amin-amax range, using AVX scalar
 * max/min instructions instead of branches.
 *
 * @param a    value to clip
 * @param amin minimum value of the clip range
 * @param amax maximum value of the clip range
 * @return clipped value
 */
static av_always_inline av_const double av_clipd_avx(double a, double amin, double amax)
{
#if defined(ASSERT_LEVEL) && ASSERT_LEVEL >= 2
    /* Debug-only sanity check: an inverted range is a caller bug. */
    if (amin > amax) abort();
#endif
    /* vmaxsd raises a to at least amin, then vminsd lowers it to amax. */
    __asm__ ("vmaxsd %1, %0, %0 \n\t"
             "vminsd %2, %0, %0 \n\t"
             : "+&x"(a) : "xm"(amin), "xm"(amax));
    return a;
}

#undef av_clipf
#define av_clipf av_clipf_avx
/**
 * Clip a float value into the amin-amax range, using AVX scalar
 * max/min instructions instead of branches.
 *
 * @param a    value to clip
 * @param amin minimum value of the clip range
 * @param amax maximum value of the clip range
 * @return clipped value
 */
static av_always_inline av_const float av_clipf_avx(float a, float amin, float amax)
{
#if defined(ASSERT_LEVEL) && ASSERT_LEVEL >= 2
    /* Debug-only sanity check: an inverted range is a caller bug. */
    if (amin > amax) abort();
#endif
    /* vmaxss raises a to at least amin, then vminss lowers it to amax. */
    __asm__ ("vmaxss %1, %0, %0 \n\t"
             "vminss %2, %0, %0 \n\t"
             : "+&x"(a) : "xm"(amin), "xm"(amax));
    return a;
}

#endif /* __AVX__ */
166
167#endif /* __GNUC__ */
168
169#endif /* AVUTIL_X86_INTMATH_H */
170