/*
 * Copyright 2019 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef SKVX_DEFINED
#define SKVX_DEFINED

// skvx::Vec<N,T> are SIMD vectors of N T's, a v1.5 successor to SkNx<N,T>.
//
// This time we're leaning a bit less on platform-specific intrinsics and a bit
// more on Clang/GCC vector extensions, but still keeping the option open to
// drop in platform-specific intrinsics, actually more easily than before.
//
// We've also fixed a few of the caveats that used to make SkNx awkward to work
// with across translation units. skvx::Vec<N,T> always has N*sizeof(T) size
// and alignment and is safe to use across translation units freely.
// (Ideally we'd only align to T, but that tanks ARMv7 NEON codegen.)
//
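// A quick taste of the API (illustrative only; everything used here is defined below):
//
//     skvx::Vec<4,float> x = {1,2,3,4},   // initializer_list fills lanes in order
//                        y = x + 1;       // scalars splat across lanes: y == {2,3,4,5}
//     y = min(y, 4.0f);                   // y == {2,3,4,4}
//     float f = y[0];                     // lanes are indexable
//     y.store(buf);                       // a memcpy(), assuming buf points at 4 floats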

// Please try to keep this file independent of Skia headers.
#include <algorithm>         // std::min, std::max
#include <cassert>           // assert()
#include <cmath>             // ceilf, floorf, truncf, roundf, sqrtf, etc.
#include <cstdint>           // intXX_t
#include <cstring>           // memcpy()
#include <initializer_list>  // std::initializer_list
#include <type_traits>       // std::enable_if_t, std::is_convertible (used by SINTU below)
#include <utility>           // std::index_sequence

#if defined(__SSE__) || defined(__AVX__) || defined(__AVX2__)
#include <immintrin.h>
#elif defined(__ARM_NEON)
#include <arm_neon.h>
#elif defined(__wasm_simd128__)
#include <wasm_simd128.h>
#endif

// To avoid ODR violations, all methods must be force-inlined...
#if defined(_MSC_VER)
#define SKVX_ALWAYS_INLINE __forceinline
#else
#define SKVX_ALWAYS_INLINE __attribute__((always_inline))
#endif

// ... and all standalone functions must be static. Please use these helpers:
#define SI    static inline
#define SIT   template <       typename T> SI
#define SIN   template <int N             > SI
#define SINT  template <int N, typename T> SI
#define SINTU template <int N, typename T, typename U, \
                        typename=std::enable_if_t<std::is_convertible<U,T>::value>> SI

namespace skvx {

template <int N, typename T>
struct alignas(N*sizeof(T)) Vec;

template <int... Ix, int N, typename T>
SI Vec<sizeof...(Ix),T> shuffle(const Vec<N,T>&);

template <typename D, typename S>
SI D bit_pun(const S&);

// All Vec have the same simple memory layout, the same as `T vec[N]`.
template <int N, typename T>
struct alignas(N*sizeof(T)) VecStorage {
    SKVX_ALWAYS_INLINE VecStorage() = default;
    SKVX_ALWAYS_INLINE VecStorage(T s) : lo(s), hi(s) {}

    Vec<N/2,T> lo, hi;
};

template <typename T>
struct VecStorage<4,T> {
    SKVX_ALWAYS_INLINE VecStorage() = default;
    SKVX_ALWAYS_INLINE VecStorage(T s) : lo(s), hi(s) {}
    SKVX_ALWAYS_INLINE VecStorage(T x, T y, T z, T w) : lo(x,y), hi(z,w) {}
    SKVX_ALWAYS_INLINE VecStorage(Vec<2,T> xy, T z, T w) : lo(xy), hi(z,w) {}
    SKVX_ALWAYS_INLINE VecStorage(T x, T y, Vec<2,T> zw) : lo(x,y), hi(zw) {}
    SKVX_ALWAYS_INLINE VecStorage(Vec<2,T> xy, Vec<2,T> zw) : lo(xy), hi(zw) {}

    SKVX_ALWAYS_INLINE Vec<2,T>& xy() { return lo; }
    SKVX_ALWAYS_INLINE Vec<2,T>& zw() { return hi; }
    SKVX_ALWAYS_INLINE T& x() { return lo.lo.val; }
    SKVX_ALWAYS_INLINE T& y() { return lo.hi.val; }
    SKVX_ALWAYS_INLINE T& z() { return hi.lo.val; }
    SKVX_ALWAYS_INLINE T& w() { return hi.hi.val; }

    SKVX_ALWAYS_INLINE Vec<2,T> xy() const { return lo; }
    SKVX_ALWAYS_INLINE Vec<2,T> zw() const { return hi; }
    SKVX_ALWAYS_INLINE T x() const { return lo.lo.val; }
    SKVX_ALWAYS_INLINE T y() const { return lo.hi.val; }
    SKVX_ALWAYS_INLINE T z() const { return hi.lo.val; }
    SKVX_ALWAYS_INLINE T w() const { return hi.hi.val; }

    // Exchange-based swizzles. These should take 1 cycle on NEON and 3 (pipelined) cycles on SSE.
    SKVX_ALWAYS_INLINE Vec<4,T> yxwz() const { return shuffle<1,0,3,2>(bit_pun<Vec<4,T>>(*this)); }
    SKVX_ALWAYS_INLINE Vec<4,T> zwxy() const { return shuffle<2,3,0,1>(bit_pun<Vec<4,T>>(*this)); }

    Vec<2,T> lo, hi;
};

template <typename T>
struct VecStorage<2,T> {
    SKVX_ALWAYS_INLINE VecStorage() = default;
    SKVX_ALWAYS_INLINE VecStorage(T s) : lo(s), hi(s) {}
    SKVX_ALWAYS_INLINE VecStorage(T x, T y) : lo(x), hi(y) {}

    SKVX_ALWAYS_INLINE T& x() { return lo.val; }
    SKVX_ALWAYS_INLINE T& y() { return hi.val; }

    SKVX_ALWAYS_INLINE T x() const { return lo.val; }
    SKVX_ALWAYS_INLINE T y() const { return hi.val; }

    // This exchange-based swizzle should take 1 cycle on NEON and 3 (pipelined) cycles on SSE.
    SKVX_ALWAYS_INLINE Vec<2,T> yx() const { return shuffle<1,0>(bit_pun<Vec<2,T>>(*this)); }

    SKVX_ALWAYS_INLINE Vec<4,T> xyxy() const {
        return Vec<4,T>(bit_pun<Vec<2,T>>(*this), bit_pun<Vec<2,T>>(*this));
    }

    Vec<1,T> lo, hi;
};

template <int N, typename T>
struct alignas(N*sizeof(T)) Vec : public VecStorage<N,T> {
    static_assert((N & (N-1)) == 0,        "N must be a power of 2.");
    static_assert(sizeof(T) >= alignof(T), "What kind of unusual T is this?");

    // Methods belong here in the class declaration of Vec only if:
    //   - they must be here, like constructors or operator[];
    //   - they'll definitely never want a specialized implementation.
    // Other operations on Vec should be defined outside the type.

    SKVX_ALWAYS_INLINE Vec() = default;

    using VecStorage<N,T>::VecStorage;

    SKVX_ALWAYS_INLINE Vec(std::initializer_list<T> xs) {
        T vals[N] = {0};
        memcpy(vals, xs.begin(), std::min(xs.size(), (size_t)N)*sizeof(T));

        this->lo = Vec<N/2,T>::Load(vals +   0);
        this->hi = Vec<N/2,T>::Load(vals + N/2);
    }

    SKVX_ALWAYS_INLINE T  operator[](int i) const { return i<N/2 ? this->lo[i] : this->hi[i-N/2]; }
    SKVX_ALWAYS_INLINE T& operator[](int i)       { return i<N/2 ? this->lo[i] : this->hi[i-N/2]; }

    SKVX_ALWAYS_INLINE static Vec Load(const void* ptr) {
        Vec v;
        memcpy(&v, ptr, sizeof(Vec));
        return v;
    }
    SKVX_ALWAYS_INLINE void store(void* ptr) const {
        memcpy(ptr, this, sizeof(Vec));
    }
};

template <typename T>
struct Vec<1,T> {
    T val;

    SKVX_ALWAYS_INLINE Vec() = default;

    SKVX_ALWAYS_INLINE Vec(T s) : val(s) {}

    SKVX_ALWAYS_INLINE Vec(std::initializer_list<T> xs) : val(xs.size() ? *xs.begin() : 0) {}

    SKVX_ALWAYS_INLINE T  operator[](int) const { return val; }
    SKVX_ALWAYS_INLINE T& operator[](int)       { return val; }

    SKVX_ALWAYS_INLINE static Vec Load(const void* ptr) {
        Vec v;
        memcpy(&v, ptr, sizeof(Vec));
        return v;
    }
    SKVX_ALWAYS_INLINE void store(void* ptr) const {
        memcpy(ptr, this, sizeof(Vec));
    }
};

// Ideally we'd only use bit_pun(), but until this file is always built as C++17 with constexpr if,
// we'll sometimes find need to use unchecked_bit_pun(). Please do check the call sites yourself!
template <typename D, typename S>
SI D unchecked_bit_pun(const S& s) {
    D d;
    memcpy(&d, &s, sizeof(D));
    return d;
}

template <typename D, typename S>
SI D bit_pun(const S& s) {
    static_assert(sizeof(D) == sizeof(S), "");
    return unchecked_bit_pun<D>(s);
}
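// For example (assuming IEEE-754 floats, which Skia does everywhere):
//     bit_pun<int32_t>(1.0f) == 0x3f800000   // same 32 bits, reinterpreted via memcpy()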

// Translate from a value type T to its corresponding Mask, the result of a comparison.
template <typename T> struct Mask { using type = T; };
template <> struct Mask<float > { using type = int32_t; };
template <> struct Mask<double> { using type = int64_t; };
template <typename T> using M = typename Mask<T>::type;
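// So comparing two Vec<4,float> yields a Vec<4,int32_t>, with each lane either
// all-1 bits (true) or all-0 bits (false), e.g. (illustrative)
//     Vec<4,int32_t> mask = (x < y);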

// Join two Vec<N,T> into one Vec<2N,T>.
SINT Vec<2*N,T> join(const Vec<N,T>& lo, const Vec<N,T>& hi) {
    Vec<2*N,T> v;
    v.lo = lo;
    v.hi = hi;
    return v;
}

// We have three strategies for implementing Vec operations:
//    1) lean on Clang/GCC vector extensions when available;
//    2) use map() to apply a scalar function lane-wise;
//    3) recurse on lo/hi to scalar portable implementations.
// We can slot in platform-specific implementations as overloads for particular Vec<N,T>,
// or often integrate them directly into the recursion of style 3), allowing fine control.

#if !defined(SKNX_NO_SIMD) && (defined(__clang__) || defined(__GNUC__))

// VExt<N,T> types have the same size as Vec<N,T> and support most operations directly.
#if defined(__clang__)
template <int N, typename T>
using VExt = T __attribute__((ext_vector_type(N)));

#elif defined(__GNUC__)
template <int N, typename T>
struct VExtHelper {
    typedef T __attribute__((vector_size(N*sizeof(T)))) type;
};

template <int N, typename T>
using VExt = typename VExtHelper<N,T>::type;

// For some reason some (new!) versions of GCC cannot seem to deduce N in the generic
// to_vec<N,T>() below for N=4 and T=float. This workaround seems to help...
SI Vec<4,float> to_vec(VExt<4,float> v) { return bit_pun<Vec<4,float>>(v); }
#endif

SINT VExt<N,T> to_vext(const Vec<N,T>& v) { return bit_pun<VExt<N,T>>(v); }
SINT Vec <N,T> to_vec(const VExt<N,T>& v) { return bit_pun<Vec <N,T>>(v); }

SINT Vec<N,T> operator+(const Vec<N,T>& x, const Vec<N,T>& y) {
    return to_vec<N,T>(to_vext(x) + to_vext(y));
}
SINT Vec<N,T> operator-(const Vec<N,T>& x, const Vec<N,T>& y) {
    return to_vec<N,T>(to_vext(x) - to_vext(y));
}
SINT Vec<N,T> operator*(const Vec<N,T>& x, const Vec<N,T>& y) {
    return to_vec<N,T>(to_vext(x) * to_vext(y));
}
SINT Vec<N,T> operator/(const Vec<N,T>& x, const Vec<N,T>& y) {
    return to_vec<N,T>(to_vext(x) / to_vext(y));
}

SINT Vec<N,T> operator^(const Vec<N,T>& x, const Vec<N,T>& y) {
    return to_vec<N,T>(to_vext(x) ^ to_vext(y));
}
SINT Vec<N,T> operator&(const Vec<N,T>& x, const Vec<N,T>& y) {
    return to_vec<N,T>(to_vext(x) & to_vext(y));
}
SINT Vec<N,T> operator|(const Vec<N,T>& x, const Vec<N,T>& y) {
    return to_vec<N,T>(to_vext(x) | to_vext(y));
}

SINT Vec<N,T> operator!(const Vec<N,T>& x) { return to_vec<N,T>(!to_vext(x)); }
SINT Vec<N,T> operator-(const Vec<N,T>& x) { return to_vec<N,T>(-to_vext(x)); }
SINT Vec<N,T> operator~(const Vec<N,T>& x) { return to_vec<N,T>(~to_vext(x)); }

SINT Vec<N,T> operator<<(const Vec<N,T>& x, int k) { return to_vec<N,T>(to_vext(x) << k); }
SINT Vec<N,T> operator>>(const Vec<N,T>& x, int k) { return to_vec<N,T>(to_vext(x) >> k); }

SINT Vec<N,M<T>> operator==(const Vec<N,T>& x, const Vec<N,T>& y) {
    return bit_pun<Vec<N,M<T>>>(to_vext(x) == to_vext(y));
}
SINT Vec<N,M<T>> operator!=(const Vec<N,T>& x, const Vec<N,T>& y) {
    return bit_pun<Vec<N,M<T>>>(to_vext(x) != to_vext(y));
}
SINT Vec<N,M<T>> operator<=(const Vec<N,T>& x, const Vec<N,T>& y) {
    return bit_pun<Vec<N,M<T>>>(to_vext(x) <= to_vext(y));
}
SINT Vec<N,M<T>> operator>=(const Vec<N,T>& x, const Vec<N,T>& y) {
    return bit_pun<Vec<N,M<T>>>(to_vext(x) >= to_vext(y));
}
SINT Vec<N,M<T>> operator< (const Vec<N,T>& x, const Vec<N,T>& y) {
    return bit_pun<Vec<N,M<T>>>(to_vext(x) <  to_vext(y));
}
SINT Vec<N,M<T>> operator> (const Vec<N,T>& x, const Vec<N,T>& y) {
    return bit_pun<Vec<N,M<T>>>(to_vext(x) >  to_vext(y));
}

#else

// Either SKNX_NO_SIMD is defined, or Clang/GCC vector extensions are not available.
// We'll implement things portably with N==1 scalar implementations and recursion onto them.

// N == 1 scalar implementations.
SIT Vec<1,T> operator+(const Vec<1,T>& x, const Vec<1,T>& y) { return x.val + y.val; }
SIT Vec<1,T> operator-(const Vec<1,T>& x, const Vec<1,T>& y) { return x.val - y.val; }
SIT Vec<1,T> operator*(const Vec<1,T>& x, const Vec<1,T>& y) { return x.val * y.val; }
SIT Vec<1,T> operator/(const Vec<1,T>& x, const Vec<1,T>& y) { return x.val / y.val; }

SIT Vec<1,T> operator^(const Vec<1,T>& x, const Vec<1,T>& y) { return x.val ^ y.val; }
SIT Vec<1,T> operator&(const Vec<1,T>& x, const Vec<1,T>& y) { return x.val & y.val; }
SIT Vec<1,T> operator|(const Vec<1,T>& x, const Vec<1,T>& y) { return x.val | y.val; }

SIT Vec<1,T> operator!(const Vec<1,T>& x) { return !x.val; }
SIT Vec<1,T> operator-(const Vec<1,T>& x) { return -x.val; }
SIT Vec<1,T> operator~(const Vec<1,T>& x) { return ~x.val; }

SIT Vec<1,T> operator<<(const Vec<1,T>& x, int k) { return x.val << k; }
SIT Vec<1,T> operator>>(const Vec<1,T>& x, int k) { return x.val >> k; }

SIT Vec<1,M<T>> operator==(const Vec<1,T>& x, const Vec<1,T>& y) {
    return x.val == y.val ? ~0 : 0;
}
SIT Vec<1,M<T>> operator!=(const Vec<1,T>& x, const Vec<1,T>& y) {
    return x.val != y.val ? ~0 : 0;
}
SIT Vec<1,M<T>> operator<=(const Vec<1,T>& x, const Vec<1,T>& y) {
    return x.val <= y.val ? ~0 : 0;
}
SIT Vec<1,M<T>> operator>=(const Vec<1,T>& x, const Vec<1,T>& y) {
    return x.val >= y.val ? ~0 : 0;
}
SIT Vec<1,M<T>> operator< (const Vec<1,T>& x, const Vec<1,T>& y) {
    return x.val <  y.val ? ~0 : 0;
}
SIT Vec<1,M<T>> operator> (const Vec<1,T>& x, const Vec<1,T>& y) {
    return x.val >  y.val ? ~0 : 0;
}

// Recurse on lo/hi down to N==1 scalar implementations.
SINT Vec<N,T> operator+(const Vec<N,T>& x, const Vec<N,T>& y) {
    return join(x.lo + y.lo, x.hi + y.hi);
}
SINT Vec<N,T> operator-(const Vec<N,T>& x, const Vec<N,T>& y) {
    return join(x.lo - y.lo, x.hi - y.hi);
}
SINT Vec<N,T> operator*(const Vec<N,T>& x, const Vec<N,T>& y) {
    return join(x.lo * y.lo, x.hi * y.hi);
}
SINT Vec<N,T> operator/(const Vec<N,T>& x, const Vec<N,T>& y) {
    return join(x.lo / y.lo, x.hi / y.hi);
}

SINT Vec<N,T> operator^(const Vec<N,T>& x, const Vec<N,T>& y) {
    return join(x.lo ^ y.lo, x.hi ^ y.hi);
}
SINT Vec<N,T> operator&(const Vec<N,T>& x, const Vec<N,T>& y) {
    return join(x.lo & y.lo, x.hi & y.hi);
}
SINT Vec<N,T> operator|(const Vec<N,T>& x, const Vec<N,T>& y) {
    return join(x.lo | y.lo, x.hi | y.hi);
}

SINT Vec<N,T> operator!(const Vec<N,T>& x) { return join(!x.lo, !x.hi); }
SINT Vec<N,T> operator-(const Vec<N,T>& x) { return join(-x.lo, -x.hi); }
SINT Vec<N,T> operator~(const Vec<N,T>& x) { return join(~x.lo, ~x.hi); }

SINT Vec<N,T> operator<<(const Vec<N,T>& x, int k) { return join(x.lo << k, x.hi << k); }
SINT Vec<N,T> operator>>(const Vec<N,T>& x, int k) { return join(x.lo >> k, x.hi >> k); }

SINT Vec<N,M<T>> operator==(const Vec<N,T>& x, const Vec<N,T>& y) {
    return join(x.lo == y.lo, x.hi == y.hi);
}
SINT Vec<N,M<T>> operator!=(const Vec<N,T>& x, const Vec<N,T>& y) {
    return join(x.lo != y.lo, x.hi != y.hi);
}
SINT Vec<N,M<T>> operator<=(const Vec<N,T>& x, const Vec<N,T>& y) {
    return join(x.lo <= y.lo, x.hi <= y.hi);
}
SINT Vec<N,M<T>> operator>=(const Vec<N,T>& x, const Vec<N,T>& y) {
    return join(x.lo >= y.lo, x.hi >= y.hi);
}
SINT Vec<N,M<T>> operator< (const Vec<N,T>& x, const Vec<N,T>& y) {
    return join(x.lo <  y.lo, x.hi <  y.hi);
}
SINT Vec<N,M<T>> operator> (const Vec<N,T>& x, const Vec<N,T>& y) {
    return join(x.lo >  y.lo, x.hi >  y.hi);
}
#endif

// Scalar/vector operations splat the scalar to a vector.
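// For example, 1.0f + Vec<4,float>{1,2,3,4} splats 1.0f to {1,1,1,1}, giving {2,3,4,5}.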
SINTU Vec<N,T>    operator+ (U x, const Vec<N,T>& y) { return Vec<N,T>(x) +  y; }
SINTU Vec<N,T>    operator- (U x, const Vec<N,T>& y) { return Vec<N,T>(x) -  y; }
SINTU Vec<N,T>    operator* (U x, const Vec<N,T>& y) { return Vec<N,T>(x) *  y; }
SINTU Vec<N,T>    operator/ (U x, const Vec<N,T>& y) { return Vec<N,T>(x) /  y; }
SINTU Vec<N,T>    operator^ (U x, const Vec<N,T>& y) { return Vec<N,T>(x) ^  y; }
SINTU Vec<N,T>    operator& (U x, const Vec<N,T>& y) { return Vec<N,T>(x) &  y; }
SINTU Vec<N,T>    operator| (U x, const Vec<N,T>& y) { return Vec<N,T>(x) |  y; }
SINTU Vec<N,M<T>> operator==(U x, const Vec<N,T>& y) { return Vec<N,T>(x) == y; }
SINTU Vec<N,M<T>> operator!=(U x, const Vec<N,T>& y) { return Vec<N,T>(x) != y; }
SINTU Vec<N,M<T>> operator<=(U x, const Vec<N,T>& y) { return Vec<N,T>(x) <= y; }
SINTU Vec<N,M<T>> operator>=(U x, const Vec<N,T>& y) { return Vec<N,T>(x) >= y; }
SINTU Vec<N,M<T>> operator< (U x, const Vec<N,T>& y) { return Vec<N,T>(x) <  y; }
SINTU Vec<N,M<T>> operator> (U x, const Vec<N,T>& y) { return Vec<N,T>(x) >  y; }

SINTU Vec<N,T>    operator+ (const Vec<N,T>& x, U y) { return x +  Vec<N,T>(y); }
SINTU Vec<N,T>    operator- (const Vec<N,T>& x, U y) { return x -  Vec<N,T>(y); }
SINTU Vec<N,T>    operator* (const Vec<N,T>& x, U y) { return x *  Vec<N,T>(y); }
SINTU Vec<N,T>    operator/ (const Vec<N,T>& x, U y) { return x /  Vec<N,T>(y); }
SINTU Vec<N,T>    operator^ (const Vec<N,T>& x, U y) { return x ^  Vec<N,T>(y); }
SINTU Vec<N,T>    operator& (const Vec<N,T>& x, U y) { return x &  Vec<N,T>(y); }
SINTU Vec<N,T>    operator| (const Vec<N,T>& x, U y) { return x |  Vec<N,T>(y); }
SINTU Vec<N,M<T>> operator==(const Vec<N,T>& x, U y) { return x == Vec<N,T>(y); }
SINTU Vec<N,M<T>> operator!=(const Vec<N,T>& x, U y) { return x != Vec<N,T>(y); }
SINTU Vec<N,M<T>> operator<=(const Vec<N,T>& x, U y) { return x <= Vec<N,T>(y); }
SINTU Vec<N,M<T>> operator>=(const Vec<N,T>& x, U y) { return x >= Vec<N,T>(y); }
SINTU Vec<N,M<T>> operator< (const Vec<N,T>& x, U y) { return x <  Vec<N,T>(y); }
SINTU Vec<N,M<T>> operator> (const Vec<N,T>& x, U y) { return x >  Vec<N,T>(y); }

SINT Vec<N,T>& operator+=(Vec<N,T>& x, const Vec<N,T>& y) { return (x = x + y); }
SINT Vec<N,T>& operator-=(Vec<N,T>& x, const Vec<N,T>& y) { return (x = x - y); }
SINT Vec<N,T>& operator*=(Vec<N,T>& x, const Vec<N,T>& y) { return (x = x * y); }
SINT Vec<N,T>& operator/=(Vec<N,T>& x, const Vec<N,T>& y) { return (x = x / y); }
SINT Vec<N,T>& operator^=(Vec<N,T>& x, const Vec<N,T>& y) { return (x = x ^ y); }
SINT Vec<N,T>& operator&=(Vec<N,T>& x, const Vec<N,T>& y) { return (x = x & y); }
SINT Vec<N,T>& operator|=(Vec<N,T>& x, const Vec<N,T>& y) { return (x = x | y); }

SINTU Vec<N,T>& operator+=(Vec<N,T>& x, U y) { return (x = x + Vec<N,T>(y)); }
SINTU Vec<N,T>& operator-=(Vec<N,T>& x, U y) { return (x = x - Vec<N,T>(y)); }
SINTU Vec<N,T>& operator*=(Vec<N,T>& x, U y) { return (x = x * Vec<N,T>(y)); }
SINTU Vec<N,T>& operator/=(Vec<N,T>& x, U y) { return (x = x / Vec<N,T>(y)); }
SINTU Vec<N,T>& operator^=(Vec<N,T>& x, U y) { return (x = x ^ Vec<N,T>(y)); }
SINTU Vec<N,T>& operator&=(Vec<N,T>& x, U y) { return (x = x & Vec<N,T>(y)); }
SINTU Vec<N,T>& operator|=(Vec<N,T>& x, U y) { return (x = x | Vec<N,T>(y)); }

SINT Vec<N,T>& operator<<=(Vec<N,T>& x, int bits) { return (x = x << bits); }
SINT Vec<N,T>& operator>>=(Vec<N,T>& x, int bits) { return (x = x >> bits); }

// Some operations we want are not expressible with Clang/GCC vector extensions.

// Clang can reason about naive_if_then_else() and optimize through it better
// than if_then_else(), so it's sometimes useful to call it directly when we
// think an entire expression should optimize away, e.g. min()/max().
SINT Vec<N,T> naive_if_then_else(const Vec<N,M<T>>& cond, const Vec<N,T>& t, const Vec<N,T>& e) {
    return bit_pun<Vec<N,T>>(( cond & bit_pun<Vec<N, M<T>>>(t)) |
                             (~cond & bit_pun<Vec<N, M<T>>>(e)) );
}

SIT Vec<1,T> if_then_else(const Vec<1,M<T>>& cond, const Vec<1,T>& t, const Vec<1,T>& e) {
    // In practice this scalar implementation is unlikely to be used. See next if_then_else().
    return bit_pun<Vec<1,T>>(( cond & bit_pun<Vec<1, M<T>>>(t)) |
                             (~cond & bit_pun<Vec<1, M<T>>>(e)) );
}
SINT Vec<N,T> if_then_else(const Vec<N,M<T>>& cond, const Vec<N,T>& t, const Vec<N,T>& e) {
    // Specializations inline here so they can generalize what types they apply to.
    // (This header is used in C++14 contexts, so we have to kind of fake constexpr if.)
#if defined(__AVX2__)
    if /*constexpr*/ (N*sizeof(T) == 32) {
        return unchecked_bit_pun<Vec<N,T>>(_mm256_blendv_epi8(unchecked_bit_pun<__m256i>(e),
                                                              unchecked_bit_pun<__m256i>(t),
                                                              unchecked_bit_pun<__m256i>(cond)));
    }
#endif
#if defined(__SSE4_1__)
    if /*constexpr*/ (N*sizeof(T) == 16) {
        return unchecked_bit_pun<Vec<N,T>>(_mm_blendv_epi8(unchecked_bit_pun<__m128i>(e),
                                                           unchecked_bit_pun<__m128i>(t),
                                                           unchecked_bit_pun<__m128i>(cond)));
    }
#endif
#if defined(__ARM_NEON)
    if /*constexpr*/ (N*sizeof(T) == 16) {
        return unchecked_bit_pun<Vec<N,T>>(vbslq_u8(unchecked_bit_pun<uint8x16_t>(cond),
                                                    unchecked_bit_pun<uint8x16_t>(t),
                                                    unchecked_bit_pun<uint8x16_t>(e)));
    }
#endif
    // Recurse for large vectors to try to hit the specializations above.
    if /*constexpr*/ (N*sizeof(T) > 16) {
        return join(if_then_else(cond.lo, t.lo, e.lo),
                    if_then_else(cond.hi, t.hi, e.hi));
    }
    // This default can lead to better code than recursing onto scalars.
    return naive_if_then_else(cond, t, e);
}
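
// A typical lane-wise select (illustrative): take -x where x is negative, else x.
//     Vec<4,float> absval = if_then_else(x < 0.0f, -x, x);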

SIT  bool any(const Vec<1,T>& x) { return x.val != 0; }
SINT bool any(const Vec<N,T>& x) {
#if defined(__wasm_simd128__)
    if /*constexpr*/ (N == 4 && sizeof(T) == 4) {
        return wasm_i32x4_any_true(unchecked_bit_pun<VExt<4,int>>(x));
    }
#endif
    return any(x.lo)
        || any(x.hi);
}

SIT  bool all(const Vec<1,T>& x) { return x.val != 0; }
SINT bool all(const Vec<N,T>& x) {
#if defined(__AVX2__)
    if /*constexpr*/ (N*sizeof(T) == 32) {
        return _mm256_testc_si256(unchecked_bit_pun<__m256i>(x),
                                  _mm256_set1_epi32(-1));
    }
#endif
#if defined(__SSE4_1__)
    if /*constexpr*/ (N*sizeof(T) == 16) {
        return _mm_testc_si128(unchecked_bit_pun<__m128i>(x),
                               _mm_set1_epi32(-1));
    }
#endif
#if defined(__wasm_simd128__)
    if /*constexpr*/ (N == 4 && sizeof(T) == 4) {
        return wasm_i32x4_all_true(unchecked_bit_pun<VExt<4,int>>(x));
    }
#endif
    return all(x.lo)
        && all(x.hi);
}

// cast() Vec<N,S> to Vec<N,D>, as if applying a C-cast to each lane.
// TODO: implement with map()?
template <typename D, typename S>
SI Vec<1,D> cast(const Vec<1,S>& src) { return (D)src.val; }

template <typename D, int N, typename S>
SI Vec<N,D> cast(const Vec<N,S>& src) {
#if !defined(SKNX_NO_SIMD) && defined(__clang__)
    return to_vec(__builtin_convertvector(to_vext(src), VExt<N,D>));
#else
    return join(cast<D>(src.lo), cast<D>(src.hi));
#endif
}
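
// For example, cast<int>(Vec<4,float>{1.5f, -2.5f, 3.f, 4.f}) == {1, -2, 3, 4},
// truncating toward zero like a C-style (int) cast in each lane.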

// min/max match logic of std::min/std::max, which is important when NaN is involved.
SIT T min(const Vec<1,T>& x) { return x.val; }
SIT T max(const Vec<1,T>& x) { return x.val; }
SINT T min(const Vec<N,T>& x) { return std::min(min(x.lo), min(x.hi)); }
SINT T max(const Vec<N,T>& x) { return std::max(max(x.lo), max(x.hi)); }

SINT Vec<N,T> min(const Vec<N,T>& x, const Vec<N,T>& y) { return naive_if_then_else(y < x, y, x); }
SINT Vec<N,T> max(const Vec<N,T>& x, const Vec<N,T>& y) { return naive_if_then_else(x < y, y, x); }
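// Like std::min/std::max, unordered (NaN) lanes keep the first argument:
// min(x,y) yields x in lanes where y is NaN, but NaN in lanes where x is NaN.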

SINTU Vec<N,T> min(const Vec<N,T>& x, U y) { return min(x, Vec<N,T>(y)); }
SINTU Vec<N,T> max(const Vec<N,T>& x, U y) { return max(x, Vec<N,T>(y)); }
SINTU Vec<N,T> min(U x, const Vec<N,T>& y) { return min(Vec<N,T>(x), y); }
SINTU Vec<N,T> max(U x, const Vec<N,T>& y) { return max(Vec<N,T>(x), y); }

// pin matches the logic of SkTPin, which is important when NaN is involved. It always returns
// values in the range lo..hi, and if x is NaN, it returns lo.
SINT Vec<N,T> pin(const Vec<N,T>& x, const Vec<N,T>& lo, const Vec<N,T>& hi) {
    return max(lo, min(x, hi));
}
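
// For example, pin(x, Vec<4,float>(0), Vec<4,float>(1)) clamps each lane to [0,1],
// with NaN lanes of x coming back as 0 (the lo bound).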

// Shuffle values from a vector pretty arbitrarily:
//    skvx::Vec<4,float> rgba = {R,G,B,A};
//    shuffle<2,1,0,3>        (rgba) ~> {B,G,R,A}
//    shuffle<2,1>            (rgba) ~> {B,G}
//    shuffle<2,1,2,1,2,1,2,1>(rgba) ~> {B,G,B,G,B,G,B,G}
//    shuffle<3,3,3,3>        (rgba) ~> {A,A,A,A}
// The only real restriction is that the output also be a legal N=power-of-two skvx::Vec.
template <int... Ix, int N, typename T>
SI Vec<sizeof...(Ix),T> shuffle(const Vec<N,T>& x) {
#if !defined(SKNX_NO_SIMD) && defined(__clang__)
    // TODO: can we just always use { x[Ix]... }?
    return to_vec<sizeof...(Ix),T>(__builtin_shufflevector(to_vext(x), to_vext(x), Ix...));
#else
    return { x[Ix]... };
#endif
}

// Call map(fn, x) for a vector with fn() applied to each lane of x, { fn(x[0]), fn(x[1]), ... },
// or map(fn, x,y) for a vector of fn(x[i], y[i]), etc.
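// For example (illustrative):
//     Vec<4,float> squares = map([](float f) { return f*f; }, x);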

template <typename Fn, typename... Args, size_t... I>
SI auto map(std::index_sequence<I...>,
            Fn&& fn, const Args&... args) -> skvx::Vec<sizeof...(I), decltype(fn(args[0]...))> {
    auto lane = [&](size_t i)
#if defined(__clang__)
    // CFI, specifically -fsanitize=cfi-icall, seems to give a false positive here,
    // with errors like "control flow integrity check for type 'float (float)
    // noexcept' failed during indirect function call... note: sqrtf.cfi_jt defined
    // here".  But we can be quite sure fn is the right type: it's all inferred!
    // So, stifle CFI in this function.
    __attribute__((no_sanitize("cfi")))
#endif
    { return fn(args[i]...); };

    return { lane(I)... };
}

template <typename Fn, int N, typename T, typename... Rest>
auto map(Fn&& fn, const Vec<N,T>& first, const Rest&... rest) {
    // Derive an {0...N-1} index_sequence from the size of the first arg: N lanes in, N lanes out.
    return map(std::make_index_sequence<N>{}, fn, first,rest...);
}

SIN Vec<N,float>  ceil(const Vec<N,float>& x) { return map( ceilf, x); }
SIN Vec<N,float> floor(const Vec<N,float>& x) { return map(floorf, x); }
SIN Vec<N,float> trunc(const Vec<N,float>& x) { return map(truncf, x); }
SIN Vec<N,float> round(const Vec<N,float>& x) { return map(roundf, x); }
SIN Vec<N,float>  sqrt(const Vec<N,float>& x) { return map( sqrtf, x); }
SIN Vec<N,float>   abs(const Vec<N,float>& x) { return map( fabsf, x); }
SIN Vec<N,float>   fma(const Vec<N,float>& x,
                       const Vec<N,float>& y,
                       const Vec<N,float>& z) {
    // I don't understand why Clang's codegen is terrible if we write map(fmaf, x,y,z) directly.
    auto fn = [](float x, float y, float z) { return fmaf(x,y,z); };
    return map(fn, x,y,z);
}

SI Vec<1,int> lrint(const Vec<1,float>& x) {
    return (int)lrintf(x.val);
}
SIN Vec<N,int> lrint(const Vec<N,float>& x) {
#if defined(__AVX__)
    if /*constexpr*/ (N == 8) {
        return unchecked_bit_pun<Vec<N,int>>(_mm256_cvtps_epi32(unchecked_bit_pun<__m256>(x)));
    }
#endif
#if defined(__SSE__)
    if /*constexpr*/ (N == 4) {
        return unchecked_bit_pun<Vec<N,int>>(_mm_cvtps_epi32(unchecked_bit_pun<__m128>(x)));
    }
#endif
    return join(lrint(x.lo),
                lrint(x.hi));
}

SIN Vec<N,float> fract(const Vec<N,float>& x) { return x - floor(x); }

// The default logic for to_half/from_half is borrowed from skcms,
// and assumes inputs are finite and treats/flushes denorm half floats as/to zero.
// Key constants to watch for:
//   - a float is 32-bit, 1-8-23 sign-exponent-mantissa, with 127 exponent bias;
//   - a half  is 16-bit, 1-5-10 sign-exponent-mantissa, with  15 exponent bias.
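//
// Worked example: 1.0f has bits 0x3f80'0000, so s == 0 and em == 0x3f80'0000 (not denorm);
// to_half then computes (0x3f80'0000>>13) - ((127-15)<<10) == 0x1fc00 - 0x1c000 == 0x3c00,
// exactly 1.0 in half precision.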
SIN Vec<N,uint16_t> to_half_finite_ftz(const Vec<N,float>& x) {
    Vec<N,uint32_t> sem = bit_pun<Vec<N,uint32_t>>(x),
                    s   = sem & 0x8000'0000,
                    em  = sem ^ s,
                    is_denorm = em < 0x3880'0000;
    return cast<uint16_t>(if_then_else(is_denorm, Vec<N,uint32_t>(0)
                                                , (s>>16) + (em>>13) - ((127-15)<<10)));
}
SIN Vec<N,float> from_half_finite_ftz(const Vec<N,uint16_t>& x) {
    Vec<N,uint32_t> wide = cast<uint32_t>(x),
                      s  = wide & 0x8000,
                      em = wide ^ s;
    auto is_denorm = bit_pun<Vec<N,int32_t>>(em < 0x0400);
    return if_then_else(is_denorm, Vec<N,float>(0)
                                 , bit_pun<Vec<N,float>>( (s<<16) + (em<<13) + ((127-15)<<23) ));
}

// Like if_then_else(), these N=1 base cases won't actually be used unless explicitly called.
SI Vec<1,uint16_t> to_half(const Vec<1,float>&    x) { return   to_half_finite_ftz(x); }
SI Vec<1,float>  from_half(const Vec<1,uint16_t>& x) { return from_half_finite_ftz(x); }

SIN Vec<N,uint16_t> to_half(const Vec<N,float>& x) {
#if defined(__F16C__)
    if /*constexpr*/ (N == 8) {
        return unchecked_bit_pun<Vec<N,uint16_t>>(_mm256_cvtps_ph(unchecked_bit_pun<__m256>(x),
                                                                  _MM_FROUND_CUR_DIRECTION));
    }
#endif
#if defined(__aarch64__)
    if /*constexpr*/ (N == 4) {
        return unchecked_bit_pun<Vec<N,uint16_t>>(vcvt_f16_f32(unchecked_bit_pun<float32x4_t>(x)));
    }
#endif
    if /*constexpr*/ (N > 4) {
        return join(to_half(x.lo),
                    to_half(x.hi));
    }
    return to_half_finite_ftz(x);
}

SIN Vec<N,float> from_half(const Vec<N,uint16_t>& x) {
#if defined(__F16C__)
    if /*constexpr*/ (N == 8) {
        return unchecked_bit_pun<Vec<N,float>>(_mm256_cvtph_ps(unchecked_bit_pun<__m128i>(x)));
    }
#endif
#if defined(__aarch64__)
    if /*constexpr*/ (N == 4) {
        return unchecked_bit_pun<Vec<N,float>>(vcvt_f32_f16(unchecked_bit_pun<float16x4_t>(x)));
    }
#endif
    if /*constexpr*/ (N > 4) {
        return join(from_half(x.lo),
                    from_half(x.hi));
    }
    return from_half_finite_ftz(x);
}

// div255(x) = (x + 127) / 255 is a bit-exact rounding divide-by-255, packing down to 8-bit.
SIN Vec<N,uint8_t> div255(const Vec<N,uint16_t>& x) {
    return cast<uint8_t>( (x+127)/255 );
}
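
// For example, a lane holding 128*255 == 32640 gives (32640+127)/255 == 128 exactly.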

// approx_scale(x,y) approximates div255(cast<uint16_t>(x)*cast<uint16_t>(y)) within a bit,
// and is always perfect when x or y is 0 or 255.
SIN Vec<N,uint8_t> approx_scale(const Vec<N,uint8_t>& x, const Vec<N,uint8_t>& y) {
    // All of (x*y+x)/256, (x*y+y)/256, and (x*y+255)/256 meet the criteria above.
    // We happen to have historically picked (x*y+x)/256.
    auto X = cast<uint16_t>(x),
         Y = cast<uint16_t>(y);
    return cast<uint8_t>( (X*Y+X)/256 );
}
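
// For example, in a lane where x == y == 255: X*Y+X == 65280, and 65280/256 == 255 exactly.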

// ScaledDividerU32 takes a divisor > 1, and creates a function divide(numerator) that
// calculates numerator / divisor. For the result to be rounded properly, the numerator
// should have half added in:
//    divide(numerator + half) == floor(numerator/divisor + 1/2).
//
// This gives an answer within +/- 1 from the true value.
//
// Derivation of half:
//    numerator/divisor + 1/2 = (numerator + half) / divisor
//    numerator + divisor / 2 = numerator + half
//    half = divisor / 2.
//
// Because half is itself the result of a division by 2, that division must also round:
//    half = divisor / 2 = (divisor + 1) / 2.
//
// The divisorFactor is just a scaled value:
//    divisorFactor = (1 / divisor) * 2 ^ 32.
// The maximum that can be divided and rounded is UINT_MAX - half.
class ScaledDividerU32 {
public:
    explicit ScaledDividerU32(uint32_t divisor)
            : fDivisorFactor{(uint32_t)(std::round((1.0 / divisor) * (1ull << 32)))}
            , fHalf{(divisor + 1) >> 1} {
        assert(divisor > 1);
    }

    Vec<4, uint32_t> divide(const Vec<4, uint32_t>& numerator) const {
#if !defined(SKNX_NO_SIMD) && defined(__ARM_NEON)
        uint64x2_t hi = vmull_n_u32(vget_high_u32(to_vext(numerator)), fDivisorFactor);
        uint64x2_t lo = vmull_n_u32(vget_low_u32 (to_vext(numerator)), fDivisorFactor);

        return to_vec<4, uint32_t>(vcombine_u32(vshrn_n_u64(lo,32), vshrn_n_u64(hi,32)));
#else
        return cast<uint32_t>((cast<uint64_t>(numerator) * fDivisorFactor) >> 32);
#endif
    }

    uint32_t half() const { return fHalf; }

private:
    const uint32_t fDivisorFactor;
    const uint32_t fHalf;
};
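
// For example (illustrative): ScaledDividerU32 d(3) has half() == 2, and
// d.divide(10 + d.half()) == 3, i.e. floor(10/3 + 1/2).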

#if !defined(SKNX_NO_SIMD) && defined(__ARM_NEON)
// With NEON we can do eight u8*u8 -> u16 in one instruction, vmull_u8 (read, mul-long).
SI Vec<8,uint16_t> mull(const Vec<8,uint8_t>& x,
                        const Vec<8,uint8_t>& y) {
    return to_vec<8,uint16_t>(vmull_u8(to_vext(x),
                                       to_vext(y)));
}

SIN std::enable_if_t<(N < 8), Vec<N,uint16_t>> mull(const Vec<N,uint8_t>& x,
                                                    const Vec<N,uint8_t>& y) {
    // N < 8 --> double up data until N == 8, returning the part we need.
    return mull(join(x,x),
                join(y,y)).lo;
}

SIN std::enable_if_t<(N > 8), Vec<N,uint16_t>> mull(const Vec<N,uint8_t>& x,
                                                    const Vec<N,uint8_t>& y) {
    // N > 8 --> usual join(lo,hi) strategy to recurse down to N == 8.
    return join(mull(x.lo, y.lo),
                mull(x.hi, y.hi));
}
#else
// Nothing special when we don't have NEON... just cast up to 16-bit and multiply.
SIN Vec<N,uint16_t> mull(const Vec<N,uint8_t>& x,
                         const Vec<N,uint8_t>& y) {
    return cast<uint16_t>(x)
         * cast<uint16_t>(y);
}
#endif
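
// Either way, e.g. a lane with x == y == 200 yields 40000, which wouldn't fit in uint8_t.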

// Allow floating point contraction. e.g., allow a*x + y to be compiled to a single FMA even though
// it introduces LSB differences on platforms that don't have an FMA instruction.
#if defined(__clang__)
#pragma STDC FP_CONTRACT ON
#endif

// Approximates the inverse cosine of x within 0.96 degrees using the rational polynomial:
//
//     acos(x) ~= (bx^3 + ax) / (dx^4 + cx^2 + 1) + pi/2
//
// See: https://stackoverflow.com/a/36387954
//
// For a proof of max error, see the "SkVx_approx_acos" unit test.
//
// NOTE: This function deviates immediately from pi and 0 outside -1 and 1. (The derivatives are
// infinite at -1 and 1). So the input must still be clamped between -1 and 1.
#define SKVX_APPROX_ACOS_MAX_ERROR SkDegreesToRadians(.96f)
SIN Vec<N,float> approx_acos(Vec<N,float> x) {
    constexpr static float a = -0.939115566365855f;
    constexpr static float b =  0.9217841528914573f;
    constexpr static float c = -1.2845906244690837f;
    constexpr static float d =  0.295624144969963174f;
    constexpr static float pi_over_2 = 1.5707963267948966f;
    auto xx = x*x;
    auto numer = b*xx + a;
    auto denom = xx*(d*xx + c) + 1;
    return x * (numer/denom) + pi_over_2;
}
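
// Sanity check: at x == 0 the rational term vanishes, so approx_acos(Vec<4,float>(0.f))
// returns exactly pi/2, matching acos(0).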

#if defined(__clang__)
#pragma STDC FP_CONTRACT DEFAULT
#endif

// De-interleaving load of 4 vectors.
//
// WARNING: These are really only supported well on NEON. Consider restructuring your data before
// resorting to these methods.
SIT void strided_load4(const T* v,
                       skvx::Vec<1,T>& a,
                       skvx::Vec<1,T>& b,
                       skvx::Vec<1,T>& c,
                       skvx::Vec<1,T>& d) {
    a.val = v[0];
    b.val = v[1];
    c.val = v[2];
    d.val = v[3];
}
SINT void strided_load4(const T* v,
                        skvx::Vec<N,T>& a,
                        skvx::Vec<N,T>& b,
                        skvx::Vec<N,T>& c,
                        skvx::Vec<N,T>& d) {
    strided_load4(v, a.lo, b.lo, c.lo, d.lo);
    strided_load4(v + 4*(N/2), a.hi, b.hi, c.hi, d.hi);
}
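// For example (illustrative), de-interleaving 8 RGBA pixels of 8-bit color:
//     skvx::Vec<8,uint8_t> r,g,b,a;
//     strided_load4(px, r,g,b,a);   // assuming px points at 32 interleaved RGBA bytes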
#if !defined(SKNX_NO_SIMD)
#if defined(__ARM_NEON)
#define IMPL_LOAD4_TRANSPOSED(N, T, VLD)                 \
    SI void strided_load4(const T* v,                    \
                          skvx::Vec<N,T>& a,             \
                          skvx::Vec<N,T>& b,             \
                          skvx::Vec<N,T>& c,             \
                          skvx::Vec<N,T>& d) {           \
        auto mat = VLD(v);                               \
        a = skvx::bit_pun<skvx::Vec<N,T>>(mat.val[0]);   \
        b = skvx::bit_pun<skvx::Vec<N,T>>(mat.val[1]);   \
        c = skvx::bit_pun<skvx::Vec<N,T>>(mat.val[2]);   \
        d = skvx::bit_pun<skvx::Vec<N,T>>(mat.val[3]);   \
    }
IMPL_LOAD4_TRANSPOSED(2, uint32_t, vld4_u32);
IMPL_LOAD4_TRANSPOSED(4, uint16_t, vld4_u16);
IMPL_LOAD4_TRANSPOSED(8, uint8_t, vld4_u8);
IMPL_LOAD4_TRANSPOSED(2, int32_t, vld4_s32);
IMPL_LOAD4_TRANSPOSED(4, int16_t, vld4_s16);
IMPL_LOAD4_TRANSPOSED(8, int8_t, vld4_s8);
IMPL_LOAD4_TRANSPOSED(2, float, vld4_f32);
IMPL_LOAD4_TRANSPOSED(4, uint32_t, vld4q_u32);
IMPL_LOAD4_TRANSPOSED(8, uint16_t, vld4q_u16);
IMPL_LOAD4_TRANSPOSED(16, uint8_t, vld4q_u8);
IMPL_LOAD4_TRANSPOSED(4, int32_t, vld4q_s32);
IMPL_LOAD4_TRANSPOSED(8, int16_t, vld4q_s16);
IMPL_LOAD4_TRANSPOSED(16, int8_t, vld4q_s8);
IMPL_LOAD4_TRANSPOSED(4, float, vld4q_f32);
#undef IMPL_LOAD4_TRANSPOSED
#elif defined(__SSE__)
SI void strided_load4(const float* v,
                      Vec<4,float>& a,
                      Vec<4,float>& b,
                      Vec<4,float>& c,
                      Vec<4,float>& d) {
    using skvx::bit_pun;
    __m128 a_ = _mm_loadu_ps(v);
    __m128 b_ = _mm_loadu_ps(v+4);
    __m128 c_ = _mm_loadu_ps(v+8);
    __m128 d_ = _mm_loadu_ps(v+12);
    _MM_TRANSPOSE4_PS(a_, b_, c_, d_);
    a = bit_pun<Vec<4,float>>(a_);
    b = bit_pun<Vec<4,float>>(b_);
    c = bit_pun<Vec<4,float>>(c_);
    d = bit_pun<Vec<4,float>>(d_);
}
#endif
#endif

// De-interleaving load of 2 vectors.
//
// WARNING: These are really only supported well on NEON. Consider restructuring your data before
// resorting to these methods.
SIT void strided_load2(const T* v, skvx::Vec<1,T>& a, skvx::Vec<1,T>& b) {
    a.val = v[0];
    b.val = v[1];
}
SINT void strided_load2(const T* v, skvx::Vec<N,T>& a, skvx::Vec<N,T>& b) {
    strided_load2(v, a.lo, b.lo);
    strided_load2(v + 2*(N/2), a.hi, b.hi);
}
#if !defined(SKNX_NO_SIMD)
#if defined(__ARM_NEON)
#define IMPL_LOAD2_TRANSPOSED(N, T, VLD)                                      \
    SI void strided_load2(const T* v, skvx::Vec<N,T>& a, skvx::Vec<N,T>& b) { \
        auto mat = VLD(v);                                                    \
        a = skvx::bit_pun<skvx::Vec<N,T>>(mat.val[0]);                        \
        b = skvx::bit_pun<skvx::Vec<N,T>>(mat.val[1]);                        \
    }
IMPL_LOAD2_TRANSPOSED(2, uint32_t, vld2_u32);
IMPL_LOAD2_TRANSPOSED(4, uint16_t, vld2_u16);
IMPL_LOAD2_TRANSPOSED(8, uint8_t, vld2_u8);
IMPL_LOAD2_TRANSPOSED(2, int32_t, vld2_s32);
IMPL_LOAD2_TRANSPOSED(4, int16_t, vld2_s16);
IMPL_LOAD2_TRANSPOSED(8, int8_t, vld2_s8);
IMPL_LOAD2_TRANSPOSED(2, float, vld2_f32);
IMPL_LOAD2_TRANSPOSED(4, uint32_t, vld2q_u32);
IMPL_LOAD2_TRANSPOSED(8, uint16_t, vld2q_u16);
IMPL_LOAD2_TRANSPOSED(16, uint8_t, vld2q_u8);
IMPL_LOAD2_TRANSPOSED(4, int32_t, vld2q_s32);
IMPL_LOAD2_TRANSPOSED(8, int16_t, vld2q_s16);
IMPL_LOAD2_TRANSPOSED(16, int8_t, vld2q_s8);
IMPL_LOAD2_TRANSPOSED(4, float, vld2q_f32);
#undef IMPL_LOAD2_TRANSPOSED
#endif
#endif

} // namespace skvx

#undef SINTU
#undef SINT
#undef SIN
#undef SIT
#undef SI
#undef SKVX_ALWAYS_INLINE

#endif//SKVX_DEFINED