Lines matching defs:vint4
181 // vint4 data type
187 struct vint4
192 ASTCENC_SIMD_INLINE vint4() = default;
197 * Consider using vint4::loada(), which is better with wider VLA vectors
200 ASTCENC_SIMD_INLINE explicit vint4(const int* p)
211 ASTCENC_SIMD_INLINE explicit vint4(const uint8_t* p)
224 ASTCENC_SIMD_INLINE explicit vint4(int a, int b, int c, int d)
236 * Consider using vint4::zero() for constexpr zeros.
238 ASTCENC_SIMD_INLINE explicit vint4(int a)
265 static ASTCENC_SIMD_INLINE vint4 zero()
267 return vint4(0);
273 static ASTCENC_SIMD_INLINE vint4 load1(const int* p)
275 return vint4(*p);
281 static ASTCENC_SIMD_INLINE vint4 load(const uint8_t* p)
283 vint4 data;
291 static ASTCENC_SIMD_INLINE vint4 loada(const int* p)
293 return vint4(p);
299 static ASTCENC_SIMD_INLINE vint4 lane_id()
301 return vint4(0, 1, 2, 3);
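The entries above cover construction: a broadcast form, a per-lane form, pointer loads, and the static factory helpers. A minimal usage sketch, assuming this listing comes from astc-encoder's portable SIMD library (the scalar reference path, where vint4 stores four ints in an m[] array) and that the umbrella header is named as shown:

```cpp
#include "astcenc_vecmathlib.h"  // assumed header name for this listing

void construction_demo()
{
    alignas(16) int buf[4] { 10, 20, 30, 40 };

    vint4 bcast(7);                   // broadcast: (7, 7, 7, 7)
    vint4 lanes(1, 2, 3, 4);          // per-lane values
    vint4 zeros = vint4::zero();      // preferred for constexpr zeros, per the comment above
    vint4 mem   = vint4::loada(buf);  // aligned load, preferred over vint4(const int*)
    vint4 ids   = vint4::lane_id();   // (0, 1, 2, 3)
}
```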
430 // vint4 operators and functions
436 ASTCENC_SIMD_INLINE vint4 operator+(vint4 a, vint4 b)
438 return vint4(a.m[0] + b.m[0],
447 ASTCENC_SIMD_INLINE vint4 operator-(vint4 a, vint4 b)
449 return vint4(a.m[0] - b.m[0],
458 ASTCENC_SIMD_INLINE vint4 operator*(vint4 a, vint4 b)
460 return vint4(a.m[0] * b.m[0],
469 ASTCENC_SIMD_INLINE vint4 operator~(vint4 a)
471 return vint4(~a.m[0],
480 ASTCENC_SIMD_INLINE vint4 operator|(vint4 a, vint4 b)
482 return vint4(a.m[0] | b.m[0],
491 ASTCENC_SIMD_INLINE vint4 operator&(vint4 a, vint4 b)
493 return vint4(a.m[0] & b.m[0],
502 ASTCENC_SIMD_INLINE vint4 operator^(vint4 a, vint4 b)
504 return vint4(a.m[0] ^ b.m[0],
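On this scalar path the arithmetic and bitwise operators are plain lane-wise expressions, as the fragments show. A short sketch of how they compose (same include assumed as above):

```cpp
void operator_demo()
{
    vint4 a(1, 2, 3, 4);
    vint4 b(4, 3, 2, 1);

    vint4 sum  = a + b;               // (5, 5, 5, 5)
    vint4 diff = a - b;               // (-3, -1, 1, 3)
    vint4 prod = a * b;               // (4, 6, 6, 4)
    vint4 bits = (a & b) | ~(a ^ b);  // bitwise ops chain lane-wise like scalar ints
}
```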
513 ASTCENC_SIMD_INLINE vmask4 operator==(vint4 a, vint4 b)
524 ASTCENC_SIMD_INLINE vmask4 operator!=(vint4 a, vint4 b)
535 ASTCENC_SIMD_INLINE vmask4 operator<(vint4 a, vint4 b)
546 ASTCENC_SIMD_INLINE vmask4 operator>(vint4 a, vint4 b)
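The comparison operators return a vmask4 rather than a vint4, so their results are consumed by mask users such as select(), listed further down, rather than used directly:

```cpp
void compare_demo()
{
    vint4 a(1, 5, 3, 7);
    vint4 b(4, 4, 4, 4);

    vmask4 lt = a < b;                   // lanes 0 and 2 set
    vmask4 ne = a != b;                  // all lanes set

    // select(a, b, cond) keeps a where cond is clear, takes b where set
    vint4 capped = select(a, b, a > b);  // (1, 4, 3, 4)
}
```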
557 template <int s> ASTCENC_SIMD_INLINE vint4 lsl(vint4 a)
559 return vint4(a.m[0] << s,
568 template <int s> ASTCENC_SIMD_INLINE vint4 lsr(vint4 a)
575 return vint4(static_cast<int>(as0),
584 template <int s> ASTCENC_SIMD_INLINE vint4 asr(vint4 a)
586 return vint4(a.m[0] >> s,
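The shift helpers take the shift count as a template parameter. The static_cast visible in the lsr fragment suggests each lane is pushed through an unsigned type before shifting; a standalone scalar model of why that matters (right-shifting a negative signed int is implementation-defined before C++20, so the cast guarantees a logical shift). The lane helper names here are illustrative, not from the library:

```cpp
#include <cstdint>

template <int s> int lsl_lane(int a) { return a << s; }
template <int s> int asr_lane(int a) { return a >> s; }  // arithmetic: sign-extends

template <int s> int lsr_lane(int a)
{
    uint32_t u = static_cast<uint32_t>(a) >> s;  // logical: zero-fills the top bits
    return static_cast<int>(u);
}

// lsr_lane<1>(-2) == 0x7FFFFFFF, while asr_lane<1>(-2) == -1
```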
595 ASTCENC_SIMD_INLINE vint4 min(vint4 a, vint4 b)
597 return vint4(a.m[0] < b.m[0] ? a.m[0] : b.m[0],
606 ASTCENC_SIMD_INLINE vint4 max(vint4 a, vint4 b)
608 return vint4(a.m[0] > b.m[0] ? a.m[0] : b.m[0],
617 ASTCENC_SIMD_INLINE vint4 hmin(vint4 a)
621 return vint4(std::min(b, c));
627 ASTCENC_SIMD_INLINE vint4 hmax(vint4 a)
631 return vint4(std::max(b, c));
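min and max are lane-wise; note from the signatures that hmin and hmax return a vint4, i.e. the horizontal reduction broadcast back across all four lanes rather than a scalar:

```cpp
void minmax_demo()
{
    vint4 v(3, 1, 4, 1);
    vint4 w(2, 2, 2, 2);

    vint4 lo  = min(v, w);  // (2, 1, 2, 1)
    vint4 hi  = max(v, w);  // (3, 2, 4, 2)
    vint4 hlo = hmin(v);    // (1, 1, 1, 1): reduction broadcast to every lane
    vint4 hhi = hmax(v);    // (4, 4, 4, 4)
}
```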
637 ASTCENC_SIMD_INLINE int hadd_s(vint4 a)
645 ASTCENC_SIMD_INLINE void storea(vint4 a, int* p)
656 ASTCENC_SIMD_INLINE void store(vint4 a, int* p)
667 ASTCENC_SIMD_INLINE void store(vint4 a, uint8_t* p)
675 ASTCENC_SIMD_INLINE void store_nbytes(vint4 a, uint8_t* p)
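hadd_s is the scalar-returning horizontal add. For the stores, the storea/store split presumably mirrors the loada/load split, with storea requiring an aligned destination; that pairing is an assumption from the names:

```cpp
void store_demo()
{
    vint4 v(1, 2, 3, 4);

    int total = hadd_s(v);      // 1 + 2 + 3 + 4 = 10

    alignas(16) int abuf[4];
    storea(v, abuf);            // aligned store (alignment requirement assumed)

    int ubuf[4];
    store(v, ubuf);             // unaligned-safe store
}
```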
683 ASTCENC_SIMD_INLINE vint4 gatheri(const int* base, vint4 indices)
685 return vint4(base[indices.m[0]],
694 ASTCENC_SIMD_INLINE vint4 pack_low_bytes(vint4 a)
702 return vint4(b, 0, 0, 0);
708 ASTCENC_SIMD_INLINE vint4 select(vint4 a, vint4 b, vmask4 cond)
710 return vint4((cond.m[0] & static_cast<int>(0x80000000)) ? b.m[0] : a.m[0],
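gatheri does indexed loads from a scalar base pointer, and pack_low_bytes plus store_nbytes appear designed to pair: pack the low byte of each lane into lane 0, then write those four bytes out. That pairing is inferred from the fragments, not stated:

```cpp
void gather_pack_demo()
{
    const int table[8] { 10, 11, 12, 13, 14, 15, 16, 17 };

    vint4 idx(0, 2, 4, 6);
    vint4 g = gatheri(table, idx);       // (10, 12, 14, 16)

    vint4 safe = select(vint4::zero(), g, g > vint4(11));  // (0, 12, 14, 16)

    uint8_t bytes[4];
    store_nbytes(pack_low_bytes(g), bytes);  // low byte of each lane -> 4 bytes (assumed pairing)
}
```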
944 ASTCENC_SIMD_INLINE vfloat4 gatherf(const float* base, vint4 indices)
977 ASTCENC_SIMD_INLINE vint4 float_to_int(vfloat4 a)
979 return vint4(static_cast<int>(a.m[0]),
988 ASTCENC_SIMD_INLINE vint4 float_to_int_rtn(vfloat4 a)
991 return vint4(static_cast<int>(a.m[0]),
1000 ASTCENC_SIMD_INLINE vfloat4 int_to_float(vint4 a)
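Per the casts in the fragments, float_to_int truncates toward zero, while float_to_int_rtn rounds to nearest, seemingly by biasing before the cast, so exact tie and negative-value behavior is assumed here. gatherf is the vfloat4 counterpart of gatheri:

```cpp
void convert_demo()
{
    vfloat4 f(1.4f, 1.6f, 2.5f, 3.0f);

    vint4 t = float_to_int(f);       // truncation toward zero: (1, 1, 2, 3)
    vint4 n = float_to_int_rtn(f);   // round-to-nearest: (1, 2, 3, 3) (tie handling assumed)
    vfloat4 back = int_to_float(t);  // (1.0, 1.0, 2.0, 3.0)

    const float ftable[8] { 0.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f };
    vfloat4 fg = gatherf(ftable, vint4(0, 2, 4, 6));  // (0.0, 2.0, 4.0, 6.0)
}
```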
1011 ASTCENC_SIMD_INLINE vint4 float_to_float16(vfloat4 a)
1013 return vint4(
1031 ASTCENC_SIMD_INLINE vfloat4 float16_to_float(vint4 a)
1055 ASTCENC_SIMD_INLINE vint4 float_as_int(vfloat4 a)
1057 vint4 r;
1069 ASTCENC_SIMD_INLINE vfloat4 int_as_float(vint4 a)
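float_as_int and int_as_float are bit-casts rather than value conversions, while the float16 pair converts each lane to and from an FP16 payload held per 32-bit lane (the payload placement and rounding mode are assumptions):

```cpp
void bitcast_demo()
{
    vfloat4 f(1.0f, 2.0f, 0.5f, 0.0f);

    vint4 bits = float_as_int(f);       // raw IEEE 754 patterns; lane 0 == 0x3F800000
    vfloat4 same = int_as_float(bits);  // exact round-trip: a bit-cast, not a conversion

    vint4 h = float_to_float16(f);      // one FP16 payload per lane (0x3C00 in lane 0 if low-half)
    vfloat4 wide = float16_to_float(h); // exact here; values needing rounding would not be
}
```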
1079 ASTCENC_SIMD_INLINE void vtable_prepare(vint4 t0, vint4& t0p)
1087 ASTCENC_SIMD_INLINE void vtable_prepare(vint4 t0, vint4 t1, vint4& t0p, vint4& t1p)
1097 vint4 t0, vint4 t1, vint4 t2, vint4 t3,
1098 vint4& t0p, vint4& t1p, vint4& t2p, vint4& t3p)
1109 ASTCENC_SIMD_INLINE vint4 vtable_8bt_32bi(vint4 t0, vint4 idx)
1115 return vint4(table[idx.lane<0>()],
1125 ASTCENC_SIMD_INLINE vint4 vtable_8bt_32bi(vint4 t0, vint4 t1, vint4 idx)
1132 return vint4(table[idx.lane<0>()],
1141 ASTCENC_SIMD_INLINE vint4 vtable_8bt_32bi(vint4 t0, vint4 t1, vint4 t2, vint4 t3, vint4 idx)
1150 return vint4(table[idx.lane<0>()],
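The vtable routines implement byte-table lookups with 32-bit indices; vtable_prepare exists so SIMD backends can pre-shuffle the table, and on this scalar path it is presumably a pass-through. A sketch of the single-register variant, assuming little-endian byte order within lanes:

```cpp
void vtable_demo()
{
    // 16-byte table packed into one vint4 via the uint8_t load above
    uint8_t raw[16];
    for (int i = 0; i < 16; i++)
    {
        raw[i] = static_cast<uint8_t>(i * 3);
    }

    vint4 t0 = vint4::load(raw);
    vint4 t0p;
    vtable_prepare(t0, t0p);   // assumed pass-through on the scalar path

    vint4 idx(0, 5, 10, 15);
    vint4 v = vtable_8bt_32bi(t0p, idx);  // (raw[0], raw[5], raw[10], raw[15]) = (0, 15, 30, 45)
}
```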
1164 ASTCENC_SIMD_INLINE vint4 interleave_rgba8(vint4 r, vint4 g, vint4 b, vint4 a)
1183 ASTCENC_SIMD_INLINE void store_lanes_masked(uint8_t* base, vint4 data, vmask4 mask)
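interleave_rgba8 packs the low byte of each input lane into per-pixel RGBA words, and store_lanes_masked writes only the lanes the mask selects; the per-32-bit-lane masking granularity is assumed from the vmask4 parameter:

```cpp
void rgba_demo()
{
    // Four pixels, one channel value per lane; only the low byte of each lane is used
    vint4 r(0xFF, 0x00, 0x00, 0x80);
    vint4 g(0x00, 0xFF, 0x00, 0x80);
    vint4 b(0x00, 0x00, 0xFF, 0x80);
    vint4 a(0xFF, 0xFF, 0xFF, 0xFF);

    vint4 rgba = interleave_rgba8(r, g, b, a);  // lane i holds pixel i as packed RGBA8

    uint8_t out[16];
    vmask4 first3 = vint4::lane_id() < vint4(3);  // keep pixels 0..2, drop pixel 3
    store_lanes_masked(out, rgba, first3);        // writes only the selected 32-bit lanes
}
```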