Lines Matching refs:rg
243 uint16x4x2_t rg;
245 if ( true ) { rg = vld2_lane_u16(ptr + 0, rg, 0); }
246 if (tail > 1) { rg = vld2_lane_u16(ptr + 2, rg, 1); }
247 if (tail > 2) { rg = vld2_lane_u16(ptr + 4, rg, 2); }
249 rg = vld2_u16(ptr);
251 *r = rg.val[0];
252 *g = rg.val[1];
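
These matches are the body of a tail-aware NEON load that deinterleaves up to four uint16_t r/g pairs: each vld2_lane_u16 merges one interleaved pair into lane i of rg, and the full-stripe path takes a single vld2_u16. A minimal self-contained sketch, assuming a wrapper of this shape (the name, signature, and tail contract are not in the matches, and rg is zero-initialized here where the original deliberately leaves it uninitialized):

    #include <arm_neon.h>
    #include <cstddef>
    #include <cstdint>

    // tail == 0: load a full group of four r/g pairs.
    // tail in 1..3: load only that many pairs, leaving the rest zero.
    static inline void load2_rg_u16(const uint16_t* ptr, size_t tail,
                                    uint16x4_t* r, uint16x4_t* g) {
        uint16x4x2_t rg = {{ vdup_n_u16(0), vdup_n_u16(0) }};
        if (tail) {
            if (  true  ) { rg = vld2_lane_u16(ptr + 0, rg, 0); }  // pair 0
            if (tail > 1) { rg = vld2_lane_u16(ptr + 2, rg, 1); }  // pair 1
            if (tail > 2) { rg = vld2_lane_u16(ptr + 4, rg, 2); }  // pair 2
        } else {
            rg = vld2_u16(ptr);  // one instruction deinterleaves all four pairs
        }
        *r = rg.val[0];
        *g = rg.val[1];
    }
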
301 float32x4x2_t rg;
303 if ( true ) { rg = vld2q_lane_f32(ptr + 0, rg, 0); }
304 if (tail > 1) { rg = vld2q_lane_f32(ptr + 2, rg, 1); }
305 if (tail > 2) { rg = vld2q_lane_f32(ptr + 4, rg, 2); }
307 rg = vld2q_f32(ptr);
309 *r = rg.val[0];
310 *g = rg.val[1];
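
The same pattern at q-register width for float data; only the intrinsic suffixes change. A sketch under the same assumptions as above:

    #include <arm_neon.h>
    #include <cstddef>

    static inline void load2_rg_f32(const float* ptr, size_t tail,
                                    float32x4_t* r, float32x4_t* g) {
        float32x4x2_t rg = {{ vdupq_n_f32(0), vdupq_n_f32(0) }};
        if (tail) {
            if (  true  ) { rg = vld2q_lane_f32(ptr + 0, rg, 0); }
            if (tail > 1) { rg = vld2q_lane_f32(ptr + 2, rg, 1); }
            if (tail > 2) { rg = vld2q_lane_f32(ptr + 4, rg, 2); }
        } else {
            rg = vld2q_f32(ptr);
        }
        *r = rg.val[0];
        *g = rg.val[1];
    }
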
764 auto rg = _mm_shufflehi_epi16(_mm_shufflelo_epi16(rg01_23, 0xD8), 0xD8); // r0 r1 g0 g1 r2 r3 g2 g3
766 auto R = _mm_shuffle_epi32(rg, 0x88); // r0 r1 r2 r3 r0 r1 r2 r3
767 auto G = _mm_shuffle_epi32(rg, 0xDD); // g0 g1 g2 g3 g0 g1 g2 g3
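
SSE2 has no deinterleaving load, so this path builds one from shuffles: 0xD8 is _MM_SHUFFLE(3,1,2,0), which swaps the two middle 16-bit lanes of a 64-bit half, and 0x88/0xDD are _MM_SHUFFLE(2,0,2,0)/_MM_SHUFFLE(3,1,3,1), which gather the even/odd dwords. A standalone sketch of the full-width case (the helper name and memcpy extraction are mine):

    #include <emmintrin.h>
    #include <cstdint>
    #include <cstring>

    static inline void deinterleave_rg_u16(const uint16_t* ptr,
                                           uint16_t r[4], uint16_t g[4]) {
        __m128i rg01_23 = _mm_loadu_si128((const __m128i*)ptr);  // r0 g0 r1 g1 r2 g2 r3 g3
        __m128i rg = _mm_shufflehi_epi16(
                     _mm_shufflelo_epi16(rg01_23, 0xD8), 0xD8);  // r0 r1 g0 g1 r2 r3 g2 g3
        __m128i R  = _mm_shuffle_epi32(rg, 0x88);                // r0 r1 r2 r3 r0 r1 r2 r3
        __m128i G  = _mm_shuffle_epi32(rg, 0xDD);                // g0 g1 g2 g3 g0 g1 g2 g3
        memcpy(r, &R, 4 * sizeof(uint16_t));  // low half of R holds the reds
        memcpy(g, &G, 4 * sizeof(uint16_t));  // low half of G holds the greens
    }
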
772 U32 rg = _mm_unpacklo_epi16(widen_cast<__m128i>(r), widen_cast<__m128i>(g));
775 _mm_storel_epi64((__m128i*)ptr, rg);
777 int32_t rgpair = rg[2];
781 int32_t rgpair = rg[0];
785 _mm_storeu_si128((__m128i*)ptr + 0, rg);
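
The store direction interleaves with _mm_unpacklo_epi16 and then peels off 8- and 4-byte chunks for the tail; rg[0]/rg[2] in the matches index 32-bit lanes via GCC/Clang vector extensions, one r/g pair per lane. A sketch using only portable intrinsics (the wrapper name and plain __m128i arguments are assumptions):

    #include <emmintrin.h>
    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    static inline void store2_rg_u16(uint16_t* ptr, size_t tail,
                                     __m128i r, __m128i g) {
        __m128i rg = _mm_unpacklo_epi16(r, g);  // r0 g0 r1 g1 r2 g2 r3 g3
        if (tail) {
            if (tail > 1) {
                _mm_storel_epi64((__m128i*)ptr, rg);               // pairs 0 and 1
                if (tail > 2) {
                    // dword 2 of rg is the third pair; equivalent to rg[2] above
                    int32_t rgpair = _mm_cvtsi128_si32(_mm_shuffle_epi32(rg, 2));
                    memcpy(ptr + 4, &rgpair, sizeof(rgpair));
                }
            } else {
                int32_t rgpair = _mm_cvtsi128_si32(rg);            // pair 0 only
                memcpy(ptr, &rgpair, sizeof(rgpair));
            }
        } else {
            _mm_storeu_si128((__m128i*)ptr, rg);                   // all four pairs
        }
    }
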
841 auto rg = _mm_unpacklo_epi16(_02, _13), // r0 r1 r2 r3 g0 g1 g2 g3
844 *r = sk_unaligned_load<U16>((uint16_t*)&rg + 0);
845 *g = sk_unaligned_load<U16>((uint16_t*)&rg + 4);
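
Here rg comes out of a 4x4 transpose of four RGBA pixels, with r in its low half and g in its high half, which is why the two sk_unaligned_load calls read at element offsets 0 and 4. A sketch with the assumed surrounding loads, using memcpy in place of sk_unaligned_load:

    #include <emmintrin.h>
    #include <cstdint>
    #include <cstring>

    static inline void load4_rg_u16(const uint16_t* ptr,
                                    uint16_t r[4], uint16_t g[4]) {
        __m128i _01 = _mm_loadu_si128((const __m128i*)ptr + 0);  // r0 g0 b0 a0 r1 g1 b1 a1
        __m128i _23 = _mm_loadu_si128((const __m128i*)ptr + 1);  // r2 g2 b2 a2 r3 g3 b3 a3
        __m128i _02 = _mm_unpacklo_epi16(_01, _23);  // r0 r2 g0 g2 b0 b2 a0 a2
        __m128i _13 = _mm_unpackhi_epi16(_01, _23);  // r1 r3 g1 g3 b1 b3 a1 a3
        __m128i rg  = _mm_unpacklo_epi16(_02, _13);  // r0 r1 r2 r3 g0 g1 g2 g3
        memcpy(r, (const uint16_t*)&rg + 0, 4 * sizeof(uint16_t));  // low half
        memcpy(g, (const uint16_t*)&rg + 4, 4 * sizeof(uint16_t));  // high half
    }
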
851 auto rg = _mm_unpacklo_epi16(widen_cast<__m128i>(r), widen_cast<__m128i>(g)),
856 if ( true ) { _mm_storel_pd(dst + 0, _mm_castsi128_pd(_mm_unpacklo_epi32(rg, ba))); }
857 if (tail > 1) { _mm_storeh_pd(dst + 1, _mm_castsi128_pd(_mm_unpacklo_epi32(rg, ba))); }
858 if (tail > 2) { _mm_storel_pd(dst + 2, _mm_castsi128_pd(_mm_unpackhi_epi32(rg, ba))); }
860 _mm_storeu_si128((__m128i*)ptr + 0, _mm_unpacklo_epi32(rg, ba));
861 _mm_storeu_si128((__m128i*)ptr + 1, _mm_unpackhi_epi32(rg, ba));
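
The tail branch stores one 64-bit RGBA pixel at a time through _mm_storel_pd/_mm_storeh_pd, which take a __m128d, so the integer unpack results must be bit-cast with _mm_castsi128_pd (a no-op at runtime). A sketch of the assumed whole function, with widen_cast replaced by plain __m128i parameters:

    #include <emmintrin.h>
    #include <cstddef>
    #include <cstdint>

    static inline void store4_rgba_u16(uint16_t* ptr, size_t tail,
                                       __m128i r, __m128i g, __m128i b, __m128i a) {
        __m128i rg = _mm_unpacklo_epi16(r, g),  // r0 g0 r1 g1 r2 g2 r3 g3
                ba = _mm_unpacklo_epi16(b, a);  // b0 a0 b1 a1 b2 a2 b3 a3
        if (tail) {
            double* dst = (double*)ptr;  // one double spans one u16 RGBA pixel
            if (  true  ) { _mm_storel_pd(dst + 0, _mm_castsi128_pd(_mm_unpacklo_epi32(rg, ba))); }
            if (tail > 1) { _mm_storeh_pd(dst + 1, _mm_castsi128_pd(_mm_unpacklo_epi32(rg, ba))); }
            if (tail > 2) { _mm_storel_pd(dst + 2, _mm_castsi128_pd(_mm_unpackhi_epi32(rg, ba))); }
        } else {
            _mm_storeu_si128((__m128i*)ptr + 0, _mm_unpacklo_epi32(rg, ba));  // pixels 0,1
            _mm_storeu_si128((__m128i*)ptr + 1, _mm_unpackhi_epi32(rg, ba));  // pixels 2,3
        }
    }
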
3743 SI void from_88(U16 rg, U16* r, U16* g) {
3744 *r = (rg & 0xFF);
3745 *g = (rg >> 8);
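
from_88 splits a packed R8G8 pixel into its channels, applied elementwise across the U16 vector. A scalar model of the unpack and its assumed inverse (the pack formula is inferred, not shown in the matches):

    #include <cstdint>

    static inline void from_88_scalar(uint16_t rg, uint8_t* r, uint8_t* g) {
        *r = (uint8_t)(rg & 0xFF);  // low byte:  red
        *g = (uint8_t)(rg >> 8);    // high byte: green
    }

    static inline uint16_t to_88_scalar(uint8_t r, uint8_t g) {
        return (uint16_t)(r | (g << 8));  // assumed inverse of from_88
    }
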
3750 uint8x8x2_t rg;
3752 case 0: rg = vld2_u8 ((const uint8_t*)(ptr+0) ); break;
3753 case 7: rg = vld2_lane_u8((const uint8_t*)(ptr+6), rg, 6); [[fallthrough]];
3754 case 6: rg = vld2_lane_u8((const uint8_t*)(ptr+5), rg, 5); [[fallthrough]];
3755 case 5: rg = vld2_lane_u8((const uint8_t*)(ptr+4), rg, 4); [[fallthrough]];
3756 case 4: rg = vld2_lane_u8((const uint8_t*)(ptr+3), rg, 3); [[fallthrough]];
3757 case 3: rg = vld2_lane_u8((const uint8_t*)(ptr+2), rg, 2); [[fallthrough]];
3758 case 2: rg = vld2_lane_u8((const uint8_t*)(ptr+1), rg, 1); [[fallthrough]];
3759 case 1: rg = vld2_lane_u8((const uint8_t*)(ptr+0), rg, 0);
3761 *r = cast<U16>(rg.val[0]);
3762 *g = cast<U16>(rg.val[1]);
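
The switch is a Duff's-device-style tail loader: cases fall through downward, so tail == k fills lanes k-1..0 one pair at a time, and case 0 (a full stripe of N == 8) takes the single deinterleaving vld2_u8. The same control flow as a self-contained helper (zero-init added, wrapper assumed):

    #include <arm_neon.h>
    #include <cstddef>
    #include <cstdint>

    static inline void load_rg88(const uint16_t* ptr, size_t tail,
                                 uint8x8_t* r, uint8x8_t* g) {
        uint8x8x2_t rg = {{ vdup_n_u8(0), vdup_n_u8(0) }};
        switch (tail & 7) {  // tail & (N-1) with N == 8
            case 0: rg = vld2_u8     ((const uint8_t*)(ptr+0)       ); break;
            case 7: rg = vld2_lane_u8((const uint8_t*)(ptr+6), rg, 6); [[fallthrough]];
            case 6: rg = vld2_lane_u8((const uint8_t*)(ptr+5), rg, 5); [[fallthrough]];
            case 5: rg = vld2_lane_u8((const uint8_t*)(ptr+4), rg, 4); [[fallthrough]];
            case 4: rg = vld2_lane_u8((const uint8_t*)(ptr+3), rg, 3); [[fallthrough]];
            case 3: rg = vld2_lane_u8((const uint8_t*)(ptr+2), rg, 2); [[fallthrough]];
            case 2: rg = vld2_lane_u8((const uint8_t*)(ptr+1), rg, 1); [[fallthrough]];
            case 1: rg = vld2_lane_u8((const uint8_t*)(ptr+0), rg, 0);
        }
        *r = rg.val[0];
        *g = rg.val[1];
    }
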
3770 uint8x8x2_t rg = {{
3775 case 0: vst2_u8 ((uint8_t*)(ptr+0), rg ); break;
3776 case 7: vst2_lane_u8((uint8_t*)(ptr+6), rg, 6); [[fallthrough]];
3777 case 6: vst2_lane_u8((uint8_t*)(ptr+5), rg, 5); [[fallthrough]];
3778 case 5: vst2_lane_u8((uint8_t*)(ptr+4), rg, 4); [[fallthrough]];
3779 case 4: vst2_lane_u8((uint8_t*)(ptr+3), rg, 3); [[fallthrough]];
3780 case 3: vst2_lane_u8((uint8_t*)(ptr+2), rg, 2); [[fallthrough]];
3781 case 2: vst2_lane_u8((uint8_t*)(ptr+1), rg, 1); [[fallthrough]];
3782 case 1: vst2_lane_u8((uint8_t*)(ptr+0), rg, 0);
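
The store mirrors the load exactly; the truncated initializer above presumably packs the two narrowed channel vectors into rg. A matching sketch, completing the fragment on that assumption:

    #include <arm_neon.h>
    #include <cstddef>
    #include <cstdint>

    static inline void store_rg88(uint16_t* ptr, size_t tail,
                                  uint8x8_t r, uint8x8_t g) {
        uint8x8x2_t rg = {{ r, g }};  // assumed completion of the {{ ... }} above
        switch (tail & 7) {
            case 0: vst2_u8     ((uint8_t*)(ptr+0), rg   ); break;
            case 7: vst2_lane_u8((uint8_t*)(ptr+6), rg, 6); [[fallthrough]];
            case 6: vst2_lane_u8((uint8_t*)(ptr+5), rg, 5); [[fallthrough]];
            case 5: vst2_lane_u8((uint8_t*)(ptr+4), rg, 4); [[fallthrough]];
            case 4: vst2_lane_u8((uint8_t*)(ptr+3), rg, 3); [[fallthrough]];
            case 3: vst2_lane_u8((uint8_t*)(ptr+2), rg, 2); [[fallthrough]];
            case 2: vst2_lane_u8((uint8_t*)(ptr+1), rg, 1); [[fallthrough]];
            case 1: vst2_lane_u8((uint8_t*)(ptr+0), rg, 0);
        }
    }

    // Example: round-trip up to eight RG88 pixels through the split channels,
    // exercising both sketches above.
    // void copy_rg88(const uint16_t* src, uint16_t* dst, size_t tail) {
    //     uint8x8_t r, g;
    //     load_rg88(src, tail, &r, &g);
    //     store_rg88(dst, tail, r, g);
    // }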