Searched refs:xmm_crc0 (Results 1 - 3 of 3) sorted by relevance

/third_party/node/deps/zlib/
crc_folding.c
30 __m128i xmm_crc0 = _mm_loadu_si128((__m128i *)s->crc0 + 0);\
37 _mm_storeu_si128((__m128i *)s->crc0 + 0, xmm_crc0);\
48 xmm_crc0 = _mm_cvtsi32_si128(0x9db42487); in crc_fold_init()
59 __m128i *xmm_crc0, __m128i *xmm_crc1, in fold_1()
71 *xmm_crc3 = *xmm_crc0; in fold_1()
72 *xmm_crc0 = _mm_clmulepi64_si128(*xmm_crc0, xmm_fold4, 0x01); in fold_1()
74 ps_crc0 = _mm_castsi128_ps(*xmm_crc0); in fold_1()
78 *xmm_crc0 = *xmm_crc1; in fold_1()
85 __m128i *xmm_crc0, __m128 in fold_2()
58 fold_1(deflate_state *const s, __m128i *xmm_crc0, __m128i *xmm_crc1, __m128i *xmm_crc2, __m128i *xmm_crc3) fold_1() argument
84 fold_2(deflate_state *const s, __m128i *xmm_crc0, __m128i *xmm_crc1, __m128i *xmm_crc2, __m128i *xmm_crc3) fold_2() argument
118 fold_3(deflate_state *const s, __m128i *xmm_crc0, __m128i *xmm_crc1, __m128i *xmm_crc2, __m128i *xmm_crc3) fold_3() argument
158 fold_4(deflate_state *const s, __m128i *xmm_crc0, __m128i *xmm_crc1, __m128i *xmm_crc2, __m128i *xmm_crc3) fold_4() argument
224 partial_fold(deflate_state *const s, const size_t len, __m128i *xmm_crc0, __m128i *xmm_crc1, __m128i *xmm_crc2, __m128i *xmm_crc3, __m128i *xmm_crc_part) partial_fold() argument
438 __m128i xmm_crc0 = _mm_loadu_si128((__m128i *)s->crc0 + 0); crc_fold_512to32() local
[all...]
/third_party/node/deps/v8/third_party/zlib/
crc_folding.c
30 __m128i xmm_crc0 = _mm_loadu_si128((__m128i *)s->crc0 + 0);\
37 _mm_storeu_si128((__m128i *)s->crc0 + 0, xmm_crc0);\
48 xmm_crc0 = _mm_cvtsi32_si128(0x9db42487); in crc_fold_init()
59 __m128i *xmm_crc0, __m128i *xmm_crc1, in fold_1()
71 *xmm_crc3 = *xmm_crc0; in fold_1()
72 *xmm_crc0 = _mm_clmulepi64_si128(*xmm_crc0, xmm_fold4, 0x01); in fold_1()
74 ps_crc0 = _mm_castsi128_ps(*xmm_crc0); in fold_1()
78 *xmm_crc0 = *xmm_crc1; in fold_1()
85 __m128i *xmm_crc0, __m128 in fold_2()
58 fold_1(deflate_state *const s, __m128i *xmm_crc0, __m128i *xmm_crc1, __m128i *xmm_crc2, __m128i *xmm_crc3) fold_1() argument
84 fold_2(deflate_state *const s, __m128i *xmm_crc0, __m128i *xmm_crc1, __m128i *xmm_crc2, __m128i *xmm_crc3) fold_2() argument
118 fold_3(deflate_state *const s, __m128i *xmm_crc0, __m128i *xmm_crc1, __m128i *xmm_crc2, __m128i *xmm_crc3) fold_3() argument
158 fold_4(deflate_state *const s, __m128i *xmm_crc0, __m128i *xmm_crc1, __m128i *xmm_crc2, __m128i *xmm_crc3) fold_4() argument
224 partial_fold(deflate_state *const s, const size_t len, __m128i *xmm_crc0, __m128i *xmm_crc1, __m128i *xmm_crc2, __m128i *xmm_crc3, __m128i *xmm_crc_part) partial_fold() argument
[all...]
/third_party/skia/third_party/externals/zlib/
crc_folding.c
30 __m128i xmm_crc0 = _mm_loadu_si128((__m128i *)s->crc0 + 0);\
37 _mm_storeu_si128((__m128i *)s->crc0 + 0, xmm_crc0);\
48 xmm_crc0 = _mm_cvtsi32_si128(0x9db42487); in crc_fold_init()
59 __m128i *xmm_crc0, __m128i *xmm_crc1, in fold_1()
71 *xmm_crc3 = *xmm_crc0; in fold_1()
72 *xmm_crc0 = _mm_clmulepi64_si128(*xmm_crc0, xmm_fold4, 0x01); in fold_1()
74 ps_crc0 = _mm_castsi128_ps(*xmm_crc0); in fold_1()
78 *xmm_crc0 = *xmm_crc1; in fold_1()
85 __m128i *xmm_crc0, __m128 in fold_2()
58 fold_1(deflate_state *const s, __m128i *xmm_crc0, __m128i *xmm_crc1, __m128i *xmm_crc2, __m128i *xmm_crc3) fold_1() argument
84 fold_2(deflate_state *const s, __m128i *xmm_crc0, __m128i *xmm_crc1, __m128i *xmm_crc2, __m128i *xmm_crc3) fold_2() argument
118 fold_3(deflate_state *const s, __m128i *xmm_crc0, __m128i *xmm_crc1, __m128i *xmm_crc2, __m128i *xmm_crc3) fold_3() argument
158 fold_4(deflate_state *const s, __m128i *xmm_crc0, __m128i *xmm_crc1, __m128i *xmm_crc2, __m128i *xmm_crc3) fold_4() argument
224 partial_fold(deflate_state *const s, const size_t len, __m128i *xmm_crc0, __m128i *xmm_crc1, __m128i *xmm_crc2, __m128i *xmm_crc3, __m128i *xmm_crc_part) partial_fold() argument
[all...]
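
All three hits are vendored copies of the same crc_folding.c, Intel's PCLMULQDQ-based CRC-32 folding code for zlib, so the matched lines are identical across the three paths. For orientation, here is a minimal sketch of the fold step those hits circle around: crc_fold_init() seeds xmm_crc0 with 0x9db42487 (line 48), and fold_1() through fold_4() carry-less-multiply each 128-bit accumulator against a pair of fold constants and XOR the two products together (lines 71-78). The sketch is hypothetical and self-contained, not the zlib entry points: the file name fold_sketch.c and the helper fold_128() are invented for illustration, and the fold constants are the 4-block constants from Intel's CRC folding paper, which crc_folding.c keeps in xmm_fold4.

    /* fold_sketch.c -- hypothetical, minimal sketch of the 128-bit fold
     * step matched above; not the real zlib API.
     * Build with: gcc -O2 -mpclmul fold_sketch.c */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <wmmintrin.h>              /* _mm_clmulepi64_si128 */

    /* Fold one 128-bit CRC accumulator forward by 512 bits and absorb
     * the next 16 input bytes, mirroring the per-register math in
     * fold_1()..fold_4(). */
    static __m128i fold_128(__m128i crc, __m128i data)
    {
        /* 4-block fold constants for the reflected CRC-32 polynomial,
         * as in crc_folding.c's xmm_fold4. */
        const __m128i fold4 = _mm_set_epi32(0x00000001, 0x54442bd4,
                                            0x00000001, 0xc6e41596);
        /* imm 0x01: crc[127:64] x fold4[63:0];
         * imm 0x10: crc[63:0]   x fold4[127:64]. */
        __m128i a = _mm_clmulepi64_si128(crc, fold4, 0x01);
        __m128i b = _mm_clmulepi64_si128(crc, fold4, 0x10);
        return _mm_xor_si128(_mm_xor_si128(a, b), data);
    }

    int main(void)
    {
        /* 0x9db42487 is the seed crc_fold_init() loads into xmm_crc0
         * (line 48 in each hit above). */
        __m128i crc = _mm_cvtsi32_si128(0x9db42487);
        const uint8_t buf[16] = "0123456789abcdef";

        crc = fold_128(crc, _mm_loadu_si128((const __m128i *)buf));

        /* Dump the raw accumulator; the final 512-to-32 Barrett
         * reduction (crc_fold_512to32(), line 438) is omitted here. */
        uint32_t out[4];
        memcpy(out, &crc, sizeof out);
        printf("%08x %08x %08x %08x\n", out[0], out[1], out[2], out[3]);
        return 0;
    }

In the full code, fold_1() also rotates the four accumulators after the multiply, which is why line 78 reads *xmm_crc0 = *xmm_crc1; the single-register arithmetic sketched above is what each rotation applies.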
