/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_XOR_64_H
#define _ASM_X86_XOR_64_H

58c2ecf20Sopenharmony_cistatic struct xor_block_template xor_block_sse = {
68c2ecf20Sopenharmony_ci	.name = "generic_sse",
78c2ecf20Sopenharmony_ci	.do_2 = xor_sse_2,
88c2ecf20Sopenharmony_ci	.do_3 = xor_sse_3,
98c2ecf20Sopenharmony_ci	.do_4 = xor_sse_4,
108c2ecf20Sopenharmony_ci	.do_5 = xor_sse_5,
118c2ecf20Sopenharmony_ci};


/* Also try the AVX routines */
#include <asm/xor_avx.h>

/*
 * We force the use of the SSE xor block because it can write around
 * the L2 cache.  We may also be able to load into the L1 cache only,
 * depending on how the CPU deals with a load to a line that is being
 * prefetched.
 */
/*
 * Candidate templates, tried in this order: the AVX variants (when
 * available), the prefetching SSE variant (xor_block_sse_pf64 —
 * declared outside this header; presumably in asm-generic xor code,
 * verify against asm/xor.h), then plain SSE above.  xor_speed()
 * presumably benchmarks each template so the fastest can be chosen
 * at boot — NOTE(review): confirm against crypto/xor.c.
 */
#undef XOR_TRY_TEMPLATES
#define XOR_TRY_TEMPLATES			\
do {						\
	AVX_XOR_SPEED;				\
	xor_speed(&xor_block_sse_pf64);		\
	xor_speed(&xor_block_sse);		\
} while (0)

#endif /* _ASM_X86_XOR_64_H */