/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_UACCESS_64_H
#define _ASM_X86_UACCESS_64_H

/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/lockdep.h>
#include <linux/kasan-checks.h>
#include <asm/alternative.h>
#include <asm/cpufeatures.h>
#include <asm/page.h>

/*
 * Copy To/From Userspace
 */

/*
 * These handle faults on both the source and the destination, but do
 * not perform any access_ok() checks themselves.
 */
__must_check unsigned long
copy_user_enhanced_fast_string(void *to, const void *from, unsigned len);
__must_check unsigned long
copy_user_generic_string(void *to, const void *from, unsigned len);
__must_check unsigned long
copy_user_generic_unrolled(void *to, const void *from, unsigned len);

static __always_inline __must_check unsigned long
copy_user_generic(void *to, const void *from, unsigned len)
{
	unsigned ret;

	/*
	 * If CPU has ERMS feature, use copy_user_enhanced_fast_string.
	 * Otherwise, if CPU has rep_good feature, use copy_user_generic_string.
	 * Otherwise, use copy_user_generic_unrolled.
	 */
	alternative_call_2(copy_user_generic_unrolled,
			 copy_user_generic_string,
			 X86_FEATURE_REP_GOOD,
			 copy_user_enhanced_fast_string,
			 X86_FEATURE_ERMS,
			 ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
				     "=d" (len)),
			 "1" (to), "2" (from), "3" (len)
			 : "memory", "rcx", "r8", "r9", "r10", "r11");
	return ret;
}
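
/*
 * Illustrative sketch (not kernel code): the alternative_call_2() above
 * is patched at boot, so the dispatch is roughly equivalent to the
 * following (boot_cpu_has() used here purely for exposition):
 *
 *	if (boot_cpu_has(X86_FEATURE_ERMS))
 *		ret = copy_user_enhanced_fast_string(to, from, len);
 *	else if (boot_cpu_has(X86_FEATURE_REP_GOOD))
 *		ret = copy_user_generic_string(to, from, len);
 *	else
 *		ret = copy_user_generic_unrolled(to, from, len);
 *
 * All three variants return the number of bytes that could NOT be
 * copied, i.e. 0 on complete success.
 */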

static __always_inline __must_check unsigned long
raw_copy_from_user(void *dst, const void __user *src, unsigned long size)
{
	return copy_user_generic(dst, (__force void *)src, size);
}

static __always_inline __must_check unsigned long
raw_copy_to_user(void __user *dst, const void *src, unsigned long size)
{
	return copy_user_generic((__force void *)dst, src, size);
}

static __always_inline __must_check
unsigned long raw_copy_in_user(void __user *dst, const void __user *src, unsigned long size)
{
	return copy_user_generic((__force void *)dst,
				 (__force void *)src, size);
}
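
/*
 * Usage sketch (illustrative only; example_read_from_user() is a made-up
 * helper, and real callers normally go through the generic
 * copy_from_user() wrapper, which adds the access_ok() and object-size
 * checks itself):
 *
 *	static int example_read_from_user(void *kbuf,
 *					  const void __user *ubuf,
 *					  unsigned long len)
 *	{
 *		if (!access_ok(ubuf, len))
 *			return -EFAULT;
 *		if (raw_copy_from_user(kbuf, ubuf, len))
 *			return -EFAULT;
 *		return 0;
 *	}
 *
 * (Assumes the two-argument access_ok() of v5.0+; older kernels take a
 * VERIFY_READ/VERIFY_WRITE first argument.)
 */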

/*
 * Cache-bypassing copy that tolerates faults; @zerorest historically
 * selected whether the tail of @dst is zeroed after a partial copy
 * (the nocache helper below passes 0).
 */
extern long __copy_user_nocache(void *dst, const void __user *src,
				unsigned size, int zerorest);

extern long __copy_user_flushcache(void *dst, const void __user *src, unsigned size);
extern void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
				   size_t len);

static inline int
__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
				  unsigned size)
{
	kasan_check_write(dst, size);
	return __copy_user_nocache(dst, src, size, 0);
}
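
/*
 * Usage sketch (illustrative): the _inatomic variant may be called with
 * page faults disabled; instead of sleeping on a fault it returns the
 * number of bytes left uncopied, so callers typically retry via a
 * faulting slow path:
 *
 *	int left;
 *
 *	pagefault_disable();
 *	left = __copy_from_user_inatomic_nocache(dst, src, size);
 *	pagefault_enable();
 *	if (left)
 *		... fall back to a path that may fault and sleep ...
 */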

static inline int
__copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
{
	kasan_check_write(dst, size);
	return __copy_user_flushcache(dst, src, size);
}
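
/*
 * Usage sketch (illustrative; example_pmem_write() is hypothetical): the
 * flushcache variant is meant for persistent-memory destinations, where
 * the copied cachelines must reach the persistence domain.  The in-tree
 * consumer is the _copy_from_iter_flushcache() path in lib/iov_iter.c:
 *
 *	static int example_pmem_write(void *pmem_dst,
 *				      const void __user *usrc, unsigned len)
 *	{
 *		if (__copy_from_user_flushcache(pmem_dst, usrc, len))
 *			return -EFAULT;
 *		return 0;
 *	}
 */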
#endif /* _ASM_X86_UACCESS_64_H */