/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_UACCESS_64_H
#define _ASM_X86_UACCESS_64_H

/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/lockdep.h>
#include <linux/kasan-checks.h>
#include <asm/alternative.h>
#include <asm/cpufeatures.h>
#include <asm/page.h>

#ifdef CONFIG_ADDRESS_MASKING
/*
 * Mask out tag bits from the address.
 */
static inline unsigned long __untagged_addr(unsigned long addr)
{
	/*
	 * Refer to tlbstate_untag_mask directly to avoid a RIP-relative
	 * relocation in alternative instructions; such a relocation would
	 * be wrong once the instruction is copied to its target location.
	 */
	asm (ALTERNATIVE("",
			 "and %%gs:tlbstate_untag_mask, %[addr]\n\t", X86_FEATURE_LAM)
	     : [addr] "+r" (addr) : "m" (tlbstate_untag_mask));

	return addr;
}

#define untagged_addr(addr)	({					\
	unsigned long __addr = (__force unsigned long)(addr);		\
	(__force __typeof__(addr))__untagged_addr(__addr);		\
})
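
/*
 * Illustrative sketch only (not part of this header): untagged_addr() is
 * meant for places that need the raw linear address of a current-task
 * user pointer, e.g. before a VMA lookup.  'uptr' is a hypothetical name.
 *
 *	mmap_read_lock(current->mm);
 *	vma = find_vma(current->mm, untagged_addr(uptr));
 *	mmap_read_unlock(current->mm);
 */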

static inline unsigned long __untagged_addr_remote(struct mm_struct *mm,
						   unsigned long addr)
{
	mmap_assert_locked(mm);
	return addr & (mm)->context.untag_mask;
}

#define untagged_addr_remote(mm, addr)	({				\
	unsigned long __addr = (__force unsigned long)(addr);		\
	(__force __typeof__(addr))__untagged_addr_remote(mm, __addr);	\
})
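
/*
 * Sketch of the remote variant (illustrative only): it takes an explicit
 * mm and asserts that the mmap lock is held, so it fits accessors that
 * walk another task's address space.  Names below are hypothetical.
 *
 *	mmap_read_lock(mm);
 *	vma = find_vma(mm, untagged_addr_remote(mm, uptr));
 *	...
 *	mmap_read_unlock(mm);
 */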

#endif

/*
 * The virtual address space is logically divided into a kernel half and
 * a user half.  When cast to a signed type, user pointers are positive
 * and kernel pointers are negative.
 */
#define valid_user_address(x) ((long)(x) >= 0)
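
/*
 * For illustration: 0x00007f0000001000 has the sign bit clear and passes
 * valid_user_address(), while a kernel pointer such as 0xffff888000000000
 * is negative when cast to 'long' and fails it.
 */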

/*
 * User pointers can have tag bits on x86-64.  This scheme tolerates
 * arbitrary values in those bits rather than masking them off.
 *
 * Enforce two rules:
 * 1. 'ptr' must be in the user half of the address space
 * 2. 'ptr+size' must not overflow into kernel addresses
 *
 * Note that addresses around the sign change are not valid addresses,
 * and will GP-fault even with LAM enabled if the sign bit is set (see
 * "CR3.LAM_SUP" that can narrow the canonicality check if we ever
 * enable it, but not remove it entirely).
 *
 * So the "overflow into kernel addresses" does not imply some sudden
 * exact boundary at the sign bit, and we can allow a lot of slop on the
 * size check.
 *
 * In fact, we could probably remove the size check entirely, since
 * any kernel accesses will be in increasing address order starting
 * at 'ptr', and even if the end might be in kernel space, we'll
 * hit the GP faults for non-canonical accesses before we ever get
 * there.
 *
 * That's a separate optimization, for now just handle the small
 * constant case.
 */
static inline bool __access_ok(const void __user *ptr, unsigned long size)
{
	if (__builtin_constant_p(size <= PAGE_SIZE) && size <= PAGE_SIZE) {
		return valid_user_address(ptr);
	} else {
		unsigned long sum = size + (unsigned long)ptr;
		return valid_user_address(sum) && sum >= (unsigned long)ptr;
	}
}
#define __access_ok __access_ok
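
/*
 * Usage sketch (illustrative, 'ubuf' and 'count' are hypothetical): the
 * generic access_ok() resolves to __access_ok() here.  A compile-time
 * constant size up to PAGE_SIZE only checks the pointer's sign bit, while
 * a runtime size also gets the overflow test.
 *
 *	if (!access_ok(ubuf, sizeof(u64)))	// constant size: sign check only
 *		return -EFAULT;
 *	if (!access_ok(ubuf, count))		// runtime size: sign + overflow
 *		return -EFAULT;
 */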

/*
 * Copy To/From Userspace
 */

/* Handles exceptions in both to and from, but doesn't do access_ok */
__must_check unsigned long
rep_movs_alternative(void *to, const void *from, unsigned len);

static __always_inline __must_check unsigned long
copy_user_generic(void *to, const void *from, unsigned long len)
{
	stac();
	/*
	 * If the CPU has the FSRM feature, use 'rep movs'.
	 * Otherwise, use rep_movs_alternative.
	 */
	asm volatile(
		"1:\n\t"
		ALTERNATIVE("rep movsb",
			    "call rep_movs_alternative", ALT_NOT(X86_FEATURE_FSRM))
		"2:\n"
		_ASM_EXTABLE_UA(1b, 2b)
		:"+c" (len), "+D" (to), "+S" (from), ASM_CALL_CONSTRAINT
		: : "memory", "rax");
	clac();
	return len;
}
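
/*
 * Note on the calling convention above (a sketch, not normative): 'len'
 * lives in %rcx, which both 'rep movsb' and rep_movs_alternative() count
 * down as bytes are copied, so the value returned is the number of bytes
 * left uncopied (0 on complete success).
 */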

static __always_inline __must_check unsigned long
raw_copy_from_user(void *dst, const void __user *src, unsigned long size)
{
	return copy_user_generic(dst, (__force void *)src, size);
}

static __always_inline __must_check unsigned long
raw_copy_to_user(void __user *dst, const void *src, unsigned long size)
{
	return copy_user_generic((__force void *)dst, src, size);
}
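
/*
 * Illustrative caller pattern (not defined in this header): the generic
 * copy_from_user()/copy_to_user() wrappers do the access_ok() check and
 * then fall through to these raw helpers, roughly:
 *
 *	if (!access_ok(src, size))
 *		return size;
 *	left = raw_copy_from_user(dst, src, size);	// bytes not copied
 */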

extern long __copy_user_nocache(void *dst, const void __user *src, unsigned size);
extern long __copy_user_flushcache(void *dst, const void __user *src, unsigned size);

static inline int
__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
				  unsigned size)
{
	long ret;
	kasan_check_write(dst, size);
	stac();
	ret = __copy_user_nocache(dst, src, size);
	clac();
	return ret;
}

static inline int
__copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
{
	kasan_check_write(dst, size);
	return __copy_user_flushcache(dst, src, size);
}
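
/*
 * Intended-use sketch (illustrative): the nocache/flushcache variants are
 * for destinations where polluting the CPU cache is undesirable or where
 * data must reach persistent memory, e.g. a pmem-style write path.
 * 'kaddr', 'ubuf' and 'bytes' are hypothetical names.
 *
 *	if (__copy_from_user_flushcache(kaddr, ubuf, bytes))
 *		return -EFAULT;
 */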

/*
 * Zero Userspace.
 */

__must_check unsigned long
rep_stos_alternative(void __user *addr, unsigned long len);

static __always_inline __must_check unsigned long __clear_user(void __user *addr, unsigned long size)
{
	might_fault();
	stac();

	/*
	 * No memory constraint because it doesn't change any memory gcc
	 * knows about.
	 */
	asm volatile(
		"1:\n\t"
		ALTERNATIVE("rep stosb",
			    "call rep_stos_alternative", ALT_NOT(X86_FEATURE_FSRS))
		"2:\n"
	       _ASM_EXTABLE_UA(1b, 2b)
	       : "+c" (size), "+D" (addr), ASM_CALL_CONSTRAINT
	       : "a" (0));

	clac();

	return size;
}

static __always_inline unsigned long clear_user(void __user *to, unsigned long n)
{
	if (__access_ok(to, n))
		return __clear_user(to, n);
	return n;
}
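
/*
 * Usage sketch (illustrative, hypothetical names): clear_user() returns
 * the number of bytes that could NOT be zeroed, so anything non-zero is
 * treated as a fault.
 *
 *	if (clear_user(ubuf, len))
 *		return -EFAULT;
 */
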
#endif /* _ASM_X86_UACCESS_64_H */