/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_UACCESS_H
#define _ASM_X86_UACCESS_H
/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/kasan-checks.h>
#include <linux/string.h>
#include <asm/asm.h>
#include <asm/page.h>
#include <asm/smap.h>
#include <asm/extable.h>

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 0 if the range is valid, nonzero otherwise.
 */
static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, unsigned long limit)
{
	/*
	 * If we have used "sizeof()" for the size,
	 * we know it won't overflow the limit (but
	 * it might overflow the 'addr', so it's
	 * important to subtract the size from the
	 * limit, not add it to the address).
	 */
	if (__builtin_constant_p(size))
		return unlikely(addr > limit - size);

	/* Arbitrary sizes? Be careful about overflow */
	addr += size;
	if (unlikely(addr < size))
		return true;
	return unlikely(addr > limit);
}

#define __range_not_ok(addr, size, limit)				\
({									\
	__chk_user_ptr(addr);						\
	__chk_range_not_ok((unsigned long __force)(addr), size, limit); \
})
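
/*
 * Illustrative (not part of the kernel API): with a variable size,
 * "addr + size" can wrap.  On 64-bit, addr = 0xffffffffffff0000 and
 * size = 0x20000 give addr + size = 0x10000, which is smaller than
 * size, so the wrap is caught by the "addr < size" test above.
 */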

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
static inline bool pagefault_disabled(void);
# define WARN_ON_IN_IRQ()	\
	WARN_ON_ONCE(!in_task() && !pagefault_disabled())
#else
# define WARN_ON_IN_IRQ()
#endif

/**
 * access_ok - Checks if a user space pointer is valid
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 *
 * Return: true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 */
#define access_ok(addr, size)					\
({									\
	WARN_ON_IN_IRQ();						\
	likely(!__range_not_ok(addr, size, TASK_SIZE_MAX));		\
})
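
/*
 * Illustrative usage (assumed call site, not defined here): check the
 * whole range once, then use the unchecked "__" accessors below:
 *
 *	if (!access_ok(uptr, sizeof(*uptr)))
 *		return -EFAULT;
 *	if (__get_user(val, uptr))
 *		return -EFAULT;
 */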

extern int __get_user_1(void);
extern int __get_user_2(void);
extern int __get_user_4(void);
extern int __get_user_8(void);
extern int __get_user_nocheck_1(void);
extern int __get_user_nocheck_2(void);
extern int __get_user_nocheck_4(void);
extern int __get_user_nocheck_8(void);
extern int __get_user_bad(void);

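/*
 * stac()/clac() set and clear EFLAGS.AC, opening and closing a SMAP
 * window around user accesses; barrier_nospec() keeps the CPU from
 * speculatively dereferencing a user pointer before the access_ok()
 * check is resolved (Spectre v1 hardening).
 */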
#define __uaccess_begin() stac()
#define __uaccess_end()   clac()
#define __uaccess_begin_nospec()	\
({					\
	stac();				\
	barrier_nospec();		\
})

/*
 * This is the smallest unsigned integer type that can fit a value
 * (up to 'long long')
 */
#define __inttype(x) __typeof__(		\
	__typefits(x,char,			\
	  __typefits(x,short,			\
	    __typefits(x,int,			\
	      __typefits(x,long,0ULL)))))

#define __typefits(x,type,not) \
	__builtin_choose_expr(sizeof(x)<=sizeof(type),(unsigned type)0,not)
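
/*
 * For example (illustrative): __inttype(char) is unsigned char,
 * __inttype(int) is unsigned int, and __inttype(u64) is unsigned long
 * long on 32-bit kernels -- always an unsigned type at least as wide
 * as the value.
 */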

/*
 * This is used for both get_user() and __get_user() to expand to
 * the proper special function call that has odd calling conventions
 * due to returning both a value and an error, and that depends on
 * the size of the pointer passed in.
 *
 * Careful: we have to cast the result to the type of the pointer
 * for sign reasons.
 *
 * The use of _ASM_DX as the register specifier is a bit of a
 * simplification, as gcc only cares about it as the starting point
 * and not size: for a 64-bit value it will use %ecx:%edx on 32 bits
 * (%ecx being the next register in gcc's x86 register sequence), and
 * %rdx on 64 bits.
 *
 * Clang/LLVM cares about the size of the register, but still wants
 * the base register for something that ends up being a pair.
 */
#define do_get_user_call(fn,x,ptr)					\
({									\
	int __ret_gu;							\
	register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX);		\
	__chk_user_ptr(ptr);						\
	asm volatile("call __" #fn "_%P4"				\
		     : "=a" (__ret_gu), "=r" (__val_gu),		\
			ASM_CALL_CONSTRAINT				\
		     : "0" (ptr), "i" (sizeof(*(ptr))));		\
	(x) = (__force __typeof__(*(ptr))) __val_gu;			\
	__builtin_expect(__ret_gu, 0);					\
})

/**
 * get_user - Get a simple variable from user space.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Return: zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define get_user(x,ptr) ({ might_fault(); do_get_user_call(get_user,x,ptr); })
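
/*
 * Illustrative call site (uaddr is an assumed user pointer):
 *
 *	u32 __user *uaddr;
 *	u32 val;
 *
 *	if (get_user(val, uaddr))
 *		return -EFAULT;
 */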

/**
 * __get_user - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Return: zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define __get_user(x,ptr) do_get_user_call(get_user_nocheck,x,ptr)
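
/*
 * Illustrative: when reading several fields of one object, a single
 * access_ok() can cover the whole struct (u is an assumed pointer):
 *
 *	if (!access_ok(u, sizeof(*u)))
 *		return -EFAULT;
 *	if (__get_user(a, &u->a) || __get_user(b, &u->b))
 *		return -EFAULT;
 */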

#ifdef CONFIG_X86_32
#define __put_user_goto_u64(x, addr, label)			\
	asm_volatile_goto("\n"					\
		     "1:	movl %%eax,0(%1)\n"		\
		     "2:	movl %%edx,4(%1)\n"		\
		     _ASM_EXTABLE_UA(1b, %l2)			\
		     _ASM_EXTABLE_UA(2b, %l2)			\
		     : : "A" (x), "r" (addr)			\
		     : : label)

#else
#define __put_user_goto_u64(x, ptr, label) \
	__put_user_goto(x, ptr, "q", "er", label)
#endif

extern void __put_user_bad(void);
/*
 * Strange magic calling convention: pointer in %ecx,
 * value in %eax(:%edx), return value in %ecx. clobbers %ebx
 */
extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);
extern void __put_user_nocheck_1(void);
extern void __put_user_nocheck_2(void);
extern void __put_user_nocheck_4(void);
extern void __put_user_nocheck_8(void);

/*
 * ptr must be evaluated and assigned to the temporary __ptr_pu before
 * the assignment of x to __val_pu, to avoid any function calls
 * involved in the ptr expression (possibly implicitly generated due
 * to KASAN) from clobbering %ax.
 */
#define do_put_user_call(fn,x,ptr)					\
({									\
	int __ret_pu;							\
	void __user *__ptr_pu;						\
	register __typeof__(*(ptr)) __val_pu asm("%"_ASM_AX);		\
	__chk_user_ptr(ptr);						\
	__ptr_pu = (ptr);						\
	__val_pu = (x);							\
	asm volatile("call __" #fn "_%P[size]"				\
		     : "=c" (__ret_pu),					\
			ASM_CALL_CONSTRAINT				\
		     : "0" (__ptr_pu),					\
		       "r" (__val_pu),					\
		       [size] "i" (sizeof(*(ptr)))			\
		     :"ebx");						\
	__builtin_expect(__ret_pu, 0);					\
})

/**
 * put_user - Write a simple value into user space.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Return: zero on success, or -EFAULT on error.
 */
#define put_user(x, ptr) ({ might_fault(); do_put_user_call(put_user,x,ptr); })
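
/*
 * Illustrative call site (uaddr is an assumed user pointer):
 *
 *	u32 __user *uaddr;
 *
 *	if (put_user(0x1234, uaddr))
 *		return -EFAULT;
 */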

/**
 * __put_user - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Return: zero on success, or -EFAULT on error.
 */
#define __put_user(x, ptr) do_put_user_call(put_user_nocheck,x,ptr)

#define __put_user_size(x, ptr, size, label)				\
do {									\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__put_user_goto(x, ptr, "b", "iq", label);		\
		break;							\
	case 2:								\
		__put_user_goto(x, ptr, "w", "ir", label);		\
		break;							\
	case 4:								\
		__put_user_goto(x, ptr, "l", "ir", label);		\
		break;							\
	case 8:								\
		__put_user_goto_u64(x, ptr, label);			\
		break;							\
	default:							\
		__put_user_bad();					\
	}								\
} while (0)

#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
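
/*
 * With asm-goto output support, a faulting access jumps straight to the
 * caller-supplied error label via the exception table, so no error code
 * has to be threaded through the asm.  Without it (the #else branch
 * below), a .fixup stub writes -EFAULT into an error variable and the
 * caller tests it.
 */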

#ifdef CONFIG_X86_32
#define __get_user_asm_u64(x, ptr, label) do {				\
	unsigned int __gu_low, __gu_high;				\
	const unsigned int __user *__gu_ptr;				\
	__gu_ptr = (const void __user *)(ptr);				\
	__get_user_asm(__gu_low, __gu_ptr, "l", "=r", label);		\
	__get_user_asm(__gu_high, __gu_ptr+1, "l", "=r", label);	\
	(x) = ((unsigned long long)__gu_high << 32) | __gu_low;	\
} while (0)
#else
#define __get_user_asm_u64(x, ptr, label)				\
	__get_user_asm(x, ptr, "q", "=r", label)
#endif

#define __get_user_size(x, ptr, size, label)				\
do {									\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:	{							\
		unsigned char x_u8__;					\
		__get_user_asm(x_u8__, ptr, "b", "=q", label);		\
		(x) = x_u8__;						\
		break;							\
	}								\
	case 2:								\
		__get_user_asm(x, ptr, "w", "=r", label);		\
		break;							\
	case 4:								\
		__get_user_asm(x, ptr, "l", "=r", label);		\
		break;							\
	case 8:								\
		__get_user_asm_u64(x, ptr, label);			\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
} while (0)

#define __get_user_asm(x, addr, itype, ltype, label)			\
	asm_volatile_goto("\n"						\
		     "1:	mov"itype" %[umem],%[output]\n"		\
		     _ASM_EXTABLE_UA(1b, %l2)				\
		     : [output] ltype(x)				\
		     : [umem] "m" (__m(addr))				\
		     : : label)

#else // !CONFIG_CC_HAS_ASM_GOTO_OUTPUT

#ifdef CONFIG_X86_32
#define __get_user_asm_u64(x, ptr, retval)				\
({									\
	__typeof__(ptr) __ptr = (ptr);					\
	asm volatile("\n"						\
		     "1:	movl %[lowbits],%%eax\n"		\
		     "2:	movl %[highbits],%%edx\n"		\
		     "3:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "4:	mov %[efault],%[errout]\n"		\
		     "	xorl %%eax,%%eax\n"				\
		     "	xorl %%edx,%%edx\n"				\
		     "	jmp 3b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE_UA(1b, 4b)				\
		     _ASM_EXTABLE_UA(2b, 4b)				\
		     : [errout] "=r" (retval),				\
		       [output] "=&A"(x)				\
		     : [lowbits] "m" (__m(__ptr)),			\
		       [highbits] "m" __m(((u32 __user *)(__ptr)) + 1),	\
		       [efault] "i" (-EFAULT), "0" (retval));		\
})

#else
#define __get_user_asm_u64(x, ptr, retval) \
	 __get_user_asm(x, ptr, retval, "q", "=r")
#endif

#define __get_user_size(x, ptr, size, retval)				\
do {									\
	unsigned char x_u8__;						\
									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__get_user_asm(x_u8__, ptr, retval, "b", "=q");		\
		(x) = x_u8__;						\
		break;							\
	case 2:								\
		__get_user_asm(x, ptr, retval, "w", "=r");		\
		break;							\
	case 4:								\
		__get_user_asm(x, ptr, retval, "l", "=r");		\
		break;							\
	case 8:								\
		__get_user_asm_u64(x, ptr, retval);			\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
} while (0)

#define __get_user_asm(x, addr, err, itype, ltype)			\
	asm volatile("\n"						\
		     "1:	mov"itype" %[umem],%[output]\n"		\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:	mov %[efault],%[errout]\n"		\
		     "	xorl %k[output],%k[output]\n"			\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE_UA(1b, 3b)				\
		     : [errout] "=r" (err),				\
		       [output] ltype(x)				\
		     : [umem] "m" (__m(addr)),				\
		       [efault] "i" (-EFAULT), "0" (err))
#endif // CONFIG_CC_HAS_ASM_GOTO_OUTPUT

/* FIXME: this hack is definitely wrong -AK */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
#define __put_user_goto(x, addr, itype, ltype, label)			\
	asm_volatile_goto("\n"						\
		"1:	mov"itype" %0,%1\n"				\
		_ASM_EXTABLE_UA(1b, %l2)				\
		: : ltype(x), "m" (__m(addr))				\
		: : label)

extern unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n);
extern __must_check long
strncpy_from_user(char *dst, const char __user *src, long count);

extern __must_check long strnlen_user(const char __user *str, long n);

unsigned long __must_check clear_user(void __user *mem, unsigned long len);
unsigned long __must_check __clear_user(void __user *mem, unsigned long len);

#ifdef CONFIG_ARCH_HAS_COPY_MC
unsigned long __must_check
copy_mc_to_kernel(void *to, const void *from, unsigned len);
#define copy_mc_to_kernel copy_mc_to_kernel

unsigned long __must_check
copy_mc_to_user(void __user *to, const void *from, unsigned len);
#endif

/*
 * movsl can be slow when source and dest are not both 8-byte aligned
 */
#ifdef CONFIG_X86_INTEL_USERCOPY
extern struct movsl_mask {
	int mask;
} ____cacheline_aligned_in_smp movsl_mask;
#endif

#define ARCH_HAS_NOCACHE_UACCESS 1

#ifdef CONFIG_X86_32
# include <asm/uaccess_32.h>
#else
# include <asm/uaccess_64.h>
#endif

/*
 * The "unsafe" user accesses aren't really "unsafe", but the naming
 * is a big fat warning: you have to not only do the access_ok()
 * checking before using them, but you have to surround them with the
 * user_access_begin/end() pair.
 */
static __must_check __always_inline bool user_access_begin(const void __user *ptr, size_t len)
{
	if (unlikely(!access_ok(ptr,len)))
		return 0;
	__uaccess_begin_nospec();
	return 1;
}
#define user_access_begin(a,b)	user_access_begin(a,b)
#define user_access_end()	__uaccess_end()

#define user_access_save()	smap_save()
#define user_access_restore(x)	smap_restore(x)
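
/*
 * Illustrative pattern for the unsafe accessors (Efault is an assumed
 * local label in the caller):
 *
 *	if (!user_access_begin(uptr, sizeof(*uptr)))
 *		return -EFAULT;
 *	unsafe_put_user(val, uptr, Efault);
 *	user_access_end();
 *	return 0;
 * Efault:
 *	user_access_end();
 *	return -EFAULT;
 */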

#define unsafe_put_user(x, ptr, label)	\
	__put_user_size((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), label)

#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
#define unsafe_get_user(x, ptr, err_label)					\
do {										\
	__inttype(*(ptr)) __gu_val;						\
	__get_user_size(__gu_val, (ptr), sizeof(*(ptr)), err_label);		\
	(x) = (__force __typeof__(*(ptr)))__gu_val;				\
} while (0)
#else // !CONFIG_CC_HAS_ASM_GOTO_OUTPUT
#define unsafe_get_user(x, ptr, err_label)					\
do {										\
	int __gu_err;								\
	__inttype(*(ptr)) __gu_val;						\
	__get_user_size(__gu_val, (ptr), sizeof(*(ptr)), __gu_err);		\
	(x) = (__force __typeof__(*(ptr)))__gu_val;				\
	if (unlikely(__gu_err)) goto err_label;					\
} while (0)
#endif // CONFIG_CC_HAS_ASM_GOTO_OUTPUT

/*
 * We want the unsafe accessors to always be inlined and use
 * the error labels - thus the macro games.
 */
#define unsafe_copy_loop(dst, src, len, type, label)				\
	while (len >= sizeof(type)) {						\
		unsafe_put_user(*(type *)(src),(type __user *)(dst),label);	\
		dst += sizeof(type);						\
		src += sizeof(type);						\
		len -= sizeof(type);						\
	}

#define unsafe_copy_to_user(_dst,_src,_len,label)			\
do {									\
	char __user *__ucu_dst = (_dst);				\
	const char *__ucu_src = (_src);					\
	size_t __ucu_len = (_len);					\
	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u64, label);	\
	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u32, label);	\
	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u16, label);	\
	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u8, label);	\
} while (0)
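
/*
 * Illustrative: the copy is split into the widest possible stores, so
 * e.g. 15 bytes become one u64, one u32, one u16 and one u8 store, each
 * with its own exception-table entry:
 *
 *	if (!user_access_begin(dst, len))
 *		return -EFAULT;
 *	unsafe_copy_to_user(dst, src, len, Efault);
 *	user_access_end();
 */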

#define HAVE_GET_KERNEL_NOFAULT

#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
#define __get_kernel_nofault(dst, src, type, err_label)			\
	__get_user_size(*((type *)(dst)), (__force type __user *)(src),	\
			sizeof(type), err_label)
#else // !CONFIG_CC_HAS_ASM_GOTO_OUTPUT
#define __get_kernel_nofault(dst, src, type, err_label)			\
do {									\
	int __kr_err;							\
									\
	__get_user_size(*((type *)(dst)), (__force type __user *)(src),	\
			sizeof(type), __kr_err);			\
	if (unlikely(__kr_err))						\
		goto err_label;						\
} while (0)
#endif // CONFIG_CC_HAS_ASM_GOTO_OUTPUT

#define __put_kernel_nofault(dst, src, type, err_label)			\
	__put_user_size(*((type *)(src)), (__force type __user *)(dst),	\
			sizeof(type), err_label)
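
/*
 * Illustrative: these are the arch hooks behind the generic
 * copy_from_kernel_nofault()/copy_to_kernel_nofault() helpers, e.g.:
 *
 *	long x;
 *
 *	if (copy_from_kernel_nofault(&x, kptr, sizeof(x)))
 *		return -EFAULT;
 */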

#endif /* _ASM_X86_UACCESS_H */