/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ARCH_POWERPC_UACCESS_H
#define _ARCH_POWERPC_UACCESS_H

#include <asm/ppc_asm.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/extable.h>
#include <asm/kup.h>

#ifdef __powerpc64__
/* We use TASK_SIZE_USER64 as TASK_SIZE is not constant */
#define TASK_SIZE_MAX		TASK_SIZE_USER64
#else
#define TASK_SIZE_MAX		TASK_SIZE
#endif

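/*
 * The size is checked as "size <= TASK_SIZE_MAX - addr" rather than
 * "addr + size <= TASK_SIZE_MAX" so that a huge size cannot wrap the
 * sum around and slip past the limit.
 */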
static inline bool __access_ok(unsigned long addr, unsigned long size)
{
	return addr < TASK_SIZE_MAX && size <= TASK_SIZE_MAX - addr;
}

#define access_ok(addr, size)		\
	(__chk_user_ptr(addr),		\
	 __access_ok((unsigned long)(addr), (size)))

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 *
 * As we use the same address space for kernel and user data on the
 * PowerPC, we can just do these as direct assignments.  (Of course, the
 * exception handling means that it's no longer "just"...)
 *
 */
#define get_user(x, ptr) \
	__get_user_check((x), (ptr), sizeof(*(ptr)))
#define put_user(x, ptr) \
	__put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#define __get_user(x, ptr) \
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)), true)
#define __put_user(x, ptr) \
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
#define __put_user_goto(x, ptr, label) \
	__put_user_nocheck_goto((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), label)

#define __get_user_allowed(x, ptr) \
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)), false)

#define __get_user_inatomic(x, ptr) \
	__get_user_nosleep((x), (ptr), sizeof(*(ptr)))
#define __put_user_inatomic(x, ptr) \
	__put_user_nosleep((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#ifdef CONFIG_PPC64

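/*
 * Fetch a (possibly prefixed) instruction from user memory.  The first
 * word is read with gu_op; if its primary opcode is OP_PREFIX the suffix
 * word is read as well and the pair is combined with ppc_inst_prefix(),
 * otherwise the single word becomes the instruction.  dest is only
 * written once every fetch has succeeded.
 */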
#define ___get_user_instr(gu_op, dest, ptr)				\
({									\
	long __gui_ret = 0;						\
	unsigned long __gui_ptr = (unsigned long)ptr;			\
	struct ppc_inst __gui_inst;					\
	unsigned int __prefix, __suffix;				\
	__gui_ret = gu_op(__prefix, (unsigned int __user *)__gui_ptr);	\
	if (__gui_ret == 0) {						\
		if ((__prefix >> 26) == OP_PREFIX) {			\
			__gui_ret = gu_op(__suffix,			\
				(unsigned int __user *)__gui_ptr + 1);	\
			__gui_inst = ppc_inst_prefix(__prefix,		\
						     __suffix);		\
		} else {						\
			__gui_inst = ppc_inst(__prefix);		\
		}							\
		if (__gui_ret == 0)					\
			(dest) = __gui_inst;				\
	}								\
	__gui_ret;							\
})

#define get_user_instr(x, ptr) \
	___get_user_instr(get_user, x, ptr)

#define __get_user_instr(x, ptr) \
	___get_user_instr(__get_user, x, ptr)

#define __get_user_instr_inatomic(x, ptr) \
	___get_user_instr(__get_user_inatomic, x, ptr)

#else /* !CONFIG_PPC64 */
#define get_user_instr(x, ptr) \
	get_user((x).val, (u32 __user *)(ptr))

#define __get_user_instr(x, ptr) \
	__get_user_nocheck((x).val, (u32 __user *)(ptr), sizeof(u32), true)

#define __get_user_instr_inatomic(x, ptr) \
	__get_user_nosleep((x).val, (u32 __user *)(ptr), sizeof(u32))

#endif /* CONFIG_PPC64 */

extern long __put_user_bad(void);

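/*
 * __put_user_size_allowed() stores a value of the given size with user
 * write access assumed to be open already; a faulting store branches to
 * the local __pu_failed label and retval becomes -EFAULT.
 * __put_user_size() below does the same but opens and closes the user
 * write access window itself.
 */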
#define __put_user_size_allowed(x, ptr, size, retval)		\
do {								\
	__label__ __pu_failed;					\
								\
	retval = 0;						\
	__put_user_size_goto(x, ptr, size, __pu_failed);	\
	break;							\
								\
__pu_failed:							\
	retval = -EFAULT;					\
} while (0)

#define __put_user_size(x, ptr, size, retval)			\
do {								\
	allow_write_to_user(ptr, size);				\
	__put_user_size_allowed(x, ptr, size, retval);		\
	prevent_write_to_user(ptr, size);			\
} while (0)

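/*
 * __put_user_nocheck(): no access_ok() check, calls might_fault() only
 * for non-kernel addresses.
 * __put_user_check(): full access_ok() check, backs put_user().
 * __put_user_nosleep(): like the nocheck variant but never calls
 * might_fault(), for use in atomic context.
 */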
#define __put_user_nocheck(x, ptr, size)			\
({								\
	long __pu_err;						\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);		\
	__typeof__(*(ptr)) __pu_val = (x);			\
	__typeof__(size) __pu_size = (size);			\
								\
	if (!is_kernel_addr((unsigned long)__pu_addr))		\
		might_fault();					\
	__chk_user_ptr(__pu_addr);				\
	__put_user_size(__pu_val, __pu_addr, __pu_size, __pu_err);	\
								\
	__pu_err;						\
})

#define __put_user_check(x, ptr, size)					\
({									\
	long __pu_err = -EFAULT;					\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);			\
	__typeof__(*(ptr)) __pu_val = (x);				\
	__typeof__(size) __pu_size = (size);				\
									\
	might_fault();							\
	if (access_ok(__pu_addr, __pu_size))				\
		__put_user_size(__pu_val, __pu_addr, __pu_size, __pu_err); \
									\
	__pu_err;							\
})

#define __put_user_nosleep(x, ptr, size)			\
({								\
	long __pu_err;						\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);		\
	__typeof__(*(ptr)) __pu_val = (x);			\
	__typeof__(size) __pu_size = (size);			\
								\
	__chk_user_ptr(__pu_addr);				\
	__put_user_size(__pu_val, __pu_addr, __pu_size, __pu_err); \
								\
	__pu_err;						\
})


/*
 * We don't tell gcc that we are accessing memory, but this is OK
 * because we do not write to any memory gcc knows about, so there
 * are no aliasing issues.
 */
#define __put_user_asm_goto(x, addr, label, op)			\
	asm_volatile_goto(					\
		"1:	" op "%U1%X1 %0,%1	# put_user\n"	\
		EX_TABLE(1b, %l2)				\
		:						\
		: "r" (x), "m"UPD_CONSTR (*addr)		\
		:						\
		: label)

#ifdef __powerpc64__
#define __put_user_asm2_goto(x, ptr, label)			\
	__put_user_asm_goto(x, ptr, label, "std")
#else /* __powerpc64__ */
#define __put_user_asm2_goto(x, addr, label)			\
	asm_volatile_goto(					\
		"1:	stw%X1 %0, %1\n"			\
		"2:	stw%X1 %L0, %L1\n"			\
		EX_TABLE(1b, %l2)				\
		EX_TABLE(2b, %l2)				\
		:						\
		: "r" (x), "m" (*addr)				\
		:						\
		: label)
#endif /* __powerpc64__ */

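/*
 * Dispatch on the access size to the matching store instruction; a
 * faulting store branches straight to the caller-supplied label.  Any
 * other size ends up in __put_user_bad(), which has no definition and
 * therefore fails the build.
 */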
#define __put_user_size_goto(x, ptr, size, label)		\
do {								\
	switch (size) {						\
	case 1: __put_user_asm_goto(x, ptr, label, "stb"); break;	\
	case 2: __put_user_asm_goto(x, ptr, label, "sth"); break;	\
	case 4: __put_user_asm_goto(x, ptr, label, "stw"); break;	\
	case 8: __put_user_asm2_goto(x, ptr, label); break;	\
	default: __put_user_bad();				\
	}							\
} while (0)

#define __put_user_nocheck_goto(x, ptr, size, label)		\
do {								\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);		\
	__chk_user_ptr(ptr);					\
	__put_user_size_goto((x), __pu_addr, (size), label);	\
} while (0)


extern long __get_user_bad(void);

/*
 * This does an atomic 128-byte aligned load from userspace.
 * It is up to the caller to do enable_kernel_vmx() before calling!
 */
#define __get_user_atomic_128_aligned(kaddr, uaddr, err)		\
	__asm__ __volatile__(				\
		".machine push\n"			\
		".machine altivec\n"			\
		"1:	lvx  0,0,%1	# get user\n"	\
		" 	stvx 0,0,%2	# put kernel\n"	\
		".machine pop\n"			\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	li %0,%3\n"			\
		"	b 2b\n"				\
		".previous\n"				\
		EX_TABLE(1b, 3b)			\
		: "=r" (err)			\
		: "b" (uaddr), "b" (kaddr), "i" (-EFAULT), "0" (err))

#define __get_user_asm(x, addr, err, op)		\
	__asm__ __volatile__(				\
		"1:	"op"%U2%X2 %1, %2	# get_user\n"	\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	li %0,%3\n"			\
		"	li %1,0\n"			\
		"	b 2b\n"				\
		".previous\n"				\
		EX_TABLE(1b, 3b)			\
		: "=r" (err), "=r" (x)			\
		: "m"UPD_CONSTR (*addr), "i" (-EFAULT), "0" (err))

#ifdef __powerpc64__
#define __get_user_asm2(x, addr, err)			\
	__get_user_asm(x, addr, err, "ld")
#else /* __powerpc64__ */
#define __get_user_asm2(x, addr, err)			\
	__asm__ __volatile__(				\
		"1:	lwz%X2 %1, %2\n"			\
		"2:	lwz%X2 %L1, %L2\n"		\
		"3:\n"					\
		".section .fixup,\"ax\"\n"		\
		"4:	li %0,%3\n"			\
		"	li %1,0\n"			\
		"	li %1+1,0\n"			\
		"	b 3b\n"				\
		".previous\n"				\
		EX_TABLE(1b, 4b)			\
		EX_TABLE(2b, 4b)			\
		: "=r" (err), "=&r" (x)			\
		: "m" (*addr), "i" (-EFAULT), "0" (err))
#endif /* __powerpc64__ */

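/*
 * Load a value of the given size with user read access assumed to be
 * open already.  A size larger than the destination, or an unsupported
 * size, falls through to __get_user_bad().
 */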
#define __get_user_size_allowed(x, ptr, size, retval)		\
do {								\
	retval = 0;						\
	__chk_user_ptr(ptr);					\
	if (size > sizeof(x))					\
		(x) = __get_user_bad();				\
	switch (size) {						\
	case 1: __get_user_asm(x, (u8 __user *)ptr, retval, "lbz"); break;	\
	case 2: __get_user_asm(x, (u16 __user *)ptr, retval, "lhz"); break;	\
	case 4: __get_user_asm(x, (u32 __user *)ptr, retval, "lwz"); break;	\
	case 8: __get_user_asm2(x, (u64 __user *)ptr, retval);  break;	\
	default: (x) = __get_user_bad();			\
	}							\
} while (0)

#define __get_user_size(x, ptr, size, retval)			\
do {								\
	allow_read_from_user(ptr, size);			\
	__get_user_size_allowed(x, ptr, size, retval);		\
	prevent_read_from_user(ptr, size);			\
} while (0)

/*
 * This is a type: either unsigned long, if the argument fits into
 * that type, or otherwise unsigned long long.
 */
#define __long_type(x) \
	__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))

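/*
 * do_allow selects whether the user access window is opened around the
 * load (__get_user_size()) or assumed to be open already
 * (__get_user_size_allowed(), as used by unsafe_get_user() inside a
 * user_access_begin() section).
 */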
#define __get_user_nocheck(x, ptr, size, do_allow)			\
({								\
	long __gu_err;						\
	__long_type(*(ptr)) __gu_val;				\
	__typeof__(*(ptr)) __user *__gu_addr = (ptr);	\
	__typeof__(size) __gu_size = (size);			\
								\
	__chk_user_ptr(__gu_addr);				\
	if (do_allow && !is_kernel_addr((unsigned long)__gu_addr)) \
		might_fault();					\
	barrier_nospec();					\
	if (do_allow)								\
		__get_user_size(__gu_val, __gu_addr, __gu_size, __gu_err);	\
	else									\
		__get_user_size_allowed(__gu_val, __gu_addr, __gu_size, __gu_err); \
	(x) = (__typeof__(*(ptr)))__gu_val;			\
								\
	__gu_err;						\
})

#define __get_user_check(x, ptr, size)					\
({									\
	long __gu_err = -EFAULT;					\
	__long_type(*(ptr)) __gu_val = 0;				\
	__typeof__(*(ptr)) __user *__gu_addr = (ptr);		\
	__typeof__(size) __gu_size = (size);				\
									\
	might_fault();							\
	if (access_ok(__gu_addr, __gu_size)) {				\
		barrier_nospec();					\
		__get_user_size(__gu_val, __gu_addr, __gu_size, __gu_err); \
	}								\
	(x) = (__force __typeof__(*(ptr)))__gu_val;				\
									\
	__gu_err;							\
})

#define __get_user_nosleep(x, ptr, size)			\
({								\
	long __gu_err;						\
	__long_type(*(ptr)) __gu_val;				\
	__typeof__(*(ptr)) __user *__gu_addr = (ptr);	\
	__typeof__(size) __gu_size = (size);			\
								\
	__chk_user_ptr(__gu_addr);				\
	barrier_nospec();					\
	__get_user_size(__gu_val, __gu_addr, __gu_size, __gu_err); \
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
								\
	__gu_err;						\
})


/* more complex routines */

extern unsigned long __copy_tofrom_user(void __user *to,
		const void __user *from, unsigned long size);

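/*
 * Machine-check tolerant copy variants.  As with the regular copy
 * helpers, the return value is the number of bytes left uncopied.
 */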
#ifdef CONFIG_ARCH_HAS_COPY_MC
unsigned long __must_check
copy_mc_generic(void *to, const void *from, unsigned long size);

static inline unsigned long __must_check
copy_mc_to_kernel(void *to, const void *from, unsigned long size)
{
	return copy_mc_generic(to, from, size);
}
#define copy_mc_to_kernel copy_mc_to_kernel

static inline unsigned long __must_check
copy_mc_to_user(void __user *to, const void *from, unsigned long n)
{
	if (likely(check_copy_size(from, n, true))) {
		if (access_ok(to, n)) {
			allow_write_to_user(to, n);
			n = copy_mc_generic((void *)to, from, n);
			prevent_write_to_user(to, n);
		}
	}

	return n;
}
#endif

#ifdef __powerpc64__
static inline unsigned long
raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
	unsigned long ret;

	barrier_nospec();
	allow_read_write_user(to, from, n);
	ret = __copy_tofrom_user(to, from, n);
	prevent_read_write_user(to, from, n);
	return ret;
}
#endif /* __powerpc64__ */

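/*
 * For small constant sizes try a single direct load first and only fall
 * back to __copy_tofrom_user() if that load faulted.  barrier_nospec()
 * keeps the user access from being speculated past the caller's
 * access_ok() check.
 */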
static inline unsigned long raw_copy_from_user(void *to,
		const void __user *from, unsigned long n)
{
	unsigned long ret;
	if (__builtin_constant_p(n) && (n <= 8)) {
		ret = 1;

		switch (n) {
		case 1:
			barrier_nospec();
			__get_user_size(*(u8 *)to, from, 1, ret);
			break;
		case 2:
			barrier_nospec();
			__get_user_size(*(u16 *)to, from, 2, ret);
			break;
		case 4:
			barrier_nospec();
			__get_user_size(*(u32 *)to, from, 4, ret);
			break;
		case 8:
			barrier_nospec();
			__get_user_size(*(u64 *)to, from, 8, ret);
			break;
		}
		if (ret == 0)
			return 0;
	}

	barrier_nospec();
	allow_read_from_user(from, n);
	ret = __copy_tofrom_user((__force void __user *)to, from, n);
	prevent_read_from_user(from, n);
	return ret;
}

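/*
 * Write-side counterpart of the fast path above; the caller must have
 * opened user write access already (see raw_copy_to_user() below).
 */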
static inline unsigned long
raw_copy_to_user_allowed(void __user *to, const void *from, unsigned long n)
{
	if (__builtin_constant_p(n) && (n <= 8)) {
		unsigned long ret = 1;

		switch (n) {
		case 1:
			__put_user_size_allowed(*(u8 *)from, (u8 __user *)to, 1, ret);
			break;
		case 2:
			__put_user_size_allowed(*(u16 *)from, (u16 __user *)to, 2, ret);
			break;
		case 4:
			__put_user_size_allowed(*(u32 *)from, (u32 __user *)to, 4, ret);
			break;
		case 8:
			__put_user_size_allowed(*(u64 *)from, (u64 __user *)to, 8, ret);
			break;
		}
		if (ret == 0)
			return 0;
	}

	return __copy_tofrom_user(to, (__force const void __user *)from, n);
}

static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	unsigned long ret;

	allow_write_to_user(to, n);
	ret = raw_copy_to_user_allowed(to, from, n);
	prevent_write_to_user(to, n);
	return ret;
}

unsigned long __arch_clear_user(void __user *addr, unsigned long size);

static inline unsigned long clear_user(void __user *addr, unsigned long size)
{
	unsigned long ret = size;
	might_fault();
	if (likely(access_ok(addr, size))) {
		allow_write_to_user(addr, size);
		ret = __arch_clear_user(addr, size);
		prevent_write_to_user(addr, size);
	}
	return ret;
}

static inline unsigned long __clear_user(void __user *addr, unsigned long size)
{
	return clear_user(addr, size);
}

extern long strncpy_from_user(char *dst, const char __user *src, long count);
extern __must_check long strnlen_user(const char __user *str, long n);

extern long __copy_from_user_flushcache(void *dst, const void __user *src,
		unsigned size);
extern void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
			   size_t len);

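/*
 * user_access_begin() and the read/write variants open the user access
 * window after an access_ok() check; each successful *_begin() must be
 * paired with the matching *_end() once the unsafe accessors are done.
 */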
static __must_check inline bool user_access_begin(const void __user *ptr, size_t len)
{
	if (unlikely(!access_ok(ptr, len)))
		return false;

	might_fault();

	allow_read_write_user((void __user *)ptr, ptr, len);
	return true;
}
#define user_access_begin	user_access_begin
#define user_access_end		prevent_current_access_user
#define user_access_save	prevent_user_access_return
#define user_access_restore	restore_user_access

static __must_check inline bool
user_read_access_begin(const void __user *ptr, size_t len)
{
	if (unlikely(!access_ok(ptr, len)))
		return false;

	might_fault();

	allow_read_from_user(ptr, len);
	return true;
}
#define user_read_access_begin	user_read_access_begin
#define user_read_access_end		prevent_current_read_from_user

static __must_check inline bool
user_write_access_begin(const void __user *ptr, size_t len)
{
	if (unlikely(!access_ok(ptr, len)))
		return false;

	might_fault();

	allow_write_to_user((void __user *)ptr, len);
	return true;
}
#define user_write_access_begin	user_write_access_begin
#define user_write_access_end		prevent_current_write_to_user

#define unsafe_op_wrap(op, err) do { if (unlikely(op)) goto err; } while (0)
#define unsafe_get_user(x, p, e) unsafe_op_wrap(__get_user_allowed(x, p), e)
#define unsafe_put_user(x, p, e) __put_user_goto(x, p, e)

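/*
 * Copy to user space in long-sized chunks, then mop up a 4 byte (64-bit
 * only), 2 byte and 1 byte tail.  Every store goes through
 * __put_user_goto() so a fault jumps to the error label e.
 */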
#define unsafe_copy_to_user(d, s, l, e) \
do {									\
	u8 __user *_dst = (u8 __user *)(d);				\
	const u8 *_src = (const u8 *)(s);				\
	size_t _len = (l);						\
	int _i;								\
									\
	for (_i = 0; _i < (_len & ~(sizeof(long) - 1)); _i += sizeof(long))		\
		__put_user_goto(*(long*)(_src + _i), (long __user *)(_dst + _i), e);\
	if (IS_ENABLED(CONFIG_PPC64) && (_len & 4)) {			\
		__put_user_goto(*(u32*)(_src + _i), (u32 __user *)(_dst + _i), e);	\
		_i += 4;						\
	}								\
	if (_len & 2) {							\
		__put_user_goto(*(u16*)(_src + _i), (u16 __user *)(_dst + _i), e);	\
		_i += 2;						\
	}								\
	if (_len & 1) \
		__put_user_goto(*(u8*)(_src + _i), (u8 __user *)(_dst + _i), e);\
} while (0)

#define HAVE_GET_KERNEL_NOFAULT

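/*
 * Kernel-to-kernel accessors with fault handling.  They use the
 * *_allowed/goto forms directly since no user access window needs to be
 * opened for kernel addresses.
 */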
#define __get_kernel_nofault(dst, src, type, err_label)			\
do {									\
	int __kr_err;							\
									\
	__get_user_size_allowed(*((type *)(dst)), (__force type __user *)(src),\
			sizeof(type), __kr_err);			\
	if (unlikely(__kr_err))						\
		goto err_label;						\
} while (0)

#define __put_kernel_nofault(dst, src, type, err_label)			\
	__put_user_size_goto(*((type *)(src)),				\
		(__force type __user *)(dst), sizeof(type), err_label)

#endif	/* _ARCH_POWERPC_UACCESS_H */