/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __PARISC_UACCESS_H
#define __PARISC_UACCESS_H

/*
 * User space memory access functions
 */
#include <asm/page.h>
#include <asm/cache.h>

#include <linux/bug.h>
#include <linux/string.h>

#define KERNEL_DS	((mm_segment_t){0})
#define USER_DS		((mm_segment_t){1})

#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg)

#define get_fs()	(current_thread_info()->addr_limit)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))
/*
 * Note that since kernel addresses are in a separate address space on
 * parisc, we don't need to do anything for access_ok().
 * We just let the page fault handler do the right thing. This also means
 * that put_user() is the same as __put_user(), etc.
 */

#define access_ok(uaddr, size)	\
	( (uaddr) == (uaddr) )

#define put_user __put_user
#define get_user __get_user

#if !defined(CONFIG_64BIT)
#define LDD_USER(val, ptr)	__get_user_asm64(val, ptr)
#define STD_USER(x, ptr)	__put_user_asm64(x, ptr)
#else
#define LDD_USER(val, ptr)	__get_user_asm(val, "ldd", ptr)
#define STD_USER(x, ptr)	__put_user_asm("std", x, ptr)
#endif
/*
 * The exception table contains two values: the first is the relative offset
 * to the address of the instruction that is allowed to fault, and the second
 * is the relative offset to the address of the fixup routine. Since relative
 * addresses are used, 32-bit values are sufficient even on a 64-bit kernel.
 */

#define ARCH_HAS_RELATIVE_EXTABLE
struct exception_table_entry {
	int insn;	/* relative address of insn that is allowed to fault. */
	int fixup;	/* relative address of fixup routine */
};
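
/*
 * As a sketch of how such relative entries are consumed (this is not the
 * in-tree lookup code, just an illustration): the absolute addresses are
 * recovered by adding each field's own address to the stored offset, e.g.
 * for a struct exception_table_entry *ex:
 *
 *	unsigned long insn_addr  = (unsigned long)&ex->insn  + ex->insn;
 *	unsigned long fixup_addr = (unsigned long)&ex->fixup + ex->fixup;
 */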

#define ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr )\
	".section __ex_table,\"aw\"\n"			   \
	".word (" #fault_addr " - .), (" #except_addr " - .)\n\t" \
	".previous\n"
/*
 * ASM_EXCEPTIONTABLE_ENTRY_EFAULT() creates a special exception table entry
 * (with the lowest bit set) for which the fault handler in fixup_exception()
 * will load -EFAULT into %r8 for a read or write fault, and zero the target
 * register in case of a read fault in get_user().
 */
#define ASM_EXCEPTIONTABLE_ENTRY_EFAULT( fault_addr, except_addr )\
	ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr + 1)
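
/*
 * Conceptually the fixup side then does something like the sketch below.
 * This is hedged pseudocode, not the exact code in fixup_exception();
 * "entry" and "fixup_addr" are illustrative names:
 *
 *	if (entry->fixup & 1) {		// low bit marks an EFAULT entry
 *		regs->gr[8] = -EFAULT;	// %r8 carries the error code
 *		fixup_addr &= ~1UL;	// strip the marker bit
 *	}
 */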

/*
 * load_sr2() preloads the space register %%sr2 - based on the value of
 * get_fs() - with either a value of 0 to access kernel space (KERNEL_DS which
 * is 0), or with the current value of %%sr3 to access user space (USER_DS)
 * memory. The "or,=" completer nullifies the following mfsp when get_fs() is
 * 0, so %%sr2 is then loaded with the 0 still held in the scratch register
 * rather than with a copy of %%sr3. The following __get_user_asm() and
 * __put_user_asm() macros have %%sr2 hard-coded to access the requested
 * memory.
 */
#define load_sr2() \
	__asm__(" or,=  %0,%%r0,%%r0\n\t"	\
		" mfsp %%sr3,%0\n\t"		\
		" mtsp %0,%%sr2\n\t"		\
		: : "r"(get_fs()) : )
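
/*
 * In C-like pseudocode the macro above behaves roughly as follows
 * (illustrative only; the real work happens in the space registers):
 *
 *	if (get_fs().seg == KERNEL_DS.seg)
 *		sr2 = 0;	// "or,=" nullified the mfsp
 *	else
 *		sr2 = sr3;	// space id of the current user process
 */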

#define __get_user_internal(val, ptr)			\
({							\
	register long __gu_err __asm__ ("r8") = 0;	\
							\
	switch (sizeof(*(ptr))) {			\
	case 1: __get_user_asm(val, "ldb", ptr); break;	\
	case 2: __get_user_asm(val, "ldh", ptr); break;	\
	case 4: __get_user_asm(val, "ldw", ptr); break;	\
	case 8: LDD_USER(val, ptr); break;		\
	default: BUILD_BUG();				\
	}						\
							\
	__gu_err;					\
})

#define __get_user(val, ptr)				\
({							\
	load_sr2();					\
	__get_user_internal(val, ptr);			\
})
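
/*
 * Illustrative use from C code (uptr is a hypothetical __user pointer,
 * not something defined in this file):
 *
 *	u32 val;
 *
 *	if (get_user(val, uptr))	// non-zero return means -EFAULT
 *		return -EFAULT;
 */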

#define __get_user_asm(val, ldx, ptr)			\
{							\
	register long __gu_val;				\
							\
	__asm__("1: " ldx " 0(%%sr2,%2),%0\n"		\
		"9:\n"					\
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b)	\
		: "=r"(__gu_val), "=r"(__gu_err)	\
		: "r"(ptr), "1"(__gu_err));		\
							\
	(val) = (__force __typeof__(*(ptr))) __gu_val;	\
}

#if !defined(CONFIG_64BIT)

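/*
 * On 32-bit kernels a 64-bit user access has to be split into two ldw
 * instructions. The union below collects the two halves in __gu_tmp.l
 * and hands the result back through the caller's own type without a
 * cast, whatever 8-byte type *(ptr) happens to be.
 */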
#define __get_user_asm64(val, ptr)			\
{							\
	union {						\
		unsigned long long	l;		\
		__typeof__(*(ptr))	t;		\
	} __gu_tmp;					\
							\
	__asm__("   copy %%r0,%R0\n"			\
		"1: ldw 0(%%sr2,%2),%0\n"		\
		"2: ldw 4(%%sr2,%2),%R0\n"		\
		"9:\n"					\
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b)	\
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b)	\
		: "=&r"(__gu_tmp.l), "=r"(__gu_err)	\
		: "r"(ptr), "1"(__gu_err));		\
							\
	(val) = __gu_tmp.t;				\
}

#endif /* !defined(CONFIG_64BIT) */


#define __put_user_internal(x, ptr)				\
({								\
	register long __pu_err __asm__ ("r8") = 0;		\
	__typeof__(*(ptr)) __x = (__typeof__(*(ptr)))(x);	\
								\
	switch (sizeof(*(ptr))) {				\
	case 1: __put_user_asm("stb", __x, ptr); break;		\
	case 2: __put_user_asm("sth", __x, ptr); break;		\
	case 4: __put_user_asm("stw", __x, ptr); break;		\
	case 8: STD_USER(__x, ptr); break;			\
	default: BUILD_BUG();					\
	}							\
								\
	__pu_err;						\
})

#define __put_user(x, ptr)					\
({								\
	load_sr2();						\
	__put_user_internal(x, ptr);				\
})
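
/*
 * Illustrative use (uptr is again a hypothetical __user pointer):
 *
 *	if (put_user(0xdeadbeefUL, uptr))	// non-zero means -EFAULT
 *		return -EFAULT;
 */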

/*
 * The __put_user_asm() macros tell gcc they read from memory instead of
 * writing: this is because they do not write to any memory gcc knows
 * about, so there are no aliasing issues. These macros must also be
 * aware that fixups are executed in the context of the fault, and any
 * registers used there must be listed as clobbers.
 * r8 is already listed as err (__pu_err).
 */

#define __put_user_asm(stx, x, ptr)			\
	__asm__ __volatile__ (				\
		"1: " stx " %2,0(%%sr2,%1)\n"		\
		"9:\n"					\
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b)	\
		: "=r"(__pu_err)			\
		: "r"(ptr), "r"(x), "0"(__pu_err))


#if !defined(CONFIG_64BIT)

#define __put_user_asm64(__val, ptr) do {		\
	__asm__ __volatile__ (				\
		"1: stw %2,0(%%sr2,%1)\n"		\
		"2: stw %R2,4(%%sr2,%1)\n"		\
		"9:\n"					\
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b)	\
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b)	\
		: "=r"(__pu_err)			\
		: "r"(ptr), "r"(__val), "0"(__pu_err));	\
} while (0)

#endif /* !defined(CONFIG_64BIT) */

/*
 * Complex access routines -- external declarations
 */

extern long strncpy_from_user(char *, const char __user *, long);
extern unsigned lclear_user(void __user *, unsigned long);
extern long lstrnlen_user(const char __user *, long);

/*
 * Complex access routines -- macros
 */
#define user_addr_max() (~0UL)

#define strnlen_user lstrnlen_user
#define clear_user lclear_user
#define __clear_user lclear_user
unsigned long __must_check raw_copy_to_user(void __user *dst, const void *src,
					    unsigned long len);
unsigned long __must_check raw_copy_from_user(void *dst, const void __user *src,
					      unsigned long len);
unsigned long __must_check raw_copy_in_user(void __user *dst, const void __user *src,
					    unsigned long len);
#define INLINE_COPY_TO_USER
#define INLINE_COPY_FROM_USER
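
/*
 * These are the per-arch backends behind the generic copy_to_user() and
 * copy_from_user() wrappers in <linux/uaccess.h>; callers normally use
 * the wrappers, roughly like this (illustrative):
 *
 *	if (copy_from_user(kbuf, ubuf, len))	// returns bytes not copied
 *		return -EFAULT;
 */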

struct pt_regs;
int fixup_exception(struct pt_regs *regs);

#endif /* __PARISC_UACCESS_H */