/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_64_KUP_RADIX_H
#define _ASM_POWERPC_BOOK3S_64_KUP_RADIX_H

#include <linux/const.h>
#include <asm/reg.h>

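/*
 * AMR bits for storage key 0, used by radix KUAP to block kernel loads
 * from and stores to user memory. AMR_KUAP_SHIFT lets asm code build the
 * fully blocked value with a cheap li/sldi pair instead of a constant load.
 */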
#define AMR_KUAP_BLOCK_READ	UL(0x4000000000000000)
#define AMR_KUAP_BLOCK_WRITE	UL(0x8000000000000000)
#define AMR_KUAP_BLOCKED	(AMR_KUAP_BLOCK_READ | AMR_KUAP_BLOCK_WRITE)
#define AMR_KUAP_SHIFT		62

#ifdef __ASSEMBLY__

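/*
 * Restore the AMR from the value saved in the interrupt stack frame
 * (pt_regs->kuap). The mtspr and its isync are skipped when the AMR
 * already holds the saved value, which is the common case.
 */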
.macro kuap_restore_amr	gpr1, gpr2
#ifdef CONFIG_PPC_KUAP
	BEGIN_MMU_FTR_SECTION_NESTED(67)
	mfspr	\gpr1, SPRN_AMR
	ld	\gpr2, STACK_REGS_KUAP(r1)
	cmpd	\gpr1, \gpr2
	beq	998f
	isync
	mtspr	SPRN_AMR, \gpr2
	/* No isync required, see kuap_restore_amr() */
998:
	END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_RADIX_KUAP, 67)
#endif
.endm

#ifdef CONFIG_PPC_KUAP
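/*
 * Debug check: trap (tdne) and WARN once if the AMR does not hold the
 * fully blocked value at a point where user access should be locked out.
 * Compiled out unless CONFIG_PPC_KUAP_DEBUG is set.
 */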
.macro kuap_check_amr gpr1, gpr2
#ifdef CONFIG_PPC_KUAP_DEBUG
	BEGIN_MMU_FTR_SECTION_NESTED(67)
	mfspr	\gpr1, SPRN_AMR
	li	\gpr2, (AMR_KUAP_BLOCKED >> AMR_KUAP_SHIFT)
	sldi	\gpr2, \gpr2, AMR_KUAP_SHIFT
999:	tdne	\gpr1, \gpr2
	EMIT_BUG_ENTRY 999b, __FILE__, __LINE__, (BUGFLAG_WARNING | BUGFLAG_ONCE)
	END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_RADIX_KUAP, 67)
#endif
.endm
#endif

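/*
 * On interrupt entry: save the current AMR in the stack frame, then set
 * it to the fully blocked value unless it is blocked already. When
 * \msr_pr_cr is given, the whole sequence is skipped for interrupts that
 * came from userspace (the bne below is taken when MSR_PR was set).
 */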
.macro kuap_save_amr_and_lock gpr1, gpr2, use_cr, msr_pr_cr
#ifdef CONFIG_PPC_KUAP
	BEGIN_MMU_FTR_SECTION_NESTED(67)
	.ifnb \msr_pr_cr
	bne	\msr_pr_cr, 99f
	.endif
	mfspr	\gpr1, SPRN_AMR
	std	\gpr1, STACK_REGS_KUAP(r1)
	li	\gpr2, (AMR_KUAP_BLOCKED >> AMR_KUAP_SHIFT)
	sldi	\gpr2, \gpr2, AMR_KUAP_SHIFT
	cmpd	\use_cr, \gpr1, \gpr2
	beq	\use_cr, 99f
	/* We don't isync here because we very recently entered via rfid */
	mtspr	SPRN_AMR, \gpr2
	isync
99:
	END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_RADIX_KUAP, 67)
#endif
.endm

#else /* !__ASSEMBLY__ */

#include <linux/jump_label.h>

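/*
 * Gates the L1-D cache flush performed when closing a user access window;
 * see prevent_user_access() and restore_user_access() below.
 */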
DECLARE_STATIC_KEY_FALSE(uaccess_flush_key);

#ifdef CONFIG_PPC_KUAP

#include <asm/mmu.h>
#include <asm/ptrace.h>

static inline void kuap_restore_amr(struct pt_regs *regs, unsigned long amr)
{
	if (mmu_has_feature(MMU_FTR_RADIX_KUAP) && unlikely(regs->kuap != amr)) {
		isync();
		mtspr(SPRN_AMR, regs->kuap);
		/*
		 * No isync required here because we are about to RFI back to
		 * the previous context before any user accesses could be
		 * made; the RFI itself is a CSI.
		 */
	}
}

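/*
 * Read the current AMR for the interrupt-exit path (e.g.
 * interrupt_exit_kernel_prepare()), which later feeds the value back into
 * kuap_restore_amr() above. With CONFIG_PPC_KUAP_DEBUG it also warns when
 * the AMR is not the fully blocked value expected on interrupt entry.
 */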
static inline unsigned long kuap_get_and_check_amr(void)
{
	if (mmu_has_feature(MMU_FTR_RADIX_KUAP)) {
		unsigned long amr = mfspr(SPRN_AMR);
		if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG)) /* kuap_check_amr() */
			WARN_ON_ONCE(amr != AMR_KUAP_BLOCKED);
		return amr;
	}
	return 0;
}

static inline void kuap_check_amr(void)
{
	if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG) && mmu_has_feature(MMU_FTR_RADIX_KUAP))
		WARN_ON_ONCE(mfspr(SPRN_AMR) != AMR_KUAP_BLOCKED);
}

/*
 * We support individually allowing read or write, but we don't support nesting
 * because that would require an expensive read/modify/write of the AMR.
 */
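/*
 * Why nesting cannot work (hypothetical sequence): set_kuap() overwrites
 * the whole AMR, so an inner window closes the outer one:
 *
 *	allow_user_access(to, NULL, size, KUAP_WRITE);    AMR = block-read
 *	allow_user_access(NULL, from, size, KUAP_READ);   AMR = block-write
 *	prevent_user_access(NULL, from, size, KUAP_READ); AMR = blocked
 *
 * and the outer write window is already gone before its own
 * prevent_user_access() runs.
 */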

static inline unsigned long get_kuap(void)
{
	/*
	 * We return AMR_KUAP_BLOCKED when we don't support KUAP because
	 * prevent_user_access_return needs to return AMR_KUAP_BLOCKED to
	 * cause restore_user_access to do a flush.
	 *
	 * This has no effect in terms of actually blocking things on hash,
	 * so it doesn't break anything.
	 */
	if (!early_mmu_has_feature(MMU_FTR_RADIX_KUAP))
		return AMR_KUAP_BLOCKED;

	return mfspr(SPRN_AMR);
}

static inline void set_kuap(unsigned long value)
{
	if (!early_mmu_has_feature(MMU_FTR_RADIX_KUAP))
		return;

	/*
	 * ISA v3.0B says we need a CSI (Context Synchronising Instruction) both
	 * before and after the move to AMR. See table 6 on page 1134.
	 */
	isync();
	mtspr(SPRN_AMR, value);
	isync();
}

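/*
 * Called from the page fault handler: returns true (with a WARN) when a
 * kernel fault on a user address was caused by KUAP blocking the access.
 */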
static inline bool
bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
{
	return WARN(mmu_has_feature(MMU_FTR_RADIX_KUAP) &&
		    (regs->kuap & (is_write ? AMR_KUAP_BLOCK_WRITE : AMR_KUAP_BLOCK_READ)),
		    "Bug: %s fault blocked by AMR!", is_write ? "Write" : "Read");
}
#else /* CONFIG_PPC_KUAP */
static inline void kuap_restore_amr(struct pt_regs *regs, unsigned long amr) { }

static inline unsigned long kuap_get_and_check_amr(void)
{
	return 0UL;
}

static inline unsigned long get_kuap(void)
{
	return AMR_KUAP_BLOCKED;
}

static inline void set_kuap(unsigned long value) { }
#endif /* !CONFIG_PPC_KUAP */

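/*
 * Open a user access window in the direction(s) given by @dir, which must
 * be a compile-time constant: KUAP_READ, KUAP_WRITE or KUAP_READ_WRITE
 * (see <asm/kup.h>).
 */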
static __always_inline void allow_user_access(void __user *to, const void __user *from,
					      unsigned long size, unsigned long dir)
{
	// This is written so we can resolve to a single case at build time
	BUILD_BUG_ON(!__builtin_constant_p(dir));
	if (dir == KUAP_READ)
		set_kuap(AMR_KUAP_BLOCK_WRITE);
	else if (dir == KUAP_WRITE)
		set_kuap(AMR_KUAP_BLOCK_READ);
	else if (dir == KUAP_READ_WRITE)
		set_kuap(0);
	else
		BUILD_BUG();
}

static inline void prevent_user_access(void __user *to, const void __user *from,
				       unsigned long size, unsigned long dir)
{
	set_kuap(AMR_KUAP_BLOCKED);
	if (static_branch_unlikely(&uaccess_flush_key))
		do_uaccess_flush();
}

static inline unsigned long prevent_user_access_return(void)
{
	unsigned long flags = get_kuap();

	set_kuap(AMR_KUAP_BLOCKED);
	if (static_branch_unlikely(&uaccess_flush_key))
		do_uaccess_flush();

	return flags;
}

static inline void restore_user_access(unsigned long flags)
{
	set_kuap(flags);
	if (static_branch_unlikely(&uaccess_flush_key) && flags == AMR_KUAP_BLOCKED)
		do_uaccess_flush();
}
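
/*
 * Typical pairing (an illustrative sketch; real callers are the wrappers
 * in <asm/kup.h> such as allow_read_from_user()):
 *
 *	allow_user_access(NULL, uptr, size, KUAP_READ);
 *	...load from uptr...
 *	prevent_user_access(NULL, uptr, size, KUAP_READ);
 */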
#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_BOOK3S_64_KUP_RADIX_H */