/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_POWERPC_MMU_CONTEXT_H
#define __ASM_POWERPC_MMU_CONTEXT_H
#ifdef __KERNEL__

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/cputhreads.h>

/*
 * Most of the context management is out of line.
 */
extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
extern void destroy_context(struct mm_struct *mm);
#ifdef CONFIG_SPAPR_TCE_IOMMU
struct mm_iommu_table_group_mem_t;

extern int isolate_lru_page(struct page *page);	/* from internal.h */
extern bool mm_iommu_preregistered(struct mm_struct *mm);
extern long mm_iommu_new(struct mm_struct *mm,
		unsigned long ua, unsigned long entries,
		struct mm_iommu_table_group_mem_t **pmem);
extern long mm_iommu_newdev(struct mm_struct *mm, unsigned long ua,
		unsigned long entries, unsigned long dev_hpa,
		struct mm_iommu_table_group_mem_t **pmem);
extern long mm_iommu_put(struct mm_struct *mm,
		struct mm_iommu_table_group_mem_t *mem);
extern void mm_iommu_init(struct mm_struct *mm);
extern void mm_iommu_cleanup(struct mm_struct *mm);
extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
		unsigned long ua, unsigned long size);
extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup_rm(
		struct mm_struct *mm, unsigned long ua, unsigned long size);
extern struct mm_iommu_table_group_mem_t *mm_iommu_get(struct mm_struct *mm,
		unsigned long ua, unsigned long entries);
extern long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
		unsigned long ua, unsigned int pageshift, unsigned long *hpa);
extern long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
		unsigned long ua, unsigned int pageshift, unsigned long *hpa);
extern void mm_iommu_ua_mark_dirty_rm(struct mm_struct *mm, unsigned long ua);
extern bool mm_iommu_is_devmem(struct mm_struct *mm, unsigned long hpa,
		unsigned int pageshift, unsigned long *size);
extern long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem);
extern void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem);
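
/*
 * Typical flow (a sketch only, not a contract; 'ua', 'entries' and
 * 'pageshift' stand in for caller-supplied values): preregister a
 * chunk of userspace memory, translate within it while holding a
 * mapped reference, then release everything:
 *
 *	struct mm_iommu_table_group_mem_t *mem;
 *	unsigned long hpa;
 *
 *	if (mm_iommu_new(mm, ua, entries, &mem))
 *		return -EFAULT;
 *	if (!mm_iommu_ua_to_hpa(mem, ua, pageshift, &hpa))
 *		mm_iommu_mapped_inc(mem);
 *	...
 *	mm_iommu_mapped_dec(mem);
 *	mm_iommu_put(mm, mem);
 */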
#else
static inline bool mm_iommu_is_devmem(struct mm_struct *mm, unsigned long hpa,
		unsigned int pageshift, unsigned long *size)
{
	return false;
}
static inline void mm_iommu_init(struct mm_struct *mm) { }
#endif
extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm);
extern void set_context(unsigned long id, pgd_t *pgd);

#ifdef CONFIG_PPC_BOOK3S_64
extern void radix__switch_mmu_context(struct mm_struct *prev,
				      struct mm_struct *next);
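
/*
 * The context switch path depends on the MMU mode: radix updates the
 * PID / process table state, while hash reloads the SLB for the new mm.
 */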
static inline void switch_mmu_context(struct mm_struct *prev,
				      struct mm_struct *next,
				      struct task_struct *tsk)
{
	if (radix_enabled())
		return radix__switch_mmu_context(prev, next);
	return switch_slb(tsk, next);
}

extern int hash__alloc_context_id(void);
extern void hash__reserve_context_id(int id);
extern void __destroy_context(int context_id);
static inline void mmu_context_init(void) { }

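/*
 * On hash, one context id covers MAX_EA_BITS_PER_CONTEXT of effective
 * address space; addresses beyond that each need an extra id, kept per
 * slice in context.extended_id[].
 */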
static inline int alloc_extended_context(struct mm_struct *mm,
					 unsigned long ea)
{
	int context_id;

	int index = ea >> MAX_EA_BITS_PER_CONTEXT;

	context_id = hash__alloc_context_id();
	if (context_id < 0)
		return context_id;

	VM_WARN_ON(mm->context.extended_id[index]);
	mm->context.extended_id[index] = context_id;
	return context_id;
}

static inline bool need_extra_context(struct mm_struct *mm, unsigned long ea)
{
	int context_id;

	context_id = get_user_context(&mm->context, ea);
	if (!context_id)
		return true;
	return false;
}

#else
extern void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next,
			       struct task_struct *tsk);
extern unsigned long __init_new_context(void);
extern void __destroy_context(unsigned long context_id);
extern void mmu_context_init(void);
static inline int alloc_extended_context(struct mm_struct *mm,
					 unsigned long ea)
{
	/* non-book3s_64 platforms should never call this */
	WARN_ON(1);
	return -ENOMEM;
}

static inline bool need_extra_context(struct mm_struct *mm, unsigned long ea)
{
	return false;
}
#endif

#if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE) && defined(CONFIG_PPC_RADIX_MMU)
extern void radix_kvm_prefetch_workaround(struct mm_struct *mm);
#else
static inline void radix_kvm_prefetch_workaround(struct mm_struct *mm) { }
#endif

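/* Coprocessor (ACOP) state management, used by the icswx mechanism. */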
extern void switch_cop(struct mm_struct *next);
extern int use_cop(unsigned long acop, struct mm_struct *mm);
extern void drop_cop(unsigned long acop, struct mm_struct *mm);

#ifdef CONFIG_PPC_BOOK3S_64
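/*
 * active_cpus counts the CPUs (and, via the copro hooks below, other
 * agents) that may be using this mm; the TLB flush code consults it to
 * choose between local and global invalidations.
 */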
static inline void inc_mm_active_cpus(struct mm_struct *mm)
{
	atomic_inc(&mm->context.active_cpus);
}

static inline void dec_mm_active_cpus(struct mm_struct *mm)
{
	atomic_dec(&mm->context.active_cpus);
}

static inline void mm_context_add_copro(struct mm_struct *mm)
{
	/*
	 * If any copro is in use, increment the active CPU count in
	 * order to force TLB invalidations to be global so that they
	 * propagate to the Nest MMU.
	 */
	if (atomic_inc_return(&mm->context.copros) == 1)
		inc_mm_active_cpus(mm);
}

static inline void mm_context_remove_copro(struct mm_struct *mm)
{
	int c;

	/*
	 * When removing the last copro, we need to broadcast a global
	 * flush of the full mm, as the next TLBI may be local and the
	 * nMMU and/or PSL need to be cleaned up.
	 *
	 * Both the 'copros' and 'active_cpus' counts are looked at in
	 * flush_all_mm() to determine the scope (local/global) of the
	 * TLBIs, so we need to flush first before decrementing
	 * 'copros'. If this API is used by several callers for the
	 * same context, it can lead to over-flushing. It's hopefully
	 * not common enough to be a problem.
	 *
	 * Skip on hash, as we don't know how to do the proper flush
	 * for the time being. Invalidations will remain global if
	 * used on hash. Note that we can't drop 'copros' either, as
	 * it could make some invalidations local with no flush
	 * in-between.
	 */
	if (radix_enabled()) {
		flush_all_mm(mm);

		c = atomic_dec_if_positive(&mm->context.copros);
		/* Detect imbalance between add and remove */
		WARN_ON(c < 0);

		if (c == 0)
			dec_mm_active_cpus(mm);
	}
}

/*
 * The vas_windows counter tracks the number of open VAS windows in the
 * mm context. During context switch, it is used to decide whether to
 * clear the foreign real address mapping (CP_ABORT) for a thread /
 * process that intends to use COPY/PASTE. Once a process has closed
 * all of its windows, CP_ABORT, which is expensive to run, is disabled.
 *
 * For user contexts, register a copro so that TLBIs are seen by the
 * nest MMU. mm_context_add/remove_vas_window() are used only for user
 * space windows.
 */
static inline void mm_context_add_vas_window(struct mm_struct *mm)
{
	atomic_inc(&mm->context.vas_windows);
	mm_context_add_copro(mm);
}

static inline void mm_context_remove_vas_window(struct mm_struct *mm)
{
	int v;

	mm_context_remove_copro(mm);
	v = atomic_dec_if_positive(&mm->context.vas_windows);

	/* Detect imbalance between add and remove */
	WARN_ON(v < 0);
}
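
/*
 * Callers are expected to pair the two, e.g. (sketch only, hypothetical
 * driver code):
 *
 *	mm_context_add_vas_window(current->mm);
 *	... open and use the VAS window ...
 *	mm_context_remove_vas_window(current->mm);
 */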
#else
static inline void inc_mm_active_cpus(struct mm_struct *mm) { }
static inline void dec_mm_active_cpus(struct mm_struct *mm) { }
static inline void mm_context_add_copro(struct mm_struct *mm) { }
static inline void mm_context_remove_copro(struct mm_struct *mm) { }
#endif

extern void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
			       struct task_struct *tsk);

static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	unsigned long flags;

	local_irq_save(flags);
	switch_mm_irqs_off(prev, next, tsk);
	local_irq_restore(flags);
}
#define switch_mm_irqs_off switch_mm_irqs_off

#define deactivate_mm(tsk, mm)	do { } while (0)

/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	switch_mm_irqs_off(prev, next, current);
}

/* We don't currently use enter_lazy_tlb() for anything */
static inline void enter_lazy_tlb(struct mm_struct *mm,
				  struct task_struct *tsk)
{
	/* 64-bit Book3E keeps track of current PGD in the PACA */
#ifdef CONFIG_PPC_BOOK3E_64
	get_paca()->pgd = NULL;
#endif
}

extern void arch_exit_mmap(struct mm_struct *mm);

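/*
 * If the VDSO is unmapped (fully or partially), forget its base address
 * so later users, such as the signal trampoline lookup, don't see stale
 * state.
 */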
static inline void arch_unmap(struct mm_struct *mm,
			      unsigned long start, unsigned long end)
{
	if (start <= mm->context.vdso_base && mm->context.vdso_base < end)
		mm->context.vdso_base = 0;
}

#ifdef CONFIG_PPC_MEM_KEYS
bool arch_vma_access_permitted(struct vm_area_struct *vma, bool write,
			       bool execute, bool foreign);
void arch_dup_pkeys(struct mm_struct *oldmm, struct mm_struct *mm);
#else /* CONFIG_PPC_MEM_KEYS */
static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
		bool write, bool execute, bool foreign)
{
	/* by default, allow everything */
	return true;
}

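/* With pkeys disabled, the remaining pkey hooks compile away to nothing. */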
#define pkey_mm_init(mm)
#define thread_pkey_regs_save(thread)
#define thread_pkey_regs_restore(new_thread, old_thread)
#define thread_pkey_regs_init(thread)
#define arch_dup_pkeys(oldmm, mm)

static inline u64 pte_to_hpte_pkey_bits(u64 pteflags)
{
	return 0x0UL;
}

#endif /* CONFIG_PPC_MEM_KEYS */

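/*
 * Called at fork() time from dup_mmap(); the child inherits the
 * parent's protection key state.
 */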
static inline int arch_dup_mmap(struct mm_struct *oldmm,
				struct mm_struct *mm)
{
	arch_dup_pkeys(oldmm, mm);
	return 0;
}

#endif /* __KERNEL__ */
#endif /* __ASM_POWERPC_MMU_CONTEXT_H */