/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_NOHASH_32_PGTABLE_H
#define _ASM_POWERPC_NOHASH_32_PGTABLE_H

#include <asm-generic/pgtable-nopmd.h>

#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/threads.h>
#include <asm/mmu.h>			/* For sub-arch specific PPC_PIN_SIZE */

#ifdef CONFIG_44x
extern int icache_44x_need_flush;
#endif

#endif /* __ASSEMBLY__ */

#define PTE_INDEX_SIZE	PTE_SHIFT
#define PMD_INDEX_SIZE	0
#define PUD_INDEX_SIZE	0
#define PGD_INDEX_SIZE	(32 - PGDIR_SHIFT)

#define PMD_CACHE_INDEX	PMD_INDEX_SIZE
#define PUD_CACHE_INDEX	PUD_INDEX_SIZE

#ifndef __ASSEMBLY__
#define PTE_TABLE_SIZE	(sizeof(pte_t) << PTE_INDEX_SIZE)
#define PMD_TABLE_SIZE	0
#define PUD_TABLE_SIZE	0
#define PGD_TABLE_SIZE	(sizeof(pgd_t) << PGD_INDEX_SIZE)

#define PMD_MASKED_BITS (PTE_TABLE_SIZE - 1)
#endif	/* __ASSEMBLY__ */

#define PTRS_PER_PTE	(1 << PTE_INDEX_SIZE)
#define PTRS_PER_PGD	(1 << PGD_INDEX_SIZE)

/*
 * The normal case is that PTEs are 32-bits and we have a 1-page
 * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages.  -- paulus
 *
 * For any >32-bit physical address platform, we can use the following
 * two level page table layout where the pgdir is 8KB and the MS 11 bits
 * are an index to the second level table.  The combined pgdir/pmd first
 * level has 2048 entries and the second level has 512 64-bit PTE entries.
 * -Matt
 */
/* PGDIR_SHIFT determines what a top-level page table entry can map */
#define PGDIR_SHIFT	(PAGE_SHIFT + PTE_INDEX_SIZE)
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
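/*
 * Worked example (illustrative only): with 4k pages and 32-bit PTEs,
 * PTE_INDEX_SIZE is 10, so PGDIR_SHIFT = 12 + 10 = 22, each PGD entry
 * maps 4MB and PTRS_PER_PGD = 1024.  With 64-bit PTEs, PTE_INDEX_SIZE
 * is 9, giving PGDIR_SHIFT = 21, 2048 PGD entries (an 8KB pgdir) and
 * 512 PTEs per page, i.e. the layout described in the comment above.
 */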

/* Bits to mask out from a PGD to get to the PUD page */
#define PGD_MASKED_BITS		0

#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
#define FIRST_USER_ADDRESS	0UL

#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \
		(unsigned long long)pte_val(e))
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

#ifndef __ASSEMBLY__

int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
void unmap_kernel_page(unsigned long va);

#endif /* !__ASSEMBLY__ */


/*
 * This is the bottom of the PKMAP area with HIGHMEM or an arbitrary
 * value (for now) on others, from where we can start laying out the
 * kernel virtual space that goes below PKMAP and FIXMAP
 */
#include <asm/fixmap.h>

/*
 * ioremap_bot starts at that address. Early ioremaps move down from there,
 * until mem_init(), at which point this becomes the top of the vmalloc
 * and ioremap space
 */
#ifdef CONFIG_HIGHMEM
#define IOREMAP_TOP	PKMAP_BASE
#else
#define IOREMAP_TOP	FIXADDR_START
#endif

/* PPC32 shares vmalloc area with ioremap */
#define IOREMAP_START	VMALLOC_START
#define IOREMAP_END	VMALLOC_END
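/*
 * Rough sketch of the resulting layout, from high to low addresses
 * (illustrative; the exact addresses depend on platform and config):
 *
 *	fixmap			above FIXADDR_START
 *	PKMAP area		below the fixmap, with CONFIG_HIGHMEM
 *	IOREMAP_TOP		early ioremap() allocations grow down
 *	ioremap_bot		current bottom of early ioremaps; becomes
 *				VMALLOC_END after mem_init()
 *	VMALLOC_START		vmalloc()/ioremap() grow up from here
 *	high_memory		top of the lowmem linear mapping
 */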

/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 16MB value just means that there will be a 64MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 *
 * We no longer map larger than phys RAM with the BATs so we don't have
 * to worry about the VMALLOC_OFFSET causing problems.  We do have to worry
 * about clashes between our early calls to ioremap() (which start growing
 * down from IOREMAP_TOP) and the VM area allocations (which grow upwards
 * from VMALLOC_START).  For this reason we have ioremap_bot to check when
 * we actually run into our mappings set up in early boot with the VM
 * system.  This really does become a problem for machines with good amounts
 * of RAM.  -- Cort
 */
#define VMALLOC_OFFSET (0x1000000) /* 16M */
#ifdef PPC_PIN_SIZE
#define VMALLOC_START (((ALIGN((long)high_memory, PPC_PIN_SIZE) + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#else
#define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#endif
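/*
 * For instance (hypothetical value), with high_memory == 0xc8000000 and
 * no PPC_PIN_SIZE, VMALLOC_START is (0xc8000000 + 0x1000000) rounded down
 * to a 16MB boundary, i.e. 0xc9000000.
 */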

#ifdef CONFIG_KASAN_VMALLOC
#define VMALLOC_END	ALIGN_DOWN(ioremap_bot, PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)
#else
#define VMALLOC_END	ioremap_bot
#endif

/*
 * Bits in a linux-style PTE.  These match the bits in the
 * (hardware-defined) PowerPC PTE as closely as possible.
 */

#if defined(CONFIG_40x)
#include <asm/nohash/32/pte-40x.h>
#elif defined(CONFIG_44x)
#include <asm/nohash/32/pte-44x.h>
#elif defined(CONFIG_FSL_BOOKE) && defined(CONFIG_PTE_64BIT)
#include <asm/nohash/pte-book3e.h>
#elif defined(CONFIG_FSL_BOOKE)
#include <asm/nohash/32/pte-fsl-booke.h>
#elif defined(CONFIG_PPC_8xx)
#include <asm/nohash/32/pte-8xx.h>
#endif

/*
 * Location of the PFN in the PTE. Most 32-bit platforms use the same
 * value as PAGE_SHIFT here (ie, naturally aligned).
 * Platforms that don't simply pre-define the value, so we don't override
 * it here.
 */
#ifndef PTE_RPN_SHIFT
#define PTE_RPN_SHIFT	(PAGE_SHIFT)
#endif
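/*
 * In other words, the page frame number normally sits above PTE_RPN_SHIFT
 * in the PTE value, so (sketch only; the real accessors such as pte_pfn()
 * live in the common nohash headers):
 *
 *	pfn  = pte_val(pte) >> PTE_RPN_SHIFT;
 *	phys = (phys_addr_t)pfn << PAGE_SHIFT;
 */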

/*
 * The mask covered by the RPN must be a ULL on 32-bit platforms with
 * 64-bit PTEs.
 */
#if defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT)
#define PTE_RPN_MASK	(~((1ULL << PTE_RPN_SHIFT) - 1))
#define MAX_POSSIBLE_PHYSMEM_BITS 36
#else
#define PTE_RPN_MASK	(~((1UL << PTE_RPN_SHIFT) - 1))
#define MAX_POSSIBLE_PHYSMEM_BITS 32
#endif
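/*
 * Example (hypothetical): with 64-bit PTEs, a >32-bit physical address
 * such as 0x123456000 gives pfn 0x123456, which still fits because
 * PTE_RPN_MASK extends beyond bit 31 of the (ULL) PTE value.
 */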

/*
 * _PAGE_CHG_MASK masks the bits that are to be preserved across
 * pgprot changes.
 */
#define _PAGE_CHG_MASK	(PTE_RPN_MASK | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_SPECIAL)
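/*
 * For example, a pte_modify()-style update keeps these bits while
 * replacing the protection bits (sketch only):
 *
 *	new = __pte((pte_val(old) & _PAGE_CHG_MASK) | pgprot_val(newprot));
 */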

#ifndef __ASSEMBLY__

#define pte_clear(mm, addr, ptep) \
	do { pte_update(mm, addr, ptep, ~0, 0, 0); } while (0)

#ifndef pte_mkwrite
static inline pte_t pte_mkwrite(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_RW);
}
#endif

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_ACCESSED);
}

#ifndef pte_wrprotect
static inline pte_t pte_wrprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_RW);
}
#endif

static inline pte_t pte_mkexec(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_EXEC);
}

#define pmd_none(pmd)		(!pmd_val(pmd))
#define	pmd_bad(pmd)		(pmd_val(pmd) & _PMD_BAD)
#define	pmd_present(pmd)	(pmd_val(pmd) & _PMD_PRESENT_MASK)
static inline void pmd_clear(pmd_t *pmdp)
{
	*pmdp = __pmd(0);
}

/*
 * PTE updates. This function is called whenever an existing
 * valid PTE is updated. This does -not- include set_pte_at()
 * which nowadays only sets a new PTE.
 *
 * Depending on the type of MMU, we may need to use atomic updates
 * and the PTE may be either 32 or 64 bit wide. In the latter case,
 * when using atomic updates, only the low part of the PTE is
 * accessed atomically.
 *
 * In addition, on 44x, we also maintain a global flag indicating
 * that an executable user mapping was modified, which is needed
 * to properly flush the virtually tagged instruction cache of
 * those implementations.
 *
 * On the 8xx, the page tables are a bit special. For 16k pages, we have
 * 4 identical entries. For 512k pages, we have 128 entries as if it was
 * 4k pages, but they are flagged as 512k pages for the hardware.
 * For other page sizes, we have a single entry in the table.
 */
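/*
 * A typical caller clears and/or sets individual bits and gets the old
 * value back in one go, e.g. (see the helpers further down):
 *
 *	old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0);
 */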
#ifdef CONFIG_PPC_8xx
static pmd_t *pmd_off(struct mm_struct *mm, unsigned long addr);
static int hugepd_ok(hugepd_t hpd);

static int number_of_cells_per_pte(pmd_t *pmd, pte_basic_t val, int huge)
{
	if (!huge)
		return PAGE_SIZE / SZ_4K;
	else if (hugepd_ok(*((hugepd_t *)pmd)))
		return 1;
	else if (IS_ENABLED(CONFIG_PPC_4K_PAGES) && !(val & _PAGE_HUGE))
		return SZ_16K / SZ_4K;
	else
		return SZ_512K / SZ_4K;
}

static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p,
				     unsigned long clr, unsigned long set, int huge)
{
	pte_basic_t *entry = &p->pte;
	pte_basic_t old = pte_val(*p);
	pte_basic_t new = (old & ~(pte_basic_t)clr) | set;
	int num, i;
	pmd_t *pmd = pmd_off(mm, addr);

	num = number_of_cells_per_pte(pmd, new, huge);

	for (i = 0; i < num; i++, entry++, new += SZ_4K)
		*entry = new;

	return old;
}
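/*
 * For instance, with the function above, a 512k page on a 4k PAGE_SIZE
 * kernel is written as num = 128 consecutive cells whose RPNs advance by
 * 4k each, all flagged as 512k pages, as described in the comment above.
 */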

#ifdef CONFIG_PPC_16K_PAGES
#define __HAVE_ARCH_PTEP_GET
static inline pte_t ptep_get(pte_t *ptep)
{
	pte_basic_t val = READ_ONCE(ptep->pte);
	pte_t pte = {val, val, val, val};

	return pte;
}
#endif /* CONFIG_PPC_16K_PAGES */

#else
static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p,
				     unsigned long clr, unsigned long set, int huge)
{
	pte_basic_t old = pte_val(*p);
	pte_basic_t new = (old & ~(pte_basic_t)clr) | set;

	*p = __pte(new);

#ifdef CONFIG_44x
	if ((old & _PAGE_USER) && (old & _PAGE_EXEC))
		icache_44x_need_flush = 1;
#endif
	return old;
}
#endif

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
					      unsigned long addr, pte_t *ptep)
{
	unsigned long old;
	old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0);
	return (old & _PAGE_ACCESSED) != 0;
}
#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
	__ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep)

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	return __pte(pte_update(mm, addr, ptep, ~0, 0, 0));
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
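/*
 * The clr/set masks below are derived by running pte_wrprotect() on an
 * all-ones and an all-zeroes PTE, so whatever bits the platform's
 * pte_wrprotect() clears or sets (e.g. clearing _PAGE_RW or setting a
 * read-only bit) are honoured without hardcoding them here.
 */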
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
				      pte_t *ptep)
{
	unsigned long clr = ~pte_val(pte_wrprotect(__pte(~0)));
	unsigned long set = pte_val(pte_wrprotect(__pte(0)));

	pte_update(mm, addr, ptep, clr, set, 0);
}

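/*
 * Restrict the update to the "access flag" bits (accessed, dirty, rw,
 * exec): only bits that the pte_mk*() helpers above would set or clear
 * are copied from entry; everything else in the existing PTE is left
 * untouched.
 */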
static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
					   pte_t *ptep, pte_t entry,
					   unsigned long address,
					   int psize)
{
	pte_t pte_set = pte_mkyoung(pte_mkdirty(pte_mkwrite(pte_mkexec(__pte(0)))));
	pte_t pte_clr = pte_mkyoung(pte_mkdirty(pte_mkwrite(pte_mkexec(__pte(~0)))));
	unsigned long set = pte_val(entry) & pte_val(pte_set);
	unsigned long clr = ~pte_val(entry) & ~pte_val(pte_clr);
	int huge = psize > mmu_virtual_psize ? 1 : 0;

	pte_update(vma->vm_mm, address, ptep, clr, set, huge);

	flush_tlb_page(vma, address);
}

static inline int pte_young(pte_t pte)
{
	return pte_val(pte) & _PAGE_ACCESSED;
}

#define __HAVE_ARCH_PTE_SAME
#define pte_same(A,B)	((pte_val(A) ^ pte_val(B)) == 0)

/*
 * Note that on Book E processors, the pmd contains the kernel virtual
 * (lowmem) address of the pte page.  The physical address is less useful
 * because everything runs with translation enabled (even the TLB miss
 * handler).  On everything else the pmd contains the physical address
 * of the pte page.  -- paulus
 */
#ifndef CONFIG_BOOKE
#define pmd_page(pmd)		\
	pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
#else
#define pmd_page_vaddr(pmd)	\
	((unsigned long)(pmd_val(pmd) & ~(PTE_TABLE_SIZE - 1)))
#define pmd_page(pmd)		\
	pfn_to_page((__pa(pmd_val(pmd)) >> PAGE_SHIFT))
#endif

/*
 * Encode and decode a swap entry.
 * Note that the bits we use in a PTE for representing a swap entry
 * must not include the _PAGE_PRESENT bit.
 *   -- paulus
 */
#define __swp_type(entry)		((entry).val & 0x1f)
#define __swp_offset(entry)		((entry).val >> 5)
#define __swp_entry(type, offset)	((swp_entry_t) { (type) | ((offset) << 5) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) >> 3 })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val << 3 })
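/*
 * Worked example (hypothetical values): __swp_entry(2, 0x100) yields
 * val = 2 | (0x100 << 5) = 0x2002; stored as a PTE this becomes
 * 0x2002 << 3 = 0x10010, leaving the low-order bits (and hence
 * _PAGE_PRESENT, see above) clear.
 */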

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_POWERPC_NOHASH_32_PGTABLE_H */