/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020 Loongson Technology Corporation Limited
 */
#ifndef _ASM_PGTABLE_64_H
#define _ASM_PGTABLE_64_H

#include <linux/compiler.h>
#include <linux/linkage.h>

#include <asm/addrspace.h>

#if CONFIG_PGTABLE_LEVELS == 2
#include <asm-generic/pgtable-nopmd.h>
#elif CONFIG_PGTABLE_LEVELS == 3
#include <asm-generic/pgtable-nopud.h>
#else
#include <asm-generic/pgtable-nop4d.h>
#endif

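/*
 * Every level's page table occupies a single (order-0) page; an order
 * of n would make each table 2^n contiguous pages.
 */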
#define PGD_ORDER		0
#define PUD_ORDER		0
#define PMD_ORDER		0
#define PTE_ORDER		0

#if CONFIG_PGTABLE_LEVELS == 2
#define PGDIR_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT + PTE_ORDER - 3))
#elif CONFIG_PGTABLE_LEVELS == 3
#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT + PTE_ORDER - 3))
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PGDIR_SHIFT	(PMD_SHIFT + (PAGE_SHIFT + PMD_ORDER - 3))
#elif CONFIG_PGTABLE_LEVELS == 4
#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT + PTE_ORDER - 3))
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PUD_SHIFT	(PMD_SHIFT + (PAGE_SHIFT + PMD_ORDER - 3))
#define PUD_SIZE	(1UL << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE-1))
#define PGDIR_SHIFT	(PUD_SHIFT + (PAGE_SHIFT + PUD_ORDER - 3))
#endif

#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

#define VA_BITS		(PGDIR_SHIFT + (PAGE_SHIFT + PGD_ORDER - 3))
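
/*
 * A worked example (assuming the common configuration of 16KB pages,
 * i.e. PAGE_SHIFT == 14, with 3 levels and all orders 0): each level
 * indexes PAGE_SHIFT - 3 = 11 bits, so PMD_SHIFT = 14 + 11 = 25
 * (PMD_SIZE = 32MB), PGDIR_SHIFT = 25 + 11 = 36 (PGDIR_SIZE = 64GB)
 * and VA_BITS = 36 + 11 = 47, with 16384 >> 3 = 2048 entries per table.
 */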

#define PTRS_PER_PGD	((PAGE_SIZE << PGD_ORDER) >> 3)
#if CONFIG_PGTABLE_LEVELS > 3
#define PTRS_PER_PUD	((PAGE_SIZE << PUD_ORDER) >> 3)
#endif
#if CONFIG_PGTABLE_LEVELS > 2
#define PTRS_PER_PMD	((PAGE_SIZE << PMD_ORDER) >> 3)
#endif
#define PTRS_PER_PTE	((PAGE_SIZE << PTE_ORDER) >> 3)

#define USER_PTRS_PER_PGD	((TASK_SIZE64 / PGDIR_SIZE) ? (TASK_SIZE64 / PGDIR_SIZE) : 1)
#define FIRST_USER_ADDRESS	0UL

#ifndef __ASSEMBLY__

#include <asm/fixmap.h>
#include <asm/sparsemem.h>

/*
 * TLB refill handlers may also map the vmalloc area into xkvrange.
 * Avoid the first couple of pages so NULL pointer dereferences will
 * still reliably trap.
 */
#define MODULES_VADDR	(vm_map_base + PCI_IOSIZE + (2 * PAGE_SIZE))
#define MODULES_END	(MODULES_VADDR + SZ_256M)

#define VMALLOC_START	MODULES_END

#ifndef CONFIG_KASAN
#define VMALLOC_END	\
	(vm_map_base +	\
	 min(PTRS_PER_PGD * PTRS_PER_PUD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, (1UL << cpu_vabits)) - PMD_SIZE - VMEMMAP_SIZE)
#else
#define VMALLOC_END	\
	(vm_map_base +	\
	 min(PTRS_PER_PGD * PTRS_PER_PUD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, (1UL << cpu_vabits) / 2) - PMD_SIZE - VMEMMAP_SIZE)
#endif

#define vmemmap		((struct page *)((VMALLOC_END + PMD_SIZE) & PMD_MASK))
#define VMEMMAP_END	((unsigned long)vmemmap + VMEMMAP_SIZE - 1)
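
/*
 * Resulting layout of the mapping range (a sketch derived from the
 * definitions above, starting at vm_map_base):
 *
 *	vm_map_base                  PCI I/O port space (PCI_IOSIZE)
 *	                             + 2 guard pages
 *	MODULES_VADDR..MODULES_END   module area (256MB)
 *	VMALLOC_START..VMALLOC_END   vmalloc area
 *	vmemmap..VMEMMAP_END         struct page array (PMD-aligned)
 */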

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
#endif
#ifndef __PAGETABLE_PUD_FOLDED
#define pud_ERROR(e) \
	printk("%s:%d: bad pud %016lx.\n", __FILE__, __LINE__, pud_val(e))
#endif
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))

extern pte_t invalid_pte_table[PTRS_PER_PTE];

#ifndef __PAGETABLE_PUD_FOLDED

typedef struct { unsigned long pud; } pud_t;
#define pud_val(x)	((x).pud)
#define __pud(x)	((pud_t) { (x) })

extern pud_t invalid_pud_table[PTRS_PER_PUD];

/*
 * Empty pgd/p4d entries point to the invalid_pud_table.
 */
static inline int p4d_none(p4d_t p4d)
{
	return p4d_val(p4d) == (unsigned long)invalid_pud_table;
}

static inline int p4d_bad(p4d_t p4d)
{
	return p4d_val(p4d) & ~PAGE_MASK;
}

static inline int p4d_present(p4d_t p4d)
{
	return p4d_val(p4d) != (unsigned long)invalid_pud_table;
}

static inline void p4d_clear(p4d_t *p4dp)
{
	p4d_val(*p4dp) = (unsigned long)invalid_pud_table;
}

static inline pud_t *p4d_pgtable(p4d_t p4d)
{
	return (pud_t *)p4d_val(p4d);
}

static inline void set_p4d(p4d_t *p4d, p4d_t p4dval)
{
	*p4d = p4dval;
}

#define p4d_phys(p4d)		PHYSADDR(p4d_val(p4d))
#define p4d_page(p4d)		(pfn_to_page(p4d_phys(p4d) >> PAGE_SHIFT))
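
/*
 * Illustrative only, not part of this header: one step of a software
 * page table walk using the helpers above (walk_p4d is a hypothetical
 * name; pud_index() comes from the generic pgtable headers):
 *
 *	static pud_t *walk_p4d(p4d_t *p4dp, unsigned long addr)
 *	{
 *		if (p4d_none(*p4dp) || p4d_bad(*p4dp))
 *			return NULL;
 *		return p4d_pgtable(*p4dp) + pud_index(addr);
 *	}
 */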

#endif

#ifndef __PAGETABLE_PMD_FOLDED

typedef struct { unsigned long pmd; } pmd_t;
#define pmd_val(x)	((x).pmd)
#define __pmd(x)	((pmd_t) { (x) })

extern pmd_t invalid_pmd_table[PTRS_PER_PMD];

/*
 * Empty pud entries point to the invalid_pmd_table.
 */
static inline int pud_none(pud_t pud)
{
	return pud_val(pud) == (unsigned long)invalid_pmd_table;
}

static inline int pud_bad(pud_t pud)
{
	return pud_val(pud) & ~PAGE_MASK;
}

static inline int pud_present(pud_t pud)
{
	return pud_val(pud) != (unsigned long)invalid_pmd_table;
}

static inline void pud_clear(pud_t *pudp)
{
	pud_val(*pudp) = (unsigned long)invalid_pmd_table;
}

static inline pmd_t *pud_pgtable(pud_t pud)
{
	return (pmd_t *)pud_val(pud);
}

#define set_pud(pudptr, pudval) do { *(pudptr) = (pudval); } while (0)

#define pud_phys(pud)		PHYSADDR(pud_val(pud))
#define pud_page(pud)		(pfn_to_page(pud_phys(pud) >> PAGE_SHIFT))

#endif

/*
 * Empty pmd entries point to the invalid_pte_table.
 */
static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == (unsigned long)invalid_pte_table;
}

static inline int pmd_bad(pmd_t pmd)
{
	return pmd_val(pmd) & ~PAGE_MASK;
}

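/*
 * A PMD that maps a huge page carries _PAGE_HUGE and encodes validity
 * in its own bits: making it invalid (e.g. during THP splitting)
 * clears _PAGE_PRESENT but sets _PAGE_PRESENT_INVALID, so the entry
 * still reads as present rather than as an empty slot.
 */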
static inline int pmd_present(pmd_t pmd)
{
	if (unlikely(pmd_val(pmd) & _PAGE_HUGE))
		return !!(pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PRESENT_INVALID));

	return pmd_val(pmd) != (unsigned long)invalid_pte_table;
}

static inline void pmd_clear(pmd_t *pmdp)
{
	pmd_val(*pmdp) = (unsigned long)invalid_pte_table;
}

#define set_pmd(pmdptr, pmdval) do { *(pmdptr) = (pmdval); } while (0)

#define pmd_phys(pmd)		PHYSADDR(pmd_val(pmd))

#ifndef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_page(pmd)		(pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define pmd_page_vaddr(pmd)	pmd_val(pmd)

extern pmd_t mk_pmd(struct page *page, pgprot_t prot);
extern void set_pmd_at(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, pmd_t pmd);

#define pte_page(x)		pfn_to_page(pte_pfn(x))
#define pte_pfn(x)		((unsigned long)(((x).pte & _PFN_MASK) >> _PFN_SHIFT))
#define pfn_pte(pfn, prot)	__pte(((pfn) << _PFN_SHIFT) | pgprot_val(prot))
#define pfn_pmd(pfn, prot)	__pmd(((pfn) << _PFN_SHIFT) | pgprot_val(prot))
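
/*
 * For example, a PFN round-trips through a PTE provided it fits in
 * _PFN_MASK and the protection bits do not overlap the PFN field:
 *
 *	pte_pfn(pfn_pte(pfn, prot)) == pfn
 */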

/*
 * Initialize a new pgd / pmd table with invalid pointers.
 */
extern void pgd_init(unsigned long page);
extern void pud_init(unsigned long page, unsigned long pagetable);
extern void pmd_init(unsigned long page, unsigned long pagetable);
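
/*
 * A minimal sketch of what such an init is expected to do
 * (illustrative; assuming three levels, where an empty PGD entry
 * points at invalid_pmd_table, matching pud_none() above):
 *
 *	for (i = 0; i < PTRS_PER_PGD; i++)
 *		((unsigned long *)page)[i] = (unsigned long)invalid_pmd_table;
 */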

/*
 * Non-present pages: high 40 bits are offset, next 8 bits type,
 * low 16 bits zero.
 */
static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
	pte_t pte;

	pte_val(pte) = (type << 16) | (offset << 24);
	return pte;
}

#define __swp_type(x)		(((x).val >> 16) & 0xff)
#define __swp_offset(x)		((x).val >> 24)
#define __swp_entry(type, offset) ((swp_entry_t) { pte_val(mk_swap_pte((type), (offset))) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
#define __pmd_to_swp_entry(pmd) ((swp_entry_t) { pmd_val(pmd) })
#define __swp_entry_to_pmd(x)	((pmd_t) { (x).val | _PAGE_HUGE })
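
/*
 * For example, __swp_entry(3, 0x1234) encodes to
 * (0x1234UL << 24) | (3UL << 16): type 3 in bits 16-23, offset 0x1234
 * from bit 24 upward, and the low 16 (hardware-visible) bits clear.
 */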

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_PGTABLE_64_H */