/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_PGTABLE_H
#define _ASM_POWERPC_PGTABLE_H

#ifndef __ASSEMBLY__
#include <linux/mmdebug.h>
#include <linux/mmzone.h>
#include <asm/processor.h>		/* For TASK_SIZE */
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/tlbflush.h>

struct mm_struct;

#endif /* !__ASSEMBLY__ */

#ifdef CONFIG_PPC_BOOK3S
#include <asm/book3s/pgtable.h>
#else
#include <asm/nohash/pgtable.h>
#endif /* !CONFIG_PPC_BOOK3S */

/*
 * Protection used for kernel text. We want the debuggers to be able to
 * set breakpoints anywhere, so don't write protect the kernel text
 * on platforms where such control is possible.
 */
#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) || \
	defined(CONFIG_KPROBES) || defined(CONFIG_DYNAMIC_FTRACE)
#define PAGE_KERNEL_TEXT	PAGE_KERNEL_X
#else
#define PAGE_KERNEL_TEXT	PAGE_KERNEL_ROX
#endif

/* Make module code happy. We don't set it RO yet */
#define PAGE_KERNEL_EXEC	PAGE_KERNEL_X

/* Advertise special mapping type for AGP */
#define PAGE_AGP		(PAGE_KERNEL_NC)
#define HAVE_PAGE_AGP

#ifndef __ASSEMBLY__

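/*
 * set_ptes() sets @nr consecutive page table entries for a physically
 * contiguous batch of pages starting at @addr; @pte describes the first
 * page of the batch.
 */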
void set_ptes(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
		pte_t pte, unsigned int nr);
#define set_ptes set_ptes
#define update_mmu_cache(vma, addr, ptep) \
	update_mmu_cache_range(NULL, vma, addr, ptep, 1)

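/*
 * Subarches where the PGD size is only known at runtime provide their
 * own compile-time upper bound; everyone else can use PTRS_PER_PGD.
 */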
#ifndef MAX_PTRS_PER_PGD
#define MAX_PTRS_PER_PGD PTRS_PER_PGD
#endif

/* Keep these as macros to avoid an include dependency mess */
#define pte_page(x)		pfn_to_page(pte_pfn(x))
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

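/* Extract the page frame number from a PTE. */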
static inline unsigned long pte_pfn(pte_t pte)
{
	return (pte_val(pte) & PTE_RPN_MASK) >> PTE_RPN_SHIFT;
}

/*
 * Select all bits except the pfn
 */
static inline pgprot_t pte_pgprot(pte_t pte)
{
	unsigned long pte_flags;

	pte_flags = pte_val(pte) & ~PTE_RPN_MASK;
	return __pgprot(pte_flags);
}

#ifndef pmd_page_vaddr
static inline const void *pmd_page_vaddr(pmd_t pmd)
{
	return __va(pmd_val(pmd) & ~PMD_MASKED_BITS);
}
#define pmd_page_vaddr pmd_page_vaddr
#endif

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page[];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

extern pgd_t swapper_pg_dir[];

extern void paging_init(void);
void poking_init(void);

extern unsigned long ioremap_bot;
extern const pgprot_t protection_map[16];

#ifndef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_large(pmd)		0
#endif

/* Can we use this in KVM? */
unsigned long vmalloc_to_phys(void *vmalloc_addr);

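/*
 * Make sure a kmem cache exists from which page-table directories of
 * the given size shift can be allocated.
 */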
void pgtable_cache_add(unsigned int shift);

pte_t *early_pte_alloc_kernel(pmd_t *pmdp, unsigned long va);

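/*
 * Called once init memory has been freed: remove execute permission
 * from the freed init text where the platform implements this.
 */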
#if defined(CONFIG_STRICT_KERNEL_RWX) || defined(CONFIG_PPC32)
void mark_initmem_nx(void);
#else
static inline void mark_initmem_nx(void) { }
#endif

/*
 * When PTE fragments are used, PTE_FRAG_NR is defined in the subarch
 * pgtable.h, so it is guaranteed to be available by the time we get here.
 */
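/*
 * pte_frag_get()/pte_frag_set() cache a partially consumed PTE-fragment
 * page in the mm context so later page-table allocations can hand out
 * further fragments from the same page. Without fragments they are
 * no-op stubs.
 */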
#ifdef PTE_FRAG_NR
static inline void *pte_frag_get(mm_context_t *ctx)
{
	return ctx->pte_frag;
}

static inline void pte_frag_set(mm_context_t *ctx, void *p)
{
	ctx->pte_frag = p;
}
#else
#define PTE_FRAG_NR		1
#define PTE_FRAG_SIZE_SHIFT	PAGE_SHIFT
#define PTE_FRAG_SIZE		(1UL << PTE_FRAG_SIZE_SHIFT)

static inline void *pte_frag_get(mm_context_t *ctx)
{
	return NULL;
}

static inline void pte_frag_set(mm_context_t *ctx, void *p)
{
}
#endif

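/*
 * Fallbacks for subarches that never have leaf (huge page) entries at
 * the corresponding page-table level.
 */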
#ifndef pmd_is_leaf
#define pmd_is_leaf pmd_is_leaf
static inline bool pmd_is_leaf(pmd_t pmd)
{
	return false;
}
#endif

#ifndef pud_is_leaf
#define pud_is_leaf pud_is_leaf
static inline bool pud_is_leaf(pud_t pud)
{
	return false;
}
#endif

#ifndef p4d_is_leaf
#define p4d_is_leaf p4d_is_leaf
static inline bool p4d_is_leaf(p4d_t p4d)
{
	return false;
}
#endif

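/* Return the page-table page that this PMD entry points to. */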
#define pmd_pgtable pmd_pgtable
static inline pgtable_t pmd_pgtable(pmd_t pmd)
{
	return (pgtable_t)pmd_page_vaddr(pmd);
}

#ifdef CONFIG_PPC64
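/*
 * vmemmap_populated() reports whether the vmemmap for the given range is
 * already backed by memory; altmap_cross_boundary() whether backing a
 * page_size chunk of vmemmap at @start from the altmap would cross the
 * altmap's boundaries.
 */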
int __meminit vmemmap_populated(unsigned long vmemmap_addr, int vmemmap_map_size);
bool altmap_cross_boundary(struct vmem_altmap *altmap, unsigned long start,
			   unsigned long page_size);
/*
 * mm/memory_hotplug.c:mhp_supports_memmap_on_memory() details some of
 * the restrictions. We don't check for PMD_SIZE because our vmemmap
 * allocation code can fall back correctly. The pageblock alignment
 * requirement is met using altmap->reserve blocks.
 */
#define arch_supports_memmap_on_memory arch_supports_memmap_on_memory
static inline bool arch_supports_memmap_on_memory(unsigned long vmemmap_size)
{
	if (!radix_enabled())
		return false;
	/*
	 * With a 4K page size and a 2M PMD_SIZE, memory block sizes of
	 * 128MB and up can be aligned to the PMD, so require PMD_SIZE
	 * alignment here.
	 */
	if (IS_ENABLED(CONFIG_PPC_4K_PAGES))
		return IS_ALIGNED(vmemmap_size, PMD_SIZE);
	return true;
}

#endif /* CONFIG_PPC64 */

#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_PGTABLE_H */