// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * This file contains the routines for initializing the MMU
 * on the 8xx series of chips.
 *  -- christophe
 *
 *  Derived from arch/powerpc/mm/40x_mmu.c:
 */

#include <linux/memblock.h>
#include <linux/hugetlb.h>

#include <mm/mmu_decl.h>

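/*
 * FIX_IMMR_SIZE is a number of fixmap pages, so shifting it left by
 * PAGE_SHIFT converts it to a size in bytes.
 */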
#define IMMR_SIZE (FIX_IMMR_SIZE << PAGE_SHIFT)

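/*
 * Amount of RAM, starting at physical address 0, that is covered by
 * block mappings (set by mmu_mapin_ram() below).
 */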
static unsigned long block_mapped_ram;

/*
 * Return PA for this VA if it is in an area mapped with LTLBs or fixmap.
 * Otherwise return 0.
 */
phys_addr_t v_block_mapped(unsigned long va)
{
	unsigned long p = PHYS_IMMR_BASE;

	if (va >= VIRT_IMMR_BASE && va < VIRT_IMMR_BASE + IMMR_SIZE)
		return p + va - VIRT_IMMR_BASE;
	if (va >= PAGE_OFFSET && va < PAGE_OFFSET + block_mapped_ram)
		return __pa(va);
	return 0;
}
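/*
 * Worked example with hypothetical values: with VIRT_IMMR_BASE = 0xff000000,
 * PHYS_IMMR_BASE = 0xfa200000 and IMMR_SIZE = SZ_512K, looking up
 * va = 0xff001000 falls in the IMMR window and yields
 * 0xfa200000 + 0xff001000 - 0xff000000 = 0xfa201000.
 */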

/*
 * Return VA for a given PA mapped with LTLBs or fixmap
 * Return 0 if not mapped
 */
unsigned long p_block_mapped(phys_addr_t pa)
{
	unsigned long p = PHYS_IMMR_BASE;

	if (pa >= p && pa < p + IMMR_SIZE)
		return VIRT_IMMR_BASE + pa - p;
	if (pa < block_mapped_ram)
		return (unsigned long)__va(pa);
	return 0;
}
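/*
 * These two helpers let callers (e.g. the ioremap/iounmap paths) detect
 * addresses that are block-mapped and therefore have no page table
 * entries to consult or tear down.
 */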
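/*
 * On the 8xx a PGD entry covers 4M, so an 8M page spans two consecutive
 * entries; both are populated with the same huge-page table here.
 */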
static pte_t __init *early_hugepd_alloc_kernel(hugepd_t *pmdp, unsigned long va)
{
	if (hpd_val(*pmdp) == 0) {
		pte_t *ptep = memblock_alloc(sizeof(pte_basic_t), SZ_4K);

		if (!ptep)
			return NULL;

		hugepd_populate_kernel((hugepd_t *)pmdp, ptep, PAGE_SHIFT_8M);
		hugepd_populate_kernel((hugepd_t *)pmdp + 1, ptep, PAGE_SHIFT_8M);
	}
	return hugepte_offset(*(hugepd_t *)pmdp, va, PGDIR_SHIFT);
}

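/*
 * Map a single huge page at @va -> @pa with protection @prot.  @psize must
 * be MMU_PAGE_512K or MMU_PAGE_8M.  @new selects between creating a fresh
 * early mapping and updating the protection of an existing one.
 */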
static int __ref __early_map_kernel_hugepage(unsigned long va, phys_addr_t pa,
					     pgprot_t prot, int psize, bool new)
{
	pmd_t *pmdp = pmd_off_k(va);
	pte_t *ptep;

	if (WARN_ON(psize != MMU_PAGE_512K && psize != MMU_PAGE_8M))
		return -EINVAL;

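	/* Creating a new mapping may require allocating page tables from memblock */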
	if (new) {
		if (WARN_ON(slab_is_available()))
			return -EINVAL;

		if (psize == MMU_PAGE_512K)
			ptep = early_pte_alloc_kernel(pmdp, va);
		else
			ptep = early_hugepd_alloc_kernel((hugepd_t *)pmdp, va);
	} else {
		if (psize == MMU_PAGE_512K)
			ptep = pte_offset_kernel(pmdp, va);
		else
			ptep = hugepte_offset(*(hugepd_t *)pmdp, va, PGDIR_SHIFT);
	}

	if (WARN_ON(!ptep))
		return -ENOMEM;

	/* The PTE should never already be present */
	if (new && WARN_ON(pte_present(*ptep) && pgprot_val(prot)))
		return -EINVAL;

	set_huge_pte_at(&init_mm, va, ptep,
			pte_mkhuge(pfn_pte(pa >> PAGE_SHIFT, prot)), psize);

	return 0;
}

/*
 * MMU_init_hw does the chip-specific initialization of the MMU hardware.
 */
void __init MMU_init_hw(void)
{
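	/*
	 * Nothing to do here: the 8xx has no hash table and relies on the
	 * software TLB miss handlers set up in head_8xx.S.
	 */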
}

static bool immr_is_mapped __initdata;

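/*
 * Map the IMMR (internal memory-mapped registers) area with a single
 * non-cached, guarded 512K huge page so it is usable before ioremap()
 * is available.  Idempotent, so it can be called from several early paths.
 */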
void __init mmu_mapin_immr(void)
{
	if (immr_is_mapped)
		return;

	immr_is_mapped = true;

	__early_map_kernel_hugepage(VIRT_IMMR_BASE, PHYS_IMMR_BASE,
				    PAGE_KERNEL_NCG, MMU_PAGE_512K, true);
}

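/*
 * Map [offset, top) of RAM at PAGE_OFFSET + offset, using 8M pages for the
 * 8M-aligned middle of the range and 512K pages for the unaligned head and
 * tail.  For example, mapping [0x280000, 0x1a80000) uses 512K pages from
 * 2.5M up to the 8M boundary, 8M pages for [8M, 24M), then 512K pages for
 * the remaining [24M, 26.5M).
 */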
static void mmu_mapin_ram_chunk(unsigned long offset, unsigned long top,
				pgprot_t prot, bool new)
{
	unsigned long v = PAGE_OFFSET + offset;
	unsigned long p = offset;

	WARN_ON(!IS_ALIGNED(offset, SZ_512K) || !IS_ALIGNED(top, SZ_512K));

	for (; p < ALIGN(p, SZ_8M) && p < top; p += SZ_512K, v += SZ_512K)
		__early_map_kernel_hugepage(v, p, prot, MMU_PAGE_512K, new);
	for (; p < ALIGN_DOWN(top, SZ_8M) && p < top; p += SZ_8M, v += SZ_8M)
		__early_map_kernel_hugepage(v, p, prot, MMU_PAGE_8M, new);
	for (; p < ALIGN_DOWN(top, SZ_512K) && p < top; p += SZ_512K, v += SZ_512K)
		__early_map_kernel_hugepage(v, p, prot, MMU_PAGE_512K, new);

	if (!new)
		flush_tlb_kernel_range(PAGE_OFFSET + offset, PAGE_OFFSET + top);
}
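/*
 * Block-map RAM from 0 to @top: kernel text with PAGE_KERNEL_TEXT, the
 * rest with PAGE_KERNEL.  With DEBUG_PAGEALLOC or KFENCE, only the text
 * is block-mapped so the rest can keep page-granular mappings.  Returns
 * the amount of memory actually block-mapped.
 */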
unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
{
	unsigned long etext8 = ALIGN(__pa(_etext), SZ_8M);
	unsigned long sinittext = __pa(_sinittext);
	bool strict_boundary = strict_kernel_rwx_enabled() || debug_pagealloc_enabled_or_kfence();
	unsigned long boundary = strict_boundary ? sinittext : etext8;
	unsigned long einittext8 = ALIGN(__pa(_einittext), SZ_8M);

	WARN_ON(top < einittext8);

	mmu_mapin_immr();

	mmu_mapin_ram_chunk(0, boundary, PAGE_KERNEL_TEXT, true);
	if (debug_pagealloc_enabled_or_kfence()) {
		top = boundary;
	} else {
		mmu_mapin_ram_chunk(boundary, einittext8, PAGE_KERNEL_TEXT, true);
		mmu_mapin_ram_chunk(einittext8, top, PAGE_KERNEL, true);
	}

	if (top > SZ_32M)
		memblock_set_current_limit(top);

	block_mapped_ram = top;

	return top;
}
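/*
 * Called when init memory is freed: remap the init text area with
 * PAGE_KERNEL so it is no longer executable, then re-pin the kernel
 * TLB entries to match.
 */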
void mmu_mark_initmem_nx(void)
{
	unsigned long etext8 = ALIGN(__pa(_etext), SZ_8M);
	unsigned long sinittext = __pa(_sinittext);
	unsigned long boundary = strict_kernel_rwx_enabled() ? sinittext : etext8;
	unsigned long einittext8 = ALIGN(__pa(_einittext), SZ_8M);

	if (!debug_pagealloc_enabled_or_kfence())
		mmu_mapin_ram_chunk(boundary, einittext8, PAGE_KERNEL, false);

	mmu_pin_tlb(block_mapped_ram, false);
}

#ifdef CONFIG_STRICT_KERNEL_RWX
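/*
 * Remap everything below _sinittext (kernel text and rodata) as read-only
 * executable, and pin read-only data TLB entries if configured.
 */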
void mmu_mark_rodata_ro(void)
{
	unsigned long sinittext = __pa(_sinittext);

	mmu_mapin_ram_chunk(0, sinittext, PAGE_KERNEL_ROX, false);
	if (IS_ENABLED(CONFIG_PIN_TLB_DATA))
		mmu_pin_tlb(block_mapped_ram, true);
}
#endif

void __init setup_initial_memory_limit(phys_addr_t first_memblock_base,
				       phys_addr_t first_memblock_size)
{
	/*
	 * We currently require the first memblock to start at physical
	 * address 0 on these processors.
	 */
	BUG_ON(first_memblock_base != 0);

	/* 8xx can only access 32MB at the moment */
	memblock_set_current_limit(min_t(u64, first_memblock_size, SZ_32M));
}

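/*
 * The 8xx never creates huge vmap mappings at the PUD or PMD level, so
 * there is never a huge entry to clear; returning 0 tells the generic
 * code that nothing was done.
 */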
int pud_clear_huge(pud_t *pud)
{
	return 0;
}

int pmd_clear_huge(pmd_t *pmd)
{
	return 0;
}
