// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * This file contains the routines for handling the MMU on those
 * PowerPC implementations where the MMU substantially follows the
 * architecture specification.  This includes the 6xx, 7xx, 7xxx,
 * and 8260 implementations but excludes the 8xx and 4xx.
 *  -- paulus
 *
 *  Derived from arch/ppc/mm/init.c:
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/memblock.h>

#include <asm/prom.h>
#include <asm/mmu.h>
#include <asm/machdep.h>
#include <asm/code-patching.h>
#include <asm/sections.h>

#include <mm/mmu_decl.h>

u8 __initdata early_hash[SZ_256K] __aligned(SZ_256K) = {0};

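/*
 * Hash table state: Hash points to the in-memory hash table, _SDR1 is
 * the value later written to the SDR1 register (hash table origin and
 * mask), and hash_mb/hash_mb2 are the mask shift counts patched into
 * hashtable.S by MMU_init_hw_patch() below.
 */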
struct hash_pte *Hash;
static unsigned long Hash_size, Hash_mask;
unsigned long _SDR1;
static unsigned int hash_mb, hash_mb2;

struct ppc_bat BATS[8][2];	/* 8 pairs of IBAT, DBAT */

struct batrange {		/* stores address ranges mapped by BATs */
	unsigned long start;
	unsigned long limit;
	phys_addr_t phys;
} bat_addrs[8];

/*
 * Return PA for this VA if it is mapped by a BAT, or 0
 */
phys_addr_t v_block_mapped(unsigned long va)
{
	int b;

	for (b = 0; b < ARRAY_SIZE(bat_addrs); ++b)
		if (va >= bat_addrs[b].start && va < bat_addrs[b].limit)
			return bat_addrs[b].phys + (va - bat_addrs[b].start);
	return 0;
}

/*
 * Return VA for a given PA or 0 if not mapped
 */
unsigned long p_block_mapped(phys_addr_t pa)
{
	int b;

	for (b = 0; b < ARRAY_SIZE(bat_addrs); ++b)
		if (pa >= bat_addrs[b].phys &&
		    pa < (bat_addrs[b].limit - bat_addrs[b].start) + bat_addrs[b].phys)
			return bat_addrs[b].start + (pa - bat_addrs[b].phys);
	return 0;
}

int __init find_free_bat(void)
{
	int b;
	int n = mmu_has_feature(MMU_FTR_USE_HIGH_BATS) ? 8 : 4;

	for (b = 0; b < n; b++) {
		struct ppc_bat *bat = BATS[b];

		if (!(bat[1].batu & 3))
			return b;
	}
	return -1;
}
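/*
 * Note: the low two bits of the upper BAT word are the Vs and Vp valid
 * bits, so a pair whose DBAT upper word has both clear is unused. This
 * matches the "Vs=1, Vp=0" encoding written by setbat()/setibat() below.
 */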

/*
 * This function calculates the size of the larger block usable to map the
 * beginning of an area based on the start address and size of that area:
 * - max block size is 256M on 6xx.
 * - base address must be aligned to the block size. So the maximum block size
 *   is identified by the lowest bit set to 1 in the base address (for instance
 *   if base is 0x16000000, max size is 0x02000000).
 * - block size has to be a power of two. This is calculated by finding the
 *   highest bit set to 1.
 */
unsigned int bat_block_size(unsigned long base, unsigned long top)
{
	unsigned int max_size = SZ_256M;
	unsigned int base_shift = (ffs(base) - 1) & 31;
	unsigned int block_shift = (fls(top - base) - 1) & 31;

	return min3(max_size, 1U << base_shift, 1U << block_shift);
}
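/*
 * Worked example: for base = 0x00a00000 and top = 0x01000000, the lowest
 * bit set in base gives base_shift = 21 (2M alignment), and the highest
 * bit set in top - base = 0x00600000 gives block_shift = 22 (4M), so the
 * function returns min(256M, 2M, 4M) = 2M.
 */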

/*
 * Set up one of the IBAT (block address translation) register pairs.
 * The parameters are not checked; in particular size must be a power
 * of 2 between 128k and 256M.
 */
static void setibat(int index, unsigned long virt, phys_addr_t phys,
		    unsigned int size, pgprot_t prot)
{
	unsigned int bl = (size >> 17) - 1;
	int wimgxpp;
	struct ppc_bat *bat = BATS[index];
	unsigned long flags = pgprot_val(prot);

	if (!cpu_has_feature(CPU_FTR_NEED_COHERENT))
		flags &= ~_PAGE_COHERENT;

	/* _PAGE_EXEC is a nonzero constant here, so IBATs always get BPP_RX */
	wimgxpp = (flags & _PAGE_COHERENT) | (_PAGE_EXEC ? BPP_RX : BPP_XX);
	bat[0].batu = virt | (bl << 2) | 2; /* Vs=1, Vp=0 */
	bat[0].batl = BAT_PHYS_ADDR(phys) | wimgxpp;
	if (flags & _PAGE_USER)
		bat[0].batu |= 1;	/* Vp = 1 */
}

static void clearibat(int index)
{
	struct ppc_bat *bat = BATS[index];

	bat[0].batu = 0;
	bat[0].batl = 0;
}
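/*
 * setibat()/clearibat() only update the BATS[] shadow array; the values
 * take effect once update_bats() loads them into the hardware BAT
 * registers, as done in mmu_mark_initmem_nx() and mmu_mark_rodata_ro().
 */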

static unsigned long __init __mmu_mapin_ram(unsigned long base, unsigned long top)
{
	int idx;

	while ((idx = find_free_bat()) != -1 && base != top) {
		unsigned int size = bat_block_size(base, top);

		if (size < 128 << 10)
			break;
		setbat(idx, PAGE_OFFSET + base, base, size, PAGE_KERNEL_X);
		base += size;
	}

	return base;
}
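/*
 * Example: asked to map [0, 768M) with all BATs free, the loop above
 * picks three 256M blocks (at 0, 256M and 512M) and returns 768M.
 * Asked to map [0, 192M), it picks 128M then 64M, since each block
 * must be a power of two aligned to its own size.
 */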

unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
{
	unsigned long done;
	unsigned long border = (unsigned long)__init_begin - PAGE_OFFSET;

	if (__map_without_bats) {
		pr_debug("RAM mapped without BATs\n");
		return base;
	}
	if (debug_pagealloc_enabled()) {
		if (base >= border)
			return base;
		if (top >= border)
			top = border;
	}

	if (!strict_kernel_rwx_enabled() || base >= border || top <= border)
		return __mmu_mapin_ram(base, top);

	done = __mmu_mapin_ram(base, border);
	if (done != border)
		return done;

	return __mmu_mapin_ram(border, top);
}
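/*
 * With strict kernel RWX, RAM is mapped in two passes split at
 * __init_begin so that no single BAT block crosses the text/data
 * border; this keeps the later protection adjustments done by
 * mmu_mark_initmem_nx() and mmu_mark_rodata_ro() working at block
 * granularity.
 */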

static bool is_module_segment(unsigned long addr)
{
	if (!IS_ENABLED(CONFIG_MODULES))
		return false;
#ifdef MODULES_VADDR
	if (addr < ALIGN_DOWN(MODULES_VADDR, SZ_256M))
		return false;
	if (addr > ALIGN(MODULES_END, SZ_256M) - 1)
		return false;
#else
	if (addr < ALIGN_DOWN(VMALLOC_START, SZ_256M))
		return false;
	if (addr > ALIGN(VMALLOC_END, SZ_256M) - 1)
		return false;
#endif
	return true;
}
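/*
 * The 32-bit address space is divided into sixteen 256M segments, and
 * execute permission (the segment register N bit) can only be revoked
 * per segment. The 256M rounding above reflects that granularity: any
 * segment that overlaps module (or vmalloc) space must stay executable
 * so module code can run.
 */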

void mmu_mark_initmem_nx(void)
{
	int nb = mmu_has_feature(MMU_FTR_USE_HIGH_BATS) ? 8 : 4;
	int i;
	unsigned long base = (unsigned long)_stext - PAGE_OFFSET;
	unsigned long top = ALIGN((unsigned long)_etext - PAGE_OFFSET, SZ_128K);
	unsigned long border = (unsigned long)__init_begin - PAGE_OFFSET;
	unsigned long size;

	for (i = 0; i < nb - 1 && base < top;) {
		size = bat_block_size(base, top);
		setibat(i++, PAGE_OFFSET + base, base, size, PAGE_KERNEL_TEXT);
		base += size;
	}
	if (base < top) {
		size = bat_block_size(base, top);
		if ((top - base) > size) {
			size <<= 1;
			if (strict_kernel_rwx_enabled() && base + size > border)
				pr_warn("Some RW data is getting mapped X. "
					"Adjust CONFIG_DATA_SHIFT to avoid that.\n");
		}
		setibat(i++, PAGE_OFFSET + base, base, size, PAGE_KERNEL_TEXT);
		base += size;
	}
	for (; i < nb; i++)
		clearibat(i);

	update_bats();

	for (i = TASK_SIZE >> 28; i < 16; i++) {
		/* Do not set NX on VM space for modules */
		if (is_module_segment(i << 28))
			continue;

		/* Set the N (no-execute) bit in the segment register */
		mtsrin(mfsrin(i << 28) | 0x10000000, i << 28);
	}
}

void mmu_mark_rodata_ro(void)
{
	int nb = mmu_has_feature(MMU_FTR_USE_HIGH_BATS) ? 8 : 4;
	int i;

	for (i = 0; i < nb; i++) {
		struct ppc_bat *bat = BATS[i];

		/* Flip the DBAT protection bits from RW to read-only (BPP_RX) */
		if (bat_addrs[i].start < (unsigned long)__init_begin)
			bat[1].batl = (bat[1].batl & ~BPP_RW) | BPP_RX;
	}

	update_bats();
}

/*
 * Set up one of the I/D BAT (block address translation) register pairs.
 * The parameters are not checked; in particular size must be a power
 * of 2 between 128k and 256M.
 * On 603+, only set IBAT when _PAGE_EXEC is set
 */
void __init setbat(int index, unsigned long virt, phys_addr_t phys,
		   unsigned int size, pgprot_t prot)
{
	unsigned int bl;
	int wimgxpp;
	struct ppc_bat *bat;
	unsigned long flags = pgprot_val(prot);

	if (index == -1)
		index = find_free_bat();
	if (index == -1) {
		pr_err("%s: no BAT available for mapping 0x%llx\n", __func__,
		       (unsigned long long)phys);
		return;
	}
	bat = BATS[index];

	if ((flags & _PAGE_NO_CACHE) ||
	    (cpu_has_feature(CPU_FTR_NEED_COHERENT) == 0))
		flags &= ~_PAGE_COHERENT;

	bl = (size >> 17) - 1;
	/* Do DBAT first */
	wimgxpp = flags & (_PAGE_WRITETHRU | _PAGE_NO_CACHE
			   | _PAGE_COHERENT | _PAGE_GUARDED);
	wimgxpp |= (flags & _PAGE_RW) ? BPP_RW : BPP_RX;
	bat[1].batu = virt | (bl << 2) | 2; /* Vs=1, Vp=0 */
	bat[1].batl = BAT_PHYS_ADDR(phys) | wimgxpp;
	if (flags & _PAGE_USER)
		bat[1].batu |= 1;	/* Vp = 1 */
	if (flags & _PAGE_GUARDED) {
		/* G bit must be zero in IBATs */
		flags &= ~_PAGE_EXEC;
	}
	if (flags & _PAGE_EXEC)
		bat[0] = bat[1];
	else
		bat[0].batu = bat[0].batl = 0;

	bat_addrs[index].start = virt;
	bat_addrs[index].limit = virt + ((bl + 1) << 17) - 1;
	bat_addrs[index].phys = phys;
}
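/*
 * Usage sketch (illustrative, not from this file): a platform could map
 * a 256K guarded, non-cached I/O window by letting setbat() pick a free
 * pair, e.g.:
 *
 *	setbat(-1, ioaddr_virt, ioaddr_phys, SZ_256K, PAGE_KERNEL_NCG);
 *
 * where ioaddr_virt/ioaddr_phys are hypothetical addresses. Since such
 * a mapping is guarded and not executable, only the DBAT is set.
 */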

/*
 * Preload a translation in the hash table
 */
void hash_preload(struct mm_struct *mm, unsigned long ea)
{
	pmd_t *pmd;

	if (!Hash)
		return;
	pmd = pmd_off(mm, ea);
	if (!pmd_none(*pmd))
		add_hash_page(mm->context.id, ea, pmd_val(*pmd));
}

/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a PTE in the linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated linux PTE.
 *
 * This must always be called with the pte lock held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t *ptep)
{
	if (!mmu_has_feature(MMU_FTR_HPTE_TABLE))
		return;
	/*
	 * We don't need to worry about _PAGE_PRESENT here because we are
	 * called with either mm->page_table_lock held or ptl lock held
	 */

	/* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
	if (!pte_young(*ptep) || address >= TASK_SIZE)
		return;

	/* We have to test for regs NULL since init will get here first thing at boot */
	if (!current->thread.regs)
		return;

	/* We also avoid filling the hash if not coming from a fault (0x300 is a DSI, 0x400 an ISI) */
	if (TRAP(current->thread.regs) != 0x300 && TRAP(current->thread.regs) != 0x400)
		return;

	hash_preload(vma->vm_mm, address);
}

/*
 * Initialize the hash table and patch the instructions in hashtable.S.
 */
void __init MMU_init_hw(void)
{
	unsigned int n_hpteg, lg_n_hpteg;

	if (!mmu_has_feature(MMU_FTR_HPTE_TABLE))
		return;

	if (ppc_md.progress)
		ppc_md.progress("hash:enter", 0x105);

#define LG_HPTEG_SIZE	6		/* 64 bytes per HPTEG */
#define SDR1_LOW_BITS	((n_hpteg - 1) >> 10)
#define MIN_N_HPTEG	1024		/* min 64kB hash table */

	/*
	 * Allow 1 HPTE (1/8 HPTEG) for each page of memory.
	 * This is less than the recommended amount, but then
	 * Linux ain't AIX.
	 */
	n_hpteg = total_memory / (PAGE_SIZE * 8);
	if (n_hpteg < MIN_N_HPTEG)
		n_hpteg = MIN_N_HPTEG;
	lg_n_hpteg = __ilog2(n_hpteg);
	if (n_hpteg & (n_hpteg - 1)) {
		++lg_n_hpteg;		/* round up if not power of 2 */
		n_hpteg = 1 << lg_n_hpteg;
	}
	Hash_size = n_hpteg << LG_HPTEG_SIZE;

	/*
	 * Find some memory for the hash table.
	 */
	if (ppc_md.progress)
		ppc_md.progress("hash:find piece", 0x322);
	Hash = memblock_alloc(Hash_size, Hash_size);
	if (!Hash)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, Hash_size, Hash_size);
	_SDR1 = __pa(Hash) | SDR1_LOW_BITS;

	pr_info("Total memory = %lldMB; using %ldkB for hash table\n",
		(unsigned long long)(total_memory >> 20), Hash_size >> 10);

	Hash_mask = n_hpteg - 1;
	hash_mb2 = hash_mb = 32 - LG_HPTEG_SIZE - lg_n_hpteg;
	if (lg_n_hpteg > 16)
		hash_mb2 = 16 - LG_HPTEG_SIZE;
}
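/*
 * Worked example of the sizing above: with 256MB of RAM and 4K pages,
 * n_hpteg = 256M / (4K * 8) = 8192, already a power of two, so
 * Hash_size = 8192 * 64 bytes = 512KB, Hash_mask = 8191, and
 * SDR1_LOW_BITS = (8192 - 1) >> 10 = 7.
 */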

void __init MMU_init_hw_patch(void)
{
	unsigned int hmask = Hash_mask >> (16 - LG_HPTEG_SIZE);
	unsigned int hash = (unsigned int)Hash - PAGE_OFFSET;

	if (!mmu_has_feature(MMU_FTR_HPTE_TABLE))
		return;

	if (ppc_md.progress)
		ppc_md.progress("hash:patch", 0x345);
	if (ppc_md.progress)
		ppc_md.progress("hash:done", 0x205);

	/* WARNING: Make sure nothing can trigger a KASAN check past this point */

	/*
	 * Patch up the instructions in hashtable.S:create_hpte
	 */
	modify_instruction_site(&patch__hash_page_A0, 0xffff, hash >> 16);
	modify_instruction_site(&patch__hash_page_A1, 0x7c0, hash_mb << 6);
	modify_instruction_site(&patch__hash_page_A2, 0x7c0, hash_mb2 << 6);
	modify_instruction_site(&patch__hash_page_B, 0xffff, hmask);
	modify_instruction_site(&patch__hash_page_C, 0xffff, hmask);

	/*
	 * Patch up the instructions in hashtable.S:flush_hash_page
	 */
	modify_instruction_site(&patch__flush_hash_A0, 0xffff, hash >> 16);
	modify_instruction_site(&patch__flush_hash_A1, 0x7c0, hash_mb << 6);
	modify_instruction_site(&patch__flush_hash_A2, 0x7c0, hash_mb2 << 6);
	modify_instruction_site(&patch__flush_hash_B, 0xffff, hmask);
}
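/*
 * The hash table address and mask are patched directly into the
 * instruction immediates of hashtable.S rather than loaded from memory:
 * the hash miss handlers run on every fault, so this keeps the hot path
 * free of extra loads. The masks (0xffff, 0x7c0) select which bits of
 * each instruction are rewritten.
 */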

void setup_initial_memory_limit(phys_addr_t first_memblock_base,
				phys_addr_t first_memblock_size)
{
	/* We don't currently support the first MEMBLOCK not mapping 0
	 * physical on those processors
	 */
	BUG_ON(first_memblock_base != 0);

	memblock_set_current_limit(min_t(u64, first_memblock_size, SZ_256M));
}
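/*
 * The 256M clamp matches the maximum BAT block size: early boot
 * allocations must come from memory that the initial BAT mapping can
 * cover, and a single BAT maps at most 256M.
 */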

void __init print_system_hash_info(void)
{
	pr_info("Hash_size         = 0x%lx\n", Hash_size);
	if (Hash_mask)
		pr_info("Hash_mask         = 0x%lx\n", Hash_mask);
}

#ifdef CONFIG_PPC_KUEP
void __init setup_kuep(bool disabled)
{
	pr_info("Activating Kernel Userspace Execution Prevention\n");

	if (disabled)
		pr_warn("KUEP cannot be disabled yet on 6xx when compiled in\n");
}
#endif

#ifdef CONFIG_PPC_KUAP
void __init setup_kuap(bool disabled)
{
	pr_info("Activating Kernel Userspace Access Protection\n");

	if (disabled)
		pr_warn("KUAP cannot be disabled yet on 6xx when compiled in\n");
}
#endif