// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * This file contains the routines setting up the linux page tables.
 *  -- paulus
 *
 *  Derived from arch/ppc/mm/init.c:
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/memblock.h>
#include <linux/slab.h>

#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/setup.h>
#include <asm/sections.h>
#include <asm/early_ioremap.h>

#include <mm/mmu_decl.h>

extern char etext[], _stext[], _sinittext[], _einittext[];

static u8 early_fixmap_pagetable[FIXMAP_PTE_SIZE] __page_aligned_data;

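/*
 * Install the statically allocated fixmap PTE page(s) into the kernel PMD
 * entries covering FIXADDR_START..FIXADDR_TOP, so that early_ioremap() and
 * the early fixmap slots can be used before the normal page table
 * allocators are available.
 */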
notrace void __init early_ioremap_init(void)
{
	unsigned long addr = ALIGN_DOWN(FIXADDR_START, PGDIR_SIZE);
	pte_t *ptep = (pte_t *)early_fixmap_pagetable;
	pmd_t *pmdp = pmd_off_k(addr);

	for (; (s32)(FIXADDR_TOP - addr) > 0;
	     addr += PGDIR_SIZE, ptep += PTRS_PER_PTE, pmdp++)
		pmd_populate_kernel(&init_mm, pmdp, ptep);

	early_ioremap_setup();
}

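/*
 * Allocate a naturally aligned page table chunk from memblock.  This runs
 * before the slab allocator is up, so an allocation failure is fatal.
 */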
static void __init *early_alloc_pgtable(unsigned long size)
{
	void *ptr = memblock_alloc(size, size);

	if (!ptr)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, size, size);

	return ptr;
}

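/*
 * Boot-time counterpart of pte_alloc_kernel(): make sure the PMD entry
 * points at a PTE table (allocating one from memblock if necessary) and
 * return the PTE for va.
 */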
pte_t __init *early_pte_alloc_kernel(pmd_t *pmdp, unsigned long va)
{
	if (pmd_none(*pmdp)) {
		pte_t *ptep = early_alloc_pgtable(PTE_FRAG_SIZE);

		pmd_populate_kernel(&init_mm, pmdp, ptep);
	}
	return pte_offset_kernel(pmdp, va);
}

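/*
 * Map one page of kernel virtual address space: va -> pa with protection
 * prot.  The PTE page is allocated with pte_alloc_kernel() once slab is
 * available, or from memblock during early boot.  Returns 0 on success,
 * -ENOMEM if the PTE page could not be allocated.  Used for instance by
 * __mapin_ram_chunk() below, as map_kernel_page(v, p, PAGE_KERNEL).
 */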
int __ref map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot)
{
	pmd_t *pd;
	pte_t *pg;
	int err = -ENOMEM;

	/* Use upper 10 bits of VA to index the first level map */
	pd = pmd_off_k(va);
	/* Use middle 10 bits of VA to index the second-level map */
	if (likely(slab_is_available()))
		pg = pte_alloc_kernel(pd, va);
	else
		pg = early_pte_alloc_kernel(pd, va);
	if (pg) {
		err = 0;
		/*
		 * The PTE should never be already set nor present in the
		 * hash table.
		 */
		BUG_ON((pte_present(*pg) | pte_hashpte(*pg)) && pgprot_val(prot));
		set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT, prot));
	}
	smp_wmb();
	return err;
}

/*
 * Map in a chunk of physical memory, from offset to top, into the kernel
 * linear mapping.  Kernel text and init text get PAGE_KERNEL_TEXT,
 * everything else PAGE_KERNEL.
 */
static void __init __mapin_ram_chunk(unsigned long offset, unsigned long top)
{
	unsigned long v, s;
	phys_addr_t p;
	int ktext;

	s = offset;
	v = PAGE_OFFSET + s;
	p = memstart_addr + s;
	for (; s < top; s += PAGE_SIZE) {
		ktext = ((char *)v >= _stext && (char *)v < etext) ||
			((char *)v >= _sinittext && (char *)v < _einittext);
		map_kernel_page(v, p, ktext ? PAGE_KERNEL_TEXT : PAGE_KERNEL);
#ifdef CONFIG_PPC_BOOK3S_32
		if (ktext)
			hash_preload(&init_mm, v);
#endif
		v += PAGE_SIZE;
		p += PAGE_SIZE;
	}
}

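/*
 * Create the linear mapping for all of lowmem.  mmu_mapin_ram() gets the
 * first shot at each region with MMU-specific block mappings (typically
 * BATs or large pages, depending on the MMU family); whatever it leaves
 * unmapped is then mapped page by page.
 */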
void __init mapin_ram(void)
{
	phys_addr_t base, end;
	u64 i;

	for_each_mem_range(i, &base, &end) {
		phys_addr_t top = min(end, total_lowmem);

		if (base >= top)
			continue;
		base = mmu_mapin_ram(base, top);
		__mapin_ram_chunk(base, top);
	}
}

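/*
 * Rewrite the PTE protection bits of a single lowmem page without flushing
 * the TLB; callers are responsible for the flush.  Block-mapped addresses
 * are left alone since their protection is not controlled by PTEs.
 */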
static int __change_page_attr_noflush(struct page *page, pgprot_t prot)
{
	pte_t *kpte;
	unsigned long address;

	BUG_ON(PageHighMem(page));
	address = (unsigned long)page_address(page);

	if (v_block_mapped(address))
		return 0;
	kpte = virt_to_kpte(address);
	if (!kpte)
		return -EINVAL;
	__set_pte_at(&init_mm, address, kpte, mk_pte(page, prot), 0);

	return 0;
}

/*
 * Change the page attributes of a page in the linear mapping.
 *
 * THIS DOES NOTHING WITH BAT MAPPINGS, DEBUG USE ONLY
 */
static int change_page_attr(struct page *page, int numpages, pgprot_t prot)
{
	int i, err = 0;
	unsigned long flags;
	struct page *start = page;

	local_irq_save(flags);
	for (i = 0; i < numpages; i++, page++) {
		err = __change_page_attr_noflush(page, prot);
		if (err)
			break;
	}
	wmb();
	local_irq_restore(flags);
	flush_tlb_kernel_range((unsigned long)page_address(start),
			       (unsigned long)page_address(page));
	return err;
}

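/*
 * Once the init text is no longer needed, remap it with normal
 * (non-executable) kernel page protections.  When the region is covered by
 * a block mapping, defer to the MMU-specific helper instead.
 */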
void mark_initmem_nx(void)
{
	struct page *page = virt_to_page(_sinittext);
	unsigned long numpages = PFN_UP((unsigned long)_einittext) -
				 PFN_DOWN((unsigned long)_sinittext);

	if (v_block_mapped((unsigned long)_sinittext))
		mmu_mark_initmem_nx();
	else
		change_page_attr(page, numpages, PAGE_KERNEL);
}

#ifdef CONFIG_STRICT_KERNEL_RWX
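/*
 * Enforce the kernel's RWX protections: make kernel text read-only and
 * executable, and everything from __start_rodata up to __init_begin
 * read-only, then verify no writable+executable mappings remain with
 * ptdump_check_wx().
 */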
void mark_rodata_ro(void)
{
	struct page *page;
	unsigned long numpages;

	if (v_block_mapped((unsigned long)_stext + 1)) {
		mmu_mark_rodata_ro();
		ptdump_check_wx();
		return;
	}

	page = virt_to_page(_stext);
	numpages = PFN_UP((unsigned long)_etext) -
		   PFN_DOWN((unsigned long)_stext);

	change_page_attr(page, numpages, PAGE_KERNEL_ROX);
	/*
	 * mark .rodata as read only. Use __init_begin rather than __end_rodata
	 * to cover NOTES and EXCEPTION_TABLE.
	 */
	page = virt_to_page(__start_rodata);
	numpages = PFN_UP((unsigned long)__init_begin) -
		   PFN_DOWN((unsigned long)__start_rodata);

	change_page_attr(page, numpages, PAGE_KERNEL_RO);

	// mark_initmem_nx() should have already run by now
	ptdump_check_wx();
}
#endif

#ifdef CONFIG_DEBUG_PAGEALLOC
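/*
 * DEBUG_PAGEALLOC hook: map or unmap pages in the linear mapping so that
 * stray accesses to freed pages fault immediately.  Highmem pages are not
 * part of the linear mapping, so they are skipped here.
 */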
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (PageHighMem(page))
		return;

	change_page_attr(page, numpages, enable ? PAGE_KERNEL : __pgprot(0));
}
#endif /* CONFIG_DEBUG_PAGEALLOC */