// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * This file contains the routines for setting up the Linux page tables.
 *  -- paulus
 *
 *  Derived from arch/ppc/mm/init.c:
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/set_memory.h>

#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/setup.h>
#include <asm/sections.h>
#include <asm/early_ioremap.h>

#include <mm/mmu_decl.h>

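/*
 * Statically allocated, page-aligned PTE table backing the fixmap
 * region.  early_ioremap_init() runs before any memory allocator is
 * up, so the PTEs for the fixmap must be reserved at compile time.
 */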
static u8 early_fixmap_pagetable[FIXMAP_PTE_SIZE] __page_aligned_data;

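/*
 * Hook every PMD entry covering the fixmap range up to the static PTE
 * table above, then hand over to the generic early_ioremap code, so
 * that early_ioremap()/early_iounmap() and the fixmap slots can be
 * used long before the normal page table allocators are available.
 */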
notrace void __init early_ioremap_init(void)
{
	unsigned long addr = ALIGN_DOWN(FIXADDR_START, PGDIR_SIZE);
	pte_t *ptep = (pte_t *)early_fixmap_pagetable;
	pmd_t *pmdp = pmd_off_k(addr);

	for (; (s32)(FIXADDR_TOP - addr) > 0;
	     addr += PGDIR_SIZE, ptep += PTRS_PER_PTE, pmdp++)
		pmd_populate_kernel(&init_mm, pmdp, ptep);

	early_ioremap_setup();
}

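/*
 * Boot-time page table allocator: returns zeroed memory from memblock,
 * aligned to its own size.  Allocation failure this early is fatal,
 * hence the panic() rather than an error return.
 */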
static void __init *early_alloc_pgtable(unsigned long size)
{
	void *ptr = memblock_alloc(size, size);

	if (!ptr)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, size, size);

	return ptr;
}

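/*
 * Early counterpart of pte_alloc_kernel(): if *pmdp is still empty,
 * back it with a freshly allocated PTE fragment, then return the PTE
 * entry for va.  Used by map_kernel_page() while slab is unavailable.
 */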
pte_t __init *early_pte_alloc_kernel(pmd_t *pmdp, unsigned long va)
{
	if (pmd_none(*pmdp)) {
		pte_t *ptep = early_alloc_pgtable(PTE_FRAG_SIZE);

		pmd_populate_kernel(&init_mm, pmdp, ptep);
	}
	return pte_offset_kernel(pmdp, va);
}

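/*
 * Map a single kernel page: va to pa with protection prot.  Allocates
 * the PTE page if needed, picking the early or the regular allocator
 * depending on whether slab is up yet.  Returns 0 on success, -ENOMEM
 * if the PTE page could not be allocated.  The final smp_wmb() orders
 * the page table update ahead of subsequent accesses through the
 * mapping.
 */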
int __ref map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot)
{
	pmd_t *pd;
	pte_t *pg;
	int err = -ENOMEM;

	/* Use upper 10 bits of VA to index the first-level map */
	pd = pmd_off_k(va);
	/* Use middle 10 bits of VA to index the second-level map */
	if (likely(slab_is_available()))
		pg = pte_alloc_kernel(pd, va);
	else
		pg = early_pte_alloc_kernel(pd, va);
	if (pg) {
		err = 0;
		/* The PTE should never be already set nor present in the
		 * hash table
		 */
		BUG_ON((pte_present(*pg) | pte_hashpte(*pg)) && pgprot_val(prot));
		set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT, prot));
	}
	smp_wmb();
	return err;
}

/*
 * Map in the chunk of physical memory from offset to top (both offsets
 * from the base of RAM): kernel text gets PAGE_KERNEL_TEXT, everything
 * else PAGE_KERNEL.
 */
static void __init __mapin_ram_chunk(unsigned long offset, unsigned long top)
{
	unsigned long v, s;
	phys_addr_t p;
	bool ktext;

	s = offset;
	v = PAGE_OFFSET + s;
	p = memstart_addr + s;
	for (; s < top; s += PAGE_SIZE) {
		ktext = core_kernel_text(v);
		map_kernel_page(v, p, ktext ? PAGE_KERNEL_TEXT : PAGE_KERNEL);
		v += PAGE_SIZE;
		p += PAGE_SIZE;
	}
}

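/*
 * Create the kernel's linear mapping of lowmem.  Each memblock range
 * is first offered to mmu_mapin_ram(), which may cover the start of
 * the range with block mappings; whatever it leaves over is mapped
 * page by page via __mapin_ram_chunk().
 */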
void __init mapin_ram(void)
{
	phys_addr_t base, end;
	u64 i;

	for_each_mem_range(i, &base, &end) {
		phys_addr_t top = min(end, total_lowmem);

		if (base >= top)
			continue;
		base = mmu_mapin_ram(base, top);
		__mapin_ram_chunk(base, top);
	}
}

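/*
 * Called when init memory is released: make the init text region
 * non-executable and writable again.  Block-mapped regions are handled
 * by the MMU-specific mmu_mark_initmem_nx(); only page-mapped init
 * text needs the per-page protection change here.
 */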
void mark_initmem_nx(void)
{
	unsigned long numpages = PFN_UP((unsigned long)_einittext) -
				 PFN_DOWN((unsigned long)_sinittext);

	mmu_mark_initmem_nx();

	if (!v_block_mapped((unsigned long)_sinittext)) {
		set_memory_nx((unsigned long)_sinittext, numpages);
		set_memory_rw((unsigned long)_sinittext, numpages);
	}
}

#ifdef CONFIG_STRICT_KERNEL_RWX
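/*
 * Make kernel text and rodata read-only.  If the kernel is covered by
 * a block mapping, the MMU-specific code flips the protection on the
 * whole block; otherwise each page from _stext to __end_rodata is
 * updated individually.  Either way, finish with a W^X sanity check.
 */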
void mark_rodata_ro(void)
{
	unsigned long numpages;

	if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX) && mmu_has_feature(MMU_FTR_HPTE_TABLE))
		pr_warn("This platform has HASH MMU, STRICT_MODULE_RWX won't work\n");

	if (v_block_mapped((unsigned long)_stext + 1)) {
		mmu_mark_rodata_ro();
		ptdump_check_wx();
		return;
	}

	/*
	 * Mark text and rodata as read-only.  __end_rodata is set by
	 * powerpc's linker script and includes tables and data
	 * requiring relocation which are not put in RO_DATA.
	 */
	numpages = PFN_UP((unsigned long)__end_rodata) -
		   PFN_DOWN((unsigned long)_stext);

	set_memory_ro((unsigned long)_stext, numpages);

	// mark_initmem_nx() should have already run by now
	ptdump_check_wx();
}
#endif /* CONFIG_STRICT_KERNEL_RWX */

#if defined(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) && defined(CONFIG_DEBUG_PAGEALLOC)
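/*
 * DEBUG_PAGEALLOC hook: map or unmap pages in the linear mapping so
 * that use-after-free accesses fault immediately.  Highmem pages have
 * no permanent kernel mapping, so there is nothing to change for them.
 */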
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	unsigned long addr = (unsigned long)page_address(page);

	if (PageHighMem(page))
		return;

	if (enable)
		set_memory_p(addr, numpages);
	else
		set_memory_np(addr, numpages);
}
#endif /* CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC && CONFIG_DEBUG_PAGEALLOC */
