xref: /kernel/linux/linux-5.10/arch/riscv/mm/init.c (revision 8c2ecf20)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 */

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/sizes.h>
#include <linux/of_fdt.h>
#include <linux/libfdt.h>
#include <linux/set_memory.h>
#include <linux/dma-map-ops.h>

#include <asm/fixmap.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/soc.h>
#include <asm/io.h>
#include <asm/ptdump.h>

#include "../kernel/head.h"

unsigned long kernel_virt_addr = KERNEL_LINK_ADDR;
EXPORT_SYMBOL(kernel_virt_addr);

unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
							__page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);

extern char _start[];
#define DTB_EARLY_BASE_VA      PGDIR_SIZE
void *dtb_early_va __initdata;
uintptr_t dtb_early_pa __initdata;

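/*
 * Page table allocation hooks. The implementations are swapped as boot
 * progresses: early (pre-MMU), fixmap-based (MMU on, before the final
 * page table is live) and late (generic page allocator).
 */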
struct pt_alloc_ops {
	pte_t *(*get_pte_virt)(phys_addr_t pa);
	phys_addr_t (*alloc_pte)(uintptr_t va);
#ifndef __PAGETABLE_PMD_FOLDED
	pmd_t *(*get_pmd_virt)(phys_addr_t pa);
	phys_addr_t (*alloc_pmd)(uintptr_t va);
#endif
};

static phys_addr_t dma32_phys_limit __ro_after_init;

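/* Hand the zone layout (the DMA32/NORMAL split) to the core MM. */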
static void __init zone_sizes_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0, };

#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32] = PFN_DOWN(dma32_phys_limit);
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;

	free_area_init(max_zone_pfns);
}

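/* Clear the page that backs zero-filled mappings (empty_zero_page). */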
static void setup_zero_page(void)
{
	memset((void *)empty_zero_page, 0, PAGE_SIZE);
}

#if defined(CONFIG_MMU) && defined(CONFIG_DEBUG_VM)
static inline void print_mlk(char *name, unsigned long b, unsigned long t)
{
	pr_notice("%12s : 0x%08lx - 0x%08lx   (%4ld kB)\n", name, b, t,
		  (((t) - (b)) >> 10));
}

static inline void print_mlm(char *name, unsigned long b, unsigned long t)
{
	pr_notice("%12s : 0x%08lx - 0x%08lx   (%4ld MB)\n", name, b, t,
		  (((t) - (b)) >> 20));
}

static void print_vm_layout(void)
{
	pr_notice("Virtual kernel memory layout:\n");
	print_mlk("fixmap", (unsigned long)FIXADDR_START,
		  (unsigned long)FIXADDR_TOP);
	print_mlm("pci io", (unsigned long)PCI_IO_START,
		  (unsigned long)PCI_IO_END);
	print_mlm("vmemmap", (unsigned long)VMEMMAP_START,
		  (unsigned long)VMEMMAP_END);
	print_mlm("vmalloc", (unsigned long)VMALLOC_START,
		  (unsigned long)VMALLOC_END);
	print_mlm("lowmem", (unsigned long)PAGE_OFFSET,
		  (unsigned long)high_memory);
#ifdef CONFIG_64BIT
	print_mlm("kernel", (unsigned long)KERNEL_LINK_ADDR,
		  (unsigned long)ADDRESS_SPACE_END);
#endif
}
#else
static void print_vm_layout(void) { }
#endif /* CONFIG_MMU && CONFIG_DEBUG_VM */

void __init mem_init(void)
{
#ifdef CONFIG_FLATMEM
	BUG_ON(!mem_map);
#endif /* CONFIG_FLATMEM */

	high_memory = (void *)(__va(PFN_PHYS(max_low_pfn)));
	memblock_free_all();

	mem_init_print_info(NULL);
	print_vm_layout();
}

#ifdef CONFIG_BLK_DEV_INITRD
static void __init setup_initrd(void)
{
	phys_addr_t start;
	unsigned long size;

	/* Ignore the virtual address computed during device tree parsing */
	initrd_start = initrd_end = 0;

	if (!phys_initrd_size)
		return;
	/*
	 * Round the memory region to page boundaries as per free_initrd_mem().
	 * This allows us to detect whether the pages overlapping the initrd
	 * are in use, but more importantly, reserves the entire set of pages
	 * as we don't want these pages allocated for other purposes.
	 */
	start = round_down(phys_initrd_start, PAGE_SIZE);
	size = phys_initrd_size + (phys_initrd_start - start);
	size = round_up(size, PAGE_SIZE);

	if (!memblock_is_region_memory(start, size)) {
		pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region",
		       (u64)start, size);
		goto disable;
	}

	if (memblock_is_region_reserved(start, size)) {
		pr_err("INITRD: 0x%08llx+0x%08lx overlaps in-use memory region\n",
		       (u64)start, size);
		goto disable;
	}

	memblock_reserve(start, size);
	/* Now convert initrd to virtual addresses */
	initrd_start = (unsigned long)__va(phys_initrd_start);
	initrd_end = initrd_start + phys_initrd_size;
	initrd_below_start_ok = 1;

	pr_info("Initial ramdisk at: 0x%p (%lu bytes)\n",
		(void *)(initrd_start), size);
	return;
disable:
	pr_cont(" - disabling initrd\n");
	initrd_start = 0;
	initrd_end = 0;
}
#endif /* CONFIG_BLK_DEV_INITRD */

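/*
 * Reserve the kernel image, the initrd and the DTB in memblock, cap the
 * usable memory, and derive max_pfn and the DMA32 limit used later by
 * zone_sizes_init().
 */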
void __init setup_bootmem(void)
{
	phys_addr_t dram_end;
	phys_addr_t vmlinux_end = __pa_symbol(&_end);
	phys_addr_t vmlinux_start = __pa_symbol(&_start);
	phys_addr_t max_mapped_addr = __pa(~(ulong)0);

	/*
	 * Reserve from the start of the kernel to the end of the kernel
	 */
#if defined(CONFIG_64BIT) && defined(CONFIG_STRICT_KERNEL_RWX)
	/*
	 * Make sure we align the reservation on PMD_SIZE since we will
	 * map the kernel in the linear mapping as read-only: we do not want
	 * any allocation to happen between _end and the next pmd aligned page.
	 */
	vmlinux_end = (vmlinux_end + PMD_SIZE - 1) & PMD_MASK;
#endif
	memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);

	/*
	 * The maximal physical memory size is -PAGE_OFFSET: make sure that
	 * any memory beyond that limit is removed, as it is unusable by the
	 * kernel.
	 */
	memblock_enforce_memory_limit(-PAGE_OFFSET);

	dram_end = memblock_end_of_DRAM();

	/*
	 * The memblock allocator is not aware that the last 4K bytes of
	 * addressable memory cannot be mapped because of the IS_ERR_VALUE
	 * macro. Make sure that the last 4K bytes are not usable by memblock
	 * if the end of DRAM is equal to the maximum addressable memory.
	 */
	if (max_mapped_addr == (dram_end - 1))
		memblock_set_current_limit(max_mapped_addr - 4096);

	max_pfn = PFN_DOWN(dram_end);
	max_low_pfn = max_pfn;
	dma32_phys_limit = min(4UL * SZ_1G, (unsigned long)PFN_PHYS(max_low_pfn));
	set_max_mapnr(max_low_pfn);

#ifdef CONFIG_BLK_DEV_INITRD
	setup_initrd();
#endif /* CONFIG_BLK_DEV_INITRD */

	/*
	 * Avoid using early_init_fdt_reserve_self() since __pa() does
	 * not work for DTB pointers that are fixmap addresses.
	 */
	memblock_reserve(dtb_early_pa, fdt_totalsize(dtb_early_va));

	dma_contiguous_reserve(dma32_phys_limit);
	memblock_allow_resize();
	memblock_dump_all();
}

#ifdef CONFIG_MMU
static struct pt_alloc_ops pt_ops;

/* Offset between linear mapping virtual address and kernel load address */
unsigned long va_pa_offset;
EXPORT_SYMBOL(va_pa_offset);
#ifdef CONFIG_64BIT
/* Offset between kernel mapping virtual address and kernel load address */
unsigned long va_kernel_pa_offset;
EXPORT_SYMBOL(va_kernel_pa_offset);
#endif
unsigned long pfn_base;
EXPORT_SYMBOL(pfn_base);

pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
pgd_t trampoline_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
pte_t fixmap_pte[PTRS_PER_PTE] __page_aligned_bss;

pgd_t early_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);

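/* Install (or, with an empty prot, clear) the PTE for a fixmap slot. */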
void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *ptep;

	BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);

	ptep = &fixmap_pte[pte_index(addr)];

	if (pgprot_val(prot))
		set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, prot));
	else
		pte_clear(&init_mm, addr, ptep);
	local_flush_tlb_page(addr);
}

static inline pte_t *__init get_pte_virt_early(phys_addr_t pa)
{
	return (pte_t *)((uintptr_t)pa);
}

static inline pte_t *__init get_pte_virt_fixmap(phys_addr_t pa)
{
	clear_fixmap(FIX_PTE);
	return (pte_t *)set_fixmap_offset(FIX_PTE, pa);
}

static inline pte_t *get_pte_virt_late(phys_addr_t pa)
{
	return (pte_t *) __va(pa);
}

static inline phys_addr_t __init alloc_pte_early(uintptr_t va)
{
	/*
	 * We only create PMD or PGD early mappings so we
	 * should never reach here with MMU disabled.
	 */
	BUG();
}

static inline phys_addr_t __init alloc_pte_fixmap(uintptr_t va)
{
	return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
}

static phys_addr_t alloc_pte_late(uintptr_t va)
{
	unsigned long vaddr;

	vaddr = __get_free_page(GFP_KERNEL);
	if (!vaddr || !pgtable_pte_page_ctor(virt_to_page(vaddr)))
		BUG();
	return __pa(vaddr);
}

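/* Fill in a single PTE; an already-present entry is left untouched. */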
static void __init create_pte_mapping(pte_t *ptep,
				      uintptr_t va, phys_addr_t pa,
				      phys_addr_t sz, pgprot_t prot)
{
	uintptr_t pte_idx = pte_index(va);

	BUG_ON(sz != PAGE_SIZE);

	if (pte_none(ptep[pte_idx]))
		ptep[pte_idx] = pfn_pte(PFN_DOWN(pa), prot);
}

#ifndef __PAGETABLE_PMD_FOLDED

pmd_t trampoline_pmd[PTRS_PER_PMD] __page_aligned_bss;
pmd_t fixmap_pmd[PTRS_PER_PMD] __page_aligned_bss;
pmd_t early_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE);
pmd_t early_dtb_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE);

static pmd_t *__init get_pmd_virt_early(phys_addr_t pa)
{
	/* Before MMU is enabled */
	return (pmd_t *)((uintptr_t)pa);
}

static pmd_t *__init get_pmd_virt_fixmap(phys_addr_t pa)
{
	clear_fixmap(FIX_PMD);
	return (pmd_t *)set_fixmap_offset(FIX_PMD, pa);
}

static pmd_t *get_pmd_virt_late(phys_addr_t pa)
{
	return (pmd_t *) __va(pa);
}

static phys_addr_t __init alloc_pmd_early(uintptr_t va)
{
	BUG_ON((va - kernel_virt_addr) >> PGDIR_SHIFT);
	return (uintptr_t)early_pmd;
}

static phys_addr_t __init alloc_pmd_fixmap(uintptr_t va)
{
	return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
}

static phys_addr_t alloc_pmd_late(uintptr_t va)
{
	unsigned long vaddr;

	vaddr = __get_free_page(GFP_KERNEL);
	BUG_ON(!vaddr);
	return __pa(vaddr);
}

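/*
 * Map at the PMD level: install a PMD_SIZE leaf when sz == PMD_SIZE,
 * otherwise descend into (allocating if necessary) the PTE table below it.
 */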
static void __init create_pmd_mapping(pmd_t *pmdp,
				      uintptr_t va, phys_addr_t pa,
				      phys_addr_t sz, pgprot_t prot)
{
	pte_t *ptep;
	phys_addr_t pte_phys;
	uintptr_t pmd_idx = pmd_index(va);

	if (sz == PMD_SIZE) {
		if (pmd_none(pmdp[pmd_idx]))
			pmdp[pmd_idx] = pfn_pmd(PFN_DOWN(pa), prot);
		return;
	}

	if (pmd_none(pmdp[pmd_idx])) {
		pte_phys = pt_ops.alloc_pte(va);
		pmdp[pmd_idx] = pfn_pmd(PFN_DOWN(pte_phys), PAGE_TABLE);
		ptep = pt_ops.get_pte_virt(pte_phys);
		memset(ptep, 0, PAGE_SIZE);
	} else {
		pte_phys = PFN_PHYS(_pmd_pfn(pmdp[pmd_idx]));
		ptep = pt_ops.get_pte_virt(pte_phys);
	}

	create_pte_mapping(ptep, va, pa, sz, prot);
}

#define pgd_next_t		pmd_t
#define alloc_pgd_next(__va)	pt_ops.alloc_pmd(__va)
#define get_pgd_next_virt(__pa)	pt_ops.get_pmd_virt(__pa)
#define create_pgd_next_mapping(__nextp, __va, __pa, __sz, __prot)	\
	create_pmd_mapping(__nextp, __va, __pa, __sz, __prot)
#define fixmap_pgd_next		fixmap_pmd
#else
#define pgd_next_t		pte_t
#define alloc_pgd_next(__va)	pt_ops.alloc_pte(__va)
#define get_pgd_next_virt(__pa)	pt_ops.get_pte_virt(__pa)
#define create_pgd_next_mapping(__nextp, __va, __pa, __sz, __prot)	\
	create_pte_mapping(__nextp, __va, __pa, __sz, __prot)
#define fixmap_pgd_next		fixmap_pte
#endif

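/*
 * Top-level mapping helper: install a PGDIR_SIZE leaf when possible,
 * otherwise descend through pgd_next_t (a PMD or PTE table, depending
 * on whether the PMD level is folded) via the macros above.
 */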
void __init create_pgd_mapping(pgd_t *pgdp,
			       uintptr_t va, phys_addr_t pa,
			       phys_addr_t sz, pgprot_t prot)
{
	pgd_next_t *nextp;
	phys_addr_t next_phys;
	uintptr_t pgd_idx = pgd_index(va);

	if (sz == PGDIR_SIZE) {
		if (pgd_val(pgdp[pgd_idx]) == 0)
			pgdp[pgd_idx] = pfn_pgd(PFN_DOWN(pa), prot);
		return;
	}

	if (pgd_val(pgdp[pgd_idx]) == 0) {
		next_phys = alloc_pgd_next(va);
		pgdp[pgd_idx] = pfn_pgd(PFN_DOWN(next_phys), PAGE_TABLE);
		nextp = get_pgd_next_virt(next_phys);
		memset(nextp, 0, PAGE_SIZE);
	} else {
		next_phys = PFN_PHYS(_pgd_pfn(pgdp[pgd_idx]));
		nextp = get_pgd_next_virt(next_phys);
	}

	create_pgd_next_mapping(nextp, va, pa, sz, prot);
}

static uintptr_t __init best_map_size(phys_addr_t base, phys_addr_t size)
{
	/* Upgrade to PMD_SIZE mappings whenever possible */
	if ((base & (PMD_SIZE - 1)) || (size & (PMD_SIZE - 1)))
		return PAGE_SIZE;

	return PMD_SIZE;
}

/*
 * setup_vm() is called from head.S with the MMU off.
 *
 * The following requirements should be honoured for setup_vm() to work
 * correctly:
 * 1) It should use PC-relative addressing for accessing kernel symbols.
 *    To achieve this we always use GCC cmodel=medany.
 * 2) The compiler instrumentation for FTRACE will not work for setup_vm(),
 *    so disable compiler instrumentation when FTRACE is enabled.
 *
 * Currently, the above requirements are honoured by using custom CFLAGS
 * for init.o in mm/Makefile.
 */

#ifndef __riscv_cmodel_medany
#error "setup_vm() is called from head.S before relocate so it should not use absolute addressing."
#endif

uintptr_t load_pa, load_sz;

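/*
 * Map the kernel image (load_pa .. load_pa + load_sz) at kernel_virt_addr
 * in map_size steps, with PAGE_KERNEL_EXEC permissions.
 */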
static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size)
{
	uintptr_t va, end_va;

	end_va = kernel_virt_addr + load_sz;
	for (va = kernel_virt_addr; va < end_va; va += map_size)
		create_pgd_mapping(pgdir, va,
				   load_pa + (va - kernel_virt_addr),
				   map_size, PAGE_KERNEL_EXEC);
}

asmlinkage void __init setup_vm(uintptr_t dtb_pa)
{
	uintptr_t pa;
	uintptr_t map_size;
#ifndef __PAGETABLE_PMD_FOLDED
	pmd_t fix_bmap_spmd, fix_bmap_epmd;
#endif
	load_pa = (uintptr_t)(&_start);
	load_sz = (uintptr_t)(&_end) - load_pa;

	va_pa_offset = PAGE_OFFSET - load_pa;
#ifdef CONFIG_64BIT
	va_kernel_pa_offset = kernel_virt_addr - load_pa;
#endif

	pfn_base = PFN_DOWN(load_pa);

	/*
	 * Enforce boot alignment requirements of RV32 and
	 * RV64 by only allowing PMD or PGD mappings.
	 */
	map_size = PMD_SIZE;

	/* Sanity check alignment and size */
	BUG_ON((PAGE_OFFSET % PGDIR_SIZE) != 0);
	BUG_ON((load_pa % map_size) != 0);

	pt_ops.alloc_pte = alloc_pte_early;
	pt_ops.get_pte_virt = get_pte_virt_early;
#ifndef __PAGETABLE_PMD_FOLDED
	pt_ops.alloc_pmd = alloc_pmd_early;
	pt_ops.get_pmd_virt = get_pmd_virt_early;
#endif
	/* Setup early PGD for fixmap */
	create_pgd_mapping(early_pg_dir, FIXADDR_START,
			   (uintptr_t)fixmap_pgd_next, PGDIR_SIZE, PAGE_TABLE);

#ifndef __PAGETABLE_PMD_FOLDED
	/* Setup fixmap PMD */
	create_pmd_mapping(fixmap_pmd, FIXADDR_START,
			   (uintptr_t)fixmap_pte, PMD_SIZE, PAGE_TABLE);
	/* Setup trampoline PGD and PMD */
	create_pgd_mapping(trampoline_pg_dir, kernel_virt_addr,
			   (uintptr_t)trampoline_pmd, PGDIR_SIZE, PAGE_TABLE);
	create_pmd_mapping(trampoline_pmd, kernel_virt_addr,
			   load_pa, PMD_SIZE, PAGE_KERNEL_EXEC);
#else
	/* Setup trampoline PGD */
	create_pgd_mapping(trampoline_pg_dir, kernel_virt_addr,
			   load_pa, PGDIR_SIZE, PAGE_KERNEL_EXEC);
#endif

	/*
	 * Setup early PGD covering the entire kernel which will allow
	 * us to reach paging_init(). We map all memory banks later
	 * in setup_vm_final() below.
	 */
	create_kernel_page_table(early_pg_dir, map_size);

#ifndef __PAGETABLE_PMD_FOLDED
	/* Setup early PMD for DTB */
	create_pgd_mapping(early_pg_dir, DTB_EARLY_BASE_VA,
			   (uintptr_t)early_dtb_pmd, PGDIR_SIZE, PAGE_TABLE);
#ifndef CONFIG_BUILTIN_DTB
	/* Create two consecutive PMD mappings for FDT early scan */
	pa = dtb_pa & ~(PMD_SIZE - 1);
	create_pmd_mapping(early_dtb_pmd, DTB_EARLY_BASE_VA,
			   pa, PMD_SIZE, PAGE_KERNEL);
	create_pmd_mapping(early_dtb_pmd, DTB_EARLY_BASE_VA + PMD_SIZE,
			   pa + PMD_SIZE, PMD_SIZE, PAGE_KERNEL);
	dtb_early_va = (void *)DTB_EARLY_BASE_VA + (dtb_pa & (PMD_SIZE - 1));
#else /* CONFIG_BUILTIN_DTB */
#ifdef CONFIG_64BIT
	/*
	 * __va can't be used since it would return a linear mapping address
	 * whereas dtb_early_va will be used before setup_vm_final installs
	 * the linear mapping.
	 */
	dtb_early_va = kernel_mapping_pa_to_va(dtb_pa);
#else
	dtb_early_va = __va(dtb_pa);
#endif /* CONFIG_64BIT */
#endif /* CONFIG_BUILTIN_DTB */
#else
#ifndef CONFIG_BUILTIN_DTB
	/* Create two consecutive PGD mappings for FDT early scan */
	pa = dtb_pa & ~(PGDIR_SIZE - 1);
	create_pgd_mapping(early_pg_dir, DTB_EARLY_BASE_VA,
			   pa, PGDIR_SIZE, PAGE_KERNEL);
	create_pgd_mapping(early_pg_dir, DTB_EARLY_BASE_VA + PGDIR_SIZE,
			   pa + PGDIR_SIZE, PGDIR_SIZE, PAGE_KERNEL);
	dtb_early_va = (void *)DTB_EARLY_BASE_VA + (dtb_pa & (PGDIR_SIZE - 1));
#else /* CONFIG_BUILTIN_DTB */
#ifdef CONFIG_64BIT
	dtb_early_va = kernel_mapping_pa_to_va(dtb_pa);
#else
	dtb_early_va = __va(dtb_pa);
#endif /* CONFIG_64BIT */
#endif /* CONFIG_BUILTIN_DTB */
#endif
	dtb_early_pa = dtb_pa;

	/*
	 * The boot-time fixmap can only handle PMD_SIZE mappings. Thus, the
	 * boot-ioremap range cannot span multiple PMDs.
	 */
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));

#ifndef __PAGETABLE_PMD_FOLDED
	/*
	 * The early ioremap fixmap is already created as it lies within the
	 * first 2MB of the fixmap region. We always map PMD_SIZE, so both
	 * FIX_BTMAP_END and FIX_BTMAP_BEGIN should lie in the same PMD.
	 * Verify that and warn the user if not.
	 */
	fix_bmap_spmd = fixmap_pmd[pmd_index(__fix_to_virt(FIX_BTMAP_BEGIN))];
	fix_bmap_epmd = fixmap_pmd[pmd_index(__fix_to_virt(FIX_BTMAP_END))];
	if (pmd_val(fix_bmap_spmd) != pmd_val(fix_bmap_epmd)) {
		WARN_ON(1);
		pr_warn("fixmap btmap start [%08lx] != end [%08lx]\n",
			pmd_val(fix_bmap_spmd), pmd_val(fix_bmap_epmd));
		pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		pr_warn("fix_to_virt(FIX_BTMAP_END):   %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		pr_warn("FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		pr_warn("FIX_BTMAP_BEGIN:     %d\n", FIX_BTMAP_BEGIN);
	}
#endif
}

#ifdef CONFIG_64BIT
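/*
 * On 64-bit the kernel executes from its own mapping, so the linear
 * mapping aliases of text and rodata can be made read-only and
 * non-executable.
 */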
void protect_kernel_linear_mapping_text_rodata(void)
{
	unsigned long text_start = (unsigned long)lm_alias(_start);
	unsigned long init_text_start = (unsigned long)lm_alias(__init_text_begin);
	unsigned long rodata_start = (unsigned long)lm_alias(__start_rodata);
	unsigned long data_start = (unsigned long)lm_alias(_data);

	set_memory_ro(text_start, (init_text_start - text_start) >> PAGE_SHIFT);
	set_memory_nx(text_start, (init_text_start - text_start) >> PAGE_SHIFT);

	set_memory_ro(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);
	set_memory_nx(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);
}
#endif

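/*
 * Build the final page table in swapper_pg_dir: the fixmap, the linear
 * mapping of all memory banks and (on 64-bit) the kernel mapping, then
 * switch satp over to it.
 */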
static void __init setup_vm_final(void)
{
	uintptr_t va, map_size;
	phys_addr_t pa, start, end;
	u64 i;

	/*
	 * The MMU is enabled at this point, but the page table setup is not
	 * complete yet, so the fixmap-based page table alloc functions must
	 * be used.
	 */
	pt_ops.alloc_pte = alloc_pte_fixmap;
	pt_ops.get_pte_virt = get_pte_virt_fixmap;
#ifndef __PAGETABLE_PMD_FOLDED
	pt_ops.alloc_pmd = alloc_pmd_fixmap;
	pt_ops.get_pmd_virt = get_pmd_virt_fixmap;
#endif
	/* Setup swapper PGD for fixmap */
	create_pgd_mapping(swapper_pg_dir, FIXADDR_START,
			   __pa_symbol(fixmap_pgd_next),
			   PGDIR_SIZE, PAGE_TABLE);

	/* Map all memory banks in the linear mapping */
	for_each_mem_range(i, &start, &end) {
		if (start >= end)
			break;
		if (start <= __pa(PAGE_OFFSET) &&
		    __pa(PAGE_OFFSET) < end)
			start = __pa(PAGE_OFFSET);

		map_size = best_map_size(start, end - start);
		for (pa = start; pa < end; pa += map_size) {
			va = (uintptr_t)__va(pa);
			create_pgd_mapping(swapper_pg_dir, va, pa,
					   map_size,
#ifdef CONFIG_64BIT
					   PAGE_KERNEL
#else
					   PAGE_KERNEL_EXEC
#endif
					);
		}
	}

#ifdef CONFIG_64BIT
	/* Map the kernel */
	create_kernel_page_table(swapper_pg_dir, PMD_SIZE);
#endif

	/* Clear fixmap PTE and PMD mappings */
	clear_fixmap(FIX_PTE);
	clear_fixmap(FIX_PMD);

	/* Move to swapper page table */
	csr_write(CSR_SATP, PFN_DOWN(__pa_symbol(swapper_pg_dir)) | SATP_MODE);
	local_flush_tlb_all();

	/* The generic page allocation functions must be used from now on */
	pt_ops.alloc_pte = alloc_pte_late;
	pt_ops.get_pte_virt = get_pte_virt_late;
#ifndef __PAGETABLE_PMD_FOLDED
	pt_ops.alloc_pmd = alloc_pmd_late;
	pt_ops.get_pmd_virt = get_pmd_virt_late;
#endif
}
#else
asmlinkage void __init setup_vm(uintptr_t dtb_pa)
{
#ifdef CONFIG_BUILTIN_DTB
	dtb_early_va = soc_lookup_builtin_dtb();
	if (!dtb_early_va) {
		/* Fall back to the first available DTB */
		dtb_early_va = (void *) __dtb_start;
	}
#else
	dtb_early_va = (void *)dtb_pa;
#endif
	dtb_early_pa = dtb_pa;
}

static inline void setup_vm_final(void)
{
}
#endif /* CONFIG_MMU */

#ifdef CONFIG_STRICT_KERNEL_RWX
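/*
 * Enforce strict kernel RWX: text becomes read-only, while init data,
 * rodata and data become non-executable. rodata itself is made
 * read-only later, in mark_rodata_ro().
 */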
void protect_kernel_text_data(void)
{
	unsigned long text_start = (unsigned long)_start;
	unsigned long init_text_start = (unsigned long)__init_text_begin;
	unsigned long init_data_start = (unsigned long)__init_data_begin;
	unsigned long rodata_start = (unsigned long)__start_rodata;
	unsigned long data_start = (unsigned long)_data;
#if defined(CONFIG_64BIT) && defined(CONFIG_MMU)
	unsigned long end_va = kernel_virt_addr + load_sz;
#else
	unsigned long end_va = (unsigned long)(__va(PFN_PHYS(max_low_pfn)));
#endif

	set_memory_ro(text_start, (init_text_start - text_start) >> PAGE_SHIFT);
	set_memory_ro(init_text_start, (init_data_start - init_text_start) >> PAGE_SHIFT);
	set_memory_nx(init_data_start, (rodata_start - init_data_start) >> PAGE_SHIFT);
	/* rodata section is marked readonly in mark_rodata_ro */
	set_memory_nx(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);
	set_memory_nx(data_start, (end_va - data_start) >> PAGE_SHIFT);
}

void mark_rodata_ro(void)
{
	unsigned long rodata_start = (unsigned long)__start_rodata;
	unsigned long data_start = (unsigned long)_data;

	set_memory_ro(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);

	debug_checkwx();
}
#endif

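/*
 * Register each memblock region with the resource tree so that it shows
 * up in /proc/iomem, as "System RAM" or as "reserved" for nomap regions.
 */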
static void __init resource_init(void)
{
	struct memblock_region *region;

	for_each_mem_region(region) {
		struct resource *res;

		res = memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES);
		if (!res)
			panic("%s: Failed to allocate %zu bytes\n", __func__,
			      sizeof(struct resource));

		if (memblock_is_nomap(region)) {
			res->name = "reserved";
			res->flags = IORESOURCE_MEM;
		} else {
			res->name = "System RAM";
			res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
		}
		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;

		request_resource(&iomem_resource, res);
	}
}

void __init paging_init(void)
{
	setup_vm_final();
	setup_zero_page();
}

void __init misc_mem_init(void)
{
	sparse_init();
	zone_sizes_init();
	resource_init();
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
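/* No huge-page vmemmap support: populate the vmemmap with base pages. */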
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
			       struct vmem_altmap *altmap)
{
	return vmemmap_populate_basepages(start, end, node, NULL);
}
#endif
781