// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020 Loongson Technology Corporation Limited
 */
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/pfn.h>
#include <linux/hardirq.h>
#include <linux/gfp.h>
#include <linux/hugetlb.h>
#include <linux/mmzone.h>

#include <asm/asm-offsets.h>
#include <asm/bootinfo.h>
#include <asm/cpu.h>
#include <asm/dma.h>
#include <asm/kmap_types.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>

unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);

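/*
 * Copy one user page to another through short-lived kernel mappings of
 * both pages; the final barrier orders the copy before the new page can
 * be observed by other CPUs.
 */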
void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *vfrom, *vto;

	vto = kmap_atomic(to);
	vfrom = kmap_atomic(from);
	copy_page(vto, vfrom);
	kunmap_atomic(vfrom);
	kunmap_atomic(vto);
	/* Make sure the copied data is visible to other CPUs before this page is used */
	smp_wmb();
}

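/* True if the pfn is usable RAM: known to memblock and not reserved. */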
int __ref page_is_ram(unsigned long pfn)
{
	unsigned long addr = PFN_PHYS(pfn);

	return memblock_is_memory(addr) && !memblock_is_reserved(addr);
}

#ifndef CONFIG_NEED_MULTIPLE_NODES
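/*
 * For the flat (non-NUMA) memory model, set up the maximum pfn of each
 * zone and let the core build the free lists.
 */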
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];

#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
#endif
#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;

	free_area_init(max_zone_pfns);
}

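/*
 * Hand all memblock-managed memory over to the buddy allocator and print
 * the usual memory-init banner.
 */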
void __init mem_init(void)
{
	max_mapnr = max_low_pfn;
	high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);

	memblock_free_all();
	mem_init_print_info(NULL);
}
#endif /* !CONFIG_NEED_MULTIPLE_NODES */

void __ref free_initmem(void)
{
	free_initmem_default(POISON_FREE_INITMEM);
}

#ifdef CONFIG_MEMORY_HOTPLUG
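/*
 * Hot-add a physical range: hand it to the generic __add_pages() and log
 * any failure.
 */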
int arch_add_memory(int nid, u64 start, u64 size, struct mhp_params *params)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	ret = __add_pages(nid, start_pfn, nr_pages, params);

	if (ret)
		pr_err("%s: __add_pages() failed, ret = %d\n", __func__, ret);

	return ret;
}

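/*
 * Hot-remove a physical range.  With an altmap the first pages of the
 * range back the vmemmap itself, so the usable struct pages start at an
 * offset from @start.
 */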
void arch_remove_memory(int nid, u64 start,
		u64 size, struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	struct page *page = pfn_to_page(start_pfn);

	/* With altmap the first mapped page is offset from @start */
	if (altmap)
		page += vmem_altmap_offset(altmap);
	__remove_pages(start_pfn, nr_pages, altmap);
}

#ifdef CONFIG_NUMA
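/* Map a hot-added physical address to the NUMA node that owns it. */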
int memory_add_physaddr_to_nid(u64 start)
{
	return pa_to_nid(start);
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif /* CONFIG_NUMA */
#endif /* CONFIG_MEMORY_HOTPLUG */

#ifdef CONFIG_SPARSEMEM_VMEMMAP
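/*
 * Helpers that build the virtually mapped memmap (the struct page array)
 * for SPARSEMEM_VMEMMAP.  Each level of the page-table hierarchy gets its
 * own populate helper; the top-level entry points map the region with
 * huge (PMD-sized) pages where possible and fall back to base pages.
 */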
void __meminit arch_vmemmap_verify(pte_t *pte, int node,
				unsigned long start, unsigned long end)
{
	unsigned long pfn = pte_pfn(*pte);
	int actual_node = early_pfn_to_nid(pfn);

	if (node_distance(actual_node, node) > LOCAL_DISTANCE)
		pr_warn("[%lx-%lx] potential offnode page_structs\n",
			start, end - 1);
}

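/*
 * vmemmap_alloc_block() gives back a block that is not necessarily
 * zeroed, so clear it before it is used as a page table or memmap
 * backing.
 */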
void * __meminit arch_vmemmap_alloc_block_zero(unsigned long size, int node)
{
	void *p = vmemmap_alloc_block(size, node);

	if (!p)
		return NULL;
	memset(p, 0, size);

	return p;
}

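/*
 * Per-level populate helpers: return the entry for @addr at that level,
 * allocating and installing a zeroed table page if the level is empty.
 */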
pte_t * __meminit arch_vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node)
{
	pte_t *pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte)) {
		pte_t entry;
		void *p = arch_vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
		set_pte_at(&init_mm, addr, pte, entry);
	}
	return pte;
}

pmd_t * __meminit arch_vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node)
{
	pmd_t *pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd)) {
		void *p = arch_vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pmd_populate_kernel(&init_mm, pmd, p);
	}
	return pmd;
}

pud_t * __meminit arch_vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node)
{
	pud_t *pud = pud_offset(p4d, addr);
	if (pud_none(*pud)) {
		void *p = arch_vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
#ifndef __PAGETABLE_PMD_FOLDED
		pmd_init((unsigned long)p, (unsigned long)invalid_pte_table);
#endif
		pud_populate(&init_mm, pud, p);
	}
	return pud;
}

p4d_t * __meminit arch_vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node)
{
	p4d_t *p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d)) {
		void *p = arch_vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
#ifndef __PAGETABLE_PUD_FOLDED
		pud_init((unsigned long)p, (unsigned long)invalid_pmd_table);
#endif
		p4d_populate(&init_mm, p4d, p);
	}
	return p4d;
}

pgd_t * __meminit arch_vmemmap_pgd_populate(unsigned long addr, int node)
{
	pgd_t *pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		void *p = arch_vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pgd_populate(&init_mm, pgd, p);
	}
	return pgd;
}

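/*
 * Map [start, end) of the vmemmap with base (PAGE_SIZE) pages, populating
 * every intermediate page-table level on demand.
 */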
int __meminit arch_vmemmap_populate_basepages(unsigned long start,
					 unsigned long end, int node)
{
	unsigned long addr = start;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	for (; addr < end; addr += PAGE_SIZE) {
		pgd = arch_vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;
		p4d = arch_vmemmap_p4d_populate(pgd, addr, node);
		if (!p4d)
			return -ENOMEM;
		pud = arch_vmemmap_pud_populate(p4d, addr, node);
		if (!pud)
			return -ENOMEM;
		pmd = arch_vmemmap_pmd_populate(pud, addr, node);
		if (!pmd)
			return -ENOMEM;
		pte = arch_vmemmap_pte_populate(pmd, addr, node);
		if (!pte)
			return -ENOMEM;
		arch_vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);
	}

	return 0;
}

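/*
 * Map the vmemmap with PMD-sized huge pages wherever a whole PMD-aligned
 * block can be allocated, falling back to base pages otherwise.
 */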
int __meminit arch_vmemmap_populate_hugepages(unsigned long start,
					 unsigned long end, int node)
{
	unsigned long addr;
	unsigned long next;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	for (addr = start; addr < end; addr = next) {
		next = pmd_addr_end(addr, end);

		pgd = arch_vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;
		p4d = arch_vmemmap_p4d_populate(pgd, addr, node);
		if (!p4d)
			return -ENOMEM;
		pud = arch_vmemmap_pud_populate(p4d, addr, node);
		if (!pud)
			return -ENOMEM;

		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd)) {
			void *p;

			p = arch_vmemmap_alloc_block_zero(PMD_SIZE, node);
			if (p) {
				pmd_t entry;

				entry = pfn_pmd(virt_to_pfn(p), PAGE_KERNEL);
				pmd_val(entry) |= _PAGE_HUGE | _PAGE_HGLOBAL;
				set_pmd_at(&init_mm, addr, pmd, entry);

				continue;
			}
		} else if (pmd_val(*pmd) & _PAGE_HUGE) {
			arch_vmemmap_verify((pte_t *)pmd, node, addr, next);
			continue;
		}
		if (arch_vmemmap_populate_basepages(addr, next, node))
			return -ENOMEM;
	}

	return 0;
}

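/*
 * Arch hooks called by the generic sparse-vmemmap code.  vmemmap_free()
 * is intentionally a no-op: the vmemmap mappings are left in place when
 * memory is removed.
 */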
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
		struct vmem_altmap *altmap)
{
	return arch_vmemmap_populate_hugepages(start, end, node);
}

void vmemmap_free(unsigned long start, unsigned long end,
		struct vmem_altmap *altmap)
{
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

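/*
 * Return the kernel pte for @addr, allocating any missing intermediate
 * page-table levels from memblock.  Early-boot (__init) only; allocation
 * failure panics.
 */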
pte_t * __init populate_kernel_pte(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);
	p4d_t *p4d = p4d_offset(pgd, addr);
	pud_t *pud;
	pmd_t *pmd;

	if (p4d_none(*p4d)) {
		pud = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
		if (!pud)
			panic("%s: Failed to allocate memory\n", __func__);
		p4d_populate(&init_mm, p4d, pud);
#ifndef __PAGETABLE_PUD_FOLDED
		pud_init((unsigned long)pud, (unsigned long)invalid_pmd_table);
#endif
	}

	pud = pud_offset(p4d, addr);
	if (pud_none(*pud)) {
		pmd = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
		if (!pmd)
			panic("%s: Failed to allocate memory\n", __func__);
		pud_populate(&init_mm, pud, pmd);
#ifndef __PAGETABLE_PMD_FOLDED
		pmd_init((unsigned long)pmd, (unsigned long)invalid_pte_table);
#endif
	}

	pmd = pmd_offset(pud, addr);
	if (!pmd_present(*pmd)) {
		pte_t *pte;

		pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
		if (!pte)
			panic("%s: Failed to allocate memory\n", __func__);
		pmd_populate_kernel(&init_mm, pmd, pte);
	}

	return pte_offset_kernel(pmd, addr);
}

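/*
 * Install or clear one fixmap entry.  A non-zero pgprot maps @phys at the
 * fixed virtual address for @idx; a zero pgprot clears the mapping and
 * flushes the TLB for that page.
 */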
void __init __set_fixmap(enum fixed_addresses idx,
			       phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *ptep;

	BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);

	ptep = populate_kernel_pte(addr);
	if (!pte_none(*ptep)) {
		pte_ERROR(*ptep);
		return;
	}

	if (pgprot_val(flags)) {
		set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, flags));
	} else {
		pte_clear(&init_mm, addr, ptep);
		flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
	}
}

/*
 * Align swapper_pg_dir to 64K so that its address can be loaded with a
 * single immediate-load instruction in the TLB handlers.  If we used
 * __aligned(64K), its size would get rounded up to the alignment size,
 * wasting space.  So we place it in its own section and align it in the
 * linker script.
 */
pgd_t swapper_pg_dir[_PTRS_PER_PGD] __section(".bss..swapper_pg_dir");

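/*
 * Shared all-invalid tables: a not-yet-populated upper-level entry points
 * at the invalid table for the next level (see pud_init()/pmd_init()
 * above), so page-table walkers see a valid empty table instead of a
 * NULL pointer.
 */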
pgd_t invalid_pg_dir[_PTRS_PER_PGD] __page_aligned_bss;
#ifndef __PAGETABLE_PUD_FOLDED
pud_t invalid_pud_table[PTRS_PER_PUD] __page_aligned_bss;
EXPORT_SYMBOL(invalid_pud_table);
#endif
#ifndef __PAGETABLE_PMD_FOLDED
pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned_bss;
EXPORT_SYMBOL(invalid_pmd_table);
#endif
pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;
EXPORT_SYMBOL(invalid_pte_table);