// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 */

#undef DEBUG

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/nodemask.h>
#include <linux/module.h>
#include <linux/poison.h>
#include <linux/memblock.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/of_fdt.h>
#include <linux/libfdt.h>
#include <linux/memremap.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/mmu.h>
#include <linux/uaccess.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/eeh.h>
#include <asm/processor.h>
#include <asm/mmzone.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/iommu.h>
#include <asm/vdso.h>

#include <mm/mmu_decl.h>

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * Given an address within the vmemmap, determine the page that
 * represents the start of the subsection it is within.  Note that we have to
 * do this by hand as the proffered address may not be correctly aligned.
 * Subtraction of non-aligned pointers produces undefined results.
 */
static struct page * __meminit vmemmap_subsection_start(unsigned long vmemmap_addr)
{
	unsigned long start_pfn;
	unsigned long offset = vmemmap_addr - ((unsigned long)(vmemmap));

	/* Return the pfn of the start of the subsection. */
	start_pfn = (offset / sizeof(struct page)) & PAGE_SUBSECTION_MASK;
	return pfn_to_page(start_pfn);
}

/*
 * Since memory is added in sub-section chunks, before creating a new vmemmap
 * mapping, the kernel should check whether there is an existing memmap mapping
 * covering the new subsection added. This is needed because the kernel can map
 * the vmemmap area using 16MB pages, each of which covers a 16G memory range.
 * Such a range covers multiple (2M) subsections.
 *
 * If any subsection in the 16G range mapped by the vmemmap is valid, we
 * consider the vmemmap populated (there is a page table entry already
 * present). We can't do a page table lookup here because with hash
 * translation we don't keep vmemmap details in the linux page table.
 */
static int __meminit vmemmap_populated(unsigned long vmemmap_addr, int vmemmap_map_size)
{
	struct page *start;
	unsigned long vmemmap_end = vmemmap_addr + vmemmap_map_size;

	start = vmemmap_subsection_start(vmemmap_addr);

	for (; (unsigned long)start < vmemmap_end; start += PAGES_PER_SUBSECTION)
		/*
		 * The pfn_valid() check here really asks whether any
		 * subsection in this range has already been initialized.
		 */
		if (pfn_valid(page_to_pfn(start)))
			return 1;

	return 0;
}

/*
 * vmemmap virtual address space management does not have a traditional page
 * table to track which virtual struct pages are backed by a physical mapping.
 * The virtual to physical mappings are instead tracked in a simple linked
 * list. 'vmemmap_list' maintains the entire vmemmap physical mapping at all
 * times, whereas the 'next' pointer tracks the available vmemmap_backing
 * structures which have been deleted from the 'vmemmap_list' during system
 * runtime (memory hotplug remove operation). The freed 'vmemmap_backing'
 * structures are reused later when new requests come in, without allocating
 * fresh memory. This pointer also tracks the allocated 'vmemmap_backing'
 * structures, as we allocate one full page of memory at a time when we
 * don't have any.
 */
struct vmemmap_backing *vmemmap_list;
static struct vmemmap_backing *next;

/*
 * The same pointer 'next' tracks individual chunks inside the allocated
 * full page during boot and then tracks the freed nodes during runtime.
 * This would be racy, but in practice the two uses never overlap because
 * they are separated by the boot process. It would be a problem if we
 * somehow had a memory hotplug operation during boot.
 */
static int num_left;
static int num_freed;

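/*
 * Hand out one vmemmap_backing structure, reusing an entry from the freed
 * list when one is available, otherwise carving chunks out of a page
 * obtained with vmemmap_alloc_block().
 */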
static __meminit struct vmemmap_backing *vmemmap_list_alloc(int node)
{
	struct vmemmap_backing *vmem_back;

	/* get from freed entries first */
	if (num_freed) {
		num_freed--;
		vmem_back = next;
		next = next->list;

		return vmem_back;
	}

	/* allocate a page when required and hand out chunks */
	if (!num_left) {
		next = vmemmap_alloc_block(PAGE_SIZE, node);
		if (unlikely(!next)) {
			WARN_ON(1);
			return NULL;
		}
		num_left = PAGE_SIZE / sizeof(struct vmemmap_backing);
	}

	num_left--;

	return next++;
}

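/*
 * Record the virtual to physical backing for a newly populated vmemmap
 * range on 'vmemmap_list' so that it can be looked up again (and the
 * backing memory freed) by vmemmap_list_free() at hot-unplug time.
 */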
static __meminit int vmemmap_list_populate(unsigned long phys,
					   unsigned long start,
					   int node)
{
	struct vmemmap_backing *vmem_back;

	vmem_back = vmemmap_list_alloc(node);
	if (unlikely(!vmem_back)) {
		pr_debug("vmemmap list allocation failed\n");
		return -ENOMEM;
	}

	vmem_back->phys = phys;
	vmem_back->virt_addr = start;
	vmem_back->list = vmemmap_list;

	vmemmap_list = vmem_back;
	return 0;
}

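/*
 * Return true if the pfn range described by the vmemmap chunk starting at
 * @start is not fully contained within the altmap's pfn range. Such a
 * chunk cannot be backed from the altmap, so the caller falls back to a
 * regular memory allocation instead.
 */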
static bool altmap_cross_boundary(struct vmem_altmap *altmap, unsigned long start,
				unsigned long page_size)
{
	unsigned long nr_pfn = page_size / sizeof(struct page);
	unsigned long start_pfn = page_to_pfn((struct page *)start);

	if ((start_pfn + nr_pfn - 1) > altmap->end_pfn)
		return true;

	if (start_pfn < altmap->base_pfn)
		return true;

	return false;
}

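/*
 * Populate the vmemmap for the range [start, end) in mmu_vmemmap_psize
 * sized steps. Chunks whose subsections are already covered by an earlier
 * mapping are skipped; new chunks are backed from the altmap when possible
 * and from regular memory otherwise, and each backing is recorded on
 * 'vmemmap_list'.
 */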
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
		struct vmem_altmap *altmap)
{
	bool altmap_alloc;
	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;

	/* Align to the page size of the linear mapping. */
	start = ALIGN_DOWN(start, page_size);

	pr_debug("vmemmap_populate %lx..%lx, node %d\n", start, end, node);

	for (; start < end; start += page_size) {
		void *p = NULL;
		int rc;

		/*
		 * This vmemmap range backs several subsections. If any of
		 * those subsections is marked valid, we have already
		 * initialized a page table covering this range and hence
		 * the vmemmap range is populated.
		 */
		if (vmemmap_populated(start, page_size))
			continue;

		/*
		 * Allocate from the altmap first if we have one. This may
		 * fail due to alignment issues when using 16MB hugepages, so
		 * fall back to system memory if the altmap allocation fails.
		 */
		if (altmap && !altmap_cross_boundary(altmap, start, page_size)) {
			p = vmemmap_alloc_block_buf(page_size, node, altmap);
			if (!p)
				pr_debug("altmap block allocation failed, falling back to system memory");
			else
				altmap_alloc = true;
		}
		if (!p) {
			p = vmemmap_alloc_block_buf(page_size, node, NULL);
			altmap_alloc = false;
		}
		if (!p)
			return -ENOMEM;

		if (vmemmap_list_populate(__pa(p), start, node)) {
			/*
			 * If we don't populate the vmemmap list, we lose
			 * the ability to free the allocated vmemmap
			 * pages in section_deactivate. Hence free them
			 * here.
			 */
			int nr_pfns = page_size >> PAGE_SHIFT;
			unsigned long page_order = get_order(page_size);

			if (altmap_alloc)
				vmem_altmap_free(altmap, nr_pfns);
			else
				free_pages((unsigned long)p, page_order);
			return -ENOMEM;
		}

		pr_debug("      * %016lx..%016lx allocated at %p\n",
			 start, start + page_size, p);

		rc = vmemmap_create_mapping(start, page_size, __pa(p));
		if (rc < 0) {
			pr_warn("%s: Unable to create vmemmap mapping: %d\n",
				__func__, rc);
			return -EFAULT;
		}
	}

	return 0;
}

#ifdef CONFIG_MEMORY_HOTPLUG
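/*
 * Unlink the vmemmap_backing entry for the vmemmap chunk at @start from
 * 'vmemmap_list' and put it on the free list for reuse. Returns the
 * physical address that backed the chunk, or 0 if no entry was found.
 */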
static unsigned long vmemmap_list_free(unsigned long start)
{
	struct vmemmap_backing *vmem_back, *vmem_back_prev;

	vmem_back_prev = vmem_back = vmemmap_list;

	/* look for it with prev pointer recorded */
	for (; vmem_back; vmem_back = vmem_back->list) {
		if (vmem_back->virt_addr == start)
			break;
		vmem_back_prev = vmem_back;
	}

	if (unlikely(!vmem_back))
		return 0;

	/* remove it from vmemmap_list */
	if (vmem_back == vmemmap_list) /* remove head */
		vmemmap_list = vmem_back->list;
	else
		vmem_back_prev->list = vmem_back->list;

	/* make 'next' point to this freed entry */
	vmem_back->list = next;
	next = vmem_back;
	num_freed++;

	return vmem_back->phys;
}

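/*
 * Tear down the vmemmap for the range [start, end). Chunks that still back
 * a valid subsection are left alone; for the rest, the backing memory is
 * returned to wherever it came from and the mapping is removed.
 */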
void __ref vmemmap_free(unsigned long start, unsigned long end,
		struct vmem_altmap *altmap)
{
	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
	unsigned long page_order = get_order(page_size);
	unsigned long alt_start = ~0, alt_end = ~0;
	unsigned long base_pfn;

	start = ALIGN_DOWN(start, page_size);
	if (altmap) {
		alt_start = altmap->base_pfn;
		alt_end = altmap->base_pfn + altmap->reserve + altmap->free;
	}

	pr_debug("vmemmap_free %lx...%lx\n", start, end);

	for (; start < end; start += page_size) {
		unsigned long nr_pages, addr;
		struct page *page;

		/*
		 * We have already marked the subsection we are trying to
		 * remove as invalid. So if we want to remove the vmemmap
		 * range, we need to make sure there is no subsection marked
		 * valid in this range.
		 */
		if (vmemmap_populated(start, page_size))
			continue;

		addr = vmemmap_list_free(start);
		if (!addr)
			continue;

		page = pfn_to_page(addr >> PAGE_SHIFT);
		nr_pages = 1 << page_order;
		base_pfn = PHYS_PFN(addr);

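		/*
		 * Return the backing memory to wherever it came from: the
		 * altmap if the pfn falls inside its range, the bootmem
		 * reserved pool if the page is marked reserved, or the page
		 * allocator otherwise.
		 */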
		if (base_pfn >= alt_start && base_pfn < alt_end) {
			vmem_altmap_free(altmap, nr_pages);
		} else if (PageReserved(page)) {
			/* allocated from bootmem */
			if (page_size < PAGE_SIZE) {
				/*
				 * this shouldn't happen, but if it is
				 * the case, leave the memory there
				 */
				WARN_ON_ONCE(1);
			} else {
				while (nr_pages--)
					free_reserved_page(page++);
			}
		} else {
			free_pages((unsigned long)(__va(addr)), page_order);
		}

		vmemmap_remove_mapping(start, page_size);
	}
}
#endif
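
/*
 * Hook called by the generic memory-hotplug code; nothing needs to be
 * recorded for vmemmap pages here.
 */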
void register_page_bootmem_memmap(unsigned long section_nr,
				  struct page *start_page, unsigned long size)
{
}

#endif /* CONFIG_SPARSEMEM_VMEMMAP */

#ifdef CONFIG_PPC_BOOK3S_64
static bool disable_radix = !IS_ENABLED(CONFIG_PPC_RADIX_MMU_DEFAULT);

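/*
 * "disable_radix" on the kernel command line forces the hash MMU.
 * A bare "disable_radix" (or any true value) disables radix;
 * "disable_radix=no" keeps the default selection.
 */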
static int __init parse_disable_radix(char *p)
{
	bool val;

	if (!p)
		val = true;
	else if (kstrtobool(p, &val))
		return -EINVAL;

	disable_radix = val;

	return 0;
}
early_param("disable_radix", parse_disable_radix);

/*
 * If we're running under a hypervisor, we need to check the contents of
 * /chosen/ibm,architecture-vec-5 to see if the hypervisor is willing to do
 * radix.  If not, we clear the radix feature bit so we fall back to hash.
 */
static void __init early_check_vec5(void)
{
	unsigned long root, chosen;
	int size;
	const u8 *vec5;
	u8 mmu_supported;

	root = of_get_flat_dt_root();
	chosen = of_get_flat_dt_subnode_by_name(root, "chosen");
	if (chosen == -FDT_ERR_NOTFOUND) {
		cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
		return;
	}
	vec5 = of_get_flat_dt_prop(chosen, "ibm,architecture-vec-5", &size);
	if (!vec5) {
		cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
		return;
	}
	if (size <= OV5_INDX(OV5_MMU_SUPPORT)) {
		cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
		return;
	}

	/* Check for supported configuration */
	mmu_supported = vec5[OV5_INDX(OV5_MMU_SUPPORT)] &
			OV5_FEAT(OV5_MMU_SUPPORT);
	if (mmu_supported == OV5_FEAT(OV5_MMU_RADIX)) {
		/* Hypervisor only supports radix - check enabled && GTSE */
		if (!early_radix_enabled()) {
			pr_warn("WARNING: Ignoring cmdline option disable_radix\n");
		}
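		/*
		 * GTSE (guest translation shootdown enable) indicates whether
		 * the guest may issue TLB invalidations itself; mirror the
		 * hypervisor's answer in MMU_FTR_GTSE.
		 */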
		if (!(vec5[OV5_INDX(OV5_RADIX_GTSE)] &
						OV5_FEAT(OV5_RADIX_GTSE))) {
			cur_cpu_spec->mmu_features &= ~MMU_FTR_GTSE;
		} else
			cur_cpu_spec->mmu_features |= MMU_FTR_GTSE;
		/* Do radix anyway - the hypervisor said we had to */
		cur_cpu_spec->mmu_features |= MMU_FTR_TYPE_RADIX;
	} else if (mmu_supported == OV5_FEAT(OV5_MMU_HASH)) {
		/* Hypervisor only supports hash - disable radix */
		cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
		cur_cpu_spec->mmu_features &= ~MMU_FTR_GTSE;
	}
}

void __init mmu_early_init_devtree(void)
{
	/* Disable radix mode based on kernel command line. */
	if (disable_radix)
		cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;

	/*
	 * Check /chosen/ibm,architecture-vec-5 if running as a guest.
	 * When running bare-metal, we can use radix if we like
	 * even though the ibm,architecture-vec-5 property created by
	 * skiboot doesn't have the necessary bits set.
	 */
	if (!(mfmsr() & MSR_HV))
		early_check_vec5();

	if (early_radix_enabled()) {
		radix__early_init_devtree();
		/*
		 * We have finalized the translation we are going to use by now.
		 * Radix mode is not limited by RMA / VRMA addressing.
		 * Hence don't limit memblock allocations.
		 */
		ppc64_rma_size = ULONG_MAX;
		memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
	} else
		hash__early_init_devtree();
}
#endif /* CONFIG_PPC_BOOK3S_64 */
