xref: /kernel/linux/linux-5.10/arch/x86/mm/numa.c (revision 8c2ecf20)
// SPDX-License-Identifier: GPL-2.0-only
/* Common code for 32 and 64-bit NUMA */
#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/mmzone.h>
#include <linux/ctype.h>
#include <linux/nodemask.h>
#include <linux/sched.h>
#include <linux/topology.h>

#include <asm/e820/api.h>
#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/amd_nb.h>

#include "numa_internal.h"

int numa_off;
nodemask_t numa_nodes_parsed __initdata;

struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
EXPORT_SYMBOL(node_data);

static struct numa_meminfo numa_meminfo __initdata_or_meminfo;
static struct numa_meminfo numa_reserved_meminfo __initdata_or_meminfo;

static int numa_distance_cnt;
static u8 *numa_distance;
static __init int numa_setup(char *opt)
{
	if (!opt)
		return -EINVAL;
	if (!strncmp(opt, "off", 3))
		numa_off = 1;
	if (!strncmp(opt, "fake=", 5))
		return numa_emu_cmdline(opt + 5);
	if (!strncmp(opt, "noacpi", 6))
		disable_srat();
	if (!strncmp(opt, "nohmat", 6))
		disable_hmat();
	return 0;
}
early_param("numa", numa_setup);

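/*
 * For reference (annotation, not part of the upstream file): early_param()
 * registers numa_setup() as the handler for the "numa=" kernel command-line
 * option, so the forms accepted above are:
 *
 *	numa=off		disable NUMA; a single node is faked instead
 *	numa=fake=<spec>	emulated NUMA layout, handed to numa_emu_cmdline()
 *	numa=noacpi		ignore the ACPI SRAT table (disable_srat())
 *	numa=nohmat		ignore the ACPI HMAT table (disable_hmat())
 */
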
/*
 * apicid, cpu, node mappings
 */
s16 __apicid_to_node[MAX_LOCAL_APIC] = {
	[0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
};

int numa_cpu_node(int cpu)
{
	int apicid = early_per_cpu(x86_cpu_to_apicid, cpu);

	if (apicid != BAD_APICID)
		return __apicid_to_node[apicid];
	return NUMA_NO_NODE;
}

cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
EXPORT_SYMBOL(node_to_cpumask_map);

/*
 * Map cpu index to node index
 */
DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);

void numa_set_node(int cpu, int node)
{
	int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);

	/* early setting, no percpu area yet */
	if (cpu_to_node_map) {
		cpu_to_node_map[cpu] = node;
		return;
	}

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
	if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) {
		printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu);
		dump_stack();
		return;
	}
#endif
	per_cpu(x86_cpu_to_node_map, cpu) = node;

	set_cpu_numa_node(cpu, node);
}

void numa_clear_node(int cpu)
{
	numa_set_node(cpu, NUMA_NO_NODE);
}

/*
 * Allocate node_to_cpumask_map based on number of available nodes
 * Requires node_possible_map to be valid.
 *
 * Note: cpumask_of_node() is not valid until after this is done.
 * (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.)
 */
void __init setup_node_to_cpumask_map(void)
{
	unsigned int node;

	/* setup nr_node_ids if not done yet */
	if (nr_node_ids == MAX_NUMNODES)
		setup_nr_node_ids();

	/* allocate the map */
	for (node = 0; node < nr_node_ids; node++)
		alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);

	/* cpumask_of_node() will now work */
	pr_debug("Node to cpumask map for %u nodes\n", nr_node_ids);
}

static int __init numa_add_memblk_to(int nid, u64 start, u64 end,
				     struct numa_meminfo *mi)
{
	/* ignore zero length blks */
	if (start == end)
		return 0;

	/* whine about and ignore invalid blks */
	if (start > end || nid < 0 || nid >= MAX_NUMNODES) {
		pr_warn("Warning: invalid memblk node %d [mem %#010Lx-%#010Lx]\n",
			nid, start, end - 1);
		return 0;
	}

	if (mi->nr_blks >= NR_NODE_MEMBLKS) {
		pr_err("too many memblk ranges\n");
		return -EINVAL;
	}

	mi->blk[mi->nr_blks].start = start;
	mi->blk[mi->nr_blks].end = end;
	mi->blk[mi->nr_blks].nid = nid;
	mi->nr_blks++;
	return 0;
}

/**
 * numa_remove_memblk_from - Remove one numa_memblk from a numa_meminfo
 * @idx: Index of memblk to remove
 * @mi: numa_meminfo to remove memblk from
 *
 * Remove @idx'th numa_memblk from @mi by shifting @mi->blk[] and
 * decrementing @mi->nr_blks.
 */
void __init numa_remove_memblk_from(int idx, struct numa_meminfo *mi)
{
	mi->nr_blks--;
	memmove(&mi->blk[idx], &mi->blk[idx + 1],
		(mi->nr_blks - idx) * sizeof(mi->blk[0]));
}

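/*
 * Worked illustration (annotation, not part of the upstream file): with
 * mi->blk[] = { A, B, C } and nr_blks == 3, numa_remove_memblk_from(1, mi)
 * first drops nr_blks to 2 and then memmove()s one element (C) down one
 * slot, leaving { A, C }.  Removing the last index copies zero bytes.
 */
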
/**
 * numa_move_tail_memblk - Move a numa_memblk from one numa_meminfo to another
 * @dst: numa_meminfo to append block to
 * @idx: Index of memblk to remove
 * @src: numa_meminfo to remove memblk from
 */
static void __init numa_move_tail_memblk(struct numa_meminfo *dst, int idx,
					 struct numa_meminfo *src)
{
	dst->blk[dst->nr_blks++] = src->blk[idx];
	numa_remove_memblk_from(idx, src);
}

/**
 * numa_add_memblk - Add one numa_memblk to numa_meminfo
 * @nid: NUMA node ID of the new memblk
 * @start: Start address of the new memblk
 * @end: End address of the new memblk
 *
 * Add a new memblk to the default numa_meminfo.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init numa_add_memblk(int nid, u64 start, u64 end)
{
	return numa_add_memblk_to(nid, start, end, &numa_meminfo);
}

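/*
 * Minimal usage sketch (illustration only, not compiled and not part of
 * the upstream file): this is the shape in which a firmware parser such
 * as the SRAT code feeds ranges into the default numa_meminfo.  The node
 * IDs and addresses below are made up for the example.
 */
#if 0
static int __init example_fill_meminfo(void)
{
	/* node 0: [0, 4G) */
	if (numa_add_memblk(0, 0, 0x100000000ULL))
		return -EINVAL;
	/* node 1: [4G, 8G) */
	if (numa_add_memblk(1, 0x100000000ULL, 0x200000000ULL))
		return -EINVAL;
	return 0;
}
#endif
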
/* Allocate NODE_DATA for a node on the local memory */
static void __init alloc_node_data(int nid)
{
	const size_t nd_size = roundup(sizeof(pg_data_t), PAGE_SIZE);
	u64 nd_pa;
	void *nd;
	int tnid;

	/*
	 * Allocate node data.  Try node-local memory and then any node.
	 * Never allocate in DMA zone.
	 */
	nd_pa = memblock_phys_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
	if (!nd_pa) {
		pr_err("Cannot find %zu bytes in any node (initial node: %d)\n",
		       nd_size, nid);
		return;
	}
	nd = __va(nd_pa);

	/* report and initialize */
	printk(KERN_INFO "NODE_DATA(%d) allocated [mem %#010Lx-%#010Lx]\n", nid,
	       nd_pa, nd_pa + nd_size - 1);
	tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
	if (tnid != nid)
		printk(KERN_INFO "    NODE_DATA(%d) on node %d\n", nid, tnid);

	node_data[nid] = nd;
	memset(NODE_DATA(nid), 0, sizeof(pg_data_t));

	node_set_online(nid);
}

/**
 * numa_cleanup_meminfo - Cleanup a numa_meminfo
 * @mi: numa_meminfo to clean up
 *
 * Sanitize @mi by merging and removing unnecessary memblks.  Also check for
 * conflicts and clear unused memblks.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init numa_cleanup_meminfo(struct numa_meminfo *mi)
{
	const u64 low = 0;
	const u64 high = PFN_PHYS(max_pfn);
	int i, j, k;

	/* first, trim all entries */
	for (i = 0; i < mi->nr_blks; i++) {
		struct numa_memblk *bi = &mi->blk[i];

		/* move / save reserved memory ranges */
		if (!memblock_overlaps_region(&memblock.memory,
					bi->start, bi->end - bi->start)) {
			numa_move_tail_memblk(&numa_reserved_meminfo, i--, mi);
			continue;
		}

		/* make sure all non-reserved blocks are inside the limits */
		bi->start = max(bi->start, low);

		/* preserve info for non-RAM areas above 'max_pfn': */
		if (bi->end > high) {
			numa_add_memblk_to(bi->nid, high, bi->end,
					   &numa_reserved_meminfo);
			bi->end = high;
		}

		/* and there's no empty block */
		if (bi->start >= bi->end)
			numa_remove_memblk_from(i--, mi);
	}

	/* merge neighboring / overlapping entries */
	for (i = 0; i < mi->nr_blks; i++) {
		struct numa_memblk *bi = &mi->blk[i];

		for (j = i + 1; j < mi->nr_blks; j++) {
			struct numa_memblk *bj = &mi->blk[j];
			u64 start, end;

			/*
			 * See whether there are overlapping blocks.  Whine
			 * about but allow overlaps of the same nid.  They
			 * will be merged below.
			 */
			if (bi->end > bj->start && bi->start < bj->end) {
				if (bi->nid != bj->nid) {
					pr_err("node %d [mem %#010Lx-%#010Lx] overlaps with node %d [mem %#010Lx-%#010Lx]\n",
					       bi->nid, bi->start, bi->end - 1,
					       bj->nid, bj->start, bj->end - 1);
					return -EINVAL;
				}
				pr_warn("Warning: node %d [mem %#010Lx-%#010Lx] overlaps with itself [mem %#010Lx-%#010Lx]\n",
					bi->nid, bi->start, bi->end - 1,
					bj->start, bj->end - 1);
			}

			/*
			 * Join together blocks on the same node, holes
			 * between which don't overlap with memory on other
			 * nodes.
			 */
			if (bi->nid != bj->nid)
				continue;
			start = min(bi->start, bj->start);
			end = max(bi->end, bj->end);
			for (k = 0; k < mi->nr_blks; k++) {
				struct numa_memblk *bk = &mi->blk[k];

				if (bi->nid == bk->nid)
					continue;
				if (start < bk->end && end > bk->start)
					break;
			}
			if (k < mi->nr_blks)
				continue;
			printk(KERN_INFO "NUMA: Node %d [mem %#010Lx-%#010Lx] + [mem %#010Lx-%#010Lx] -> [mem %#010Lx-%#010Lx]\n",
			       bi->nid, bi->start, bi->end - 1, bj->start,
			       bj->end - 1, start, end - 1);
			bi->start = start;
			bi->end = end;
			numa_remove_memblk_from(j--, mi);
		}
	}

	/* clear unused ones */
	for (i = mi->nr_blks; i < ARRAY_SIZE(mi->blk); i++) {
		mi->blk[i].start = mi->blk[i].end = 0;
		mi->blk[i].nid = NUMA_NO_NODE;
	}

	return 0;
}

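/*
 * Worked example (annotation, not part of the upstream file): suppose the
 * firmware reported two blocks for node 0, [0, 2G) and [2G, 4G), with no
 * other node's memory in between.  The merge pass above picks start = 0
 * and end = 4G, finds no foreign block intersecting [0, 4G), collapses
 * the pair into a single memblk [0, 4G), and logs the "NUMA: Node 0 ..."
 * merge line.
 */
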
/*
 * Set the nodes which have memory in @mi in *@nodemask.
 */
static void __init numa_nodemask_from_meminfo(nodemask_t *nodemask,
					      const struct numa_meminfo *mi)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mi->blk); i++)
		if (mi->blk[i].start != mi->blk[i].end &&
		    mi->blk[i].nid != NUMA_NO_NODE)
			node_set(mi->blk[i].nid, *nodemask);
}

/**
 * numa_reset_distance - Reset NUMA distance table
 *
 * The current table is freed.  The next numa_set_distance() call will
 * create a new one.
 */
void __init numa_reset_distance(void)
{
	size_t size = numa_distance_cnt * numa_distance_cnt * sizeof(numa_distance[0]);

	/* numa_distance could be 1LU, marking allocation failure; test cnt instead */
	if (numa_distance_cnt)
		memblock_free(__pa(numa_distance), size);
	numa_distance_cnt = 0;
	numa_distance = NULL;	/* enable table creation */
}

static int __init numa_alloc_distance(void)
{
	nodemask_t nodes_parsed;
	size_t size;
	int i, j, cnt = 0;
	u64 phys;

	/* size the new table and allocate it */
	nodes_parsed = numa_nodes_parsed;
	numa_nodemask_from_meminfo(&nodes_parsed, &numa_meminfo);

	for_each_node_mask(i, nodes_parsed)
		cnt = i;
	cnt++;
	size = cnt * cnt * sizeof(numa_distance[0]);

	phys = memblock_find_in_range(0, PFN_PHYS(max_pfn_mapped),
				      size, PAGE_SIZE);
	if (!phys) {
		pr_warn("Warning: can't allocate distance table!\n");
		/* don't retry until explicitly reset */
		numa_distance = (void *)1LU;
		return -ENOMEM;
	}
	memblock_reserve(phys, size);

	numa_distance = __va(phys);
	numa_distance_cnt = cnt;

	/* fill with the default distances */
	for (i = 0; i < cnt; i++)
		for (j = 0; j < cnt; j++)
			numa_distance[i * cnt + j] = i == j ?
				LOCAL_DISTANCE : REMOTE_DISTANCE;
	printk(KERN_DEBUG "NUMA: Initialized distance table, cnt=%d\n", cnt);

	return 0;
}

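/*
 * Layout note (annotation, not part of the upstream file): numa_distance
 * is a flat cnt x cnt row-major matrix, so distance(i, j) lives at
 * numa_distance[i * cnt + j].  Right after numa_alloc_distance() with
 * cnt == 2 the table reads:
 *
 *		to 0	to 1
 *	from 0	  10	  20	(LOCAL_DISTANCE / REMOTE_DISTANCE)
 *	from 1	  20	  10
 */
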
/**
 * numa_set_distance - Set NUMA distance from one NUMA node to another
 * @from: the 'from' node to set distance
 * @to: the 'to' node to set distance
 * @distance: NUMA distance
 *
 * Set the distance from node @from to @to to @distance.  If the distance
 * table doesn't exist, one large enough to accommodate all the currently
 * known nodes will be created.
 *
 * If such a table cannot be allocated, a warning is printed and further
 * calls are ignored until the distance table is reset with
 * numa_reset_distance().
 *
 * If @from or @to is higher than the highest known node or lower than zero
 * at the time of table creation, or if @distance doesn't make sense, the
 * call is ignored.  This allows specific NUMA config implementations to
 * stay simple.
 */
void __init numa_set_distance(int from, int to, int distance)
{
	if (!numa_distance && numa_alloc_distance() < 0)
		return;

	if (from >= numa_distance_cnt || to >= numa_distance_cnt ||
			from < 0 || to < 0) {
		pr_warn_once("Warning: node ids are out of bounds, from=%d to=%d distance=%d\n",
			     from, to, distance);
		return;
	}

	if ((u8)distance != distance ||
	    (from == to && distance != LOCAL_DISTANCE)) {
		pr_warn_once("Warning: invalid distance parameter, from=%d to=%d distance=%d\n",
			     from, to, distance);
		return;
	}

	numa_distance[from * numa_distance_cnt + to] = distance;
}

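/*
 * Usage sketch (annotation; values made up for the example): a firmware
 * parser such as the ACPI SLIT code calls this once per (from, to) pair.
 * For a two-node box with an asymmetric interconnect it might do:
 *
 *	numa_set_distance(0, 0, 10);	// LOCAL_DISTANCE
 *	numa_set_distance(0, 1, 21);
 *	numa_set_distance(1, 0, 31);	// need not equal (0, 1)
 *	numa_set_distance(1, 1, 10);
 *
 * after which __node_distance(1, 0) returns 31.
 */
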
int __node_distance(int from, int to)
{
	if (from >= numa_distance_cnt || to >= numa_distance_cnt)
		return from == to ? LOCAL_DISTANCE : REMOTE_DISTANCE;
	return numa_distance[from * numa_distance_cnt + to];
}
EXPORT_SYMBOL(__node_distance);

/*
 * Sanity check to catch more bad NUMA configurations (they are amazingly
 * common).  Make sure the nodes cover all memory.
 */
static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
{
	u64 numaram, e820ram;
	int i;

	numaram = 0;
	for (i = 0; i < mi->nr_blks; i++) {
		u64 s = mi->blk[i].start >> PAGE_SHIFT;
		u64 e = mi->blk[i].end >> PAGE_SHIFT;
		numaram += e - s;
		numaram -= __absent_pages_in_range(mi->blk[i].nid, s, e);
		if ((s64)numaram < 0)
			numaram = 0;
	}

	e820ram = max_pfn - absent_pages_in_range(0, max_pfn);

	/* We seem to lose 3 pages somewhere. Allow 1M of slack. */
	if ((s64)(e820ram - numaram) >= (1 << (20 - PAGE_SHIFT))) {
		printk(KERN_ERR "NUMA: nodes only cover %LuMB of your %LuMB e820 RAM. Not used.\n",
		       (numaram << PAGE_SHIFT) >> 20,
		       (e820ram << PAGE_SHIFT) >> 20);
		return false;
	}
	return true;
}

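/*
 * Arithmetic note (annotation, not part of the upstream file): with 4 KiB
 * pages the slack above is 1 << (20 - 12) = 256 pages, i.e. 1 MiB.  So the
 * NUMA layout is only rejected when the e820 map holds at least 1 MiB of
 * RAM that no node's memblks (minus absent pages) account for.
 */
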
/*
 * Mark all currently memblock-reserved physical memory (which covers the
 * kernel's own memory ranges) as non-hotpluggable.
 */
static void __init numa_clear_kernel_node_hotplug(void)
{
	nodemask_t reserved_nodemask = NODE_MASK_NONE;
	struct memblock_region *mb_region;
	int i;

	/*
	 * We have to do some preprocessing of memblock regions, to
	 * make them suitable for reservation.
	 *
	 * At this time, all memory regions reserved by memblock are
	 * used by the kernel, but those regions are not split up
	 * along node boundaries yet, and don't necessarily have their
	 * node ID set yet either.
	 *
	 * So iterate over all memory known to the x86 architecture,
	 * and use those ranges to set the nid in memblock.reserved.
	 * This will split up the memblock regions along node
	 * boundaries and will set the node IDs as well.
	 */
	for (i = 0; i < numa_meminfo.nr_blks; i++) {
		struct numa_memblk *mb = numa_meminfo.blk + i;
		int ret;

		ret = memblock_set_node(mb->start, mb->end - mb->start, &memblock.reserved, mb->nid);
		WARN_ON_ONCE(ret);
	}

	/*
	 * Now go over all reserved memblock regions, to construct a
	 * node mask of all kernel reserved memory areas.
	 *
	 * [ Note, when booting with mem=nn[kMG] or in a kdump kernel,
	 *   numa_meminfo might not include all memblock.reserved
	 *   memory ranges, because quirks such as trim_snb_memory()
	 *   reserve specific pages for Sandy Bridge graphics. ]
	 */
	for_each_reserved_mem_region(mb_region) {
		int nid = memblock_get_region_node(mb_region);

		if (nid != MAX_NUMNODES)
			node_set(nid, reserved_nodemask);
	}

	/*
	 * Finally, clear the MEMBLOCK_HOTPLUG flag for all memory
	 * belonging to the reserved node mask.
	 *
	 * Note that this will include memory regions that reside
	 * on nodes that contain kernel memory - entire such nodes
	 * become non-hotpluggable:
	 */
	for (i = 0; i < numa_meminfo.nr_blks; i++) {
		struct numa_memblk *mb = numa_meminfo.blk + i;

		if (!node_isset(mb->nid, reserved_nodemask))
			continue;

		memblock_clear_hotplug(mb->start, mb->end - mb->start);
	}
}

static int __init numa_register_memblks(struct numa_meminfo *mi)
{
	int i, nid;

	/* Account for nodes with cpus and no memory */
	node_possible_map = numa_nodes_parsed;
	numa_nodemask_from_meminfo(&node_possible_map, mi);
	if (WARN_ON(nodes_empty(node_possible_map)))
		return -EINVAL;

	for (i = 0; i < mi->nr_blks; i++) {
		struct numa_memblk *mb = &mi->blk[i];
		memblock_set_node(mb->start, mb->end - mb->start,
				  &memblock.memory, mb->nid);
	}

	/*
	 * Very early on, the kernel has to use some memory, e.g. to load
	 * the kernel image.  We cannot prevent this anyway, so any node
	 * the kernel resides on must be made non-hotpluggable.
	 *
	 * And by the time we get here, allocating node data won't fail.
	 */
	numa_clear_kernel_node_hotplug();

	/*
	 * If the sections array is going to be used for pfn -> nid
	 * mapping, check whether its granularity is fine enough.
	 *
	 * (Caveat: in v5.10, NODE_NOT_IN_PAGE_FLAGS is #defined empty in
	 * page-flags-layout.h rather than as a CONFIG_-style 1, so this
	 * IS_ENABLED() check evaluates to 0 at compile time.)
	 */
	if (IS_ENABLED(NODE_NOT_IN_PAGE_FLAGS)) {
		unsigned long pfn_align = node_map_pfn_alignment();

		if (pfn_align && pfn_align < PAGES_PER_SECTION) {
			pr_warn("Node alignment %LuMB < min %LuMB, rejecting NUMA config\n",
				PFN_PHYS(pfn_align) >> 20,
				PFN_PHYS(PAGES_PER_SECTION) >> 20);
			return -EINVAL;
		}
	}
	if (!numa_meminfo_cover_memory(mi))
		return -EINVAL;

	/* Finally register nodes. */
	for_each_node_mask(nid, node_possible_map) {
		u64 start = PFN_PHYS(max_pfn);
		u64 end = 0;

		for (i = 0; i < mi->nr_blks; i++) {
			if (nid != mi->blk[i].nid)
				continue;
			start = min(mi->blk[i].start, start);
			end = max(mi->blk[i].end, end);
		}

		if (start >= end)
			continue;

		alloc_node_data(nid);
	}

	/* Dump memblock with node info and return. */
	memblock_dump_all();
	return 0;
}

/*
 * There are unfortunately some poorly designed mainboards around that
 * only connect memory to a single CPU.  This breaks the 1:1 cpu->node
 * mapping.  To avoid this, fill in the mapping for all possible CPUs,
 * as the number of CPUs is not known yet.  We round-robin over the
 * existing nodes.
 */
static void __init numa_init_array(void)
{
	int rr, i;

	rr = first_node(node_online_map);
	for (i = 0; i < nr_cpu_ids; i++) {
		if (early_cpu_to_node(i) != NUMA_NO_NODE)
			continue;
		numa_set_node(i, rr);
		rr = next_node_in(rr, node_online_map);
	}
}

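/*
 * Worked example (annotation, not part of the upstream file): with nodes
 * {0, 1} online and four possible CPUs that have no parsed affinity, the
 * loop above assigns cpu0->node0, cpu1->node1, cpu2->node0, cpu3->node1;
 * next_node_in() wraps around at the end of node_online_map.
 */
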
static int __init numa_init(int (*init_func)(void))
{
	int i;
	int ret;

	for (i = 0; i < MAX_LOCAL_APIC; i++)
		set_apicid_to_node(i, NUMA_NO_NODE);

	nodes_clear(numa_nodes_parsed);
	nodes_clear(node_possible_map);
	nodes_clear(node_online_map);
	memset(&numa_meminfo, 0, sizeof(numa_meminfo));
	WARN_ON(memblock_set_node(0, ULLONG_MAX, &memblock.memory,
				  MAX_NUMNODES));
	WARN_ON(memblock_set_node(0, ULLONG_MAX, &memblock.reserved,
				  MAX_NUMNODES));
	/* In case parsing SRAT failed. */
	WARN_ON(memblock_clear_hotplug(0, ULLONG_MAX));
	numa_reset_distance();

	ret = init_func();
	if (ret < 0)
		return ret;

	/*
	 * We reset memblock back to the top-down direction here because,
	 * if we configured ACPI_NUMA, we have parsed SRAT in init_func().
	 * It is OK to have the reset here even if we didn't configure
	 * ACPI_NUMA, or if ACPI NUMA init fails and falls back to dummy
	 * NUMA init.
	 */
	memblock_set_bottom_up(false);

	ret = numa_cleanup_meminfo(&numa_meminfo);
	if (ret < 0)
		return ret;

	numa_emulation(&numa_meminfo, numa_distance_cnt);

	ret = numa_register_memblks(&numa_meminfo);
	if (ret < 0)
		return ret;

	for (i = 0; i < nr_cpu_ids; i++) {
		int nid = early_cpu_to_node(i);

		if (nid == NUMA_NO_NODE)
			continue;
		if (!node_online(nid))
			numa_clear_node(i);
	}
	numa_init_array();

	return 0;
}

/**
 * dummy_numa_init - Fallback dummy NUMA init
 *
 * Used if there's no underlying NUMA architecture, NUMA initialization
 * fails, or NUMA is disabled on the command line.
 *
 * Must online at least one node and add memory blocks that cover all
 * allowed memory.  This function must not fail.
 */
static int __init dummy_numa_init(void)
{
	printk(KERN_INFO "%s\n",
	       numa_off ? "NUMA turned off" : "No NUMA configuration found");
	printk(KERN_INFO "Faking a node at [mem %#018Lx-%#018Lx]\n",
	       0LLU, PFN_PHYS(max_pfn) - 1);

	node_set(0, numa_nodes_parsed);
	numa_add_memblk(0, 0, PFN_PHYS(max_pfn));

	return 0;
}

/**
 * x86_numa_init - Initialize NUMA
 *
 * Try each configured NUMA initialization method until one succeeds.  The
 * last fallback is a dummy single-node config encompassing all memory,
 * which never fails.
 */
void __init x86_numa_init(void)
{
	if (!numa_off) {
#ifdef CONFIG_ACPI_NUMA
		if (!numa_init(x86_acpi_numa_init))
			return;
#endif
#ifdef CONFIG_AMD_NUMA
		if (!numa_init(amd_numa_init))
			return;
#endif
	}

	numa_init(dummy_numa_init);
}

static void __init init_memory_less_node(int nid)
{
	/* Allocate and initialize node data.  The memory-less node is now online. */
	alloc_node_data(nid);
	free_area_init_memoryless_node(nid);

	/*
	 * All zonelists will be built later in start_kernel() after per cpu
	 * areas are initialized.
	 */
}

/*
 * A node may exist which has one or more Generic Initiators but no CPUs and no
 * memory.
 *
 * This function must be called after init_cpu_to_node(), to ensure that any
 * memoryless CPU nodes have already been brought online, and before
 * node_data[nid] is needed for zone list setup in build_all_zonelists().
 *
 * When this function is called, any nodes containing memory and/or CPUs will
 * already be online, and there is no need to do anything extra, even if they
 * also contain one or more Generic Initiators.
 */
void __init init_gi_nodes(void)
{
	int nid;

	for_each_node_state(nid, N_GENERIC_INITIATOR)
		if (!node_online(nid))
			init_memory_less_node(nid);
}

/*
 * Set up early cpu_to_node.
 *
 * Populate cpu_to_node[] only if the x86_cpu_to_apicid[] and
 * apicid_to_node[] tables have valid entries for a CPU.  This means we
 * skip cpu_to_node[] initialisation for NUMA emulation and the faked-node
 * case (when running a kernel compiled for NUMA on a non-NUMA box), which
 * is OK because cpu_to_node[] was already initialized in a round-robin
 * manner by numa_init_array() before this call, and that initialization
 * is good enough for the fake NUMA cases.
 *
 * Called before the per_cpu areas are set up.
 */
void __init init_cpu_to_node(void)
{
	int cpu;
	u16 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid);

	BUG_ON(cpu_to_apicid == NULL);

	for_each_possible_cpu(cpu) {
		int node = numa_cpu_node(cpu);

		if (node == NUMA_NO_NODE)
			continue;

		if (!node_online(node))
			init_memory_less_node(node);

		numa_set_node(cpu, node);
	}
}

#ifndef CONFIG_DEBUG_PER_CPU_MAPS

# ifndef CONFIG_NUMA_EMU
void numa_add_cpu(int cpu)
{
	cpumask_set_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}

void numa_remove_cpu(int cpu)
{
	cpumask_clear_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}
# endif	/* !CONFIG_NUMA_EMU */

#else	/* !CONFIG_DEBUG_PER_CPU_MAPS */

int __cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
		printk(KERN_WARNING
			"cpu_to_node(%d): usage too early!\n", cpu);
		dump_stack();
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}
EXPORT_SYMBOL(__cpu_to_node);

/*
 * Same function as cpu_to_node() but used if called before the
 * per_cpu areas are set up.
 */
int early_cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map))
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];

	if (!cpu_possible(cpu)) {
		printk(KERN_WARNING
			"early_cpu_to_node(%d): no per_cpu area!\n", cpu);
		dump_stack();
		return NUMA_NO_NODE;
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}

void debug_cpumask_set_cpu(int cpu, int node, bool enable)
{
	struct cpumask *mask;

	if (node == NUMA_NO_NODE) {
		/* early_cpu_to_node() already emits a warning and trace */
		return;
	}
	mask = node_to_cpumask_map[node];
	if (!cpumask_available(mask)) {
		pr_err("node_to_cpumask_map[%i] NULL\n", node);
		dump_stack();
		return;
	}

	if (enable)
		cpumask_set_cpu(cpu, mask);
	else
		cpumask_clear_cpu(cpu, mask);

	printk(KERN_DEBUG "%s cpu %d node %d: mask now %*pbl\n",
		enable ? "numa_add_cpu" : "numa_remove_cpu",
		cpu, node, cpumask_pr_args(mask));
}

# ifndef CONFIG_NUMA_EMU
static void numa_set_cpumask(int cpu, bool enable)
{
	debug_cpumask_set_cpu(cpu, early_cpu_to_node(cpu), enable);
}

void numa_add_cpu(int cpu)
{
	numa_set_cpumask(cpu, true);
}

void numa_remove_cpu(int cpu)
{
	numa_set_cpumask(cpu, false);
}
# endif	/* !CONFIG_NUMA_EMU */

/*
 * Returns a pointer to the bitmask of CPUs on Node 'node'.
 */
const struct cpumask *cpumask_of_node(int node)
{
	if ((unsigned)node >= nr_node_ids) {
		printk(KERN_WARNING
			"cpumask_of_node(%d): (unsigned)node >= nr_node_ids(%u)\n",
			node, nr_node_ids);
		dump_stack();
		return cpu_none_mask;
	}
	if (!cpumask_available(node_to_cpumask_map[node])) {
		printk(KERN_WARNING
			"cpumask_of_node(%d): no node_to_cpumask_map!\n",
			node);
		dump_stack();
		return cpu_online_mask;
	}
	return node_to_cpumask_map[node];
}
EXPORT_SYMBOL(cpumask_of_node);
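
/*
 * Usage sketch (annotation, not part of the upstream file): a typical
 * consumer walks the CPUs of a node with the standard cpumask iterator:
 *
 *	int cpu;
 *
 *	for_each_cpu(cpu, cpumask_of_node(nid))
 *		do_something(cpu);
 *
 * The error paths above return cpu_none_mask or cpu_online_mask, so such
 * a loop stays well-defined even for a bogus node argument.
 */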

#endif	/* !CONFIG_DEBUG_PER_CPU_MAPS */

#ifdef CONFIG_NUMA_KEEP_MEMINFO
static int meminfo_to_nid(struct numa_meminfo *mi, u64 start)
{
	int i;

	for (i = 0; i < mi->nr_blks; i++)
		if (mi->blk[i].start <= start && mi->blk[i].end > start)
			return mi->blk[i].nid;
	return NUMA_NO_NODE;
}

int phys_to_target_node(phys_addr_t start)
{
	int nid = meminfo_to_nid(&numa_meminfo, start);

	/*
	 * Prefer online nodes, but if reserved memory might be
	 * hot-added, continue the search with the reserved ranges.
	 */
	if (nid != NUMA_NO_NODE)
		return nid;

	return meminfo_to_nid(&numa_reserved_meminfo, start);
}
EXPORT_SYMBOL_GPL(phys_to_target_node);

int memory_add_physaddr_to_nid(u64 start)
{
	int nid = meminfo_to_nid(&numa_meminfo, start);

	if (nid == NUMA_NO_NODE)
		nid = numa_meminfo.blk[0].nid;
	return nid;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif