// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/arch/alpha/mm/numa.c
 *
 *  DISCONTIGMEM NUMA alpha support.
 *
 *  Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/swap.h>
#include <linux/initrd.h>
#include <linux/pfn.h>
#include <linux/module.h>

#include <asm/hwrpb.h>
#include <asm/sections.h>

pg_data_t node_data[MAX_NUMNODES];
EXPORT_SYMBOL(node_data);

#undef DEBUG_DISCONTIG
#ifdef DEBUG_DISCONTIG
#define DBGDCONT(args...) printk(args)
#else
#define DBGDCONT(args...)
#endif

#define for_each_mem_cluster(memdesc, _cluster, i)		\
	for ((_cluster) = (memdesc)->cluster, (i) = 0;		\
	     (i) < (memdesc)->numclusters; (i)++, (_cluster)++)

static void __init show_mem_layout(void)
{
	struct memclust_struct * cluster;
	struct memdesc_struct * memdesc;
	int i;

	/* Find free clusters, and init and free the bootmem accordingly.  */
	memdesc = (struct memdesc_struct *)
	  (hwrpb->mddt_offset + (unsigned long) hwrpb);

	printk("Raw memory layout:\n");
	for_each_mem_cluster(memdesc, cluster, i) {
		printk(" memcluster %2d, usage %1lx, start %8lu, end %8lu\n",
		       i, cluster->usage, cluster->start_pfn,
		       cluster->start_pfn + cluster->numpages);
	}
}

static void __init
setup_memory_node(int nid, void *kernel_end)
{
	extern unsigned long mem_size_limit;
	struct memclust_struct * cluster;
	struct memdesc_struct * memdesc;
	unsigned long start_kernel_pfn, end_kernel_pfn;
	unsigned long start, end;
	unsigned long node_pfn_start, node_pfn_end;
	unsigned long node_min_pfn, node_max_pfn;
	int i;
	int show_init = 0;

	/* Find the bounds of current node */
	node_pfn_start = (node_mem_start(nid)) >> PAGE_SHIFT;
	node_pfn_end = node_pfn_start + (node_mem_size(nid) >> PAGE_SHIFT);

	/* Find free clusters, and init and free the bootmem accordingly.  */
	memdesc = (struct memdesc_struct *)
	  (hwrpb->mddt_offset + (unsigned long) hwrpb);

	/* find the bounds of this node (node_min_pfn/node_max_pfn) */
	node_min_pfn = ~0UL;
	node_max_pfn = 0UL;
	for_each_mem_cluster(memdesc, cluster, i) {
		/* Bit 0 is console/PALcode reserved.  Bit 1 is
		   non-volatile memory -- we might want to mark
		   this for later.  */
		if (cluster->usage & 3)
			continue;

		start = cluster->start_pfn;
		end = start + cluster->numpages;

		if (start >= node_pfn_end || end <= node_pfn_start)
			continue;

		if (!show_init) {
			show_init = 1;
			printk("Initializing bootmem allocator on Node ID %d\n", nid);
		}
		printk(" memcluster %2d, usage %1lx, start %8lu, end %8lu\n",
		       i, cluster->usage, cluster->start_pfn,
		       cluster->start_pfn + cluster->numpages);

		if (start < node_pfn_start)
			start = node_pfn_start;
		if (end > node_pfn_end)
			end = node_pfn_end;

		if (start < node_min_pfn)
			node_min_pfn = start;
		if (end > node_max_pfn)
			node_max_pfn = end;
	}

	if (mem_size_limit && node_max_pfn > mem_size_limit) {
		static int msg_shown = 0;
		if (!msg_shown) {
			msg_shown = 1;
			printk("setup: forcing memory size to %ldK (from %ldK).\n",
			       mem_size_limit << (PAGE_SHIFT - 10),
			       node_max_pfn   << (PAGE_SHIFT - 10));
		}
		node_max_pfn = mem_size_limit;
	}

	if (node_min_pfn >= node_max_pfn)
		return;

	/* Update global {min,max}_low_pfn from node information. */
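	/*
	 * min_low_pfn/max_low_pfn are reset in setup_memory() before the
	 * per-node loop, so once every node has been scanned they bound
	 * the union of usable memory across the whole machine.
	 */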
	if (node_min_pfn < min_low_pfn)
		min_low_pfn = node_min_pfn;
	if (node_max_pfn > max_low_pfn)
		max_pfn = max_low_pfn = node_max_pfn;

#if 0 /* we'll try this one again in a little while */
	/* Cute trick to make sure our local node data is on local memory */
	node_data[nid] = (pg_data_t *)(__va(node_min_pfn << PAGE_SHIFT));
#endif
	printk(" Detected node memory:   start %8lu, end %8lu\n",
	       node_min_pfn, node_max_pfn);

	DBGDCONT(" DISCONTIG: node_data[%d] is at 0x%p\n", nid, NODE_DATA(nid));

	/* Find the bounds of kernel memory.  */
	start_kernel_pfn = PFN_DOWN(KERNEL_START_PHYS);
	end_kernel_pfn = PFN_UP(virt_to_phys(kernel_end));

	if (!nid && (node_max_pfn < end_kernel_pfn ||
		     node_min_pfn > start_kernel_pfn))
		panic("kernel loaded out of ram");

	memblock_add_node(PFN_PHYS(node_min_pfn),
			  (node_max_pfn - node_min_pfn) << PAGE_SHIFT, nid);

	/* Zone start phys-addr must be 2^(MAX_ORDER-1) aligned.
	   Note that we round this down, not up - node memory
	   has much larger alignment than 8Mb, so it's safe. */
	node_min_pfn &= ~((1UL << (MAX_ORDER-1))-1);

	NODE_DATA(nid)->node_start_pfn = node_min_pfn;
	NODE_DATA(nid)->node_present_pages = node_max_pfn - node_min_pfn;

	node_set_online(nid);
}

void __init
setup_memory(void *kernel_end)
{
	unsigned long kernel_size;
	int nid;

	show_mem_layout();

	nodes_clear(node_online_map);

	min_low_pfn = ~0UL;
	max_low_pfn = 0UL;
	for (nid = 0; nid < MAX_NUMNODES; nid++)
		setup_memory_node(nid, kernel_end);

	kernel_size = virt_to_phys(kernel_end) - KERNEL_START_PHYS;
	memblock_reserve(KERNEL_START_PHYS, kernel_size);

#ifdef CONFIG_BLK_DEV_INITRD
	initrd_start = INITRD_START;
	if (initrd_start) {
		extern void *move_initrd(unsigned long);

		initrd_end = initrd_start+INITRD_SIZE;
		printk("Initial ramdisk at: 0x%p (%lu bytes)\n",
		       (void *) initrd_start, INITRD_SIZE);

		if ((void *)initrd_end > phys_to_virt(PFN_PHYS(max_low_pfn))) {
			if (!move_initrd(PFN_PHYS(max_low_pfn)))
				printk("initrd extends beyond end of memory "
				       "(0x%08lx > 0x%p)\ndisabling initrd\n",
				       initrd_end,
				       phys_to_virt(PFN_PHYS(max_low_pfn)));
		} else {
			nid = kvaddr_to_nid(initrd_start);
			memblock_reserve(virt_to_phys((void *)initrd_start),
					 INITRD_SIZE);
		}
	}
#endif /* CONFIG_BLK_DEV_INITRD */
}

void __init paging_init(void)
{
	unsigned long max_zone_pfn[MAX_NR_ZONES] = {0, };
	unsigned long dma_local_pfn;

	/*
	 * The old global MAX_DMA_ADDRESS per-arch API doesn't fit
	 * in the NUMA model, for now we convert it to a pfn and
	 * we interpret this pfn as a local per-node information.
	 * This issue isn't very important since none of these machines
	 * have legacy ISA slots anyways.
	 */
	dma_local_pfn = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;

	max_zone_pfn[ZONE_DMA] = dma_local_pfn;
	max_zone_pfn[ZONE_NORMAL] = max_pfn;

	free_area_init(max_zone_pfn);

	/* Initialize the kernel's ZERO_PGE. */
	memset((void *)ZERO_PGE, 0, PAGE_SIZE);
}