Lines Matching refs:node

34 * Track per-node information needed to setup the boot memory allocator, the
35 * per-node areas, and the real VM.
51 * To prevent cache aliasing effects, align per-node structures so that they
52 * start at addresses that are strided by node number.
55 #define NODEDATA_ALIGN(addr, node) \
57 (((node)*PERCPU_PAGE_SIZE) & (MAX_NODE_ALIGN_OFFSET - 1)))
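
Only the opening and closing lines of the NODEDATA_ALIGN macro match above; its middle line is elided. As a rough, stand-alone illustration of the node-strided alignment described at lines 51-52, the sketch below rounds an address up to an assumed 1 MB granule and then adds the node-indexed stride shown at line 57. The granule and the size constants are assumptions for this sketch, not the real macro's values.

    #include <stdio.h>

    /*
     * Illustration only: round the address up to an alignment granule, then
     * add a per-node stride so per-node structures of different nodes start
     * at different offsets and do not alias in the cache. The constants are
     * assumed values, not the kernel's.
     */
    #define GRANULE               (1024UL * 1024)
    #define PERCPU_PAGE_SIZE      (64UL * 1024)
    #define MAX_NODE_ALIGN_OFFSET (32UL * 1024 * 1024)

    #define NODEDATA_ALIGN(addr, node)                              \
            ((((addr) + GRANULE - 1) & ~(GRANULE - 1)) +            \
             (((node) * PERCPU_PAGE_SIZE) & (MAX_NODE_ALIGN_OFFSET - 1)))

    int main(void)
    {
            unsigned long start = 0x4001234UL;
            int node;

            for (node = 0; node < 4; node++)
                    printf("node %d -> %#lx\n", node,
                           NODEDATA_ALIGN(start, node));
            return 0;
    }
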
60 * build_node_maps - callback to setup mem_data structs for each node
63 * @node: node where this range resides
66 * treat as a virtually contiguous block (i.e. each node). Each such block
72 int node)
79 if (!mem_data[node].min_pfn) {
80 mem_data[node].min_pfn = spfn;
81 mem_data[node].max_pfn = epfn;
83 mem_data[node].min_pfn = min(spfn, mem_data[node].min_pfn);
84 mem_data[node].max_pfn = max(epfn, mem_data[node].max_pfn);
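
The min/max updates matched at lines 79-84 track each node's pfn span. A minimal sketch of that bookkeeping, with mem_data[] and MAX_NUMNODES as simplified stand-ins:

    #define MAX_NUMNODES 8

    struct node_span {
            unsigned long min_pfn;
            unsigned long max_pfn;
    };

    static struct node_span mem_data[MAX_NUMNODES];

    static void record_node_range(int node, unsigned long spfn, unsigned long epfn)
    {
            if (!mem_data[node].min_pfn) {
                    /* first range seen for this node: initialise the span */
                    mem_data[node].min_pfn = spfn;
                    mem_data[node].max_pfn = epfn;
            } else {
                    /* later ranges only widen the existing span */
                    if (spfn < mem_data[node].min_pfn)
                            mem_data[node].min_pfn = spfn;
                    if (epfn > mem_data[node].max_pfn)
                            mem_data[node].max_pfn = epfn;
            }
    }
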
91 * early_nr_cpus_node - return number of cpus on a given node
92 * @node: node to check
94 * Count the number of cpus on @node. We can't use nr_cpus_node() yet because
96 * called yet. Note that node 0 will also count all non-existent cpus.
98 static int early_nr_cpus_node(int node)
103 if (node == node_cpuid[cpu].nid)
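
Counting cpus this early means scanning the boot-time cpu-to-node table directly, as the comparison at line 103 suggests. A self-contained sketch, with NR_CPUS and node_cpuid[] as simplified stand-ins rather than the kernel definitions:

    #define NR_CPUS 16

    static struct { int nid; } node_cpuid[NR_CPUS];

    static int count_cpus_on_node(int node)
    {
            int cpu, n = 0;

            /*
             * Unused table entries are zero-initialised, so asking about
             * node 0 also counts the non-existent cpus, matching the note
             * at line 96.
             */
            for (cpu = 0; cpu < NR_CPUS; cpu++)
                    if (node == node_cpuid[cpu].nid)
                            n++;
            return n;
    }
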
111 * @node: the node id.
113 static unsigned long compute_pernodesize(int node)
117 cpus = early_nr_cpus_node(node);
119 pernodesize += node * L1_CACHE_BYTES;
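
Lines 117 and 119 show the per-node footprint being sized from the cpu count and a node-indexed cache-line stagger. A rough, hedged sketch of that shape; the remaining components and all constants here are assumptions:

    #define L1_CACHE_BYTES   128UL
    #define PERCPU_PAGE_SIZE (64UL * 1024)
    #define PAGE_SIZE        (16UL * 1024)
    #define PAGE_ALIGN(x)    (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

    static unsigned long pernodesize_sketch(int node, int cpus_on_node,
                                            unsigned long other_structs)
    {
            unsigned long pernodesize = 0;

            pernodesize += PERCPU_PAGE_SIZE * cpus_on_node; /* per-cpu areas   */
            pernodesize += node * L1_CACHE_BYTES;           /* per-node stagger */
            pernodesize += other_structs;   /* pg_data_t, node data, ...       */
            return PAGE_ALIGN(pernodesize);
    }
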
128 * per_cpu_node_setup - setup per-cpu areas on each node
129 * @cpu_data: per-cpu area on this node
130 * @node: node to setup
133 * setup __per_cpu_offset for each CPU on this node. Return a pointer to
136 static void *per_cpu_node_setup(void *cpu_data, int node)
144 if (node != node_cpuid[cpu].nid)
155 * area for cpu0 is on the correct node and its
189 int node, prev_node, unit, nr_units;
203 /* build cpu_map, units are grouped by node */
205 for_each_node(node)
207 if (node == node_cpuid[cpu].nid)
227 * CPUs are put into groups according to node. Walk cpu_map
228 * and create new groups at node boundaries.
234 node = node_cpuid[cpu].nid;
236 if (node == prev_node) {
240 prev_node = node;
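
The comment at lines 227-228 describes walking a node-sorted cpu map and opening a new group at every node boundary, which is what the prev_node comparison at lines 234-240 implements. A simplified, self-contained sketch of that walk (cpu_map_nid[] stands in for the real cpu map):

    #define NR_UNITS 16

    static int cpu_map_nid[NR_UNITS];   /* node of each unit, sorted by node */

    static int count_node_groups(int nr_units)
    {
            int unit, node, prev_node = -1, nr_groups = 0;

            for (unit = 0; unit < nr_units; unit++) {
                    node = cpu_map_nid[unit];
                    if (node != prev_node) {
                            /* crossed a node boundary: open a new group */
                            nr_groups++;
                            prev_node = node;
                    }
            }
            return nr_groups;
    }
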
255 * @node: the node id.
259 static void __init fill_pernode(int node, unsigned long pernode,
263 int cpus = early_nr_cpus_node(node);
265 mem_data[node].pernode_addr = pernode;
266 mem_data[node].pernode_size = pernodesize;
271 pernode += node * L1_CACHE_BYTES;
273 pgdat_list[node] = __va(pernode);
276 mem_data[node].node_data = __va(pernode);
280 cpu_data = per_cpu_node_setup(cpu_data, node);
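
Lines 265-280 show fill_pernode() recording the block, skipping the node-indexed stagger, and then handing out the node-level structures from a moving cursor (the exact increments between them are not shown in the matches). A hedged sketch of that cursor layout; the struct types and constants are stand-ins:

    #include <stdint.h>

    #define L1_CACHE_BYTES 128UL

    struct pgdat_sketch     { int nid; };
    struct node_data_sketch { int active; };

    struct node_layout {
            uintptr_t pernode_addr;
            uintptr_t pernode_size;
            struct pgdat_sketch     *pgdat;
            struct node_data_sketch *node_data;
    };

    static void fill_pernode_sketch(struct node_layout *nl, int node,
                                    uintptr_t pernode, uintptr_t pernodesize)
    {
            nl->pernode_addr = pernode;
            nl->pernode_size = pernodesize;

            pernode += node * L1_CACHE_BYTES;   /* per-node cache stagger */

            nl->pgdat = (struct pgdat_sketch *)pernode;
            pernode += sizeof(*nl->pgdat);

            nl->node_data = (struct node_data_sketch *)pernode;
            pernode += sizeof(*nl->node_data);

            /* the rest of the block would hold the per-cpu areas */
    }
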
286 * find_pernode_space - allocate memory for memory map and per-node structures
289 * @node: node where this range resides
292 * pg_data_ts and the per-node data struct. Each node will have something like
297 * |~~~~~~~~~~~~~~~~~~~~~~~~| <-- NODEDATA_ALIGN(start, node) for the first
314 int node)
323 * Make sure this memory falls within this node's usable memory
326 if (spfn < mem_data[node].min_pfn || epfn > mem_data[node].max_pfn)
329 /* Don't setup this node's local space twice... */
330 if (mem_data[node].pernode_addr)
337 pernodesize = compute_pernodesize(node);
338 pernode = NODEDATA_ALIGN(start, node);
342 fill_pernode(node, pernode, pernodesize);
348 * reserve_pernode_space - reserve memory for per-node space
350 * Reserve the space used by the bootmem maps & per-node space in the boot
357 int node;
359 for_each_online_node(node) {
360 if (node_isset(node, memory_less_mask))
363 /* Now the per-node space */
364 size = mem_data[node].pernode_size;
365 base = __pa(mem_data[node].pernode_addr);
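
The loop at lines 359-365 reserves each node's per-node block and skips memory-less nodes. A minimal sketch, with reserve_range() and the mem_data[] fields as placeholders:

    #define MAX_NUMNODES 8

    static struct {
            unsigned long pernode_addr;
            unsigned long pernode_size;
            int memoryless;
    } mem_data[MAX_NUMNODES];

    static void reserve_range(unsigned long base, unsigned long size)
    {
            /* placeholder for the real boot-memory reservation call */
            (void)base;
            (void)size;
    }

    static void reserve_pernode_space_sketch(void)
    {
            int node;

            for (node = 0; node < MAX_NUMNODES; node++) {
                    if (mem_data[node].memoryless)
                            continue;   /* nothing was placed on this node */
                    reserve_range(mem_data[node].pernode_addr,
                                  mem_data[node].pernode_size);
            }
    }
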
373 int node;
378 * because we are halfway through initialization of the new node's
379 * structures. If for_each_online_node() is used, a new node's
383 for_each_node(node) {
384 if (pgdat_list[node]) {
385 dst = LOCAL_DATA_ADDR(pgdat_list[node])->pg_data_ptrs;
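
Scattering the global pg_data pointer table to every node that already has a per-node area is what lines 383-385 show; as the comment at lines 378-379 explains, every node slot is visited, not just the online ones, so a node still being brought up also receives the update. A simplified, self-contained sketch (types and sizes are stand-ins):

    #include <string.h>

    #define MAX_NUMNODES 8

    struct pgdat_sketch { int nid; };

    static struct pgdat_sketch *pgdat_list[MAX_NUMNODES];
    static struct pgdat_sketch *pg_data_ptrs[MAX_NUMNODES][MAX_NUMNODES];

    static void scatter_node_data_sketch(void)
    {
            int node;

            for (node = 0; node < MAX_NUMNODES; node++) {
                    if (!pgdat_list[node])
                            continue;   /* no per-node area on this node yet */
                    memcpy(pg_data_ptrs[node], pgdat_list, sizeof(pgdat_list));
            }
    }
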
392 * initialize_pernode_data - fixup per-cpu & per-node pointers
394 * Each node's per-node area has a copy of the global pg_data_t list, so
395 * we copy that to each node here, as well as setting the per-cpu pointer
396 * to the local node data structure.
400 int cpu, node;
407 node = node_cpuid[cpu].nid;
409 mem_data[node].node_data;
415 node = node_cpuid[cpu].nid;
418 cpu0_cpu_info->node_data = mem_data[node].node_data;
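
The per-cpu loop at lines 407-409 points each cpu at its own node's data structure, and lines 415-418 give cpu0's statically allocated area the same treatment. A minimal sketch of the per-cpu step, with the arrays as stand-ins:

    #define NR_CPUS      16
    #define MAX_NUMNODES 8

    struct node_data_sketch { int node; };

    static struct node_data_sketch  node_data[MAX_NUMNODES];
    static struct node_data_sketch *cpu_node_data[NR_CPUS];
    static int node_cpuid_nid[NR_CPUS];

    static void point_cpus_at_node_data(void)
    {
            int cpu, node;

            for (cpu = 0; cpu < NR_CPUS; cpu++) {
                    node = node_cpuid_nid[cpu];
                    cpu_node_data[cpu] = &node_data[node];  /* cpu -> its node */
            }
    }
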
425 * node but fall back to any other node when __alloc_bootmem_node fails
427 * @nid: node id
428 * @pernodesize: size of this node's pernode data
434 int bestnode = NUMA_NO_NODE, node, anynode = 0;
436 for_each_online_node(node) {
437 if (node_isset(node, memory_less_mask))
439 else if (node_distance(nid, node) < best) {
440 best = node_distance(nid, node);
441 bestnode = node;
443 anynode = node;
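
The search at lines 434-443 prefers the closest node that has memory and remembers any node with memory as a fallback. A self-contained sketch of that selection; node_distance and the node masks are simplified stand-ins, not the kernel interfaces:

    #include <limits.h>

    #define MAX_NUMNODES 8
    #define NUMA_NO_NODE (-1)

    static int memoryless[MAX_NUMNODES];                  /* 1 = no local memory */
    static int node_distance[MAX_NUMNODES][MAX_NUMNODES];

    static int pick_alloc_node(int nid)
    {
            int best = INT_MAX, bestnode = NUMA_NO_NODE, node, anynode = 0;

            for (node = 0; node < MAX_NUMNODES; node++) {
                    if (memoryless[node])
                            continue;               /* can't allocate here   */
                    if (node_distance[nid][node] < best) {
                            best = node_distance[nid][node];
                            bestnode = node;        /* closest node so far   */
                    }
                    anynode = node;                 /* any node with memory  */
            }
            return bestnode == NUMA_NO_NODE ? anynode : bestnode;
    }
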
469 int node;
471 for_each_node_mask(node, memory_less_mask) {
472 pernodesize = compute_pernodesize(node);
473 pernode = memory_less_node_alloc(node, pernodesize);
474 fill_pernode(node, __pa(pernode), pernodesize);
484 * allocate the per-cpu and per-node structures.
488 int node;
494 printk(KERN_ERR "node info missing!\n");
507 for_each_online_node(node)
508 if (mem_data[node].min_pfn)
509 node_clear(node, memory_less_mask);
543 * call_pernode_memory - use SRAT to call callback functions with node info
549 * out to which node a block of memory belongs. Ignore memory that we cannot
569 /* No SRAT table, so assume one node (node 0) */
591 * paging_init() sets up the page tables for each node of the system and frees
625 int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
628 return vmemmap_populate_basepages(start, end, node, NULL);