Lines Matching defs:node
131 DEFINE_PER_CPU(int, _numa_mem_); /* Kernel "local memory" node */
149 * Array of node states.
608 pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n",
834 * zone/node ids for pages that could never merge.
1575 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
1914 /* Initialise remaining memory on a node */
1926 /* Bind memory initialisation thread to a local node if possible */
1984 pr_info("node %d deferred pages initialised in %ums\n",
2549 * likelihood of future fallbacks. Wake kswapd now as the node
3855 * want to get it from a node that is within its dirty
3856 * limit, such that no single node holds more than its
3858 * The dirty limits take into account the node's
3864 * exceed the per-node dirty limit in the slowpath
3887 * If moving to a remote node, retry but allow
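Lines 3855-3887 describe the per-node dirty throttling in the allocator's zonelist walk: prefer nodes still under their share of the dirty limit, and only drop that constraint when the alternative is spilling to a remote node. A simplified sketch of that check follows; node_dirty_ok() is the real predicate (mm/page-writeback.c), but the surrounding helper is hypothetical, not the kernel's actual loop.

    #include <linux/mmzone.h>
    #include <linux/writeback.h>

    /* Illustrative: walk the candidate zones and skip any whose node is
     * already over its share of the global dirty limit, so dirty pages are
     * spread across nodes rather than piling up on the local one. */
    static struct zone *pick_zone_for_dirty_page(struct zonelist *zonelist,
                                                 enum zone_type highest_zoneidx,
                                                 nodemask_t *nodemask)
    {
            struct zoneref *z;
            struct zone *zone;

            for_each_zone_zonelist_nodemask(zone, z, zonelist,
                                            highest_zoneidx, nodemask) {
                    if (!node_dirty_ok(zone->zone_pgdat))
                            continue;
                    return zone;
            }

            /* Nothing within its dirty limit: the caller may retry without
             * the constraint rather than move to a remote node (cf. 3887). */
            return NULL;
    }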
4092 * The OOM killer may not free memory on a specific node.
4973 * to the current task context. It means that any node ok.
5333 * pages on a node.
5334 * @nid: the preferred node ID where memory should be allocated
5338 * Like alloc_pages_exact(), but try to allocate on node nid first before falling
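A minimal usage sketch for alloc_pages_exact_nid() as documented above; the wrapper name, size handling and GFP flags are illustrative, not from the source:

    #include <linux/gfp.h>
    #include <linux/init.h>
    #include <linux/topology.h>

    /* Illustrative: page-exact, physically contiguous buffer, preferring the
     * caller's local node but allowed to fall back to other nodes. The real
     * allocator is annotated __meminit, so keep callers in boot or
     * memory-init paths. */
    static void * __init alloc_local_exact(size_t size)
    {
            return alloc_pages_exact_nid(numa_node_id(), size,
                                         GFP_KERNEL | __GFP_ZERO);
    }

Buffers obtained this way are released with free_pages_exact(buf, size), as with plain alloc_pages_exact().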
5393 /* Just pick one node, since fallback list is circular */
5522 * Determine whether the node should be displayed or not, depending on whether
5531 * no node mask - aka implicit memory numa policy. Do not bother with
5783 * Add all populated zones of a node to the zonelist.
5838 * find_next_best_node - find the next node that should appear in a given node's fallback list
5839 * @node: node whose fallback list we're appending
5842 * We use a number of factors to determine which is the next node that should
5843 * appear on a given node's fallback list. The node should not have appeared
5844 * already in @node's fallback list, and it should be the next closest node
5846 * from each node to each node in the system), and should also prefer nodes
5850 * Return: node id of the found node or %NUMA_NO_NODE if no node is found.
5852 static int find_next_best_node(int node, nodemask_t *used_node_mask)
5858 /* Use the local node if we haven't already */
5859 if (!node_isset(node, *used_node_mask)) {
5860 node_set(node, *used_node_mask);
5861 return node;
5866 /* Don't want a node to appear more than once */
5871 val = node_distance(node, n);
5873 /* Penalize nodes under us ("prefer the next node") */
5874 val += (n < node);
5880 /* Slight preference for less loaded node */
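Pieced together, the kernel-doc (5838-5850) and body fragments (5852-5880) describe a distance-minimising scan over the not-yet-used nodes. Below is a simplified sketch of that selection, keeping only the distance and "prefer the next node" heuristics; the real function also weighs CPU locality and the node_load balancing term:

    #include <linux/limits.h>
    #include <linux/nodemask.h>
    #include <linux/topology.h>

    /* Simplified: return the nearest online node not yet in @used, or
     * NUMA_NO_NODE once every online node has been consumed. */
    static int next_best_node(int node, nodemask_t *used)
    {
            int n, best_node = NUMA_NO_NODE;
            int best_val = INT_MAX;

            /* Use the local node first if it has not been picked yet. */
            if (!node_isset(node, *used)) {
                    node_set(node, *used);
                    return node;
            }

            for_each_online_node(n) {
                    int val;

                    if (node_isset(n, *used))       /* no node twice */
                            continue;

                    val = node_distance(node, n);
                    val += (n < node);              /* prefer the next node */

                    if (val < best_val) {
                            best_val = val;
                            best_node = n;
                    }
            }

            if (best_node != NUMA_NO_NODE)
                    node_set(best_node, *used);

            return best_node;
    }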
5898 * Build zonelists ordered by node and zones within node.
5913 pg_data_t *node = NODE_DATA(node_order[i]);
5915 nr_zones = build_zonerefs_node(node, zonerefs);
5940 * exhausted, but results in overflowing to remote node while memory
5947 int node, load, nr_nodes = 0;
5957 while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
5959 * We don't want to pressure a particular node.
5960 * So adding penalty to the first node in same
5963 if (node_distance(local_node, node) !=
5965 node_load[node] = load;
5967 node_order[nr_nodes++] = node;
5968 prev_node = node;
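The fragments from 5947-5968 then assemble the fallback order by calling the selector repeatedly until it runs out of nodes; a sketch of that loop, reusing the hypothetical next_best_node() helper from the earlier sketch and omitting the node_load penalty applied to equidistant nodes:

    /* Illustrative: fill node_order[] (sized MAX_NUMNODES) with the local
     * node first, then ever more distant nodes, as build_zonelists() does on
     * NUMA configurations. */
    static int order_fallback_nodes(int local_node, int node_order[])
    {
            nodemask_t used_mask;
            int node, nr_nodes = 0;

            nodes_clear(used_mask);
            while ((node = next_best_node(local_node, &used_mask)) >= 0)
                    node_order[nr_nodes++] = node;

            return nr_nodes;
    }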
5978 * Return node id of node used for "local" allocations.
5979 * I.e., first node id of first zone in arg node's generic zonelist.
5983 int local_memory_node(int node)
5987 z = first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
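local_memory_node() (5983) answers "which node actually backs allocations made local to this node id": it returns the node of the first zone in the argument node's GFP_KERNEL zonelist, which differs from the argument only on systems with memoryless nodes. A hedged usage sketch:

    #include <linux/mmzone.h>
    #include <linux/topology.h>

    /* Illustrative: on a system with memoryless nodes, the CPU's own node may
     * have no RAM; local_memory_node() maps it to the node where "local"
     * allocations will really land. */
    static int nearest_memory_node(void)
    {
            return local_memory_node(numa_node_id());
    }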
6000 int node, local_node;
6013 * We don't want to pressure a particular node, so when
6014 * building the zones for node N, we make sure that the
6016 * node N+1 (modulo N)
6018 for (node = local_node + 1; node < MAX_NUMNODES; node++) {
6019 if (!node_online(node))
6021 nr_zones = build_zonerefs_node(NODE_DATA(node), zonerefs);
6024 for (node = 0; node < local_node; node++) {
6025 if (!node_online(node))
6027 nr_zones = build_zonerefs_node(NODE_DATA(node), zonerefs);
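In the non-NUMA build_zonelists() (6000-6027) the fallback order is purely positional: node N first, then N+1 .. MAX_NUMNODES-1, then 0 .. N-1, skipping offline nodes. A sketch of that rotation (the helper name is illustrative):

    #include <linux/nodemask.h>

    /* Illustrative: round-robin node order starting at @local_node, so that
     * node N overflows to N+1, N+2, ... before wrapping around to 0 .. N-1. */
    static int order_nodes_round_robin(int local_node, int node_order[])
    {
            int node, nr = 0;

            node_order[nr++] = local_node;

            for (node = local_node + 1; node < MAX_NUMNODES; node++) {
                    if (!node_online(node))
                            continue;
                    node_order[nr++] = node;
            }
            for (node = 0; node < local_node; node++) {
                    if (!node_online(node))
                            continue;
                    node_order[nr++] = node;
            }

            return nr;
    }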
6083 * This node is hotadded and no memory is yet present. So just
6097 * We now know the "local memory node" for each node--
6098 * i.e., the node of the first zone in the generic zonelist.
6102 * node/memory hotplug, we'll fixup all on-line cpus.
6364 * - zone and node links point to zone and node that span the page if the
6366 * - zone and node links point to adjacent zone/node if the hole falls on
6368 * zone/node above the hole except for the trailing pages in the last
6369 * section that will be appended to the zone/node below.
6373 int zone, int node)
6384 __init_single_page(pfn_to_page(pfn), pfn, zone, node);
6390 pr_info("On node %d, zone %s: %lld pages in unavailable ranges",
6391 node, zone_names[zone], pgcnt);
6396 int zone, int node)
6432 struct pglist_data *node = NODE_DATA(nid);
6435 struct zone *zone = node->node_zones + j;
6451 * node.
6670 "Initialising map node %d zone %lu pfns %lu -> %lu\n",
6680 * get_pfn_range_for_nid - Return the start and end page frames for a node
6682 * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
6683 * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
6685 * It returns the start and end page frame of a node based on information
6686 * provided by memblock_set_node(). If called for a node
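A short usage sketch for get_pfn_range_for_nid() as documented above; the pr_info() wrapper is illustrative:

    #include <linux/init.h>
    #include <linux/mm.h>
    #include <linux/printk.h>

    static void __init report_node_span(int nid)
    {
            unsigned long start_pfn, end_pfn;

            /* The range comes from memblock_set_node() data; per the
             * kernel-doc, a node with no available memory reports 0/0. */
            get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);

            pr_info("node %d spans PFNs [%#lx, %#lx)\n",
                    nid, start_pfn, end_pfn);
    }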
6710 * assumption is made that zones within a node are ordered in monotonic
6733 * in each node depending on the size of each node and how evenly kernelcore
6735 * provided by the architecture for a given node by using the end of the
6737 * zones within a node are in order of monotonic increases memory addresses
6746 /* Only adjust if ZONE_MOVABLE is on this node */
6767 * Return the number of pages a zone spans in a node, including holes
6779 /* When hotadd a new node from cpu_up(), the node should be empty */
6790 /* Check that this node has pages within the zone's required range */
6794 /* Move the zone boundaries inside the node if necessary */
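Lines 6790-6794 clamp the architecture-provided zone limits to the node's own PFN range before counting spanned pages; stripped of the ZONE_MOVABLE adjustment, the arithmetic is essentially the intersection below (helper name illustrative):

    #include <linux/minmax.h>

    /* Illustrative: intersect the zone range [zone_low, zone_high) with the
     * node range [node_start_pfn, node_end_pfn) and count the pages left,
     * treating an empty intersection as an empty zone on this node. */
    static unsigned long pages_spanned_in_node(unsigned long node_start_pfn,
                                               unsigned long node_end_pfn,
                                               unsigned long zone_low,
                                               unsigned long zone_high)
    {
            unsigned long start = max(zone_low, node_start_pfn);
            unsigned long end = min(zone_high, node_end_pfn);

            return end > start ? end - start : 0;
    }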
6803 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
6835 /* Return the number of page frames in holes in a zone on a node */
6846 /* When hotadd a new node from cpu_up(), the node should be empty */
6924 printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id,
6961 panic("Failed to allocate %ld bytes for zone %s pageblock flags on node %d\n",
7085 * - init all zones belonging to this node
7203 panic("Failed to allocate %ld bytes for node %d memory map\n",
7207 pr_debug("%s: node %d, pgdat %08lx, node_mem_map %08lx\n",
7212 * With no DISCONTIG, the global mem_map is just set as node 0's
7249 pr_info("Initmem setup node %d [mem %#018Lx-%#018Lx]\n", nid,
7267 * Figure out the number of possible node ids.
7281 * This function should be called after node map is populated and sorted.
7287 * nodes are shifted by 256MiB, 256MiB. Note that if only the last node is
7292 * populated node map.
7295 * requirement (single node).
7314 * too coarse to separate the current node from the last.
7361 * Find the PFN the Movable zone begins in each node. Kernel memory
7481 * Recalculate kernelcore_node if the division per node
7491 * 0, the rest of the node is usable by ZONE_MOVABLE
7495 /* Go through each range of PFNs within this node */
7541 * break if the kernelcore for this node has been
7554 * less node in the count. This will push zone_movable_pfn[nid] further
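The kernelcore distribution sketched in 7361-7554 starts from an even split of the requested non-movable memory over the nodes that have usable memory, and re-runs the split with one node fewer whenever a node cannot absorb its full share (which is what pushes zone_movable_pfn[nid] further up, per 7554). The core arithmetic amounts to:

    /* Illustrative: even per-node share of the requested kernelcore. The
     * remainder left by nodes that fill up entirely is redistributed by
     * re-running the split with a smaller usable_nodes count. */
    static unsigned long kernelcore_per_node(unsigned long required_kernelcore,
                                             int usable_nodes)
    {
            if (usable_nodes <= 0)
                    return 0;

            return required_kernelcore / usable_nodes;
    }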
7580 /* Any regular or high memory on that node ? */
7610 * This will call free_area_init_node() for each active node in the system.
7612 * zone in each node and their holes is calculated. If the maximum PFN
7650 /* Find the PFNs that ZONE_MOVABLE begins at in each node */
7671 /* Print out the PFNs ZONE_MOVABLE begins at in each node */
7672 pr_info("Movable zone start for each node\n");
7680 * Print out the early node map, and initialize the
7684 pr_info("Early memory node ranges\n");
7686 pr_info(" node %3d: [mem %#018Lx-%#018Lx]\n", nid,
7692 /* Initialise every node */
7699 /* Any memory on that node */
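The free_area_init() block (7610 onwards) receives only the highest PFN of each zone and works out everything per node itself: spanned and absent pages per zone, the ZONE_MOVABLE start PFNs, the early node range printout, and finally the per-node initialisation. A sketch of how an architecture might hand it the zone limits, assuming a kernel where free_area_init() takes the max_zone_pfn array (the wrapper and zone choices below are illustrative, not any particular architecture's code):

    #include <linux/init.h>
    #include <linux/mm.h>
    #include <linux/mmzone.h>

    static void __init my_zone_limits_init(unsigned long max_low_pfn,
                                           unsigned long max_pfn)
    {
            unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0 };

            /* Highest PFN each zone may reach; unused zones stay at zero. */
            max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
    #ifdef CONFIG_HIGHMEM
            max_zone_pfns[ZONE_HIGHMEM] = max_pfn;
    #endif

            /* Calculates per-node, per-zone spanned/absent pages, prints the
             * early node ranges (7684-7686) and initialises every node (7692). */
            free_area_init(max_zone_pfns);
    }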
8838 * @nid: Target node