Lines matching defs:node (from mm/hugetlb.c)
41 #include <linux/node.h>
1107 int node = NUMA_NO_NODE;
1119 * no need to ask again on the same node. Pool is node rather than
1122 if (zone_to_nid(zone) == node)
1124 node = zone_to_nid(zone);
1126 page = dequeue_huge_page_node_exact(h, node);
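
The fragments above (file lines 1107-1126) come from the dequeue path: it walks a zonelist, but the free-page pool is kept per node, so any zone whose node has already been asked is skipped. A minimal userspace sketch of that "don't ask the same node twice in a row" pattern follows; the zone array and pool_has_free_page() are invented for illustration and are not kernel APIs.

#include <stdio.h>

#define NUMA_NO_NODE (-1)

/* hypothetical stand-ins for a zone and per-node pool state */
struct zone { int nid; const char *name; };

static int pool_has_free_page(int nid)
{
        /* pretend only node 1 has a free huge page */
        return nid == 1;
}

int main(void)
{
        /* a zonelist typically lists several zones of the same node in a row */
        struct zone zonelist[] = {
                { 0, "Normal-0" }, { 0, "DMA32-0" },
                { 1, "Normal-1" }, { 1, "DMA32-1" },
        };
        int node = NUMA_NO_NODE;

        for (unsigned i = 0; i < sizeof(zonelist) / sizeof(zonelist[0]); i++) {
                /* pool is per node: if we already asked this node, skip the zone */
                if (zonelist[i].nid == node)
                        continue;
                node = zonelist[i].nid;

                if (pool_has_free_page(node)) {
                        printf("dequeued a page from node %d (zone %s)\n",
                               node, zonelist[i].name);
                        return 0;
                }
        }
        printf("no free huge page on any allowed node\n");
        return 1;
}
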
1180 * node for alloc or free.
1198 * returns the previously saved node ["this node"] from which to
1200 * next node from which to allocate, handling wrap at end of node
1218 * node ["this node"] from which to free a huge page. Advance the
1219 * next node id whether or not we find a free huge page to free so
1220 * that the next attempt to free addresses the next node.
1234 #define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask) \
1237 ((node = hstate_next_node_to_alloc(hs, mask)) || 1); \
1240 #define for_each_node_mask_to_free(hs, nr_nodes, node, mask) \
1243 ((node = hstate_next_node_to_free(hs, mask)) || 1); \
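
File lines 1180-1243 describe the saved "next node to alloc/free" state and the two iterator macros built on it; each macro makes at most one pass over the node mask (the `|| 1` keeps the loop condition true even when the chosen node id is 0, and the pass is bounded by nr_nodes). A rough userspace sketch of the same wrap-around bookkeeping, using a toy allowed[] array instead of a nodemask_t and hypothetical helper names, not the kernel functions themselves:

#include <stdio.h>

#define MAX_NODES 8

/* toy node mask: allowed[n] != 0 means node n may be used */
static int allowed[MAX_NODES] = { 1, 0, 1, 1, 0, 0, 0, 0 };
static int nr_nodes_in_mask = 3;

/* saved starting point, analogous to a per-hstate next_nid_to_alloc */
static int next_nid_to_alloc;

/* advance to the next allowed node, wrapping at the end of the mask */
static int next_node_allowed(int nid)
{
        do {
                nid = (nid + 1) % MAX_NODES;
        } while (!allowed[nid]);
        return nid;
}

/* return the saved node ("this node"), then save its successor */
static int next_alloc_node(void)
{
        int nid = next_nid_to_alloc;

        if (!allowed[nid])
                nid = next_node_allowed(nid);
        next_nid_to_alloc = next_node_allowed(nid);
        return nid;
}

/* loose analogue of for_each_node_mask_to_alloc(): at most one pass */
#define for_each_alloc_node(nr, node)                           \
        for (nr = nr_nodes_in_mask;                             \
             nr > 0 && ((node = next_alloc_node()) >= 0);       \
             nr--)

int main(void)
{
        int nr, node;

        /* a single attempt takes "this node" and advances the saved pointer */
        printf("single attempt: node %d\n", next_alloc_node());

        /* a full pass then continues round-robin from the saved node */
        for_each_alloc_node(nr, node)
                printf("pass: try node %d\n", node);
        return 0;
}
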
1292 int node;
1302 for_each_node_mask(node, *nodemask) {
1303 if (node == nid || !hugetlb_cma[node])
1306 page = cma_alloc(hugetlb_cma[node], nr_pages,
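
Around file lines 1292-1306, the gigantic-page path tries the CMA area on the requested node first and then falls back to the other nodes in the mask that also have a hugetlb CMA area. A hedged sketch of that fallback order; has_cma[] and try_cma_node() are invented for illustration, and cma_alloc() itself is not modelled.

#include <stdio.h>
#include <stdbool.h>

#define MAX_NODES 4

/* pretend only nodes 1 and 3 have a hugetlb CMA area, and only node 3 has room */
static bool has_cma[MAX_NODES]      = { false, true, false, true };
static bool cma_has_room[MAX_NODES] = { false, false, false, true };

static bool try_cma_node(int node)
{
        return cma_has_room[node];
}

int main(void)
{
        int nid = 1;            /* preferred node */
        int got = -1;

        /* try the preferred node's CMA area first */
        if (has_cma[nid] && try_cma_node(nid))
                got = nid;

        /* then fall back to the other nodes that have a CMA area */
        for (int node = 0; got < 0 && node < MAX_NODES; node++) {
                if (node == nid || !has_cma[node])
                        continue;
                if (try_cma_node(node))
                        got = node;
        }

        if (got >= 0)
                printf("allocated gigantic page from CMA on node %d\n", got);
        else
                printf("CMA allocation failed on all nodes\n");
        return 0;
}
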
1526 struct llist_node *node;
1529 node = llist_del_all(&hpage_freelist);
1531 while (node) {
1532 page = container_of((struct address_space **)node,
1534 node = node->next;
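
File lines 1526-1534 drain a lock-free llist of pages queued for deferred freeing and recover each page with container_of() (in the kernel the llist node overlays page->mapping, hence the address_space cast). The drain-then-walk idiom, reduced to standalone C with a local container_of and a plain struct member as the link:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct llist_node { struct llist_node *next; };

struct fake_page {
        int id;
        struct llist_node free_link;    /* link reused while the page is queued */
};

int main(void)
{
        struct fake_page pages[3] = { { .id = 0 }, { .id = 1 }, { .id = 2 } };
        struct llist_node *head = NULL, *node;

        /* push all pages (newest first), as llist_add() would */
        for (int i = 0; i < 3; i++) {
                pages[i].free_link.next = head;
                head = &pages[i].free_link;
        }

        /* "delete all" in one step, then walk the detached list */
        node = head;
        head = NULL;
        while (node) {
                struct fake_page *page =
                        container_of(node, struct fake_page, free_link);

                node = node->next;      /* read next before releasing the entry */
                printf("freeing page %d\n", page->id);
        }
        return 0;
}
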
1677 * failed, do not continue to try hard on the same node. Use the
1738 * Allocates a fresh page to the hugetlb allocator pool in the node interleaved
1745 int nr_nodes, node;
1748 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
1749 page = alloc_fresh_huge_page(h, gfp_mask, node, nodes_allowed,
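
The alloc_pool_huge_page() fragments (file lines 1738-1749) add one fresh page to the pool, trying successive nodes and stopping at the first success so that repeated calls interleave allocations across the mask. A compact sketch of that "first success wins, remember where to continue" behaviour; alloc_fresh_page_on() is a fake stand-in for the real allocation.

#include <stdio.h>
#include <stdbool.h>

#define MAX_NODES 4

/* pretend allocation succeeds only on node 2 */
static bool alloc_fresh_page_on(int node)
{
        return node == 2;
}

int main(void)
{
        static int next_nid;    /* where the next call should start */
        int allocated = -1;

        /* one attempt per node, wrapping, stop on the first success */
        for (int tries = 0; tries < MAX_NODES; tries++) {
                int node = (next_nid + tries) % MAX_NODES;

                if (alloc_fresh_page_on(node)) {
                        allocated = node;
                        next_nid = (node + 1) % MAX_NODES;  /* interleave next call */
                        break;
                }
        }

        if (allocated >= 0)
                printf("added a fresh pool page on node %d\n", allocated);
        else
                printf("allocation failed on every node\n");
        return 0;
}
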
1764 * Free huge page from pool from next node to free.
1772 int nr_nodes, node;
1775 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
1780 if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
1781 !list_empty(&h->hugepage_freelists[node])) {
1783 list_entry(h->hugepage_freelists[node].next,
1787 h->free_huge_pages_node[node]--;
1790 h->surplus_huge_pages_node[node]--;
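
File lines 1764-1790 free one pool page from the next node to free, preferring nodes that hold surplus pages when surplus accounting is requested, and decrementing the global and per-node counters together. A toy version of that selection and bookkeeping; the counter names echo the fragments, and the round-robin node order is simplified to a linear scan.

#include <stdio.h>
#include <stdbool.h>

#define MAX_NODES 4

static int free_huge_pages_node[MAX_NODES]    = { 2, 0, 1, 3 };
static int surplus_huge_pages_node[MAX_NODES] = { 0, 0, 1, 0 };
static int free_huge_pages    = 6;
static int surplus_huge_pages = 1;

/* free one pool page; if acct_surplus, only from a node holding surplus pages */
static int free_pool_page(bool acct_surplus)
{
        for (int node = 0; node < MAX_NODES; node++) {
                if (acct_surplus && !surplus_huge_pages_node[node])
                        continue;
                if (!free_huge_pages_node[node])
                        continue;

                /* keep global and per-node counters consistent */
                free_huge_pages--;
                free_huge_pages_node[node]--;
                if (acct_surplus) {
                        surplus_huge_pages--;
                        surplus_huge_pages_node[node]--;
                }
                return node;
        }
        return -1;
}

int main(void)
{
        printf("freed surplus page from node %d\n", free_pool_page(true));
        printf("freed page from node %d\n", free_pool_page(false));
        printf("free %d, surplus %d\n", free_huge_pages, surplus_huge_pages);
        return 0;
}
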
2011 int node;
2014 node = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
2015 page = alloc_huge_page_nodemask(h, node, nodemask, gfp_mask);
2469 int nr_nodes, node;
2471 for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {
2476 0, MEMBLOCK_ALLOC_ACCESSIBLE, node);
2534 * Bit mask controlling how hard we retry per-node allocations.
2546 /* bit mask controlling how hard we retry per-node allocations */
2636 * Increment or decrement surplus_huge_pages. Keep node-specific counters
2643 int nr_nodes, node;
2648 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
2649 if (h->surplus_huge_pages_node[node])
2653 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
2654 if (h->surplus_huge_pages_node[node] <
2655 h->nr_huge_pages_node[node])
2663 h->surplus_huge_pages_node[node] += delta;
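
The adjust_pool_surplus() fragments (file lines 2636-2663) move the surplus count up or down by one while keeping the per-node counters sane: a decrement needs a node that actually holds surplus pages, an increment needs a node whose surplus is still below its total page count. A sketch of just that node-selection rule, assuming simple per-node arrays and a linear scan in place of the round-robin iterators:

#include <stdio.h>

#define MAX_NODES 4

static int nr_huge_pages_node[MAX_NODES]      = { 4, 2, 3, 0 };
static int surplus_huge_pages_node[MAX_NODES] = { 0, 2, 1, 0 };
static int surplus_huge_pages = 3;

/* apply delta (+1 or -1) to the surplus count on a suitable node */
static int adjust_surplus(int delta)
{
        for (int node = 0; node < MAX_NODES; node++) {
                if (delta < 0 && !surplus_huge_pages_node[node])
                        continue;       /* nothing to convert back here */
                if (delta > 0 &&
                    surplus_huge_pages_node[node] >= nr_huge_pages_node[node])
                        continue;       /* every page here is already surplus */

                surplus_huge_pages += delta;
                surplus_huge_pages_node[node] += delta;
                return node;
        }
        return -1;      /* no suitable node */
}

int main(void)
{
        printf("surplus-- on node %d\n", adjust_surplus(-1));
        printf("surplus++ on node %d\n", adjust_surplus(+1));
        printf("total surplus now %d\n", surplus_huge_pages);
        return 0;
}
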
2675 * Bit mask controlling how hard we retry per-node allocations.
2687 * Check for a node specific request.
2688 * Changing node specific huge page count may require a corresponding
2689 * change to the global count. In any case, the passed node mask
2690 * (nodes_allowed) will restrict alloc/free to the specified node.
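
File lines 2687-2690 note that a node-specific request to change the huge page count has to be translated into a change of the global count, with nodes_allowed restricted to that node. The arithmetic is small enough to show directly: the new global target is the current global count, minus the node's current pages, plus the pages requested for that node. A sketch assuming a per-node array and a NUMA_NO_NODE sentinel:

#include <stdio.h>

#define MAX_NODES    4
#define NUMA_NO_NODE (-1)

static unsigned long nr_huge_pages_node[MAX_NODES] = { 10, 4, 6, 0 };
static unsigned long nr_huge_pages = 20;

/* translate a per-node request into the global target count */
static unsigned long global_target(int nid, unsigned long count)
{
        if (nid == NUMA_NO_NODE)
                return count;   /* request already refers to the global pool */

        /* keep every other node as-is, set this node to 'count' */
        return nr_huge_pages - nr_huge_pages_node[nid] + count;
}

int main(void)
{
        /* ask for 8 pages on node 1 (currently 4): global target goes 20 -> 24 */
        printf("global target = %lu\n", global_target(1, 8));

        /* a global request passes straight through */
        printf("global target = %lu\n", global_target(NUMA_NO_NODE, 16));
        return 0;
}
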
3049 * node_hstate/s - associate per node hstate attributes, via their kobjects,
3050 * with node devices in node_devices[] using a parallel array. The array
3051 * index of a node device or _hstate == node id.
3052 * This is here to avoid any static dependency of the node device driver, in
3062 * A subset of global hstate attributes for node devices
3076 * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj.
3077 * Returns node id via non-NULL nidp.
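
File lines 3049-3077 describe the per-node hstate attribute kobjects: node_hstates[] is a parallel array indexed by node id, and kobj_to_node_hstate() walks it to map an attribute kobject back to its hstate and node. The reverse lookup is easy to model in plain C with opaque pointers as stand-in kobjects; the struct and function names below are borrowed loosely for illustration and are not the kernel definitions.

#include <stdio.h>

#define MAX_NODES       2
#define HUGE_MAX_HSTATE 2

/* stand-in "kobject": any unique address will do for the lookup */
struct kobj { char dummy; };

struct node_hstate {
        struct kobj *hstate_kobjs[HUGE_MAX_HSTATE];
};

/* parallel array: index == node id */
static struct node_hstate node_hstates[MAX_NODES];

/* map an attribute kobject back to (hstate index, node id); -1 if unknown */
static int kobj_to_hstate(struct kobj *kobj, int *nidp)
{
        for (int nid = 0; nid < MAX_NODES; nid++)
                for (int i = 0; i < HUGE_MAX_HSTATE; i++)
                        if (node_hstates[nid].hstate_kobjs[i] == kobj) {
                                *nidp = nid;
                                return i;
                        }
        return -1;
}

int main(void)
{
        static struct kobj objs[MAX_NODES][HUGE_MAX_HSTATE];
        int nid, hs;

        /* "register" one kobject per (node, hstate) pair */
        for (nid = 0; nid < MAX_NODES; nid++)
                for (hs = 0; hs < HUGE_MAX_HSTATE; hs++)
                        node_hstates[nid].hstate_kobjs[hs] = &objs[nid][hs];

        hs = kobj_to_hstate(&objs[1][0], &nid);
        printf("kobject belongs to hstate %d on node %d\n", hs, nid);
        return 0;
}
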
3099 * Unregister hstate attributes from a single node device.
3102 static void hugetlb_unregister_node(struct node *node)
3105 struct node_hstate *nhs = &node_hstates[node->dev.id];
3124 * Register hstate attributes for a single node device.
3127 static void hugetlb_register_node(struct node *node)
3130 struct node_hstate *nhs = &node_hstates[node->dev.id];
3137 &node->dev.kobj);
3146 pr_err("HugeTLB: Unable to add hstate %s for node %d\n",
3147 h->name, node->dev.id);
3148 hugetlb_unregister_node(node);
3155 * hugetlb init time: register hstate attributes for all registered node
3164 struct node *node = node_devices[nid];
3165 if (node->dev.id == nid)
3166 hugetlb_register_node(node);
3170 * Let the node device driver know we're here so it can
3171 * [un]register hstate attributes on node hotplug.
3436 int node;
3444 for_each_node_mask(node, cpuset_current_mems_allowed) {
3446 (mpol_allowed && node_isset(node, *mpol_allowed)))
3447 nr += array[node];
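
File lines 3436-3447 sum a per-node counter over the nodes the task may use: a node contributes if it is in cpuset_current_mems_allowed and, when a mempolicy mask is supplied, also in that mask. Reduced to a userspace sketch with plain boolean arrays in place of nodemasks:

#include <stdio.h>
#include <stdbool.h>

#define MAX_NODES 4

static unsigned long per_node_count[MAX_NODES] = { 5, 7, 3, 2 };

/* sum counts over nodes allowed by the cpuset mask and an optional policy mask */
static unsigned long allowed_sum(const bool *cpuset_allowed,
                                 const bool *mpol_allowed)
{
        unsigned long nr = 0;

        for (int node = 0; node < MAX_NODES; node++) {
                if (!cpuset_allowed[node])
                        continue;
                if (mpol_allowed && !mpol_allowed[node])
                        continue;
                nr += per_node_count[node];
        }
        return nr;
}

int main(void)
{
        bool cpuset[MAX_NODES] = { true, true, false, true };
        bool mpol[MAX_NODES]   = { false, true, true, true };

        printf("cpuset only:     %lu\n", allowed_sum(cpuset, NULL)); /* 5+7+2 */
        printf("cpuset and mpol: %lu\n", allowed_sum(cpuset, mpol)); /* 7+2   */
        return 0;
}
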
3636 * task or memory node can be dynamically moved between cpusets.
3645 * also determines from which node the kernel will allocate memory
5717 * Also note that we have to transfer the per-node surplus state
5719 * the per-node's.
5769 pr_info("hugetlb_cma: reserve %lu MiB, up to %lu MiB per node\n",
5785 pr_warn("hugetlb_cma: reservation failed: err %d, node %d",
5791 pr_info("hugetlb_cma: reserved %lu MiB on node %d\n",
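
The hugetlb_cma messages at file lines 5769-5791 report the total CMA reservation, the per-node ceiling, and what was actually reserved on each node. The split itself is simple arithmetic: the requested size is divided across the nodes with memory and each node's share is rounded to whole gigantic pages, roughly as sketched below; the sizes, node count, and rounding granularity here are made up for the example.

#include <stdio.h>

int main(void)
{
        unsigned long total_mib    = 6144;   /* hugetlb_cma= request, in MiB */
        unsigned long gigantic_mib = 1024;   /* assumed gigantic page size   */
        int nr_nodes_with_memory   = 3;

        /* per-node share: request divided over the nodes, rounded up */
        unsigned long per_node =
                (total_mib + nr_nodes_with_memory - 1) / nr_nodes_with_memory;

        printf("hugetlb_cma: reserve %lu MiB, up to %lu MiB per node\n",
               total_mib, per_node);

        unsigned long reserved = 0;
        for (int nid = 0; nid < nr_nodes_with_memory && reserved < total_mib; nid++) {
                unsigned long size = per_node;

                if (size > total_mib - reserved)
                        size = total_mib - reserved;    /* don't overshoot */
                /* round to whole gigantic pages before reserving */
                size = ((size + gigantic_mib - 1) / gigantic_mib) * gigantic_mib;

                reserved += size;
                printf("hugetlb_cma: reserved %lu MiB on node %d\n", size, nid);
        }
        return 0;
}
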