Lines matching refs: node

46 #include <linux/node.h>
1362 int node = NUMA_NO_NODE;
1374 * no need to ask again on the same node. Pool is node rather than
1377 if (zone_to_nid(zone) == node)
1379 node = zone_to_nid(zone);
1381 folio = dequeue_hugetlb_folio_node_exact(h, node);
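The fragment at 1362-1381 is the dequeue path: a zonelist can list several zones that live on the same NUMA node, and because the free huge page pool is kept per node rather than per zone, the loop remembers the last node it asked and skips zones that map back to it. Below is a minimal user-space sketch of that dedup only; the zone table and per-node pool counts are invented, and dequeue_node_exact() merely stands in for dequeue_hugetlb_folio_node_exact().

    /*
     * User-space sketch of the "don't ask the same node twice" dedup used
     * while walking a zonelist.  Everything here is a toy stand-in.
     */
    #include <stdio.h>

    #define NUMA_NO_NODE (-1)

    struct zone {
        const char *name;
        int nid;                    /* node this zone belongs to */
    };

    static int pool_free[3] = { 0, 0, 2 };   /* free huge pages per node */

    /* Stand-in for dequeue_hugetlb_folio_node_exact(): take one page off a node. */
    static int dequeue_node_exact(int nid)
    {
        if (pool_free[nid] > 0) {
            pool_free[nid]--;
            return 1;
        }
        return 0;
    }

    int main(void)
    {
        /* A zonelist often lists DMA32/Normal zones of the same node back to back. */
        struct zone zonelist[] = {
            { "node0/DMA32",  0 }, { "node0/Normal", 0 },
            { "node1/Normal", 1 },
            { "node2/Normal", 2 },
        };
        int node = NUMA_NO_NODE;
        unsigned int i;

        for (i = 0; i < sizeof(zonelist) / sizeof(zonelist[0]); i++) {
            /* no need to ask again on the same node: the pool is per node */
            if (zonelist[i].nid == node)
                continue;
            node = zonelist[i].nid;
            if (dequeue_node_exact(node)) {
                printf("dequeued a huge page from node %d (%s)\n",
                       node, zonelist[i].name);
                return 0;
            }
        }
        printf("no free huge page on any allowed node\n");
        return 1;
    }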
1451 * node for alloc or free.
1469 * returns the previously saved node ["this node"] from which to
1471 * next node from which to allocate, handling wrap at end of node
1489 * node ["this node"] from which to free a huge page. Advance the
1490 * next node id whether or not we find a free huge page to free so
1491 * that the next attempt to free addresses the next node.
1505 #define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask) \
1508 ((node = hstate_next_node_to_alloc(hs, mask)) || 1); \
1511 #define for_each_node_mask_to_free(hs, nr_nodes, node, mask) \
1514 ((node = hstate_next_node_to_free(hs, mask)) || 1); \
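Lines 1451-1514 describe hugetlb's node interleaving: the hstate keeps a saved "next node" cursor for allocation and another for freeing, each lookup hands back the saved node and advances the cursor with wrap-around over the allowed mask, and the for_each_node_mask_to_alloc()/..._to_free() macros simply run that lookup once per allowed node. The following is a user-space sketch of the same bookkeeping, not the real implementation: the nodemask is reduced to a plain bitmask, only the allocation cursor is modelled, and __builtin_popcountl() (a GCC/Clang builtin) stands in for nodes_weight().

    /*
     * User-space sketch of the saved-cursor round robin described above.
     * Names echo the kernel's but everything here is a toy model.
     */
    #include <stdio.h>

    #define MAX_NODES 8

    struct demo_hstate {
        int next_nid_to_alloc;          /* "this node": next node to allocate from */
    };

    /* First allowed node at or after nid, wrapping at the end of the mask. */
    static int next_node_allowed(int nid, unsigned long mask)
    {
        int i, n;

        for (i = 0; i < MAX_NODES; i++) {
            n = (nid + i) % MAX_NODES;
            if (mask & (1UL << n))
                return n;
        }
        return -1;                      /* empty mask */
    }

    /* Return the saved node, then advance the cursor past it for next time. */
    static int demo_next_node_to_alloc(struct demo_hstate *h, unsigned long mask)
    {
        int nid = next_node_allowed(h->next_nid_to_alloc, mask);

        h->next_nid_to_alloc = next_node_allowed(nid + 1, mask);
        return nid;
    }

    /* Visit each allowed node once, starting from the cursor (mirrors the macro). */
    #define for_each_node_to_alloc(h, nr, node, mask)                         \
        for (nr = __builtin_popcountl(mask);                                  \
             nr > 0 && ((node = demo_next_node_to_alloc(h, mask)) || 1);      \
             nr--)

    int main(void)
    {
        struct demo_hstate h = { .next_nid_to_alloc = 0 };
        unsigned long allowed = (1UL << 0) | (1UL << 2) | (1UL << 5);
        int nr, node, round;

        /* Two passes interleave over nodes 0, 2, 5 and pick up where they left off. */
        for (round = 0; round < 2; round++) {
            for_each_node_to_alloc(&h, nr, node, allowed)
                printf("round %d: try node %d\n", round, node);
        }
        return 0;
    }

The free-side cursor is symmetric, with its own saved node, and, as the comment at 1490 says, it advances whether or not a page was actually freed so that repeated frees keep rotating across nodes.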
1581 int node;
1591 for_each_node_mask(node, *nodemask) {
1592 if (node == nid || !hugetlb_cma[node])
1595 page = cma_alloc(hugetlb_cma[node], nr_pages,
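Lines 1581-1595 are the gigantic-page CMA path: allocation first tries the preferred node's CMA area, then falls back to every other node in the allowed mask, skipping the node already tried and nodes without an area. A user-space sketch follows, with a made-up per-node capacity array standing in for hugetlb_cma[] and cma_alloc().

    /*
     * User-space sketch of the per-node CMA fallback: preferred node first,
     * then every other allowed node that still has room.
     */
    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_NODES 4

    /* pages still available in each node's CMA area; 0 means no area / exhausted */
    static int hugetlb_cma_avail[MAX_NODES] = { 0, 0, 3, 1 };

    static bool cma_alloc_node(int node)
    {
        if (hugetlb_cma_avail[node] > 0) {
            hugetlb_cma_avail[node]--;
            return true;
        }
        return false;
    }

    static int alloc_gigantic_from_cma(int nid, unsigned long nodemask)
    {
        int node;

        if (cma_alloc_node(nid))
            return nid;                         /* preferred node worked */

        for (node = 0; node < MAX_NODES; node++) {
            if (node == nid || !(nodemask & (1UL << node)))
                continue;                       /* already tried, or not allowed */
            if (cma_alloc_node(node))
                return node;
        }
        return -1;
    }

    int main(void)
    {
        int node = alloc_gigantic_from_cma(0, 0xf);

        printf("gigantic page came from node %d\n", node);     /* node 2 here */
        return 0;
    }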
1819 struct llist_node *node;
1821 node = llist_del_all(&hpage_freelist);
1823 while (node) {
1827 page = container_of((struct address_space **)node,
1829 node = node->next;
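Lines 1819-1829 are the deferred-free worker: producers push pages onto a lock-free llist (the list node is overlaid on page->mapping, hence the address_space cast at 1827), the worker detaches the whole chain with llist_del_all() and walks it, recovering each page with container_of(). The sketch below keeps only that shape: the llist is a plain single-threaded stack, the page is a toy struct, and container_of() is a simplified macro.

    /*
     * User-space sketch of the detach-then-walk pattern: grab the whole list
     * in one step, then map each embedded node back to its containing object.
     */
    #include <stddef.h>
    #include <stdio.h>

    struct llist_node {
        struct llist_node *next;
    };

    struct llist_head {
        struct llist_node *first;
    };

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct demo_page {
        int id;
        struct llist_node free_node;    /* intrusive link, like the page->mapping reuse */
    };

    static void llist_add(struct llist_node *node, struct llist_head *head)
    {
        node->next = head->first;
        head->first = node;
    }

    /* Detach everything at once; the caller now owns the whole chain. */
    static struct llist_node *llist_del_all(struct llist_head *head)
    {
        struct llist_node *first = head->first;

        head->first = NULL;
        return first;
    }

    int main(void)
    {
        struct llist_head hpage_freelist = { NULL };
        struct demo_page pages[3] = { { .id = 1 }, { .id = 2 }, { .id = 3 } };
        struct llist_node *node;
        int i;

        for (i = 0; i < 3; i++)
            llist_add(&pages[i].free_node, &hpage_freelist);

        node = llist_del_all(&hpage_freelist);
        while (node) {
            struct demo_page *page = container_of(node, struct demo_page, free_node);

            node = node->next;              /* read next before "freeing" the page */
            printf("freeing deferred huge page %d\n", page->id);
        }
        return 0;
    }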
2140 * failed, do not continue to try hard on the same node. Use the
2232 * Allocates a fresh page to the hugetlb allocator pool in the node interleaved
2239 int nr_nodes, node;
2242 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
2243 folio = alloc_fresh_hugetlb_folio(h, gfp_mask, node,
2255 * Remove huge page from pool from next node to free. Attempt to keep
2265 int nr_nodes, node;
2270 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
2275 if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
2276 !list_empty(&h->hugepage_freelists[node])) {
2277 page = list_entry(h->hugepage_freelists[node].next,
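Lines 2232-2277 add and remove pool pages "from the next node" so the pool stays balanced: the free side walks the allowed nodes from its cursor and takes a page from the first node that has free pages, optionally requiring the node to also hold surplus pages. Below is a user-space sketch with invented per-node counters; the allocation side is the mirror image using the alloc cursor shown earlier.

    /*
     * User-space sketch of "remove a huge page from the next node to free":
     * the cursor advances for every node examined, successful or not.
     */
    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_NODES 4

    static int free_huge_pages_node[MAX_NODES]    = { 3, 0, 2, 1 };
    static int surplus_huge_pages_node[MAX_NODES] = { 0, 0, 1, 0 };
    static int next_nid_to_free;

    static int remove_pool_huge_page(bool acct_surplus)
    {
        int i, node;

        for (i = 0; i < MAX_NODES; i++) {
            node = next_nid_to_free;
            /* advance whether or not we free here, so the next attempt moves on */
            next_nid_to_free = (next_nid_to_free + 1) % MAX_NODES;
            /*
             * Skip nodes with nothing to free; when shrinking surplus, also
             * skip nodes that hold no surplus pages.
             */
            if ((!acct_surplus || surplus_huge_pages_node[node]) &&
                free_huge_pages_node[node]) {
                free_huge_pages_node[node]--;
                if (acct_surplus)
                    surplus_huge_pages_node[node]--;
                return node;
            }
        }
        return -1;      /* nothing to free on any allowed node */
    }

    int main(void)
    {
        int node;

        while ((node = remove_pool_huge_page(false)) >= 0)
            printf("freed one huge page from node %d\n", node);
        return 0;
    }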
2529 int node;
2532 node = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
2533 folio = alloc_hugetlb_folio_nodemask(h, node, nodemask, gfp_mask);
3180 int nr_nodes, node;
3182 /* do node specific alloc */
3190 /* allocate from next node when distributing huge pages */
3191 for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {
3194 0, MEMBLOCK_ALLOC_ACCESSIBLE, node);
3271 pr_warn("HugeTLB: allocating %u of page size %s failed node%d. Only allocated %lu hugepages.\n",
3289 /* do node specific alloc */
3300 /* below will do all node balanced alloc */
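Lines 3180-3300 cover the boot-time pool: unless the request was node specific, pages are handed out by repeatedly asking the "next" node, and 3271 is the warning printed when fewer pages than requested could be set up. The sketch below is a simplified user-space version of that distribution loop; alloc_from_node() and the per-node capacities merely stand in for the memblock allocation.

    /*
     * User-space sketch of distributing a boot-time request across nodes in
     * round-robin order, giving up once a full lap of nodes has failed.
     */
    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_NODES 4

    static int node_capacity[MAX_NODES] = { 3, 3, 1, 3 };   /* pages each node can give */

    static bool alloc_from_node(int node)
    {
        if (node_capacity[node] > 0) {
            node_capacity[node]--;
            return true;
        }
        return false;
    }

    int main(void)
    {
        unsigned long wanted = 16, allocated = 0;
        int node = 0, failed = 0;

        while (allocated < wanted && failed < MAX_NODES) {
            if (alloc_from_node(node)) {
                allocated++;
                failed = 0;
            } else {
                failed++;       /* this node is out; give up after a full failed lap */
            }
            node = (node + 1) % MAX_NODES;  /* next node, for even distribution */
        }

        if (allocated < wanted)
            printf("allocating %lu pages failed. Only allocated %lu hugepages.\n",
                   wanted, allocated);
        return 0;
    }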
3303 * Bit mask controlling how hard we retry per-node allocations.
3315 /* bit mask controlling how hard we retry per-node allocations */
3426 * Increment or decrement surplus_huge_pages. Keep node-specific counters
3433 int nr_nodes, node;
3439 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
3440 if (h->surplus_huge_pages_node[node])
3444 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
3445 if (h->surplus_huge_pages_node[node] <
3446 h->nr_huge_pages_node[node])
3454 h->surplus_huge_pages_node[node] += delta;
3468 * Bit mask controlling how hard we retry per-node allocations.
3486 * Check for a node specific request.
3487 * Changing node specific huge page count may require a corresponding
3488 * change to the global count. In any case, the passed node mask
3489 * (nodes_allowed) will restrict alloc/free to the specified node.
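Lines 3426-3489 are the surplus accounting behind nr_hugepages changes: converting pages between persistent and surplus adjusts surplus_huge_pages by +1/-1, and the per-node counters are kept balanced by shrinking only on nodes that actually have surplus pages and growing only on nodes whose surplus is still below their total. A sketch with plain arrays and a linear node scan in place of the kernel's cursors and hugetlb_lock:

    /*
     * User-space sketch of keeping per-node surplus counters balanced for a
     * +1/-1 adjustment.  All counters are invented.
     */
    #include <stdio.h>

    #define MAX_NODES 4

    static int nr_huge_pages_node[MAX_NODES]      = { 4, 4, 4, 4 };
    static int surplus_huge_pages_node[MAX_NODES] = { 1, 0, 4, 0 };
    static int surplus_huge_pages = 5;

    static int adjust_pool_surplus(int delta)       /* delta is +1 or -1 */
    {
        int node;

        for (node = 0; node < MAX_NODES; node++) {
            if (delta < 0 && surplus_huge_pages_node[node])
                goto found;                 /* only shrink where surplus exists */
            if (delta > 0 && surplus_huge_pages_node[node] <
                             nr_huge_pages_node[node])
                goto found;                 /* only grow below the node's total */
        }
        return 0;
    found:
        surplus_huge_pages_node[node] += delta;
        surplus_huge_pages += delta;
        return 1;
    }

    int main(void)
    {
        while (adjust_pool_surplus(-1))
            ;
        printf("surplus after draining: %d\n", surplus_huge_pages);   /* 0 */
        return 0;
    }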
3677 int nr_nodes, node;
3688 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
3689 list_for_each_entry(folio, &h->hugepage_freelists[node], lru) {
4061 * node_hstate/s - associate per node hstate attributes, via their kobjects,
4062 * with node devices in node_devices[] using a parallel array. The array
4063 * index of a node device or _hstate == node id.
4064 * This is here to avoid any static dependency of the node device driver, in
4074 * A subset of global hstate attributes for node devices
4088 * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj.
4089 * Returns node id via non-NULL nidp.
4111 * Unregister hstate attributes from a single node device.
4114 void hugetlb_unregister_node(struct node *node)
4117 struct node_hstate *nhs = &node_hstates[node->dev.id];
4141 * Register hstate attributes for a single node device.
4144 void hugetlb_register_node(struct node *node)
4147 struct node_hstate *nhs = &node_hstates[node->dev.id];
4157 &node->dev.kobj);
4166 pr_err("HugeTLB: Unable to add hstate %s for node %d\n",
4167 h->name, node->dev.id);
4168 hugetlb_unregister_node(node);
4175 * hugetlb init time: register hstate attributes for all registered node
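Lines 4061-4175 describe the per-node sysfs wiring: node_hstates[] is a parallel array indexed by node id (mirroring node_devices[]), each entry holds one attribute kobject per hstate, and kobj_to_node_hstate() reverses the mapping by scanning that table. Below is a user-space sketch of just the data structure and the reverse lookup, with kobjects replaced by opaque pointers.

    /*
     * User-space sketch of the parallel-array lookup: from an attribute
     * object back to (hstate index, node id).
     */
    #include <stdio.h>

    #define MAX_NUMNODES    4
    #define HUGE_MAX_HSTATE 2

    struct node_hstate {
        void *hstate_kobjs[HUGE_MAX_HSTATE];    /* one attr object per hstate */
    };

    /* Array index of a node's entry == node id, mirroring node_devices[]. */
    static struct node_hstate node_hstates[MAX_NUMNODES];

    /* Find which hstate and node an attribute object belongs to, or -1. */
    static int kobj_to_node_hstate(void *kobj, int *nidp)
    {
        int nid, i;

        for (nid = 0; nid < MAX_NUMNODES; nid++)
            for (i = 0; i < HUGE_MAX_HSTATE; i++)
                if (node_hstates[nid].hstate_kobjs[i] == kobj) {
                    *nidp = nid;
                    return i;               /* hstate index */
                }
        return -1;
    }

    int main(void)
    {
        static int dummy_attr;              /* stands in for a registered kobject */
        int nid, hs;

        node_hstates[2].hstate_kobjs[1] = &dummy_attr;   /* "register" it */

        hs = kobj_to_node_hstate(&dummy_attr, &nid);
        printf("attribute belongs to hstate %d on node %d\n", hs, nid);
        return 0;
    }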
4369 int node = NUMA_NO_NODE;
4400 /* Parameter is node format */
4403 pr_warn("HugeTLB: architecture can't support node specific alloc, ignoring!\n");
4408 node = array_index_nospec(tmp, MAX_NUMNODES);
4414 default_hugepages_in_node[node] = tmp;
4416 parsed_hstate->max_huge_pages_node[node] = tmp;
4418 /* Go to parse next node*/
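Lines 4369-4418 parse the per-node form of the hugepages= boot parameter, <node>:<count>[,<node>:<count>...]; each node index is range checked (and clamped with array_index_nospec(), a speculative-execution hardening helper) before the count lands in the per-node array. The following is a user-space sketch of the parse loop, with an ordinary bounds check standing in for array_index_nospec().

    /*
     * User-space sketch of parsing "node:count" pairs, e.g. "0:1024,2:512".
     */
    #include <stdio.h>
    #include <stdlib.h>

    #define MAX_NUMNODES 8

    static unsigned long max_huge_pages_node[MAX_NUMNODES];

    static int parse_hugepages_node(const char *s)
    {
        while (*s) {
            char *end;
            unsigned long node, count;

            node = strtoul(s, &end, 10);
            if (end == s || *end != ':' || node >= MAX_NUMNODES)
                return -1;                  /* not node format, or bad node id */
            s = end + 1;

            count = strtoul(s, &end, 10);
            if (end == s)
                return -1;
            max_huge_pages_node[node] = count;

            /* go to parse the next node:count pair, if any */
            s = (*end == ',') ? end + 1 : end;
            if (*end && *end != ',')
                return -1;
        }
        return 0;
    }

    int main(void)
    {
        int node;

        if (parse_hugepages_node("0:1024,2:512"))
            return 1;
        for (node = 0; node < MAX_NUMNODES; node++)
            if (max_huge_pages_node[node])
                printf("node %d: %lu huge pages requested\n",
                       node, max_huge_pages_node[node]);
        return 0;
    }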
4568 int node;
4575 for_each_node_mask(node, cpuset_current_mems_allowed) {
4576 if (!mbind_nodemask || node_isset(node, *mbind_nodemask))
4577 nr += array[node];
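Lines 4568-4577 compute the meminfo-style counts a task is actually allowed to use: the per-node array is summed only over nodes in cpuset_current_mems_allowed and, when an MPOL_BIND-style policy is in effect, only over nodes also present in that mbind nodemask. A small sketch with bitmask stand-ins for the nodemasks:

    /*
     * User-space sketch of the nodemask-restricted sum: count only nodes in
     * the cpuset mask, further restricted by an optional mbind mask.
     */
    #include <stdio.h>

    #define MAX_NODES 4

    static unsigned long sum_allowed(const unsigned long *array,
                                     unsigned long cpuset_mask,
                                     const unsigned long *mbind_mask)
    {
        unsigned long nr = 0;
        int node;

        for (node = 0; node < MAX_NODES; node++) {
            if (!(cpuset_mask & (1UL << node)))
                continue;
            if (!mbind_mask || (*mbind_mask & (1UL << node)))
                nr += array[node];
        }
        return nr;
    }

    int main(void)
    {
        unsigned long free_huge_pages_node[MAX_NODES] = { 8, 4, 2, 1 };
        unsigned long cpuset = 0xb;                 /* nodes 0, 1, 3 */
        unsigned long mbind  = 0x9;                 /* nodes 0, 3 */

        printf("cpuset only:  %lu\n", sum_allowed(free_huge_pages_node, cpuset, NULL));
        printf("cpuset+mbind: %lu\n", sum_allowed(free_huge_pages_node, cpuset, &mbind));
        return 0;
    }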
4805 * task or memory node can be dynamically moved between cpusets.
4814 * also determines from which node the kernel will allocate memory
7277 * Also note that we have to transfer the per-node surplus state
7279 * the per-node's.
7290 * There is no need to transfer the per-node surplus state
7291 * when we do not cross the node.
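Lines 7277-7291 note that when a huge page ends up on a different node than the one it was accounted to (e.g. after migration), the per-node surplus counters must follow it, and that nothing needs to move when the node does not change. A tiny sketch of that transfer with invented counters:

    /*
     * User-space sketch of moving per-node surplus accounting when a huge
     * page changes node; the kernel does this under hugetlb_lock.
     */
    #include <stdio.h>

    #define MAX_NODES 4

    static int surplus_huge_pages_node[MAX_NODES] = { 2, 0, 1, 0 };

    static void transfer_surplus(int old_nid, int new_nid)
    {
        /* no need to transfer anything when we do not cross a node */
        if (old_nid == new_nid)
            return;
        if (surplus_huge_pages_node[old_nid]) {
            surplus_huge_pages_node[old_nid]--;
            surplus_huge_pages_node[new_nid]++;
        }
    }

    int main(void)
    {
        transfer_surplus(0, 3);     /* page migrated from node 0 to node 3 */
        printf("node 0 surplus: %d, node 3 surplus: %d\n",
               surplus_huge_pages_node[0], surplus_huge_pages_node[3]);
        return 0;
    }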
7418 pr_warn("hugetlb_cma: invalid node %d specified\n", nid);
7425 pr_warn("hugetlb_cma: cma area of node %d should be at least %lu MiB\n",
7451 pr_info("hugetlb_cma: reserve %lu MiB, up to %lu MiB per node\n",
7482 pr_warn("hugetlb_cma: reservation failed: err %d, node %d",
7488 pr_info("hugetlb_cma: reserved %lu MiB on node %d\n",