Lines Matching refs:nodes
15 * interleave Allocate memory interleaved over a set of nodes,
22 * bind Only allocate memory on a specific set of nodes,
26 * the allocation to memory nodes instead
34 * preferred many Try a set of nodes first before normal fallback. This is
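The mode descriptions above map directly onto the MPOL_* constants that userspace passes in. As a minimal sketch of selecting one of them for the calling thread, assuming libnuma's <numaif.h> wrapper around the set_mempolicy(2) syscall (build with -lnuma):

    #include <stdio.h>
    #include <numaif.h>   /* MPOL_* constants and syscall wrappers */

    int main(void)
    {
            /* Interleave all future allocations of this thread
             * across nodes 0 and 1. */
            unsigned long nodemask = (1UL << 0) | (1UL << 1);

            if (set_mempolicy(MPOL_INTERLEAVE, &nodemask,
                              sizeof(nodemask) * 8) != 0) {
                    perror("set_mempolicy");
                    return 1;
            }
            return 0;
    }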
185 int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
186 void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
202 static int mpol_new_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
204 if (nodes_empty(*nodes))
206 pol->nodes = *nodes;
210 static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
212 if (nodes_empty(*nodes))
215 nodes_clear(pol->nodes);
216 node_set(first_node(*nodes), pol->nodes);
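Note the asymmetry between the two constructors: mpol_new_nodemask() stores the whole mask, while mpol_new_preferred() collapses it to its first node. A worked illustration (the node values are hypothetical):

    /* Both constructors reject an empty mask with -EINVAL; otherwise:
     *   mpol_new_nodemask():  nodes = {0,2,5} -> pol->nodes = {0,2,5}
     *   mpol_new_preferred(): nodes = {0,2,5} -> pol->nodes = {0}
     *                         (first node only)
     */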
222 * any, for the new policy. mpol_new() has already validated the nodes
229 const nodemask_t *nodes, struct nodemask_scratch *nsc)
245 VM_BUG_ON(!nodes);
248 mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
250 nodes_and(nsc->mask2, *nodes, nsc->mask1);
253 pol->w.user_nodemask = *nodes;
263 * initialization. You must invoke mpol_set_nodemask() to set nodes.
266 nodemask_t *nodes)
270 pr_debug("setting mode %d flags %d nodes[0] %lx\n",
271 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);
274 if (nodes && !nodes_empty(*nodes))
278 VM_BUG_ON(!nodes);
286 if (nodes_empty(*nodes)) {
294 if (!nodes_empty(*nodes) ||
298 } else if (nodes_empty(*nodes))
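The mpol_relative_nodemask() and nodes_and() branches visible at lines 248/250 decide how the user's mask is folded into the caller's cpuset when the policy is created. A worked sketch, assuming the cpuset allows nodes {2,3} and the user passes {0,1}:

    /* With cpuset mems_allowed = {2,3} and a user mask of {0,1}:
     *   MPOL_F_RELATIVE_NODES: bit i of the user mask maps to the i-th
     *                          allowed node, so {0,1} becomes {2,3}
     *   otherwise:             the user mask is ANDed with the allowed
     *                          nodes, {0,1} & {2,3} = {} -> -EINVAL
     */

The mask saved in w.user_nodemask (line 253) is what the rebind paths below replay when the cpuset later changes.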
319 static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes)
323 static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
328 nodes_and(tmp, pol->w.user_nodemask, *nodes);
330 mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
332 nodes_remap(tmp, pol->nodes, pol->w.cpuset_mems_allowed,
333 *nodes);
334 pol->w.cpuset_mems_allowed = *nodes;
338 tmp = *nodes;
340 pol->nodes = tmp;
344 const nodemask_t *nodes)
346 pol->w.cpuset_mems_allowed = *nodes;
350 * mpol_rebind_policy - Migrate a policy to a different set of nodes
734 * If pages found in a given range are on a set of nodes (determined by
735 * @nodes and @flags), they are isolated and queued to the pagelist, which is
748 nodemask_t *nodes, unsigned long flags,
755 .nmask = nodes,
863 nodemask_t *nodes)
872 new = mpol_new(mode, flags, nodes);
879 ret = mpol_set_nodemask(new, nodes, scratch);
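do_mbind() is the backend of the mbind(2) syscall, which applies a policy to an address range rather than to the whole task. A minimal userspace sketch, again assuming libnuma's <numaif.h> (build with -lnuma):

    #include <stdio.h>
    #include <sys/mman.h>
    #include <numaif.h>

    int main(void)
    {
            size_t len = 2UL << 20;
            void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            if (p == MAP_FAILED)
                    return 1;

            /* Bind the range to node 0 and migrate pages already present. */
            unsigned long nodemask = 1UL << 0;
            if (mbind(p, len, MPOL_BIND, &nodemask, sizeof(nodemask) * 8,
                      MPOL_MF_MOVE) != 0)
                    perror("mbind");
            return 0;
    }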
903 static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
905 nodes_clear(*nodes);
914 *nodes = p->nodes;
999 *policy = next_node_in(current->il_prev, pol->nodes);
1138 * This lets us pick a pair of nodes to migrate between, such that
1167 * However, if the number of source nodes is not equal to
1168 * the number of destination nodes, we cannot preserve
1188 /* dest not in remaining "from" nodes? */
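This pair-picking logic backs the migrate_pages(2) syscall, which moves a task's resident pages from one node set to another while preserving the relative node relationship where it can. A minimal sketch moving the calling process's pages from node 0 to node 2:

    #include <stdio.h>
    #include <numaif.h>

    int main(void)
    {
            unsigned long old_nodes = 1UL << 0;
            unsigned long new_nodes = 1UL << 2;

            /* pid 0 means the calling process; returns the number of
             * pages that could not be moved, or -1 on error. */
            if (migrate_pages(0, sizeof(old_nodes) * 8,
                              &old_nodes, &new_nodes) < 0)
                    perror("migrate_pages");
            return 0;
    }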
1311 pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
1407 static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
1411 nodes_clear(*nodes);
1418 * When the user specifies more nodes than supported, just check
1439 return get_bitmap(nodes_addr(*nodes), nmask, maxnode);
1444 nodemask_t *nodes)
1464 nodes_addr(*nodes), maxnode);
1466 return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
1492 nodemask_t nodes;
1501 err = get_nodes(&nodes, nmask, maxnode);
1505 return do_mbind(start, len, lmode, mode_flags, &nodes, flags);
1587 nodemask_t nodes;
1595 err = get_nodes(&nodes, nmask, maxnode);
1599 return do_set_mempolicy(lmode, mode_flags, &nodes);
1658 /* Is the user allowed to access the target nodes? */
1713 nodemask_t nodes;
1720 err = do_get_mempolicy(&pval, &nodes, addr, flags);
1729 err = copy_nodes_to_user(nmask, maxnode, &nodes);
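do_get_mempolicy() and copy_nodes_to_user() implement the query side, get_mempolicy(2). A minimal sketch reading back the calling thread's policy:

    #include <stdio.h>
    #include <numaif.h>

    int main(void)
    {
            int mode;
            unsigned long nodemask = 0;

            /* addr = NULL, flags = 0: query the thread's default policy. */
            if (get_mempolicy(&mode, &nodemask, sizeof(nodemask) * 8,
                              NULL, 0) != 0) {
                    perror("get_mempolicy");
                    return 1;
            }
            printf("mode=%d nodemask=%#lx\n", mode, nodemask);
            return 0;
    }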
1846 * if policy->nodes has movable memory only,
1849 * policy->nodes intersects with node_states[N_MEMORY].
1851 * policy->nodes has movable memory only.
1853 if (!nodes_intersects(policy->nodes, node_states[N_HIGH_MEMORY]))
1860 * Return a nodemask representing a mempolicy for filtering nodes for
1870 cpuset_nodemask_valid_mems_allowed(&policy->nodes))
1871 return &policy->nodes;
1874 return &policy->nodes;
1889 nd = first_node(policy->nodes);
1913 next = next_node_in(me->il_prev, policy->nodes);
1937 return first_node(policy->nodes);
1955 &policy->nodes);
1968 * node in pol->nodes (starting from n=0), wrapping around if n exceeds the
1969 * number of present nodes.
1973 nodemask_t nodemask = pol->nodes;
1981 * Between first_node() and next_node(), pol->nodes could be changed
1982 * by other threads, so we copy pol->nodes to a local variable on the stack.
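The static interleave lookup these lines belong to (offset_il_node() in mempolicy.c) turns a stable per-address offset n into a node by counting into the copied mask modulo its weight. A worked example:

    /* pol->nodes = {0,2,5} (weight 3), n = 7:
     * 7 % 3 = 1, so starting from the first set bit and stepping
     * once with next_node() we land on node 2.
     */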
2050 *nodemask = &(*mpol)->nodes;
2085 *mask = mempolicy->nodes;
2107 * memory allocated from all nodes in the system.
2123 ret = nodes_intersects(mempolicy->nodes, *mask);
2156 * preferred nodes but skip the direct reclaim and allow the
2158 * nodes in the system.
2162 page = __alloc_pages(preferred_gfp, order, nid, &pol->nodes);
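From userspace, this two-pass behaviour belongs to MPOL_PREFERRED_MANY: try the preferred set cheaply first, then fall back to the whole system. A minimal sketch, assuming a kernel and libnuma recent enough to define MPOL_PREFERRED_MANY (v5.15+):

    /* Prefer nodes 0 and 1, but allow fallback to any node. */
    unsigned long mask = (1UL << 0) | (1UL << 1);
    if (set_mempolicy(MPOL_PREFERRED_MANY, &mask, sizeof(mask) * 8) != 0)
            perror("set_mempolicy");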
2229 * node and don't fall back to other nodes, as the cost of
2236 hpage_node = first_node(pol->nodes);
2326 int nodes;
2333 nodes = nodes_weight(pol->nodes);
2334 nr_pages_per_node = nr_pages / nodes;
2335 delta = nr_pages - nodes * nr_pages_per_node;
2337 for (i = 0; i < nodes; i++) {
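The split is plain integer division, with the remainder handed out one extra page at a time to the first delta nodes of the loop. Worked through for nr_pages = 10 over 3 interleave nodes:

    /* nr_pages_per_node = 10 / 3 = 3, delta = 10 - 3 * 3 = 1:
     * the first node gets 3 + 1 = 4 pages, the other two get 3 each,
     * for a total of exactly 10.
     */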
2367 nr_allocated = __alloc_pages_bulk(preferred_gfp, nid, &pol->nodes,
2469 return !!nodes_equal(a->nodes, b->nodes);
2609 if (node_isset(curnid, pol->nodes))
2611 polnid = first_node(pol->nodes);
2619 /* Optimize placement among multiple nodes via NUMA balancing */
2621 if (node_isset(thisnid, pol->nodes))
2631 * If no allowed nodes, use current [!misplaced].
2633 if (node_isset(curnid, pol->nodes))
2638 &pol->nodes);
2842 npol ? nodes_addr(npol->nodes)[0] : NUMA_NO_NODE);
2940 .nodes = nodemask_of_node(nid),
2946 * enabled across suitably sized nodes (default is >= 16MB), or
3010 nodemask_t nodes;
3021 if (nodelist_parse(nodelist, nodes))
3023 if (!nodes_subset(nodes, node_states[N_MEMORY]))
3026 nodes_clear(nodes);
3036 * we use first_node(nodes) to grab a single node, so here
3037 * nodelist (or nodes) cannot be empty.
3045 if (nodes_empty(nodes))
3051 * Default to online nodes with memory if no nodelist
3054 nodes = node_states[N_MEMORY];
3093 new = mpol_new(mode, mode_flags, &nodes);
3098 * Save nodes for mpol_to_str() to show the tmpfs mount options
3102 new->nodes = nodes;
3104 nodes_clear(new->nodes);
3105 node_set(first_node(nodes), new->nodes);
3111 * Save nodes for contextualization: this will be used to "clone"
3114 new->w.user_nodemask = nodes;
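mpol_parse_str(), which these lines come from, parses the mpol= string accepted at tmpfs mount time (for example "interleave:0-3"). A minimal sketch driving it via mount(2); this assumes CAP_SYS_ADMIN and an existing /mnt mount point:

    #include <stdio.h>
    #include <sys/mount.h>

    int main(void)
    {
            /* Interleave tmpfs pages across nodes 0-3. */
            if (mount("tmpfs", "/mnt", "tmpfs", 0,
                      "mpol=interleave:0-3") != 0)
                    perror("mount");
            return 0;
    }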
3143 nodemask_t nodes = NODE_MASK_NONE;
3160 nodes = pol->nodes;
3182 if (!nodes_empty(nodes))
3184 nodemask_pr_args(&nodes));