Lines matching references to pol (Linux kernel memory-policy code, mm/mempolicy.c; source line numbers shown on the left)
167 struct mempolicy *pol = p->mempolicy;
170 if (pol)
171 return pol;
175 pol = &preferred_node_policy[node];
177 if (pol->mode)
178 return pol;
185 int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
186 void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
189 static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
191 return pol->flags & MPOL_MODE_FLAGS;
202 static int mpol_new_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
206 pol->nodes = *nodes;
210 static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
215 nodes_clear(pol->nodes);
216 node_set(first_node(*nodes), pol->nodes);
228 static int mpol_set_nodemask(struct mempolicy *pol,
234 * Default (pol==NULL) resp. local memory policies are not a
238 if (!pol || pol->mode == MPOL_LOCAL)
247 if (pol->flags & MPOL_F_RELATIVE_NODES)
252 if (mpol_store_user_nodemask(pol))
253 pol->w.user_nodemask = *nodes;
255 pol->w.cpuset_mems_allowed = cpuset_current_mems_allowed;
257 ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
319 static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes)
323 static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
327 if (pol->flags & MPOL_F_STATIC_NODES)
328 nodes_and(tmp, pol->w.user_nodemask, *nodes);
329 else if (pol->flags & MPOL_F_RELATIVE_NODES)
330 mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
332 nodes_remap(tmp, pol->nodes, pol->w.cpuset_mems_allowed,
334 pol->w.cpuset_mems_allowed = *nodes;
340 pol->nodes = tmp;
343 static void mpol_rebind_preferred(struct mempolicy *pol,
346 pol->w.cpuset_mems_allowed = *nodes;
356 static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
358 if (!pol || pol->mode == MPOL_LOCAL)
360 if (!mpol_store_user_nodemask(pol) &&
361 nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
364 mpol_ops[pol->mode].rebind(pol, newmask);
780 struct mempolicy *pol)
793 new = mpol_dup(pol);
944 struct mempolicy *pol = current->mempolicy, *pol_refcount = NULL;
973 pol = vma->vm_ops->get_policy(vma, addr);
975 pol = vma->vm_policy;
979 if (!pol)
980 pol = &default_policy; /* indicates default behavior */
986 * drop the mmap_lock, after which only "pol" remains
989 pol_refcount = pol;
991 mpol_get(pol);
997 } else if (pol == current->mempolicy &&
998 pol->mode == MPOL_INTERLEAVE) {
999 *policy = next_node_in(current->il_prev, pol->nodes);
1005 *policy = pol == &default_policy ? MPOL_DEFAULT :
1006 pol->mode;
1011 *policy |= (pol->flags & MPOL_MODE_FLAGS);
1016 if (mpol_store_user_nodemask(pol)) {
1017 *nmask = pol->w.user_nodemask;
1020 get_policy_nodemask(pol, nmask);
1026 mpol_cond_put(pol);
1772 struct mempolicy *pol = NULL;
1776 pol = vma->vm_ops->get_policy(vma, addr);
1778 pol = vma->vm_policy;
1786 if (mpol_needs_cond_ref(pol))
1787 mpol_get(pol);
1791 return pol;
1809 struct mempolicy *pol = __get_vma_policy(vma, addr);
1811 if (!pol)
1812 pol = get_task_policy(current);
1814 return pol;
1819 struct mempolicy *pol;
1824 pol = vma->vm_ops->get_policy(vma, vma->vm_start);
1825 if (pol && (pol->flags & MPOL_F_MOF))
1827 mpol_cond_put(pol);
1832 pol = vma->vm_policy;
1833 if (!pol)
1834 pol = get_task_policy(current);
1836 return pol->flags & MPOL_F_MOF;
1968 * node in pol->nodes (starting from n=0), wrapping around if n exceeds the
1971 static unsigned offset_il_node(struct mempolicy *pol, unsigned long n)
1973 nodemask_t nodemask = pol->nodes;
1981 * Between first_node() and next_node(), pol->nodes could be changed
1982 * by other threads. So we put pol->nodes in a local stack.
1997 static inline unsigned interleave_nid(struct mempolicy *pol,
2013 return offset_il_node(pol, off);
2015 return interleave_nodes(pol);
2149 int nid, struct mempolicy *pol)
2162 page = __alloc_pages(preferred_gfp, order, nid, &pol->nodes);
2187 struct mempolicy *pol;
2193 pol = get_vma_policy(vma, addr);
2195 if (pol->mode == MPOL_INTERLEAVE) {
2199 nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
2200 mpol_cond_put(pol);
2209 if (pol->mode == MPOL_PREFERRED_MANY) {
2212 node = policy_node(gfp, pol, node);
2214 page = alloc_pages_preferred_many(gfp, order, node, pol);
2215 mpol_cond_put(pol);
2235 if (pol->mode == MPOL_PREFERRED)
2236 hpage_node = first_node(pol->nodes);
2238 nmask = policy_nodemask(gfp, pol);
2240 mpol_cond_put(pol);
2262 nmask = policy_nodemask(gfp, pol);
2263 preferred_nid = policy_node(gfp, pol, node);
2265 mpol_cond_put(pol);
2287 struct mempolicy *pol = &default_policy;
2291 pol = get_task_policy(current);
2297 if (pol->mode == MPOL_INTERLEAVE)
2298 page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
2299 else if (pol->mode == MPOL_PREFERRED_MANY)
2301 policy_node(gfp, pol, numa_node_id()), pol);
2304 policy_node(gfp, pol, numa_node_id()),
2305 policy_nodemask(gfp, pol));
2323 struct mempolicy *pol, unsigned long nr_pages,
2333 nodes = nodes_weight(pol->nodes);
2340 interleave_nodes(pol), NULL,
2346 interleave_nodes(pol), NULL,
2358 struct mempolicy *pol, unsigned long nr_pages,
2367 nr_allocated = __alloc_pages_bulk(preferred_gfp, nid, &pol->nodes,
2386 struct mempolicy *pol = &default_policy;
2389 pol = get_task_policy(current);
2391 if (pol->mode == MPOL_INTERLEAVE)
2392 return alloc_pages_bulk_array_interleave(gfp, pol,
2395 if (pol->mode == MPOL_PREFERRED_MANY)
2397 numa_node_id(), pol, nr_pages, page_array);
2399 return __alloc_pages_bulk(gfp, policy_node(gfp, pol, numa_node_id()),
2400 policy_nodemask(gfp, pol), nr_pages, NULL,
2406 struct mempolicy *pol = mpol_dup(vma_policy(src));
2408 if (IS_ERR(pol))
2409 return PTR_ERR(pol);
2410 dst->vm_policy = pol;
2551 struct mempolicy *pol = NULL;
2560 pol = sn->policy;
2563 return pol;
2588 struct mempolicy *pol;
2597 pol = get_vma_policy(vma, addr);
2598 if (!(pol->flags & MPOL_F_MOF))
2601 switch (pol->mode) {
2605 polnid = offset_il_node(pol, pgoff);
2609 if (node_isset(curnid, pol->nodes))
2611 polnid = first_node(pol->nodes);
2620 if (pol->flags & MPOL_F_MORON) {
2621 if (node_isset(thisnid, pol->nodes))
2633 if (node_isset(curnid, pol->nodes))
2638 &pol->nodes);
2647 if (pol->flags & MPOL_F_MORON) {
2657 mpol_cond_put(pol);
2670 struct mempolicy *pol;
2673 pol = task->mempolicy;
2676 mpol_put(pol);
2687 unsigned long end, struct mempolicy *pol)
2691 node->policy = pol;
2695 struct mempolicy *pol)
2704 newpol = mpol_dup(pol);
3134 * @pol: pointer to mempolicy to be formatted
3136 * Convert @pol into a string. If @buffer is too short, truncate the string.
3140 void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
3147 if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) {
3148 mode = pol->mode;
3149 flags = pol->flags;
3160 nodes = pol->nodes;
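A pattern that recurs throughout the listing (source lines 1772-1836, 2193/2200, 2597/2657) is the policy lookup chain: prefer the VMA's own policy, fall back to the task policy, then to the static default_policy, and take a reference only when the policy needs one, dropping it later with a conditional put. The sketch below is a minimal, self-contained model of that pattern, assuming simplified stand-in types; the helper names (mpol_get, mpol_cond_put, mpol_needs_cond_ref) and the fallback order come from the listing, but the struct layout, flag value, and function bodies here are illustrative assumptions, not the kernel's implementation.

/*
 * Illustrative sketch only -- simplified stand-ins, not the real
 * <linux/mempolicy.h> definitions or mm/mempolicy.c code.
 */
#include <stdio.h>

#define MPOL_F_SHARED 0x1              /* assumption: marks refcounted (shared) policies */

struct mempolicy {
        int refcnt;                    /* simplified: not atomic as in the kernel */
        unsigned short mode;
        unsigned short flags;
};

static struct mempolicy default_policy;        /* never refcounted */

static int mpol_needs_cond_ref(struct mempolicy *pol)
{
        return pol && (pol->flags & MPOL_F_SHARED);
}

static void mpol_get(struct mempolicy *pol)
{
        pol->refcnt++;
}

static void mpol_cond_put(struct mempolicy *pol)
{
        if (mpol_needs_cond_ref(pol) && --pol->refcnt == 0)
                printf("last reference to shared policy dropped\n");
}

/* Stand-ins for vma->vm_policy and current->mempolicy. */
static struct mempolicy *vma_policy;
static struct mempolicy *task_policy;

/* Mirrors the fallback shape of __get_vma_policy()/get_vma_policy(). */
static struct mempolicy *lookup_policy(void)
{
        struct mempolicy *pol = vma_policy;

        if (!pol)
                pol = task_policy;
        if (!pol)
                pol = &default_policy; /* "indicates default behavior" */
        if (mpol_needs_cond_ref(pol))
                mpol_get(pol);         /* caller must pair with mpol_cond_put() */
        return pol;
}

int main(void)
{
        struct mempolicy shared = { .refcnt = 1, .flags = MPOL_F_SHARED };

        task_policy = &shared;
        struct mempolicy *pol = lookup_policy();
        /* ... consult pol->mode / pol->flags here ... */
        mpol_cond_put(pol);
        return 0;
}

The conditional put is what lets per-task and static default policies be returned with no refcounting cost, while shared policies stay valid for the caller even across the mmap_lock drop noted in the comment fragment at source line 986.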