Lines matching refs:policy in mm/mempolicy.c

3  * Simple NUMA memory policy for the Linux kernel.
8 * NUMA policy allows the user to give hints about which node(s) memory should
13 * The VMA policy has priority over the process policy for a page fault.
19 * for anonymous memory. For process policy a process counter
32 * process policy.
38 * use the process policy. This is what Linux always did
41 * The process policy is applied for most non-interrupt memory allocations
43 * try to allocate on the local CPU. The VMA policy is only applied for memory
46 * Currently there are a few corner cases in swapping where the policy
47 * is not applied, but the majority should be handled. When process policy
51 * requesting a lower zone just use the default policy. This implies that
55 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
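A minimal userspace sketch of the hinting described above, assuming libnuma's <numaif.h> and a machine with at least two online NUMA nodes (build with -lnuma); the buffer name and sizes are illustrative only:

#include <numaif.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* Bitmask with nodes 0 and 1 set; one unsigned long suffices here. */
	unsigned long nodemask = (1UL << 0) | (1UL << 1);

	/* Process policy: interleave new allocations across nodes 0 and 1. */
	if (set_mempolicy(MPOL_INTERLEAVE, &nodemask, sizeof(nodemask) * 8))
		perror("set_mempolicy");

	/* Pages are placed round-robin when first touched. */
	char *buf = malloc(1 << 20);
	if (buf) {
		for (size_t i = 0; i < (1 << 20); i += 4096)
			buf[i] = 0;
		free(buf);
	}
	return 0;
}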
60 * fix mmap readahead to honour policy and enable policy for any page cache
63 * global policy for page cache? currently it uses process policy. Requires
65 * handle mremap for shared memory (currently ignored for the policy)
67 * make bind policy root only? It can trigger oom much faster and the
124 * run-time system-wide default policy => local allocation
222 * any, for the new policy. mpol_new() has already validated the nodes
223 * parameter with respect to the policy mode and flags.
262 * This function just creates a new policy, does some checks and simple
268 struct mempolicy *policy;
300 policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
301 if (!policy)
303 atomic_set(&policy->refcnt, 1);
304 policy->mode = mode;
305 policy->flags = flags;
306 policy->home_node = NUMA_NO_NODE;
308 return policy;
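The fragments above show mpol_new() allocating from policy_cache and starting refcnt at 1; the rest of the lifetime is handled by the refcount helpers, which in include/linux/mempolicy.h of this era look roughly like the following (a sketch, not matched lines):

static inline void mpol_get(struct mempolicy *pol)
{
	if (pol)
		atomic_inc(&pol->refcnt);	/* each sharer takes a reference */
}

extern void __mpol_put(struct mempolicy *pol);

static inline void mpol_put(struct mempolicy *pol)
{
	if (pol)
		__mpol_put(pol);	/* frees back to policy_cache when refcnt hits 0 */
}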
350 * mpol_rebind_policy - Migrate a policy to a different set of nodes
458 * policy.
505 * on a node that does not follow the policy.
776 * Apply policy to a single VMA
813 /* Split or merge the VMA (if required) and apply the new policy */
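The per-VMA path above is what mbind(2) reaches from userspace. A minimal sketch, assuming libnuma's <numaif.h> (build with -lnuma); the mapping size is illustrative:

#include <numaif.h>
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 1 << 20;
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;

	/* Bind just this range to node 0; the rest of the process is untouched. */
	unsigned long nodemask = 1UL << 0;
	if (mbind(p, len, MPOL_BIND, &nodemask, sizeof(nodemask) * 8, 0))
		perror("mbind");

	munmap(p, len);
	return 0;
}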
861 /* Set the process memory policy */
899 * Return the nodemask of a policy for a get_mempolicy() query
937 /* Retrieve NUMA policy */
938 static long do_get_mempolicy(int *policy, nodemask_t *nmask,
953 *policy = 0; /* just so it's initialized */
962 * Do NOT fall back to task policy if the
963 * vma/shared policy at addr is NULL. We
996 *policy = err;
999 *policy = next_node_in(current->il_prev, pol->nodes);
1005 *policy = pol == &default_policy ? MPOL_DEFAULT :
1009 * the policy to userspace.
1011 *policy |= (pol->flags & MPOL_MODE_FLAGS);
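From userspace, do_get_mempolicy() is reached through get_mempolicy(2). A minimal sketch, assuming libnuma's <numaif.h>: MPOL_F_ADDR asks for the VMA/shared policy governing an address, while no flags returns the process policy:

#include <numaif.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	char *buf = malloc(4096);
	if (!buf)
		return 1;
	buf[0] = 0;	/* fault the page in */

	int mode;
	unsigned long nodemask = 0;
	if (get_mempolicy(&mode, &nodemask, sizeof(nodemask) * 8,
			  buf, MPOL_F_ADDR))
		perror("get_mempolicy");
	else
		printf("mode=%d nodemask=%#lx\n", mode, nodemask);

	free(buf);
	return 0;
}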
1212 * Allocate a new page for page migration based on vma policy.
1240 * if !vma, vma_alloc_folio() will use task or system default policy
1305 * If we are using the default policy then operation
1545 * If any vma in the range has a policy other than MPOL_BIND
1582 /* Set the process memory policy */
1704 /* Retrieve NUMA policy */
1705 static int kernel_get_mempolicy(int __user *policy,
1725 if (policy && put_user(pval, policy))
1734 SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1738 return kernel_get_mempolicy(policy, nmask, maxnode, addr, flags);
1781 * shmem_alloc_page() passes MPOL_F_SHARED policy with
1796 * @vma: virtual memory area whose policy is sought
1797 * @addr: address in @vma for shared policy lookup
1799 * Returns effective policy for a VMA at specified address.
1800 * Falls back to current->mempolicy or system default policy, as necessary.
1839 bool apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
1846 * if policy->nodes has movable memory only,
1847 * we apply policy when gfp_zone(gfp) = ZONE_MOVABLE only.
1849 * policy->nodes is intersected with node_states[N_MEMORY].
1851 * policy->nodes has movable memory only.
1853 if (!nodes_intersects(policy->nodes, node_states[N_HIGH_MEMORY]))
1863 nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
1865 int mode = policy->mode;
1869 apply_policy_zone(policy, gfp_zone(gfp)) &&
1870 cpuset_nodemask_valid_mems_allowed(&policy->nodes))
1871 return &policy->nodes;
1874 return &policy->nodes;
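Stitched together, the matched lines 1863-1874 give nearly all of policy_nodemask(); the glue below (the unlikely() test and the final return) is an assumption about the kernel version these line numbers come from:

nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
{
	int mode = policy->mode;

	/* Lower zones don't get a nodemask applied for MPOL_BIND. */
	if (unlikely(mode == MPOL_BIND) &&
	    apply_policy_zone(policy, gfp_zone(gfp)) &&
	    cpuset_nodemask_valid_mems_allowed(&policy->nodes))
		return &policy->nodes;

	if (mode == MPOL_PREFERRED_MANY)
		return &policy->nodes;

	/* All other modes: no explicit nodemask for the allocator. */
	return NULL;
}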
1884 * secures the nodemask limit for 'bind' and 'prefer-many' policy.
1886 static int policy_node(gfp_t gfp, struct mempolicy *policy, int nd)
1888 if (policy->mode == MPOL_PREFERRED) {
1889 nd = first_node(policy->nodes);
1892 * __GFP_THISNODE shouldn't even be used with the bind policy
1894 * requested node and not break the policy.
1896 WARN_ON_ONCE(policy->mode == MPOL_BIND && (gfp & __GFP_THISNODE));
1899 if ((policy->mode == MPOL_BIND ||
1900 policy->mode == MPOL_PREFERRED_MANY) &&
1901 policy->home_node != NUMA_NO_NODE)
1902 return policy->home_node;
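Likewise, lines 1886-1902 reconstruct policy_node() almost verbatim; the else branch and final fallthrough are assumptions about this kernel version:

static int policy_node(gfp_t gfp, struct mempolicy *policy, int nd)
{
	if (policy->mode == MPOL_PREFERRED) {
		nd = first_node(policy->nodes);
	} else {
		/*
		 * __GFP_THISNODE shouldn't even be used with the bind policy
		 * because we might easily break the expectation to stay on the
		 * requested node and not break the policy.
		 */
		WARN_ON_ONCE(policy->mode == MPOL_BIND && (gfp & __GFP_THISNODE));
	}

	/* An explicit home node overrides the hint for bind/prefer-many. */
	if ((policy->mode == MPOL_BIND ||
	     policy->mode == MPOL_PREFERRED_MANY) &&
	    policy->home_node != NUMA_NO_NODE)
		return policy->home_node;

	return nd;
}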
1908 static unsigned interleave_nodes(struct mempolicy *policy)
1913 next = next_node_in(me->il_prev, policy->nodes);
1920 * Depending on the memory policy provide a node from which to allocate the
1925 struct mempolicy *policy;
1931 policy = current->mempolicy;
1932 if (!policy)
1935 switch (policy->mode) {
1937 return first_node(policy->nodes);
1940 return interleave_nodes(policy);
1948 * Follow bind policy behavior and start allocation at the
1955 &policy->nodes);
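Interleaving keeps a per-task cursor (il_prev) and advances it through policy->nodes on each call; a sketch around the matched line 1913, with the bound check assumed from this kernel era:

static unsigned interleave_nodes(struct mempolicy *policy)
{
	unsigned next;
	struct task_struct *me = current;

	next = next_node_in(me->il_prev, policy->nodes);
	if (next < MAX_NUMNODES)
		me->il_prev = next;	/* remember where this task left off */
	return next;
}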
2021 * @vma: virtual memory area whose policy is sought
2022 * @addr: address in @vma for shared policy lookup and interleave policy
2025 * @nodemask: pointer to nodemask pointer for 'bind' and 'prefer-many' policy
2029 * If the effective policy is 'bind' or 'prefer-many', returns a pointer
2059 * to indicate default policy. Otherwise, extract the policy nodemask
2060 * for 'bind' or 'interleave' policy into the argument nodemask, or
2062 * 'preferred' or 'local' policy and return 'true' to indicate presence
2105 * the policy nodemask. Otherwise, return true for all other policies
2106 * including "interleave", as a tsk with "interleave" policy may have
2129 /* Allocate a page in interleaved policy.
2178 * NUMA policy. When @vma is not NULL the caller must hold the mmap_lock
2226 * For hugepage allocation and non-interleave policy which
2232 * If the policy is interleave or does not allow the current
2278 * to a multiple of 8 * PAGE_SIZE bytes). The NUMA policy of the current
2479 * Shared memory backing store policy support.
2522 * Insert a new shared policy into the list. Caller holds sp->lock for
2544 new->policy ? new->policy->mode : 0);
2547 /* Find shared policy intersecting idx */
2559 mpol_get(sn->policy);
2560 pol = sn->policy;
2568 mpol_put(n->policy);
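The sn->policy and n->policy fields above belong to the shared-policy rb-tree node, which include/linux/mempolicy.h of this era declares roughly as:

struct sp_node {
	struct rb_node nd;		/* keyed by the [start, end) page-offset range */
	unsigned long start, end;
	struct mempolicy *policy;	/* counted reference, dropped via mpol_put() */
};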
2573 * mpol_misplaced - check whether current page node is valid in policy
2579 * Lookup current policy node id for vma,addr and "compare to" page's
2584 * policy, or a suitable node ID to allocate a replacement page from.
2629 * use current page if in policy nodemask,
2666 * policy.
2691 node->policy = pol;
2715 /* Replace a policy range. */
2736 /* Old policy spanning whole new range. */
2741 *mpol_new = *n->policy;
2783 * mpol_shared_policy_init - initialize shared policy for inode
2784 * @sp: pointer to inode shared policy
2787 * Install non-NULL @mpol in inode's shared policy rb-tree.
2817 /* Create pseudo-vma that contains just the policy */
2819 pvma.vm_end = TASK_SIZE; /* policy covers entire file */
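The pseudo-vma trick above lets mpol_shared_policy_init() reuse the normal set-policy path for a whole file; a sketch of the surrounding calls, assumed from this kernel version:

struct vm_area_struct pvma;

vma_init(&pvma, NULL);			/* pseudo-vma with no real mm behind it */
pvma.vm_end = TASK_SIZE;		/* policy covers entire file */
mpol_set_shared_policy(sp, &pvma, new);	/* install into the rb-tree; takes a ref */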
2855 /* Free a backing policy store on inode delete. */
2945 * Set interleaving policy for system init. Interleaving is only
2974 /* Reset policy of current process to default */
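The reset path behind the matched line 2974 is a one-liner over do_set_mempolicy() ("Set the process memory policy" above); a sketch:

void numa_default_policy(void)
{
	do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
}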