Lines Matching defs:page
13 * The VMA policy has priority over the process policy for a page fault.
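A userspace illustration of that precedence (a minimal sketch, assuming libnuma's <numaif.h> wrappers for the set_mempolicy(2) and mbind(2) syscalls; the node numbers are only examples):

    /* Build with: cc vma_policy_demo.c -lnuma */
    #include <numaif.h>       /* set_mempolicy(), mbind(), MPOL_* */
    #include <sys/mman.h>
    #include <string.h>

    int main(void)
    {
        unsigned long node0[1] = { 1UL << 0 };   /* nodemask: node 0 */
        unsigned long node1[1] = { 1UL << 1 };   /* nodemask: node 1 */
        unsigned long maxnode = sizeof(node0) * 8;
        size_t len = 4UL << 20;
        char *buf;

        /* Process policy: prefer node 0 for this task's allocations. */
        set_mempolicy(MPOL_PREFERRED, node0, maxnode);

        buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (buf == MAP_FAILED)
            return 1;

        /* VMA policy: bind this mapping to node 1.  Faults in
         * [buf, buf + len) follow the VMA policy, not the process policy. */
        mbind(buf, len, MPOL_BIND, node1, maxnode, 0);

        memset(buf, 0, len);  /* page faults here allocate from node 1 */
        return 0;
    }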
60 fix mmap readahead to honour policy and enable policy for any page cache
63 global policy for page cache? currently it uses process policy. Requires
454 * a special page is met, i.e. the zero page, or an unmovable page is found
474 if (is_huge_zero_page(&folio->page)) {
502 * a special page is met, i.e. the zero page, or an unmovable page is found
611 * every page is mapped to the same process. Doing that is very
732 * Walk through page tables and collect pages to be migrated.
739 * 1 - there is an unmovable page, but MPOL_MF_MOVE* & MPOL_MF_STRICT were
741 * 0 - pages queued successfully or no misplaced page.
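Those codes are the return-value contract of queue_pages_range(). A rough, kernel-internal sketch of how a caller such as do_mbind() might map them to an errno (not standalone code; the migration callback name and the exact migrate_pages() signature vary across kernel versions):

    ret = queue_pages_range(mm, start, end, nmask, flags, &pagelist);
    if (ret < 0) {
        err = ret;                      /* hard error reported by the walk */
    } else {
        /* migrate_pages() drains &pagelist; nr_failed counts leftovers. */
        nr_failed = migrate_pages(&pagelist, new_folio, NULL, start,
                                  MIGRATE_SYNC, MR_MEMPOLICY_MBIND, NULL);
        if (nr_failed)
            putback_movable_pages(&pagelist);

        /* ret == 1 means an unmovable page was seen during the walk. */
        if (ret > 0 || (nr_failed && (flags & MPOL_MF_STRICT)))
            err = -EIO;
    }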
926 struct page *p = NULL;
1043 * every page is mapped to the same process. Doing that is very
1110 * Returns the number of pages that could not be moved.
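do_migrate_pages() backs the migrate_pages(2) syscall, whose positive return value carries the same meaning. A minimal userspace sketch (assuming libnuma's <numaif.h> wrapper; node numbers are illustrative):

    /* Build with: cc migrate_demo.c -lnuma */
    #include <numaif.h>       /* migrate_pages() */
    #include <stdio.h>

    int main(void)
    {
        unsigned long from[1] = { 1UL << 0 };   /* old nodes: node 0 */
        unsigned long to[1]   = { 1UL << 1 };   /* new nodes: node 1 */
        long left;

        /* pid 0: operate on the calling process. */
        left = migrate_pages(0, sizeof(from) * 8, from, to);
        if (left < 0)
            perror("migrate_pages");
        else if (left > 0)
            printf("%ld pages could not be moved\n", left);
        return 0;
    }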
1212 * Allocate a new page for page migration based on vma policy.
1213 * Start by assuming the page is mapped by the same vma that contains @start.
1226 address = page_address_in_vma(&src->page, vma);
1335 * miss a concurrently inserted page.
1861 * page allocation
2027 * Returns a nid suitable for a huge page allocation and a pointer
2129 /* Allocate a page in interleaved policy.
2131 static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
2134 struct page *page;
2136 page = __alloc_pages(gfp, order, nid, NULL);
2139 return page;
2140 if (page && page_to_nid(page) == nid) {
2142 __count_numa_event(page_zone(page), NUMA_INTERLEAVE_HIT);
2145 return page;
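The listing only shows lines containing "page", so the guard between the allocation (2136) and the statistics update (2140) is elided. In recent kernels the full body reads roughly as follows (a sketch; the static-key guard differs across versions):

    static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
                                              unsigned nid)
    {
        struct page *page;

        page = __alloc_pages(gfp, order, nid, NULL);
        /* Skip the NUMA_INTERLEAVE_HIT update when NUMA stats are disabled. */
        if (!static_branch_likely(&vm_numa_stat_key))
            return page;
        if (page && page_to_nid(page) == nid) {
            preempt_disable();
            __count_numa_event(page_zone(page), NUMA_INTERLEAVE_HIT);
            preempt_enable();
        }
        return page;
    }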
2148 static struct page *alloc_pages_preferred_many(gfp_t gfp, unsigned int order,
2151 struct page *page;
2162 page = __alloc_pages(preferred_gfp, order, nid, &pol->nodes);
2163 if (!page)
2164 page = __alloc_pages(gfp, order, nid, NULL);
2166 return page;
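Likewise, the non-matching lines between 2151 and 2162 hold the gfp adjustment for the first pass. A sketch of the two-pass approach, based on recent kernel sources:

    static struct page *alloc_pages_preferred_many(gfp_t gfp, unsigned int order,
                                                   int nid, struct mempolicy *pol)
    {
        struct page *page;
        gfp_t preferred_gfp;

        /*
         * Two passes: the first tries only the preferred nodes, skipping
         * direct reclaim and allowing failure; the second tries all nodes.
         */
        preferred_gfp = gfp | __GFP_NOWARN;
        preferred_gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
        page = __alloc_pages(preferred_gfp, order, nid, &pol->nodes);
        if (!page)
            page = __alloc_pages(gfp, order, nid, NULL);

        return page;
    }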
2196 struct page *page;
2202 page = alloc_page_interleave(gfp, order, nid);
2203 folio = (struct folio *)page;
2210 struct page *page;
2214 page = alloc_pages_preferred_many(gfp, order, node, pol);
2216 folio = (struct folio *)page;
2277 * first page is naturally aligned (e.g. an order-3 allocation will be aligned
2283 * Return: The page on success or NULL if allocation fails.
2285 struct page *alloc_pages(gfp_t gfp, unsigned order)
2288 struct page *page;
2298 page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
2300 page = alloc_pages_preferred_many(gfp, order,
2303 page = __alloc_pages(gfp, order,
2307 return page;
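Putting the hits from 2285-2307 together, the policy dispatch in alloc_pages() looks roughly like this (a sketch based on ~v5.15-v6.1 sources; helper names may differ in other versions):

    struct page *alloc_pages(gfp_t gfp, unsigned order)
    {
        struct mempolicy *pol = &default_policy;
        struct page *page;

        if (!in_interrupt() && !(gfp & __GFP_THISNODE))
            pol = get_task_policy(current);

        /* No refcount needed for current->mempolicy or default_policy. */
        if (pol->mode == MPOL_INTERLEAVE)
            page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
        else if (pol->mode == MPOL_PREFERRED_MANY)
            page = alloc_pages_preferred_many(gfp, order,
                              policy_node(gfp, pol, numa_node_id()), pol);
        else
            page = __alloc_pages(gfp, order,
                                 policy_node(gfp, pol, numa_node_id()),
                                 policy_nodemask(gfp, pol));

        return page;
    }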
2313 struct page *page = alloc_pages(gfp | __GFP_COMP, order);
2314 struct folio *folio = (struct folio *)page;
2324 struct page **page_array)
2359 struct page **page_array)
2384 unsigned long nr_pages, struct page **page_array)
2573 * mpol_misplaced - check whether current page node is valid in policy
2575 * @page: page to be checked
2576 * @vma: vm area where the page is mapped
2577 * @addr: virtual address where the page is mapped
2579 * Look up the current policy node id for vma, addr and "compare to" the page's
2583 * Return: NUMA_NO_NODE if the page is in a node that is valid for this
2584 * policy, or a suitable node ID to allocate a replacement page from.
2586 int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr)
2590 int curnid = page_to_nid(page);
2629 * use current page if in policy nodemask,
2646 /* Migrate the page towards the node whose CPU is referencing it */
2650 if (!should_numa_migrate_memory(current, page, curnid, thiscpu))
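For context, a sketch of how the NUMA hinting fault path consumes this return value (simplified from do_numa_page() in mm/memory.c; refcounting and statistics are omitted):

    int target_nid = mpol_misplaced(page, vma, addr);

    if (target_nid == NUMA_NO_NODE) {
        /* The page already satisfies the policy: leave it in place. */
    } else if (migrate_misplaced_page(page, vma, target_nid)) {
        /* The page now resides on target_nid (a TNF_MIGRATED fault). */
    } else {
        /* Migration failed or the page was busy; retry on a later fault. */
    }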