Lines Matching defs:page

13  * The VMA policy has priority over the process policy for a page fault.
57 fix mmap readahead to honour policy and enable policy for any page cache
60 global policy for page cache? currently it uses process policy. Requires
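The precedence note at line 13 can be made concrete. Below is a minimal sketch of the lookup order, condensed from get_vma_policy() in a 5.4-era tree; effective_policy() is a hypothetical name and error paths are omitted:

	static struct mempolicy *effective_policy(struct vm_area_struct *vma,
						  unsigned long addr)
	{
		if (vma) {
			/* a shared-object policy (e.g. shmem) wins first */
			if (vma->vm_ops && vma->vm_ops->get_policy)
				return vma->vm_ops->get_policy(vma, addr);
			/* then the per-VMA policy set by mbind() */
			if (vma->vm_policy)
				return vma->vm_policy;
		}
		/* fall back to the process policy, then the system default */
		return current->mempolicy ? current->mempolicy : &default_policy;
	}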
432 static int migrate_page_add(struct page *page, struct list_head *pagelist,
445 * Check if the page's nid is in qp->nmask.
450 static inline bool queue_pages_required(struct page *page,
453 int nid = page_to_nid(page);
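A hedged reconstruction of queue_pages_required() from the fragments above; the MPOL_MF_INVERT handling is taken from a 5.4-era tree and should be read as a sketch:

	static inline bool queue_pages_required(struct page *page,
						struct queue_pages *qp)
	{
		int nid = page_to_nid(page);
		unsigned long flags = qp->flags;

		/* true when nid's membership in qp->nmask matches the
		 * (possibly MPOL_MF_INVERT-ed) sense requested by the caller */
		return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT);
	}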
462 * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
466 * existing page was already on a node that does not follow the
474 struct page *page;
482 page = pmd_page(*pmd);
483 if (is_huge_zero_page(page)) {
489 if (!queue_pages_required(page, qp))
496 migrate_page_add(page, qp->pagelist, flags)) {
514 * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
516 * -EIO - only MPOL_MF_STRICT was specified and an existing page was already
523 struct page *page;
546 page = vm_normal_page(vma, addr, *pte);
547 if (!page)
553 if (PageReserved(page))
555 if (!queue_pages_required(page, qp))
569 if (migrate_page_add(page, qp->pagelist, flags))
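For context, a condensed sketch of the per-PTE loop these fragments come from (queue_pages_pte_range() in a 5.4-era tree; locking, THP-split handling and the vma_migratable() check are elided, and the locals are those of the enclosing function):

	for (; addr != end; pte++, addr += PAGE_SIZE) {
		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, addr, *pte);
		if (!page)
			continue;
		if (PageReserved(page))			/* e.g. in a VDSO */
			continue;
		if (!queue_pages_required(page, qp))
			continue;
		if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
			if (migrate_page_add(page, qp->pagelist, flags))
				has_unmovable = true;	/* could not isolate */
		} else
			break;	/* STRICT alone: exit early, caller
				 * turns addr != end into -EIO */
	}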
591 struct page *page;
599 page = pte_page(entry);
600 if (!queue_pages_required(page, qp))
605 * STRICT alone means only detecting misplaced pages and no
616 * Detecting misplaced pages but allowing migration of pages which
625 (flags & MPOL_MF_MOVE && page_mapcount(page) == 1 &&
627 if (isolate_hugetlb(page, qp->pagelist) &&
630 * Failed to isolate page but allow migrating pages
728 * Walk through page tables and collect pages to be migrated.
735 * 1 - there is an unmovable page, but MPOL_MF_MOVE* & MPOL_MF_STRICT were
737 * 0 - pages queued successfully, or no misplaced page.
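A sketch of how the caller maps these return values, simplified from do_mbind() in a 5.4-era tree:

	ret = queue_pages_range(mm, start, end, nmask, flags, &pagelist);
	if (ret < 0) {
		err = ret;	/* -EIO: STRICT alone and a misplaced page */
	} else {
		nr_failed = migrate_pages(&pagelist, new_page, NULL,
					  start, MIGRATE_SYNC,
					  MR_MEMPOLICY_MBIND);
		/* ret > 0 means an unmovable page was encountered */
		if ((ret > 0 || nr_failed) && (flags & MPOL_MF_STRICT))
			err = -EIO;
	}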
920 struct page *p = NULL;
1033 * page migration, thp tail pages can be passed.
1035 static int migrate_page_add(struct page *page, struct list_head *pagelist,
1038 struct page *head = compound_head(page);
1040 * Avoid migrating a page that is shared with others.
1050 * A non-movable page may reach here, and there may be
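A hedged sketch of the isolation logic that follows in migrate_page_add() (5.4-era; head is the compound head from line 1038): only pages mapped exactly once are migrated unless MPOL_MF_MOVE_ALL is set:

	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(head) == 1) {
		if (!isolate_lru_page(head)) {
			list_add_tail(&head->lru, pagelist);
			mod_node_page_state(page_pgdat(head),
				NR_ISOLATED_ANON + page_is_file_cache(head),
				hpage_nr_pages(head));
		} else if (flags & (MPOL_MF_MOVE_ALL | MPOL_MF_MOVE)) {
			/* a non-movable page: let the caller flag it */
			return -EIO;
		}
	}
	return 0;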
1104 * Returns the number of pages that could not be moved.
1206 * Allocate a new page for page migration based on vma policy.
1207 * Start by assuming the page is mapped by the same vma that contains @start.
1212 static struct page *new_page(struct page *page, unsigned long start)
1219 address = page_address_in_vma(page, vma);
1225 if (PageHuge(page)) {
1226 return alloc_huge_page_vma(page_hstate(compound_head(page)),
1228 } else if (PageTransHuge(page)) {
1229 struct page *thp;
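The fragments above elide the non-matching lines of new_page(); a hedged reconstruction from a 5.4-era tree:

	static struct page *new_page(struct page *page, unsigned long start)
	{
		struct vm_area_struct *vma;
		unsigned long address;

		vma = find_vma(current->mm, start);
		while (vma) {
			address = page_address_in_vma(page, vma);
			if (address != -EFAULT)
				break;
			vma = vma->vm_next;
		}

		if (PageHuge(page)) {
			return alloc_huge_page_vma(page_hstate(compound_head(page)),
					vma, address);
		} else if (PageTransHuge(page)) {
			struct page *thp;

			thp = alloc_hugepage_vma(GFP_TRANSHUGE, vma, address,
						 HPAGE_PMD_ORDER);
			if (!thp)
				return NULL;
			prep_transhuge_page(thp);
			return thp;
		}
		/* if !vma, alloc_page_vma() uses task or system default policy */
		return alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL,
				vma, address);
	}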
1246 static int migrate_page_add(struct page *page, struct list_head *pagelist,
1258 static struct page *new_page(struct page *page, unsigned long start)
1867 * page allocation
2007 * Returns a nid suitable for a huge page allocation and a pointer
2126 /* Allocate a page in interleaved policy.
2128 static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
2131 struct page *page;
2133 page = __alloc_pages(gfp, order, nid);
2136 return page;
2137 if (page && page_to_nid(page) == nid) {
2139 __inc_numa_state(page_zone(page), NUMA_INTERLEAVE_HIT);
2142 return page;
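The non-matching lines between 2133 and 2137 are elided here, which makes the two "return page;" lines look out of order. A hedged reconstruction of the whole function (5.4-era) shows the static-branch early return in between:

	static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
						  unsigned nid)
	{
		struct page *page;

		page = __alloc_pages(gfp, order, nid);
		/* skip the NUMA_INTERLEAVE_HIT update if numa stats are off */
		if (!static_branch_likely(&vm_numa_stat_key))
			return page;
		if (page && page_to_nid(page) == nid) {
			preempt_disable();
			__inc_numa_state(page_zone(page), NUMA_INTERLEAVE_HIT);
			preempt_enable();
		}
		return page;
	}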
2146 * alloc_pages_vma - Allocate a page for a VMA.
2161 * This function allocates a page from the kernel page pool and applies
2166 * NULL when no page can be allocated.
2168 struct page *
2173 struct page *page;
2184 page = alloc_page_interleave(gfp, order, nid);
2211 page = __alloc_pages_node(hpage_node,
2220 if (!page && (gfp & __GFP_DIRECT_RECLAIM))
2221 page = __alloc_pages_nodemask(gfp, order,
2230 page = __alloc_pages_nodemask(gfp, order, preferred_nid, nmask);
2233 return page;
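A typical call site, assuming the 5.4-era signature alloc_pages_vma(gfp, order, vma, addr, node, hugepage); the alloc_page_vma() wrapper passes order 0, the local node and hugepage=false:

	page = alloc_pages_vma(GFP_HIGHUSER_MOVABLE, 0, vma, addr,
			       numa_node_id(), false);
	if (!page)
		return VM_FAULT_OOM;	/* caller-specific failure handling */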
2246 * @order: Order of the allocation; the size is 2^order pages. 0 is a single page.
2248 * Allocate a page from the kernel page pool. When not in
2250 * Returns NULL when no page can be allocated.
2252 struct page *alloc_pages_current(gfp_t gfp, unsigned order)
2255 struct page *page;
2265 page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
2267 page = __alloc_pages_nodemask(gfp, order,
2271 return page;
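A hedged reconstruction of alloc_pages_current() from these fragments (5.4-era source):

	struct page *alloc_pages_current(gfp_t gfp, unsigned order)
	{
		struct mempolicy *pol = &default_policy;
		struct page *page;

		/* the process policy does not apply in interrupts or to
		 * explicitly node-local allocations */
		if (!in_interrupt() && !(gfp & __GFP_THISNODE))
			pol = get_task_policy(current);

		if (pol->mode == MPOL_INTERLEAVE)
			page = alloc_page_interleave(gfp, order,
						     interleave_nodes(pol));
		else
			page = __alloc_pages_nodemask(gfp, order,
					policy_node(gfp, pol, numa_node_id()),
					policy_nodemask(gfp, pol));

		return page;
	}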
2443 * mpol_misplaced - check whether current page node is valid in policy
2445 * @page: page to be checked
2446 * @vma: vm area where page mapped
2447 * @addr: virtual address where page mapped
2449 * Look up the current policy node id for vma,addr and "compare to" the page's
2453 * -1 - not misplaced, page is in the right node
2454 * node - node id where the page should be
2459 int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr)
2463 int curnid = page_to_nid(page);
2492 * use current page if in policy nodemask,
2509 /* Migrate the page towards the node whose CPU is referencing it */
2513 if (!should_numa_migrate_memory(current, page, curnid, thiscpu))
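A sketch of the caller side of the -1/node convention documented at lines 2453-2454 (cf. do_numa_page(); NUMA_NO_NODE is -1):

	bool migrated = false;
	int target_nid = mpol_misplaced(page, vma, addr);

	if (target_nid != NUMA_NO_NODE)
		/* try to move the page toward the node that is using it */
		migrated = migrate_misplaced_page(page, vma, target_nid);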