Lines matching refs:page in mm/huge_memory.c

54  * Defrag is invoked by khugepaged hugepage allocations and by page faults
71 struct page *huge_zero_page __read_mostly;
94 /* khugepaged doesn't collapse DAX vma, but page fault is fine. */
109 * Skip the check for page fault. Huge fault does the check in fault
145 * Allow page fault since anon_vma may be not initialized until
146 * the first page fault.
156 struct page *zero_page;
191 struct page *mm_get_huge_zero_page(struct mm_struct *mm)
214 /* we can free zero page only if last reference remains */
222 struct page *zero_page = xchg(&huge_zero_page, NULL);
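The three lines above (191-222) hint at how the huge zero page is managed: mm_get_huge_zero_page() hands out per-mm references, and the backing page is released only when the last reference remains, by detaching it with xchg(). A minimal sketch of that release pattern, written as a shrinker scan callback; the refcount name huge_zero_refcount and the callback name are assumptions, not a verbatim copy of the code at these lines:

    /* Sketch: free the shared huge zero page only if the last reference remains. */
    static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
                                                    struct shrink_control *sc)
    {
        /* A 1 -> 0 transition means no mm holds a reference any more. */
        if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
            struct page *zero_page = xchg(&huge_zero_page, NULL);

            BUG_ON(zero_page == NULL);
            __free_pages(zero_page, compound_order(zero_page));
            return HPAGE_PMD_NR;
        }
        return 0;
    }

The xchg() shown at line 222 is the detach step: once huge_zero_page reads NULL, no new user can pick up the stale page, so freeing it is safe.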
472 * we use page->mapping and page->index in second tail page
592 return is_huge_zero_page(&folio->page) ||
647 struct page *page, gfp_t gfp)
650 struct folio *folio = page_folio(page);
671 clear_huge_page(page, vmf->address, HPAGE_PMD_NR);
689 /* Deliver the page fault to userland */
699 entry = mk_huge_pmd(page, vma->vm_page_prot);
759 /* Caller must hold page table lock. */
762 struct page *zero_page)
791 struct page *zero_page;
832 return __do_huge_pmd_anonymous_page(vmf, &folio->page, gfp);
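The comment at 759 and the trailing parameter at 762 belong to the helper that maps the shared zero page as a read-only huge pmd during an anonymous fault. A rough sketch of what such an installer does, given its "caller must hold page table lock" contract; the body below is a plausible reconstruction, not a verbatim copy:

    /* Sketch: install the huge zero page; the pmd page table lock is already held. */
    static void set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
                                   struct vm_area_struct *vma, unsigned long haddr,
                                   pmd_t *pmd, struct page *zero_page)
    {
        pmd_t entry;

        if (!pmd_none(*pmd))
            return;         /* raced with someone else populating this pmd */
        entry = mk_pmd(zero_page, vma->vm_page_prot);
        entry = pmd_mkhuge(entry);
        /* Pre-deposit a pte table so a later split cannot fail on allocation. */
        pgtable_trans_huge_deposit(mm, pmd, pgtable);
        set_pmd_at(mm, haddr, pmd, entry);
        mm_inc_nr_ptes(mm);
    }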
1022 struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
1027 struct page *page;
1045 * caller will manage the page reference count.
1054 page = pfn_to_page(pfn);
1055 ret = try_grab_page(page, flags);
1057 page = ERR_PTR(ret);
1059 return page;
1067 struct page *src_page;
1118 * When page table lock is held, the huge zero pmd should not be
1119 * under splitting since we don't split the page itself, only pmd to
1120 * a page table.
1124 * get_huge_zero_page() will never allocate a new page here,
1125 * since we already have a zero page to copy. It just takes a
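Lines 1118-1125 reason about why copying a huge zero pmd at fork time is cheap: with the page table lock held the pmd cannot be split underneath us, and duplicating it only takes another reference on the already-existing zero page. A condensed sketch of that branch inside the pmd copy path; the label name is an assumption and the surrounding code is elided:

    /* Sketch: fragment of the fork-time pmd copy, zero-pmd case only. */
    if (is_huge_zero_pmd(pmd)) {
        /*
         * No allocation can happen here: a zero page to copy already
         * exists, so this only bumps the destination mm's reference.
         */
        mm_get_huge_zero_page(dst_mm);
        goto out_zero_page;     /* assumed label: go install the pmd in the child */
    }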
1177 struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
1182 struct page *page;
1200 * caller will manage the page reference count.
1211 page = pfn_to_page(pfn);
1213 ret = try_grab_page(page, flags);
1215 page = ERR_PTR(ret);
1217 return page;
1238 * When page table lock is held, the huge zero pud should not be
1239 * under splitting since we don't split the page itself, only pud to
1240 * a page table.
1294 struct page *page;
1311 page = pmd_page(orig_pmd);
1312 folio = page_folio(page);
1313 VM_BUG_ON_PAGE(!PageHead(page), page);
1316 if (PageAnonExclusive(page))
1334 if (PageAnonExclusive(page)) {
1352 page_move_anon_rmap(page, vma);
1378 struct page *page;
1397 page = vm_normal_page_pmd(vma, addr, pmd);
1398 return page && PageAnon(page) && PageAnonExclusive(page);
1406 static inline bool can_follow_write_pmd(pmd_t pmd, struct page *page,
1410 /* If the pmd is writable, we can write to the page. */
1431 * See can_change_pte_writable(): we broke COW and could map the page
1432 * writable if we have an exclusive anonymous page ...
1434 if (!page || !PageAnon(page) || !PageAnonExclusive(page))
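Lines 1406-1434 give the policy for FOLL_FORCE writes through a read-only pmd: a writable pmd is always fine, and otherwise the target must be an exclusive anonymous page whose COW has already been broken. A simplified reconstruction of that helper; the in-tree version layers further VMA, soft-dirty and uffd-wp checks on top of this:

    /* Sketch: may GUP write through a read-only pmd? (simplified) */
    static inline bool can_follow_write_pmd(pmd_t pmd, struct page *page,
                                            struct vm_area_struct *vma,
                                            unsigned int flags)
    {
        /* If the pmd is writable, we can write to the page. */
        if (pmd_write(pmd))
            return true;

        /* Only FOLL_FORCE may override a read-only mapping ... */
        if (!(flags & FOLL_FORCE))
            return false;

        /*
         * ... and only when COW was already broken: nothing but an
         * exclusive anonymous page could be mapped writable here.
         */
        if (!page || !PageAnon(page) || !PageAnonExclusive(page))
            return false;

        /* The real helper rejects several more cases before saying yes. */
        return true;
    }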
1443 struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
1449 struct page *page;
1454 page = pmd_page(*pmd);
1455 VM_BUG_ON_PAGE(!PageHead(page) && !is_zone_device_page(page), page);
1458 !can_follow_write_pmd(*pmd, page, vma, flags))
1461 /* Avoid dumping huge zero page */
1468 if (!pmd_write(*pmd) && gup_must_unshare(vma, flags, page))
1471 VM_BUG_ON_PAGE((flags & FOLL_PIN) && PageAnon(page) &&
1472 !PageAnonExclusive(page), page);
1474 ret = try_grab_page(page, flags);
1481 page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
1482 VM_BUG_ON_PAGE(!PageCompound(page) && !is_zone_device_page(page), page);
1484 return page;
1487 /* NUMA hinting page fault entry point for trans huge pmds */
1493 struct page *page;
1517 page = vm_normal_page_pmd(vma, haddr, pmd);
1518 if (!page)
1525 page_nid = page_to_nid(page);
1527 * For memory tiering mode, cpupid of slow memory page is used
1528 * to record page access time. So use default value.
1531 last_cpupid = page_cpupid_last(page);
1532 target_nid = numa_migrate_prep(page, vma, haddr, page_nid,
1536 put_page(page);
1543 migrated = migrate_misplaced_page(page, vma, target_nid);
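Lines 1487-1543 outline the NUMA hinting fault path for a transparent huge page: find the page behind the pmd, ask the NUMA balancer for a target node, then hand the page to the misplaced-page migrator. A condensed sketch of that flow, assuming the local variable names shown in the matched lines (plus vmf); locking and the remap-on-failure path are reduced to comments:

    /* Sketch: core of the THP NUMA-hinting fault, heavily condensed. */
    page = vm_normal_page_pmd(vma, haddr, pmd);
    if (!page)
        goto out_map;                   /* nothing to migrate, just remap the pmd */

    page_nid = page_to_nid(page);
    last_cpupid = page_cpupid_last(page);
    target_nid = numa_migrate_prep(page, vma, haddr, page_nid, &flags);
    if (target_nid == NUMA_NO_NODE) {
        put_page(page);                 /* numa_migrate_prep() took a reference */
        goto out_map;
    }

    spin_unlock(vmf->ptl);              /* drop the pmd lock before migrating */
    migrated = migrate_misplaced_page(page, vma, target_nid);
    if (migrated)
        page_nid = target_nid;          /* account the fault to the new node */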
1577 * Return true if we do MADV_FREE successfully on entire pmd page.
1688 struct page *page = NULL;
1692 page = pmd_page(orig_pmd);
1693 page_remove_rmap(page, vma, true);
1694 VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
1695 VM_BUG_ON_PAGE(!PageHead(page), page);
1701 page = pfn_swap_entry_to_page(entry);
1706 if (PageAnon(page)) {
1712 add_mm_counter(tlb->mm, mm_counter_file(page), -HPAGE_PMD_NR);
1717 tlb_remove_page_size(tlb, page, HPAGE_PMD_SIZE);
1729 * PTE page table if new_pmd is on different PMD page table.
1759 * inserted a page table, if racing against shmem/file collapse.
1828 struct page *page = pfn_swap_entry_to_page(entry);
1837 if (PageAnon(page))
1859 struct page *page;
1862 * Avoid trapping faults against the zero page. The read-only
1864 * local/remote hits to the zero page are not interesting.
1872 page = pmd_page(*pmd);
1873 toptier = node_is_toptier(page_to_nid(page));
1884 xchg_page_access_time(page, jiffies_to_msecs(jiffies));
1936 * Returns page table lock pointer if a given pmd maps a thp, NULL otherwise.
1938 * Note that if it returns page table lock pointer, this routine returns without
1939 * unlocking page table lock. So callers must unlock it.
1953 * Returns page table lock pointer if a given pud maps a thp, NULL otherwise.
1955 * Note that if it returns page table lock pointer, this routine returns without
1956 * unlocking page table lock. So callers must unlock it.
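The two comments above (1936-1956) state the locking contract of the pmd/pud "trans huge lock" helpers: a non-NULL return means the lock is held and the caller owns the unlock. A minimal caller sketch for the pmd variant; the work done under the lock is a hypothetical placeholder:

    /* Sketch: typical use of pmd_trans_huge_lock(). */
    spinlock_t *ptl;

    ptl = pmd_trans_huge_lock(pmd, vma);
    if (ptl) {
        /* *pmd maps a THP and the page table lock is held right now. */
        handle_huge_pmd(vma, addr, pmd);        /* hypothetical per-caller work */
        spin_unlock(ptl);                       /* callers must unlock it */
    } else {
        /* Not (or no longer) a THP: fall back to the pte-level path. */
    }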
1983 /* No zero page support yet */
2038 * replacing a zero pmd write protected page with a zero pte write
2039 * protected page.
2070 struct page *page;
2090 * We are going to unmap this huge page. So
2101 page = pfn_swap_entry_to_page(entry);
2103 page = pmd_page(old_pmd);
2104 if (!PageDirty(page) && pmd_dirty(old_pmd))
2105 set_page_dirty(page);
2106 if (!PageReferenced(page) && pmd_young(old_pmd))
2107 SetPageReferenced(page);
2108 page_remove_rmap(page, vma, true);
2109 put_page(page);
2111 add_mm_counter(mm, mm_counter_file(page), -HPAGE_PMD_NR);
2121 * We are going from a write-protected zero huge page to a write-protected zero
2122 * small page, so it does not seem useful
2133 * free), userland could trigger a small page size TLB miss on the
2137 * 383 on page 105. Intel should be safe but it also warns that it's
2155 page = pfn_swap_entry_to_page(entry);
2157 if (PageAnon(page))
2164 page = pmd_page(old_pmd);
2167 SetPageDirty(page);
2174 VM_BUG_ON_PAGE(!page_count(page), page);
2191 anon_exclusive = PageAnon(page) && PageAnonExclusive(page);
2192 if (freeze && anon_exclusive && page_try_share_anon_rmap(page))
2195 page_ref_add(page, HPAGE_PMD_NR - 1);
2218 page_to_pfn(page + i));
2221 page_to_pfn(page + i));
2224 page_to_pfn(page + i));
2235 entry = mk_pte(page + i, READ_ONCE(vma->vm_page_prot));
2239 SetPageAnonExclusive(page + i);
2249 page_add_anon_rmap(page + i, vma, addr, RMAP_NONE);
2258 page_remove_rmap(page, vma, true);
2260 put_page(page);
2381 static void lru_add_page_tail(struct page *head, struct page *tail,
2390 /* page reclaim is reclaiming a huge page */
2408 struct page *head = &folio->page;
2409 struct page *page_tail = head + tail;
2419 * Clone page flags before unfreezing refcount.
2449 /* ->mapping in first and second tail page is replaced by other uses */
2456 * page->private should not be set in tail pages. Fix up and warn once
2466 /* Page flags must be visible before we make the page non-compound. */
2470 * Clear PageTail before unfreezing page refcount.
2477 /* Finally unfreeze refcount. Additional reference from page cache. */
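Lines 2381-2477 describe the ordering constraints when a tail page is carved out of a huge page whose refcount is frozen: clone the flags and metadata first, make them visible, clear the compound linkage, and only then unfreeze the tail's refcount. A sketch of that sequence for one tail page, reusing the head/page_tail/tail names from the matched lines; CLONED_FLAGS is an assumed stand-in for the explicit flag mask and swap-cache accounting is elided:

    /* Sketch: ordering when splitting one tail page out of a frozen huge page. */
    page_tail->flags |= head->flags & CLONED_FLAGS; /* 1. clone flags/metadata */
    page_tail->mapping = head->mapping;
    page_tail->index = head->index + tail;

    smp_wmb();                          /* 2. make that visible before ...         */
    clear_compound_head(page_tail);     /* 3. ... the page stops being a tail page */

    /* 4. Finally unfreeze the refcount; page cache keeps its extra reference. */
    page_ref_unfreeze(page_tail, 1 + !PageAnon(head));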
2496 static void __split_huge_page(struct page *page, struct list_head *list,
2499 struct folio *folio = page_folio(page);
2500 struct page *head = &folio->page;
2523 /* Some pages can be beyond EOF: drop them from page cache */
2534 } else if (!PageAnon(page)) {
2559 /* Additional pin to page cache */
2573 struct page *subpage = head + i;
2574 if (subpage == page)
2580 * like if add_to_swap() is running on a lru page that
2589 /* Racy check whether the huge page can be split */
2594 /* Additional pins from page cache */
2606 * This function splits huge page into normal pages. @page can point to any
2607 * subpage of huge page to split. Split doesn't change the position of @page.
2609 * The caller must hold the only pin on the @page, otherwise split fails with -EBUSY.
2610 * The huge page must be locked.
2614 * Both head page and tail pages will inherit mapping, flags, and so on from
2617 * The GUP pin and PG_locked are transferred to @page. The remaining subpages can be freed if
2621 * Returns -EBUSY if the page is pinned or if anon_vma disappeared from under
2624 int split_huge_page_to_list(struct page *page, struct list_head *list)
2626 struct folio *folio = page_folio(page);
2638 is_hzp = is_huge_zero_page(&folio->page);
2640 pr_warn_ratelimited("Called split_huge_page for huge zero page\n");
2695 * which cannot be nested inside the page tree lock. So note
2705 * Racy check if we can split the page, before unmap_folio() will
2719 * Check if the folio is present in page cache.
2752 __split_huge_page(page, list, end);
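Lines 2606-2624 spell out the caller contract for split_huge_page_to_list(): the caller holds the only pin, the huge page is locked, and both the pin and PG_locked stay with @page afterwards. A minimal caller honouring that contract; the wrapper name is made up for illustration and error handling is trimmed:

    /* Sketch: split a THP we already looked up and pinned (e.g. via FOLL_GET). */
    static int try_split_thp(struct page *page)
    {
        int ret;

        lock_page(page);        /* the huge page must be locked */
        ret = split_huge_page_to_list(page, NULL);  /* same as split_huge_page() */
        unlock_page(page);      /* PG_locked stays with @page either way */
        put_page(page);         /* drop our pin once we are done */
        return ret;             /* 0 on success, -EBUSY if extra pins were found */
    }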
2810 * The try_to_unmap() in page reclaim path might reach here too,
2812 * And, if page reclaim is already handling the same folio, it is
2816 * handled by page reclaim since THP swap would add the folio into
2886 /* split_huge_page() removes page from list on success */
2899 * Stop shrinker if we didn't split any page, but the queue is empty.
2919 struct page *page;
2932 page = pfn_to_online_page(pfn);
2933 if (!page || PageTail(page))
2935 folio = page_folio(page);
2939 if (unlikely(page_folio(page) != folio))
3009 * always increase addr by PAGE_SIZE, since we could have a PTE page
3014 struct page *page;
3027 page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
3029 if (IS_ERR_OR_NULL(page))
3032 folio = page_folio(page);
3079 pr_debug("split file-backed THPs in file: %s, page offset: [0x%lx - 0x%lx]\n",
3205 struct page *page)
3222 anon_exclusive = PageAnon(page) && PageAnonExclusive(page);
3223 if (anon_exclusive && page_try_share_anon_rmap(page)) {
3229 set_page_dirty(page);
3231 entry = make_writable_migration_entry(page_to_pfn(page));
3233 entry = make_readable_exclusive_migration_entry(page_to_pfn(page));
3235 entry = make_readable_migration_entry(page_to_pfn(page));
3246 page_remove_rmap(page, vma, true);
3247 put_page(page);
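Lines 3205-3247 come from the path that replaces a mapped THP pmd with a migration entry: dirtiness is preserved, the entry type is chosen from the pmd's writability and the page's anon-exclusive state, and the old mapping's rmap and reference are dropped. A condensed sketch of that selection; pmdval and pmdswp are assumed local names and several bookkeeping steps (soft-dirty, uffd-wp, young/dirty bits) are elided:

    /* Sketch: turn a mapped huge pmd into a pmd migration entry (condensed). */
    if (pmd_dirty(pmdval))
        set_page_dirty(page);           /* keep dirtiness across the unmap */

    if (pmd_write(pmdval))
        entry = make_writable_migration_entry(page_to_pfn(page));
    else if (anon_exclusive)
        entry = make_readable_exclusive_migration_entry(page_to_pfn(page));
    else
        entry = make_readable_migration_entry(page_to_pfn(page));

    pmdswp = swp_entry_to_pmd(entry);
    set_pmd_at(mm, address, pvmw->pmd, pmdswp);
    page_remove_rmap(page, vma, true);  /* true: this was a pmd-level (compound) mapping */
    put_page(page);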
3253 void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)