Lines Matching refs:page
82 * it would have happened if the vma was large enough during page
366 * register it here without waiting for a page fault that
496 static void release_pte_page(struct page *page)
498 release_pte_folio(page_folio(page));
527 static bool is_refcount_suitable(struct page *page)
531 expected_refcount = total_mapcount(page);
532 if (PageSwapCache(page))
533 expected_refcount += compound_nr(page);
535 return page_count(page) == expected_refcount;
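
Note: is_refcount_suitable() above (lines 527-535) treats a page as free of unexpected pins only when its refcount exactly matches what khugepaged can account for: the total mapcount, plus one reference per subpage while the page sits in the swap cache. A minimal userspace sketch of that arithmetic; struct fake_page and refcount_suitable() here are hypothetical stand-ins, not the kernel's struct page or helpers.

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical, simplified stand-in for struct page. */
    struct fake_page {
            int refcount;       /* page_count()      */
            int mapcount;       /* total_mapcount()  */
            int nr_subpages;    /* compound_nr()     */
            bool in_swapcache;  /* PageSwapCache()   */
    };

    /* Mirrors the logic of lines 527-535: any reference beyond the
     * expected mappings (and swap-cache refs) counts as a pin. */
    static bool refcount_suitable(const struct fake_page *p)
    {
            int expected = p->mapcount;

            if (p->in_swapcache)
                    expected += p->nr_subpages;

            return p->refcount == expected;
    }

    int main(void)
    {
            struct fake_page clean  = { .refcount = 1, .mapcount = 1, .nr_subpages = 1 };
            struct fake_page pinned = { .refcount = 2, .mapcount = 1, .nr_subpages = 1 };

            printf("clean: %d pinned: %d\n",
                   refcount_suitable(&clean), refcount_suitable(&pinned));
            return 0;
    }

Any surplus reference (a GUP or other external pin, as the comments at lines 617-627 put it) breaks the equality and the collapse backs off.
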
544 struct page *page = NULL;
573 page = vm_normal_page(vma, address, pteval);
574 if (unlikely(!page) || unlikely(is_zone_device_page(page))) {
579 VM_BUG_ON_PAGE(!PageAnon(page), page);
581 if (page_mapcount(page) > 1) {
591 if (PageCompound(page)) {
592 struct page *p;
593 page = compound_head(page);
596 * Check if we have dealt with the compound page
600 if (page == p)
607 * page can't be freed from under us. NOTE: PG_lock
611 if (!trylock_page(page)) {
617 * Check if the page has any GUP (or other external) pins.
619 * The page table that maps the page has been already unlinked
620 * from the page table tree and this process cannot get
621 * an additional pin on the page.
623 * New pins can come later if the page is shared across fork,
625 * the page, only trigger CoW.
627 if (!is_refcount_suitable(page)) {
628 unlock_page(page);
634 * Isolate the page to avoid collapsing a hugepage
637 if (!isolate_lru_page(page)) {
638 unlock_page(page);
642 mod_node_page_state(page_pgdat(page),
643 NR_ISOLATED_ANON + page_is_file_lru(page),
644 compound_nr(page));
645 VM_BUG_ON_PAGE(!PageLocked(page), page);
646 VM_BUG_ON_PAGE(PageLRU(page), page);
648 if (PageCompound(page))
649 list_add_tail(&page->lru, compound_pagelist);
653 * enough young pte to justify collapsing the page
656 (pte_young(pteval) || page_is_young(page) ||
657 PageReferenced(page) || mmu_notifier_test_young(vma->vm_mm,
671 trace_mm_collapse_huge_page_isolate(page, none_or_zero,
677 trace_mm_collapse_huge_page_isolate(page, none_or_zero,
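
The condition at lines 656-657 ORs together every available recency signal before a PTE counts toward the referenced tally that justifies the collapse. A small illustrative model; struct pte_signals and counts_as_referenced() are assumptions, not the real pte_young()/page_is_young()/PageReferenced()/mmu_notifier_test_young() helpers.

    #include <stdbool.h>

    /* Hypothetical bundle of the signals tested at lines 656-657. */
    struct pte_signals {
            bool pte_young;           /* pte_young()               */
            bool page_young;          /* page_is_young()           */
            bool page_referenced;     /* PageReferenced()          */
            bool mmu_notifier_young;  /* mmu_notifier_test_young() */
    };

    /* A PTE counts as referenced if any recency signal is set. */
    bool counts_as_referenced(const struct pte_signals *s)
    {
            return s->pte_young || s->page_young ||
                   s->page_referenced || s->mmu_notifier_young;
    }
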
688 struct page *src_page;
689 struct page *tmp;
744 * Re-establish the PMD to point to the original page table
762 * otherwise restores the original page table and releases isolated raw pages.
766 * @page: the new hugepage to copy contents to
775 struct page *page,
783 struct page *src_page;
793 _pte++, page++, _address += PAGE_SIZE) {
796 clear_user_highpage(page, _address);
800 if (copy_mc_user_highpage(page, src_page, _address, vma) > 0) {
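
Lines 793-800 are the per-subpage copy loop of __collapse_huge_page_copy(): a none/zero source PTE means the destination subpage is simply cleared, otherwise the subpage is copied with the machine-check-safe copy, and a failed copy aborts so the caller can restore the original page table and release the isolated pages (lines 744, 762). A hedged sketch of that pattern; copy_subpage_mc(), the buffer layout, and the constants are assumptions, not kernel API.

    #include <stdbool.h>
    #include <stddef.h>
    #include <string.h>

    #define SUBPAGE_SIZE 4096
    #define NR_SUBPAGES  512    /* one PMD-sized page worth of subpages */

    /* Stand-in for copy_mc_user_highpage(): a nonzero return would mean
     * an uncorrectable memory error was hit while copying. */
    static int copy_subpage_mc(void *dst, const void *src)
    {
            memcpy(dst, src, SUBPAGE_SIZE);
            return 0;
    }

    bool copy_huge_page(void *dst, void *const src[NR_SUBPAGES])
    {
            for (size_t i = 0; i < NR_SUBPAGES; i++) {
                    char *d = (char *)dst + i * SUBPAGE_SIZE;

                    if (!src[i])
                            memset(d, 0, SUBPAGE_SIZE);    /* none/zero PTE: clear */
                    else if (copy_subpage_mc(d, src[i]))
                            return false;                  /* abort; caller rolls back */
            }
            return true;
    }
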
890 static bool hpage_collapse_alloc_page(struct page **hpage, gfp_t gfp, int node,
1033 /* Likely, but not guaranteed, that page lock failed */
1058 static int alloc_charge_hpage(struct page **hpage, struct mm_struct *mm,
1088 struct page *hpage;
1249 struct page *page = NULL;
1306 * Don't collapse the page if any of the small
1320 page = vm_normal_page(vma, _address, pteval);
1321 if (unlikely(!page) || unlikely(is_zone_device_page(page))) {
1326 if (page_mapcount(page) > 1) {
1336 page = compound_head(page);
1339 * Record which node the original page is from and save this
1344 node = page_to_nid(page);
1350 if (!PageLRU(page)) {
1354 if (PageLocked(page)) {
1358 if (!PageAnon(page)) {
1364 * Check if the page has any GUP (or other external) pins.
1369 * later. However it may report a false positive if the page
1373 if (!is_refcount_suitable(page)) {
1380 * enough young pte to justify collapsing the page
1383 (pte_young(pteval) || page_is_young(page) ||
1384 PageReferenced(page) || mmu_notifier_test_young(vma->vm_mm,
1406 trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
1438 pmd_t *pmdp, struct page *hpage)
1466 * right THP. If so, retract the page table so the THP can refault in with
1476 struct page *hpage;
1485 /* First check VMA found, in case page tables are being torn down */
1490 /* Fast check before locking page if already PMD-mapped */
1497 * in the page cache with a single hugepage. If a mm were to fault-in
1540 if (!start_pte) /* mmap_lock + page lock should prevent this */
1543 /* step 1: check all mapped PTEs are to the right huge page */
1546 struct page *page;
1553 /* page swapped out, abort */
1559 page = vm_normal_page(vma, addr, ptent);
1560 if (WARN_ON_ONCE(page && is_zone_device_page(page)))
1561 page = NULL;
1564 * page table, but the new page will not be a subpage of hpage.
1566 if (hpage + i != page)
1580 * inserts a valid as-if-COWed PTE without even looking up page cache.
1581 * So page lock of hpage does not protect from it, so we must not drop
1588 if (!start_pte) /* mmap_lock + page lock should prevent this */
1595 /* step 2: clear page table and adjust rmap */
1598 struct page *page;
1605 * page lock stops more PTEs of the hpage being faulted in, but
1613 page = vm_normal_page(vma, addr, ptent);
1614 if (hpage + i != page)
1620 * PTE dirty? Shmem page is already dirty; file is read-only.
1623 page_remove_rmap(page, vma, false);
1637 /* step 4: remove empty page table */
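
The step markers at lines 1543, 1595 and 1637 give this path its verify-then-act shape: first check that every present PTE maps the matching subpage of the hugepage, and only then clear the PTEs, drop the rmap, and remove the now-empty page table. A rough model under that reading; struct fake_pte and collapse_pte_mapped() are hypothetical, and all locking and rmap bookkeeping is omitted.

    #include <stdbool.h>
    #include <stddef.h>

    #define NR_PTES 512

    /* Hypothetical PTE model: either empty or naming the hugepage subpage it maps. */
    struct fake_pte {
            bool present;
            size_t subpage;
    };

    bool collapse_pte_mapped(struct fake_pte ptes[NR_PTES])
    {
            /* step 1: check all mapped PTEs point at the right hugepage */
            for (size_t i = 0; i < NR_PTES; i++)
                    if (ptes[i].present && ptes[i].subpage != i)
                            return false;        /* mismatch: change nothing */

            /* step 2: clear the page table */
            for (size_t i = 0; i < NR_PTES; i++)
                    ptes[i].present = false;

            /* step 4: the empty page table can now be removed by the caller */
            return true;
    }
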
1696 * page tables from, as PMD-mapping is likely to be split later.
1714 * the page table because there may be pte markers installed.
1716 * skip this one: it will always be mapped in small page size
1733 * Huge page lock is still held, so normally the page table
1773 * - allocate and lock a new huge page;
1774 * - scan page cache, locking old pages
1776 * - copy data to new page
1780 * - finalize updates to the page cache;
1782 * + unlock huge page;
1786 * + unlock and free huge page;
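
Lines 1773-1786 sketch collapse_file() end to end: allocate and lock a new hugepage, scan the page cache locking the old pages, copy the data, finalize the page cache, then unlock the hugepage; on any failure, unwind and unlock and free the hugepage instead. A compact, speculative model of that all-or-rollback control flow; every step function below is a hypothetical placeholder, not a kernel function.

    #include <stdbool.h>

    static bool alloc_and_lock_hpage(void)  { return true; }
    static bool lock_old_pages(void)        { return true; }
    static bool copy_to_hpage(void)         { return true; }
    static bool finalize_page_cache(void)   { return true; }
    static void unlock_hpage(void)          { }
    static void rollback_old_pages(void)    { }
    static void free_hpage(void)            { }

    bool collapse_file_model(void)
    {
            if (!alloc_and_lock_hpage())
                    return false;
            if (!lock_old_pages())
                    goto fail;
            if (!copy_to_hpage() || !finalize_page_cache())
                    goto fail_pages;

            unlock_hpage();                  /* success: hugepage stays in the cache */
            return true;

    fail_pages:
            rollback_old_pages();            /* roll back page cache changes */
    fail:
            unlock_hpage();
            free_hpage();                    /* failure: unlock and free huge page */
            return false;
    }
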
1793 struct page *hpage;
1794 struct page *page;
1795 struct page *tmp;
1835 page = xas_load(&xas);
1839 if (!page) {
1855 if (xa_is_value(page) || !PageUptodate(page)) {
1857 /* swap in or instantiate fallocated page */
1865 page = folio_file_page(folio, index);
1866 } else if (trylock_page(page)) {
1867 get_page(page);
1874 if (!page || xa_is_value(page)) {
1881 page = find_lock_page(mapping, index);
1882 if (unlikely(page == NULL)) {
1886 } else if (PageDirty(page)) {
1889 * so this page is dirty because it hasn't
1895 * revisits this page.
1904 } else if (PageWriteback(page)) {
1908 } else if (trylock_page(page)) {
1909 get_page(page);
1918 * The page must be locked, so we can drop the i_pages lock
1921 VM_BUG_ON_PAGE(!PageLocked(page), page);
1923 /* make sure the page is up to date */
1924 if (unlikely(!PageUptodate(page))) {
1931 * we locked the first page, then a THP might be there already.
1934 if (PageTransCompound(page)) {
1935 struct page *head = compound_head(page);
1945 folio = page_folio(page);
1956 * page is dirty because it hasn't been flushed
1980 VM_BUG_ON_PAGE(page != xa_load(xas.xa, index), page);
1983 * We control three references to the page:
1985 * - one reference from page cache;
1988 * page will have to fetch it from the page cache. That requires
1989 * locking the page to handle truncate, so any new usage will be
1990 * blocked until we unlock page after collapse/during rollback.
1992 if (page_count(page) != 3) {
1995 putback_lru_page(page);
2002 list_add_tail(&page->lru, &pagelist);
2005 unlock_page(page);
2006 put_page(page);
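
The page_count(page) != 3 test at line 1992 is the arithmetic spelled out in the comment above it: one reference belongs to the page cache (line 1985), and, as the surrounding code suggests, one comes from LRU isolation (hence putback_lru_page() on the error path) and one from the local get_page() taken when the page was locked. Purely as an illustration, with a hypothetical helper:

    /* Illustrative only: the three references the collapse path expects
     * to hold on each old page while it is being collapsed. */
    int expected_refs_during_collapse(void)
    {
            return 1        /* the page cache's reference   */
                 + 1        /* LRU isolation                */
                 + 1;       /* the local get_page()         */
    }

Anything above three means another user still holds the page, so it is unlocked and put back instead of being collapsed.
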
2015 * visible. Ensures the page cache will be truncated if the
2048 list_for_each_entry(page, &pagelist, lru) {
2049 while (index < page->index) {
2053 if (copy_mc_highpage(hpage + (page->index % HPAGE_PMD_NR), page) > 0) {
2089 * If userspace observed a missing page in a VMA with a MODE_MISSING
2091 * page. If so, we need to roll back to avoid suppressing such an
2093 * guarantees that the kernel doesn't fill a missing page with a zero
2094 * page, so they don't matter here.
2136 * Mark hpage as uptodate before inserting it into the page cache so
2137 * that it isn't mistaken for a fallocated but unwritten page.
2154 * Remove pte page tables, so we can re-fault the page as huge.
2165 list_for_each_entry_safe(page, tmp, &pagelist, lru) {
2166 list_del(&page->lru);
2167 page->mapping = NULL;
2168 ClearPageActive(page);
2169 ClearPageUnevictable(page);
2170 unlock_page(page);
2171 folio_put_refs(page_folio(page), 3);
2177 /* Something went wrong: roll back page cache changes */
2185 list_for_each_entry_safe(page, tmp, &pagelist, lru) {
2186 list_del(&page->lru);
2187 unlock_page(page);
2188 putback_lru_page(page);
2189 put_page(page);
2219 struct page *page = NULL;
2231 xas_for_each(&xas, page, start + HPAGE_PMD_NR - 1) {
2232 if (xas_retry(&xas, page))
2235 if (xa_is_value(page)) {
2248 * into a PMD sized page
2250 if (PageTransCompound(page)) {
2251 struct page *head = compound_head(page);
2260 * by the caller won't touch the page cache, and so
2267 node = page_to_nid(page);
2274 if (!PageLRU(page)) {
2279 if (page_count(page) !=
2280 1 + page_mapcount(page) + page_has_private(page)) {
2286 * We probably should check if the page is referenced here, but
2310 trace_mm_khugepaged_scan_file(mm, page, file, present, swap, result);
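
During this read-only scan, the check at lines 2279-2280 accepts a page only if its refcount is fully explained by the page cache reference, its mappings, and any fs-private data attached to it; anything beyond that is treated as an external pin. A small sketch of that check; struct scanned_page and file_page_unpinned() are hypothetical, not kernel API.

    #include <stdbool.h>

    /* Hypothetical page model for the scan-time check at lines 2279-2280. */
    struct scanned_page {
            int refcount;       /* page_count()       */
            int mapcount;       /* page_mapcount()    */
            bool has_private;   /* page_has_private() */
    };

    /* Expected: one page cache ref, one per mapping, one for private data. */
    bool file_page_unpinned(const struct scanned_page *p)
    {
            return p->refcount == 1 + p->mapcount + (p->has_private ? 1 : 0);
    }
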