Lines Matching refs:page
10 * Provides methods for unmapping each kind of mapped page:
25 * page->flags PG_locked (lock_page) * (see hugetlbfs below)
28 * hugetlb_fault_mutex (hugetlbfs specific page fault mutex)
50 * hugetlb_fault_mutex (hugetlbfs specific page fault mutex)
51 * page->flags PG_locked (lock_page)
274 * searches where page is mapped.
466 * Getting a lock on a stable anon_vma from a page off the LRU is tricky!
470 * have been relevant to this page.
472 * The page might have been remapped to a different anon_vma or the anon_vma
477 * ensure that any anon_vma obtained from the page will still be valid for as
481 * chain and verify that the page in question is indeed mapped in it
485 * page_remove_rmap() that the anon_vma pointer from page->mapping is valid
489 struct anon_vma *page_get_anon_vma(struct page *page)
495 anon_mapping = (unsigned long)READ_ONCE(page->mapping);
498 if (!page_mapped(page))
508 * If this page is still mapped, then its anon_vma cannot have been
514 if (!page_mapped(page)) {
532 struct anon_vma *page_lock_anon_vma_read(struct page *page)
539 anon_mapping = (unsigned long)READ_ONCE(page->mapping);
542 if (!page_mapped(page))
549 * If the page is still mapped, then this anon_vma is still
553 if (!page_mapped(page)) {
566 if (!page_mapped(page)) {
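
The comment block before line 489 (lines 466-485) explains why a stable anon_vma cannot simply be read out of page->mapping: the page may be unmapped and the anon_vma freed or reused at any moment. Below is a condensed sketch of the stabilization pattern used by page_get_anon_vma(); the sketch_ name is illustrative, and the real code (and page_lock_anon_vma_read(), which additionally trylocks the root anon_vma rwsem) handles more corner cases.

#include <linux/mm.h>
#include <linux/rmap.h>
#include <linux/pagemap.h>

static struct anon_vma *sketch_get_anon_vma(struct page *page)
{
	struct anon_vma *anon_vma = NULL;
	unsigned long mapping;

	rcu_read_lock();
	mapping = (unsigned long)READ_ONCE(page->mapping);
	/* Only anonymous pages keep an anon_vma pointer here. */
	if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		goto out;
	if (!page_mapped(page))
		goto out;

	anon_vma = (struct anon_vma *)(mapping - PAGE_MAPPING_ANON);
	/* Pin it; this fails if the last reference is already gone. */
	if (!atomic_inc_not_zero(&anon_vma->refcount)) {
		anon_vma = NULL;
		goto out;
	}

	/*
	 * Re-check: if the page was unmapped meanwhile, the pinned object
	 * may already be a recycled anon_vma (SLAB_TYPESAFE_BY_RCU only
	 * guarantees it is still *an* anon_vma), so give it back.
	 */
	if (!page_mapped(page)) {
		rcu_read_unlock();
		put_anon_vma(anon_vma);
		return NULL;
	}
out:
	rcu_read_unlock();
	return anon_vma;
}
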
603 * before any IO is initiated on the page to prevent lost writes. Similarly,
644 * before the page is queued for IO.
673 * the page and flushing the page. If this race occurs, it potentially allows
709 * At what user virtual address is page expected in vma?
710 * Caller should check the page is actually part of the vma.
712 unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
714 if (PageAnon(page)) {
715 struct anon_vma *page__anon_vma = page_anon_vma(page);
725 } else if (vma->vm_file->f_mapping != compound_head(page)->mapping) {
729 return vma_address(page, vma);
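
page_address_in_vma() finishes (line 729) by delegating to vma_address(), which converts the page's index in the file (or in the anon_vma's virtual ordering) back into a user virtual address inside the given VMA; linear_page_index(), used later at line 1072, is the inverse mapping. A minimal sketch of that arithmetic, ignoring hugetlb and partially overlapping compound pages, which the real helpers also handle (sketch_ names are illustrative):

#include <linux/mm.h>

/* file offset (in pages) -> user virtual address within this VMA */
static unsigned long sketch_vma_address(pgoff_t pgoff,
					struct vm_area_struct *vma)
{
	return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
}

/* user virtual address -> file offset (in pages), the inverse */
static pgoff_t sketch_linear_page_index(struct vm_area_struct *vma,
					unsigned long address)
{
	return ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
}
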
775 static bool page_referenced_one(struct page *page, struct vm_area_struct *vma,
780 .page = page,
805 * If the page has been used in another mapping,
808 * PG_referenced or activated the page.
818 /* unexpected pmd-mapped page? */
826 clear_page_idle(page);
827 if (test_and_clear_page_young(page))
853 * page_referenced - test if the page was referenced
854 * @page: the page to test
855 * @is_locked: caller holds lock on the page
857 * @vm_flags: collects the vma->vm_flags of the VMAs that actually referenced the page
859 * Quick test_and_clear_referenced for all mappings to a page,
860 * returns the number of ptes which referenced the page.
862 int page_referenced(struct page *page,
869 .mapcount = total_mapcount(page),
883 if (!page_rmapping(page))
886 if (!is_locked && (!PageAnon(page) || PageKsm(page))) {
887 we_locked = trylock_page(page);
901 rmap_walk(page, &rwc);
905 unlock_page(page);
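
For context, here is a hypothetical caller of page_referenced(), loosely modelled on the reclaim-side check in mm/vmscan.c; the helper name and the exact policy are illustrative, not taken from this file.

#include <linux/mm.h>
#include <linux/rmap.h>

static bool sketch_page_was_referenced(struct page *page)
{
	unsigned long vm_flags = 0;
	int referenced;

	/* The caller is assumed to hold the page lock (is_locked = 1). */
	referenced = page_referenced(page, 1, NULL, &vm_flags);

	if (vm_flags & VM_LOCKED)
		return true;		/* mapped into an mlocked VMA */

	return referenced > 0;		/* ptes that had the accessed bit set */
}
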
910 static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
914 .page = page,
924 * the page cannot be freed from this function.
928 vma_address_end(page, vma));
956 flush_cache_page(vma, address, page_to_pfn(page));
963 /* unexpected pmd-mapped page? */
970 * downgrading page table protection not changing it to point
971 * to a new page.
992 int page_mkclean(struct page *page)
1002 BUG_ON(!PageLocked(page));
1004 if (!page_mapped(page))
1007 mapping = page_mapping(page);
1011 rmap_walk(page, &rwc);
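
A hypothetical writeback-side use of page_mkclean(), in the spirit of clear_page_dirty_for_io() in mm/page-writeback.c (the helper name is illustrative): write-protecting every PTE first transfers per-PTE dirty state to PageDirty before the flag is cleared for I/O, which is exactly the "lost writes" concern in the comment around line 603.

#include <linux/mm.h>
#include <linux/rmap.h>

static void sketch_prepare_for_writeback(struct page *page)
{
	/* page lock held by the caller, as page_mkclean() requires */
	if (page_mkclean(page))
		set_page_dirty(page);
	/* ...then TestClearPageDirty() and start the I/O... */
}
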
1018 * page_move_anon_rmap - move a page to our anon_vma
1019 * @page: the page to move to our anon_vma
1020 * @vma: the vma the page belongs to
1022 * When a page belongs exclusively to one process after a COW event,
1023 * that page can be moved into the anon_vma that belongs to just that
1027 void page_move_anon_rmap(struct page *page, struct vm_area_struct *vma)
1031 page = compound_head(page);
1033 VM_BUG_ON_PAGE(!PageLocked(page), page);
1042 WRITE_ONCE(page->mapping, (struct address_space *) anon_vma);
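
The cast at line 1042 works because anonymous pages overload page->mapping: the value is really an anon_vma address with PAGE_MAPPING_ANON set in its low bits (PAGE_MAPPING_MOVABLE is the other low bit; both set together denotes a KSM page). A small sketch of that encoding, with illustrative helper names:

#include <linux/mm.h>
#include <linux/rmap.h>
#include <linux/page-flags.h>

static void sketch_set_anon_mapping(struct page *page,
				    struct anon_vma *anon_vma)
{
	unsigned long encoded = (unsigned long)anon_vma | PAGE_MAPPING_ANON;

	WRITE_ONCE(page->mapping, (struct address_space *)encoded);
}

static struct anon_vma *sketch_read_anon_mapping(struct page *page)
{
	unsigned long mapping = (unsigned long)READ_ONCE(page->mapping);

	if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		return NULL;	/* file-backed, movable or KSM page */
	return (struct anon_vma *)(mapping & ~PAGE_MAPPING_FLAGS);
}
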
1047 * @page: Page or Hugepage to add to rmap
1048 * @vma: VM area to add page to.
1050 * @exclusive: the page is exclusively owned by the current process
1052 static void __page_set_anon_rmap(struct page *page,
1059 if (PageAnon(page))
1063 * If the page isn't exclusively mapped into this vma,
1065 * page mapping!
1071 page->mapping = (struct address_space *) anon_vma;
1072 page->index = linear_page_index(vma, address);
1077 * @page: the page to add the mapping to
1081 static void __page_check_anon_rmap(struct page *page,
1085 * The page's anon-rmap details (mapping and index) are guaranteed to
1089 * always holds the page locked, except if called from page_dup_rmap,
1090 * in which case the page is already known to be set up.
1096 VM_BUG_ON_PAGE(page_anon_vma(page)->root != vma->anon_vma->root, page);
1097 VM_BUG_ON_PAGE(page_to_pgoff(page) != linear_page_index(vma, address),
1098 page);
1102 * page_add_anon_rmap - add pte mapping to an anonymous page
1103 * @page: the page to add the mapping to
1106 * @compound: charge the page as compound or small page
1108 * The caller needs to hold the pte lock, and the page must be locked in
1113 void page_add_anon_rmap(struct page *page,
1116 do_page_add_anon_rmap(page, vma, address, compound ? RMAP_COMPOUND : 0);
1124 void do_page_add_anon_rmap(struct page *page,
1130 if (unlikely(PageKsm(page)))
1131 lock_page_memcg(page);
1133 VM_BUG_ON_PAGE(!PageLocked(page), page);
1137 VM_BUG_ON_PAGE(!PageLocked(page), page);
1138 VM_BUG_ON_PAGE(!PageTransHuge(page), page);
1139 mapcount = compound_mapcount_ptr(page);
1142 first = atomic_inc_and_test(&page->_mapcount);
1146 int nr = compound ? thp_nr_pages(page) : 1;
1154 __inc_lruvec_page_state(page, NR_ANON_THPS);
1155 __mod_lruvec_page_state(page, NR_ANON_MAPPED, nr);
1158 if (unlikely(PageKsm(page))) {
1159 unlock_page_memcg(page);
1165 __page_set_anon_rmap(page, vma, address,
1168 __page_check_anon_rmap(page, vma, address);
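
The "first" checks at lines 1139-1142 rely on the mapcount bias: _mapcount (and the compound mapcount) start at -1 for an unmapped page, so the first mapper takes the counter from -1 to 0 and atomic_inc_and_test() reports true exactly once. A sketch of the resulting reader-side convention (the real page_mapcount() in include/linux/mm.h additionally folds in the compound mapcount for THP; the sketch_ name is illustrative):

#include <linux/mm.h>

/* small (non-compound) pages only; -1 means "not mapped anywhere" */
static int sketch_small_page_mapcount(struct page *page)
{
	return atomic_read(&page->_mapcount) + 1;
}
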
1172 * page_add_new_anon_rmap - add pte mapping to a new anonymous page
1173 * @page: the page to add the mapping to
1176 * @compound: charge the page as compound or small page
1182 void page_add_new_anon_rmap(struct page *page,
1185 int nr = compound ? thp_nr_pages(page) : 1;
1188 __SetPageSwapBacked(page);
1190 VM_BUG_ON_PAGE(!PageTransHuge(page), page);
1192 atomic_set(compound_mapcount_ptr(page), 0);
1193 if (hpage_pincount_available(page))
1194 atomic_set(compound_pincount_ptr(page), 0);
1196 __inc_lruvec_page_state(page, NR_ANON_THPS);
1199 VM_BUG_ON_PAGE(PageTransCompound(page), page);
1201 atomic_set(&page->_mapcount, 0);
1203 __mod_lruvec_page_state(page, NR_ANON_MAPPED, nr);
1204 __page_set_anon_rmap(page, vma, address, 1);
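
A hypothetical fault-path caller of page_add_new_anon_rmap(), loosely following do_anonymous_page() in mm/memory.c; the helper name is illustrative. The "new" variant is safe (and cheaper) there because the page is freshly allocated, exclusive to this mm and not yet visible to anyone else.

#include <linux/mm.h>
#include <linux/rmap.h>
#include <linux/swap.h>

static void sketch_install_new_anon_page(struct vm_area_struct *vma,
					 struct page *page,
					 unsigned long address)
{
	inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
	page_add_new_anon_rmap(page, vma, address, false /* not compound */);
	lru_cache_add_inactive_or_unevictable(page, vma);
	/* ...followed by set_pte_at() under the page table lock... */
}
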
1208 * page_add_file_rmap - add pte mapping to a file page
1209 * @page: the page to add the mapping to
1210 * @compound: charge the page as compound or small page
1214 void page_add_file_rmap(struct page *page, bool compound)
1218 VM_BUG_ON_PAGE(compound && !PageTransHuge(page), page);
1219 lock_page_memcg(page);
1220 if (compound && PageTransHuge(page)) {
1221 for (i = 0, nr = 0; i < thp_nr_pages(page); i++) {
1222 if (atomic_inc_and_test(&page[i]._mapcount))
1225 if (!atomic_inc_and_test(compound_mapcount_ptr(page)))
1227 if (PageSwapBacked(page))
1228 __inc_node_page_state(page, NR_SHMEM_PMDMAPPED);
1230 __inc_node_page_state(page, NR_FILE_PMDMAPPED);
1232 if (PageTransCompound(page) && page_mapping(page)) {
1233 VM_WARN_ON_ONCE(!PageLocked(page));
1235 SetPageDoubleMap(compound_head(page));
1236 if (PageMlocked(page))
1237 clear_page_mlock(compound_head(page));
1239 if (!atomic_inc_and_test(&page->_mapcount))
1242 __mod_lruvec_page_state(page, NR_FILE_MAPPED, nr);
1244 unlock_page_memcg(page);
1247 static void page_remove_file_rmap(struct page *page, bool compound)
1251 VM_BUG_ON_PAGE(compound && !PageHead(page), page);
1254 if (unlikely(PageHuge(page))) {
1256 atomic_dec(compound_mapcount_ptr(page));
1260 /* page still mapped by someone else? */
1261 if (compound && PageTransHuge(page)) {
1262 for (i = 0, nr = 0; i < thp_nr_pages(page); i++) {
1263 if (atomic_add_negative(-1, &page[i]._mapcount))
1266 if (!atomic_add_negative(-1, compound_mapcount_ptr(page)))
1268 if (PageSwapBacked(page))
1269 __dec_node_page_state(page, NR_SHMEM_PMDMAPPED);
1271 __dec_node_page_state(page, NR_FILE_PMDMAPPED);
1273 if (!atomic_add_negative(-1, &page->_mapcount))
1282 __mod_lruvec_page_state(page, NR_FILE_MAPPED, -nr);
1284 if (unlikely(PageMlocked(page)))
1285 clear_page_mlock(page);
1288 static void page_remove_anon_compound_rmap(struct page *page)
1292 if (!atomic_add_negative(-1, compound_mapcount_ptr(page)))
1296 if (unlikely(PageHuge(page)))
1302 __dec_lruvec_page_state(page, NR_ANON_THPS);
1304 if (TestClearPageDoubleMap(page)) {
1309 for (i = 0, nr = 0; i < thp_nr_pages(page); i++) {
1310 if (atomic_add_negative(-1, &page[i]._mapcount))
1315 * Queue the page for deferred split if at least one small
1316 * page of the compound page is unmapped, but at least one
1317 * small page is still mapped.
1319 if (nr && nr < thp_nr_pages(page))
1320 deferred_split_huge_page(page);
1322 nr = thp_nr_pages(page);
1325 if (unlikely(PageMlocked(page)))
1326 clear_page_mlock(page);
1329 __mod_lruvec_page_state(page, NR_ANON_MAPPED, -nr);
1333 * page_remove_rmap - take down pte mapping from a page
1334 * @page: page to remove mapping from
1335 * @compound: uncharge the page as compound or small page
1339 void page_remove_rmap(struct page *page, bool compound)
1341 lock_page_memcg(page);
1343 if (!PageAnon(page)) {
1344 page_remove_file_rmap(page, compound);
1349 page_remove_anon_compound_rmap(page);
1353 /* page still mapped by someone else? */
1354 if (!atomic_add_negative(-1, &page->_mapcount))
1362 __dec_lruvec_page_state(page, NR_ANON_MAPPED);
1364 if (unlikely(PageMlocked(page)))
1365 clear_page_mlock(page);
1367 if (PageTransCompound(page))
1368 deferred_split_huge_page(compound_head(page));
1380 unlock_page_memcg(page);
1386 static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
1391 .page = page,
1396 struct page *subpage;
1405 * if page table locking is skipped: use TTU_SYNC to wait for that.
1415 is_zone_device_page(page) && !is_device_private_page(page))
1420 flags & TTU_SPLIT_FREEZE, page);
1428 * Note that the page cannot be freed in this function, as the caller of
1429 * try_to_unmap() must hold a reference on the page.
1431 range.end = PageKsm(page) ?
1432 address + PAGE_SIZE : vma_address_end(page, vma);
1435 if (PageHuge(page)) {
1449 VM_BUG_ON_PAGE(PageHuge(page) || !PageTransCompound(page), page);
1451 set_pmd_migration_entry(&pvmw, page);
1464 * If the page is mlock()d, we cannot swap it out.
1471 if (!PageTransCompound(page)) {
1476 mlock_vma_page(page);
1487 VM_BUG_ON_PAGE(!pvmw.pte, page);
1489 subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte);
1492 if (PageHuge(page) && !PageAnon(page)) {
1502 * page. There is no way of knowing exactly
1513 * The ref count of the PMD page was dropped
1518 * unmap the actual page and drop map count
1528 is_zone_device_page(page)) {
1535 * Store the pfn of the page in a special migration
1539 entry = make_migration_entry(page, 0);
1543 * pteval maps a zone device page and is therefore
1558 * migrated, just set it to page. This will need to be
1562 subpage = page;
1566 /* Nuke the page table entry. */
1571 * a remote CPU could still be writing to the page.
1584 /* Move the dirty bit to the page. Now the pte is gone. */
1586 set_page_dirty(page);
1591 if (PageHWPoison(page) && !(flags & TTU_IGNORE_HWPOISON)) {
1593 if (PageHuge(page)) {
1594 hugetlb_count_sub(compound_nr(page), mm);
1599 dec_mm_counter(mm, mm_counter(page));
1608 * The guest indicated that the page content is of no
1612 * page. When userfaultfd is active, we must not drop
1613 * this page though, as its main user (postcopy
1617 dec_mm_counter(mm, mm_counter(page));
1634 * Store the pfn of the page in a special migration
1650 } else if (PageAnon(page)) {
1657 if (unlikely(PageSwapBacked(page) != PageSwapCache(page))) {
1667 /* MADV_FREE page check */
1668 if (!PageSwapBacked(page)) {
1678 ref_count = page_ref_count(page);
1679 map_count = page_mapcount(page);
1682 * Order reads for page refcount and dirty flag
1688 * The only page refs must be one from isolation
1692 !PageDirty(page)) {
1701 * If the page was redirtied, it cannot be
1702 * discarded. Remap the page to page table.
1705 SetPageSwapBacked(page);
1742 * This is a locked file-backed page, thus it cannot
1743 * be removed from the page cache and replaced by a new
1744 * page before mmu_notifier_invalidate_range_end, so no
1745 * concurrent thread might update its page table to
1746 * point at new page while a device still is using this
1747 * page.
1751 dec_mm_counter(mm, mm_counter_file(page));
1756 * done above for all cases requiring it to happen under page
1761 page_remove_rmap(subpage, PageHuge(page));
1762 put_page(page);
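
The "Store the pfn of the page in a special migration entry" comments (lines 1535 and 1634) refer to the step sketched below, condensed from the migration branch of try_to_unmap_one(); the sketch_ name is illustrative and newer details (uffd-wp markers, device-private entries) are elided. The cleared PTE is replaced with a migration entry encoding the page's pfn and original write permission, so a faulting thread waits for migration to finish instead of touching the old page.

#include <linux/mm.h>
#include <linux/swapops.h>

static void sketch_install_migration_entry(struct mm_struct *mm,
					   unsigned long address, pte_t *ptep,
					   struct page *subpage, pte_t pteval)
{
	swp_entry_t entry = make_migration_entry(subpage, pte_write(pteval));
	pte_t swp_pte = swp_entry_to_pte(entry);

	/* preserve soft-dirty tracking across the migration */
	if (pte_soft_dirty(pteval))
		swp_pte = pte_swp_mksoft_dirty(swp_pte);
	set_pte_at(mm, address, ptep, swp_pte);
}
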
1775 static int page_not_mapped(struct page *page)
1777 return !page_mapped(page);
1781 * try_to_unmap - try to remove all page table mappings to a page
1782 * @page: the page to get unmapped
1785 * Tries to remove all the page table entries which are mapping this
1786 * page, used in the pageout path. Caller must hold the page lock.
1790 bool try_to_unmap(struct page *page, enum ttu_flags flags)
1802 * page tables leading to a race where migration cannot
1808 && !PageKsm(page) && PageAnon(page))
1812 rmap_walk_locked(page, &rwc);
1814 rmap_walk(page, &rwc);
1820 * if page table locking is skipped: use TTU_SYNC to wait for that.
1822 return !page_mapcount(page);
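
Condensed sketch of the walk that try_to_unmap() sets up; the TTU_RMAP_LOCKED / migration handling visible at lines 1802-1814 is elided and the sketch_ name is illustrative.

#include <linux/rmap.h>

static bool sketch_try_to_unmap(struct page *page, enum ttu_flags flags)
{
	struct rmap_walk_control rwc = {
		.rmap_one = try_to_unmap_one,	/* does the real PTE work */
		.arg = (void *)(unsigned long)flags,	/* flags packed into arg */
		.done = page_not_mapped,	/* stop once fully unmapped */
		.anon_lock = page_lock_anon_vma_read,
	};

	rmap_walk(page, &rwc);

	/* "success" simply means no mappings are left. */
	return !page_mapcount(page);
}
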
1826 * try_to_munlock - try to munlock a page
1827 * @page: the page to be munlocked
1829 * Called from munlock code. Checks all of the VMAs mapping the page
1830 * to make sure nobody else has this page mlocked. The page will be
1834 void try_to_munlock(struct page *page)
1844 VM_BUG_ON_PAGE(!PageLocked(page) || PageLRU(page), page);
1845 VM_BUG_ON_PAGE(PageCompound(page) && PageDoubleMap(page), page);
1847 rmap_walk(page, &rwc);
1859 static struct anon_vma *rmap_walk_anon_lock(struct page *page,
1865 return rwc->anon_lock(page);
1873 anon_vma = page_anon_vma(page);
1882 * rmap_walk_anon - do something to anonymous page using the object-based
1884 * @page: the page to be handled
1887 * Find all the mappings of a page using the mapping pointer and the vma chains
1891 * where the page was found will be held for write. So, we won't recheck
1895 static void rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc,
1903 anon_vma = page_anon_vma(page);
1905 VM_BUG_ON_PAGE(!anon_vma, page);
1907 anon_vma = rmap_walk_anon_lock(page, rwc);
1912 pgoff_start = page_to_pgoff(page);
1913 pgoff_end = pgoff_start + thp_nr_pages(page) - 1;
1917 unsigned long address = vma_address(page, vma);
1925 if (!rwc->rmap_one(page, vma, address, rwc->arg))
1927 if (rwc->done && rwc->done(page))
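
The loop at lines 1912-1927 visits every VMA whose page-offset range overlaps the page, via the anon_vma interval tree. A condensed sketch of that iteration (locking and cond_resched() elided; the sketch_ name is illustrative):

#include <linux/mm.h>
#include <linux/rmap.h>

static void sketch_walk_anon_vma(struct page *page, struct anon_vma *anon_vma,
				 struct rmap_walk_control *rwc)
{
	pgoff_t pgoff_start = page_to_pgoff(page);
	pgoff_t pgoff_end = pgoff_start + thp_nr_pages(page) - 1;
	struct anon_vma_chain *avc;

	anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root,
				       pgoff_start, pgoff_end) {
		struct vm_area_struct *vma = avc->vma;
		unsigned long address = vma_address(page, vma);

		if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
			continue;
		if (!rwc->rmap_one(page, vma, address, rwc->arg))
			break;
		if (rwc->done && rwc->done(page))
			break;
	}
}
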
1936 * rmap_walk_file - do something to file page using the object-based rmap method
1937 * @page: the page to be handled
1940 * Find all the mappings of a page using the mapping pointer and the vma chains
1944 * where the page was found will be held for write. So, we won't recheck
1948 static void rmap_walk_file(struct page *page, struct rmap_walk_control *rwc,
1951 struct address_space *mapping = page_mapping(page);
1956 * The page lock not only makes sure that page->mapping cannot
1961 VM_BUG_ON_PAGE(!PageLocked(page), page);
1966 pgoff_start = page_to_pgoff(page);
1967 pgoff_end = pgoff_start + thp_nr_pages(page) - 1;
1972 unsigned long address = vma_address(page, vma);
1980 if (!rwc->rmap_one(page, vma, address, rwc->arg))
1982 if (rwc->done && rwc->done(page))
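
The file-side walk differs mainly in its locking: the VMAs hang off mapping->i_mmap, which is stabilized with i_mmap_lock_read() rather than an anon_vma lock. A condensed sketch (illustrative name; callbacks other than rmap_one elided):

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/rmap.h>

static void sketch_walk_file_vmas(struct page *page,
				  struct rmap_walk_control *rwc)
{
	struct address_space *mapping = page_mapping(page);
	pgoff_t first = page_to_pgoff(page);
	pgoff_t last = first + thp_nr_pages(page) - 1;
	struct vm_area_struct *vma;

	if (!mapping)
		return;

	i_mmap_lock_read(mapping);	/* pins the i_mmap interval tree */
	vma_interval_tree_foreach(vma, &mapping->i_mmap, first, last) {
		if (!rwc->rmap_one(page, vma, vma_address(page, vma), rwc->arg))
			break;
	}
	i_mmap_unlock_read(mapping);
}
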
1991 void rmap_walk(struct page *page, struct rmap_walk_control *rwc)
1993 if (unlikely(PageKsm(page)))
1994 rmap_walk_ksm(page, rwc);
1995 else if (PageAnon(page))
1996 rmap_walk_anon(page, rwc, false);
1998 rmap_walk_file(page, rwc, false);
2002 void rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc)
2005 VM_BUG_ON_PAGE(PageKsm(page), page);
2006 if (PageAnon(page))
2007 rmap_walk_anon(page, rwc, true);
2009 rmap_walk_file(page, rwc, true);
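
To show how the rmap_walk()/rmap_walk_locked() entry points are consumed, here is a hypothetical walker that counts the VMAs currently mapping a page; both function names are made up for illustration, but the rmap_walk_control fields used are the ones visible throughout this file.

#include <linux/rmap.h>

static bool count_one_vma(struct page *page, struct vm_area_struct *vma,
			  unsigned long address, void *arg)
{
	(*(unsigned int *)arg)++;
	return true;	/* keep walking */
}

static unsigned int count_mapping_vmas(struct page *page)
{
	unsigned int count = 0;
	struct rmap_walk_control rwc = {
		.rmap_one = count_one_vma,
		.arg = &count,
	};

	/* Caller must hold the page lock, as rmap_walk_file() asserts. */
	rmap_walk(page, &rwc);
	return count;
}
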
2018 void hugepage_add_anon_rmap(struct page *page,
2024 BUG_ON(!PageLocked(page));
2027 first = atomic_inc_and_test(compound_mapcount_ptr(page));
2029 __page_set_anon_rmap(page, vma, address, 0);
2032 void hugepage_add_new_anon_rmap(struct page *page,
2036 atomic_set(compound_mapcount_ptr(page), 0);
2037 if (hpage_pincount_available(page))
2038 atomic_set(compound_pincount_ptr(page), 0);
2040 __page_set_anon_rmap(page, vma, address, 1);