Lines Matching defs:page (mm/ksm.c)
73 * by their contents. Because each such page is write-protected, searching on
78 * mapping from a KSM page to virtual addresses that map this page.
87 * different KSM page copy of that content
111 * take 10 attempts to find a page in the unstable tree, once it is found,
112 * it is secured in the stable tree. (When we scan a new page, we first
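KSM only considers memory that userspace has explicitly opted in with madvise(MADV_MERGEABLE). A minimal userspace sketch of doing so (illustrative only, not part of ksm.c; the 4096-byte page size and the pause() at the end are assumptions for the example):

#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t len = 2 * 4096;
	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (buf == MAP_FAILED)
		return 1;

	/* Two pages with identical contents: candidates for merging. */
	memset(buf, 0x5a, len);

	/* Opt the range in to KSM; a running ksmd may later merge the two
	 * pages into a single write-protected KSM page. */
	if (madvise(buf, len, MADV_MERGEABLE))
		return 1;

	/* Keep the mapping alive so ksmd has time to scan it. */
	pause();
	return 0;
}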
147 * @node: rb node of this ksm page in the stable tree
151 * @hlist: hlist head of rmap_items using this ksm page
152 * @kpfn: page frame number of this ksm page (perhaps temporarily on wrong nid)
189 * @nid: NUMA node id of unstable tree in which linked (may not match page)
192 * @oldchecksum: previous checksum of the page at that virtual address
251 /* The number of page slots additionally sharing those nodes */
269 /* Maximum number of page slots sharing a stable node */
278 /* Checksum of an empty (zeroed) page */
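The counters and limits above are exposed under /sys/kernel/mm/ksm/. A hedged sketch of driving the stock knobs from C (run, pages_to_scan, max_page_sharing and use_zero_pages are the standard sysfs files; writing them needs root privileges):

#include <stdio.h>

/* Write one value to a KSM sysfs knob; returns 0 on success. */
static int ksm_write(const char *knob, const char *val)
{
	char path[128];
	FILE *f;

	snprintf(path, sizeof(path), "/sys/kernel/mm/ksm/%s", knob);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fputs(val, f);
	return fclose(f);
}

int main(void)
{
	ksm_write("run", "1");			/* start ksmd */
	ksm_write("pages_to_scan", "100");	/* pages scanned per wake-up */
	ksm_write("max_page_sharing", "256");	/* page slots per stable node */
	ksm_write("use_zero_pages", "1");	/* merge empty pages with the zero page */
	return 0;
}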
421 * page tables after it has passed through ksm_exit() - which, if necessary,
435 struct page *page = NULL;
446 page = vm_normal_page(walk->vma, addr, ptent);
455 page = pfn_swap_entry_to_page(entry);
457 /* return 1 if the page is a normal ksm page or KSM-placed zero page */
458 ret = (page && PageKsm(page)) || is_ksm_zero_pte(*pte);
474 * We use break_ksm to break COW on a ksm page by triggering unsharing,
475 * such that the ksm page will get replaced by an exclusive anonymous page.
477 * We take great care only to touch a ksm page, in a VM_MERGEABLE vma,
479 * Could a ksm page appear anywhere else? Actually yes, in a VM_PFNMAP
506 * We must loop until we no longer find a KSM page because
526 * will retry break_cow on each pass, so should recover the page
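From userspace, the unmerge path described above is reached through madvise(MADV_UNMERGEABLE): every merged page in the range has its COW broken and is replaced by an exclusive anonymous copy. A minimal sketch (stop_sharing() is a hypothetical helper name):

#include <stddef.h>
#include <sys/mman.h>

/* Ask the kernel to unmerge any KSM pages backing this range. */
static int stop_sharing(void *buf, size_t len)
{
	return madvise(buf, len, MADV_UNMERGEABLE);
}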
586 static struct page *get_mergeable_page(struct ksm_rmap_item *rmap_item)
591 struct page *page;
598 page = follow_page(vma, addr, FOLL_GET);
599 if (IS_ERR_OR_NULL(page))
601 if (is_zone_device_page(page))
603 if (PageAnon(page)) {
604 flush_anon_page(vma, page, addr);
605 flush_dcache_page(page);
608 put_page(page);
610 page = NULL;
613 return page;
717 * get_ksm_page: checks if the page indicated by the stable node
718 * is still its ksm page, despite having held no reference to it.
719 * If so, we can trust the content of the page, and it
720 * returns the page it got; but if the page has now been zapped,
722 * But beware, the stable node's page might be being migrated.
724 * You would expect the stable_node to hold a reference to the ksm page.
725 * But if it increments the page's count, swapping out has to wait for
726 * ksmd to come around again before it can free the page, which may take
728 * "keyhole reference": access to the ksm page from the stable node peeps
729 * out through its keyhole to see if that page still holds the right key,
731 * page to reset its page->mapping to NULL, and relies on no other use of
732 * a page to put something that might look like our key in page->mapping.
735 static struct page *get_ksm_page(struct ksm_stable_node *stable_node,
738 struct page *page;
746 page = pfn_to_page(kpfn);
747 if (READ_ONCE(page->mapping) != expected_mapping)
751 * We cannot do anything with the page while its refcount is 0.
752 * Usually 0 means free, or tail of a higher-order page: in which
754 * however, it might mean that the page is under page_ref_freeze().
756 * the same holds in the reuse_ksm_page() case; but if page is swapcache
757 * in folio_migrate_mapping(), it might still be our page,
760 while (!get_page_unless_zero(page)) {
762 * Another check for page->mapping != expected_mapping would
764 * optimize the common case, when the page is or is about to
767 * page->mapping reset to NULL later, in free_pages_prepare().
769 if (!PageSwapCache(page))
774 if (READ_ONCE(page->mapping) != expected_mapping) {
775 put_page(page);
780 if (!trylock_page(page)) {
781 put_page(page);
785 lock_page(page);
788 if (READ_ONCE(page->mapping) != expected_mapping) {
789 unlock_page(page);
790 put_page(page);
794 return page;
798 * We come here from above when page->mapping or !PageSwapCache
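Condensed into one place, the keyhole check that the fragments above implement looks roughly like this (a sketch only: keyhole_peek() is a hypothetical name, and the swapcache wait, page-locking modes and stale-node removal of the real get_ksm_page() are omitted):

static struct page *keyhole_peek(struct ksm_stable_node *stable_node)
{
	struct page *page = pfn_to_page(stable_node->kpfn);
	void *expected_mapping = (void *)((unsigned long)stable_node |
					  PAGE_MAPPING_KSM);

	if (READ_ONCE(page->mapping) != expected_mapping)
		return NULL;			/* key gone: node is stale */
	if (!get_page_unless_zero(page))
		return NULL;			/* page free or refcount frozen */
	if (READ_ONCE(page->mapping) != expected_mapping) {
		put_page(page);			/* lost a race: key changed */
		return NULL;
	}
	return page;				/* trusted reference to the ksm page */
}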
818 struct page *page;
821 page = get_ksm_page(stable_node, GET_KSM_PAGE_LOCK);
822 if (!page)
826 unlock_page(page);
827 put_page(page);
878 * page and upping mmap_lock. Nor does it fit with the way we skip dup'ing
909 static inline struct ksm_stable_node *page_stable_node(struct page *page)
911 return folio_stable_node(page_folio(page));
914 static inline void set_page_stable_node(struct page *page,
917 VM_BUG_ON_PAGE(PageAnon(page) && PageAnonExclusive(page), page);
918 page->mapping = (void *)((unsigned long)stable_node | PAGE_MAPPING_KSM);
927 struct page *page;
930 page = get_ksm_page(stable_node, GET_KSM_PAGE_LOCK);
931 if (!page) {
944 if (!page_mapped(page)) {
947 * since that allows for an unmapped ksm page to be recognized
949 * This page might be in an LRU cache waiting to be freed,
953 set_page_stable_node(page, NULL);
958 unlock_page(page);
959 put_page(page);
1086 static u32 calc_checksum(struct page *page)
1089 void *addr = kmap_atomic(page);
1095 static int write_protect_page(struct vm_area_struct *vma, struct page *page,
1099 DEFINE_PAGE_VMA_WALK(pvmw, page, vma, 0, 0);
1106 pvmw.address = page_address_in_vma(page, vma);
1110 BUG_ON(PageTransCompound(page));
1121 anon_exclusive = PageAnonExclusive(page);
1125 swapped = PageSwapCache(page);
1126 flush_cache_page(vma, pvmw.address, page_to_pfn(page));
1136 * No need to notify as we are downgrading page table to read
1137 * only, not changing it to point to a new page.
1144 * page
1146 if (page_mapcount(page) + 1 + swapped != page_count(page)) {
1152 if (anon_exclusive && page_try_share_anon_rmap(page)) {
1158 set_page_dirty(page);
1178 * replace_page - replace page in vma by new ksm page
1179 * @vma: vma that holds the pte pointing to page
1180 * @page: the page we are replacing by kpage
1181 * @kpage: the ksm page we replace page by
1186 static int replace_page(struct vm_area_struct *vma, struct page *page,
1187 struct page *kpage, pte_t orig_pte)
1200 addr = page_address_in_vma(page, vma);
1227 VM_BUG_ON_PAGE(PageAnonExclusive(page), page);
1240 * Use pte_mkdirty to mark the zero page mapped by KSM, and then
1242 * the dirty bit in zero page's PTE is set.
1248 * We're replacing an anonymous page with a zero page, which is
1258 * No need to notify as we are replacing a read only page with another
1259 * read only page with the same content.
1266 folio = page_folio(page);
1267 page_remove_rmap(page, vma, false);
1282 * @vma: the vma that holds the pte pointing to page
1283 * @page: the PageAnon page that we want to replace with kpage
1284 * @kpage: the PageKsm page that we want to map instead of page,
1285 * or NULL the first time when we want to use page as kpage.
1290 struct page *page, struct page *kpage)
1295 if (page == kpage) /* ksm page forked */
1298 if (!PageAnon(page))
1302 * We need the page lock to read a stable PageSwapCache in
1306 * then come back to this page when it is unlocked.
1308 if (!trylock_page(page))
1311 if (PageTransCompound(page)) {
1312 if (split_huge_page(page))
1317 * If this anonymous page is mapped only here, its pte may need
1322 if (write_protect_page(vma, page, &orig_pte) == 0) {
1325 * While we hold page lock, upgrade page from
1329 set_page_stable_node(page, NULL);
1330 mark_page_accessed(page);
1332 * Page reclaim just frees a clean page with no dirty
1333 * ptes: make sure that the ksm page would be swapped.
1335 if (!PageDirty(page))
1336 SetPageDirty(page);
1338 } else if (pages_identical(page, kpage))
1339 err = replace_page(vma, page, kpage, orig_pte);
1343 unlock_page(page);
1350 * but no new kernel page is allocated: kpage must already be a ksm page.
1355 struct page *page, struct page *kpage)
1366 err = try_to_merge_one_page(vma, page, kpage);
1378 trace_ksm_merge_with_ksm_page(kpage, page_to_pfn(kpage ? kpage : page),
1385 * to be merged into one page.
1388 * pages into one ksm page, NULL otherwise.
1390 * Note that this function upgrades page to ksm page: if one of the pages
1391 * is already a ksm page, try_to_merge_with_ksm_page should be used.
1393 static struct page *try_to_merge_two_pages(struct ksm_rmap_item *rmap_item,
1394 struct page *page,
1396 struct page *tree_page)
1400 err = try_to_merge_with_ksm_page(rmap_item, page, NULL);
1403 tree_page, page);
1405 * If that fails, we have a ksm page with only one pte
1411 return err ? NULL : page;
1434 static struct page *stable_node_dup(struct ksm_stable_node **_stable_node_dup,
1441 struct page *_tree_page, *tree_page = NULL;
1580 static struct page *__stable_node_chain(struct ksm_stable_node **_stable_node_dup,
1602 static __always_inline struct page *chain_prune(struct ksm_stable_node **s_n_d,
1609 static __always_inline struct page *chain(struct ksm_stable_node **s_n_d,
1614 struct page *tree_page;
1623 * stable_tree_search - search for page inside the stable tree
1625 * This function checks if there is a page inside the stable tree
1626 * with identical content to the page that we are scanning right now.
1631 static struct page *stable_tree_search(struct page *page)
1640 page_node = page_stable_node(page);
1642 /* ksm page forked */
1643 get_page(page);
1644 return page;
1647 nid = get_kpfn_nid(page_to_pfn(page));
1654 struct page *tree_page;
1686 * Take any of the stable_node dups page of
1711 ret = memcmp_pages(page, tree_page);
1723 * Test if the migrated page should be merged
1725 * 1 we can migrate it with another KSM page
1728 if (page_mapcount(page) > 1)
1737 * page in any of the existing
1740 * scanned page to find itself a match
1742 * brand new KSM page to add later to
1749 * Lock and unlock the stable_node's page (which
1750 * might already have been migrated) so that page
1787 get_page(page);
1788 return page;
1813 get_page(page);
1815 page = NULL;
1818 page = NULL;
1829 get_page(page);
1831 page = NULL;
1833 page = NULL;
1838 return page;
1863 * of the current nid for this page
1875 * stable_tree_insert - insert stable tree node pointing to new ksm page
1881 static struct ksm_stable_node *stable_tree_insert(struct page *kpage)
1899 struct page *tree_page;
1919 * Take any of the stable_node dups page of
1987 * unstable_tree_search_insert - search for identical page,
1990 * This function searches for a page in the unstable tree identical to the
1991 * page currently being scanned; and if no identical page is found in the
1995 * to the currently scanned page, NULL otherwise.
2002 struct page *page,
2003 struct page **tree_pagep)
2010 nid = get_kpfn_nid(page_to_pfn(page));
2016 struct page *tree_page;
2026 * Don't substitute a ksm page for a forked page.
2028 if (page == tree_page) {
2033 ret = memcmp_pages(page, tree_page);
2070 * the same ksm page.
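Both the stable and the unstable tree are ordered purely by memcmp of page contents (the memcmp_pages() calls above), so a lookup is an ordinary binary search over the bytes. A userspace toy illustration of that ordering, using a plain binary tree and a hypothetical content_node type rather than the kernel's rbtree:

#include <stddef.h>
#include <string.h>

struct content_node {
	struct content_node *left, *right;
	const void *page;		/* points at page-sized content */
};

/* Walk a content-ordered tree looking for a page with identical bytes. */
static struct content_node *content_tree_search(struct content_node *root,
						const void *page,
						size_t page_size)
{
	while (root) {
		int ret = memcmp(page, root->page, page_size);

		if (ret < 0)
			root = root->left;
		else if (ret > 0)
			root = root->right;
		else
			return root;	/* identical content found */
	}
	return NULL;
}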
2107 * cmp_and_merge_page - first see if page can be merged into the stable tree;
2108 * if not, compare checksum to previous and if it's the same, see if page can
2109 * be inserted into the unstable tree, or merged with a page already there and
2112 * @page: the page for which we are searching an identical page.
2113 * @rmap_item: the reverse mapping into the virtual address of this page
2115 static void cmp_and_merge_page(struct page *page, struct ksm_rmap_item *rmap_item)
2119 struct page *tree_page = NULL;
2121 struct page *kpage;
2126 stable_node = page_stable_node(page);
2146 /* We first start with searching the page inside the stable tree */
2147 kpage = stable_tree_search(page);
2148 if (kpage == page && rmap_item->head == stable_node) {
2159 err = try_to_merge_with_ksm_page(rmap_item, page, kpage);
2162 * The page was successfully merged:
2175 * If the hash value of the page has changed from the last time
2176 * we calculated it, this page is changing frequently: therefore we
2180 checksum = calc_checksum(page);
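The volatile-page filter described above can be illustrated with a self-contained userspace sketch. Here checksum32() is a stand-in FNV-1a hash and page_seems_stable() a hypothetical helper; ksm.c uses its own calc_checksum() and stores the previous value in the rmap_item's oldchecksum field:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Trivial stand-in hash (FNV-1a), NOT the kernel's algorithm. */
static uint32_t checksum32(const void *buf, size_t len)
{
	const unsigned char *p = buf;
	uint32_t h = 2166136261u;

	while (len--)
		h = (h ^ *p++) * 16777619u;
	return h;
}

/* Returns true only when the page content was unchanged since last scan. */
static bool page_seems_stable(const void *page, size_t page_size,
			      uint32_t *old_checksum)
{
	uint32_t checksum = checksum32(page, page_size);

	if (*old_checksum != checksum) {
		*old_checksum = checksum;	/* remember, retry next scan */
		return false;			/* changing too often: skip */
	}
	return true;				/* candidate for the unstable tree */
}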
2187 * Same checksum as an empty page. We attempt to merge it with the
2188 * appropriate zero page if the user enabled this via sysfs.
2196 err = try_to_merge_one_page(vma, page,
2210 * In case of failure, the page was not really empty, so we
2217 unstable_tree_search_insert(rmap_item, page, &tree_page);
2221 kpage = try_to_merge_two_pages(rmap_item, page,
2225 * page, then we actually ended up increasing the reference
2226 * count of the same compound page twice, and split_huge_page
2233 split = PageTransCompound(page)
2234 && compound_head(page) == compound_head(tree_page);
2252 * If we fail to insert the page into the stable tree,
2254 * to a ksm page left outside the stable tree,
2265 * compound page. We will split the page now, but no
2268 * the page is locked, it is better to skip it and
2271 if (!trylock_page(page))
2273 split_huge_page(page);
2274 unlock_page(page);
2308 static struct ksm_rmap_item *scan_get_next_rmap_item(struct page **page)
2327 * LRU cache, raised page count preventing write_protect_page
2345 struct page *page;
2349 page = get_ksm_page(stable_node,
2351 if (page)
2352 put_page(page);
2396 *page = follow_page(vma, ksm_scan.address, FOLL_GET);
2397 if (IS_ERR_OR_NULL(*page)) {
2402 if (is_zone_device_page(*page))
2404 if (PageAnon(*page)) {
2405 flush_anon_page(vma, *page, ksm_scan.address);
2406 flush_dcache_page(*page);
2414 put_page(*page);
2419 put_page(*page);
2488 struct page *page;
2493 rmap_item = scan_get_next_rmap_item(&page);
2496 cmp_and_merge_page(page, rmap_item);
2497 put_page(page);
2793 struct page *ksm_might_need_to_copy(struct page *page,
2796 struct folio *folio = page_folio(page);
2798 struct page *new_page;
2800 if (PageKsm(page)) {
2801 if (page_stable_node(page) &&
2803 return page; /* no need to copy it */
2805 return page; /* no need to copy it */
2806 } else if (page->index == linear_page_index(vma, address) &&
2808 return page; /* still no need to copy it */
2810 if (PageHWPoison(page))
2812 if (!PageUptodate(page))
2813 return page; /* let do_swap_page report the error */
2822 if (copy_mc_user_highpage(new_page, page, address, vma)) {
2824 memory_failure_queue(page_to_pfn(page), 0);
2847 * Rely on the page lock to protect against concurrent modifications
2848 * to that page's node of the stable tree.
2910 * Collect processes when the error hits a ksm page.
2912 void collect_procs_ksm(struct page *page, struct list_head *to_kill,
2917 struct folio *folio = page_folio(page);
2942 add_to_kill_ksm(t, page, vma, to_kill,
2973 set_page_stable_node(&folio->page, NULL);
2996 * Don't get_ksm_page, page has already gone:
2997 * which is why we keep kpfn instead of page*
3081 * Most of the work is done by page migration; but there might
3085 * non-existent struct page.
3317 * When a KSM page is created it is shared by 2 mappings. This
3486 /* The correct value depends on page size and endianness */