Lines Matching refs:page

68  * by their contents.  Because each such page is write-protected, searching on
73 * mapping from a KSM page to virtual addresses that map this page.
82 * different KSM page copy of that content
106 * take 10 attempts to find a page in the unstable tree, once it is found,
107 * it is secured in the stable tree. (When we scan a new page, we first
146 * @node: rb node of this ksm page in the stable tree
150 * @hlist: hlist head of rmap_items using this ksm page
151 * @kpfn: page frame number of this ksm page (perhaps temporarily on wrong nid)
188 * @nid: NUMA node id of unstable tree in which linked (may not match page)
191 * @oldchecksum: previous checksum of the page at that virtual address
249 /* The number of page slots additionally sharing those nodes */
267 /* Maximum number of page slots sharing a stable node */
276 /* Checksum of an empty (zeroed) page */
445 * page tables after it has passed through ksm_exit() - which, if necessary,
457 * We use break_ksm to break COW on a ksm page: it's a stripped down
459 * if (get_user_pages(addr, 1, FOLL_WRITE, &page, NULL) == 1)
460 * put_page(page);
462 * but taking great care only to touch a ksm page, in a VM_MERGEABLE vma,
464 * Could a ksm page appear anywhere else? Actually yes, in a VM_PFNMAP
473 struct page *page;
478 page = follow_page(vma, addr,
480 if (IS_ERR_OR_NULL(page))
482 if (PageKsm(page))
488 put_page(page);
514 * will retry to break_cow on each pass, so should recover the page
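As a reading aid: the COW-breaking loop that the break_ksm() comments above describe (around lines 457-514) looks roughly like the sketch below. This is a paraphrase, not a verbatim copy; the exact FOLL_*/FAULT_FLAG_* flags and the handle_mm_fault() signature differ between kernel versions.

	/* Sketch: break COW on one KSM page, waiting out migration entries. */
	do {
		cond_resched();
		page = follow_page(vma, addr,
				   FOLL_GET | FOLL_MIGRATION | FOLL_REMOTE);
		if (IS_ERR_OR_NULL(page))
			break;
		if (PageKsm(page))
			ret = handle_mm_fault(vma, addr,
					      FAULT_FLAG_WRITE | FAULT_FLAG_REMOTE,
					      NULL);
		else
			ret = VM_FAULT_WRITE;	/* already a private copy */
		put_page(page);
	} while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS |
			  VM_FAULT_SIGSEGV | VM_FAULT_OOM)));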
554 static struct page *get_mergeable_page(struct rmap_item *rmap_item)
559 struct page *page;
566 page = follow_page(vma, addr, FOLL_GET);
567 if (IS_ERR_OR_NULL(page))
569 if (PageAnon(page)) {
570 flush_anon_page(vma, page, addr);
571 flush_dcache_page(page);
573 put_page(page);
575 page = NULL;
578 return page;
678 * get_ksm_page: checks if the page indicated by the stable node
679 * is still its ksm page, despite having held no reference to it.
680 * In which case we can trust the content of the page, and it
681 * returns the gotten page; but if the page has now been zapped,
683 * But beware, the stable node's page might be being migrated.
685 * You would expect the stable_node to hold a reference to the ksm page.
686 * But if it increments the page's count, swapping out has to wait for
687 * ksmd to come around again before it can free the page, which may take
689 * "keyhole reference": access to the ksm page from the stable node peeps
690 * out through its keyhole to see if that page still holds the right key,
692 * page to reset its page->mapping to NULL, and relies on no other use of
693 * a page to put something that might look like our key in page->mapping.
696 static struct page *get_ksm_page(struct stable_node *stable_node,
699 struct page *page;
707 page = pfn_to_page(kpfn);
708 if (READ_ONCE(page->mapping) != expected_mapping)
712 * We cannot do anything with the page while its refcount is 0.
713 * Usually 0 means free, or tail of a higher-order page: in which
715 * however, it might mean that the page is under page_ref_freeze().
717 * the same holds in the reuse_ksm_page() case; but if the page is swapcache
718 * in migrate_page_move_mapping(), it might still be our page,
721 while (!get_page_unless_zero(page)) {
723 * Another check for page->mapping != expected_mapping would
725 * optimize the common case, when the page is or is about to
728 * page->mapping reset to NULL later, in free_pages_prepare().
730 if (!PageSwapCache(page))
735 if (READ_ONCE(page->mapping) != expected_mapping) {
736 put_page(page);
741 if (!trylock_page(page)) {
742 put_page(page);
746 lock_page(page);
749 if (READ_ONCE(page->mapping) != expected_mapping) {
750 unlock_page(page);
751 put_page(page);
755 return page;
759 * We come here from above when page->mapping or !PageSwapCache
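The "key" that the keyhole-reference comments above talk about is simply the stable_node pointer tagged with PAGE_MAPPING_KSM and stored in page->mapping. A minimal sketch of how get_ksm_page() builds and checks it, assuming the surrounding function's variables (stable_node, page) and its again/stale labels:

	unsigned long kpfn;
	void *expected_mapping;

	expected_mapping = (void *)((unsigned long)stable_node | PAGE_MAPPING_KSM);
again:
	kpfn = READ_ONCE(stable_node->kpfn);	/* may be updated by page migration */
	page = pfn_to_page(kpfn);
	if (READ_ONCE(page->mapping) != expected_mapping)
		goto stale;	/* page was zapped or reused: remove the stale node */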
779 struct page *page;
782 page = get_ksm_page(stable_node, GET_KSM_PAGE_LOCK);
783 if (!page)
787 unlock_page(page);
788 put_page(page);
837 * page and upping mmap_lock. Nor does it fit with the way we skip dup'ing
863 static inline struct stable_node *page_stable_node(struct page *page)
865 return PageKsm(page) ? page_rmapping(page) : NULL;
868 static inline void set_page_stable_node(struct page *page,
871 page->mapping = (void *)((unsigned long)stable_node | PAGE_MAPPING_KSM);
880 struct page *page;
883 page = get_ksm_page(stable_node, GET_KSM_PAGE_LOCK);
884 if (!page) {
897 if (!page_mapped(page)) {
900 * since that allows for an unmapped ksm page to be recognized
902 * This page might be in a pagevec waiting to be freed,
906 set_page_stable_node(page, NULL);
911 unlock_page(page);
912 put_page(page);
1026 static u32 calc_checksum(struct page *page)
1029 void *addr = kmap_atomic(page);
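For reference, calc_checksum() (line 1026) is just a hash of the whole page; recent kernels use xxhash while older ones used jhash2, so treat this sketch as approximate:

static u32 calc_checksum(struct page *page)
{
	u32 checksum;
	void *addr = kmap_atomic(page);		/* temporarily map the page */

	checksum = xxhash(addr, PAGE_SIZE, 0);	/* seed 0; see <linux/xxhash.h> */
	kunmap_atomic(addr);
	return checksum;
}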
1035 static int write_protect_page(struct vm_area_struct *vma, struct page *page,
1040 .page = page,
1047 pvmw.address = page_address_in_vma(page, vma);
1051 BUG_ON(PageTransCompound(page));
1068 swapped = PageSwapCache(page);
1069 flush_cache_page(vma, pvmw.address, page_to_pfn(page));
1079 * No need to notify as we are downgrading page table to read
1080 * only not changing it to point to a new page.
1087 * page
1089 if (page_mapcount(page) + 1 + swapped != page_count(page)) {
1094 set_page_dirty(page);
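The comparison at line 1089 is the race guard against O_DIRECT and similar I/O: every legitimate reference to the page must be accounted for by its ptes, the caller's own reference, and the swap cache, otherwise someone could still dirty the page after we have compared it. A commented sketch of that check, where mm, pvmw and entry (the pte value saved by the earlier ptep_clear_flush()) come from the surrounding function:

	/*
	 * page_mapcount(page)	- ptes mapping the page
	 * + 1			- the reference our caller took via follow_page(FOLL_GET)
	 * + swapped		- the swap cache's reference, if PageSwapCache
	 * Anything beyond that means an unknown user (e.g. O_DIRECT) still
	 * holds the page: restore the pte and give up on this page.
	 */
	if (page_mapcount(page) + 1 + swapped != page_count(page)) {
		set_pte_at(mm, pvmw.address, pvmw.pte, entry);
		goto out_unlock;
	}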
1114 * replace_page - replace page in vma by new ksm page
1115 * @vma: vma that holds the pte pointing to page
1116 * @page: the page we are replacing by kpage
1117 * @kpage: the ksm page we replace page by
1122 static int replace_page(struct vm_area_struct *vma, struct page *page,
1123 struct page *kpage, pte_t orig_pte)
1134 addr = page_address_in_vma(page, vma);
1164 * We're replacing an anonymous page with a zero page, which is
1174 * No need to notify as we are replacing a read only page with another
1175 * read only page with the same content.
1182 page_remove_rmap(page, false);
1183 if (!page_mapped(page))
1184 try_to_free_swap(page);
1185 put_page(page);
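Once replace_page() (lines 1114-1185) has re-checked the pte against orig_pte under the page table lock, the replacement itself is a small pte swap. A rough sketch of the non-zero-page path, with mm, addr, ptep and newpte taken from the surrounding function:

	/* Install kpage (an existing KSM page) in place of page. */
	get_page(kpage);
	page_add_anon_rmap(kpage, vma, addr, false);
	newpte = mk_pte(kpage, vma->vm_page_prot);

	flush_cache_page(vma, addr, pte_pfn(*ptep));
	ptep_clear_flush(vma, addr, ptep);
	set_pte_at_notify(mm, addr, ptep, newpte);	/* point the pte at the shared page */

	page_remove_rmap(page, false);
	if (!page_mapped(page))
		try_to_free_swap(page);
	put_page(page);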
1197 * @vma: the vma that holds the pte pointing to page
1198 * @page: the PageAnon page that we want to replace with kpage
1199 * @kpage: the PageKsm page that we want to map instead of page,
1200 * or NULL the first time when we want to use page as kpage.
1205 struct page *page, struct page *kpage)
1210 if (page == kpage) /* ksm page forked */
1213 if (!PageAnon(page))
1216 if (!xpm_integrity_check_one_page_merge(page, kpage))
1220 * We need the page lock to read a stable PageSwapCache in
1224 * then come back to this page when it is unlocked.
1226 if (!trylock_page(page))
1229 if (PageTransCompound(page)) {
1230 if (split_huge_page(page))
1235 * If this anonymous page is mapped only here, its pte may need
1240 if (write_protect_page(vma, page, &orig_pte) == 0) {
1243 * While we hold page lock, upgrade page from
1247 set_page_stable_node(page, NULL);
1248 mark_page_accessed(page);
1250 * Page reclaim just frees a clean page with no dirty
1251 * ptes: make sure that the ksm page would be swapped.
1253 if (!PageDirty(page))
1254 SetPageDirty(page);
1256 } else if (pages_identical(page, kpage))
1257 err = replace_page(vma, page, kpage, orig_pte);
1261 munlock_vma_page(page);
1263 unlock_page(page);
1266 page = kpage; /* for final unlock */
1271 unlock_page(page);
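pages_identical(), used at line 1256 to decide whether replace_page() is safe, is a plain byte-for-byte comparison built on memcmp_pages() (the latter lives in mm/util.c in current kernels); approximately:

static int pages_identical(struct page *page1, struct page *page2)
{
	return !memcmp_pages(page1, page2);
}

int memcmp_pages(struct page *page1, struct page *page2)
{
	char *addr1 = kmap_atomic(page1);
	char *addr2 = kmap_atomic(page2);
	int ret = memcmp(addr1, addr2, PAGE_SIZE);

	kunmap_atomic(addr2);
	kunmap_atomic(addr1);
	return ret;
}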
1278 * but no new kernel page is allocated: kpage must already be a ksm page.
1283 struct page *page, struct page *kpage)
1294 err = try_to_merge_one_page(vma, page, kpage);
1311 * to be merged into one page.
1314 * pages into one ksm page, NULL otherwise.
1316 * Note that this function upgrades page to ksm page: if one of the pages
1317 * is already a ksm page, try_to_merge_with_ksm_page should be used.
1319 static struct page *try_to_merge_two_pages(struct rmap_item *rmap_item,
1320 struct page *page,
1322 struct page *tree_page)
1326 err = try_to_merge_with_ksm_page(rmap_item, page, NULL);
1329 tree_page, page);
1331 * If that fails, we have a ksm page with only one pte
1337 return err ? NULL : page;
1360 static struct page *stable_node_dup(struct stable_node **_stable_node_dup,
1367 struct page *_tree_page, *tree_page = NULL;
1506 static struct page *__stable_node_chain(struct stable_node **_stable_node_dup,
1528 static __always_inline struct page *chain_prune(struct stable_node **s_n_d,
1535 static __always_inline struct page *chain(struct stable_node **s_n_d,
1540 struct page *tree_page;
1549 * stable_tree_search - search for page inside the stable tree
1551 * This function checks if there is a page inside the stable tree
1552 * with identical content to the page that we are scanning right now.
1557 static struct page *stable_tree_search(struct page *page)
1566 page_node = page_stable_node(page);
1568 /* ksm page forked */
1569 get_page(page);
1570 return page;
1573 nid = get_kpfn_nid(page_to_pfn(page));
1580 struct page *tree_page;
1612 * Take any of the stable_node dups page of
1637 ret = memcmp_pages(page, tree_page);
1649 * Test if the migrated page should be merged
1651 * 1 we can migrate it with another KSM page
1654 if (page_mapcount(page) > 1)
1663 * page in any of the existing
1666 * scanned page to find itself a match
1668 * brand new KSM page to add later to
1675 * Lock and unlock the stable_node's page (which
1676 * might already have been migrated) so that page
1713 get_page(page);
1714 return page;
1739 get_page(page);
1741 page = NULL;
1744 page = NULL;
1755 get_page(page);
1757 page = NULL;
1759 page = NULL;
1764 return page;
1790 * of the current nid for this page
1803 * stable_tree_insert - insert stable tree node pointing to new ksm page
1809 static struct stable_node *stable_tree_insert(struct page *kpage)
1827 struct page *tree_page;
1847 * Take any of the stable_node dups page of
1915 * unstable_tree_search_insert - search for identical page,
1918 * This function searches for a page in the unstable tree identical to the
1919 * page currently being scanned; and if no identical page is found in the
1923 * to the currently scanned page, NULL otherwise.
1930 struct page *page,
1931 struct page **tree_pagep)
1938 nid = get_kpfn_nid(page_to_pfn(page));
1944 struct page *tree_page;
1954 * Don't substitute a ksm page for a forked page.
1956 if (page == tree_page) {
1961 ret = memcmp_pages(page, tree_page);
1998 * the same ksm page.
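Both the stable and unstable tree lookups above walk a red-black tree keyed purely by page content: the sign of memcmp_pages() decides whether to descend left or right. The unstable-tree descent around line 1961 is roughly the following, with new, parent, tree_rmap_item and tree_pagep coming from the surrounding function (the NUMA merge_across_nodes handling is omitted):

		ret = memcmp_pages(page, tree_page);
		parent = *new;
		if (ret < 0) {
			put_page(tree_page);
			new = &parent->rb_left;
		} else if (ret > 0) {
			put_page(tree_page);
			new = &parent->rb_right;
		} else {
			/* identical content found: hand the tree page back */
			*tree_pagep = tree_page;
			return tree_rmap_item;
		}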
2033 * cmp_and_merge_page - first see if page can be merged into the stable tree;
2034 * if not, compare checksum to previous and if it's the same, see if page can
2035 * be inserted into the unstable tree, or merged with a page already there and
2038 * @page: the page that we are searching an identical page to.
2039 * @rmap_item: the reverse mapping into the virtual address of this page
2041 static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
2045 struct page *tree_page = NULL;
2047 struct page *kpage;
2052 stable_node = page_stable_node(page);
2072 /* We first start with searching the page inside the stable tree */
2073 kpage = stable_tree_search(page);
2074 if (kpage == page && rmap_item->head == stable_node) {
2085 err = try_to_merge_with_ksm_page(rmap_item, page, kpage);
2088 * The page was successfully merged:
2101 * If the hash value of the page has changed from the last time
2102 * we calculated it, this page is changing frequently: therefore we
2106 checksum = calc_checksum(page);
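The checksum test at lines 2101-2106 is what keeps rapidly-changing pages out of the unstable tree: a page is only considered for insertion once it has shown the same checksum on two consecutive scans. In outline:

	/* Skip pages whose content changed since the previous scan. */
	checksum = calc_checksum(page);
	if (rmap_item->oldchecksum != checksum) {
		rmap_item->oldchecksum = checksum;
		return;			/* try again on a later pass */
	}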
2113 * Same checksum as an empty page. We attempt to merge it with the
2114 * appropriate zero page if the user enabled this via sysfs.
2122 err = try_to_merge_one_page(vma, page,
2133 * In case of failure, the page was not really empty, so we
2140 unstable_tree_search_insert(rmap_item, page, &tree_page);
2144 kpage = try_to_merge_two_pages(rmap_item, page,
2148 * page, then we actually ended up increasing the reference
2149 * count of the same compound page twice, and split_huge_page
2156 split = PageTransCompound(page)
2157 && compound_head(page) == compound_head(tree_page);
2175 * If we fail to insert the page into the stable tree,
2177 * to a ksm page left outside the stable tree,
2188 * compound page. We will split the page now, but no
2191 * the page is locked, it is better to skip it and
2194 if (!trylock_page(page))
2196 split_huge_page(page);
2197 unlock_page(page);
2230 static struct rmap_item *scan_get_next_rmap_item(struct page **page)
2245 * pagevecs, raised page count preventing write_protect_page
2263 struct page *page;
2267 page = get_ksm_page(stable_node,
2269 if (page)
2270 put_page(page);
2311 *page = follow_page(vma, ksm_scan.address, FOLL_GET);
2312 if (IS_ERR_OR_NULL(*page)) {
2317 if (PageAnon(*page)) {
2318 flush_anon_page(vma, *page, ksm_scan.address);
2319 flush_dcache_page(*page);
2327 put_page(*page);
2331 put_page(*page);
2396 struct page *page;
2400 rmap_item = scan_get_next_rmap_item(&page);
2403 cmp_and_merge_page(page, rmap_item);
2404 put_page(page);
2574 struct page *ksm_might_need_to_copy(struct page *page,
2577 struct anon_vma *anon_vma = page_anon_vma(page);
2578 struct page *new_page;
2580 if (PageKsm(page)) {
2581 if (page_stable_node(page) &&
2583 return page; /* no need to copy it */
2585 return page; /* no need to copy it */
2587 page->index == linear_page_index(vma, address)) {
2588 return page; /* still no need to copy it */
2590 if (!PageUptodate(page))
2591 return page; /* let do_swap_page report the error */
2599 copy_user_highpage(new_page, page, address, vma);
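When none of the early returns at lines 2580-2591 apply, ksm_might_need_to_copy() allocates a fresh anonymous page and copies the KSM page into it; the tail of the function is roughly as follows (memcg charging elided):

	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
	if (new_page) {
		copy_user_highpage(new_page, page, address, vma);

		SetPageDirty(new_page);		/* contents must reach swap */
		__SetPageUptodate(new_page);
		__SetPageLocked(new_page);	/* caller expects a locked page */
	}

	return new_page;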
2609 void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc)
2615 VM_BUG_ON_PAGE(!PageKsm(page), page);
2618 * Rely on the page lock to protect against concurrent modifications
2619 * to that page's node of the stable tree.
2621 VM_BUG_ON_PAGE(!PageLocked(page), page);
2623 stable_node = page_stable_node(page);
2658 if (!rwc->rmap_one(page, vma, addr, rwc->arg)) {
2662 if (rwc->done && rwc->done(page)) {
2674 void ksm_migrate_page(struct page *newpage, struct page *oldpage)
2716 * Don't get_ksm_page, page has already gone:
2717 * which is why we keep kpfn instead of page*
2801 * Most of the work is done by page migration; but there might
2805 * non-existent struct page.
3030 * When a KSM page is created it is shared by 2 mappings. This
3170 /* The correct value depends on page size and endianness */