Lines matching refs:page (mm/swapfile.c)
120 * corresponding page
131 struct page *page;
134 page = find_get_page(swap_address_space(entry), offset);
135 if (!page)
139 * called by vmscan.c at reclaiming pages. So, we hold a lock on a page,
144 if (trylock_page(page)) {
146 ((flags & TTRS_UNMAPPED) && !page_mapped(page)) ||
147 ((flags & TTRS_FULL) && mem_cgroup_swap_full(page)))
148 ret = try_to_free_swap(page);
149 unlock_page(page);
151 put_page(page);
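
Putting lines 120-151 back together, the helper they come from (__try_to_reclaim_swap()) reads roughly as below. The entry construction and the TTRS_ANYWAY arm are not in the match list; they follow the usual pattern and should be read as assumptions.

static int __try_to_reclaim_swap(struct swap_info_struct *si,
				 unsigned long offset, unsigned long flags)
{
	swp_entry_t entry = swp_entry(si->type, offset);
	struct page *page;
	int ret = 0;

	page = find_get_page(swap_address_space(entry), offset);
	if (!page)
		return 0;
	/*
	 * This can run while vmscan is reclaiming and may already hold a page
	 * lock, so only trylock here; sleeping in lock_page() could deadlock.
	 */
	if (trylock_page(page)) {
		if ((flags & TTRS_ANYWAY) ||
		    ((flags & TTRS_UNMAPPED) && !page_mapped(page)) ||
		    ((flags & TTRS_FULL) && mem_cgroup_swap_full(page)))
			ret = try_to_free_swap(page);
		unlock_page(page);
	}
	put_page(page);
	return ret;
}
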
178 /* Do not discard the swap header page! */
224 sector_t swap_page_sector(struct page *page)
226 struct swap_info_struct *sis = page_swap_info(page);
231 offset = __page_file_index(page);
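
Lines 224-231 are from swap_page_sector(). A sketch of the whole helper follows; the extent lookup via a helper like offset_to_swap_extent() and the start_block/start_page field names are not in the match list and are assumptions here.

sector_t swap_page_sector(struct page *page)
{
	struct swap_info_struct *sis = page_swap_info(page);
	struct swap_extent *se;
	sector_t sector;
	pgoff_t offset;

	offset = __page_file_index(page);	/* swap offset of this page */
	se = offset_to_swap_extent(sis, offset);
	sector = se->start_block + (offset - se->start_page);
	return sector << (PAGE_SHIFT - 9);	/* pages -> 512-byte sectors */
}
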
565 * counter becomes 0, which means no page in the cluster is in use, we can
998 * page swap is disabled. Warn and fail the allocation.
1116 * callers probably all tried to get a page from the same si
1294 * swapoff, such as page lock, page table lock, etc. The caller must
1310 * the page is read from the swap device, the PTE is verified not
1311 * changed with the page table locked to check whether the swap device
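
Lines 1294-1311 describe how callers keep the swap device alive across a swap-in. A minimal usage sketch of that pattern with get_swap_device()/put_swap_device(); the wrapper function is purely illustrative, not something in swapfile.c.

/* Hypothetical illustration of the pinning pattern, not a real function. */
static int swapin_hold_device_example(swp_entry_t entry)
{
	struct swap_info_struct *si;

	si = get_swap_device(entry);	/* pins the device against swapoff */
	if (!si)
		return -EINVAL;		/* raced with swapoff: entry is stale */

	/*
	 * ... read the page from the swap device here; once it is read, the
	 * PTE is re-verified under the page table lock (lines 1310-1311)
	 * before the page is installed ...
	 */

	put_swap_device(si);		/* allow swapoff to proceed again */
	return 0;
}
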
1391 void put_swap_page(struct page *page, swp_entry_t entry)
1400 int size = swap_entry_size(thp_nr_pages(page));
1491 * How many references to page are currently swapped out?
1495 int page_swapcount(struct page *page)
1503 entry.val = page_private(page);
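
Lines 1491-1503 belong to page_swapcount(). Sketched in full below, assuming the file-local helpers _swap_info_get(), lock_cluster_or_swap_info() and swap_count() that the match list does not show.

int page_swapcount(struct page *page)
{
	int count = 0;
	struct swap_info_struct *p;
	struct swap_cluster_info *ci;
	swp_entry_t entry;
	unsigned long offset;

	entry.val = page_private(page);	/* swap entry kept in page->private */
	p = _swap_info_get(entry);
	if (p) {
		offset = swp_offset(entry);
		ci = lock_cluster_or_swap_info(p, offset);
		count = swap_count(p->swap_map[offset]);
		unlock_cluster_or_swap_info(p, ci);
	}
	return count;
}
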
1567 struct page *page;
1586 page = vmalloc_to_page(p->swap_map + offset);
1588 VM_BUG_ON(page_private(page) != SWP_CONTINUED);
1591 page = list_next_entry(page, lru);
1592 map = kmap_atomic(page);
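
Lines 1567-1592 are the continuation walk in swp_swapcount(), which extends the count beyond what a single swap_map byte can hold. A sketch, with the locking helpers assumed:

int swp_swapcount(swp_entry_t entry)
{
	int count, tmp_count, n;
	struct swap_info_struct *p;
	struct swap_cluster_info *ci;
	struct page *page;
	pgoff_t offset;
	unsigned char *map;

	p = _swap_info_get(entry);
	if (!p)
		return 0;

	offset = swp_offset(entry);
	ci = lock_cluster_or_swap_info(p, offset);

	count = swap_count(p->swap_map[offset]);
	if (!(count & COUNT_CONTINUED))
		goto out;

	count &= ~COUNT_CONTINUED;
	n = SWAP_MAP_MAX + 1;

	page = vmalloc_to_page(p->swap_map + offset);
	offset &= ~PAGE_MASK;
	VM_BUG_ON(page_private(page) != SWP_CONTINUED);

	do {
		page = list_next_entry(page, lru);
		map = kmap_atomic(page);
		tmp_count = map[offset];
		kunmap_atomic(map);

		/* each continuation page is one more "digit" of the count */
		count += (tmp_count & ~COUNT_CONTINUED) * n;
		n *= (SWAP_CONT_MAX + 1);
	} while (tmp_count & COUNT_CONTINUED);
out:
	unlock_cluster_or_swap_info(p, ci);
	return count;
}
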
1631 static bool page_swapped(struct page *page)
1636 if (!IS_ENABLED(CONFIG_THP_SWAP) || likely(!PageTransCompound(page)))
1637 return page_swapcount(page) != 0;
1639 page = compound_head(page);
1640 entry.val = page_private(page);
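
Lines 1631-1640 are page_swapped(). The THP branch hands off to a cluster-wide check, assumed here to be swap_page_trans_huge_swapped():

static bool page_swapped(struct page *page)
{
	swp_entry_t entry;
	struct swap_info_struct *si;

	/* Non-THP (or THP swap disabled): one swap_map byte answers it. */
	if (!IS_ENABLED(CONFIG_THP_SWAP) || likely(!PageTransCompound(page)))
		return page_swapcount(page) != 0;

	page = compound_head(page);
	entry.val = page_private(page);
	si = _swap_info_get(entry);
	if (si)
		return swap_page_trans_huge_swapped(si, entry);
	return false;
}
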
1647 static int page_trans_huge_map_swapcount(struct page *page, int *total_mapcount,
1658 VM_BUG_ON_PAGE(PageHuge(page), page);
1660 if (!IS_ENABLED(CONFIG_THP_SWAP) || likely(!PageTransCompound(page))) {
1661 mapcount = page_trans_huge_mapcount(page, total_mapcount);
1662 if (PageSwapCache(page))
1663 swapcount = page_swapcount(page);
1669 page = compound_head(page);
1672 if (PageSwapCache(page)) {
1675 entry.val = page_private(page);
1685 mapcount = atomic_read(&page[i]._mapcount) + 1;
1694 if (PageDoubleMap(page)) {
1698 mapcount = compound_mapcount(page);
1710 * We can write to an anon page without COW if there are no other references
1719 bool reuse_swap_page(struct page *page, int *total_map_swapcount)
1723 VM_BUG_ON_PAGE(!PageLocked(page), page);
1724 if (unlikely(PageKsm(page)))
1726 count = page_trans_huge_map_swapcount(page, &total_mapcount,
1730 if (count == 1 && PageSwapCache(page) &&
1731 (likely(!PageTransCompound(page)) ||
1733 total_swapcount == page_swapcount(page))) {
1734 if (!PageWriteback(page)) {
1735 page = compound_head(page);
1736 delete_from_swap_cache(page);
1737 SetPageDirty(page);
1742 entry.val = page_private(page);
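
Lines 1710-1742 come from reuse_swap_page(). A sketch of the whole decision; the SWP_STABLE_WRITES bail-out in the writeback branch is not in the match list and is filled in as an assumption.

bool reuse_swap_page(struct page *page, int *total_map_swapcount)
{
	int count, total_mapcount, total_swapcount;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	if (unlikely(PageKsm(page)))
		return false;
	count = page_trans_huge_map_swapcount(page, &total_mapcount,
					      &total_swapcount);
	if (total_map_swapcount)
		*total_map_swapcount = total_mapcount + total_swapcount;
	if (count == 1 && PageSwapCache(page) &&
	    (likely(!PageTransCompound(page)) ||
	     /* the remaining swap count will be freed soon */
	     total_swapcount == page_swapcount(page))) {
		if (!PageWriteback(page)) {
			page = compound_head(page);
			delete_from_swap_cache(page);
			SetPageDirty(page);
		} else {
			swp_entry_t entry;
			struct swap_info_struct *p;

			entry.val = page_private(page);
			p = swap_info_get(entry);
			if (p->flags & SWP_STABLE_WRITES) {
				spin_unlock(&p->lock);
				return false;
			}
			spin_unlock(&p->lock);
		}
	}

	return count <= 1;
}
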
1756 * If swap is getting full, or if there are no more mappings of this page,
1759 int try_to_free_swap(struct page *page)
1761 VM_BUG_ON_PAGE(!PageLocked(page), page);
1763 if (!PageSwapCache(page))
1765 if (PageWriteback(page))
1767 if (page_swapped(page))
1776 * the swap from a page which has already been recorded in the
1777 * image as a clean swapcache page, and then reuse its swap for
1778 * another page of the image. On waking from hibernation, the
1779 * original page might be freed under memory pressure, then
1788 page = compound_head(page);
1789 delete_from_swap_cache(page);
1790 SetPageDirty(page);
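
Lines 1756-1790 cover nearly all of try_to_free_swap(); the only piece missing from the match list is the hibernation guard, assumed here to be pm_suspended_storage():

int try_to_free_swap(struct page *page)
{
	VM_BUG_ON_PAGE(!PageLocked(page), page);

	if (!PageSwapCache(page))
		return 0;
	if (PageWriteback(page))
		return 0;
	if (page_swapped(page))
		return 0;

	/*
	 * See lines 1776-1779: while hibernation is writing the image,
	 * dropping swap here could let a "clean" swapcache slot be reused for
	 * another page of the image, corrupting data on resume.
	 */
	if (pm_suspended_storage())
		return 0;

	page = compound_head(page);
	delete_from_swap_cache(page);
	SetPageDirty(page);
	return 1;
}
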
1796 * free the page cache entry if it is the last user.
1929 unsigned long addr, swp_entry_t entry, struct page *page)
1931 struct page *swapcache;
1936 swapcache = page;
1937 page = ksm_might_need_to_copy(page, vma, addr);
1938 if (unlikely(!page))
1949 get_page(page);
1951 pte_mkold(mk_pte(page, vma->vm_page_prot)));
1952 if (page == swapcache) {
1953 page_add_anon_rmap(page, vma, addr, false);
1955 page_add_new_anon_rmap(page, vma, addr, false);
1956 lru_cache_add_inactive_or_unevictable(page, vma);
1961 if (page != swapcache) {
1962 unlock_page(page);
1963 put_page(page);
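
Lines 1929-1963 are unuse_pte(), which installs the swapped-in page back into one PTE during swapoff. Sketched in full below; the PTE re-check (assumed to use a helper like pte_same_as_swp()) and the mm counters are not in the match list and follow the usual pattern.

static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long addr, swp_entry_t entry, struct page *page)
{
	struct page *swapcache;
	spinlock_t *ptl;
	pte_t *pte;
	int ret = 1;

	swapcache = page;
	page = ksm_might_need_to_copy(page, vma, addr);
	if (unlikely(!page))
		return -ENOMEM;

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	if (unlikely(!pte_same_as_swp(*pte, swp_entry_to_pte(entry)))) {
		ret = 0;	/* PTE changed while the page was being read */
		goto out;
	}

	dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
	inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
	get_page(page);
	set_pte_at(vma->vm_mm, addr, pte,
		   pte_mkold(mk_pte(page, vma->vm_page_prot)));
	if (page == swapcache) {
		page_add_anon_rmap(page, vma, addr, false);
	} else {	/* ksm gave us a fresh copy */
		page_add_new_anon_rmap(page, vma, addr, false);
		lru_cache_add_inactive_or_unevictable(page, vma);
	}
	swap_free(entry);
out:
	pte_unmap_unlock(pte, ptl);
	if (page != swapcache) {
		unlock_page(page);
		put_page(page);
	}
	return ret;
}
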
1973 struct page *page;
1999 page = lookup_swap_cache(entry, vma, addr);
2000 if (!page) {
2004 page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
2007 if (!page) {
2013 lock_page(page);
2014 wait_on_page_writeback(page);
2015 ret = unuse_pte(vma, pmd, addr, entry, page);
2017 unlock_page(page);
2018 put_page(page);
2022 try_to_free_swap(page);
2023 unlock_page(page);
2024 put_page(page);
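
Lines 1973-2024 are the heart of unuse_pte_range(): for each swap PTE of the device being swapped off, bring the page in (from swap cache or via readahead), then hand it to unuse_pte(). A sketch of that per-entry sequence; the surrounding PTE loop and frontswap accounting are omitted, and the vm_fault plumbing and swap_map check are assumptions.

	/* inside the pte loop of unuse_pte_range(), for one swap entry */
	page = lookup_swap_cache(entry, vma, addr);
	if (!page) {
		struct vm_fault vmf = {
			.vma = vma,
			.address = addr,
			.pmd = pmd,
		};

		page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE, &vmf);
	}
	if (!page) {
		/* slot may have been freed meanwhile; otherwise we are OOM */
		if (*swap_map == 0 || *swap_map == SWAP_MAP_BAD)
			goto try_next;
		return -ENOMEM;
	}

	lock_page(page);
	wait_on_page_writeback(page);
	ret = unuse_pte(vma, pmd, addr, entry, page);
	if (ret < 0) {
		unlock_page(page);
		put_page(page);
		goto out;
	}

	/* the swap slot is no longer needed for this mapping */
	try_to_free_swap(page);
	unlock_page(page);
	put_page(page);
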
2194 struct page *page;
2248 page = find_get_page(swap_address_space(entry), i);
2249 if (!page)
2253 * It is conceivable that a racing task removed this page from
2254 * swap cache just before we acquired the page lock. The page
2258 lock_page(page);
2259 wait_on_page_writeback(page);
2260 try_to_free_swap(page);
2261 unlock_page(page);
2262 put_page(page);
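
Lines 2194-2262 are from try_to_unuse(): after all mappings have been walked, any pages still sitting in this device's swap cache are drained explicitly. A sketch of that final loop; find_next_to_unuse() and the inuse_pages/signal checks are assumptions based on the usual shape of this code.

	i = 0;
	while (READ_ONCE(si->inuse_pages) &&
	       !signal_pending(current) &&
	       (i = find_next_to_unuse(si, i, frontswap)) != 0) {

		entry = swp_entry(type, i);
		page = find_get_page(swap_address_space(entry), i);
		if (!page)
			continue;

		/*
		 * A racing task may have removed the page from swap cache
		 * just before we took the page lock; try_to_free_swap() only
		 * acts on pages that are genuinely stale, so that is fine.
		 */
		lock_page(page);
		wait_on_page_writeback(page);
		try_to_free_swap(page);
		unlock_page(page);
		put_page(page);
	}
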
2316 * corresponds to page offset for the specified swap entry.
2317 * Note that the type of this function is sector_t, but it returns page offset
2335 * Returns the page offset into bdev for the specified page's swap entry.
2337 sector_t map_swap_page(struct page *page, struct block_device **bdev)
2340 entry.val = page_private(page);
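
Lines 2316-2340 belong to the extent-lookup pair map_swap_entry()/map_swap_page(). A sketch, again assuming the extent helper and field names not shown in the match list:

/* Returns the page offset into bdev for the given swap entry. */
static sector_t map_swap_entry(swp_entry_t entry, struct block_device **bdev)
{
	struct swap_info_struct *sis;
	struct swap_extent *se;
	pgoff_t offset;

	sis = swp_swap_info(entry);
	*bdev = sis->bdev;

	offset = swp_offset(entry);
	se = offset_to_swap_extent(sis, offset);
	return se->start_block + (offset - se->start_page);
}

/* Returns the page offset into bdev for the specified page's swap entry. */
sector_t map_swap_page(struct page *page, struct block_device **bdev)
{
	swp_entry_t entry;

	entry.val = page_private(page);
	return map_swap_entry(entry, bdev);
}
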
2368 * Add a block range (and the corresponding page range) into this swapdev's
2371 * This function rather assumes that it is called in ascending page order.
2383 * function is called in ascending page order.
2418 * time for locating where on disk a page belongs.
2442 * map_swap_page() has been measured at about 0.3 per page. - akpm.
3097 nr_good_pages = maxpages - 1; /* omit header page */
3193 struct page *page = NULL;
3246 page = read_mapping_page(mapping, 0, swap_file);
3247 if (IS_ERR(page)) {
3248 error = PTR_ERR(page);
3251 swap_header = kmap(page);
3327 /* frontswap enabled? set up bit-per-page map for frontswap */
3346 * perform discards for released swap page-clusters.
3428 if (page && !IS_ERR(page)) {
3429 kunmap(page);
3430 put_page(page);
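
Lines 3193-3430 are the parts of the swapon path that read and later release the header page. The error-label names below are assumptions; the point is the read_mapping_page()/kmap() then kunmap()/put_page() pairing.

	/* read the swap header (page 0 of the swap file) */
	page = read_mapping_page(mapping, 0, swap_file);
	if (IS_ERR(page)) {
		error = PTR_ERR(page);
		goto bad_swap_unlock_inode;	/* label name assumed */
	}
	swap_header = kmap(page);

	/* ... validate the header, build swap_map and extents ... */

	/* common exit: drop the header page whether or not swapon succeeded */
	if (page && !IS_ERR(page)) {
		kunmap(page);
		put_page(page);
	}
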
3569 * might occur if a page table entry has got corrupted.
3598 struct swap_info_struct *page_swap_info(struct page *page)
3600 swp_entry_t entry = { .val = page_private(page) };
3607 struct address_space *__page_file_mapping(struct page *page)
3609 return page_swap_info(page)->swap_file->f_mapping;
3613 pgoff_t __page_file_index(struct page *page)
3615 swp_entry_t swap = { .val = page_private(page) };
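
Lines 3598-3615 are three short helpers that map a swapcache page back to its swap device, its backing file mapping, and its offset. Completed below; swp_swap_info() is assumed to be the entry-to-swap_info lookup.

struct swap_info_struct *page_swap_info(struct page *page)
{
	swp_entry_t entry = { .val = page_private(page) };

	return swp_swap_info(entry);
}

/* The backing address_space of the swap file this page lives in. */
struct address_space *__page_file_mapping(struct page *page)
{
	return page_swap_info(page)->swap_file->f_mapping;
}

/* The page's offset within its swap device, taken from the swap entry. */
pgoff_t __page_file_index(struct page *page)
{
	swp_entry_t swap = { .val = page_private(page) };

	return swp_offset(swap);
}
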
3622 * beyond SWAP_MAP_MAX, it allocates a new page and links that to the entry's
3623 * page of the original vmalloc'ed swap_map, to hold the continuation count
3628 * on the original swap_map, only referring to a continuation page when the
3632 * page table locks; if it fails, add_swap_count_continuation(, GFP_KERNEL)
3639 struct page *head;
3640 struct page *page;
3641 struct page *list_page;
3648 * for latency not to zero a page while GFP_ATOMIC and holding locks.
3650 page = alloc_page(gfp_mask | __GFP_HIGHMEM);
3677 if (!page) {
3684 * no architecture is using highmem pages for kernel page tables: so it
3685 * will not corrupt the GFP_ATOMIC caller's atomic page table kmaps.
3692 * Page allocation does not initialize the page's lru field,
3707 * a continuation page, free our allocation and use this one.
3724 list_add_tail(&page->lru, &head->lru);
3725 page = NULL; /* now it's attached, don't free it */
3733 if (page)
3734 __free_page(page);
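
Lines 3622-3734 are from add_swap_count_continuation(). A condensed sketch of how the newly allocated page gets attached to the continuation list hanging off the swap_map's vmalloc page; the locking and early-exit checks of the real function are simplified away here and partly assumed.

	/* head is the vmalloc page that backs swap_map[offset] */
	head = vmalloc_to_page(si->swap_map + offset);
	offset &= ~PAGE_MASK;

	/* First continuation for this swap_map page: set up the list head. */
	if (!page_private(head)) {
		INIT_LIST_HEAD(&head->lru);
		set_page_private(head, SWP_CONTINUED);
		si->flags |= SWP_CONTINUED;
	}

	/* If an existing continuation page still has room, reuse it. */
	list_for_each_entry(list_page, &head->lru, lru) {
		unsigned char *map;

		if (!(count & COUNT_CONTINUED))
			goto out;	/* previous level says: no more needed */

		map = kmap_atomic(list_page) + offset;
		count = *map;
		kunmap_atomic(map);

		if ((count & ~COUNT_CONTINUED) != SWAP_CONT_MAX)
			goto out;	/* this level still has space */
	}

	list_add_tail(&page->lru, &head->lru);
	page = NULL;			/* now it's attached, don't free it */
out:
	if (page)
		__free_page(page);
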
3740 * from SWAP_MAP_MAX, check if there is already a continuation page to carry
3741 * into, carry if so, or else fail until a new continuation page is allocated;
3750 struct page *head;
3751 struct page *page;
3763 page = list_next_entry(head, lru);
3764 map = kmap_atomic(page) + offset;
3775 page = list_next_entry(page, lru);
3776 BUG_ON(page == head);
3777 map = kmap_atomic(page) + offset;
3781 page = list_next_entry(page, lru);
3782 if (page == head) {
3786 map = kmap_atomic(page) + offset;
3787 init_map: *map = 0; /* we didn't zero the page */
3791 while ((page = list_prev_entry(page, lru)) != head) {
3792 map = kmap_atomic(page) + offset;
3805 page = list_next_entry(page, lru);
3806 BUG_ON(page == head);
3807 map = kmap_atomic(page) + offset;
3814 while ((page = list_prev_entry(page, lru)) != head) {
3815 map = kmap_atomic(page) + offset;
3836 struct page *head;
3839 struct page *page, *next;
3841 list_for_each_entry_safe(page, next, &head->lru, lru) {
3842 list_del(&page->lru);
3843 __free_page(page);
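
Lines 3836-3843 are the inner cleanup of free_swap_count_continuations(), which at swapoff walks every swap_map page and frees any continuation pages chained off it. The outer loop over si->max is reconstructed below as an assumption.

void free_swap_count_continuations(struct swap_info_struct *si)
{
	pgoff_t offset;

	for (offset = 0; offset < si->max; offset += PAGE_SIZE) {
		struct page *head;

		head = vmalloc_to_page(si->swap_map + offset);
		if (page_private(head)) {
			struct page *page, *next;

			/* free every continuation page chained off this head */
			list_for_each_entry_safe(page, next, &head->lru, lru) {
				list_del(&page->lru);
				__free_page(page);
			}
		}
	}
}
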
3850 void cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask)
3853 int nid = page_to_nid(page);