Lines Matching defs:page (symbol search for "page" in the tmpfs/shmem implementation, mm/shmem.c; each entry shows the source line number followed by the matching line)
94 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
105 pgoff_t next; /* the next page offset to be fallocated */
140 static bool shmem_should_replace_page(struct page *page, gfp_t gfp);
141 static int shmem_replace_page(struct page **pagep, gfp_t gfp,
144 struct page **pagep, enum sgp_type sgp,
148 struct page **pagep, enum sgp_type sgp,
153 struct page **pagep, enum sgp_type sgp)
433 * Checking page is not enough: by the time a SwapCache page is locked, it
450 * only allocate huge pages if the page will be fully within i_size,
528 struct page *page;
579 page = find_get_page(inode->i_mapping,
581 if (!page)
584 /* No huge page at the end of the file: nothing to split */
585 if (!PageTransHuge(page)) {
586 put_page(page);
592 * to lock the page at this time.
597 if (!trylock_page(page)) {
598 put_page(page);
602 ret = split_huge_page(page);
603 unlock_page(page);
604 put_page(page);
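The lines 528-604 above come from the shmem huge-page shrinker: it looks up the page covering the end of the file and splits it when it is a transparent huge page, so the tail beyond i_size can be reclaimed. A minimal sketch of that pattern (the function name is mine; the shrinker bookkeeping of the real code is omitted):

        static int split_trailing_huge_page(struct inode *inode, pgoff_t index)
        {
                struct page *page;
                int ret;

                page = find_get_page(inode->i_mapping, index);
                if (!page)
                        return 0;               /* no page at EOF: nothing to split */

                if (!PageTransHuge(page)) {
                        put_page(page);
                        return 0;               /* no huge page at EOF: nothing to split */
                }

                /* don't sleep on the page lock in shrinker context */
                if (!trylock_page(page)) {
                        put_page(page);
                        return -EBUSY;
                }

                ret = split_huge_page(page);    /* 0 on success */
                unlock_page(page);
                put_page(page);
                return ret;
        }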
672 static int shmem_add_to_page_cache(struct page *page,
677 XA_STATE_ORDER(xas, &mapping->i_pages, index, compound_order(page));
679 unsigned long nr = compound_nr(page);
682 VM_BUG_ON_PAGE(PageTail(page), page);
683 VM_BUG_ON_PAGE(index != round_down(index, nr), page);
684 VM_BUG_ON_PAGE(!PageLocked(page), page);
685 VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
686 VM_BUG_ON(expected && PageTransHuge(page));
688 page_ref_add(page, nr);
689 page->mapping = mapping;
690 page->index = index;
692 if (!PageSwapCache(page)) {
693 error = mem_cgroup_charge(page, charge_mm, gfp);
695 if (PageTransHuge(page)) {
702 cgroup_throttle_swaprate(page, gfp);
714 xas_store(&xas, page);
719 if (PageTransHuge(page)) {
721 __inc_node_page_state(page, NR_SHMEM_THPS);
724 __mod_lruvec_page_state(page, NR_FILE_PAGES, nr);
725 __mod_lruvec_page_state(page, NR_SHMEM, nr);
737 page->mapping = NULL;
738 page_ref_sub(page, nr);
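shmem_add_to_page_cache() (lines 672-738) follows the standard XArray insertion pattern: take one reference per subpage, point the page at its mapping and index, then store under xas_lock_irq() in a loop that retries on -ENOMEM via xas_nomem(). A stripped-down sketch under those assumptions (conflict detection, memcg charging and the shmem statistics are removed; the function name is mine):

        static int add_page_to_cache(struct page *page, struct address_space *mapping,
                                     pgoff_t index, gfp_t gfp)
        {
                XA_STATE_ORDER(xas, &mapping->i_pages, index, compound_order(page));
                unsigned long nr = compound_nr(page);

                page_ref_add(page, nr);         /* one reference per subpage in the cache */
                page->mapping = mapping;
                page->index = index;

                do {
                        xas_lock_irq(&xas);
                        xas_store(&xas, page);  /* multi-order store covers all subpages */
                        if (xas_error(&xas))
                                goto unlock;
                        mapping->nrpages += nr;
        unlock:
                        xas_unlock_irq(&xas);
                } while (xas_nomem(&xas, gfp)); /* retry after allocating xarray nodes */

                if (xas_error(&xas)) {
                        page->mapping = NULL;   /* undo, as in lines 737-738 */
                        page_ref_sub(page, nr);
                        return xas_error(&xas);
                }
                return 0;
        }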
743 * Like delete_from_page_cache, but substitutes swap for page.
745 static void shmem_delete_from_page_cache(struct page *page, void *radswap)
747 struct address_space *mapping = page->mapping;
750 VM_BUG_ON_PAGE(PageCompound(page), page);
753 error = shmem_replace_entry(mapping, page->index, page, radswap);
754 page->mapping = NULL;
756 __dec_lruvec_page_state(page, NR_FILE_PAGES);
757 __dec_lruvec_page_state(page, NR_SHMEM);
759 put_page(page);
764 * Remove swap entry from page cache, free the swap and its page cache.
789 struct page *page;
793 xas_for_each(&xas, page, end - 1) {
794 if (xas_retry(&xas, page))
796 if (xa_is_value(page))
875 * Check whether a hole-punch or truncation needs to split a huge page,
878 * Eviction (or truncation to 0 size) should never need to split a huge page;
883 * huge page: so the split below relies upon find_get_entries() having stopped
884 * when it found a subpage of the huge page, without getting further references.
886 static bool shmem_punch_compound(struct page *page, pgoff_t start, pgoff_t end)
888 if (!PageTransCompound(page))
891 /* Just proceed to delete a huge page wholly within the range punched */
892 if (PageHead(page) &&
893 page->index >= start && page->index + HPAGE_PMD_NR <= end)
896 /* Try to split huge page, so we can truly punch the hole or truncate */
897 return split_huge_page(page) >= 0;
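For reference, shmem_punch_compound() reassembled from the fragments above (the two early return true statements are inferred from context): it reports whether the page can now be truncated, and false when a huge page straddling the punched range could not be split.

        static bool shmem_punch_compound(struct page *page, pgoff_t start, pgoff_t end)
        {
                if (!PageTransCompound(page))
                        return true;

                /* Just proceed to delete a huge page wholly within the range punched */
                if (PageHead(page) &&
                    page->index >= start && page->index + HPAGE_PMD_NR <= end)
                        return true;

                /* Try to split huge page, so we can truly punch the hole or truncate */
                return split_huge_page(page) >= 0;
        }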
901 * Remove range of pages and swap entries from page cache, and free them.
931 struct page *page = pvec.pages[i];
937 if (xa_is_value(page)) {
941 index, page);
945 VM_BUG_ON_PAGE(page_to_pgoff(page) != index, page);
947 if (!trylock_page(page))
950 if ((!unfalloc || !PageUptodate(page)) &&
951 page_mapping(page) == mapping) {
952 VM_BUG_ON_PAGE(PageWriteback(page), page);
953 if (shmem_punch_compound(page, start, end))
954 truncate_inode_page(mapping, page);
956 unlock_page(page);
965 struct page *page = NULL;
966 shmem_getpage(inode, start - 1, &page, SGP_READ);
967 if (page) {
973 zero_user_segment(page, partial_start, top);
974 set_page_dirty(page);
975 unlock_page(page);
976 put_page(page);
980 struct page *page = NULL;
981 shmem_getpage(inode, end, &page, SGP_READ);
982 if (page) {
983 zero_user_segment(page, 0, partial_end);
984 set_page_dirty(page);
985 unlock_page(page);
986 put_page(page);
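The two shmem_getpage(..., SGP_READ) calls above handle the bytes of a truncation or hole-punch that do not cover whole pages: those sub-page ranges are zeroed in place rather than removed. A sketch of the offset arithmetic that feeds them, with lstart/lend being the byte range and local names assumed from context:

        pgoff_t start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT; /* first whole page */
        pgoff_t end = (lend + 1) >> PAGE_SHIFT;                 /* one past the last whole page */
        unsigned int partial_start = lstart & (PAGE_SIZE - 1);  /* bytes into the first page */
        unsigned int partial_end = (lend + 1) & (PAGE_SIZE - 1);/* bytes into the last page */

The head page (index start - 1) is zeroed from partial_start to PAGE_SIZE, or only up to partial_end when the whole range lies inside one page; the tail page (index end) is zeroed from 0 to partial_end, as lines 965-986 show.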
1008 struct page *page = pvec.pages[i];
1014 if (xa_is_value(page)) {
1017 if (shmem_free_swap(mapping, index, page)) {
1018 /* Swap was replaced by page: retry */
1026 lock_page(page);
1028 if (!unfalloc || !PageUptodate(page)) {
1029 if (page_mapping(page) != mapping) {
1031 unlock_page(page);
1035 VM_BUG_ON_PAGE(PageWriteback(page), page);
1036 if (shmem_punch_compound(page, start, end))
1037 truncate_inode_page(mapping, page);
1039 /* Wipe the page and don't get stuck */
1040 clear_highpage(page);
1041 flush_dcache_page(page);
1042 set_page_dirty(page);
1048 unlock_page(page);
1130 * Part of the huge page can be beyond i_size: subject
1194 struct page **entries, pgoff_t *indices,
1198 struct page *page;
1206 xas_for_each(&xas, page, ULONG_MAX) {
1207 if (xas_retry(&xas, page))
1210 if (!xa_is_value(page))
1213 entry = radix_to_swp_entry(page);
1221 entries[ret] = page;
1236 * Move the swapped pages for an inode to page cache. Returns the count
1248 struct page *page = pvec.pages[i];
1250 if (!xa_is_value(page))
1253 &page, SGP_CACHE,
1257 unlock_page(page);
1258 put_page(page);
1269 * If swap found in inode, free it and move page from swapcache to filecache.
1362 * Move the page from the page cache to the swap cache.
1364 static int shmem_writepage(struct page *page, struct writeback_control *wbc)
1372 VM_BUG_ON_PAGE(PageCompound(page), page);
1373 BUG_ON(!PageLocked(page));
1374 mapping = page->mapping;
1375 index = page->index;
1398 * fallocated page arriving here is now to initialize it and write it.
1400 * That's okay for a page already fallocated earlier, but if we have
1402 * of this page in case we have to undo it, and (b) it may not be a
1404 * reactivate the page, and let shmem_fallocate() quit when too many.
1406 if (!PageUptodate(page)) {
1422 clear_highpage(page);
1423 flush_dcache_page(page);
1424 SetPageUptodate(page);
1427 swap = get_swap_page(page);
1433 * if it's not already there. Do it now before the page is
1443 if (add_to_swap_cache(page, swap,
1452 shmem_delete_from_page_cache(page, swp_to_radix_entry(swap));
1455 BUG_ON(page_mapped(page));
1456 swap_writepage(page, wbc);
1461 put_swap_page(page, swap);
1463 set_page_dirty(page);
1465 return AOP_WRITEPAGE_ACTIVATE; /* Return with page locked */
1466 unlock_page(page);
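A condensed sketch of the shmem_writepage() flow above. The shmem_falloc coordination, swaplist and inode accounting are omitted, and the add_to_swap_cache() gfp/shadow arguments are assumptions for the kernel generation this listing appears to come from; the function name is mine:

        static int writepage_sketch(struct page *page, struct writeback_control *wbc)
        {
                swp_entry_t swap;

                BUG_ON(!PageLocked(page));

                if (!PageUptodate(page)) {
                        /* a page fallocated earlier is initialized at writeout */
                        clear_highpage(page);
                        flush_dcache_page(page);
                        SetPageUptodate(page);
                }

                swap = get_swap_page(page);
                if (!swap.val)
                        goto redirty;

                if (add_to_swap_cache(page, swap,
                                      __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN,
                                      NULL) == 0) {
                        /* a swap entry replaces the page in the shmem page cache */
                        shmem_delete_from_page_cache(page, swp_to_radix_entry(swap));
                        BUG_ON(page_mapped(page));
                        swap_writepage(page, wbc);      /* submit to the swap device */
                        return 0;
                }

                put_swap_page(page, swap);
        redirty:
                set_page_dirty(page);
                return AOP_WRITEPAGE_ACTIVATE;          /* return with page locked */
        }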
1523 static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
1527 struct page *page;
1533 page = swap_cluster_readahead(swap, gfp, &vmf);
1536 return page;
1539 static struct page *shmem_alloc_hugepage(gfp_t gfp,
1545 struct page *page;
1553 page = alloc_pages_vma(gfp | __GFP_COMP | __GFP_NORETRY | __GFP_NOWARN,
1556 if (page)
1557 prep_transhuge_page(page);
1560 return page;
1563 static struct page *shmem_alloc_page(gfp_t gfp,
1567 struct page *page;
1570 page = alloc_page_vma(gfp, &pvma, 0);
1573 return page;
1576 static struct page *shmem_alloc_and_acct_page(gfp_t gfp,
1581 struct page *page;
1593 page = shmem_alloc_hugepage(gfp, info, index);
1595 page = shmem_alloc_page(gfp, info, index);
1596 if (page) {
1597 __SetPageLocked(page);
1598 __SetPageSwapBacked(page);
1599 return page;
1609 * When a page is moved from swapcache to shmem filecache (either by the
1614 * we may need to copy to a suitable page before moving to filecache.
1620 static bool shmem_should_replace_page(struct page *page, gfp_t gfp)
1622 return page_zonenum(page) > gfp_zone(gfp);
1625 static int shmem_replace_page(struct page **pagep, gfp_t gfp,
1628 struct page *oldpage, *newpage;
1674 * both PageSwapCache and page_private after getting page lock;
1693 * Swap in the page pointed to by *pagep.
1694 * Caller has to make sure that *pagep contains a valid swapped page.
1695 * Returns 0 and the page in pagep if success. On failure, returns the
1699 struct page **pagep, enum sgp_type sgp,
1706 struct page *page;
1715 page = lookup_swap_cache(swap, NULL, 0);
1716 if (!page) {
1724 page = shmem_swapin(swap, gfp, info, index);
1725 if (!page) {
1731 /* We have to do this with page locked to prevent races */
1732 lock_page(page);
1733 if (!PageSwapCache(page) || page_private(page) != swap.val ||
1738 if (!PageUptodate(page)) {
1742 wait_on_page_writeback(page);
1746 * physical page after reading from swap.
1748 arch_swap_restore(swap, page);
1750 if (shmem_should_replace_page(page, gfp)) {
1751 error = shmem_replace_page(&page, gfp, info, index);
1756 error = shmem_add_to_page_cache(page, mapping, index,
1768 mark_page_accessed(page);
1770 delete_from_swap_cache(page);
1771 set_page_dirty(page);
1774 *pagep = page;
1780 if (page) {
1781 unlock_page(page);
1782 put_page(page);
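A sketch of the swap-in path declared at lines 1693-1782: look the entry up in the swap cache, fall back to readahead, revalidate under the page lock, optionally copy to an allowed zone, then move the page into the shmem page cache. The memcg/swap accounting, arch_swap_restore() and retry-on-race details are omitted, the trailing shmem_add_to_page_cache() arguments are assumed, and the function name and signature are simplified:

        static int swapin_sketch(struct inode *inode, pgoff_t index,
                                 struct page **pagep, swp_entry_t swap, gfp_t gfp)
        {
                struct address_space *mapping = inode->i_mapping;
                struct shmem_inode_info *info = SHMEM_I(inode);
                struct page *page;
                int error;

                page = lookup_swap_cache(swap, NULL, 0);
                if (!page)
                        page = shmem_swapin(swap, gfp, info, index);    /* readahead */
                if (!page)
                        return -ENOMEM;

                /* We have to do this with page locked to prevent races */
                lock_page(page);
                if (!PageSwapCache(page) || page_private(page) != swap.val) {
                        error = -EEXIST;        /* raced with swapoff/another fault: retry */
                        goto unlock;
                }
                if (!PageUptodate(page)) {
                        error = -EIO;
                        goto unlock;
                }
                wait_on_page_writeback(page);

                /* copy to a lower zone if this one is outside the mapping's gfp mask */
                if (shmem_should_replace_page(page, gfp)) {
                        error = shmem_replace_page(&page, gfp, info, index);
                        if (error)
                                goto unlock;
                }

                error = shmem_add_to_page_cache(page, mapping, index,
                                                swp_to_radix_entry(swap), gfp,
                                                current->mm);   /* charge_mm assumed */
                if (error)
                        goto unlock;

                delete_from_swap_cache(page);
                set_page_dirty(page);
                swap_free(swap);
                *pagep = page;
                return 0;
        unlock:
                unlock_page(page);
                put_page(page);
                return error;
        }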
1789 * shmem_getpage_gfp - find page in cache, or get from swap, or allocate
1793 * entry since a page cannot live in both the swap and page cache.
1799 struct page **pagep, enum sgp_type sgp, gfp_t gfp,
1807 struct page *page;
1827 page = find_lock_entry(mapping, index);
1828 if (xa_is_value(page)) {
1829 error = shmem_swapin_page(inode, index, &page,
1834 *pagep = page;
1838 if (page)
1839 hindex = page->index;
1840 if (page && sgp == SGP_WRITE)
1841 mark_page_accessed(page);
1843 /* fallocated page? */
1844 if (page && !PageUptodate(page)) {
1847 unlock_page(page);
1848 put_page(page);
1849 page = NULL;
1852 if (page || sgp == SGP_READ)
1895 page = shmem_alloc_and_acct_page(gfp, inode, index, true);
1896 if (IS_ERR(page)) {
1898 page = shmem_alloc_and_acct_page(gfp, inode,
1901 if (IS_ERR(page)) {
1904 error = PTR_ERR(page);
1905 page = NULL;
1909 * Try to reclaim some space by splitting a huge page
1924 if (PageTransHuge(page))
1930 __SetPageReferenced(page);
1932 error = shmem_add_to_page_cache(page, mapping, hindex,
1937 lru_cache_add(page);
1940 info->alloced += compound_nr(page);
1941 inode->i_blocks += BLOCKS_PER_PAGE << compound_order(page);
1946 if (PageTransHuge(page) &&
1950 * Part of the huge page is beyond i_size: subject
1967 * Let SGP_FALLOC use the SGP_WRITE optimization on a new page.
1973 * Let SGP_WRITE caller clear ends if write does not fill page;
1974 * but SGP_FALLOC on a page fallocated earlier must initialize
1977 if (sgp != SGP_WRITE && !PageUptodate(page)) {
1980 for (i = 0; i < compound_nr(page); i++) {
1981 clear_highpage(page + i);
1982 flush_dcache_page(page + i);
1984 SetPageUptodate(page);
1991 ClearPageDirty(page);
1992 delete_from_page_cache(page);
2001 *pagep = page + index - hindex;
2008 shmem_inode_unacct_blocks(inode, compound_nr(page));
2010 if (PageTransHuge(page)) {
2011 unlock_page(page);
2012 put_page(page);
2016 if (page) {
2017 unlock_page(page);
2018 put_page(page);
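shmem_getpage_gfp() (lines 1789-2018) is the central lookup-or-create routine. A heavily abridged sketch of its flow, pieced together from the fragments above; huge-page policy, i_size checks, accounting failures and most error unwinding are dropped, the SGP_FALLOC/SGP_WRITE handling of fallocated pages is simplified, and charge_mm/vma/fault_type are stand-ins:

        static int getpage_sketch(struct inode *inode, pgoff_t index,
                                  struct page **pagep, enum sgp_type sgp, gfp_t gfp)
        {
                struct address_space *mapping = inode->i_mapping;
                struct shmem_inode_info *info = SHMEM_I(inode);
                pgoff_t hindex = index;
                struct page *page;
                int error, i;

                page = find_lock_entry(mapping, index);
                if (xa_is_value(page)) {
                        /* a swap entry, not a page: bring it back from swap */
                        error = shmem_swapin_page(inode, index, &page, sgp, gfp,
                                                  NULL, NULL);  /* vma/fault_type elided */
                        *pagep = page;
                        return error;
                }

                if (page) {
                        hindex = page->index;
                        if (sgp == SGP_WRITE)
                                mark_page_accessed(page);
                        if (!PageUptodate(page) && sgp == SGP_READ) {
                                /* fallocated but never written: reads see a hole */
                                unlock_page(page);
                                put_page(page);
                                page = NULL;
                                hindex = index;
                        }
                }
                if (page || sgp == SGP_READ)
                        goto out;               /* SGP_READ may legitimately return no page */

                /* nothing in the cache: allocate, preferring a huge page when allowed */
                page = shmem_alloc_and_acct_page(gfp, inode, index, true);
                if (IS_ERR(page))
                        page = shmem_alloc_and_acct_page(gfp, inode, index, false);
                if (IS_ERR(page))
                        return PTR_ERR(page);

                hindex = round_down(index, compound_nr(page));
                __SetPageReferenced(page);
                error = shmem_add_to_page_cache(page, mapping, hindex, NULL, gfp,
                                                current->mm);   /* charge_mm assumed */
                if (error)
                        return error;           /* real code unaccounts and frees here */
                lru_cache_add(page);

                info->alloced += compound_nr(page);
                inode->i_blocks += BLOCKS_PER_PAGE << compound_order(page);

                /* zero new pages unless the caller (SGP_WRITE) will fill them */
                if (sgp != SGP_WRITE && !PageUptodate(page)) {
                        for (i = 0; i < compound_nr(page); i++) {
                                clear_highpage(page + i);
                                flush_dcache_page(page + i);
                        }
                        SetPageUptodate(page);
                }
        out:
                *pagep = page ? page + index - hindex : NULL;   /* subpage of a huge page */
                return 0;
        }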
2058 * keep up, as each new page needs its own unmap_mapping_range() call,
2119 err = shmem_getpage_gfp(inode, vmf->pgoff, &vmf->page, sgp,
2357 struct page **pagep)
2366 struct page *page;
2374 * We may have got a page, returned -ENOENT triggering a retry,
2375 * and now we find ourselves with -ENOMEM. Release the page, to
2386 page = shmem_alloc_page(gfp, info, pgoff);
2387 if (!page)
2391 page_kaddr = kmap_atomic(page);
2399 *pagep = page;
2401 /* don't free the page */
2405 clear_highpage(page);
2408 page = *pagep;
2412 VM_BUG_ON(PageLocked(page) || PageSwapBacked(page));
2413 __SetPageLocked(page);
2414 __SetPageSwapBacked(page);
2415 __SetPageUptodate(page);
2423 ret = shmem_add_to_page_cache(page, mapping, pgoff, NULL,
2428 _dst_pte = mk_pte(page, dst_vma->vm_page_prot);
2434 * VM_WRITE permission, so mark the page dirty or it
2439 set_page_dirty(page);
2453 lru_cache_add(page);
2461 inc_mm_counter(dst_mm, mm_counter_file(page));
2462 page_add_file_rmap(page, false);
2468 unlock_page(page);
2474 ClearPageDirty(page);
2475 delete_from_page_cache(page);
2477 unlock_page(page);
2478 put_page(page);
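The userfaultfd path above (lines 2357-2478) copies user data into a freshly allocated shmem page before any page-table entry exists. A sketch of just the copy step, with names taken from the fragments and error paths abridged, illustrating the comments at 2374-2375: if the copy faults while the kernel map is atomic, the page is parked in *pagep and -ENOENT asks the caller to fault the source in and retry:

        page_kaddr = kmap_atomic(page);
        ret = copy_from_user(page_kaddr, (const void __user *)src_addr, PAGE_SIZE);
        kunmap_atomic(page_kaddr);
        if (unlikely(ret)) {
                *pagep = page;          /* don't free the page */
                return -ENOENT;         /* caller retries the copy, then calls back in */
        }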
2489 struct page **pagep)
2500 struct page *page = NULL;
2503 dst_addr, 0, true, &page);
2519 struct page **pagep, void **fsdata)
2540 struct page *page, void *fsdata)
2547 if (!PageUptodate(page)) {
2548 struct page *head = compound_head(page);
2549 if (PageTransCompound(page)) {
2553 if (head + i == page)
2561 zero_user_segments(page, 0, from,
2566 set_page_dirty(page);
2567 unlock_page(page);
2568 put_page(page);
2597 struct page *page = NULL;
2611 error = shmem_getpage(inode, index, &page, sgp);
2617 if (page) {
2619 set_page_dirty(page);
2620 unlock_page(page);
2633 if (page)
2634 put_page(page);
2640 if (page) {
2642 * If users can be writing to this page using arbitrary
2644 * before reading the page on the kernel side.
2647 flush_dcache_page(page);
2649 * Mark the page accessed if we read the beginning.
2652 mark_page_accessed(page);
2654 page = ZERO_PAGE(0);
2655 get_page(page);
2659 * Ok, we have the page, and it's up-to-date, so
2662 ret = copy_page_to_iter(page, offset, nr, to);
2668 put_page(page);
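The read loop above (lines 2597-2668) tolerates holes: shmem_getpage(SGP_READ) may come back with no page at all, in which case the zero page stands in for the missing data. A sketch of one iteration, with offset/nr/to as in the surrounding code and the i_size handling dropped:

        error = shmem_getpage(inode, index, &page, SGP_READ);
        if (error)
                break;
        if (page) {
                unlock_page(page);
                /* writable mappings may have dirtied the data through the cache */
                if (mapping_writably_mapped(mapping))
                        flush_dcache_page(page);
                if (!offset)
                        mark_page_accessed(page);       /* read the beginning */
        } else {
                page = ZERO_PAGE(0);                    /* reading a hole */
                get_page(page);
        }
        ret = copy_page_to_iter(page, offset, nr, to);
        put_page(page);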
2684 * llseek SEEK_DATA or SEEK_HOLE through the page cache.
2689 struct page *page;
2713 page = pvec.pages[i];
2714 if (page && !xa_is_value(page)) {
2715 if (!PageUptodate(page))
2716 page = NULL;
2719 (page && whence == SEEK_DATA) ||
2720 (!page && whence == SEEK_HOLE)) {
2846 struct page *page;
2857 error = shmem_getpage(inode, index, &page, SGP_FALLOC);
2870 * No need for lock or barrier: we have the page lock.
2873 if (!PageUptodate(page))
2883 set_page_dirty(page);
2884 unlock_page(page);
2885 put_page(page);
3149 struct page *page;
3177 error = shmem_getpage(inode, 0, &page, SGP_WRITE);
3184 memcpy(page_address(page), symname, len);
3185 SetPageUptodate(page);
3186 set_page_dirty(page);
3187 unlock_page(page);
3188 put_page(page);
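A sketch connecting the comment at line 94 with the symlink code above: short targets are kmalloc'ed and stored inline in inode->i_link (pinned, never swapped), while longer ones go into a pagecache page obtained with SGP_WRITE. The threshold macro and the operations-struct name are as assumed for this kernel version, and error handling is elided:

        if (len <= SHORT_SYMLINK_LEN) {
                inode->i_link = kmemdup(symname, len, GFP_KERNEL);  /* not swappable */
                inode->i_op = &shmem_short_symlink_operations;
        } else {
                error = shmem_getpage(inode, 0, &page, SGP_WRITE);  /* swappable page */
                memcpy(page_address(page), symname, len);
                SetPageUptodate(page);
                set_page_dirty(page);
                unlock_page(page);
                put_page(page);
        }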
3207 struct page *page = NULL;
3210 page = find_get_page(inode->i_mapping, 0);
3211 if (!page)
3213 if (!PageUptodate(page)) {
3214 put_page(page);
3218 error = shmem_getpage(inode, 0, &page, SGP_READ);
3221 unlock_page(page);
3223 set_delayed_call(done, shmem_put_link, page);
3224 return page_address(page);
3754 * tmpfs instance, limiting inodes to one per page of lowmem;
4313 * shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags.
4314 * @mapping: the page's address_space
4315 * @index: the page index
4316 * @gfp: the page allocator flags to use if allocating
4319 * with any new page allocations done using the specified allocation flags.
4327 struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
4332 struct page *page;
4336 error = shmem_getpage_gfp(inode, index, &page, SGP_CACHE,
4339 page = ERR_PTR(error);
4341 unlock_page(page);
4342 return page;
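A hypothetical caller, in the style of drivers that keep their buffer objects in shmem-backed mappings (the helper name is illustrative): read, and allocate if necessary, one object page using the mapping's own gfp mask.

        static struct page *bo_get_page(struct address_space *mapping, pgoff_t n)
        {
                return shmem_read_mapping_page_gfp(mapping, n,
                                                   mapping_gfp_mask(mapping));
        }

shmem_fs.h also provides shmem_read_mapping_page() as essentially this convenience wrapper; shmem_read_mapping_page_gfp() is for callers that need a different gfp mask than the mapping's default.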