Lines matching defs:page

96 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
107 pgoff_t next; /* the next page offset to be fallocated */
432 * to stop a racing shmem_recalc_inode() from thinking that a page has
492 * Checking page is not enough: by the time a SwapCache page is locked, it
509 * only allocate huge pages if the page will be fully within i_size,
676 /* No huge page at the end of the file: nothing to split */
684 * to lock the page at this time.
848 * Remove swap entry from page cache, free the swap and its page cache.
873 struct page *page;
878 xas_for_each(&xas, page, max) {
879 if (xas_retry(&xas, page))
881 if (xa_is_value(page))
978 * Remove range of pages and swap entries from page cache, and free them.
1082 /* Swap was replaced by page: retry */
1106 * If we split a page, reset the loop so
1330 * Move the swapped pages for an inode to page cache. Returns the count
1363 * If swap found in inode, free it and move page from swapcache to filecache.
1437 * Move the page from the page cache to the swap cache.
1439 static int shmem_writepage(struct page *page, struct writeback_control *wbc)
1441 struct folio *folio = page_folio(page);
1473 if (split_huge_page(page) < 0)
1475 folio = page_folio(page);
1538 swap_writepage(&folio->page, wbc);
1609 struct page *page;
1615 page = swap_cluster_readahead(swap, gfp, &vmf);
1618 if (!page)
1620 return page_folio(page);
1714 * When a page is moved from swapcache to shmem filecache (either by the
1719 * we may need to copy to a suitable page before moving to filecache.
1782 * both PageSwapCache and page_private after getting page lock;
1935 * shmem_get_folio_gfp - find page in cache, or get from swap, or allocate
1939 * entry since a page cannot live in both the swap and page cache.
2191 * keep up, as each new page needs its own unmap_mapping_range() call,
2249 vmf->page = folio_file_page(folio, vmf->pgoff);
2587 * We may have got a page, returned -ENOENT triggering a retry,
2588 * and now we find ourselves with -ENOMEM. Release the page, to
2613 * process B thread 1 takes page fault, read lock on own mmap lock
2618 * Disable page faults to prevent potential deadlock
2632 /* don't free the page */
2638 clear_user_highpage(&folio->page, dst_addr);
2663 &folio->page, true, flags);
2688 struct page **pagep, void **fsdata)
2724 struct page *page, void *fsdata)
2726 struct folio *folio = page_folio(page);
2763 struct page *page = NULL;
2786 page = folio_file_page(folio, index);
2787 if (PageHWPoison(page)) {
2813 * If users can be writing to this page using arbitrary
2815 * before reading the page on the kernel side.
2818 flush_dcache_page(page);
2820 * Mark the page accessed if we read the beginning.
2825 * Ok, we have the page, and it's up-to-date, so
2828 ret = copy_page_to_iter(page, offset, nr, to);
2835 * faster to copy the zero page instead of clearing.
2840 * But submitting the same page twice in a row to
2923 .page = ZERO_PAGE(0),
2976 * part of the page is not copied back to userspace (unless
2986 * If users can be writing to this page using arbitrary
2988 * before reading the page on the kernel side.
2994 * Ok, we have the page, and it's up-to-date, so we can
3165 * No need for lock or barrier: we have the page lock.
4301 * tmpfs instance, limiting inodes to one per page of lowmem;
4458 /* Keep the page in page cache instead of truncating it */
4460 struct page *page)
4893 * shmem_read_folio_gfp - read into page cache, using specified page allocation flags.
4896 * @gfp: the page allocator flags to use if allocating
4899 * with any new page allocations done using the specified allocation flags.
4932 struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
4936 struct page *page;
4939 return &folio->page;
4941 page = folio_file_page(folio, index);
4942 if (PageHWPoison(page)) {
4947 return page;
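
The last few hits (shmem_read_folio_gfp / shmem_read_mapping_page_gfp, around source lines 4893-4947) document the exported tmpfs read helper. As a rough illustration of how a caller outside this file might use it, here is a minimal sketch. It assumes the listing above comes from the kernel's mm/shmem.c; demo_touch_shmem_page and its surrounding error handling are hypothetical, not code from the file, and only shmem_read_mapping_page_gfp() and the standard page/highmem helpers are real APIs.

#include <linux/err.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/string.h>

/* Hypothetical helper, not part of shmem.c: zero one page of a tmpfs file. */
static int demo_touch_shmem_page(struct file *shmem_file, pgoff_t index)
{
        struct address_space *mapping = file_inode(shmem_file)->i_mapping;
        gfp_t gfp = mapping_gfp_mask(mapping); /* used only if allocating */
        struct page *page;
        void *kaddr;

        /*
         * Returns the page unlocked, up to date, with a reference held;
         * an ERR_PTR such as -ENOMEM, or -EIO for a HWPoisoned page as
         * the final hits above suggest.
         */
        page = shmem_read_mapping_page_gfp(mapping, index, gfp);
        if (IS_ERR(page))
                return PTR_ERR(page);

        kaddr = kmap_local_page(page);
        memset(kaddr, 0, PAGE_SIZE);
        kunmap_local(kaddr);

        set_page_dirty(page);   /* keep the data across reclaim/swap */
        put_page(page);         /* drop the reference taken for us */
        return 0;
}

The gfp argument only matters when shmem has to allocate (or swap in) the page; a page already resident in the page cache is returned as-is, which is why GEM-style users of this helper typically pass a mask derived from mapping_gfp_mask() rather than a hard-coded one.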