Lines matching defs: folio (mm/filemap.c)
127 struct folio *folio, void *shadow)
129 XA_STATE(xas, &mapping->i_pages, folio->index);
135 if (!folio_test_hugetlb(folio)) {
136 xas_set_order(&xas, folio->index, folio_order(folio));
137 nr = folio_nr_pages(folio);
140 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
145 folio->mapping = NULL;
151 struct folio *folio)
155 VM_BUG_ON_FOLIO(folio_mapped(folio), folio);
156 if (!IS_ENABLED(CONFIG_DEBUG_VM) && unlikely(folio_mapped(folio))) {
158 current->comm, folio_pfn(folio));
159 dump_page(&folio->page, "still mapped when deleted");
163 if (mapping_exiting(mapping) && !folio_test_large(folio)) {
164 int mapcount = page_mapcount(&folio->page);
166 if (folio_ref_count(folio) >= mapcount + 2) {
173 page_mapcount_reset(&folio->page);
174 folio_ref_sub(folio, mapcount);
180 if (folio_test_hugetlb(folio))
183 nr = folio_nr_pages(folio);
185 __lruvec_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
186 if (folio_test_swapbacked(folio)) {
187 __lruvec_stat_mod_folio(folio, NR_SHMEM, -nr);
188 if (folio_test_pmd_mappable(folio))
189 __lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, -nr);
190 } else if (folio_test_pmd_mappable(folio)) {
191 __lruvec_stat_mod_folio(folio, NR_FILE_THPS, -nr);
196 * At this point folio must be either written or cleaned by
197 * truncate. Dirty folio here signals a bug and loss of
204 * Below fixes dirty accounting after removing the folio entirely
206 * folio and anyway will be cleared before returning folio to
209 if (WARN_ON_ONCE(folio_test_dirty(folio) &&
211 folio_account_cleaned(folio, inode_to_wb(mapping->host));
219 void __filemap_remove_folio(struct folio *folio, void *shadow)
221 struct address_space *mapping = folio->mapping;
223 trace_mm_filemap_delete_from_page_cache(folio);
224 filemap_unaccount_folio(mapping, folio);
225 page_cache_delete(mapping, folio, shadow);
228 void filemap_free_folio(struct address_space *mapping, struct folio *folio)
230 void (*free_folio)(struct folio *);
235 free_folio(folio);
237 if (folio_test_large(folio) && !folio_test_hugetlb(folio))
238 refs = folio_nr_pages(folio);
239 folio_put_refs(folio, refs);
243 * filemap_remove_folio - Remove folio from page cache.
244 * @folio: The folio.
247 * verified to be in the page cache. It will never put the folio into
250 void filemap_remove_folio(struct folio *folio)
252 struct address_space *mapping = folio->mapping;
254 BUG_ON(!folio_test_locked(folio));
257 __filemap_remove_folio(folio, NULL);
263 filemap_free_folio(mapping, folio);
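
A minimal sketch, using a hypothetical example_evict_folio() helper, of how a truncate-style caller uses filemap_remove_folio(): the folio must be locked, still belong to the mapping, and the caller keeps its own reference until it is done.

    #include <linux/pagemap.h>

    /* Hypothetical: evict one locked, referenced folio from @mapping. */
    static void example_evict_folio(struct address_space *mapping,
                                    struct folio *folio)
    {
            /* The caller locked @folio and holds a reference on it. */
            if (folio->mapping == mapping)  /* not truncated while we slept */
                    filemap_remove_folio(folio);

            folio_unlock(folio);
            folio_put(folio);               /* drop the caller's reference */
    }
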
285 struct folio *folio;
288 xas_for_each(&xas, folio, ULONG_MAX) {
293 if (xa_is_value(folio))
302 if (folio != fbatch->folios[i]) {
303 VM_BUG_ON_FOLIO(folio->index >
304 fbatch->folios[i]->index, folio);
308 WARN_ON_ONCE(!folio_test_locked(folio));
310 folio->mapping = NULL;
311 /* Leave folio->index set: truncation lookup relies on it */
315 total_pages += folio_nr_pages(folio);
331 struct folio *folio = fbatch->folios[i];
333 trace_mm_filemap_delete_from_page_cache(folio);
334 filemap_unaccount_folio(mapping, folio);
475 struct folio *folio;
484 folio = xas_find(&xas, max);
485 if (xas_retry(&xas, folio))
488 if (xa_is_value(folio))
499 return folio != NULL;
523 struct folio *folio = fbatch.folios[i];
525 folio_wait_writeback(folio);
526 folio_clear_error(folio);
636 struct folio *folio;
642 xas_for_each(&xas, folio, max) {
643 if (xas_retry(&xas, folio))
645 if (xa_is_value(folio))
647 if (folio_test_dirty(folio) || folio_test_locked(folio) ||
648 folio_test_writeback(folio))
652 return folio != NULL;
795 * replace_page_cache_folio - replace a pagecache folio with a new one
796 * @old: folio to be replaced
797 * @new: folio to replace with
799 * This function replaces a folio in the pagecache with a new one. On
800 * success it acquires the pagecache reference for the new folio and
801 * drops it for the old folio. Both the old and new folios must be
802 * locked. This function does not add the new folio to the LRU, the
807 void replace_page_cache_folio(struct folio *old, struct folio *new)
810 void (*free_folio)(struct folio *) = mapping->a_ops->free_folio;
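
A hedged sketch of the calling convention for replace_page_cache_folio(); example_replace_folio is a hypothetical wrapper. Both folios must be locked and @new must already carry the data; the function itself moves the page-cache reference from @old to @new and leaves LRU handling to the caller.

    #include <linux/pagemap.h>

    /* Hypothetical: swap @old for @new at the same index in the page cache. */
    static void example_replace_folio(struct folio *old, struct folio *new)
    {
            folio_lock(old);
            folio_lock(new);                /* both folios must be locked */

            /* Takes the pagecache reference on @new, drops it on @old. */
            replace_page_cache_folio(old, new);

            folio_unlock(new);
            folio_unlock(old);
    }
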
845 struct folio *folio, pgoff_t index, gfp_t gfp, void **shadowp)
848 int huge = folio_test_hugetlb(folio);
852 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
853 VM_BUG_ON_FOLIO(folio_test_swapbacked(folio), folio);
857 int error = mem_cgroup_charge(folio, NULL, gfp);
858 VM_BUG_ON_FOLIO(index & (folio_nr_pages(folio) - 1), folio);
862 xas_set_order(&xas, index, folio_order(folio));
863 nr = folio_nr_pages(folio);
867 folio_ref_add(folio, nr);
868 folio->mapping = mapping;
869 folio->index = xas.xa_index;
875 if (order > folio_order(folio))
892 if (order > folio_order(folio)) {
900 xas_store(&xas, folio);
908 __lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr);
909 if (folio_test_pmd_mappable(folio))
910 __lruvec_stat_mod_folio(folio,
920 trace_mm_filemap_add_to_page_cache(folio);
924 mem_cgroup_uncharge(folio);
925 folio->mapping = NULL;
927 folio_put_refs(folio, nr);
932 int filemap_add_folio(struct address_space *mapping, struct folio *folio,
938 __folio_set_locked(folio);
939 ret = __filemap_add_folio(mapping, folio, index, gfp, &shadow);
941 __folio_clear_locked(folio);
944 * The folio might have been evicted from cache only
946 * any other repeatedly accessed folio.
951 WARN_ON_ONCE(folio_test_active(folio));
953 workingset_refault(folio, shadow);
954 folio_add_lru(folio);
961 struct folio *filemap_alloc_folio(gfp_t gfp, unsigned int order)
964 struct folio *folio;
971 folio = __folio_alloc_node(gfp, order, n);
972 } while (!folio && read_mems_allowed_retry(cpuset_mems_cookie));
974 return folio;
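
A minimal sketch, with a hypothetical example_add_new_folio() helper, of the allocate-then-insert pattern behind filemap_alloc_folio() and filemap_add_folio(); -EEXIST from the insert means another task populated @index first.

    #include <linux/err.h>
    #include <linux/pagemap.h>

    /* Hypothetical: add a fresh order-0 folio to @mapping at @index. */
    static struct folio *example_add_new_folio(struct address_space *mapping,
                                               pgoff_t index)
    {
            struct folio *folio;
            int err;

            folio = filemap_alloc_folio(mapping_gfp_mask(mapping), 0);
            if (!folio)
                    return ERR_PTR(-ENOMEM);

            /* Charges memcg, takes the pagecache reference, adds to the LRU. */
            err = filemap_add_folio(mapping, folio, index,
                                    mapping_gfp_constraint(mapping, GFP_KERNEL));
            if (err) {
                    folio_put(folio);
                    return ERR_PTR(err);    /* -EEXIST: lost the race */
            }
            return folio;                   /* locked, not yet uptodate */
    }
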
1033 static wait_queue_head_t *folio_waitqueue(struct folio *folio)
1035 return &folio_wait_table[hash_ptr(folio, PAGE_WAIT_TABLE_BITS)];
1098 if (test_bit(key->bit_nr, &key->folio->flags))
1101 if (test_and_set_bit(key->bit_nr, &key->folio->flags))
1133 static void folio_wake_bit(struct folio *folio, int bit_nr)
1135 wait_queue_head_t *q = folio_waitqueue(folio);
1140 key.folio = folio;
1175 folio_clear_waiters(folio);
1180 static void folio_wake(struct folio *folio, int bit)
1182 if (!folio_test_waiters(folio))
1184 folio_wake_bit(folio, bit);
1203 * Attempt to check (or get) the folio flag, and mark us done
1206 static inline bool folio_trylock_flag(struct folio *folio, int bit_nr,
1210 if (test_and_set_bit(bit_nr, &folio->flags))
1212 } else if (test_bit(bit_nr, &folio->flags))
1222 static inline int folio_wait_bit_common(struct folio *folio, int bit_nr,
1225 wait_queue_head_t *q = folio_waitqueue(folio);
1234 !folio_test_uptodate(folio) && folio_test_workingset(folio)) {
1242 wait_page.folio = folio;
1268 folio_set_waiters(folio);
1269 if (!folio_trylock_flag(folio, bit_nr, wait))
1279 * We can drop our reference to the folio.
1282 folio_put(folio);
1319 if (unlikely(test_and_set_bit(bit_nr, folio_flags(folio, 0))))
1328 * waiter from the wait-queues, but the folio waiters bit will remain
1384 struct folio *folio = page_folio(pfn_swap_entry_to_page(entry));
1386 q = folio_waitqueue(folio);
1387 if (!folio_test_uptodate(folio) && folio_test_workingset(folio)) {
1395 wait_page.folio = folio;
1400 folio_set_waiters(folio);
1401 if (!folio_trylock_flag(folio, PG_locked, wait))
1438 void folio_wait_bit(struct folio *folio, int bit_nr)
1440 folio_wait_bit_common(folio, bit_nr, TASK_UNINTERRUPTIBLE, SHARED);
1444 int folio_wait_bit_killable(struct folio *folio, int bit_nr)
1446 return folio_wait_bit_common(folio, bit_nr, TASK_KILLABLE, SHARED);
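
Roughly how the convenience waiters in pagemap.h are assumed to layer on folio_wait_bit(); the example_* names are hypothetical stand-ins for folio_wait_locked() and folio_wait_locked_killable().

    #include <linux/pagemap.h>

    /* Sleep until PG_locked is clear (mirrors folio_wait_locked()). */
    static void example_wait_unlocked(struct folio *folio)
    {
            if (folio_test_locked(folio))
                    folio_wait_bit(folio, PG_locked);
    }

    /* Same, but a fatal signal aborts the wait with -EINTR. */
    static int example_wait_unlocked_killable(struct folio *folio)
    {
            if (folio_test_locked(folio))
                    return folio_wait_bit_killable(folio, PG_locked);
            return 0;
    }
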
1452 * @folio: The folio to wait for.
1455 * The caller should hold a reference on @folio. They expect the page to
1457 * (for example) by holding the reference while waiting for the folio to
1459 * dereference @folio.
1461 * Return: 0 if the folio was unlocked or -EINTR if interrupted by a signal.
1463 static int folio_put_wait_locked(struct folio *folio, int state)
1465 return folio_wait_bit_common(folio, PG_locked, state, DROP);
1469 * folio_add_wait_queue - Add an arbitrary waiter to a folio's wait queue
1470 * @folio: Folio defining the wait queue of interest
1473 * Add an arbitrary @waiter to the wait queue for the nominated @folio.
1475 void folio_add_wait_queue(struct folio *folio, wait_queue_entry_t *waiter)
1477 wait_queue_head_t *q = folio_waitqueue(folio);
1482 folio_set_waiters(folio);
1511 * folio_unlock - Unlock a locked folio.
1512 * @folio: The folio.
1514 * Unlocks the folio and wakes up any thread sleeping on the page lock.
1519 void folio_unlock(struct folio *folio)
1524 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
1525 if (clear_bit_unlock_is_negative_byte(PG_locked, folio_flags(folio, 0)))
1526 folio_wake_bit(folio, PG_locked);
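
A hedged sketch of the usual lock/recheck/unlock pattern around folio_unlock(); example_with_folio_locked is hypothetical. After folio_lock() returns, callers re-check folio->mapping because truncation may have detached the folio while they slept.

    #include <linux/pagemap.h>

    /* Hypothetical: lock @folio, confirm it is still in @mapping, do work. */
    static bool example_with_folio_locked(struct address_space *mapping,
                                          struct folio *folio)
    {
            folio_lock(folio);                      /* may sleep */

            if (folio->mapping != mapping) {        /* truncated meanwhile */
                    folio_unlock(folio);
                    return false;
            }

            /* ... operate on the locked folio ... */

            folio_unlock(folio);                    /* wakes PG_locked waiters */
            return true;
    }
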
1532 * @folio: The folio.
1534 * Clear the PG_private_2 bit on a folio and wake up any sleepers waiting for
1535 * it. The folio reference held for PG_private_2 being set is released.
1537 * This is, for example, used when a netfs folio is being written to a local
1538 * disk cache, thereby allowing writes to the cache for the same folio to be
1541 void folio_end_private_2(struct folio *folio)
1543 VM_BUG_ON_FOLIO(!folio_test_private_2(folio), folio);
1544 clear_bit_unlock(PG_private_2, folio_flags(folio, 0));
1545 folio_wake_bit(folio, PG_private_2);
1546 folio_put(folio);
1551 * folio_wait_private_2 - Wait for PG_private_2 to be cleared on a folio.
1552 * @folio: The folio to wait on.
1554 * Wait for PG_private_2 (aka PG_fscache) to be cleared on a folio.
1556 void folio_wait_private_2(struct folio *folio)
1558 while (folio_test_private_2(folio))
1559 folio_wait_bit(folio, PG_private_2);
1564 * folio_wait_private_2_killable - Wait for PG_private_2 to be cleared on a folio.
1565 * @folio: The folio to wait on.
1567 * Wait for PG_private_2 (aka PG_fscache) to be cleared on a folio or until a
1574 int folio_wait_private_2_killable(struct folio *folio)
1578 while (folio_test_private_2(folio)) {
1579 ret = folio_wait_bit_killable(folio, PG_private_2);
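
A sketch of the PG_private_2 (fscache) convention implied by folio_end_private_2() and folio_wait_private_2(); the example_* helpers are hypothetical, and real callers normally go through the netfs wrappers.

    #include <linux/page-flags.h>
    #include <linux/pagemap.h>

    /* Hypothetical: mark @folio as being written to the local cache. */
    static void example_start_cache_write(struct folio *folio)
    {
            folio_get(folio);               /* reference held for PG_private_2 */
            folio_set_private_2(folio);
    }

    /* Hypothetical: the cache write finished. */
    static void example_end_cache_write(struct folio *folio)
    {
            /* Clears PG_private_2, wakes waiters, drops the reference above. */
            folio_end_private_2(folio);
    }

    /* Hypothetical: anyone who must not race with the cache write waits here. */
    static void example_wait_for_cache_write(struct folio *folio)
    {
            folio_wait_private_2(folio);
    }
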
1589 * folio_end_writeback - End writeback against a folio.
1590 * @folio: The folio.
1592 void folio_end_writeback(struct folio *folio)
1597 * to shuffle a folio marked for immediate reclaim is too mild
1599 * end of every folio writeback.
1601 if (folio_test_reclaim(folio)) {
1602 folio_clear_reclaim(folio);
1603 folio_rotate_reclaimable(folio);
1607 * Writeback does not hold a folio reference of its own, relying
1609 * But here we must make sure that the folio is not freed and
1612 folio_get(folio);
1613 if (!__folio_end_writeback(folio))
1617 folio_wake(folio, PG_writeback);
1618 acct_reclaim_writeback(folio);
1619 folio_put(folio);
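
A hedged sketch of where folio_end_writeback() sits in a write-out path; both example_* functions are hypothetical, and dirty-flag handling (folio_clear_dirty_for_io() and friends) is omitted.

    #include <linux/page-flags.h>
    #include <linux/pagemap.h>

    /* Hypothetical: start write-out of a locked folio. */
    static void example_submit_folio(struct folio *folio)
    {
            folio_start_writeback(folio);   /* sets PG_writeback */
            folio_unlock(folio);
            /* ... queue the actual I/O for the folio here ... */
    }

    /* Hypothetical: called from the I/O completion path. */
    static void example_writeback_done(struct folio *folio)
    {
            /* Clears PG_writeback and wakes folio_wait_writeback() callers. */
            folio_end_writeback(folio);
    }
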
1624 * __folio_lock - Get a lock on the folio, assuming we need to sleep to get it.
1625 * @folio: The folio to lock
1627 void __folio_lock(struct folio *folio)
1629 folio_wait_bit_common(folio, PG_locked, TASK_UNINTERRUPTIBLE,
1634 int __folio_lock_killable(struct folio *folio)
1636 return folio_wait_bit_common(folio, PG_locked, TASK_KILLABLE,
1641 static int __folio_lock_async(struct folio *folio, struct wait_page_queue *wait)
1643 struct wait_queue_head *q = folio_waitqueue(folio);
1646 wait->folio = folio;
1651 folio_set_waiters(folio);
1652 ret = !folio_trylock(folio);
1669 * 0 - folio is locked.
1670 * non-zero - folio is not locked.
1676 * with the folio locked and the mmap_lock/per-VMA lock is left unperturbed.
1678 vm_fault_t __folio_lock_or_retry(struct folio *folio, struct vm_fault *vmf)
1692 folio_wait_locked_killable(folio);
1694 folio_wait_locked(folio);
1700 ret = __folio_lock_killable(folio);
1706 __folio_lock(folio);
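
For context, folio_lock() itself is assumed to be the thin pagemap.h wrapper sketched below (example_lock_folio is a hypothetical copy): the fast path is an atomic trylock, and only contention falls through to the sleeping __folio_lock().

    #include <linux/pagemap.h>

    /* Mirrors folio_lock(): trylock first, sleep only on contention. */
    static void example_lock_folio(struct folio *folio)
    {
            might_sleep();
            if (!folio_trylock(folio))
                    __folio_lock(folio);    /* sleeps in folio_wait_bit_common() */
    }
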
1787 * 1. Load the folio from i_pages
1789 * 3. If the folio is not found by xas_reload(), put the refcount and retry
1809 * Looks up the page cache entry at @mapping & @index. If it is a folio,
1811 * of a previously evicted folio, or a swap entry from shmem/tmpfs,
1814 * Return: The folio, swap or shadow entry, %NULL if nothing is found.
1819 struct folio *folio;
1824 folio = xas_load(&xas);
1825 if (xas_retry(&xas, folio))
1831 if (!folio || xa_is_value(folio))
1834 if (!folio_try_get_rcu(folio))
1837 if (unlikely(folio != xas_reload(&xas))) {
1838 folio_put(folio);
1844 return folio;
1848 * __filemap_get_folio - Find and get a reference to a folio.
1851 * @fgp_flags: %FGP flags modify how the folio is returned.
1859 * If this function returns a folio, it is returned with an increased refcount.
1861 * Return: The found folio or an ERR_PTR() otherwise.
1863 struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index,
1866 struct folio *folio;
1869 folio = filemap_get_entry(mapping, index);
1870 if (xa_is_value(folio))
1871 folio = NULL;
1872 if (!folio)
1877 if (!folio_trylock(folio)) {
1878 folio_put(folio);
1882 folio_lock(folio);
1886 if (unlikely(folio->mapping != mapping)) {
1887 folio_unlock(folio);
1888 folio_put(folio);
1891 VM_BUG_ON_FOLIO(!folio_contains(folio, index), folio);
1895 folio_mark_accessed(folio);
1898 if (folio_test_idle(folio))
1899 folio_clear_idle(folio);
1903 folio_wait_stable(folio);
1905 if (!folio && (fgp_flags & FGP_CREAT)) {
1924 /* If we're not aligned, allocate a smaller folio */
1936 folio = filemap_alloc_folio(alloc_gfp, order);
1937 if (!folio)
1942 __folio_set_referenced(folio);
1944 err = filemap_add_folio(mapping, folio, index, gfp);
1947 folio_put(folio);
1948 folio = NULL;
1959 if (folio && (fgp_flags & FGP_FOR_MMAP))
1960 folio_unlock(folio);
1963 if (!folio)
1965 return folio;
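
A hedged sketch of a typical FGP_LOCK|FGP_CREAT caller of __filemap_get_folio(); example_zero_folio_at is hypothetical, and a real write path would also read in or partially preserve existing data.

    #include <linux/highmem.h>
    #include <linux/pagemap.h>

    /* Hypothetical: get (or create) the folio covering @pos, zero and dirty it. */
    static int example_zero_folio_at(struct address_space *mapping, loff_t pos)
    {
            struct folio *folio;

            folio = __filemap_get_folio(mapping, pos >> PAGE_SHIFT,
                                        FGP_LOCK | FGP_CREAT,
                                        mapping_gfp_mask(mapping));
            if (IS_ERR(folio))
                    return PTR_ERR(folio);  /* e.g. -ENOMEM */

            folio_zero_range(folio, 0, folio_size(folio));
            folio_mark_uptodate(folio);
            folio_mark_dirty(folio);

            folio_unlock(folio);            /* FGP_LOCK returned it locked */
            folio_put(folio);               /* drop the lookup reference */
            return 0;
    }
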
1969 static inline struct folio *find_get_entry(struct xa_state *xas, pgoff_t max,
1972 struct folio *folio;
1976 folio = xas_find(xas, max);
1978 folio = xas_find_marked(xas, max, mark);
1980 if (xas_retry(xas, folio))
1987 if (!folio || xa_is_value(folio))
1988 return folio;
1990 if (!folio_try_get_rcu(folio))
1993 if (unlikely(folio != xas_reload(xas))) {
1994 folio_put(folio);
1998 return folio;
2028 struct folio *folio;
2031 while ((folio = find_get_entry(&xas, end, XA_PRESENT)) != NULL) {
2033 if (!folio_batch_add(fbatch, folio))
2042 folio = fbatch->folios[idx];
2043 if (!xa_is_value(folio) && !folio_test_hugetlb(folio))
2044 nr = folio_nr_pages(folio);
2074 struct folio *folio;
2077 while ((folio = find_get_entry(&xas, end, XA_PRESENT))) {
2078 if (!xa_is_value(folio)) {
2079 if (folio->index < *start)
2081 if (folio_next_index(folio) - 1 > end)
2083 if (!folio_trylock(folio))
2085 if (folio->mapping != mapping ||
2086 folio_test_writeback(folio))
2088 VM_BUG_ON_FOLIO(!folio_contains(folio, xas.xa_index),
2089 folio);
2092 if (!folio_batch_add(fbatch, folio))
2096 folio_unlock(folio);
2098 folio_put(folio);
2106 folio = fbatch->folios[idx];
2107 if (!xa_is_value(folio) && !folio_test_hugetlb(folio))
2108 nr = folio_nr_pages(folio);
2125 * The first folio may start before @start; if it does, it will contain
2126 * @start. The final folio may extend beyond @end; if it does, it will
2128 * between the folios if there are indices which have no folio in the
2133 * We also update @start to index the next folio for the traversal.
2139 struct folio *folio;
2142 while ((folio = find_get_entry(&xas, end, XA_PRESENT)) != NULL) {
2144 if (xa_is_value(folio))
2146 if (!folio_batch_add(fbatch, folio)) {
2147 unsigned long nr = folio_nr_pages(folio);
2149 if (folio_test_hugetlb(folio))
2151 *start = folio->index + nr;
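
A sketch of the batched iteration that filemap_get_folios() is built for; example_walk_folios is hypothetical. The batch holds a reference on every folio it returns, dropped by folio_batch_release().

    #include <linux/pagemap.h>
    #include <linux/pagevec.h>
    #include <linux/sched.h>

    /* Hypothetical: visit every cached folio in [start, end]. */
    static void example_walk_folios(struct address_space *mapping,
                                    pgoff_t start, pgoff_t end)
    {
            struct folio_batch fbatch;
            unsigned int i;

            folio_batch_init(&fbatch);
            while (filemap_get_folios(mapping, &start, end, &fbatch)) {
                    for (i = 0; i < folio_batch_count(&fbatch); i++) {
                            struct folio *folio = fbatch.folios[i];

                            /* Referenced but unlocked; inspect it here. */
                            pr_debug("index %lu spans %ld pages\n",
                                     folio->index, folio_nr_pages(folio));
                    }
                    folio_batch_release(&fbatch);   /* drops the references */
                    cond_resched();
            }
    }
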
2185 * Also update @start to be positioned for traversal of the next folio.
2193 struct folio *folio;
2197 for (folio = xas_load(&xas); folio && xas.xa_index <= end;
2198 folio = xas_next(&xas)) {
2199 if (xas_retry(&xas, folio))
2205 if (xa_is_value(folio))
2208 if (!folio_try_get_rcu(folio))
2211 if (unlikely(folio != xas_reload(&xas)))
2214 if (!folio_batch_add(fbatch, folio)) {
2215 nr = folio_nr_pages(folio);
2217 if (folio_test_hugetlb(folio))
2219 *start = folio->index + nr;
2224 folio_put(folio);
2234 folio = fbatch->folios[nr - 1];
2235 if (folio_test_hugetlb(folio))
2236 *start = folio->index + 1;
2238 *start = folio_next_index(folio);
2257 * Also update @start to index the next folio for traversal.
2263 struct folio *folio;
2266 while ((folio = find_get_entry(&xas, end, tag)) != NULL) {
2272 if (xa_is_value(folio))
2274 if (!folio_batch_add(fbatch, folio)) {
2275 unsigned long nr = folio_nr_pages(folio);
2277 if (folio_test_hugetlb(folio))
2279 *start = folio->index + nr;
2325 * the middle of a folio, the entire folio will be returned. The last
2326 * folio in the batch may have the readahead flag set or the uptodate flag
2333 struct folio *folio;
2336 for (folio = xas_load(&xas); folio; folio = xas_next(&xas)) {
2337 if (xas_retry(&xas, folio))
2339 if (xas.xa_index > max || xa_is_value(folio))
2341 if (xa_is_sibling(folio))
2343 if (!folio_try_get_rcu(folio))
2346 if (unlikely(folio != xas_reload(&xas)))
2349 if (!folio_batch_add(fbatch, folio))
2351 if (!folio_test_uptodate(folio))
2353 if (folio_test_readahead(folio))
2355 xas_advance(&xas, folio_next_index(folio) - 1);
2358 folio_put(folio);
2366 struct folio *folio)
2368 bool workingset = folio_test_workingset(folio);
2377 folio_clear_error(folio);
2382 error = filler(file, folio);
2388 error = folio_wait_locked_killable(folio);
2391 if (folio_test_uptodate(folio))
2399 loff_t pos, size_t count, struct folio *folio,
2402 if (folio_test_uptodate(folio))
2409 if (mapping->host->i_blkbits >= folio_shift(folio))
2412 if (folio_pos(folio) > pos) {
2413 count -= folio_pos(folio) - pos;
2416 pos -= folio_pos(folio);
2419 return mapping->a_ops->is_partially_uptodate(folio, pos, count);
2424 struct folio *folio, bool need_uptodate)
2435 if (!folio_trylock(folio)) {
2445 folio_put_wait_locked(folio, TASK_KILLABLE);
2448 error = __folio_lock_async(folio, iocb->ki_waitq);
2454 if (!folio->mapping)
2458 if (filemap_range_uptodate(mapping, iocb->ki_pos, count, folio,
2467 folio);
2470 folio_unlock(folio);
2474 folio_put(folio);
2482 struct folio *folio;
2485 folio = filemap_alloc_folio(mapping_gfp_mask(mapping), 0);
2486 if (!folio)
2494 * release invalidate_lock after inserting the folio into
2495 * the page cache as the locked folio would then be enough to
2503 error = filemap_add_folio(mapping, folio, index,
2510 error = filemap_read_folio(file, mapping->a_ops->read_folio, folio);
2515 folio_batch_add(fbatch, folio);
2519 folio_put(folio);
2524 struct address_space *mapping, struct folio *folio,
2527 DEFINE_READAHEAD(ractl, file, &file->f_ra, mapping, folio->index);
2531 page_cache_async_ra(&ractl, folio, last_index - folio->index);
2543 struct folio *folio;
2570 folio = fbatch->folios[folio_batch_count(fbatch) - 1];
2571 if (folio_test_readahead(folio)) {
2572 err = filemap_readahead(iocb, filp, mapping, folio, last_index);
2576 if (!folio_test_uptodate(folio)) {
2580 err = filemap_update_page(iocb, mapping, count, folio,
2589 folio_put(folio);
2597 static inline bool pos_same_folio(loff_t pos1, loff_t pos2, struct folio *folio)
2599 unsigned int shift = folio_shift(folio);
2685 * When a read accesses the same folio several times, only
2693 struct folio *folio = fbatch.folios[i];
2694 size_t fsize = folio_size(folio);
2700 if (end_offset < folio_pos(folio))
2703 folio_mark_accessed(folio);
2705 * If users can be writing to this folio using arbitrary
2707 * before reading the folio on the kernel side.
2710 flush_dcache_folio(folio);
2712 copied = copy_folio_to_iter(folio, offset, bytes, iter);
2845 * Splice subpages from a folio into a pipe.
2848 struct folio *folio, loff_t fpos, size_t size)
2851 size_t spliced = 0, offset = offset_in_folio(folio, fpos);
2853 page = folio_page(folio, offset / PAGE_SIZE);
2854 size = min(size, folio_size(folio) - offset);
2868 folio_get(folio);
2952 struct folio *folio = fbatch.folios[i];
2955 if (folio_pos(folio) >= end_offset)
2957 folio_mark_accessed(folio);
2960 * If users can be writing to this folio using arbitrary
2962 * before reading the folio on the kernel side.
2965 flush_dcache_folio(folio);
2968 n = splice_folio_into_pipe(pipe, folio, *ppos, n);
2991 struct address_space *mapping, struct folio *folio,
2997 if (xa_is_value(folio) || folio_test_uptodate(folio))
3004 folio_lock(folio);
3005 if (unlikely(folio->mapping != mapping))
3008 offset = offset_in_folio(folio, start) & ~(bsz - 1);
3011 if (ops->is_partially_uptodate(folio, offset, bsz) ==
3016 } while (offset < folio_size(folio));
3018 folio_unlock(folio);
3023 static inline size_t seek_folio_size(struct xa_state *xas, struct folio *folio)
3025 if (xa_is_value(folio))
3027 return folio_size(folio);
3054 struct folio *folio;
3060 while ((folio = find_get_entry(&xas, max, XA_PRESENT))) {
3070 seek_size = seek_folio_size(&xas, folio);
3072 start = folio_seek_hole_data(&xas, mapping, folio, start, pos,
3080 if (!xa_is_value(folio))
3081 folio_put(folio);
3087 if (folio && !xa_is_value(folio))
3088 folio_put(folio);
3099 * @folio - the folio to lock.
3103 * mmap_lock. It differs in that it actually returns the folio locked
3104 * if it returns 1 and 0 if it couldn't lock the folio. If we did have
3108 static int lock_folio_maybe_drop_mmap(struct vm_fault *vmf, struct folio *folio,
3111 if (folio_trylock(folio))
3124 if (__folio_lock_killable(folio)) {
3136 __folio_lock(folio);
3218 struct folio *folio)
3234 if (folio_test_readahead(folio)) {
3236 page_cache_async_ra(&ractl, folio, ra->ra_pages);
3272 struct folio *folio;
3283 folio = filemap_get_folio(mapping, index);
3284 if (likely(!IS_ERR(folio))) {
3290 fpin = do_async_mmap_readahead(vmf, folio);
3291 if (unlikely(!folio_test_uptodate(folio))) {
3310 folio = __filemap_get_folio(mapping, index,
3313 if (IS_ERR(folio)) {
3321 if (!lock_folio_maybe_drop_mmap(vmf, folio, &fpin))
3325 if (unlikely(folio->mapping != mapping)) {
3326 folio_unlock(folio);
3327 folio_put(folio);
3330 VM_BUG_ON_FOLIO(!folio_contains(folio, index), folio);
3336 if (unlikely(!folio_test_uptodate(folio))) {
3344 folio_unlock(folio);
3345 folio_put(folio);
3357 folio_unlock(folio);
3369 folio_unlock(folio);
3370 folio_put(folio);
3374 vmf->page = folio_file_page(folio, index);
3385 error = filemap_read_folio(file, mapping->a_ops->read_folio, folio);
3388 folio_put(folio);
3402 if (!IS_ERR(folio))
3403 folio_put(folio);
3412 static bool filemap_map_pmd(struct vm_fault *vmf, struct folio *folio,
3419 folio_unlock(folio);
3420 folio_put(folio);
3424 if (pmd_none(*vmf->pmd) && folio_test_pmd_mappable(folio)) {
3425 struct page *page = folio_file_page(folio, start);
3429 folio_unlock(folio);
3440 static struct folio *next_uptodate_folio(struct xa_state *xas,
3443 struct folio *folio = xas_next_entry(xas, end_pgoff);
3447 if (!folio)
3449 if (xas_retry(xas, folio))
3451 if (xa_is_value(folio))
3453 if (folio_test_locked(folio))
3455 if (!folio_try_get_rcu(folio))
3458 if (unlikely(folio != xas_reload(xas)))
3460 if (!folio_test_uptodate(folio) || folio_test_readahead(folio))
3462 if (!folio_trylock(folio))
3464 if (folio->mapping != mapping)
3466 if (!folio_test_uptodate(folio))
3471 return folio;
3473 folio_unlock(folio);
3475 folio_put(folio);
3476 } while ((folio = xas_next_entry(xas, end_pgoff)) != NULL);
3482 * Map page range [start_page, start_page + nr_pages) of folio.
3483 * start_page is gotten from start by folio_page(folio, start)
3486 struct folio *folio, unsigned long start,
3491 struct page *page = folio_page(folio, start);
3513 set_pte_range(vmf, folio, page, count, addr);
3514 folio_ref_add(folio, count);
3527 set_pte_range(vmf, folio, page, count, addr);
3528 folio_ref_add(folio, count);
3539 struct folio *folio, unsigned long addr,
3543 struct page *page = &folio->page;
3561 set_pte_range(vmf, folio, page, 1, addr);
3562 folio_ref_inc(folio);
3576 struct folio *folio;
3581 folio = next_uptodate_folio(&xas, mapping, end_pgoff);
3582 if (!folio)
3585 if (filemap_map_pmd(vmf, folio, start_pgoff)) {
3593 folio_unlock(folio);
3594 folio_put(folio);
3603 end = folio->index + folio_nr_pages(folio) - 1;
3606 if (!folio_test_large(folio))
3608 folio, addr, &mmap_miss);
3610 ret |= filemap_map_folio_range(vmf, folio,
3611 xas.xa_index - folio->index, addr,
3614 folio_unlock(folio);
3615 folio_put(folio);
3616 } while ((folio = next_uptodate_folio(&xas, mapping, end_pgoff)) != NULL);
3634 struct folio *folio = page_folio(vmf->page);
3639 folio_lock(folio);
3640 if (folio->mapping != mapping) {
3641 folio_unlock(folio);
3646 * We mark the folio dirty already here so that when freeze is in
3648 * see the dirty folio and writeprotect it again.
3650 folio_mark_dirty(folio);
3651 folio_wait_stable(folio);
3704 static struct folio *do_read_cache_folio(struct address_space *mapping,
3707 struct folio *folio;
3713 folio = filemap_get_folio(mapping, index);
3714 if (IS_ERR(folio)) {
3715 folio = filemap_alloc_folio(gfp, 0);
3716 if (!folio)
3718 err = filemap_add_folio(mapping, folio, index, gfp);
3720 folio_put(folio);
3729 if (folio_test_uptodate(folio))
3732 if (!folio_trylock(folio)) {
3733 folio_put_wait_locked(folio, TASK_UNINTERRUPTIBLE);
3738 if (!folio->mapping) {
3739 folio_unlock(folio);
3740 folio_put(folio);
3745 if (folio_test_uptodate(folio)) {
3746 folio_unlock(folio);
3751 err = filemap_read_folio(file, filler, folio);
3753 folio_put(folio);
3760 folio_mark_accessed(folio);
3761 return folio;
3771 * Read one page into the page cache. If it succeeds, the folio returned
3772 * will contain @index, but it may not be the first page of the folio.
3778 * Return: An uptodate folio on success, ERR_PTR() on failure.
3780 struct folio *read_cache_folio(struct address_space *mapping, pgoff_t index,
3790 * @mapping: The address_space for the folio.
3791 * @index: The index that the allocated folio will contain.
3803 * Return: Uptodate folio on success, ERR_PTR() on failure.
3805 struct folio *mapping_read_folio_gfp(struct address_space *mapping,
3815 struct folio *folio;
3817 folio = do_read_cache_folio(mapping, index, filler, file, gfp);
3818 if (IS_ERR(folio))
3819 return &folio->page;
3820 return folio_file_page(folio, index);
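
A hedged sketch of reading through the page cache with read_cache_folio(); example_read_bytes is hypothetical, and passing a NULL filler means the mapping's own ->read_folio() is used.

    #include <linux/highmem.h>
    #include <linux/pagemap.h>
    #include <linux/string.h>

    /* Hypothetical: copy @len bytes at file offset @pos into @buf. */
    static int example_read_bytes(struct address_space *mapping, loff_t pos,
                                  void *buf, size_t len)
    {
            struct folio *folio;
            void *kaddr;

            folio = read_cache_folio(mapping, pos >> PAGE_SHIFT, NULL, NULL);
            if (IS_ERR(folio))
                    return PTR_ERR(folio);  /* read error or -ENOMEM */

            /* kmap_local_folio() maps one page, so stay inside it. */
            len = min_t(size_t, len, PAGE_SIZE - offset_in_page(pos));

            kaddr = kmap_local_folio(folio, offset_in_folio(folio, pos));
            memcpy(buf, kaddr, len);
            kunmap_local(kaddr);

            folio_put(folio);               /* read_cache_folio() took a ref */
            return 0;
    }
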
4109 * filemap_release_folio() - Release fs-specific metadata on a folio.
4110 * @folio: The folio which the kernel is trying to free.
4113 * The address_space is trying to release any data attached to a folio
4114 * (presumably at folio->private).
4117 * indicating that the folio has other metadata associated with it.
4125 bool filemap_release_folio(struct folio *folio, gfp_t gfp)
4127 struct address_space * const mapping = folio->mapping;
4129 BUG_ON(!folio_test_locked(folio));
4130 if (!folio_needs_release(folio))
4132 if (folio_test_writeback(folio))
4136 return mapping->a_ops->release_folio(folio, gfp);
4137 return try_to_free_buffers(folio);
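
A hedged sketch of a reclaim-style caller of filemap_release_folio(); example_try_strip_private is hypothetical. The folio must be locked, and a folio under writeback cannot release its private data.

    #include <linux/pagemap.h>

    /* Hypothetical: may this locked folio drop its fs-private data? */
    static bool example_try_strip_private(struct folio *folio, gfp_t gfp)
    {
            if (folio_test_writeback(folio))
                    return false;   /* filemap_release_folio() refuses too */

            /* Asks ->release_folio() (or try_to_free_buffers()) to let go. */
            return filemap_release_folio(folio, gfp);
    }
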
4158 struct folio *folio;
4161 xas_for_each(&xas, folio, last_index) {
4167 * Don't deref the folio. It is not pinned, and might
4177 if (xas_retry(&xas, folio))
4192 if (xa_is_value(folio)) {
4194 void *shadow = (void *)folio;
4202 swp_entry_t swp = radix_to_swp_entry(folio);