// SPDX-License-Identifier: GPL-2.0-only
/*
 *	linux/mm/filemap.c
 *
 * Copyright (C) 1994-1999  Linus Torvalds
 */

/*
 * This file handles the generic file mmap semantics used by
 * most "normal" filesystems (but you don't /have/ to use this:
 * the NFS filesystem used to do this differently, for example)
 */
#include <linux/export.h>
#include <linux/compiler.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/sched/signal.h>
#include <linux/uaccess.h>
#include <linux/capability.h>
#include <linux/kernel_stat.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/syscalls.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/error-injection.h>
#include <linux/hash.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/security.h>
#include <linux/cpuset.h>
#include <linux/hugetlb.h>
#include <linux/memcontrol.h>
#include <linux/shmem_fs.h>
#include <linux/rmap.h>
#include <linux/delayacct.h>
#include <linux/psi.h>
#include <linux/ramfs.h>
#include <linux/page_idle.h>
#include <linux/migrate.h>
#include <linux/pipe_fs_i.h>
#include <linux/splice.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/filemap.h>

/*
 * FIXME: remove all knowledge of the buffer layer from the core VM
 */
#include <linux/buffer_head.h> /* for try_to_free_buffers */

#include <asm/mman.h>

#include "swap.h"

/*
 * Shared mappings implemented 30.11.1994. It's not fully working yet,
 * though.
 *
 * Shared mappings now work. 15.8.1995  Bruno.
 *
 * finished 'unifying' the page and buffer cache and SMP-threaded the
 * page-cache, 21.05.1999, Ingo Molnar <mingo@redhat.com>
 *
 * SMP-threaded pagemap-LRU 1999, Andrea Arcangeli <andrea@suse.de>
 */

/*
 * Lock ordering:
 *
 *  ->i_mmap_rwsem		(truncate_pagecache)
 *    ->private_lock		(__free_pte->block_dirty_folio)
 *      ->swap_lock		(exclusive_swap_page, others)
 *        ->i_pages lock
 *
 *  ->i_rwsem
 *    ->invalidate_lock		(acquired by fs in truncate path)
 *      ->i_mmap_rwsem		(truncate->unmap_mapping_range)
 *
 *  ->mmap_lock
 *    ->i_mmap_rwsem
 *      ->page_table_lock or pte_lock	(various, mainly in memory.c)
 *        ->i_pages lock	(arch-dependent flush_dcache_mmap_lock)
 *
 *  ->mmap_lock
 *    ->invalidate_lock		(filemap_fault)
 *      ->lock_page		(filemap_fault, access_process_vm)
 *
 *  ->i_rwsem			(generic_perform_write)
 *    ->mmap_lock		(fault_in_readable->do_page_fault)
 *
 *  bdi->wb.list_lock
 *    sb_lock			(fs/fs-writeback.c)
 *    ->i_pages lock		(__sync_single_inode)
 *
 *  ->i_mmap_rwsem
 *    ->anon_vma.lock		(vma_merge)
 *
 *  ->anon_vma.lock
 *    ->page_table_lock or pte_lock	(anon_vma_prepare and various)
 *
 *  ->page_table_lock or pte_lock
 *    ->swap_lock		(try_to_unmap_one)
 *    ->private_lock		(try_to_unmap_one)
 *    ->i_pages lock		(try_to_unmap_one)
 *    ->lruvec->lru_lock	(follow_page->mark_page_accessed)
 *    ->lruvec->lru_lock	(check_pte_range->isolate_lru_page)
 *    ->private_lock		(page_remove_rmap->set_page_dirty)
 *    ->i_pages lock		(page_remove_rmap->set_page_dirty)
 *    bdi.wb->list_lock		(page_remove_rmap->set_page_dirty)
 *    ->inode->i_lock		(page_remove_rmap->set_page_dirty)
 *    ->memcg->move_lock	(page_remove_rmap->folio_memcg_lock)
 *    bdi.wb->list_lock		(zap_pte_range->set_page_dirty)
 *    ->inode->i_lock		(zap_pte_range->set_page_dirty)
 *    ->private_lock		(zap_pte_range->block_dirty_folio)
 */
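
/*
 * Illustrative sketch (editorial addition, not upstream code): a
 * truncate-style caller takes the top of the ordering above in
 * document order.  The surrounding context is hypothetical; the
 * calls themselves are real kernel APIs:
 *
 *	inode_lock(inode);				// ->i_rwsem
 *	filemap_invalidate_lock(inode->i_mapping);	// ->invalidate_lock
 *	truncate_pagecache(inode, new_size);		// ->i_mmap_rwsem inside
 *	filemap_invalidate_unlock(inode->i_mapping);
 *	inode_unlock(inode);
 */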

static void page_cache_delete(struct address_space *mapping,
				   struct folio *folio, void *shadow)
{
	XA_STATE(xas, &mapping->i_pages, folio->index);
	long nr = 1;

	mapping_set_update(&xas, mapping);

	/* hugetlb pages are represented by a single entry in the xarray */
	if (!folio_test_hugetlb(folio)) {
		xas_set_order(&xas, folio->index, folio_order(folio));
		nr = folio_nr_pages(folio);
	}

	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);

	xas_store(&xas, shadow);
	xas_init_marks(&xas);

	folio->mapping = NULL;
	/* Leave page->index set: truncation lookup relies upon it */
	mapping->nrpages -= nr;
}

static void filemap_unaccount_folio(struct address_space *mapping,
		struct folio *folio)
{
	long nr;

	VM_BUG_ON_FOLIO(folio_mapped(folio), folio);
	if (!IS_ENABLED(CONFIG_DEBUG_VM) && unlikely(folio_mapped(folio))) {
		pr_alert("BUG: Bad page cache in process %s  pfn:%05lx\n",
			 current->comm, folio_pfn(folio));
		dump_page(&folio->page, "still mapped when deleted");
		dump_stack();
		add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);

		if (mapping_exiting(mapping) && !folio_test_large(folio)) {
			int mapcount = page_mapcount(&folio->page);

			if (folio_ref_count(folio) >= mapcount + 2) {
				/*
				 * All vmas have already been torn down, so it's
				 * a good bet that actually the page is unmapped
				 * and we'd rather not leak it: if we're wrong,
				 * another bad page check should catch it later.
				 */
				page_mapcount_reset(&folio->page);
				folio_ref_sub(folio, mapcount);
			}
		}
	}

	/* hugetlb folios do not participate in page cache accounting. */
	if (folio_test_hugetlb(folio))
		return;

	nr = folio_nr_pages(folio);

	__lruvec_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
	if (folio_test_swapbacked(folio)) {
		__lruvec_stat_mod_folio(folio, NR_SHMEM, -nr);
		if (folio_test_pmd_mappable(folio))
			__lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, -nr);
	} else if (folio_test_pmd_mappable(folio)) {
		__lruvec_stat_mod_folio(folio, NR_FILE_THPS, -nr);
		filemap_nr_thps_dec(mapping);
	}

	/*
	 * At this point folio must be either written or cleaned by
	 * truncate.  Dirty folio here signals a bug and loss of
	 * unwritten data - on ordinary filesystems.
	 *
	 * But it's harmless on in-memory filesystems like tmpfs; and can
	 * occur when a driver which did get_user_pages() sets page dirty
	 * before putting it, while the inode is being finally evicted.
	 *
	 * Below fixes dirty accounting after removing the folio entirely
	 * but leaves the dirty flag set: it has no effect for truncated
	 * folio and anyway will be cleared before returning folio to
	 * buddy allocator.
	 */
	if (WARN_ON_ONCE(folio_test_dirty(folio) &&
			 mapping_can_writeback(mapping)))
		folio_account_cleaned(folio, inode_to_wb(mapping->host));
}

/*
 * Delete a page from the page cache and free it. Caller has to make
 * sure the page is locked and that nobody else uses it - or that usage
 * is safe.  The caller must hold the i_pages lock.
 */
void __filemap_remove_folio(struct folio *folio, void *shadow)
{
	struct address_space *mapping = folio->mapping;

	trace_mm_filemap_delete_from_page_cache(folio);
	filemap_unaccount_folio(mapping, folio);
	page_cache_delete(mapping, folio, shadow);
}

void filemap_free_folio(struct address_space *mapping, struct folio *folio)
{
	void (*free_folio)(struct folio *);
	int refs = 1;

	free_folio = mapping->a_ops->free_folio;
	if (free_folio)
		free_folio(folio);

	if (folio_test_large(folio) && !folio_test_hugetlb(folio))
		refs = folio_nr_pages(folio);
	folio_put_refs(folio, refs);
}

/**
 * filemap_remove_folio - Remove folio from page cache.
 * @folio: The folio.
 *
 * This must be called only on folios that are locked and have been
 * verified to be in the page cache.  It will never put the folio into
 * the free list because the caller has a reference on the page.
 */
void filemap_remove_folio(struct folio *folio)
{
	struct address_space *mapping = folio->mapping;

	BUG_ON(!folio_test_locked(folio));
	spin_lock(&mapping->host->i_lock);
	xa_lock_irq(&mapping->i_pages);
	__filemap_remove_folio(folio, NULL);
	xa_unlock_irq(&mapping->i_pages);
	if (mapping_shrinkable(mapping))
		inode_add_lru(mapping->host);
	spin_unlock(&mapping->host->i_lock);

	filemap_free_folio(mapping, folio);
}
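
/*
 * Illustrative sketch (editorial addition): how a truncate-style caller
 * typically reaches filemap_remove_folio().  The folio is looked up with
 * a reference held, locked, revalidated against the mapping, and only
 * then removed:
 *
 *	folio_lock(folio);
 *	if (folio->mapping == mapping)
 *		filemap_remove_folio(folio);
 *	folio_unlock(folio);
 *	folio_put(folio);
 */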

/*
 * page_cache_delete_batch - delete several folios from page cache
 * @mapping: the mapping to which folios belong
 * @fbatch: batch of folios to delete
 *
 * The function walks over mapping->i_pages and removes folios passed in
 * @fbatch from the mapping. The function expects @fbatch to be sorted
 * by page index and is optimised for it to be dense.
 * It tolerates holes in @fbatch (mapping entries at those indices are not
 * modified).
 *
 * The function expects the i_pages lock to be held.
 */
static void page_cache_delete_batch(struct address_space *mapping,
			     struct folio_batch *fbatch)
{
	XA_STATE(xas, &mapping->i_pages, fbatch->folios[0]->index);
	long total_pages = 0;
	int i = 0;
	struct folio *folio;

	mapping_set_update(&xas, mapping);
	xas_for_each(&xas, folio, ULONG_MAX) {
		if (i >= folio_batch_count(fbatch))
			break;

		/* A swap/dax/shadow entry got inserted? Skip it. */
		if (xa_is_value(folio))
			continue;
		/*
		 * A page got inserted in our range? Skip it. We have our
		 * pages locked so they are protected from being removed.
		 * If we see a page whose index is higher than ours, it
		 * means our page has been removed, which shouldn't be
		 * possible because we're holding the PageLock.
		 */
		if (folio != fbatch->folios[i]) {
			VM_BUG_ON_FOLIO(folio->index >
					fbatch->folios[i]->index, folio);
			continue;
		}

		WARN_ON_ONCE(!folio_test_locked(folio));

		folio->mapping = NULL;
		/* Leave folio->index set: truncation lookup relies on it */

		i++;
		xas_store(&xas, NULL);
		total_pages += folio_nr_pages(folio);
	}
	mapping->nrpages -= total_pages;
}

void delete_from_page_cache_batch(struct address_space *mapping,
				  struct folio_batch *fbatch)
{
	int i;

	if (!folio_batch_count(fbatch))
		return;

	spin_lock(&mapping->host->i_lock);
	xa_lock_irq(&mapping->i_pages);
	for (i = 0; i < folio_batch_count(fbatch); i++) {
		struct folio *folio = fbatch->folios[i];

		trace_mm_filemap_delete_from_page_cache(folio);
		filemap_unaccount_folio(mapping, folio);
	}
	page_cache_delete_batch(mapping, fbatch);
	xa_unlock_irq(&mapping->i_pages);
	if (mapping_shrinkable(mapping))
		inode_add_lru(mapping->host);
	spin_unlock(&mapping->host->i_lock);

	for (i = 0; i < folio_batch_count(fbatch); i++)
		filemap_free_folio(mapping, fbatch->folios[i]);
}
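
/*
 * Illustrative sketch (editorial addition): batched removal as a
 * truncate-style caller might drive it.  Gather locked folios into a
 * batch, then delete them in one pass under the i_pages lock:
 *
 *	struct folio_batch fbatch;
 *
 *	folio_batch_init(&fbatch);
 *	// lock each victim folio and folio_batch_add(&fbatch, folio)
 *	delete_from_page_cache_batch(mapping, &fbatch);
 *	// then folio_unlock() and folio_put() each folio in the batch
 */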

int filemap_check_errors(struct address_space *mapping)
{
	int ret = 0;
	/* Check for outstanding write errors */
	if (test_bit(AS_ENOSPC, &mapping->flags) &&
	    test_and_clear_bit(AS_ENOSPC, &mapping->flags))
		ret = -ENOSPC;
	if (test_bit(AS_EIO, &mapping->flags) &&
	    test_and_clear_bit(AS_EIO, &mapping->flags))
		ret = -EIO;
	return ret;
}
EXPORT_SYMBOL(filemap_check_errors);

static int filemap_check_and_keep_errors(struct address_space *mapping)
{
	/* Check for outstanding write errors */
	if (test_bit(AS_EIO, &mapping->flags))
		return -EIO;
	if (test_bit(AS_ENOSPC, &mapping->flags))
		return -ENOSPC;
	return 0;
}
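
/*
 * Illustrative note (editorial addition): the AS_EIO/AS_ENOSPC bits
 * tested above are set by writeback completion paths through
 * mapping_set_error(), which also records the error in the errseq_t
 * cursor mapping->wb_err used further below:
 *
 *	if (err)
 *		mapping_set_error(mapping, err);
 */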

/**
 * filemap_fdatawrite_wbc - start writeback on mapping dirty pages in range
 * @mapping:	address space structure to write
 * @wbc:	the writeback_control controlling the writeout
 *
 * Call writepages on the mapping using the provided wbc to control the
 * writeout.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int filemap_fdatawrite_wbc(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	int ret;

	if (!mapping_can_writeback(mapping) ||
	    !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
		return 0;

	wbc_attach_fdatawrite_inode(wbc, mapping->host);
	ret = do_writepages(mapping, wbc);
	wbc_detach_inode(wbc);
	return ret;
}
EXPORT_SYMBOL(filemap_fdatawrite_wbc);

/**
 * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
 * @mapping:	address space structure to write
 * @start:	offset in bytes where the range starts
 * @end:	offset in bytes where the range ends (inclusive)
 * @sync_mode:	enable synchronous operation
 *
 * Start writeback against all of a mapping's dirty pages that lie
 * within the byte offsets <start, end> inclusive.
 *
 * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as
 * opposed to a regular memory cleansing writeback.  The difference between
 * these two operations is that if a dirty page/buffer is encountered, it must
 * be waited upon, and not just skipped over.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
				loff_t end, int sync_mode)
{
	struct writeback_control wbc = {
		.sync_mode = sync_mode,
		.nr_to_write = LONG_MAX,
		.range_start = start,
		.range_end = end,
	};

	return filemap_fdatawrite_wbc(mapping, &wbc);
}

static inline int __filemap_fdatawrite(struct address_space *mapping,
	int sync_mode)
{
	return __filemap_fdatawrite_range(mapping, 0, LLONG_MAX, sync_mode);
}

int filemap_fdatawrite(struct address_space *mapping)
{
	return __filemap_fdatawrite(mapping, WB_SYNC_ALL);
}
EXPORT_SYMBOL(filemap_fdatawrite);

int filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
				loff_t end)
{
	return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL);
}
EXPORT_SYMBOL(filemap_fdatawrite_range);

/**
 * filemap_flush - mostly a non-blocking flush
 * @mapping:	target address_space
 *
 * This is a mostly non-blocking flush.  Not suitable for data-integrity
 * purposes - I/O may not be started against all dirty pages.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int filemap_flush(struct address_space *mapping)
{
	return __filemap_fdatawrite(mapping, WB_SYNC_NONE);
}
EXPORT_SYMBOL(filemap_flush);
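
/*
 * Illustrative sketch (editorial addition): the two writeback flavours
 * side by side.  WB_SYNC_ALL (data integrity) must not skip any dirty
 * page, while WB_SYNC_NONE (memory cleansing) may:
 *
 *	err = filemap_fdatawrite_range(mapping, pos, end);	// WB_SYNC_ALL
 *	filemap_flush(mapping);					// WB_SYNC_NONE
 */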

/**
 * filemap_range_has_page - check if a page exists in range.
 * @mapping:           address space within which to check
 * @start_byte:        offset in bytes where the range starts
 * @end_byte:          offset in bytes where the range ends (inclusive)
 *
 * Find at least one page in the range supplied, usually used to check if
 * direct writing in this range will trigger a writeback.
 *
 * Return: %true if at least one page exists in the specified range,
 * %false otherwise.
 */
bool filemap_range_has_page(struct address_space *mapping,
			   loff_t start_byte, loff_t end_byte)
{
	struct folio *folio;
	XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT);
	pgoff_t max = end_byte >> PAGE_SHIFT;

	if (end_byte < start_byte)
		return false;

	rcu_read_lock();
	for (;;) {
		folio = xas_find(&xas, max);
		if (xas_retry(&xas, folio))
			continue;
		/* Shadow entries don't count */
		if (xa_is_value(folio))
			continue;
		/*
		 * We don't need to try to pin this page; we're about to
		 * release the RCU lock anyway.  It is enough to know that
		 * there was a page here recently.
		 */
		break;
	}
	rcu_read_unlock();

	return folio != NULL;
}
EXPORT_SYMBOL(filemap_range_has_page);
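
/*
 * Illustrative sketch (editorial addition): a non-blocking direct-I/O
 * style caller using filemap_range_has_page() to bail out when cached
 * pages would alias the I/O range:
 *
 *	if ((iocb->ki_flags & IOCB_NOWAIT) &&
 *	    filemap_range_has_page(mapping, pos, pos + count - 1))
 *		return -EAGAIN;
 */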

static void __filemap_fdatawait_range(struct address_space *mapping,
				     loff_t start_byte, loff_t end_byte)
{
	pgoff_t index = start_byte >> PAGE_SHIFT;
	pgoff_t end = end_byte >> PAGE_SHIFT;
	struct folio_batch fbatch;
	unsigned nr_folios;

	folio_batch_init(&fbatch);

	while (index <= end) {
		unsigned i;

		nr_folios = filemap_get_folios_tag(mapping, &index, end,
				PAGECACHE_TAG_WRITEBACK, &fbatch);

		if (!nr_folios)
			break;

		for (i = 0; i < nr_folios; i++) {
			struct folio *folio = fbatch.folios[i];

			folio_wait_writeback(folio);
			folio_clear_error(folio);
		}
		folio_batch_release(&fbatch);
		cond_resched();
	}
}

/**
 * filemap_fdatawait_range - wait for writeback to complete
 * @mapping:		address space structure to wait for
 * @start_byte:		offset in bytes where the range starts
 * @end_byte:		offset in bytes where the range ends (inclusive)
 *
 * Walk the list of under-writeback pages of the given address space
 * in the given range and wait for all of them.  Check error status of
 * the address space and return it.
 *
 * Since the error status of the address space is cleared by this function,
 * callers are responsible for checking the return value and handling and/or
 * reporting the error.
 *
 * Return: error status of the address space.
 */
int filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte,
			    loff_t end_byte)
{
	__filemap_fdatawait_range(mapping, start_byte, end_byte);
	return filemap_check_errors(mapping);
}
EXPORT_SYMBOL(filemap_fdatawait_range);

/**
 * filemap_fdatawait_range_keep_errors - wait for writeback to complete
 * @mapping:		address space structure to wait for
 * @start_byte:		offset in bytes where the range starts
 * @end_byte:		offset in bytes where the range ends (inclusive)
 *
 * Walk the list of under-writeback pages of the given address space in the
 * given range and wait for all of them.  Unlike filemap_fdatawait_range(),
 * this function does not clear error status of the address space.
 *
 * Use this function if callers don't handle errors themselves.  Expected
 * call sites are system-wide / filesystem-wide data flushers: e.g. sync(2),
 * fsfreeze(8)
 */
int filemap_fdatawait_range_keep_errors(struct address_space *mapping,
		loff_t start_byte, loff_t end_byte)
{
	__filemap_fdatawait_range(mapping, start_byte, end_byte);
	return filemap_check_and_keep_errors(mapping);
}
EXPORT_SYMBOL(filemap_fdatawait_range_keep_errors);

/**
 * file_fdatawait_range - wait for writeback to complete
 * @file:		file pointing to address space structure to wait for
 * @start_byte:		offset in bytes where the range starts
 * @end_byte:		offset in bytes where the range ends (inclusive)
 *
 * Walk the list of under-writeback pages of the address space that file
 * refers to, in the given range and wait for all of them.  Check error
 * status of the address space vs. the file->f_wb_err cursor and return it.
 *
 * Since the error status of the file is advanced by this function,
 * callers are responsible for checking the return value and handling and/or
 * reporting the error.
 *
 * Return: error status of the address space vs. the file->f_wb_err cursor.
 */
int file_fdatawait_range(struct file *file, loff_t start_byte, loff_t end_byte)
{
	struct address_space *mapping = file->f_mapping;

	__filemap_fdatawait_range(mapping, start_byte, end_byte);
	return file_check_and_advance_wb_err(file);
}
EXPORT_SYMBOL(file_fdatawait_range);

/**
 * filemap_fdatawait_keep_errors - wait for writeback without clearing errors
 * @mapping: address space structure to wait for
 *
 * Walk the list of under-writeback pages of the given address space
 * and wait for all of them.  Unlike filemap_fdatawait(), this function
 * does not clear error status of the address space.
 *
 * Use this function if callers don't handle errors themselves.  Expected
 * call sites are system-wide / filesystem-wide data flushers: e.g. sync(2),
 * fsfreeze(8)
 *
 * Return: error status of the address space.
 */
int filemap_fdatawait_keep_errors(struct address_space *mapping)
{
	__filemap_fdatawait_range(mapping, 0, LLONG_MAX);
	return filemap_check_and_keep_errors(mapping);
}
EXPORT_SYMBOL(filemap_fdatawait_keep_errors);

/* Returns true if writeback might be needed or already in progress. */
static bool mapping_needs_writeback(struct address_space *mapping)
{
	return mapping->nrpages;
}

bool filemap_range_has_writeback(struct address_space *mapping,
				 loff_t start_byte, loff_t end_byte)
{
	XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT);
	pgoff_t max = end_byte >> PAGE_SHIFT;
	struct folio *folio;

	if (end_byte < start_byte)
		return false;

	rcu_read_lock();
	xas_for_each(&xas, folio, max) {
		if (xas_retry(&xas, folio))
			continue;
		if (xa_is_value(folio))
			continue;
		if (folio_test_dirty(folio) || folio_test_locked(folio) ||
				folio_test_writeback(folio))
			break;
	}
	rcu_read_unlock();
	return folio != NULL;
}
EXPORT_SYMBOL_GPL(filemap_range_has_writeback);

/**
 * filemap_write_and_wait_range - write out & wait on a file range
 * @mapping:	the address_space for the pages
 * @lstart:	offset in bytes where the range starts
 * @lend:	offset in bytes where the range ends (inclusive)
 *
 * Write out and wait upon file offsets lstart->lend, inclusive.
 *
 * Note that @lend is inclusive (describes the last byte to be written) so
 * that this function can be used to write to the very end-of-file (end = -1).
 *
 * Return: error status of the address space.
 */
int filemap_write_and_wait_range(struct address_space *mapping,
				 loff_t lstart, loff_t lend)
{
	int err = 0, err2;

	if (lend < lstart)
		return 0;

	if (mapping_needs_writeback(mapping)) {
		err = __filemap_fdatawrite_range(mapping, lstart, lend,
						 WB_SYNC_ALL);
		/*
		 * Even if the above returned an error, the pages may have
		 * been written partially (e.g. -ENOSPC), so we wait for
		 * writeback anyway. But -EIO is a special case: it may
		 * indicate the worst thing (e.g. a bug) happened, so we
		 * avoid waiting after it.
		 */
		if (err != -EIO)
			__filemap_fdatawait_range(mapping, lstart, lend);
	}
	err2 = filemap_check_errors(mapping);
	if (!err)
		err = err2;
	return err;
}
EXPORT_SYMBOL(filemap_write_and_wait_range);
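
/*
 * Illustrative sketch (editorial addition): an fsync-style sequence
 * built on the helper above; the start/end pair is hypothetical:
 *
 *	err = filemap_write_and_wait_range(file->f_mapping, start, end);
 *	if (err)
 *		return err;
 *	// then flush metadata / device caches as the filesystem requires
 */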

void __filemap_set_wb_err(struct address_space *mapping, int err)
{
	errseq_t eseq = errseq_set(&mapping->wb_err, err);

	trace_filemap_set_wb_err(mapping, eseq);
}
EXPORT_SYMBOL(__filemap_set_wb_err);

/**
 * file_check_and_advance_wb_err - report the wb error (if any) that was
 *				   previously reported and advance wb_err
 *				   to the current one
 * @file: struct file on which the error is being reported
 *
 * When userland calls fsync (or something like nfsd does the equivalent), we
 * want to report any writeback errors that occurred since the last fsync (or
 * since the file was opened if there haven't been any).
 *
 * Grab the wb_err from the mapping. If it matches what we have in the file,
 * then just quickly return 0. The file is all caught up.
 *
 * If it doesn't match, then take the mapping value, set the "seen" flag in
 * it and try to swap it into place. If it works, or another task beat us
 * to it with the new value, then update the f_wb_err and return the error
 * portion. The error at this point must be reported via proper channels
 * (à la fsync, or an NFS COMMIT operation, etc.).
 *
 * While we handle mapping->wb_err with atomic operations, the f_wb_err
 * value is protected by the f_lock since we must ensure that it reflects
 * the latest value swapped in for this file descriptor.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int file_check_and_advance_wb_err(struct file *file)
{
	int err = 0;
	errseq_t old = READ_ONCE(file->f_wb_err);
	struct address_space *mapping = file->f_mapping;

	/* Locklessly handle the common case where nothing has changed */
	if (errseq_check(&mapping->wb_err, old)) {
		/* Something changed, must use slow path */
		spin_lock(&file->f_lock);
		old = file->f_wb_err;
		err = errseq_check_and_advance(&mapping->wb_err,
						&file->f_wb_err);
		trace_file_check_and_advance_wb_err(file, old);
		spin_unlock(&file->f_lock);
	}

	/*
	 * We're mostly using this function as a drop-in replacement for
	 * filemap_check_errors(). Clear AS_EIO/AS_ENOSPC to emulate the
	 * effect that the legacy code would have had on these flags.
	 */
	clear_bit(AS_EIO, &mapping->flags);
	clear_bit(AS_ENOSPC, &mapping->flags);
	return err;
}
EXPORT_SYMBOL(file_check_and_advance_wb_err);
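
/*
 * Illustrative sketch (editorial addition): the underlying errseq_t
 * sample/check pattern that this function wraps for struct file:
 *
 *	errseq_t since = errseq_sample(&mapping->wb_err);
 *	// ... writeback runs; errors recorded via mapping_set_error() ...
 *	err = errseq_check_and_advance(&mapping->wb_err, &since);
 */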

/**
 * file_write_and_wait_range - write out & wait on a file range
 * @file:	file pointing to address_space with pages
 * @lstart:	offset in bytes where the range starts
 * @lend:	offset in bytes where the range ends (inclusive)
 *
 * Write out and wait upon file offsets lstart->lend, inclusive.
 *
 * Note that @lend is inclusive (describes the last byte to be written) so
 * that this function can be used to write to the very end-of-file (end = -1).
 *
 * After writing out and waiting on the data, we check and advance the
 * f_wb_err cursor to the latest value, and return any errors detected there.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int file_write_and_wait_range(struct file *file, loff_t lstart, loff_t lend)
{
	int err = 0, err2;
	struct address_space *mapping = file->f_mapping;

	if (lend < lstart)
		return 0;

	if (mapping_needs_writeback(mapping)) {
		err = __filemap_fdatawrite_range(mapping, lstart, lend,
						 WB_SYNC_ALL);
		/* See comment of filemap_write_and_wait() */
		if (err != -EIO)
			__filemap_fdatawait_range(mapping, lstart, lend);
	}
	err2 = file_check_and_advance_wb_err(file);
	if (!err)
		err = err2;
	return err;
}
EXPORT_SYMBOL(file_write_and_wait_range);

/**
 * replace_page_cache_folio - replace a pagecache folio with a new one
 * @old:	folio to be replaced
 * @new:	folio to replace with
 *
 * This function replaces a folio in the pagecache with a new one.  On
 * success it acquires the pagecache reference for the new folio and
 * drops it for the old folio.  Both the old and new folios must be
 * locked.  This function does not add the new folio to the LRU, the
 * caller must do that.
 *
 * The remove + add is atomic.  This function cannot fail.
 */
void replace_page_cache_folio(struct folio *old, struct folio *new)
{
	struct address_space *mapping = old->mapping;
	void (*free_folio)(struct folio *) = mapping->a_ops->free_folio;
	pgoff_t offset = old->index;
	XA_STATE(xas, &mapping->i_pages, offset);

	VM_BUG_ON_FOLIO(!folio_test_locked(old), old);
	VM_BUG_ON_FOLIO(!folio_test_locked(new), new);
	VM_BUG_ON_FOLIO(new->mapping, new);

	folio_get(new);
	new->mapping = mapping;
	new->index = offset;

	mem_cgroup_migrate(old, new);

	xas_lock_irq(&xas);
	xas_store(&xas, new);

	old->mapping = NULL;
	/* hugetlb pages do not participate in page cache accounting. */
	if (!folio_test_hugetlb(old))
		__lruvec_stat_sub_folio(old, NR_FILE_PAGES);
	if (!folio_test_hugetlb(new))
		__lruvec_stat_add_folio(new, NR_FILE_PAGES);
	if (folio_test_swapbacked(old))
		__lruvec_stat_sub_folio(old, NR_SHMEM);
	if (folio_test_swapbacked(new))
		__lruvec_stat_add_folio(new, NR_SHMEM);
	xas_unlock_irq(&xas);
	if (free_folio)
		free_folio(old);
	folio_put(old);
}
EXPORT_SYMBOL_GPL(replace_page_cache_folio);
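
/*
 * Illustrative sketch (editorial addition): a migration-style caller
 * swapping in a freshly allocated folio.  Both folios must be locked,
 * and the caller adds the new folio to the LRU itself:
 *
 *	__folio_set_locked(new);
 *	replace_page_cache_folio(old, new);
 *	folio_add_lru(new);
 *	folio_unlock(old);
 */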

noinline int __filemap_add_folio(struct address_space *mapping,
		struct folio *folio, pgoff_t index, gfp_t gfp, void **shadowp)
{
	XA_STATE(xas, &mapping->i_pages, index);
	int huge = folio_test_hugetlb(folio);
	bool charged = false;
	long nr = 1;

	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	VM_BUG_ON_FOLIO(folio_test_swapbacked(folio), folio);
	mapping_set_update(&xas, mapping);

	if (!huge) {
		int error = mem_cgroup_charge(folio, NULL, gfp);
		VM_BUG_ON_FOLIO(index & (folio_nr_pages(folio) - 1), folio);
		if (error)
			return error;
		charged = true;
		xas_set_order(&xas, index, folio_order(folio));
		nr = folio_nr_pages(folio);
	}

	gfp &= GFP_RECLAIM_MASK;
	folio_ref_add(folio, nr);
	folio->mapping = mapping;
	folio->index = xas.xa_index;

	do {
		unsigned int order = xa_get_order(xas.xa, xas.xa_index);
		void *entry, *old = NULL;

		if (order > folio_order(folio))
			xas_split_alloc(&xas, xa_load(xas.xa, xas.xa_index),
					order, gfp);
		xas_lock_irq(&xas);
		xas_for_each_conflict(&xas, entry) {
			old = entry;
			if (!xa_is_value(entry)) {
				xas_set_err(&xas, -EEXIST);
				goto unlock;
			}
		}

		if (old) {
			if (shadowp)
				*shadowp = old;
			/* entry may have been split before we acquired lock */
			order = xa_get_order(xas.xa, xas.xa_index);
			if (order > folio_order(folio)) {
				/* How to handle large swap entries? */
				BUG_ON(shmem_mapping(mapping));
				xas_split(&xas, old, order);
				xas_reset(&xas);
			}
		}

		xas_store(&xas, folio);
		if (xas_error(&xas))
			goto unlock;

		mapping->nrpages += nr;

		/* hugetlb pages do not participate in page cache accounting */
		if (!huge) {
			__lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr);
			if (folio_test_pmd_mappable(folio))
				__lruvec_stat_mod_folio(folio,
						NR_FILE_THPS, nr);
		}
unlock:
		xas_unlock_irq(&xas);
	} while (xas_nomem(&xas, gfp));

	if (xas_error(&xas))
		goto error;

	trace_mm_filemap_add_to_page_cache(folio);
	return 0;
error:
	if (charged)
		mem_cgroup_uncharge(folio);
	folio->mapping = NULL;
	/* Leave page->index set: truncation relies upon it */
	folio_put_refs(folio, nr);
	return xas_error(&xas);
}
ALLOW_ERROR_INJECTION(__filemap_add_folio, ERRNO);

int filemap_add_folio(struct address_space *mapping, struct folio *folio,
				pgoff_t index, gfp_t gfp)
{
	void *shadow = NULL;
	int ret;

	__folio_set_locked(folio);
	ret = __filemap_add_folio(mapping, folio, index, gfp, &shadow);
	if (unlikely(ret))
		__folio_clear_locked(folio);
	else {
		/*
		 * The folio might have been evicted from cache only
		 * recently, in which case it should be activated like
		 * any other repeatedly accessed folio.
		 * The exception is folios getting rewritten; evicting other
		 * data from the working set, only to cache data that will
		 * get overwritten with something else, is a waste of memory.
		 */
		WARN_ON_ONCE(folio_test_active(folio));
		if (!(gfp & __GFP_WRITE) && shadow)
			workingset_refault(folio, shadow);
		folio_add_lru(folio);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(filemap_add_folio);
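
/*
 * Illustrative sketch (editorial addition): the common allocate-then-
 * insert pattern used by read and readahead paths:
 *
 *	folio = filemap_alloc_folio(mapping_gfp_mask(mapping), 0);
 *	if (!folio)
 *		return -ENOMEM;
 *	err = filemap_add_folio(mapping, folio, index, GFP_KERNEL);
 *	if (err) {
 *		folio_put(folio);
 *		return err;	// -EEXIST if another task won the race
 *	}
 */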

#ifdef CONFIG_NUMA
struct folio *filemap_alloc_folio(gfp_t gfp, unsigned int order)
{
	int n;
	struct folio *folio;

	if (cpuset_do_page_mem_spread()) {
		unsigned int cpuset_mems_cookie;
		do {
			cpuset_mems_cookie = read_mems_allowed_begin();
			n = cpuset_mem_spread_node();
			folio = __folio_alloc_node(gfp, order, n);
		} while (!folio && read_mems_allowed_retry(cpuset_mems_cookie));

		return folio;
	}
	return folio_alloc(gfp, order);
}
EXPORT_SYMBOL(filemap_alloc_folio);
#endif

/*
 * filemap_invalidate_lock_two - lock invalidate_lock for two mappings
 *
 * Take invalidate_lock exclusively on each passed mapping that is not NULL.
 *
 * @mapping1: the first mapping to lock
 * @mapping2: the second mapping to lock
 */
void filemap_invalidate_lock_two(struct address_space *mapping1,
				 struct address_space *mapping2)
{
	if (mapping1 > mapping2)
		swap(mapping1, mapping2);
	if (mapping1)
		down_write(&mapping1->invalidate_lock);
	if (mapping2 && mapping1 != mapping2)
		down_write_nested(&mapping2->invalidate_lock, 1);
}
EXPORT_SYMBOL(filemap_invalidate_lock_two);

/*
 * filemap_invalidate_unlock_two - unlock invalidate_lock for two mappings
 *
 * Release the exclusive invalidate_lock of each passed mapping that is not
 * NULL.
 *
 * @mapping1: the first mapping to unlock
 * @mapping2: the second mapping to unlock
 */
100962306a36Sopenharmony_civoid filemap_invalidate_unlock_two(struct address_space *mapping1,
101062306a36Sopenharmony_ci				   struct address_space *mapping2)
101162306a36Sopenharmony_ci{
101262306a36Sopenharmony_ci	if (mapping1)
101362306a36Sopenharmony_ci		up_write(&mapping1->invalidate_lock);
101462306a36Sopenharmony_ci	if (mapping2 && mapping1 != mapping2)
101562306a36Sopenharmony_ci		up_write(&mapping2->invalidate_lock);
101662306a36Sopenharmony_ci}
101762306a36Sopenharmony_ciEXPORT_SYMBOL(filemap_invalidate_unlock_two);
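
/*
 * Example (sketch): an operation that invalidates page cache ranges in
 * two files at once (say, a cross-file deduplication helper; the names
 * src and dst are illustrative) takes both locks in one call and
 * releases them the same way, letting the pointer comparison above
 * provide a stable lock ordering:
 *
 *	filemap_invalidate_lock_two(src->i_mapping, dst->i_mapping);
 *	... truncate or invalidate ranges of both mappings ...
 *	filemap_invalidate_unlock_two(src->i_mapping, dst->i_mapping);
 */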

/*
 * In order to wait for pages to become available there must be
 * waitqueues associated with pages.  Rather than embedding a waitqueue
 * in every page, we use a small hash table of waitqueues: all waiters
 * for pages that hash to the same bucket share one queue, every waiter
 * on that queue is woken when any of those pages becomes available, and
 * each woken context re-checks that the page it cares about actually
 * became available.  This saves space at the cost of a "thundering
 * herd" during rare hash collisions.
 */
#define PAGE_WAIT_TABLE_BITS 8
#define PAGE_WAIT_TABLE_SIZE (1 << PAGE_WAIT_TABLE_BITS)
static wait_queue_head_t folio_wait_table[PAGE_WAIT_TABLE_SIZE] __cacheline_aligned;

static wait_queue_head_t *folio_waitqueue(struct folio *folio)
{
	return &folio_wait_table[hash_ptr(folio, PAGE_WAIT_TABLE_BITS)];
}

void __init pagecache_init(void)
{
	int i;

	for (i = 0; i < PAGE_WAIT_TABLE_SIZE; i++)
		init_waitqueue_head(&folio_wait_table[i]);

	page_writeback_init();
}

/*
 * The page wait code treats the "wait->flags" somewhat unusually, because
 * we have multiple different kinds of waits, not just the usual "exclusive"
 * one.
 *
 * We have:
 *
 *  (a) no special bits set:
 *
 *	We're just waiting for the bit to be released, and when a waker
 *	calls the wakeup function, we set WQ_FLAG_WOKEN and wake it up,
 *	and remove it from the wait queue.
 *
 *	Simple and straightforward.
 *
 *  (b) WQ_FLAG_EXCLUSIVE:
 *
 *	The waiter is waiting to get the lock, and only one waiter should
 *	be woken up to avoid any thundering herd behavior. We'll set the
 *	WQ_FLAG_WOKEN bit, wake it up, and remove it from the wait queue.
 *
 *	This is the traditional exclusive wait.
 *
 *  (c) WQ_FLAG_EXCLUSIVE | WQ_FLAG_CUSTOM:
 *
 *	The waiter is waiting to get the bit, and additionally wants the
 *	lock to be transferred to it for fair lock behavior. If the lock
 *	cannot be taken, we stop walking the wait queue without waking
 *	the waiter.
 *
 *	This is the "fair lock handoff" case, and in addition to setting
 *	WQ_FLAG_WOKEN, we set WQ_FLAG_DONE to let the waiter easily see
 *	that it now has the lock.
 */
static int wake_page_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *arg)
{
	unsigned int flags;
	struct wait_page_key *key = arg;
	struct wait_page_queue *wait_page
		= container_of(wait, struct wait_page_queue, wait);

	if (!wake_page_match(wait_page, key))
		return 0;

	/*
	 * If it's a lock handoff wait, we get the bit for it, and
	 * stop walking (and do not wake it up) if we can't.
	 */
	flags = wait->flags;
	if (flags & WQ_FLAG_EXCLUSIVE) {
		if (test_bit(key->bit_nr, &key->folio->flags))
			return -1;
		if (flags & WQ_FLAG_CUSTOM) {
			if (test_and_set_bit(key->bit_nr, &key->folio->flags))
				return -1;
			flags |= WQ_FLAG_DONE;
		}
	}

	/*
	 * We are holding the wait-queue lock, but the waiter that
	 * is waiting for this will be checking the flags without
	 * any locking.
	 *
	 * So update the flags atomically, and wake up the waiter
	 * afterwards to avoid any races. This store-release pairs
	 * with the load-acquire in folio_wait_bit_common().
	 */
	smp_store_release(&wait->flags, flags | WQ_FLAG_WOKEN);
	wake_up_state(wait->private, mode);

	/*
	 * Ok, we have successfully done what we're waiting for,
	 * and we can unconditionally remove the wait entry.
	 *
	 * Note that this pairs with the "finish_wait()" in the
	 * waiter, and has to be the absolute last thing we do.
	 * After this list_del_init(&wait->entry) the wait entry
	 * might be de-allocated and the process might even have
	 * exited.
	 */
	list_del_init_careful(&wait->entry);
	return (flags & WQ_FLAG_EXCLUSIVE) != 0;
}

static void folio_wake_bit(struct folio *folio, int bit_nr)
{
	wait_queue_head_t *q = folio_waitqueue(folio);
	struct wait_page_key key;
	unsigned long flags;
	wait_queue_entry_t bookmark;

	key.folio = folio;
	key.bit_nr = bit_nr;
	key.page_match = 0;

	bookmark.flags = 0;
	bookmark.private = NULL;
	bookmark.func = NULL;
	INIT_LIST_HEAD(&bookmark.entry);

	spin_lock_irqsave(&q->lock, flags);
	__wake_up_locked_key_bookmark(q, TASK_NORMAL, &key, &bookmark);

	while (bookmark.flags & WQ_FLAG_BOOKMARK) {
		/*
		 * Take a breather from holding the lock, and
		 * allow waiters that finished waking up
		 * asynchronously to acquire the lock and remove
		 * themselves from the wait queue.
		 */
		spin_unlock_irqrestore(&q->lock, flags);
		cpu_relax();
		spin_lock_irqsave(&q->lock, flags);
		__wake_up_locked_key_bookmark(q, TASK_NORMAL, &key, &bookmark);
	}

	/*
	 * It's possible to miss clearing waiters here, when we woke our page
	 * waiters, but the hashed waitqueue has waiters for other pages on it.
	 * That's okay, it's a rare case. The next waker will clear it.
	 *
	 * Note that, depending on the page pool (buddy, hugetlb, ZONE_DEVICE,
	 * other), the flag may be cleared in the course of freeing the page;
	 * but that is not required for correctness.
	 */
	if (!waitqueue_active(q) || !key.page_match)
		folio_clear_waiters(folio);

	spin_unlock_irqrestore(&q->lock, flags);
}

static void folio_wake(struct folio *folio, int bit)
{
	if (!folio_test_waiters(folio))
		return;
	folio_wake_bit(folio, bit);
}

/*
 * A choice of three behaviors for folio_wait_bit_common():
 */
enum behavior {
	EXCLUSIVE,	/* Hold ref to page and take the bit when woken, like
			 * __folio_lock() waiting on then setting PG_locked.
			 */
	SHARED,		/* Hold ref to page and check the bit when woken, like
			 * folio_wait_writeback() waiting on PG_writeback.
			 */
	DROP,		/* Drop ref to page before wait, no check when woken,
			 * like folio_put_wait_locked() on PG_locked.
			 */
};

/*
 * Attempt to check (or get) the folio flag, and mark us done
 * if successful.
 */
static inline bool folio_trylock_flag(struct folio *folio, int bit_nr,
					struct wait_queue_entry *wait)
{
	if (wait->flags & WQ_FLAG_EXCLUSIVE) {
		if (test_and_set_bit(bit_nr, &folio->flags))
			return false;
	} else if (test_bit(bit_nr, &folio->flags))
		return false;

	wait->flags |= WQ_FLAG_WOKEN | WQ_FLAG_DONE;
	return true;
}

/* How many times do we accept lock stealing from under a waiter? */
int sysctl_page_lock_unfairness = 5;
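
/*
 * Example (sketch): this bound is exposed as a sysctl, so an
 * administrator who wants strictly fair page-lock handoff can disable
 * lock stealing entirely from userspace:
 *
 *	# sysctl vm.page_lock_unfairness=0
 *
 * Larger values allow more lock steals before a waiter insists on a
 * fair handoff via WQ_FLAG_CUSTOM.
 */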

static inline int folio_wait_bit_common(struct folio *folio, int bit_nr,
		int state, enum behavior behavior)
{
	wait_queue_head_t *q = folio_waitqueue(folio);
	int unfairness = sysctl_page_lock_unfairness;
	struct wait_page_queue wait_page;
	wait_queue_entry_t *wait = &wait_page.wait;
	bool thrashing = false;
	unsigned long pflags;
	bool in_thrashing;

	if (bit_nr == PG_locked &&
	    !folio_test_uptodate(folio) && folio_test_workingset(folio)) {
		delayacct_thrashing_start(&in_thrashing);
		psi_memstall_enter(&pflags);
		thrashing = true;
	}

	init_wait(wait);
	wait->func = wake_page_function;
	wait_page.folio = folio;
	wait_page.bit_nr = bit_nr;

repeat:
	wait->flags = 0;
	if (behavior == EXCLUSIVE) {
		wait->flags = WQ_FLAG_EXCLUSIVE;
		if (--unfairness < 0)
			wait->flags |= WQ_FLAG_CUSTOM;
	}

	/*
	 * Do one last check whether we can get the
	 * page bit synchronously.
	 *
	 * Do the folio_set_waiters() marking before that
	 * to let any waker we _just_ missed know they
	 * need to wake us up (otherwise they'll never
	 * even go to the slow case that looks at the
	 * page queue), and add ourselves to the wait
	 * queue if we need to sleep.
	 *
	 * This part needs to be done under the queue
	 * lock to avoid races.
	 */
	spin_lock_irq(&q->lock);
	folio_set_waiters(folio);
	if (!folio_trylock_flag(folio, bit_nr, wait))
		__add_wait_queue_entry_tail(q, wait);
	spin_unlock_irq(&q->lock);

	/*
	 * From now on, all the logic will be based on
	 * the WQ_FLAG_WOKEN and WQ_FLAG_DONE flag, to
	 * see whether the page bit testing has already
	 * been done by the wake function.
	 *
	 * We can drop our reference to the folio.
	 */
	if (behavior == DROP)
		folio_put(folio);

	/*
	 * Note that until the "finish_wait()", or until
	 * we see the WQ_FLAG_WOKEN flag, we need to
	 * be very careful with the 'wait->flags', because
	 * we may race with a waker that sets them.
	 */
	for (;;) {
		unsigned int flags;

		set_current_state(state);

		/* Loop until we've been woken or interrupted */
		flags = smp_load_acquire(&wait->flags);
		if (!(flags & WQ_FLAG_WOKEN)) {
			if (signal_pending_state(state, current))
				break;

			io_schedule();
			continue;
		}

		/* If we were non-exclusive, we're done */
		if (behavior != EXCLUSIVE)
			break;

		/* If the waker got the lock for us, we're done */
		if (flags & WQ_FLAG_DONE)
			break;

		/*
		 * Otherwise, if we're getting the lock, we need to
		 * try to get it ourselves.
		 *
		 * And if that fails, we'll have to retry this all.
		 */
		if (unlikely(test_and_set_bit(bit_nr, folio_flags(folio, 0))))
			goto repeat;

		wait->flags |= WQ_FLAG_DONE;
		break;
	}

	/*
	 * If a signal happened, this 'finish_wait()' may remove the last
	 * waiter from the wait-queues, but the folio waiters bit will remain
	 * set. That's ok. The next wakeup will take care of it, and trying
	 * to do it here would be difficult and prone to races.
	 */
	finish_wait(q, wait);

	if (thrashing) {
		delayacct_thrashing_end(&in_thrashing);
		psi_memstall_leave(&pflags);
	}

	/*
	 * NOTE! The wait->flags weren't stable until we've done the
	 * 'finish_wait()', and we could have exited the loop above due
	 * to a signal, and had a wakeup event happen after the signal
	 * test but before the 'finish_wait()'.
	 *
	 * So only after the finish_wait() can we reliably determine
	 * if we got woken up or not, so we can now figure out the final
	 * return value based on that state without races.
	 *
	 * Also note that WQ_FLAG_WOKEN is sufficient for a non-exclusive
	 * waiter, but an exclusive one requires WQ_FLAG_DONE.
	 */
	if (behavior == EXCLUSIVE)
		return wait->flags & WQ_FLAG_DONE ? 0 : -EINTR;

	return wait->flags & WQ_FLAG_WOKEN ? 0 : -EINTR;
}

#ifdef CONFIG_MIGRATION
/**
 * migration_entry_wait_on_locked - Wait for a migration entry to be removed
 * @entry: migration swap entry.
 * @ptl: already locked ptl. This function will drop the lock.
 *
 * Wait for a migration entry referencing the given page to be removed. This is
 * equivalent to put_and_wait_on_page_locked(page, TASK_UNINTERRUPTIBLE) except
 * this can be called without taking a reference on the page. Instead this
 * should be called while holding the ptl for the migration entry referencing
 * the page.
 *
 * Returns after unlocking the ptl.
 *
 * This follows the same logic as folio_wait_bit_common() so see the comments
 * there.
 */
void migration_entry_wait_on_locked(swp_entry_t entry, spinlock_t *ptl)
	__releases(ptl)
{
	struct wait_page_queue wait_page;
	wait_queue_entry_t *wait = &wait_page.wait;
	bool thrashing = false;
	unsigned long pflags;
	bool in_thrashing;
	wait_queue_head_t *q;
	struct folio *folio = page_folio(pfn_swap_entry_to_page(entry));

	q = folio_waitqueue(folio);
	if (!folio_test_uptodate(folio) && folio_test_workingset(folio)) {
		delayacct_thrashing_start(&in_thrashing);
		psi_memstall_enter(&pflags);
		thrashing = true;
	}

	init_wait(wait);
	wait->func = wake_page_function;
	wait_page.folio = folio;
	wait_page.bit_nr = PG_locked;
	wait->flags = 0;

	spin_lock_irq(&q->lock);
	folio_set_waiters(folio);
	if (!folio_trylock_flag(folio, PG_locked, wait))
		__add_wait_queue_entry_tail(q, wait);
	spin_unlock_irq(&q->lock);

	/*
	 * If a migration entry exists for the page the migration path must hold
	 * a valid reference to the page, and it must take the ptl to remove the
	 * migration entry. So the page is valid until the ptl is dropped.
	 */
	spin_unlock(ptl);

	for (;;) {
		unsigned int flags;

		set_current_state(TASK_UNINTERRUPTIBLE);

		/* Loop until we've been woken or interrupted */
		flags = smp_load_acquire(&wait->flags);
		if (!(flags & WQ_FLAG_WOKEN)) {
			if (signal_pending_state(TASK_UNINTERRUPTIBLE, current))
				break;

			io_schedule();
			continue;
		}
		break;
	}

	finish_wait(q, wait);

	if (thrashing) {
		delayacct_thrashing_end(&in_thrashing);
		psi_memstall_leave(&pflags);
	}
}
#endif

void folio_wait_bit(struct folio *folio, int bit_nr)
{
	folio_wait_bit_common(folio, bit_nr, TASK_UNINTERRUPTIBLE, SHARED);
}
EXPORT_SYMBOL(folio_wait_bit);

int folio_wait_bit_killable(struct folio *folio, int bit_nr)
{
	return folio_wait_bit_common(folio, bit_nr, TASK_KILLABLE, SHARED);
}
EXPORT_SYMBOL(folio_wait_bit_killable);
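
/*
 * Example (sketch): a killable wait for writeback to finish on a folio
 * the caller holds a reference to; on a fatal signal the wait returns
 * -EINTR, which is propagated here.  (This is essentially what
 * folio_wait_writeback_killable() does.)
 *
 *	if (folio_test_writeback(folio)) {
 *		int err = folio_wait_bit_killable(folio, PG_writeback);
 *
 *		if (err)
 *			return err;
 *	}
 */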

/**
 * folio_put_wait_locked - Drop a reference and wait for it to be unlocked
 * @folio: The folio to wait for.
 * @state: The sleep state (TASK_KILLABLE, TASK_UNINTERRUPTIBLE, etc).
 *
 * The caller should hold a reference on @folio.  They expect the page to
 * become unlocked relatively soon, but do not wish to hold up migration
 * (for example) by holding the reference while waiting for the folio to
 * come unlocked.  After this function returns, the caller should not
 * dereference @folio.
 *
 * Return: 0 if the folio was unlocked or -EINTR if interrupted by a signal.
 */
static int folio_put_wait_locked(struct folio *folio, int state)
{
	return folio_wait_bit_common(folio, PG_locked, state, DROP);
}

/**
 * folio_add_wait_queue - Add an arbitrary waiter to a folio's wait queue
 * @folio: Folio defining the wait queue of interest
 * @waiter: Waiter to add to the queue
 *
 * Add an arbitrary @waiter to the wait queue for the nominated @folio.
 */
void folio_add_wait_queue(struct folio *folio, wait_queue_entry_t *waiter)
{
	wait_queue_head_t *q = folio_waitqueue(folio);
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	__add_wait_queue_entry_tail(q, waiter);
	folio_set_waiters(folio);
	spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL_GPL(folio_add_wait_queue);

#ifndef clear_bit_unlock_is_negative_byte

/*
 * PG_waiters is the high bit in the same byte as PG_locked.
 *
 * On x86 (and on many other architectures), we can clear PG_locked and
 * test the sign bit at the same time. But if the architecture does
 * not support that special operation, we just do this all by hand
 * instead.
 *
 * The read of PG_waiters has to be after (or concurrently with) PG_locked
 * being cleared, but a memory barrier should be unnecessary since it is
 * in the same byte as PG_locked.
 */
static inline bool clear_bit_unlock_is_negative_byte(long nr, volatile void *mem)
{
	clear_bit_unlock(nr, mem);
	/* smp_mb__after_atomic(); */
	return test_bit(PG_waiters, mem);
}

#endif

/**
 * folio_unlock - Unlock a locked folio.
 * @folio: The folio.
 *
 * Unlocks the folio and wakes up any thread sleeping on the page lock.
 *
 * Context: May be called from interrupt or process context.  May not be
 * called from NMI context.
 */
void folio_unlock(struct folio *folio)
{
	/* Bit 7 allows x86 to check the byte's sign bit */
	BUILD_BUG_ON(PG_waiters != 7);
	BUILD_BUG_ON(PG_locked > 7);
	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	if (clear_bit_unlock_is_negative_byte(PG_locked, folio_flags(folio, 0)))
		folio_wake_bit(folio, PG_locked);
}
EXPORT_SYMBOL(folio_unlock);
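
/*
 * Example (sketch): the common lock/unlock pairing around a short
 * critical section.  folio_trylock() avoids sleeping; __folio_lock()
 * below is the slow path behind folio_lock():
 *
 *	if (folio_trylock(folio)) {
 *		... folio->mapping is stable here, inspect or modify ...
 *		folio_unlock(folio);
 *	}
 */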

/**
 * folio_end_private_2 - Clear PG_private_2 and wake any waiters.
 * @folio: The folio.
 *
 * Clear the PG_private_2 bit on a folio and wake up any sleepers waiting for
 * it.  The folio reference held for PG_private_2 being set is released.
 *
 * This is, for example, used when a netfs folio is being written to a local
 * disk cache, thereby allowing writes to the cache for the same folio to be
 * serialised.
 */
void folio_end_private_2(struct folio *folio)
{
	VM_BUG_ON_FOLIO(!folio_test_private_2(folio), folio);
	clear_bit_unlock(PG_private_2, folio_flags(folio, 0));
	folio_wake_bit(folio, PG_private_2);
	folio_put(folio);
}
EXPORT_SYMBOL(folio_end_private_2);

/**
 * folio_wait_private_2 - Wait for PG_private_2 to be cleared on a folio.
 * @folio: The folio to wait on.
 *
 * Wait for PG_private_2 (aka PG_fscache) to be cleared on a folio.
 */
void folio_wait_private_2(struct folio *folio)
{
	while (folio_test_private_2(folio))
		folio_wait_bit(folio, PG_private_2);
}
EXPORT_SYMBOL(folio_wait_private_2);

/**
 * folio_wait_private_2_killable - Wait for PG_private_2 to be cleared on a folio.
 * @folio: The folio to wait on.
 *
 * Wait for PG_private_2 (aka PG_fscache) to be cleared on a folio or until a
 * fatal signal is received by the calling task.
 *
 * Return:
 * - 0 if successful.
 * - -EINTR if a fatal signal was encountered.
 */
int folio_wait_private_2_killable(struct folio *folio)
{
	int ret = 0;

	while (folio_test_private_2(folio)) {
		ret = folio_wait_bit_killable(folio, PG_private_2);
		if (ret < 0)
			break;
	}

	return ret;
}
EXPORT_SYMBOL(folio_wait_private_2_killable);
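
/*
 * Example (sketch): the PG_private_2 lifecycle as a netfs-style user
 * might drive it.  The setter takes a folio reference, because
 * folio_end_private_2() drops one; my_start_cache_write() is a
 * hypothetical stand-in for whatever kicks off the async write to the
 * local cache:
 *
 *	folio_get(folio);
 *	folio_set_private_2(folio);
 *	my_start_cache_write(folio);
 *
 * and on I/O completion:
 *
 *	folio_end_private_2(folio);
 *
 * Anyone who needs the cache write to be finished first calls
 * folio_wait_private_2(folio) (or the killable variant).
 */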

/**
 * folio_end_writeback - End writeback against a folio.
 * @folio: The folio.
 */
void folio_end_writeback(struct folio *folio)
{
	/*
	 * folio_test_clear_reclaim() could be used here, but it is an
	 * atomic operation and overkill in this particular case.  The
	 * occasional failure to shuffle a folio marked for immediate
	 * reclaim is harmless, and avoiding it is too mild a gain to
	 * justify paying an atomic-operation penalty at the end of
	 * every folio writeback.
	 */
	if (folio_test_reclaim(folio)) {
		folio_clear_reclaim(folio);
		folio_rotate_reclaimable(folio);
	}

	/*
	 * Writeback does not hold a folio reference of its own, relying
	 * on truncation to wait for the clearing of PG_writeback.
	 * But here we must make sure that the folio is not freed and
	 * reused before the folio_wake().
	 */
	folio_get(folio);
	if (!__folio_end_writeback(folio))
		BUG();

	smp_mb__after_atomic();
	folio_wake(folio, PG_writeback);
	acct_reclaim_writeback(folio);
	folio_put(folio);
}
EXPORT_SYMBOL(folio_end_writeback);
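
/*
 * Example (sketch): how this pairs with the start of writeback in a
 * filesystem's writepages path.  my_submit_bio() and the completion
 * hook are hypothetical stand-ins for the filesystem's own I/O
 * plumbing:
 *
 *	folio_start_writeback(folio);
 *	folio_unlock(folio);
 *	my_submit_bio(folio);
 *
 * and in the bio completion handler:
 *
 *	folio_end_writeback(folio);
 *
 * Waiters such as folio_wait_writeback() sleep on PG_writeback and are
 * woken by the folio_wake() above.
 */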

/**
 * __folio_lock - Get a lock on the folio, assuming we need to sleep to get it.
 * @folio: The folio to lock
 */
void __folio_lock(struct folio *folio)
{
	folio_wait_bit_common(folio, PG_locked, TASK_UNINTERRUPTIBLE,
				EXCLUSIVE);
}
EXPORT_SYMBOL(__folio_lock);

int __folio_lock_killable(struct folio *folio)
{
	return folio_wait_bit_common(folio, PG_locked, TASK_KILLABLE,
					EXCLUSIVE);
}
EXPORT_SYMBOL_GPL(__folio_lock_killable);

static int __folio_lock_async(struct folio *folio, struct wait_page_queue *wait)
{
	struct wait_queue_head *q = folio_waitqueue(folio);
	int ret = 0;

	wait->folio = folio;
	wait->bit_nr = PG_locked;

	spin_lock_irq(&q->lock);
	__add_wait_queue_entry_tail(q, &wait->wait);
	folio_set_waiters(folio);
	ret = !folio_trylock(folio);
	/*
	 * If we were successful now, we know we're still on the
	 * waitqueue as we're still under the lock. This means it's
	 * safe to remove and return success, we know the callback
	 * isn't going to trigger.
	 */
	if (!ret)
		__remove_wait_queue(q, &wait->wait);
	else
		ret = -EIOCBQUEUED;
	spin_unlock_irq(&q->lock);
	return ret;
}

/*
 * Return values:
 * 0 - folio is locked.
 * non-zero - folio is not locked.
 *     mmap_lock or per-VMA lock has been released (mmap_read_unlock() or
 *     vma_end_read()), unless flags had both FAULT_FLAG_ALLOW_RETRY and
 *     FAULT_FLAG_RETRY_NOWAIT set, in which case the lock is still held.
 *
 * If neither ALLOW_RETRY nor KILLABLE are set, will always return 0
 * with the folio locked and the mmap_lock/per-VMA lock is left unperturbed.
 */
vm_fault_t __folio_lock_or_retry(struct folio *folio, struct vm_fault *vmf)
{
	unsigned int flags = vmf->flags;

	if (fault_flag_allow_retry_first(flags)) {
		/*
		 * CAUTION! In this case, mmap_lock/per-VMA lock is not
		 * released even though returning VM_FAULT_RETRY.
		 */
		if (flags & FAULT_FLAG_RETRY_NOWAIT)
			return VM_FAULT_RETRY;

		release_fault_lock(vmf);
		if (flags & FAULT_FLAG_KILLABLE)
			folio_wait_locked_killable(folio);
		else
			folio_wait_locked(folio);
		return VM_FAULT_RETRY;
	}
	if (flags & FAULT_FLAG_KILLABLE) {
		bool ret;

		ret = __folio_lock_killable(folio);
		if (ret) {
			release_fault_lock(vmf);
			return VM_FAULT_RETRY;
		}
	} else {
		__folio_lock(folio);
	}

	return 0;
}
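
/*
 * Example (sketch): the fault path reaches this function through the
 * folio_lock_or_retry() wrapper; a nonzero return is VM_FAULT_RETRY and
 * means the mmap/per-VMA lock may already have been released:
 *
 *	vm_fault_t ret = folio_lock_or_retry(folio, vmf);
 *
 *	if (ret) {
 *		folio_put(folio);
 *		return ret;
 *	}
 *	... the folio is now locked, handle the fault ...
 */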

/**
 * page_cache_next_miss() - Find the next gap in the page cache.
 * @mapping: Mapping.
 * @index: Index.
 * @max_scan: Maximum range to search.
 *
 * Search the range [index, min(index + max_scan - 1, ULONG_MAX)] for the
 * gap with the lowest index.
 *
 * This function may be called under the rcu_read_lock.  However, this will
 * not atomically search a snapshot of the cache at a single point in time.
 * For example, if a gap is created at index 5, then subsequently a gap is
 * created at index 10, page_cache_next_miss() covering both indices may
 * return 10 if called under the rcu_read_lock.
 *
 * Return: The index of the gap if found, otherwise an index outside the
 * range specified (in which case 'return - index >= max_scan' will be true).
 * In the rare case of index wrap-around, 0 will be returned.
 */
pgoff_t page_cache_next_miss(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan)
{
	XA_STATE(xas, &mapping->i_pages, index);

	while (max_scan--) {
		void *entry = xas_next(&xas);
		if (!entry || xa_is_value(entry))
			break;
		if (xas.xa_index == 0)
			break;
	}

	return xas.xa_index;
}
EXPORT_SYMBOL(page_cache_next_miss);
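
/*
 * Example (sketch): sizing a readahead-style scan, following the
 * contract documented above: if the returned index is at least
 * max_scan beyond @index, no gap was found in the scanned range:
 *
 *	pgoff_t gap = page_cache_next_miss(mapping, index, 16);
 *
 *	if (gap - index >= 16)
 *		... all 16 indices starting at index are populated ...
 *	else
 *		... gap is the first hole at or after index ...
 */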

/**
 * page_cache_prev_miss() - Find the previous gap in the page cache.
 * @mapping: Mapping.
 * @index: Index.
 * @max_scan: Maximum range to search.
 *
 * Search the range [max(index - max_scan + 1, 0), index] for the
 * gap with the highest index.
 *
 * This function may be called under the rcu_read_lock.  However, this will
 * not atomically search a snapshot of the cache at a single point in time.
 * For example, if a gap is created at index 10, then subsequently a gap is
 * created at index 5, page_cache_prev_miss() covering both indices may
 * return 5 if called under the rcu_read_lock.
 *
 * Return: The index of the gap if found, otherwise an index outside the
 * range specified (in which case 'index - return >= max_scan' will be true).
 * In the rare case of wrap-around, ULONG_MAX will be returned.
 */
pgoff_t page_cache_prev_miss(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan)
{
	XA_STATE(xas, &mapping->i_pages, index);

	while (max_scan--) {
		void *entry = xas_prev(&xas);
		if (!entry || xa_is_value(entry))
			break;
		if (xas.xa_index == ULONG_MAX)
			break;
	}

	return xas.xa_index;
}
EXPORT_SYMBOL(page_cache_prev_miss);

/*
 * Lockless page cache protocol:
 * On the lookup side:
 * 1. Load the folio from i_pages
 * 2. Increment the refcount if it's not zero
 * 3. If the folio is not found by xas_reload(), put the refcount and retry
 *
 * On the removal side:
 * A. Freeze the page (by zeroing the refcount if nobody else has a reference)
 * B. Remove the page from i_pages
 * C. Return the page to the page allocator
 *
 * This means that any page may have its reference count temporarily
 * increased by a speculative page cache (or fast GUP) lookup as it can
 * be allocated by another user before the RCU grace period expires.
 * Because the refcount temporarily acquired here may end up being the
 * last refcount on the page, any page allocation must be freeable by
 * folio_put().
 */

/*
 * filemap_get_entry - Get a page cache entry.
 * @mapping: the address_space to search
 * @index: The page cache index.
 *
 * Looks up the page cache entry at @mapping & @index.  If it is a folio,
 * it is returned with an increased refcount.  If it is a shadow entry
 * of a previously evicted folio, or a swap entry from shmem/tmpfs,
 * it is returned without further action.
 *
 * Return: The folio, swap or shadow entry, %NULL if nothing is found.
 */
void *filemap_get_entry(struct address_space *mapping, pgoff_t index)
{
	XA_STATE(xas, &mapping->i_pages, index);
	struct folio *folio;

	rcu_read_lock();
repeat:
	xas_reset(&xas);
	folio = xas_load(&xas);
	if (xas_retry(&xas, folio))
		goto repeat;
	/*
	 * A shadow entry of a recently evicted page, or a swap entry from
	 * shmem/tmpfs.  Return it without attempting to raise page count.
	 */
	if (!folio || xa_is_value(folio))
		goto out;

	if (!folio_try_get_rcu(folio))
		goto repeat;

	if (unlikely(folio != xas_reload(&xas))) {
		folio_put(folio);
		goto repeat;
	}
out:
	rcu_read_unlock();

	return folio;
}
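
/*
 * Example (sketch): a caller must be prepared for all three return
 * cases; only a real folio carries a reference that must be dropped:
 *
 *	void *entry = filemap_get_entry(mapping, index);
 *
 *	if (!entry)
 *		... nothing cached at index ...
 *	else if (xa_is_value(entry))
 *		... shadow/swap entry: examine it, never dereference ...
 *	else {
 *		struct folio *folio = entry;
 *
 *		... use the folio ...
 *		folio_put(folio);
 *	}
 */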

/**
 * __filemap_get_folio - Find and get a reference to a folio.
 * @mapping: The address_space to search.
 * @index: The page index.
 * @fgp_flags: %FGP flags modify how the folio is returned.
 * @gfp: Memory allocation flags to use if %FGP_CREAT is specified.
 *
 * Looks up the page cache entry at @mapping & @index.
 *
 * If %FGP_LOCK or %FGP_CREAT are specified then the function may sleep even
 * if the %GFP flags specified for %FGP_CREAT are atomic.
 *
 * If this function returns a folio, it is returned with an increased refcount.
 *
 * Return: The found folio or an ERR_PTR() otherwise.
 */
struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index,
		fgf_t fgp_flags, gfp_t gfp)
{
	struct folio *folio;

repeat:
	folio = filemap_get_entry(mapping, index);
	if (xa_is_value(folio))
		folio = NULL;
	if (!folio)
		goto no_page;

	if (fgp_flags & FGP_LOCK) {
		if (fgp_flags & FGP_NOWAIT) {
			if (!folio_trylock(folio)) {
				folio_put(folio);
				return ERR_PTR(-EAGAIN);
			}
		} else {
			folio_lock(folio);
		}

		/* Has the page been truncated? */
		if (unlikely(folio->mapping != mapping)) {
			folio_unlock(folio);
			folio_put(folio);
			goto repeat;
		}
		VM_BUG_ON_FOLIO(!folio_contains(folio, index), folio);
	}

	if (fgp_flags & FGP_ACCESSED)
		folio_mark_accessed(folio);
	else if (fgp_flags & FGP_WRITE) {
		/* Clear idle flag for buffer write */
		if (folio_test_idle(folio))
			folio_clear_idle(folio);
	}

	if (fgp_flags & FGP_STABLE)
		folio_wait_stable(folio);
no_page:
	if (!folio && (fgp_flags & FGP_CREAT)) {
		unsigned order = FGF_GET_ORDER(fgp_flags);
		int err;

		if ((fgp_flags & FGP_WRITE) && mapping_can_writeback(mapping))
			gfp |= __GFP_WRITE;
		if (fgp_flags & FGP_NOFS)
			gfp &= ~__GFP_FS;
		if (fgp_flags & FGP_NOWAIT) {
			gfp &= ~GFP_KERNEL;
			gfp |= GFP_NOWAIT | __GFP_NOWARN;
		}
		if (WARN_ON_ONCE(!(fgp_flags & (FGP_LOCK | FGP_FOR_MMAP))))
			fgp_flags |= FGP_LOCK;

		if (!mapping_large_folio_support(mapping))
			order = 0;
		if (order > MAX_PAGECACHE_ORDER)
			order = MAX_PAGECACHE_ORDER;
		/* If we're not aligned, allocate a smaller folio */
		if (index & ((1UL << order) - 1))
			order = __ffs(index);

		do {
			gfp_t alloc_gfp = gfp;

			err = -ENOMEM;
			/* Order-1 folios are not supported in the page cache */
			if (order == 1)
				order = 0;
			if (order > 0)
				alloc_gfp |= __GFP_NORETRY | __GFP_NOWARN;
			folio = filemap_alloc_folio(alloc_gfp, order);
			if (!folio)
				continue;

			/* Init accessed so avoid atomic mark_page_accessed later */
			if (fgp_flags & FGP_ACCESSED)
				__folio_set_referenced(folio);

			err = filemap_add_folio(mapping, folio, index, gfp);
			if (!err)
				break;
			folio_put(folio);
			folio = NULL;
		} while (order-- > 0);

		if (err == -EEXIST)
			goto repeat;
		if (err)
			return ERR_PTR(err);
		/*
		 * filemap_add_folio locks the page, and for mmap
		 * we expect an unlocked page.
		 */
		if (folio && (fgp_flags & FGP_FOR_MMAP))
			folio_unlock(folio);
	}

	if (!folio)
		return ERR_PTR(-ENOENT);
	return folio;
}
EXPORT_SYMBOL(__filemap_get_folio);
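
/*
 * Example (sketch): the typical "find or create, locked" pattern used
 * by write paths, driven through the FGP flags:
 *
 *	struct folio *folio;
 *
 *	folio = __filemap_get_folio(mapping, index,
 *			FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
 *			mapping_gfp_mask(mapping));
 *	if (IS_ERR(folio))
 *		return PTR_ERR(folio);
 *	... folio is locked and referenced here ...
 *	folio_unlock(folio);
 *	folio_put(folio);
 */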
196862306a36Sopenharmony_ci
196962306a36Sopenharmony_cistatic inline struct folio *find_get_entry(struct xa_state *xas, pgoff_t max,
197062306a36Sopenharmony_ci		xa_mark_t mark)
197162306a36Sopenharmony_ci{
197262306a36Sopenharmony_ci	struct folio *folio;
197362306a36Sopenharmony_ci
197462306a36Sopenharmony_ciretry:
197562306a36Sopenharmony_ci	if (mark == XA_PRESENT)
197662306a36Sopenharmony_ci		folio = xas_find(xas, max);
197762306a36Sopenharmony_ci	else
197862306a36Sopenharmony_ci		folio = xas_find_marked(xas, max, mark);
197962306a36Sopenharmony_ci
198062306a36Sopenharmony_ci	if (xas_retry(xas, folio))
198162306a36Sopenharmony_ci		goto retry;
198262306a36Sopenharmony_ci	/*
198362306a36Sopenharmony_ci	 * A shadow entry of a recently evicted page, a swap
198462306a36Sopenharmony_ci	 * entry from shmem/tmpfs or a DAX entry.  Return it
198562306a36Sopenharmony_ci	 * without attempting to raise page count.
198662306a36Sopenharmony_ci	 */
198762306a36Sopenharmony_ci	if (!folio || xa_is_value(folio))
198862306a36Sopenharmony_ci		return folio;
198962306a36Sopenharmony_ci
199062306a36Sopenharmony_ci	if (!folio_try_get_rcu(folio))
199162306a36Sopenharmony_ci		goto reset;
199262306a36Sopenharmony_ci
199362306a36Sopenharmony_ci	if (unlikely(folio != xas_reload(xas))) {
199462306a36Sopenharmony_ci		folio_put(folio);
199562306a36Sopenharmony_ci		goto reset;
199662306a36Sopenharmony_ci	}
199762306a36Sopenharmony_ci
199862306a36Sopenharmony_ci	return folio;
199962306a36Sopenharmony_cireset:
200062306a36Sopenharmony_ci	xas_reset(xas);
200162306a36Sopenharmony_ci	goto retry;
200262306a36Sopenharmony_ci}
200362306a36Sopenharmony_ci
200462306a36Sopenharmony_ci/**
200562306a36Sopenharmony_ci * find_get_entries - gang pagecache lookup
200662306a36Sopenharmony_ci * @mapping:	The address_space to search
200762306a36Sopenharmony_ci * @start:	The starting page cache index
200862306a36Sopenharmony_ci * @end:	The final page index (inclusive).
200962306a36Sopenharmony_ci * @fbatch:	Where the resulting entries are placed.
201062306a36Sopenharmony_ci * @indices:	The cache indices corresponding to the entries in @entries
 *
 * find_get_entries() will search for and return a batch of entries in
 * the mapping.  The entries are placed in @fbatch.  find_get_entries()
 * takes a reference on any actual folios it returns.
 *
 * The entries have ascending indexes.  The indices may not be consecutive
 * due to not-present entries or large folios.
 *
 * Any shadow entries of evicted folios, or swap entries from
 * shmem/tmpfs, are included in the returned array.
 *
 * Return: The number of entries which were found.
 */
unsigned find_get_entries(struct address_space *mapping, pgoff_t *start,
		pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices)
{
	XA_STATE(xas, &mapping->i_pages, *start);
	struct folio *folio;

	rcu_read_lock();
	while ((folio = find_get_entry(&xas, end, XA_PRESENT)) != NULL) {
		indices[fbatch->nr] = xas.xa_index;
		if (!folio_batch_add(fbatch, folio))
			break;
	}
	rcu_read_unlock();

	if (folio_batch_count(fbatch)) {
		unsigned long nr = 1;
		int idx = folio_batch_count(fbatch) - 1;

		folio = fbatch->folios[idx];
		if (!xa_is_value(folio) && !folio_test_hugetlb(folio))
			nr = folio_nr_pages(folio);
		*start = indices[idx] + nr;
	}
	return folio_batch_count(fbatch);
}
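
/*
 * Example (illustrative sketch, not built as part of this file): a typical
 * caller walks a range in batches, handling value entries and real folios
 * separately, and releases each batch before continuing.  The handle_*()
 * helpers are hypothetical stand-ins for caller-specific work:
 *
 *	struct folio_batch fbatch;
 *	pgoff_t indices[PAGEVEC_SIZE];
 *	pgoff_t index = 0;
 *	unsigned int i;
 *
 *	folio_batch_init(&fbatch);
 *	while (find_get_entries(mapping, &index, ULONG_MAX, &fbatch,
 *				indices)) {
 *		for (i = 0; i < folio_batch_count(&fbatch); i++) {
 *			struct folio *folio = fbatch.folios[i];
 *
 *			if (xa_is_value(folio))
 *				handle_value_entry(mapping, indices[i], folio);
 *			else
 *				handle_folio(folio);
 *		}
 *		folio_batch_remove_exceptionals(&fbatch);
 *		folio_batch_release(&fbatch);
 *	}
 */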

/**
 * find_lock_entries - Find a batch of pagecache entries.
 * @mapping:	The address_space to search.
 * @start:	The starting page cache index.
 * @end:	The final page index (inclusive).
 * @fbatch:	Where the resulting entries are placed.
 * @indices:	The cache indices of the entries in @fbatch.
 *
 * find_lock_entries() will return a batch of entries from @mapping.
 * Swap, shadow and DAX entries are included.  Folios are returned
 * locked and with an incremented refcount.  Folios which are locked
 * by somebody else or under writeback are skipped.  Folios which are
 * partially outside the range are not returned.
 *
 * The entries have ascending indexes.  The indices may not be consecutive
 * due to not-present entries, large folios, folios which could not be
 * locked or folios under writeback.
 *
 * Return: The number of entries which were found.
 */
unsigned find_lock_entries(struct address_space *mapping, pgoff_t *start,
		pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices)
{
	XA_STATE(xas, &mapping->i_pages, *start);
	struct folio *folio;

	rcu_read_lock();
	while ((folio = find_get_entry(&xas, end, XA_PRESENT))) {
		if (!xa_is_value(folio)) {
			if (folio->index < *start)
				goto put;
			if (folio_next_index(folio) - 1 > end)
				goto put;
			if (!folio_trylock(folio))
				goto put;
			if (folio->mapping != mapping ||
			    folio_test_writeback(folio))
				goto unlock;
			VM_BUG_ON_FOLIO(!folio_contains(folio, xas.xa_index),
					folio);
		}
		indices[fbatch->nr] = xas.xa_index;
		if (!folio_batch_add(fbatch, folio))
			break;
		continue;
unlock:
		folio_unlock(folio);
put:
		folio_put(folio);
	}
	rcu_read_unlock();

	if (folio_batch_count(fbatch)) {
		unsigned long nr = 1;
		int idx = folio_batch_count(fbatch) - 1;

		folio = fbatch->folios[idx];
		if (!xa_is_value(folio) && !folio_test_hugetlb(folio))
			nr = folio_nr_pages(folio);
		*start = indices[idx] + nr;
	}
	return folio_batch_count(fbatch);
}
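
/*
 * Example (sketch): unlike find_get_entries(), the caller owns both the
 * lock and the reference on every folio returned here, so a typical loop
 * unlocks as it goes and then drops the references in one batch;
 * process_entry()/process_folio() are hypothetical helpers:
 *
 *	while (find_lock_entries(mapping, &index, end, &fbatch, indices)) {
 *		for (i = 0; i < folio_batch_count(&fbatch); i++) {
 *			struct folio *folio = fbatch.folios[i];
 *
 *			if (xa_is_value(folio)) {
 *				process_entry(mapping, indices[i], folio);
 *				continue;
 *			}
 *			process_folio(folio);
 *			folio_unlock(folio);
 *		}
 *		folio_batch_remove_exceptionals(&fbatch);
 *		folio_batch_release(&fbatch);
 *	}
 */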

/**
 * filemap_get_folios - Get a batch of folios
 * @mapping:	The address_space to search
 * @start:	The starting page index
 * @end:	The final page index (inclusive)
 * @fbatch:	The batch to fill.
 *
 * Search for and return a batch of folios in the mapping starting at
 * index @start and up to index @end (inclusive).  The folios are returned
 * in @fbatch with an elevated reference count.
 *
 * The first folio may start before @start; if it does, it will contain
 * @start.  The final folio may extend beyond @end; if it does, it will
 * contain @end.  The folios have ascending indices.  There may be gaps
 * between the folios if there are indices which have no folio in the
 * page cache.  If folios are added to or removed from the page cache
 * while this is running, they may or may not be found by this call.
 *
 * Return: The number of folios which were found.
 * We also update @start to index the next folio for the traversal.
 */
unsigned filemap_get_folios(struct address_space *mapping, pgoff_t *start,
		pgoff_t end, struct folio_batch *fbatch)
{
	XA_STATE(xas, &mapping->i_pages, *start);
	struct folio *folio;

	rcu_read_lock();
	while ((folio = find_get_entry(&xas, end, XA_PRESENT)) != NULL) {
		/* Skip over shadow, swap and DAX entries */
		if (xa_is_value(folio))
			continue;
		if (!folio_batch_add(fbatch, folio)) {
			unsigned long nr = folio_nr_pages(folio);

			if (folio_test_hugetlb(folio))
				nr = 1;
			*start = folio->index + nr;
			goto out;
		}
	}

	/*
	 * We come here when there is no page beyond @end. We take care to not
	 * overflow the index @start as it confuses some of the callers. This
	 * breaks the iteration when there is a page at index -1 but that is
	 * already broken anyway.
	 */
	if (end == (pgoff_t)-1)
		*start = (pgoff_t)-1;
	else
		*start = end + 1;
out:
	rcu_read_unlock();

	return folio_batch_count(fbatch);
}
EXPORT_SYMBOL(filemap_get_folios);
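
/*
 * Example (sketch): walking every folio cached over a byte range.  Because
 * the first folio may begin before the requested start, callers that care
 * about exact byte ranges should clamp with max(folio_pos(folio), pos).
 * inspect_folio() is a hypothetical helper:
 *
 *	pgoff_t index = pos >> PAGE_SHIFT;
 *	pgoff_t end = (pos + len - 1) >> PAGE_SHIFT;
 *
 *	folio_batch_init(&fbatch);
 *	while (filemap_get_folios(mapping, &index, end, &fbatch)) {
 *		for (i = 0; i < folio_batch_count(&fbatch); i++)
 *			inspect_folio(fbatch.folios[i]);
 *		folio_batch_release(&fbatch);
 *	}
 */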

/**
 * filemap_get_folios_contig - Get a batch of contiguous folios
 * @mapping:	The address_space to search
 * @start:	The starting page index
 * @end:	The final page index (inclusive)
 * @fbatch:	The batch to fill
 *
 * filemap_get_folios_contig() works exactly like filemap_get_folios(),
 * except the returned folios are guaranteed to be contiguous. This may
 * not return all contiguous folios if the batch gets filled up.
 *
 * Return: The number of folios found.
 * Also update @start to be positioned for traversal of the next folio.
 */
unsigned filemap_get_folios_contig(struct address_space *mapping,
		pgoff_t *start, pgoff_t end, struct folio_batch *fbatch)
{
	XA_STATE(xas, &mapping->i_pages, *start);
	unsigned long nr;
	struct folio *folio;

	rcu_read_lock();

	for (folio = xas_load(&xas); folio && xas.xa_index <= end;
			folio = xas_next(&xas)) {
		if (xas_retry(&xas, folio))
			continue;
		/*
		 * If the entry has been swapped out, we can stop looking.
		 * No current caller is looking for DAX entries.
		 */
		if (xa_is_value(folio))
			goto update_start;

		if (!folio_try_get_rcu(folio))
			goto retry;

		if (unlikely(folio != xas_reload(&xas)))
			goto put_folio;

		if (!folio_batch_add(fbatch, folio)) {
			nr = folio_nr_pages(folio);

			if (folio_test_hugetlb(folio))
				nr = 1;
			*start = folio->index + nr;
			goto out;
		}
		continue;
put_folio:
		folio_put(folio);

retry:
		xas_reset(&xas);
	}

update_start:
	nr = folio_batch_count(fbatch);

	if (nr) {
		folio = fbatch->folios[nr - 1];
		if (folio_test_hugetlb(folio))
			*start = folio->index + 1;
		else
			*start = folio_next_index(folio);
	}
out:
	rcu_read_unlock();
	return folio_batch_count(fbatch);
}
EXPORT_SYMBOL(filemap_get_folios_contig);

/**
 * filemap_get_folios_tag - Get a batch of folios matching @tag
 * @mapping:    The address_space to search
 * @start:      The starting page index
 * @end:        The final page index (inclusive)
 * @tag:        The tag index
 * @fbatch:     The batch to fill
 *
 * Same as filemap_get_folios(), but only returning folios tagged with @tag.
 *
 * Return: The number of folios found.
 * Also update @start to index the next folio for traversal.
 */
unsigned filemap_get_folios_tag(struct address_space *mapping, pgoff_t *start,
			pgoff_t end, xa_mark_t tag, struct folio_batch *fbatch)
{
	XA_STATE(xas, &mapping->i_pages, *start);
	struct folio *folio;

	rcu_read_lock();
	while ((folio = find_get_entry(&xas, end, tag)) != NULL) {
		/*
		 * Shadow entries should never be tagged, but this iteration
		 * is lockless so there is a window for page reclaim to evict
		 * a page we saw tagged. Skip over it.
		 */
		if (xa_is_value(folio))
			continue;
		if (!folio_batch_add(fbatch, folio)) {
			unsigned long nr = folio_nr_pages(folio);

			if (folio_test_hugetlb(folio))
				nr = 1;
			*start = folio->index + nr;
			goto out;
		}
	}
	/*
	 * We come here when there is no page beyond @end. We take care to not
	 * overflow the index @start as it confuses some of the callers. This
	 * breaks the iteration when there is a page at index -1 but that is
	 * already broken anyway.
	 */
	if (end == (pgoff_t)-1)
		*start = (pgoff_t)-1;
	else
		*start = end + 1;
out:
	rcu_read_unlock();

	return folio_batch_count(fbatch);
}
EXPORT_SYMBOL(filemap_get_folios_tag);
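
/*
 * Example (sketch): writeback-style iteration over dirty folios, as a
 * filesystem's writepages implementation might do; write_one_folio() is a
 * hypothetical helper:
 *
 *	pgoff_t index = 0;
 *
 *	folio_batch_init(&fbatch);
 *	while (filemap_get_folios_tag(mapping, &index, (pgoff_t)-1,
 *				PAGECACHE_TAG_DIRTY, &fbatch)) {
 *		for (i = 0; i < folio_batch_count(&fbatch); i++)
 *			write_one_folio(fbatch.folios[i]);
 *		folio_batch_release(&fbatch);
 *	}
 */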

/*
 * CD/DVDs are error prone. When a medium error occurs, the driver may fail
 * a _large_ part of the i/o request. Imagine the worst scenario:
 *
 *      ---R__________________________________________B__________
 *         ^ reading here                             ^ bad block (assume 4k)
 *
 * read(R) => miss => readahead(R...B) => media error => frustrating retries
 * => failing the whole request => read(R) => read(R+1) =>
 * readahead(R+1...B+1) => bang => read(R+2) => read(R+3) =>
 * readahead(R+3...B+2) => bang => read(R+3) => read(R+4) =>
 * readahead(R+4...B+3) => bang => read(R+4) => read(R+5) => ......
 *
 * This spirals out of control. Fix it by quickly scaling down the
 * readahead size.
 */
static void shrink_readahead_size_eio(struct file_ra_state *ra)
{
	ra->ra_pages /= 4;
}
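
/*
 * Illustration: with a common 128-page (512KiB on 4KiB pages) readahead
 * window, successive media errors shrink ra_pages 128 -> 32 -> 8 -> 2 -> 0,
 * so a handful of failures drops us back to single-page reads and the
 * retry storm above dies out.
 */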

/*
 * filemap_get_read_batch - Get a batch of folios for read
 *
 * Get a batch of folios which represent a contiguous range of bytes in
 * the file.  No exceptional entries will be returned.  If @index is in
 * the middle of a folio, the entire folio will be returned.  The last
 * folio in the batch may have the readahead flag set or the uptodate flag
 * clear so that the caller can take the appropriate action.
 */
static void filemap_get_read_batch(struct address_space *mapping,
		pgoff_t index, pgoff_t max, struct folio_batch *fbatch)
{
	XA_STATE(xas, &mapping->i_pages, index);
	struct folio *folio;

	rcu_read_lock();
	for (folio = xas_load(&xas); folio; folio = xas_next(&xas)) {
		if (xas_retry(&xas, folio))
			continue;
		if (xas.xa_index > max || xa_is_value(folio))
			break;
		if (xa_is_sibling(folio))
			break;
		if (!folio_try_get_rcu(folio))
			goto retry;

		if (unlikely(folio != xas_reload(&xas)))
			goto put_folio;

		if (!folio_batch_add(fbatch, folio))
			break;
		if (!folio_test_uptodate(folio))
			break;
		if (folio_test_readahead(folio))
			break;
		xas_advance(&xas, folio_next_index(folio) - 1);
		continue;
put_folio:
		folio_put(folio);
retry:
		xas_reset(&xas);
	}
	rcu_read_unlock();
}

static int filemap_read_folio(struct file *file, filler_t filler,
		struct folio *folio)
{
	bool workingset = folio_test_workingset(folio);
	unsigned long pflags;
	int error;

	/*
	 * A previous I/O error may have been due to temporary failures,
	 * eg. multipath errors.  PG_error will be set again if read_folio
	 * fails.
	 */
	folio_clear_error(folio);

	/* Start the actual read. The read will unlock the page. */
	if (unlikely(workingset))
		psi_memstall_enter(&pflags);
	error = filler(file, folio);
	if (unlikely(workingset))
		psi_memstall_leave(&pflags);
	if (error)
		return error;

	error = folio_wait_locked_killable(folio);
	if (error)
		return error;
	if (folio_test_uptodate(folio))
		return 0;
	if (file)
		shrink_readahead_size_eio(&file->f_ra);
	return -EIO;
}

static bool filemap_range_uptodate(struct address_space *mapping,
		loff_t pos, size_t count, struct folio *folio,
		bool need_uptodate)
{
	if (folio_test_uptodate(folio))
		return true;
	/* pipes can't handle partially uptodate pages */
	if (need_uptodate)
		return false;
	if (!mapping->a_ops->is_partially_uptodate)
		return false;
	if (mapping->host->i_blkbits >= folio_shift(folio))
		return false;

	if (folio_pos(folio) > pos) {
		count -= folio_pos(folio) - pos;
		pos = 0;
	} else {
		pos -= folio_pos(folio);
	}

	return mapping->a_ops->is_partially_uptodate(folio, pos, count);
}
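
/*
 * Worked example of the pos/count remapping above: for a 16KiB folio at
 * file position 16384 and a read of count=8192 at pos=20480, the folio
 * starts before the read, so pos becomes 20480 - 16384 = 4096 and
 * ->is_partially_uptodate() is asked about folio bytes [4096, 12288).
 * If instead the read began at pos=12288, the folio starts 4096 bytes
 * into the request: count drops to 8192 - 4096 = 4096 and pos resets to
 * 0, i.e. folio bytes [0, 4096).
 */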

static int filemap_update_page(struct kiocb *iocb,
		struct address_space *mapping, size_t count,
		struct folio *folio, bool need_uptodate)
{
	int error;

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!filemap_invalidate_trylock_shared(mapping))
			return -EAGAIN;
	} else {
		filemap_invalidate_lock_shared(mapping);
	}

	if (!folio_trylock(folio)) {
		error = -EAGAIN;
		if (iocb->ki_flags & (IOCB_NOWAIT | IOCB_NOIO))
			goto unlock_mapping;
		if (!(iocb->ki_flags & IOCB_WAITQ)) {
			filemap_invalidate_unlock_shared(mapping);
			/*
			 * This is where we usually end up waiting for a
			 * previously submitted readahead to finish.
			 */
			folio_put_wait_locked(folio, TASK_KILLABLE);
			return AOP_TRUNCATED_PAGE;
		}
		error = __folio_lock_async(folio, iocb->ki_waitq);
		if (error)
			goto unlock_mapping;
	}

	error = AOP_TRUNCATED_PAGE;
	if (!folio->mapping)
		goto unlock;

	error = 0;
	if (filemap_range_uptodate(mapping, iocb->ki_pos, count, folio,
				   need_uptodate))
		goto unlock;

	error = -EAGAIN;
	if (iocb->ki_flags & (IOCB_NOIO | IOCB_NOWAIT | IOCB_WAITQ))
		goto unlock;

	error = filemap_read_folio(iocb->ki_filp, mapping->a_ops->read_folio,
			folio);
	goto unlock_mapping;
unlock:
	folio_unlock(folio);
unlock_mapping:
	filemap_invalidate_unlock_shared(mapping);
	if (error == AOP_TRUNCATED_PAGE)
		folio_put(folio);
	return error;
}

static int filemap_create_folio(struct file *file,
		struct address_space *mapping, pgoff_t index,
		struct folio_batch *fbatch)
{
	struct folio *folio;
	int error;

	folio = filemap_alloc_folio(mapping_gfp_mask(mapping), 0);
	if (!folio)
		return -ENOMEM;

	/*
	 * Protect against truncate / hole punch. Grabbing invalidate_lock
	 * here assures we cannot instantiate and bring uptodate new
	 * pagecache folios after evicting page cache during truncate
	 * and before actually freeing blocks.  Note that we could
	 * release invalidate_lock after inserting the folio into
	 * the page cache as the locked folio would then be enough to
	 * synchronize with hole punching. But there are code paths
	 * such as filemap_update_page() filling in partially uptodate
	 * pages or ->readahead() that need to hold invalidate_lock
	 * while mapping blocks for IO so let's hold the lock here as
	 * well to keep locking rules simple.
	 */
	filemap_invalidate_lock_shared(mapping);
	error = filemap_add_folio(mapping, folio, index,
			mapping_gfp_constraint(mapping, GFP_KERNEL));
	if (error == -EEXIST)
		error = AOP_TRUNCATED_PAGE;
	if (error)
		goto error;

	error = filemap_read_folio(file, mapping->a_ops->read_folio, folio);
	if (error)
		goto error;

	filemap_invalidate_unlock_shared(mapping);
	folio_batch_add(fbatch, folio);
	return 0;
error:
	filemap_invalidate_unlock_shared(mapping);
	folio_put(folio);
	return error;
}

static int filemap_readahead(struct kiocb *iocb, struct file *file,
		struct address_space *mapping, struct folio *folio,
		pgoff_t last_index)
{
	DEFINE_READAHEAD(ractl, file, &file->f_ra, mapping, folio->index);

	if (iocb->ki_flags & IOCB_NOIO)
		return -EAGAIN;
	page_cache_async_ra(&ractl, folio, last_index - folio->index);
	return 0;
}

static int filemap_get_pages(struct kiocb *iocb, size_t count,
		struct folio_batch *fbatch, bool need_uptodate)
{
	struct file *filp = iocb->ki_filp;
	struct address_space *mapping = filp->f_mapping;
	struct file_ra_state *ra = &filp->f_ra;
	pgoff_t index = iocb->ki_pos >> PAGE_SHIFT;
	pgoff_t last_index;
	struct folio *folio;
	int err = 0;

	/* "last_index" is the index of the page beyond the end of the read */
	last_index = DIV_ROUND_UP(iocb->ki_pos + count, PAGE_SIZE);
retry:
	if (fatal_signal_pending(current))
		return -EINTR;

	filemap_get_read_batch(mapping, index, last_index - 1, fbatch);
	if (!folio_batch_count(fbatch)) {
		if (iocb->ki_flags & IOCB_NOIO)
			return -EAGAIN;
		page_cache_sync_readahead(mapping, ra, filp, index,
				last_index - index);
		filemap_get_read_batch(mapping, index, last_index - 1, fbatch);
	}
	if (!folio_batch_count(fbatch)) {
		if (iocb->ki_flags & (IOCB_NOWAIT | IOCB_WAITQ))
			return -EAGAIN;
		err = filemap_create_folio(filp, mapping,
				iocb->ki_pos >> PAGE_SHIFT, fbatch);
		if (err == AOP_TRUNCATED_PAGE)
			goto retry;
		return err;
	}

	folio = fbatch->folios[folio_batch_count(fbatch) - 1];
	if (folio_test_readahead(folio)) {
		err = filemap_readahead(iocb, filp, mapping, folio, last_index);
		if (err)
			goto err;
	}
	if (!folio_test_uptodate(folio)) {
		if ((iocb->ki_flags & IOCB_WAITQ) &&
		    folio_batch_count(fbatch) > 1)
			iocb->ki_flags |= IOCB_NOWAIT;
		err = filemap_update_page(iocb, mapping, count, folio,
					  need_uptodate);
		if (err)
			goto err;
	}

	return 0;
err:
	if (err < 0)
		folio_put(folio);
	if (likely(--fbatch->nr))
		return 0;
	if (err == AOP_TRUNCATED_PAGE)
		goto retry;
	return err;
}

static inline bool pos_same_folio(loff_t pos1, loff_t pos2, struct folio *folio)
{
	unsigned int shift = folio_shift(folio);

	return (pos1 >> shift == pos2 >> shift);
}
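
/*
 * e.g. for a 2MiB folio (shift 21), positions 0x1fffff and 0x100000 land
 * in the same folio, while 0x1fffff and 0x200000 do not.
 */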

/**
 * filemap_read - Read data from the page cache.
 * @iocb: The iocb to read.
 * @iter: Destination for the data.
 * @already_read: Number of bytes already read by the caller.
 *
 * Copies data from the page cache.  If the data is not currently present,
 * uses the readahead and read_folio address_space operations to fetch it.
 *
 * Return: Total number of bytes copied, including those already read by
 * the caller.  If an error happens before any bytes are copied, returns
 * a negative error number.
 */
ssize_t filemap_read(struct kiocb *iocb, struct iov_iter *iter,
		ssize_t already_read)
{
	struct file *filp = iocb->ki_filp;
	struct file_ra_state *ra = &filp->f_ra;
	struct address_space *mapping = filp->f_mapping;
	struct inode *inode = mapping->host;
	struct folio_batch fbatch;
	int i, error = 0;
	bool writably_mapped;
	loff_t isize, end_offset;
	loff_t last_pos = ra->prev_pos;

	if (unlikely(iocb->ki_pos >= inode->i_sb->s_maxbytes))
		return 0;
	if (unlikely(!iov_iter_count(iter)))
		return 0;

	iov_iter_truncate(iter, inode->i_sb->s_maxbytes);
	folio_batch_init(&fbatch);

	do {
		cond_resched();

		/*
		 * If we've already successfully copied some data, then we
		 * can no longer safely return -EIOCBQUEUED. Hence mark
		 * an async read NOWAIT at that point.
		 */
		if ((iocb->ki_flags & IOCB_WAITQ) && already_read)
			iocb->ki_flags |= IOCB_NOWAIT;

		if (unlikely(iocb->ki_pos >= i_size_read(inode)))
			break;

		error = filemap_get_pages(iocb, iter->count, &fbatch, false);
		if (error < 0)
			break;

		/*
		 * i_size must be checked after we know the pages are uptodate.
		 *
		 * Checking i_size only after the uptodate check lets us
		 * calculate the correct copy length, which means the
		 * zero-filled part of the page is not copied back to
		 * userspace (unless another truncate extends the file -
		 * this is desired though).
		 */
		isize = i_size_read(inode);
		if (unlikely(iocb->ki_pos >= isize))
			goto put_folios;
		end_offset = min_t(loff_t, isize, iocb->ki_pos + iter->count);

		/*
		 * Pairs with a barrier in
		 * block_write_end()->mark_buffer_dirty() or other page
		 * dirtying routines like iomap_write_end() to ensure
		 * changes to page contents are visible before we see
		 * increased inode size.
		 */
		smp_rmb();

		/*
		 * Once we start copying data, we don't want to be touching any
		 * cachelines that might be contended:
		 */
		writably_mapped = mapping_writably_mapped(mapping);

		/*
		 * When a read accesses the same folio several times, only
		 * mark it as accessed the first time.
		 */
		if (!pos_same_folio(iocb->ki_pos, last_pos - 1,
				    fbatch.folios[0]))
			folio_mark_accessed(fbatch.folios[0]);

		for (i = 0; i < folio_batch_count(&fbatch); i++) {
			struct folio *folio = fbatch.folios[i];
			size_t fsize = folio_size(folio);
			size_t offset = iocb->ki_pos & (fsize - 1);
			size_t bytes = min_t(loff_t, end_offset - iocb->ki_pos,
					     fsize - offset);
			size_t copied;

			if (end_offset < folio_pos(folio))
				break;
			if (i > 0)
				folio_mark_accessed(folio);
			/*
			 * If users can be writing to this folio using arbitrary
			 * virtual addresses, take care of potential aliasing
			 * before reading the folio on the kernel side.
			 */
			if (writably_mapped)
				flush_dcache_folio(folio);

			copied = copy_folio_to_iter(folio, offset, bytes, iter);

			already_read += copied;
			iocb->ki_pos += copied;
			last_pos = iocb->ki_pos;

			if (copied < bytes) {
				error = -EFAULT;
				break;
			}
		}
put_folios:
		for (i = 0; i < folio_batch_count(&fbatch); i++)
			folio_put(fbatch.folios[i]);
		folio_batch_init(&fbatch);
	} while (iov_iter_count(iter) && iocb->ki_pos < isize && !error);

	file_accessed(filp);
	ra->prev_pos = last_pos;
	return already_read ? already_read : error;
}
EXPORT_SYMBOL_GPL(filemap_read);

int kiocb_write_and_wait(struct kiocb *iocb, size_t count)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	loff_t pos = iocb->ki_pos;
	loff_t end = pos + count - 1;

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (filemap_range_needs_writeback(mapping, pos, end))
			return -EAGAIN;
		return 0;
	}

	return filemap_write_and_wait_range(mapping, pos, end);
}

int kiocb_invalidate_pages(struct kiocb *iocb, size_t count)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	loff_t pos = iocb->ki_pos;
	loff_t end = pos + count - 1;
	int ret;

	if (iocb->ki_flags & IOCB_NOWAIT) {
		/* we could block if there are any pages in the range */
		if (filemap_range_has_page(mapping, pos, end))
			return -EAGAIN;
	} else {
		ret = filemap_write_and_wait_range(mapping, pos, end);
		if (ret)
			return ret;
	}

	/*
	 * After a write we want buffered reads to be sure to go to disk to get
	 * the new data.  We invalidate clean cached pages from the region we're
	 * about to write.  We do this *before* the write so that we can return
	 * without clobbering -EIOCBQUEUED from ->direct_IO().
	 */
	return invalidate_inode_pages2_range(mapping, pos >> PAGE_SHIFT,
					     end >> PAGE_SHIFT);
}
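
/*
 * Sketch of the usual direct-I/O write sequence (modelled on
 * generic_file_direct_write()-style callers; error handling trimmed):
 *
 *	ret = kiocb_invalidate_pages(iocb, count);
 *	if (ret)
 *		return ret;	(-EAGAIN if IOCB_NOWAIT found cached pages)
 *	ret = mapping->a_ops->direct_IO(iocb, iter);
 *
 * A successful write is then typically followed by a second invalidation
 * to drop any pages that were faulted in while the I/O was in flight.
 */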

/**
 * generic_file_read_iter - generic filesystem read routine
 * @iocb:	kernel I/O control block
 * @iter:	destination for the data read
 *
 * This is the "read_iter()" routine for all filesystems
 * that can use the page cache directly.
 *
 * The IOCB_NOWAIT flag in iocb->ki_flags indicates that -EAGAIN shall
 * be returned when no data can be read without waiting for I/O requests
 * to complete; it doesn't prevent readahead.
 *
 * The IOCB_NOIO flag in iocb->ki_flags indicates that no new I/O
 * requests shall be made for the read or for readahead.  When no data
 * can be read, -EAGAIN shall be returned.  When readahead would be
 * triggered, a partial, possibly empty read shall be returned.
 *
 * Return:
 * * number of bytes copied, even for partial reads
 * * negative error code (or 0 if IOCB_NOIO) if nothing was read
 */
ssize_t
generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	size_t count = iov_iter_count(iter);
	ssize_t retval = 0;

	if (!count)
		return 0; /* skip atime */

	if (iocb->ki_flags & IOCB_DIRECT) {
		struct file *file = iocb->ki_filp;
		struct address_space *mapping = file->f_mapping;
		struct inode *inode = mapping->host;

		retval = kiocb_write_and_wait(iocb, count);
		if (retval < 0)
			return retval;
		file_accessed(file);

		retval = mapping->a_ops->direct_IO(iocb, iter);
		if (retval >= 0) {
			iocb->ki_pos += retval;
			count -= retval;
		}
		if (retval != -EIOCBQUEUED)
			iov_iter_revert(iter, count - iov_iter_count(iter));

		/*
		 * Btrfs can have a short DIO read if we encounter
		 * compressed extents, so if there was an error, or if
		 * we've already read everything we wanted to, or if
		 * there was a short read because we hit EOF, go ahead
		 * and return.  Otherwise fallthrough to buffered io for
		 * the rest of the read.  Buffered reads will not work for
		 * DAX files, so don't bother trying.
		 */
		if (retval < 0 || !count || IS_DAX(inode))
			return retval;
		if (iocb->ki_pos >= i_size_read(inode))
			return retval;
	}

	return filemap_read(iocb, iter, retval);
}
EXPORT_SYMBOL(generic_file_read_iter);
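
/*
 * Most page-cache based filesystems wire this up directly in their
 * file_operations; the name below is made up, but see e.g. ramfs for a
 * real instance:
 *
 *	const struct file_operations example_file_operations = {
 *		.llseek		= generic_file_llseek,
 *		.read_iter	= generic_file_read_iter,
 *		.write_iter	= generic_file_write_iter,
 *		.mmap		= generic_file_mmap,
 *	};
 */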

/*
 * Splice subpages from a folio into a pipe.
 */
size_t splice_folio_into_pipe(struct pipe_inode_info *pipe,
			      struct folio *folio, loff_t fpos, size_t size)
{
	struct page *page;
	size_t spliced = 0, offset = offset_in_folio(folio, fpos);

	page = folio_page(folio, offset / PAGE_SIZE);
	size = min(size, folio_size(folio) - offset);
	offset %= PAGE_SIZE;

	while (spliced < size &&
	       !pipe_full(pipe->head, pipe->tail, pipe->max_usage)) {
		struct pipe_buffer *buf = pipe_head_buf(pipe);
		size_t part = min_t(size_t, PAGE_SIZE - offset, size - spliced);

		*buf = (struct pipe_buffer) {
			.ops	= &page_cache_pipe_buf_ops,
			.page	= page,
			.offset	= offset,
			.len	= part,
		};
		folio_get(folio);
		pipe->head++;
		page++;
		spliced += part;
		offset = 0;
	}

	return spliced;
}

/**
 * filemap_splice_read - Splice data from a file's pagecache into a pipe
 * @in: The file to read from
 * @ppos: Pointer to the file position to read from
 * @pipe: The pipe to splice into
 * @len: The amount to splice
 * @flags: The SPLICE_F_* flags
 *
 * This function gets folios from a file's pagecache and splices them into the
 * pipe.  Readahead will be called as necessary to fill more folios.  This may
 * be used for blockdevs also.
 *
 * Return: On success, the number of bytes read will be returned and *@ppos
 * will be updated if appropriate; 0 will be returned if there is no more data
 * to be read; -EAGAIN will be returned if the pipe had no space, and some
 * other negative error code will be returned on error.  A short read may occur
 * if the pipe has insufficient space, we reach the end of the data or we hit a
 * hole.
 */
ssize_t filemap_splice_read(struct file *in, loff_t *ppos,
			    struct pipe_inode_info *pipe,
			    size_t len, unsigned int flags)
{
	struct folio_batch fbatch;
	struct kiocb iocb;
	size_t total_spliced = 0, used, npages;
	loff_t isize, end_offset;
	bool writably_mapped;
	int i, error = 0;

	if (unlikely(*ppos >= in->f_mapping->host->i_sb->s_maxbytes))
		return 0;

	init_sync_kiocb(&iocb, in);
	iocb.ki_pos = *ppos;

	/* Work out how much data we can actually add into the pipe */
	used = pipe_occupancy(pipe->head, pipe->tail);
	npages = max_t(ssize_t, pipe->max_usage - used, 0);
	len = min_t(size_t, len, npages * PAGE_SIZE);

	folio_batch_init(&fbatch);

	do {
		cond_resched();

		if (*ppos >= i_size_read(in->f_mapping->host))
			break;

		iocb.ki_pos = *ppos;
		error = filemap_get_pages(&iocb, len, &fbatch, true);
		if (error < 0)
			break;

		/*
		 * i_size must be checked after we know the pages are uptodate.
		 *
		 * Checking i_size only after the uptodate check lets us
		 * calculate the correct copy length, which means the
		 * zero-filled part of the page is not copied back to
		 * userspace (unless another truncate extends the file -
		 * this is desired though).
		 */
		isize = i_size_read(in->f_mapping->host);
		if (unlikely(*ppos >= isize))
			break;
		end_offset = min_t(loff_t, isize, *ppos + len);

		/*
		 * Once we start copying data, we don't want to be touching any
		 * cachelines that might be contended:
		 */
		writably_mapped = mapping_writably_mapped(in->f_mapping);

		for (i = 0; i < folio_batch_count(&fbatch); i++) {
			struct folio *folio = fbatch.folios[i];
			size_t n;

			if (folio_pos(folio) >= end_offset)
				goto out;
			folio_mark_accessed(folio);

			/*
			 * If users can be writing to this folio using arbitrary
			 * virtual addresses, take care of potential aliasing
			 * before reading the folio on the kernel side.
			 */
			if (writably_mapped)
				flush_dcache_folio(folio);

			n = min_t(loff_t, len, isize - *ppos);
			n = splice_folio_into_pipe(pipe, folio, *ppos, n);
			if (!n)
				goto out;
			len -= n;
			total_spliced += n;
			*ppos += n;
			in->f_ra.prev_pos = *ppos;
			if (pipe_full(pipe->head, pipe->tail, pipe->max_usage))
				goto out;
		}

		folio_batch_release(&fbatch);
	} while (len);

out:
	folio_batch_release(&fbatch);
	file_accessed(in);

	return total_spliced ? total_spliced : error;
}
EXPORT_SYMBOL(filemap_splice_read);
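
/*
 * As with generic_file_read_iter() above, filesystems normally hook this
 * straight into their file_operations, e.g.:
 *
 *	.splice_read	= filemap_splice_read,
 */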
298962306a36Sopenharmony_ci
299062306a36Sopenharmony_cistatic inline loff_t folio_seek_hole_data(struct xa_state *xas,
299162306a36Sopenharmony_ci		struct address_space *mapping, struct folio *folio,
299262306a36Sopenharmony_ci		loff_t start, loff_t end, bool seek_data)
299362306a36Sopenharmony_ci{
299462306a36Sopenharmony_ci	const struct address_space_operations *ops = mapping->a_ops;
299562306a36Sopenharmony_ci	size_t offset, bsz = i_blocksize(mapping->host);
299662306a36Sopenharmony_ci
299762306a36Sopenharmony_ci	if (xa_is_value(folio) || folio_test_uptodate(folio))
299862306a36Sopenharmony_ci		return seek_data ? start : end;
299962306a36Sopenharmony_ci	if (!ops->is_partially_uptodate)
300062306a36Sopenharmony_ci		return seek_data ? end : start;
300162306a36Sopenharmony_ci
300262306a36Sopenharmony_ci	xas_pause(xas);
300362306a36Sopenharmony_ci	rcu_read_unlock();
300462306a36Sopenharmony_ci	folio_lock(folio);
300562306a36Sopenharmony_ci	if (unlikely(folio->mapping != mapping))
300662306a36Sopenharmony_ci		goto unlock;
300762306a36Sopenharmony_ci
300862306a36Sopenharmony_ci	offset = offset_in_folio(folio, start) & ~(bsz - 1);
300962306a36Sopenharmony_ci
301062306a36Sopenharmony_ci	do {
301162306a36Sopenharmony_ci		if (ops->is_partially_uptodate(folio, offset, bsz) ==
301262306a36Sopenharmony_ci							seek_data)
301362306a36Sopenharmony_ci			break;
301462306a36Sopenharmony_ci		start = (start + bsz) & ~(bsz - 1);
301562306a36Sopenharmony_ci		offset += bsz;
301662306a36Sopenharmony_ci	} while (offset < folio_size(folio));
301762306a36Sopenharmony_ciunlock:
301862306a36Sopenharmony_ci	folio_unlock(folio);
301962306a36Sopenharmony_ci	rcu_read_lock();
302062306a36Sopenharmony_ci	return start;
302162306a36Sopenharmony_ci}
302262306a36Sopenharmony_ci
302362306a36Sopenharmony_cistatic inline size_t seek_folio_size(struct xa_state *xas, struct folio *folio)
302462306a36Sopenharmony_ci{
302562306a36Sopenharmony_ci	if (xa_is_value(folio))
302662306a36Sopenharmony_ci		return PAGE_SIZE << xa_get_order(xas->xa, xas->xa_index);
302762306a36Sopenharmony_ci	return folio_size(folio);
302862306a36Sopenharmony_ci}
302962306a36Sopenharmony_ci
303062306a36Sopenharmony_ci/**
303162306a36Sopenharmony_ci * mapping_seek_hole_data - Seek for SEEK_DATA / SEEK_HOLE in the page cache.
303262306a36Sopenharmony_ci * @mapping: Address space to search.
303362306a36Sopenharmony_ci * @start: First byte to consider.
303462306a36Sopenharmony_ci * @end: Limit of search (exclusive).
303562306a36Sopenharmony_ci * @whence: Either SEEK_HOLE or SEEK_DATA.
303662306a36Sopenharmony_ci *
303762306a36Sopenharmony_ci * If the page cache knows which blocks contain holes and which blocks
303862306a36Sopenharmony_ci * contain data, your filesystem can use this function to implement
303962306a36Sopenharmony_ci * SEEK_HOLE and SEEK_DATA.  This is useful for filesystems which are
304062306a36Sopenharmony_ci * entirely memory-based such as tmpfs, and filesystems which support
304162306a36Sopenharmony_ci * unwritten extents.
304262306a36Sopenharmony_ci *
304362306a36Sopenharmony_ci * Return: The requested offset on success, or -ENXIO if @whence specifies
304462306a36Sopenharmony_ci * SEEK_DATA and there is no data after @start.  There is an implicit hole
304562306a36Sopenharmony_ci * after @end - 1, so SEEK_HOLE returns @end if all the bytes between @start
304662306a36Sopenharmony_ci * and @end contain data.
304762306a36Sopenharmony_ci */
304862306a36Sopenharmony_ciloff_t mapping_seek_hole_data(struct address_space *mapping, loff_t start,
304962306a36Sopenharmony_ci		loff_t end, int whence)
305062306a36Sopenharmony_ci{
305162306a36Sopenharmony_ci	XA_STATE(xas, &mapping->i_pages, start >> PAGE_SHIFT);
305262306a36Sopenharmony_ci	pgoff_t max = (end - 1) >> PAGE_SHIFT;
305362306a36Sopenharmony_ci	bool seek_data = (whence == SEEK_DATA);
305462306a36Sopenharmony_ci	struct folio *folio;
305562306a36Sopenharmony_ci
305662306a36Sopenharmony_ci	if (end <= start)
305762306a36Sopenharmony_ci		return -ENXIO;
305862306a36Sopenharmony_ci
305962306a36Sopenharmony_ci	rcu_read_lock();
306062306a36Sopenharmony_ci	while ((folio = find_get_entry(&xas, max, XA_PRESENT))) {
306162306a36Sopenharmony_ci		loff_t pos = (u64)xas.xa_index << PAGE_SHIFT;
306262306a36Sopenharmony_ci		size_t seek_size;
306362306a36Sopenharmony_ci
306462306a36Sopenharmony_ci		if (start < pos) {
306562306a36Sopenharmony_ci			if (!seek_data)
306662306a36Sopenharmony_ci				goto unlock;
306762306a36Sopenharmony_ci			start = pos;
306862306a36Sopenharmony_ci		}
306962306a36Sopenharmony_ci
307062306a36Sopenharmony_ci		seek_size = seek_folio_size(&xas, folio);
307162306a36Sopenharmony_ci		pos = round_up((u64)pos + 1, seek_size);
307262306a36Sopenharmony_ci		start = folio_seek_hole_data(&xas, mapping, folio, start, pos,
307362306a36Sopenharmony_ci				seek_data);
307462306a36Sopenharmony_ci		if (start < pos)
307562306a36Sopenharmony_ci			goto unlock;
307662306a36Sopenharmony_ci		if (start >= end)
307762306a36Sopenharmony_ci			break;
307862306a36Sopenharmony_ci		if (seek_size > PAGE_SIZE)
307962306a36Sopenharmony_ci			xas_set(&xas, pos >> PAGE_SHIFT);
308062306a36Sopenharmony_ci		if (!xa_is_value(folio))
308162306a36Sopenharmony_ci			folio_put(folio);
308262306a36Sopenharmony_ci	}
308362306a36Sopenharmony_ci	if (seek_data)
308462306a36Sopenharmony_ci		start = -ENXIO;
308562306a36Sopenharmony_ciunlock:
308662306a36Sopenharmony_ci	rcu_read_unlock();
308762306a36Sopenharmony_ci	if (folio && !xa_is_value(folio))
308862306a36Sopenharmony_ci		folio_put(folio);
308962306a36Sopenharmony_ci	if (start > end)
309062306a36Sopenharmony_ci		return end;
309162306a36Sopenharmony_ci	return start;
309262306a36Sopenharmony_ci}
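
/*
 * Usage sketch (illustrative, not part of this file): a memory-backed
 * filesystem could build its llseek() on top of mapping_seek_hole_data().
 * "myfs" is hypothetical; see shmem_file_llseek() for an in-tree user.
 *
 *	static loff_t myfs_llseek(struct file *file, loff_t offset, int whence)
 *	{
 *		struct inode *inode = file->f_mapping->host;
 *
 *		if (whence != SEEK_DATA && whence != SEEK_HOLE)
 *			return generic_file_llseek_size(file, offset, whence,
 *					MAX_LFS_FILESIZE, i_size_read(inode));
 *		inode_lock(inode);
 *		offset = mapping_seek_hole_data(file->f_mapping, offset,
 *				i_size_read(inode), whence);
 *		if (offset >= 0)
 *			offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE);
 *		inode_unlock(inode);
 *		return offset;
 *	}
 */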
309362306a36Sopenharmony_ci
309462306a36Sopenharmony_ci#ifdef CONFIG_MMU
309562306a36Sopenharmony_ci#define MMAP_LOTSAMISS  (100)
309662306a36Sopenharmony_ci/*
309762306a36Sopenharmony_ci * lock_folio_maybe_drop_mmap - lock the folio, possibly dropping the mmap_lock
309862306a36Sopenharmony_ci * @vmf - the vm_fault for this fault.
309962306a36Sopenharmony_ci * @folio - the folio to lock.
310062306a36Sopenharmony_ci * @fpin - the pointer to the file we may pin (or is already pinned).
310162306a36Sopenharmony_ci *
310262306a36Sopenharmony_ci * This works similarly to folio_lock_or_retry() in that it can drop the
310362306a36Sopenharmony_ci * mmap_lock.  It differs in that it actually returns the folio locked
310462306a36Sopenharmony_ci * when it returns 1, and 0 if it couldn't lock the folio.  If we did have
310562306a36Sopenharmony_ci * to drop the mmap_lock then fpin will point to the pinned file and
310662306a36Sopenharmony_ci * needs to be fput()'ed at a later point.
310762306a36Sopenharmony_ci */
310862306a36Sopenharmony_cistatic int lock_folio_maybe_drop_mmap(struct vm_fault *vmf, struct folio *folio,
310962306a36Sopenharmony_ci				     struct file **fpin)
311062306a36Sopenharmony_ci{
311162306a36Sopenharmony_ci	if (folio_trylock(folio))
311262306a36Sopenharmony_ci		return 1;
311362306a36Sopenharmony_ci
311462306a36Sopenharmony_ci	/*
311562306a36Sopenharmony_ci	 * NOTE! This will make us return with VM_FAULT_RETRY, but with
311662306a36Sopenharmony_ci	 * the mmap_lock still held. That's how FAULT_FLAG_RETRY_NOWAIT
311762306a36Sopenharmony_ci	 * is supposed to work. We have way too many special cases..
311862306a36Sopenharmony_ci	 */
311962306a36Sopenharmony_ci	if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
312062306a36Sopenharmony_ci		return 0;
312162306a36Sopenharmony_ci
312262306a36Sopenharmony_ci	*fpin = maybe_unlock_mmap_for_io(vmf, *fpin);
312362306a36Sopenharmony_ci	if (vmf->flags & FAULT_FLAG_KILLABLE) {
312462306a36Sopenharmony_ci		if (__folio_lock_killable(folio)) {
312562306a36Sopenharmony_ci			/*
312662306a36Sopenharmony_ci			 * We didn't have the right flags to drop the mmap_lock,
312762306a36Sopenharmony_ci			 * but all fault_handlers only check for fatal signals
312862306a36Sopenharmony_ci			 * if we return VM_FAULT_RETRY, so we need to drop the
312962306a36Sopenharmony_ci			 * mmap_lock here and return 0 if we don't have a fpin.
313062306a36Sopenharmony_ci			 */
313162306a36Sopenharmony_ci			if (*fpin == NULL)
313262306a36Sopenharmony_ci				mmap_read_unlock(vmf->vma->vm_mm);
313362306a36Sopenharmony_ci			return 0;
313462306a36Sopenharmony_ci		}
313562306a36Sopenharmony_ci	} else
313662306a36Sopenharmony_ci		__folio_lock(folio);
313762306a36Sopenharmony_ci
313862306a36Sopenharmony_ci	return 1;
313962306a36Sopenharmony_ci}
314062306a36Sopenharmony_ci
314162306a36Sopenharmony_ci/*
314262306a36Sopenharmony_ci * Synchronous readahead happens when we don't even find a page in the page
314362306a36Sopenharmony_ci * cache at all.  We don't want to perform IO under the mmap_lock, so if we
314462306a36Sopenharmony_ci * have to drop the mmap_lock we return the file that was pinned in order for
314562306a36Sopenharmony_ci * us to do that.  If we didn't pin a file then we return NULL.  The file that
314662306a36Sopenharmony_ci * is returned needs to be fput()'ed when we're done with it.
314762306a36Sopenharmony_ci */
314862306a36Sopenharmony_cistatic struct file *do_sync_mmap_readahead(struct vm_fault *vmf)
314962306a36Sopenharmony_ci{
315062306a36Sopenharmony_ci	struct file *file = vmf->vma->vm_file;
315162306a36Sopenharmony_ci	struct file_ra_state *ra = &file->f_ra;
315262306a36Sopenharmony_ci	struct address_space *mapping = file->f_mapping;
315362306a36Sopenharmony_ci	DEFINE_READAHEAD(ractl, file, ra, mapping, vmf->pgoff);
315462306a36Sopenharmony_ci	struct file *fpin = NULL;
315562306a36Sopenharmony_ci	unsigned long vm_flags = vmf->vma->vm_flags;
315662306a36Sopenharmony_ci	unsigned int mmap_miss;
315762306a36Sopenharmony_ci
315862306a36Sopenharmony_ci#ifdef CONFIG_TRANSPARENT_HUGEPAGE
315962306a36Sopenharmony_ci	/* Use the readahead code, even if readahead is disabled */
316062306a36Sopenharmony_ci	if (vm_flags & VM_HUGEPAGE) {
316162306a36Sopenharmony_ci		fpin = maybe_unlock_mmap_for_io(vmf, fpin);
316262306a36Sopenharmony_ci		ractl._index &= ~((unsigned long)HPAGE_PMD_NR - 1);
316362306a36Sopenharmony_ci		ra->size = HPAGE_PMD_NR;
316462306a36Sopenharmony_ci		/*
316562306a36Sopenharmony_ci		 * Fetch two PMD folios, so we get the chance to actually
316662306a36Sopenharmony_ci		 * readahead, unless we've been told not to.
316762306a36Sopenharmony_ci		 */
316862306a36Sopenharmony_ci		if (!(vm_flags & VM_RAND_READ))
316962306a36Sopenharmony_ci			ra->size *= 2;
317062306a36Sopenharmony_ci		ra->async_size = HPAGE_PMD_NR;
317162306a36Sopenharmony_ci		page_cache_ra_order(&ractl, ra, HPAGE_PMD_ORDER);
317262306a36Sopenharmony_ci		return fpin;
317362306a36Sopenharmony_ci	}
317462306a36Sopenharmony_ci#endif
317562306a36Sopenharmony_ci
317662306a36Sopenharmony_ci	/* If we don't want any read-ahead, don't bother */
317762306a36Sopenharmony_ci	if (vm_flags & VM_RAND_READ)
317862306a36Sopenharmony_ci		return fpin;
317962306a36Sopenharmony_ci	if (!ra->ra_pages)
318062306a36Sopenharmony_ci		return fpin;
318162306a36Sopenharmony_ci
318262306a36Sopenharmony_ci	if (vm_flags & VM_SEQ_READ) {
318362306a36Sopenharmony_ci		fpin = maybe_unlock_mmap_for_io(vmf, fpin);
318462306a36Sopenharmony_ci		page_cache_sync_ra(&ractl, ra->ra_pages);
318562306a36Sopenharmony_ci		return fpin;
318662306a36Sopenharmony_ci	}
318762306a36Sopenharmony_ci
318862306a36Sopenharmony_ci	/* Avoid banging the cache line if not needed */
318962306a36Sopenharmony_ci	mmap_miss = READ_ONCE(ra->mmap_miss);
319062306a36Sopenharmony_ci	if (mmap_miss < MMAP_LOTSAMISS * 10)
319162306a36Sopenharmony_ci		WRITE_ONCE(ra->mmap_miss, ++mmap_miss);
319262306a36Sopenharmony_ci
319362306a36Sopenharmony_ci	/*
319462306a36Sopenharmony_ci	 * Do we miss much more than hit in this file? If so,
319562306a36Sopenharmony_ci	 * stop bothering with read-ahead. It will only hurt.
319662306a36Sopenharmony_ci	 */
319762306a36Sopenharmony_ci	if (mmap_miss > MMAP_LOTSAMISS)
319862306a36Sopenharmony_ci		return fpin;
319962306a36Sopenharmony_ci
320062306a36Sopenharmony_ci	/*
320162306a36Sopenharmony_ci	 * mmap read-around
320262306a36Sopenharmony_ci	 */
320362306a36Sopenharmony_ci	fpin = maybe_unlock_mmap_for_io(vmf, fpin);
320462306a36Sopenharmony_ci	ra->start = max_t(long, 0, vmf->pgoff - ra->ra_pages / 2);
320562306a36Sopenharmony_ci	ra->size = ra->ra_pages;
320662306a36Sopenharmony_ci	ra->async_size = ra->ra_pages / 4;
320762306a36Sopenharmony_ci	ractl._index = ra->start;
320862306a36Sopenharmony_ci	page_cache_ra_order(&ractl, ra, 0);
320962306a36Sopenharmony_ci	return fpin;
321062306a36Sopenharmony_ci}
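
/*
 * Worked example of the read-around window above (illustrative numbers,
 * assuming 4KiB pages): with ra->ra_pages == 32 and a fault at
 * vmf->pgoff == 100, we get ra->start == 100 - 32/2 == 84,
 * ra->size == 32 and ra->async_size == 8: a 32-page window roughly
 * centred on the fault, whose last 8 pages are marked to kick off the
 * next asynchronous batch.  The max_t() clamp only matters near the
 * start of the file, e.g. vmf->pgoff == 10 yields ra->start == 0
 * rather than -6.
 */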
321162306a36Sopenharmony_ci
321262306a36Sopenharmony_ci/*
321362306a36Sopenharmony_ci * Asynchronous readahead happens when we find the folio with PG_readahead
321462306a36Sopenharmony_ci * set, so we want to possibly extend the readahead further.  We return the
321562306a36Sopenharmony_ci * file that was pinned if we have to drop the mmap_lock in order to do IO.
321662306a36Sopenharmony_ci */
321762306a36Sopenharmony_cistatic struct file *do_async_mmap_readahead(struct vm_fault *vmf,
321862306a36Sopenharmony_ci					    struct folio *folio)
321962306a36Sopenharmony_ci{
322062306a36Sopenharmony_ci	struct file *file = vmf->vma->vm_file;
322162306a36Sopenharmony_ci	struct file_ra_state *ra = &file->f_ra;
322262306a36Sopenharmony_ci	DEFINE_READAHEAD(ractl, file, ra, file->f_mapping, vmf->pgoff);
322362306a36Sopenharmony_ci	struct file *fpin = NULL;
322462306a36Sopenharmony_ci	unsigned int mmap_miss;
322562306a36Sopenharmony_ci
322662306a36Sopenharmony_ci	/* If we don't want any read-ahead, don't bother */
322762306a36Sopenharmony_ci	if (vmf->vma->vm_flags & VM_RAND_READ || !ra->ra_pages)
322862306a36Sopenharmony_ci		return fpin;
322962306a36Sopenharmony_ci
323062306a36Sopenharmony_ci	mmap_miss = READ_ONCE(ra->mmap_miss);
323162306a36Sopenharmony_ci	if (mmap_miss)
323262306a36Sopenharmony_ci		WRITE_ONCE(ra->mmap_miss, --mmap_miss);
323362306a36Sopenharmony_ci
323462306a36Sopenharmony_ci	if (folio_test_readahead(folio)) {
323562306a36Sopenharmony_ci		fpin = maybe_unlock_mmap_for_io(vmf, fpin);
323662306a36Sopenharmony_ci		page_cache_async_ra(&ractl, folio, ra->ra_pages);
323762306a36Sopenharmony_ci	}
323862306a36Sopenharmony_ci	return fpin;
323962306a36Sopenharmony_ci}
324062306a36Sopenharmony_ci
324162306a36Sopenharmony_ci/**
324262306a36Sopenharmony_ci * filemap_fault - read in file data for page fault handling
324362306a36Sopenharmony_ci * @vmf:	struct vm_fault containing details of the fault
324462306a36Sopenharmony_ci *
324562306a36Sopenharmony_ci * filemap_fault() is invoked via the vma operations vector for a
324662306a36Sopenharmony_ci * mapped memory region to read in file data during a page fault.
324762306a36Sopenharmony_ci *
324862306a36Sopenharmony_ci * The gotos are kind of ugly, but this streamlines the normal case of having
324962306a36Sopenharmony_ci * it in the page cache, and handles the special cases reasonably without
325062306a36Sopenharmony_ci * having a lot of duplicated code.
325162306a36Sopenharmony_ci *
325262306a36Sopenharmony_ci * vma->vm_mm->mmap_lock must be held on entry.
325362306a36Sopenharmony_ci *
325462306a36Sopenharmony_ci * If our return value has VM_FAULT_RETRY set, it's because the mmap_lock
325562306a36Sopenharmony_ci * may be dropped before doing I/O or by lock_folio_maybe_drop_mmap().
325662306a36Sopenharmony_ci *
325762306a36Sopenharmony_ci * If our return value does not have VM_FAULT_RETRY set, the mmap_lock
325862306a36Sopenharmony_ci * has not been released.
325962306a36Sopenharmony_ci *
326062306a36Sopenharmony_ci * We never return with VM_FAULT_RETRY and a bit from VM_FAULT_ERROR set.
326162306a36Sopenharmony_ci *
326262306a36Sopenharmony_ci * Return: bitwise-OR of %VM_FAULT_ codes.
326362306a36Sopenharmony_ci */
326462306a36Sopenharmony_civm_fault_t filemap_fault(struct vm_fault *vmf)
326562306a36Sopenharmony_ci{
326662306a36Sopenharmony_ci	int error;
326762306a36Sopenharmony_ci	struct file *file = vmf->vma->vm_file;
326862306a36Sopenharmony_ci	struct file *fpin = NULL;
326962306a36Sopenharmony_ci	struct address_space *mapping = file->f_mapping;
327062306a36Sopenharmony_ci	struct inode *inode = mapping->host;
327162306a36Sopenharmony_ci	pgoff_t max_idx, index = vmf->pgoff;
327262306a36Sopenharmony_ci	struct folio *folio;
327362306a36Sopenharmony_ci	vm_fault_t ret = 0;
327462306a36Sopenharmony_ci	bool mapping_locked = false;
327562306a36Sopenharmony_ci
327662306a36Sopenharmony_ci	max_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
327762306a36Sopenharmony_ci	if (unlikely(index >= max_idx))
327862306a36Sopenharmony_ci		return VM_FAULT_SIGBUS;
327962306a36Sopenharmony_ci
328062306a36Sopenharmony_ci	/*
328162306a36Sopenharmony_ci	 * Do we have something in the page cache already?
328262306a36Sopenharmony_ci	 */
328362306a36Sopenharmony_ci	folio = filemap_get_folio(mapping, index);
328462306a36Sopenharmony_ci	if (likely(!IS_ERR(folio))) {
328562306a36Sopenharmony_ci		/*
328662306a36Sopenharmony_ci		 * We found the page, so try async readahead before waiting for
328762306a36Sopenharmony_ci		 * the lock.
328862306a36Sopenharmony_ci		 */
328962306a36Sopenharmony_ci		if (!(vmf->flags & FAULT_FLAG_TRIED))
329062306a36Sopenharmony_ci			fpin = do_async_mmap_readahead(vmf, folio);
329162306a36Sopenharmony_ci		if (unlikely(!folio_test_uptodate(folio))) {
329262306a36Sopenharmony_ci			filemap_invalidate_lock_shared(mapping);
329362306a36Sopenharmony_ci			mapping_locked = true;
329462306a36Sopenharmony_ci		}
329562306a36Sopenharmony_ci	} else {
329662306a36Sopenharmony_ci		/* No page in the page cache at all */
329762306a36Sopenharmony_ci		count_vm_event(PGMAJFAULT);
329862306a36Sopenharmony_ci		count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT);
329962306a36Sopenharmony_ci		ret = VM_FAULT_MAJOR;
330062306a36Sopenharmony_ci		fpin = do_sync_mmap_readahead(vmf);
330162306a36Sopenharmony_ciretry_find:
330262306a36Sopenharmony_ci		/*
330362306a36Sopenharmony_ci		 * See the comment in filemap_create_folio() for why we
330462306a36Sopenharmony_ci		 * need the invalidate_lock.
330562306a36Sopenharmony_ci		 */
330662306a36Sopenharmony_ci		if (!mapping_locked) {
330762306a36Sopenharmony_ci			filemap_invalidate_lock_shared(mapping);
330862306a36Sopenharmony_ci			mapping_locked = true;
330962306a36Sopenharmony_ci		}
331062306a36Sopenharmony_ci		folio = __filemap_get_folio(mapping, index,
331162306a36Sopenharmony_ci					  FGP_CREAT|FGP_FOR_MMAP,
331262306a36Sopenharmony_ci					  vmf->gfp_mask);
331362306a36Sopenharmony_ci		if (IS_ERR(folio)) {
331462306a36Sopenharmony_ci			if (fpin)
331562306a36Sopenharmony_ci				goto out_retry;
331662306a36Sopenharmony_ci			filemap_invalidate_unlock_shared(mapping);
331762306a36Sopenharmony_ci			return VM_FAULT_OOM;
331862306a36Sopenharmony_ci		}
331962306a36Sopenharmony_ci	}
332062306a36Sopenharmony_ci
332162306a36Sopenharmony_ci	if (!lock_folio_maybe_drop_mmap(vmf, folio, &fpin))
332262306a36Sopenharmony_ci		goto out_retry;
332362306a36Sopenharmony_ci
332462306a36Sopenharmony_ci	/* Did it get truncated? */
332562306a36Sopenharmony_ci	if (unlikely(folio->mapping != mapping)) {
332662306a36Sopenharmony_ci		folio_unlock(folio);
332762306a36Sopenharmony_ci		folio_put(folio);
332862306a36Sopenharmony_ci		goto retry_find;
332962306a36Sopenharmony_ci	}
333062306a36Sopenharmony_ci	VM_BUG_ON_FOLIO(!folio_contains(folio, index), folio);
333162306a36Sopenharmony_ci
333262306a36Sopenharmony_ci	/*
333362306a36Sopenharmony_ci	 * We have a locked page in the page cache; now we need to check
333462306a36Sopenharmony_ci	 * that it's up-to-date. If not, it is going to be due to an error.
333562306a36Sopenharmony_ci	 */
333662306a36Sopenharmony_ci	if (unlikely(!folio_test_uptodate(folio))) {
333762306a36Sopenharmony_ci		/*
333862306a36Sopenharmony_ci		 * The page was in cache and uptodate and now it is not.
333962306a36Sopenharmony_ci		 * Strange but possible since we didn't hold the page lock all
334062306a36Sopenharmony_ci	 * the time. Let's drop everything, get the invalidate lock and
334162306a36Sopenharmony_ci		 * try again.
334262306a36Sopenharmony_ci		 */
334362306a36Sopenharmony_ci		if (!mapping_locked) {
334462306a36Sopenharmony_ci			folio_unlock(folio);
334562306a36Sopenharmony_ci			folio_put(folio);
334662306a36Sopenharmony_ci			goto retry_find;
334762306a36Sopenharmony_ci		}
334862306a36Sopenharmony_ci		goto page_not_uptodate;
334962306a36Sopenharmony_ci	}
335062306a36Sopenharmony_ci
335162306a36Sopenharmony_ci	/*
335262306a36Sopenharmony_ci	 * We've made it this far and we had to drop our mmap_lock, so now is the
335362306a36Sopenharmony_ci	 * time to return to the upper layer and have it re-find the vma and
335462306a36Sopenharmony_ci	 * redo the fault.
335562306a36Sopenharmony_ci	 */
335662306a36Sopenharmony_ci	if (fpin) {
335762306a36Sopenharmony_ci		folio_unlock(folio);
335862306a36Sopenharmony_ci		goto out_retry;
335962306a36Sopenharmony_ci	}
336062306a36Sopenharmony_ci	if (mapping_locked)
336162306a36Sopenharmony_ci		filemap_invalidate_unlock_shared(mapping);
336262306a36Sopenharmony_ci
336362306a36Sopenharmony_ci	/*
336462306a36Sopenharmony_ci	 * Found the page and have a reference on it.
336562306a36Sopenharmony_ci	 * We must recheck i_size under page lock.
336662306a36Sopenharmony_ci	 */
336762306a36Sopenharmony_ci	max_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
336862306a36Sopenharmony_ci	if (unlikely(index >= max_idx)) {
336962306a36Sopenharmony_ci		folio_unlock(folio);
337062306a36Sopenharmony_ci		folio_put(folio);
337162306a36Sopenharmony_ci		return VM_FAULT_SIGBUS;
337262306a36Sopenharmony_ci	}
337362306a36Sopenharmony_ci
337462306a36Sopenharmony_ci	vmf->page = folio_file_page(folio, index);
337562306a36Sopenharmony_ci	return ret | VM_FAULT_LOCKED;
337662306a36Sopenharmony_ci
337762306a36Sopenharmony_cipage_not_uptodate:
337862306a36Sopenharmony_ci	/*
337962306a36Sopenharmony_ci	 * Umm, take care of errors if the page isn't up-to-date.
338062306a36Sopenharmony_ci	 * Try to re-read it _once_. We do this synchronously,
338162306a36Sopenharmony_ci	 * because there really aren't any performance issues here
338262306a36Sopenharmony_ci	 * and we need to check for errors.
338362306a36Sopenharmony_ci	 */
338462306a36Sopenharmony_ci	fpin = maybe_unlock_mmap_for_io(vmf, fpin);
338562306a36Sopenharmony_ci	error = filemap_read_folio(file, mapping->a_ops->read_folio, folio);
338662306a36Sopenharmony_ci	if (fpin)
338762306a36Sopenharmony_ci		goto out_retry;
338862306a36Sopenharmony_ci	folio_put(folio);
338962306a36Sopenharmony_ci
339062306a36Sopenharmony_ci	if (!error || error == AOP_TRUNCATED_PAGE)
339162306a36Sopenharmony_ci		goto retry_find;
339262306a36Sopenharmony_ci	filemap_invalidate_unlock_shared(mapping);
339362306a36Sopenharmony_ci
339462306a36Sopenharmony_ci	return VM_FAULT_SIGBUS;
339562306a36Sopenharmony_ci
339662306a36Sopenharmony_ciout_retry:
339762306a36Sopenharmony_ci	/*
339862306a36Sopenharmony_ci	 * We dropped the mmap_lock, so we need to return to the fault handler to
339962306a36Sopenharmony_ci	 * re-find the vma and come back and find our hopefully still populated
340062306a36Sopenharmony_ci	 * page.
340162306a36Sopenharmony_ci	 */
340262306a36Sopenharmony_ci	if (!IS_ERR(folio))
340362306a36Sopenharmony_ci		folio_put(folio);
340462306a36Sopenharmony_ci	if (mapping_locked)
340562306a36Sopenharmony_ci		filemap_invalidate_unlock_shared(mapping);
340662306a36Sopenharmony_ci	if (fpin)
340762306a36Sopenharmony_ci		fput(fpin);
340862306a36Sopenharmony_ci	return ret | VM_FAULT_RETRY;
340962306a36Sopenharmony_ci}
341062306a36Sopenharmony_ciEXPORT_SYMBOL(filemap_fault);
341162306a36Sopenharmony_ci
341262306a36Sopenharmony_cistatic bool filemap_map_pmd(struct vm_fault *vmf, struct folio *folio,
341362306a36Sopenharmony_ci		pgoff_t start)
341462306a36Sopenharmony_ci{
341562306a36Sopenharmony_ci	struct mm_struct *mm = vmf->vma->vm_mm;
341662306a36Sopenharmony_ci
341762306a36Sopenharmony_ci	/* Huge page is mapped? No need to proceed. */
341862306a36Sopenharmony_ci	if (pmd_trans_huge(*vmf->pmd)) {
341962306a36Sopenharmony_ci		folio_unlock(folio);
342062306a36Sopenharmony_ci		folio_put(folio);
342162306a36Sopenharmony_ci		return true;
342262306a36Sopenharmony_ci	}
342362306a36Sopenharmony_ci
342462306a36Sopenharmony_ci	if (pmd_none(*vmf->pmd) && folio_test_pmd_mappable(folio)) {
342562306a36Sopenharmony_ci		struct page *page = folio_file_page(folio, start);
342662306a36Sopenharmony_ci		vm_fault_t ret = do_set_pmd(vmf, page);
342762306a36Sopenharmony_ci		if (!ret) {
342862306a36Sopenharmony_ci			/* The page is mapped successfully, reference consumed. */
342962306a36Sopenharmony_ci			folio_unlock(folio);
343062306a36Sopenharmony_ci			return true;
343162306a36Sopenharmony_ci		}
343262306a36Sopenharmony_ci	}
343362306a36Sopenharmony_ci
343462306a36Sopenharmony_ci	if (pmd_none(*vmf->pmd) && vmf->prealloc_pte)
343562306a36Sopenharmony_ci		pmd_install(mm, vmf->pmd, &vmf->prealloc_pte);
343662306a36Sopenharmony_ci
343762306a36Sopenharmony_ci	return false;
343862306a36Sopenharmony_ci}
343962306a36Sopenharmony_ci
344062306a36Sopenharmony_cistatic struct folio *next_uptodate_folio(struct xa_state *xas,
344162306a36Sopenharmony_ci		struct address_space *mapping, pgoff_t end_pgoff)
344262306a36Sopenharmony_ci{
344362306a36Sopenharmony_ci	struct folio *folio = xas_next_entry(xas, end_pgoff);
344462306a36Sopenharmony_ci	unsigned long max_idx;
344562306a36Sopenharmony_ci
344662306a36Sopenharmony_ci	do {
344762306a36Sopenharmony_ci		if (!folio)
344862306a36Sopenharmony_ci			return NULL;
344962306a36Sopenharmony_ci		if (xas_retry(xas, folio))
345062306a36Sopenharmony_ci			continue;
345162306a36Sopenharmony_ci		if (xa_is_value(folio))
345262306a36Sopenharmony_ci			continue;
345362306a36Sopenharmony_ci		if (folio_test_locked(folio))
345462306a36Sopenharmony_ci			continue;
345562306a36Sopenharmony_ci		if (!folio_try_get_rcu(folio))
345662306a36Sopenharmony_ci			continue;
345762306a36Sopenharmony_ci		/* Has the page moved or been split? */
345862306a36Sopenharmony_ci		if (unlikely(folio != xas_reload(xas)))
345962306a36Sopenharmony_ci			goto skip;
346062306a36Sopenharmony_ci		if (!folio_test_uptodate(folio) || folio_test_readahead(folio))
346162306a36Sopenharmony_ci			goto skip;
346262306a36Sopenharmony_ci		if (!folio_trylock(folio))
346362306a36Sopenharmony_ci			goto skip;
346462306a36Sopenharmony_ci		if (folio->mapping != mapping)
346562306a36Sopenharmony_ci			goto unlock;
346662306a36Sopenharmony_ci		if (!folio_test_uptodate(folio))
346762306a36Sopenharmony_ci			goto unlock;
346862306a36Sopenharmony_ci		max_idx = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE);
346962306a36Sopenharmony_ci		if (xas->xa_index >= max_idx)
347062306a36Sopenharmony_ci			goto unlock;
347162306a36Sopenharmony_ci		return folio;
347262306a36Sopenharmony_ciunlock:
347362306a36Sopenharmony_ci		folio_unlock(folio);
347462306a36Sopenharmony_ciskip:
347562306a36Sopenharmony_ci		folio_put(folio);
347662306a36Sopenharmony_ci	} while ((folio = xas_next_entry(xas, end_pgoff)) != NULL);
347762306a36Sopenharmony_ci
347862306a36Sopenharmony_ci	return NULL;
347962306a36Sopenharmony_ci}
348062306a36Sopenharmony_ci
348162306a36Sopenharmony_ci/*
348262306a36Sopenharmony_ci * Map the page range [start_page, start_page + nr_pages) of the folio.
348362306a36Sopenharmony_ci * start_page is obtained from start via folio_page(folio, start).
348462306a36Sopenharmony_ci */
348562306a36Sopenharmony_cistatic vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
348662306a36Sopenharmony_ci			struct folio *folio, unsigned long start,
348762306a36Sopenharmony_ci			unsigned long addr, unsigned int nr_pages,
348862306a36Sopenharmony_ci			unsigned int *mmap_miss)
348962306a36Sopenharmony_ci{
349062306a36Sopenharmony_ci	vm_fault_t ret = 0;
349162306a36Sopenharmony_ci	struct page *page = folio_page(folio, start);
349262306a36Sopenharmony_ci	unsigned int count = 0;
349362306a36Sopenharmony_ci	pte_t *old_ptep = vmf->pte;
349462306a36Sopenharmony_ci
349562306a36Sopenharmony_ci	do {
349662306a36Sopenharmony_ci		if (PageHWPoison(page + count))
349762306a36Sopenharmony_ci			goto skip;
349862306a36Sopenharmony_ci
349962306a36Sopenharmony_ci		(*mmap_miss)++;
350062306a36Sopenharmony_ci
350162306a36Sopenharmony_ci		/*
350262306a36Sopenharmony_ci		 * NOTE: If there are PTE markers, we'll leave them to be
350362306a36Sopenharmony_ci		 * handled in the specific fault path; that prohibits the
350462306a36Sopenharmony_ci		 * fault-around logic.
350562306a36Sopenharmony_ci		 */
350662306a36Sopenharmony_ci		if (!pte_none(vmf->pte[count]))
350762306a36Sopenharmony_ci			goto skip;
350862306a36Sopenharmony_ci
350962306a36Sopenharmony_ci		count++;
351062306a36Sopenharmony_ci		continue;
351162306a36Sopenharmony_ciskip:
351262306a36Sopenharmony_ci		if (count) {
351362306a36Sopenharmony_ci			set_pte_range(vmf, folio, page, count, addr);
351462306a36Sopenharmony_ci			folio_ref_add(folio, count);
351562306a36Sopenharmony_ci			if (in_range(vmf->address, addr, count * PAGE_SIZE))
351662306a36Sopenharmony_ci				ret = VM_FAULT_NOPAGE;
351762306a36Sopenharmony_ci		}
351862306a36Sopenharmony_ci
351962306a36Sopenharmony_ci		count++;
352062306a36Sopenharmony_ci		page += count;
352162306a36Sopenharmony_ci		vmf->pte += count;
352262306a36Sopenharmony_ci		addr += count * PAGE_SIZE;
352362306a36Sopenharmony_ci		count = 0;
352462306a36Sopenharmony_ci	} while (--nr_pages > 0);
352562306a36Sopenharmony_ci
352662306a36Sopenharmony_ci	if (count) {
352762306a36Sopenharmony_ci		set_pte_range(vmf, folio, page, count, addr);
352862306a36Sopenharmony_ci		folio_ref_add(folio, count);
352962306a36Sopenharmony_ci		if (in_range(vmf->address, addr, count * PAGE_SIZE))
353062306a36Sopenharmony_ci			ret = VM_FAULT_NOPAGE;
353162306a36Sopenharmony_ci	}
353262306a36Sopenharmony_ci
353362306a36Sopenharmony_ci	vmf->pte = old_ptep;
353462306a36Sopenharmony_ci
353562306a36Sopenharmony_ci	return ret;
353662306a36Sopenharmony_ci}
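
/*
 * Worked example (illustrative): called with nr_pages == 8, all eight
 * PTEs none and only page 3 HWPoison, the loop above emits two batches:
 * one set_pte_range() for pages 0-2 when the poisoned page routes
 * through the "skip:" label, and one for pages 4-7 from the trailing
 * "if (count)" block.  The poisoned page is never mapped here and is
 * left for the regular fault path to deal with.
 */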
353762306a36Sopenharmony_ci
353862306a36Sopenharmony_cistatic vm_fault_t filemap_map_order0_folio(struct vm_fault *vmf,
353962306a36Sopenharmony_ci		struct folio *folio, unsigned long addr,
354062306a36Sopenharmony_ci		unsigned int *mmap_miss)
354162306a36Sopenharmony_ci{
354262306a36Sopenharmony_ci	vm_fault_t ret = 0;
354362306a36Sopenharmony_ci	struct page *page = &folio->page;
354462306a36Sopenharmony_ci
354562306a36Sopenharmony_ci	if (PageHWPoison(page))
354662306a36Sopenharmony_ci		return ret;
354762306a36Sopenharmony_ci
354862306a36Sopenharmony_ci	(*mmap_miss)++;
354962306a36Sopenharmony_ci
355062306a36Sopenharmony_ci	/*
355162306a36Sopenharmony_ci	 * NOTE: If there are PTE markers, we'll leave them to be
355262306a36Sopenharmony_ci	 * handled in the specific fault path; that prohibits
355362306a36Sopenharmony_ci	 * the fault-around logic.
355462306a36Sopenharmony_ci	 */
355562306a36Sopenharmony_ci	if (!pte_none(ptep_get(vmf->pte)))
355662306a36Sopenharmony_ci		return ret;
355762306a36Sopenharmony_ci
355862306a36Sopenharmony_ci	if (vmf->address == addr)
355962306a36Sopenharmony_ci		ret = VM_FAULT_NOPAGE;
356062306a36Sopenharmony_ci
356162306a36Sopenharmony_ci	set_pte_range(vmf, folio, page, 1, addr);
356262306a36Sopenharmony_ci	folio_ref_inc(folio);
356362306a36Sopenharmony_ci
356462306a36Sopenharmony_ci	return ret;
356562306a36Sopenharmony_ci}
356662306a36Sopenharmony_ci
356762306a36Sopenharmony_civm_fault_t filemap_map_pages(struct vm_fault *vmf,
356862306a36Sopenharmony_ci			     pgoff_t start_pgoff, pgoff_t end_pgoff)
356962306a36Sopenharmony_ci{
357062306a36Sopenharmony_ci	struct vm_area_struct *vma = vmf->vma;
357162306a36Sopenharmony_ci	struct file *file = vma->vm_file;
357262306a36Sopenharmony_ci	struct address_space *mapping = file->f_mapping;
357362306a36Sopenharmony_ci	pgoff_t last_pgoff = start_pgoff;
357462306a36Sopenharmony_ci	unsigned long addr;
357562306a36Sopenharmony_ci	XA_STATE(xas, &mapping->i_pages, start_pgoff);
357662306a36Sopenharmony_ci	struct folio *folio;
357762306a36Sopenharmony_ci	vm_fault_t ret = 0;
357862306a36Sopenharmony_ci	unsigned int nr_pages = 0, mmap_miss = 0, mmap_miss_saved;
357962306a36Sopenharmony_ci
358062306a36Sopenharmony_ci	rcu_read_lock();
358162306a36Sopenharmony_ci	folio = next_uptodate_folio(&xas, mapping, end_pgoff);
358262306a36Sopenharmony_ci	if (!folio)
358362306a36Sopenharmony_ci		goto out;
358462306a36Sopenharmony_ci
358562306a36Sopenharmony_ci	if (filemap_map_pmd(vmf, folio, start_pgoff)) {
358662306a36Sopenharmony_ci		ret = VM_FAULT_NOPAGE;
358762306a36Sopenharmony_ci		goto out;
358862306a36Sopenharmony_ci	}
358962306a36Sopenharmony_ci
359062306a36Sopenharmony_ci	addr = vma->vm_start + ((start_pgoff - vma->vm_pgoff) << PAGE_SHIFT);
359162306a36Sopenharmony_ci	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, addr, &vmf->ptl);
359262306a36Sopenharmony_ci	if (!vmf->pte) {
359362306a36Sopenharmony_ci		folio_unlock(folio);
359462306a36Sopenharmony_ci		folio_put(folio);
359562306a36Sopenharmony_ci		goto out;
359662306a36Sopenharmony_ci	}
359762306a36Sopenharmony_ci	do {
359862306a36Sopenharmony_ci		unsigned long end;
359962306a36Sopenharmony_ci
360062306a36Sopenharmony_ci		addr += (xas.xa_index - last_pgoff) << PAGE_SHIFT;
360162306a36Sopenharmony_ci		vmf->pte += xas.xa_index - last_pgoff;
360262306a36Sopenharmony_ci		last_pgoff = xas.xa_index;
360362306a36Sopenharmony_ci		end = folio->index + folio_nr_pages(folio) - 1;
360462306a36Sopenharmony_ci		nr_pages = min(end, end_pgoff) - xas.xa_index + 1;
360562306a36Sopenharmony_ci
360662306a36Sopenharmony_ci		if (!folio_test_large(folio))
360762306a36Sopenharmony_ci			ret |= filemap_map_order0_folio(vmf,
360862306a36Sopenharmony_ci					folio, addr, &mmap_miss);
360962306a36Sopenharmony_ci		else
361062306a36Sopenharmony_ci			ret |= filemap_map_folio_range(vmf, folio,
361162306a36Sopenharmony_ci					xas.xa_index - folio->index, addr,
361262306a36Sopenharmony_ci					nr_pages, &mmap_miss);
361362306a36Sopenharmony_ci
361462306a36Sopenharmony_ci		folio_unlock(folio);
361562306a36Sopenharmony_ci		folio_put(folio);
361662306a36Sopenharmony_ci	} while ((folio = next_uptodate_folio(&xas, mapping, end_pgoff)) != NULL);
361762306a36Sopenharmony_ci	pte_unmap_unlock(vmf->pte, vmf->ptl);
361862306a36Sopenharmony_ciout:
361962306a36Sopenharmony_ci	rcu_read_unlock();
362062306a36Sopenharmony_ci
362162306a36Sopenharmony_ci	mmap_miss_saved = READ_ONCE(file->f_ra.mmap_miss);
362262306a36Sopenharmony_ci	if (mmap_miss >= mmap_miss_saved)
362362306a36Sopenharmony_ci		WRITE_ONCE(file->f_ra.mmap_miss, 0);
362462306a36Sopenharmony_ci	else
362562306a36Sopenharmony_ci		WRITE_ONCE(file->f_ra.mmap_miss, mmap_miss_saved - mmap_miss);
362662306a36Sopenharmony_ci
362762306a36Sopenharmony_ci	return ret;
362862306a36Sopenharmony_ci}
362962306a36Sopenharmony_ciEXPORT_SYMBOL(filemap_map_pages);
363062306a36Sopenharmony_ci
363162306a36Sopenharmony_civm_fault_t filemap_page_mkwrite(struct vm_fault *vmf)
363262306a36Sopenharmony_ci{
363362306a36Sopenharmony_ci	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
363462306a36Sopenharmony_ci	struct folio *folio = page_folio(vmf->page);
363562306a36Sopenharmony_ci	vm_fault_t ret = VM_FAULT_LOCKED;
363662306a36Sopenharmony_ci
363762306a36Sopenharmony_ci	sb_start_pagefault(mapping->host->i_sb);
363862306a36Sopenharmony_ci	file_update_time(vmf->vma->vm_file);
363962306a36Sopenharmony_ci	folio_lock(folio);
364062306a36Sopenharmony_ci	if (folio->mapping != mapping) {
364162306a36Sopenharmony_ci		folio_unlock(folio);
364262306a36Sopenharmony_ci		ret = VM_FAULT_NOPAGE;
364362306a36Sopenharmony_ci		goto out;
364462306a36Sopenharmony_ci	}
364562306a36Sopenharmony_ci	/*
364662306a36Sopenharmony_ci	 * We mark the folio dirty already here so that, when a freeze is in
364762306a36Sopenharmony_ci	 * progress, we are guaranteed that writeback during freezing will
364862306a36Sopenharmony_ci	 * see the dirty folio and writeprotect it again.
364962306a36Sopenharmony_ci	 */
365062306a36Sopenharmony_ci	folio_mark_dirty(folio);
365162306a36Sopenharmony_ci	folio_wait_stable(folio);
365262306a36Sopenharmony_ciout:
365362306a36Sopenharmony_ci	sb_end_pagefault(mapping->host->i_sb);
365462306a36Sopenharmony_ci	return ret;
365562306a36Sopenharmony_ci}
365662306a36Sopenharmony_ci
365762306a36Sopenharmony_ciconst struct vm_operations_struct generic_file_vm_ops = {
365862306a36Sopenharmony_ci	.fault		= filemap_fault,
365962306a36Sopenharmony_ci	.map_pages	= filemap_map_pages,
366062306a36Sopenharmony_ci	.page_mkwrite	= filemap_page_mkwrite,
366162306a36Sopenharmony_ci};
366262306a36Sopenharmony_ci
366362306a36Sopenharmony_ci/* This is used for a general mmap of a disk file */
366462306a36Sopenharmony_ci
366562306a36Sopenharmony_ciint generic_file_mmap(struct file *file, struct vm_area_struct *vma)
366662306a36Sopenharmony_ci{
366762306a36Sopenharmony_ci	struct address_space *mapping = file->f_mapping;
366862306a36Sopenharmony_ci
366962306a36Sopenharmony_ci	if (!mapping->a_ops->read_folio)
367062306a36Sopenharmony_ci		return -ENOEXEC;
367162306a36Sopenharmony_ci	file_accessed(file);
367262306a36Sopenharmony_ci	vma->vm_ops = &generic_file_vm_ops;
367362306a36Sopenharmony_ci	return 0;
367462306a36Sopenharmony_ci}
367562306a36Sopenharmony_ci
367662306a36Sopenharmony_ci/*
367762306a36Sopenharmony_ci * This is for filesystems which do not implement ->writepage.
367862306a36Sopenharmony_ci */
367962306a36Sopenharmony_ciint generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma)
368062306a36Sopenharmony_ci{
368162306a36Sopenharmony_ci	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
368262306a36Sopenharmony_ci		return -EINVAL;
368362306a36Sopenharmony_ci	return generic_file_mmap(file, vma);
368462306a36Sopenharmony_ci}
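
/*
 * Example of the check above (illustrative): with
 * generic_file_readonly_mmap(), a MAP_SHARED mapping of a file opened
 * read-only succeeds (mmap() clears VM_MAYWRITE for it), and a writable
 * MAP_PRIVATE mapping is fine because CoW never reaches the file, but
 * MAP_SHARED on a descriptor opened for writing fails with -EINVAL even
 * for PROT_READ, since VM_MAYWRITE means mprotect() could later add
 * PROT_WRITE and we'd need ->writepage after all.
 */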
368562306a36Sopenharmony_ci#else
368662306a36Sopenharmony_civm_fault_t filemap_page_mkwrite(struct vm_fault *vmf)
368762306a36Sopenharmony_ci{
368862306a36Sopenharmony_ci	return VM_FAULT_SIGBUS;
368962306a36Sopenharmony_ci}
369062306a36Sopenharmony_ciint generic_file_mmap(struct file *file, struct vm_area_struct *vma)
369162306a36Sopenharmony_ci{
369262306a36Sopenharmony_ci	return -ENOSYS;
369362306a36Sopenharmony_ci}
369462306a36Sopenharmony_ciint generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma)
369562306a36Sopenharmony_ci{
369662306a36Sopenharmony_ci	return -ENOSYS;
369762306a36Sopenharmony_ci}
369862306a36Sopenharmony_ci#endif /* CONFIG_MMU */
369962306a36Sopenharmony_ci
370062306a36Sopenharmony_ciEXPORT_SYMBOL(filemap_page_mkwrite);
370162306a36Sopenharmony_ciEXPORT_SYMBOL(generic_file_mmap);
370262306a36Sopenharmony_ciEXPORT_SYMBOL(generic_file_readonly_mmap);
370362306a36Sopenharmony_ci
370462306a36Sopenharmony_cistatic struct folio *do_read_cache_folio(struct address_space *mapping,
370562306a36Sopenharmony_ci		pgoff_t index, filler_t filler, struct file *file, gfp_t gfp)
370662306a36Sopenharmony_ci{
370762306a36Sopenharmony_ci	struct folio *folio;
370862306a36Sopenharmony_ci	int err;
370962306a36Sopenharmony_ci
371062306a36Sopenharmony_ci	if (!filler)
371162306a36Sopenharmony_ci		filler = mapping->a_ops->read_folio;
371262306a36Sopenharmony_cirepeat:
371362306a36Sopenharmony_ci	folio = filemap_get_folio(mapping, index);
371462306a36Sopenharmony_ci	if (IS_ERR(folio)) {
371562306a36Sopenharmony_ci		folio = filemap_alloc_folio(gfp, 0);
371662306a36Sopenharmony_ci		if (!folio)
371762306a36Sopenharmony_ci			return ERR_PTR(-ENOMEM);
371862306a36Sopenharmony_ci		err = filemap_add_folio(mapping, folio, index, gfp);
371962306a36Sopenharmony_ci		if (unlikely(err)) {
372062306a36Sopenharmony_ci			folio_put(folio);
372162306a36Sopenharmony_ci			if (err == -EEXIST)
372262306a36Sopenharmony_ci				goto repeat;
372362306a36Sopenharmony_ci			/* Presumably ENOMEM for xarray node */
372462306a36Sopenharmony_ci			return ERR_PTR(err);
372562306a36Sopenharmony_ci		}
372662306a36Sopenharmony_ci
372762306a36Sopenharmony_ci		goto filler;
372862306a36Sopenharmony_ci	}
372962306a36Sopenharmony_ci	if (folio_test_uptodate(folio))
373062306a36Sopenharmony_ci		goto out;
373162306a36Sopenharmony_ci
373262306a36Sopenharmony_ci	if (!folio_trylock(folio)) {
373362306a36Sopenharmony_ci		folio_put_wait_locked(folio, TASK_UNINTERRUPTIBLE);
373462306a36Sopenharmony_ci		goto repeat;
373562306a36Sopenharmony_ci	}
373662306a36Sopenharmony_ci
373762306a36Sopenharmony_ci	/* Folio was truncated from mapping */
373862306a36Sopenharmony_ci	if (!folio->mapping) {
373962306a36Sopenharmony_ci		folio_unlock(folio);
374062306a36Sopenharmony_ci		folio_put(folio);
374162306a36Sopenharmony_ci		goto repeat;
374262306a36Sopenharmony_ci	}
374362306a36Sopenharmony_ci
374462306a36Sopenharmony_ci	/* Someone else locked and filled the page in a very small window */
374562306a36Sopenharmony_ci	if (folio_test_uptodate(folio)) {
374662306a36Sopenharmony_ci		folio_unlock(folio);
374762306a36Sopenharmony_ci		goto out;
374862306a36Sopenharmony_ci	}
374962306a36Sopenharmony_ci
375062306a36Sopenharmony_cifiller:
375162306a36Sopenharmony_ci	err = filemap_read_folio(file, filler, folio);
375262306a36Sopenharmony_ci	if (err) {
375362306a36Sopenharmony_ci		folio_put(folio);
375462306a36Sopenharmony_ci		if (err == AOP_TRUNCATED_PAGE)
375562306a36Sopenharmony_ci			goto repeat;
375662306a36Sopenharmony_ci		return ERR_PTR(err);
375762306a36Sopenharmony_ci	}
375862306a36Sopenharmony_ci
375962306a36Sopenharmony_ciout:
376062306a36Sopenharmony_ci	folio_mark_accessed(folio);
376162306a36Sopenharmony_ci	return folio;
376262306a36Sopenharmony_ci}
376362306a36Sopenharmony_ci
376462306a36Sopenharmony_ci/**
376562306a36Sopenharmony_ci * read_cache_folio - Read into page cache, fill it if needed.
376662306a36Sopenharmony_ci * @mapping: The address_space to read from.
376762306a36Sopenharmony_ci * @index: The index to read.
376862306a36Sopenharmony_ci * @filler: Function to perform the read, or NULL to use aops->read_folio().
376962306a36Sopenharmony_ci * @file: Passed to filler function, may be NULL if not required.
377062306a36Sopenharmony_ci *
377162306a36Sopenharmony_ci * Read one page into the page cache.  If it succeeds, the folio returned
377262306a36Sopenharmony_ci * will contain @index, but it may not be the first page of the folio.
377362306a36Sopenharmony_ci *
377462306a36Sopenharmony_ci * If the filler function returns an error, it will be returned to the
377562306a36Sopenharmony_ci * caller.
377662306a36Sopenharmony_ci *
377762306a36Sopenharmony_ci * Context: May sleep.  Expects mapping->invalidate_lock to be held.
377862306a36Sopenharmony_ci * Return: An uptodate folio on success, ERR_PTR() on failure.
377962306a36Sopenharmony_ci */
378062306a36Sopenharmony_cistruct folio *read_cache_folio(struct address_space *mapping, pgoff_t index,
378162306a36Sopenharmony_ci		filler_t filler, struct file *file)
378262306a36Sopenharmony_ci{
378362306a36Sopenharmony_ci	return do_read_cache_folio(mapping, index, filler, file,
378462306a36Sopenharmony_ci			mapping_gfp_mask(mapping));
378562306a36Sopenharmony_ci}
378662306a36Sopenharmony_ciEXPORT_SYMBOL(read_cache_folio);
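
/*
 * Usage sketch (illustrative): the caller receives an uptodate folio
 * with an elevated refcount and must drop it with folio_put(); the
 * folio contains @index but need not start there.  "pos" below is a
 * byte position within the file, assumed from context.
 *
 *	struct folio *folio;
 *	void *addr;
 *
 *	folio = read_cache_folio(mapping, index, NULL, file);
 *	if (IS_ERR(folio))
 *		return PTR_ERR(folio);
 *	addr = kmap_local_folio(folio, offset_in_folio(folio, pos));
 *	... read the data at addr ...
 *	kunmap_local(addr);
 *	folio_put(folio);
 */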
378762306a36Sopenharmony_ci
378862306a36Sopenharmony_ci/**
378962306a36Sopenharmony_ci * mapping_read_folio_gfp - Read into page cache, using specified allocation flags.
379062306a36Sopenharmony_ci * @mapping:	The address_space for the folio.
379162306a36Sopenharmony_ci * @index:	The index that the allocated folio will contain.
379262306a36Sopenharmony_ci * @gfp:	The page allocator flags to use if allocating.
379362306a36Sopenharmony_ci *
379462306a36Sopenharmony_ci * This is the same as "read_cache_folio(mapping, index, NULL, NULL)", but with
379562306a36Sopenharmony_ci * any new memory allocations done using the specified allocation flags.
379662306a36Sopenharmony_ci *
379762306a36Sopenharmony_ci * The most likely error from this function is EIO, but ENOMEM is
379862306a36Sopenharmony_ci * possible and so is EINTR.  If ->read_folio returns another error,
379962306a36Sopenharmony_ci * that will be returned to the caller.
380062306a36Sopenharmony_ci *
380162306a36Sopenharmony_ci * The function expects mapping->invalidate_lock to be already held.
380262306a36Sopenharmony_ci *
380362306a36Sopenharmony_ci * Return: Uptodate folio on success, ERR_PTR() on failure.
380462306a36Sopenharmony_ci */
380562306a36Sopenharmony_cistruct folio *mapping_read_folio_gfp(struct address_space *mapping,
380662306a36Sopenharmony_ci		pgoff_t index, gfp_t gfp)
380762306a36Sopenharmony_ci{
380862306a36Sopenharmony_ci	return do_read_cache_folio(mapping, index, NULL, NULL, gfp);
380962306a36Sopenharmony_ci}
381062306a36Sopenharmony_ciEXPORT_SYMBOL(mapping_read_folio_gfp);
381162306a36Sopenharmony_ci
381262306a36Sopenharmony_cistatic struct page *do_read_cache_page(struct address_space *mapping,
381362306a36Sopenharmony_ci		pgoff_t index, filler_t *filler, struct file *file, gfp_t gfp)
381462306a36Sopenharmony_ci{
381562306a36Sopenharmony_ci	struct folio *folio;
381662306a36Sopenharmony_ci
381762306a36Sopenharmony_ci	folio = do_read_cache_folio(mapping, index, filler, file, gfp);
381862306a36Sopenharmony_ci	if (IS_ERR(folio))
381962306a36Sopenharmony_ci		return &folio->page;
382062306a36Sopenharmony_ci	return folio_file_page(folio, index);
382162306a36Sopenharmony_ci}
382262306a36Sopenharmony_ci
382362306a36Sopenharmony_cistruct page *read_cache_page(struct address_space *mapping,
382462306a36Sopenharmony_ci			pgoff_t index, filler_t *filler, struct file *file)
382562306a36Sopenharmony_ci{
382662306a36Sopenharmony_ci	return do_read_cache_page(mapping, index, filler, file,
382762306a36Sopenharmony_ci			mapping_gfp_mask(mapping));
382862306a36Sopenharmony_ci}
382962306a36Sopenharmony_ciEXPORT_SYMBOL(read_cache_page);
383062306a36Sopenharmony_ci
383162306a36Sopenharmony_ci/**
383262306a36Sopenharmony_ci * read_cache_page_gfp - read into page cache, using specified page allocation flags.
383362306a36Sopenharmony_ci * @mapping:	the page's address_space
383462306a36Sopenharmony_ci * @index:	the page index
383562306a36Sopenharmony_ci * @gfp:	the page allocator flags to use if allocating
383662306a36Sopenharmony_ci *
383762306a36Sopenharmony_ci * This is the same as "read_mapping_page(mapping, index, NULL)", but with
383862306a36Sopenharmony_ci * any new page allocations done using the specified allocation flags.
383962306a36Sopenharmony_ci *
384062306a36Sopenharmony_ci * If the page does not get brought uptodate, return -EIO.
384162306a36Sopenharmony_ci *
384262306a36Sopenharmony_ci * The function expects mapping->invalidate_lock to be already held.
384362306a36Sopenharmony_ci *
384462306a36Sopenharmony_ci * Return: up to date page on success, ERR_PTR() on failure.
384562306a36Sopenharmony_ci */
384662306a36Sopenharmony_cistruct page *read_cache_page_gfp(struct address_space *mapping,
384762306a36Sopenharmony_ci				pgoff_t index,
384862306a36Sopenharmony_ci				gfp_t gfp)
384962306a36Sopenharmony_ci{
385062306a36Sopenharmony_ci	return do_read_cache_page(mapping, index, NULL, NULL, gfp);
385162306a36Sopenharmony_ci}
385262306a36Sopenharmony_ciEXPORT_SYMBOL(read_cache_page_gfp);
385362306a36Sopenharmony_ci
385462306a36Sopenharmony_ci/*
385562306a36Sopenharmony_ci * Warn about a page cache invalidation failure during a direct I/O write.
385662306a36Sopenharmony_ci */
385762306a36Sopenharmony_cistatic void dio_warn_stale_pagecache(struct file *filp)
385862306a36Sopenharmony_ci{
385962306a36Sopenharmony_ci	static DEFINE_RATELIMIT_STATE(_rs, 86400 * HZ, DEFAULT_RATELIMIT_BURST);
386062306a36Sopenharmony_ci	char pathname[128];
386162306a36Sopenharmony_ci	char *path;
386262306a36Sopenharmony_ci
386362306a36Sopenharmony_ci	errseq_set(&filp->f_mapping->wb_err, -EIO);
386462306a36Sopenharmony_ci	if (__ratelimit(&_rs)) {
386562306a36Sopenharmony_ci		path = file_path(filp, pathname, sizeof(pathname));
386662306a36Sopenharmony_ci		if (IS_ERR(path))
386762306a36Sopenharmony_ci			path = "(unknown)";
386862306a36Sopenharmony_ci		pr_crit("Page cache invalidation failure on direct I/O.  Possible data corruption due to collision with buffered I/O!\n");
386962306a36Sopenharmony_ci		pr_crit("File: %s PID: %d Comm: %.20s\n", path, current->pid,
387062306a36Sopenharmony_ci			current->comm);
387162306a36Sopenharmony_ci	}
387262306a36Sopenharmony_ci}
387362306a36Sopenharmony_ci
387462306a36Sopenharmony_civoid kiocb_invalidate_post_direct_write(struct kiocb *iocb, size_t count)
387562306a36Sopenharmony_ci{
387662306a36Sopenharmony_ci	struct address_space *mapping = iocb->ki_filp->f_mapping;
387762306a36Sopenharmony_ci
387862306a36Sopenharmony_ci	if (mapping->nrpages &&
387962306a36Sopenharmony_ci	    invalidate_inode_pages2_range(mapping,
388062306a36Sopenharmony_ci			iocb->ki_pos >> PAGE_SHIFT,
388162306a36Sopenharmony_ci			(iocb->ki_pos + count - 1) >> PAGE_SHIFT))
388262306a36Sopenharmony_ci		dio_warn_stale_pagecache(iocb->ki_filp);
388362306a36Sopenharmony_ci}
388462306a36Sopenharmony_ci
388562306a36Sopenharmony_cissize_t
388662306a36Sopenharmony_cigeneric_file_direct_write(struct kiocb *iocb, struct iov_iter *from)
388762306a36Sopenharmony_ci{
388862306a36Sopenharmony_ci	struct address_space *mapping = iocb->ki_filp->f_mapping;
388962306a36Sopenharmony_ci	size_t write_len = iov_iter_count(from);
389062306a36Sopenharmony_ci	ssize_t written;
389162306a36Sopenharmony_ci
389262306a36Sopenharmony_ci	/*
389362306a36Sopenharmony_ci	 * If a page cannot be invalidated, return 0 to fall back
389462306a36Sopenharmony_ci	 * to a buffered write.
389562306a36Sopenharmony_ci	 */
389662306a36Sopenharmony_ci	written = kiocb_invalidate_pages(iocb, write_len);
389762306a36Sopenharmony_ci	if (written) {
389862306a36Sopenharmony_ci		if (written == -EBUSY)
389962306a36Sopenharmony_ci			return 0;
390062306a36Sopenharmony_ci		return written;
390162306a36Sopenharmony_ci	}
390262306a36Sopenharmony_ci
390362306a36Sopenharmony_ci	written = mapping->a_ops->direct_IO(iocb, from);
390462306a36Sopenharmony_ci
390562306a36Sopenharmony_ci	/*
390662306a36Sopenharmony_ci	 * Finally, try again to invalidate clean pages which might have been
390762306a36Sopenharmony_ci	 * cached by non-direct readahead, or faulted in by get_user_pages()
390862306a36Sopenharmony_ci	 * if the source of the write was an mmap'ed region of the file
390962306a36Sopenharmony_ci	 * we're writing.  Either one is a pretty crazy thing to do,
391062306a36Sopenharmony_ci	 * so we don't support it 100%.  If this invalidation
391162306a36Sopenharmony_ci	 * fails, tough, the write still worked...
391262306a36Sopenharmony_ci	 *
391362306a36Sopenharmony_ci	 * Most of the time we do not need this since dio_complete() will do
391462306a36Sopenharmony_ci	 * the invalidation for us. However there are some file systems that
391562306a36Sopenharmony_ci	 * do not end up with dio_complete() being called, so let's not break
391662306a36Sopenharmony_ci	 * them by removing it completely.
391762306a36Sopenharmony_ci	 *
391862306a36Sopenharmony_ci	 * A notable example is blkdev_direct_IO().
391962306a36Sopenharmony_ci	 *
392062306a36Sopenharmony_ci	 * Skip invalidation for async writes or if mapping has no pages.
392162306a36Sopenharmony_ci	 */
392262306a36Sopenharmony_ci	if (written > 0) {
392362306a36Sopenharmony_ci		struct inode *inode = mapping->host;
392462306a36Sopenharmony_ci		loff_t pos = iocb->ki_pos;
392562306a36Sopenharmony_ci
392662306a36Sopenharmony_ci		kiocb_invalidate_post_direct_write(iocb, written);
392762306a36Sopenharmony_ci		pos += written;
392862306a36Sopenharmony_ci		write_len -= written;
392962306a36Sopenharmony_ci		if (pos > i_size_read(inode) && !S_ISBLK(inode->i_mode)) {
393062306a36Sopenharmony_ci			i_size_write(inode, pos);
393162306a36Sopenharmony_ci			mark_inode_dirty(inode);
393262306a36Sopenharmony_ci		}
393362306a36Sopenharmony_ci		iocb->ki_pos = pos;
393462306a36Sopenharmony_ci	}
393562306a36Sopenharmony_ci	if (written != -EIOCBQUEUED)
393662306a36Sopenharmony_ci		iov_iter_revert(from, write_len - iov_iter_count(from));
393762306a36Sopenharmony_ci	return written;
393862306a36Sopenharmony_ci}
393962306a36Sopenharmony_ciEXPORT_SYMBOL(generic_file_direct_write);
394062306a36Sopenharmony_ci
394162306a36Sopenharmony_cissize_t generic_perform_write(struct kiocb *iocb, struct iov_iter *i)
394262306a36Sopenharmony_ci{
394362306a36Sopenharmony_ci	struct file *file = iocb->ki_filp;
394462306a36Sopenharmony_ci	loff_t pos = iocb->ki_pos;
394562306a36Sopenharmony_ci	struct address_space *mapping = file->f_mapping;
394662306a36Sopenharmony_ci	const struct address_space_operations *a_ops = mapping->a_ops;
394762306a36Sopenharmony_ci	long status = 0;
394862306a36Sopenharmony_ci	ssize_t written = 0;
394962306a36Sopenharmony_ci
395062306a36Sopenharmony_ci	do {
395162306a36Sopenharmony_ci		struct page *page;
395262306a36Sopenharmony_ci		unsigned long offset;	/* Offset into pagecache page */
395362306a36Sopenharmony_ci		unsigned long bytes;	/* Bytes to write to page */
395462306a36Sopenharmony_ci		size_t copied;		/* Bytes copied from user */
395562306a36Sopenharmony_ci		void *fsdata = NULL;
395662306a36Sopenharmony_ci
395762306a36Sopenharmony_ci		offset = (pos & (PAGE_SIZE - 1));
395862306a36Sopenharmony_ci		bytes = min_t(unsigned long, PAGE_SIZE - offset,
395962306a36Sopenharmony_ci						iov_iter_count(i));
396062306a36Sopenharmony_ci
396162306a36Sopenharmony_ciagain:
396262306a36Sopenharmony_ci		/*
396362306a36Sopenharmony_ci		 * Bring in the user page that we will copy from _first_.
396462306a36Sopenharmony_ci		 * Otherwise there's a nasty deadlock on copying from the
396562306a36Sopenharmony_ci		 * same page as we're writing to, without it being marked
396662306a36Sopenharmony_ci		 * up-to-date.
396762306a36Sopenharmony_ci		 */
396862306a36Sopenharmony_ci		if (unlikely(fault_in_iov_iter_readable(i, bytes) == bytes)) {
396962306a36Sopenharmony_ci			status = -EFAULT;
397062306a36Sopenharmony_ci			break;
397162306a36Sopenharmony_ci		}
397262306a36Sopenharmony_ci
397362306a36Sopenharmony_ci		if (fatal_signal_pending(current)) {
397462306a36Sopenharmony_ci			status = -EINTR;
397562306a36Sopenharmony_ci			break;
397662306a36Sopenharmony_ci		}
397762306a36Sopenharmony_ci
397862306a36Sopenharmony_ci		status = a_ops->write_begin(file, mapping, pos, bytes,
397962306a36Sopenharmony_ci						&page, &fsdata);
398062306a36Sopenharmony_ci		if (unlikely(status < 0))
398162306a36Sopenharmony_ci			break;
398262306a36Sopenharmony_ci
398362306a36Sopenharmony_ci		if (mapping_writably_mapped(mapping))
398462306a36Sopenharmony_ci			flush_dcache_page(page);
398562306a36Sopenharmony_ci
398662306a36Sopenharmony_ci		copied = copy_page_from_iter_atomic(page, offset, bytes, i);
398762306a36Sopenharmony_ci		flush_dcache_page(page);
398862306a36Sopenharmony_ci
398962306a36Sopenharmony_ci		status = a_ops->write_end(file, mapping, pos, bytes, copied,
399062306a36Sopenharmony_ci						page, fsdata);
399162306a36Sopenharmony_ci		if (unlikely(status != copied)) {
399262306a36Sopenharmony_ci			iov_iter_revert(i, copied - max(status, 0L));
399362306a36Sopenharmony_ci			if (unlikely(status < 0))
399462306a36Sopenharmony_ci				break;
399562306a36Sopenharmony_ci		}
399662306a36Sopenharmony_ci		cond_resched();
399762306a36Sopenharmony_ci
399862306a36Sopenharmony_ci		if (unlikely(status == 0)) {
399962306a36Sopenharmony_ci			/*
400062306a36Sopenharmony_ci			 * A short copy made ->write_end() reject the
400162306a36Sopenharmony_ci			 * thing entirely.  Might be memory poisoning
400262306a36Sopenharmony_ci			 * halfway through, might be a race with munmap,
400362306a36Sopenharmony_ci			 * might be severe memory pressure.
400462306a36Sopenharmony_ci			 */
400562306a36Sopenharmony_ci			if (copied)
400662306a36Sopenharmony_ci				bytes = copied;
400762306a36Sopenharmony_ci			goto again;
400862306a36Sopenharmony_ci		}
400962306a36Sopenharmony_ci		pos += status;
401062306a36Sopenharmony_ci		written += status;
401162306a36Sopenharmony_ci
401262306a36Sopenharmony_ci		balance_dirty_pages_ratelimited(mapping);
401362306a36Sopenharmony_ci	} while (iov_iter_count(i));
401462306a36Sopenharmony_ci
401562306a36Sopenharmony_ci	if (!written)
401662306a36Sopenharmony_ci		return status;
401762306a36Sopenharmony_ci	iocb->ki_pos += written;
401862306a36Sopenharmony_ci	return written;
401962306a36Sopenharmony_ci}
402062306a36Sopenharmony_ciEXPORT_SYMBOL(generic_perform_write);
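
/*
 * Worked example of the per-iteration split above (illustrative,
 * assuming 4KiB pages): for pos == 5000 with 10000 bytes left in @i,
 * offset == 5000 & 4095 == 904 and bytes == min(4096 - 904, 10000)
 * == 3192, so the first write_begin/write_end cycle only covers the
 * tail of the page containing pos, and subsequent iterations run
 * page-aligned with offset == 0 and bytes == 4096.
 */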
402162306a36Sopenharmony_ci
402262306a36Sopenharmony_ci/**
402362306a36Sopenharmony_ci * __generic_file_write_iter - write data to a file
402462306a36Sopenharmony_ci * @iocb:	IO state structure (file, offset, etc.)
402562306a36Sopenharmony_ci * @from:	iov_iter with data to write
402662306a36Sopenharmony_ci *
402762306a36Sopenharmony_ci * This function does all the work needed for actually writing data to a
402862306a36Sopenharmony_ci * file. It does all basic checks, removes SUID from the file, updates
402962306a36Sopenharmony_ci * modification times and calls proper subroutines depending on whether we
403062306a36Sopenharmony_ci * do direct IO or a standard buffered write.
403162306a36Sopenharmony_ci *
403262306a36Sopenharmony_ci * It expects i_rwsem to be grabbed unless we work on a block device or similar
403362306a36Sopenharmony_ci * object which does not need locking at all.
403462306a36Sopenharmony_ci *
403562306a36Sopenharmony_ci * This function does *not* take care of syncing data in case of O_SYNC write.
403662306a36Sopenharmony_ci * The caller has to handle it. This is mainly because we want to
403762306a36Sopenharmony_ci * avoid syncing under i_rwsem.
403862306a36Sopenharmony_ci *
403962306a36Sopenharmony_ci * Return:
404062306a36Sopenharmony_ci * * number of bytes written, even for truncated writes
404162306a36Sopenharmony_ci * * negative error code if no data has been written at all
404262306a36Sopenharmony_ci */
404362306a36Sopenharmony_cissize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
404462306a36Sopenharmony_ci{
404562306a36Sopenharmony_ci	struct file *file = iocb->ki_filp;
404662306a36Sopenharmony_ci	struct address_space *mapping = file->f_mapping;
404762306a36Sopenharmony_ci	struct inode *inode = mapping->host;
404862306a36Sopenharmony_ci	ssize_t ret;
404962306a36Sopenharmony_ci
405062306a36Sopenharmony_ci	ret = file_remove_privs(file);
405162306a36Sopenharmony_ci	if (ret)
405262306a36Sopenharmony_ci		return ret;
405362306a36Sopenharmony_ci
405462306a36Sopenharmony_ci	ret = file_update_time(file);
405562306a36Sopenharmony_ci	if (ret)
405662306a36Sopenharmony_ci		return ret;
405762306a36Sopenharmony_ci
405862306a36Sopenharmony_ci	if (iocb->ki_flags & IOCB_DIRECT) {
405962306a36Sopenharmony_ci		ret = generic_file_direct_write(iocb, from);
406062306a36Sopenharmony_ci		/*
406162306a36Sopenharmony_ci		 * If the write stopped short of completing, fall back to
406262306a36Sopenharmony_ci		 * buffered writes.  Some filesystems do this for writes to
406362306a36Sopenharmony_ci		 * holes, for example.  For DAX files, a buffered write will
406462306a36Sopenharmony_ci		 * not succeed (even if it did, DAX does not handle dirty
406562306a36Sopenharmony_ci		 * page-cache pages correctly).
406662306a36Sopenharmony_ci		 */
406762306a36Sopenharmony_ci		if (ret < 0 || !iov_iter_count(from) || IS_DAX(inode))
406862306a36Sopenharmony_ci			return ret;
406962306a36Sopenharmony_ci		return direct_write_fallback(iocb, from, ret,
407062306a36Sopenharmony_ci				generic_perform_write(iocb, from));
407162306a36Sopenharmony_ci	}
407262306a36Sopenharmony_ci
407362306a36Sopenharmony_ci	return generic_perform_write(iocb, from);
407462306a36Sopenharmony_ci}
407562306a36Sopenharmony_ciEXPORT_SYMBOL(__generic_file_write_iter);
407662306a36Sopenharmony_ci
407762306a36Sopenharmony_ci/**
407862306a36Sopenharmony_ci * generic_file_write_iter - write data to a file
407962306a36Sopenharmony_ci * @iocb:	IO state structure
408062306a36Sopenharmony_ci * @from:	iov_iter with data to write
408162306a36Sopenharmony_ci *
408262306a36Sopenharmony_ci * This is a wrapper around __generic_file_write_iter() to be used by most
408362306a36Sopenharmony_ci * filesystems. It takes care of syncing the file in case of an O_SYNC write
408462306a36Sopenharmony_ci * and acquires i_rwsem as needed.
408562306a36Sopenharmony_ci * Return:
408662306a36Sopenharmony_ci * * negative error code if no data has been written at all or
408762306a36Sopenharmony_ci *   vfs_fsync_range() failed for a synchronous write
408862306a36Sopenharmony_ci * * number of bytes written, even for truncated writes
408962306a36Sopenharmony_ci */
409062306a36Sopenharmony_cissize_t generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
409162306a36Sopenharmony_ci{
409262306a36Sopenharmony_ci	struct file *file = iocb->ki_filp;
409362306a36Sopenharmony_ci	struct inode *inode = file->f_mapping->host;
409462306a36Sopenharmony_ci	ssize_t ret;
409562306a36Sopenharmony_ci
409662306a36Sopenharmony_ci	inode_lock(inode);
409762306a36Sopenharmony_ci	ret = generic_write_checks(iocb, from);
409862306a36Sopenharmony_ci	if (ret > 0)
409962306a36Sopenharmony_ci		ret = __generic_file_write_iter(iocb, from);
410062306a36Sopenharmony_ci	inode_unlock(inode);
410162306a36Sopenharmony_ci
410262306a36Sopenharmony_ci	if (ret > 0)
410362306a36Sopenharmony_ci		ret = generic_write_sync(iocb, ret);
410462306a36Sopenharmony_ci	return ret;
410562306a36Sopenharmony_ci}
410662306a36Sopenharmony_ciEXPORT_SYMBOL(generic_file_write_iter);
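
/*
 * Illustrative wiring (hypothetical "myfs", not part of this file):
 * filesystems whose aops provide ->read_folio, ->write_begin and
 * ->write_end can typically point their file_operations straight at
 * the generic helpers in this file.
 *
 *	const struct file_operations myfs_file_operations = {
 *		.llseek		= generic_file_llseek,
 *		.read_iter	= generic_file_read_iter,
 *		.write_iter	= generic_file_write_iter,
 *		.mmap		= generic_file_mmap,
 *		.splice_read	= filemap_splice_read,
 *	};
 */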

/**
 * filemap_release_folio() - Release fs-specific metadata on a folio.
 * @folio: The folio which the kernel is trying to free.
 * @gfp: Memory allocation flags (and I/O mode).
 *
 * The address_space is trying to release any data attached to a folio
 * (presumably at folio->private).
 *
 * This will also be called if the private_2 flag is set on the folio,
 * indicating that the folio has other metadata associated with it.
 *
 * The @gfp argument specifies whether I/O may be performed to release
 * this folio (__GFP_IO), and whether the call may block
 * (__GFP_RECLAIM & __GFP_FS).
 *
 * Return: %true if the release was successful, otherwise %false.
 */
bool filemap_release_folio(struct folio *folio, gfp_t gfp)
{
	struct address_space * const mapping = folio->mapping;

	BUG_ON(!folio_test_locked(folio));
	if (!folio_needs_release(folio))
		return true;
	if (folio_test_writeback(folio))
		return false;

	if (mapping && mapping->a_ops->release_folio)
		return mapping->a_ops->release_folio(folio, gfp);
	return try_to_free_buffers(folio);
}
EXPORT_SYMBOL(filemap_release_folio);
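
/*
 * Handler sketch (illustration only, not part of this file): a minimal
 * ->release_folio for a hypothetical filesystem that keeps per-folio
 * state in folio->private.  "myfs" and myfs_folio_state are assumptions;
 * real implementations (e.g. buffer_head based ones) do more work.
 */
#if 0
static bool myfs_release_folio(struct folio *folio, gfp_t gfp)
{
	struct myfs_folio_state *state;

	/* Nothing attached?  Then there is nothing to release. */
	if (!folio_test_private(folio))
		return true;

	/* Detach and free the private state; the folio is now clean. */
	state = folio_detach_private(folio);
	kfree(state);
	return true;
}
#endif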

#ifdef CONFIG_CACHESTAT_SYSCALL
/**
 * filemap_cachestat() - compute the page cache statistics of a mapping
 * @mapping:	The mapping to compute the statistics for.
 * @first_index:	The starting page cache index.
 * @last_index:	The final page index (inclusive).
 * @cs:	the cachestat struct to write the result to.
 *
 * This will query the page cache statistics of a mapping in the
 * page range of [first_index, last_index] (inclusive). The statistics
 * queried include: number of dirty pages, number of pages marked for
 * writeback, and the number of (recently) evicted pages.
 */
static void filemap_cachestat(struct address_space *mapping,
		pgoff_t first_index, pgoff_t last_index, struct cachestat *cs)
{
	XA_STATE(xas, &mapping->i_pages, first_index);
	struct folio *folio;

	rcu_read_lock();
	xas_for_each(&xas, folio, last_index) {
		int order;
		unsigned long nr_pages;
		pgoff_t folio_first_index, folio_last_index;

		/*
		 * Don't deref the folio. It is not pinned, and might
		 * get freed (and reused) underneath us.
		 *
		 * We *could* pin it, but that would be expensive for
		 * what should be a fast and lightweight syscall.
		 *
		 * Instead, derive all information of interest from
		 * the rcu-protected xarray.
		 */

		if (xas_retry(&xas, folio))
			continue;

		order = xa_get_order(xas.xa, xas.xa_index);
		nr_pages = 1 << order;
		folio_first_index = round_down(xas.xa_index, 1 << order);
		folio_last_index = folio_first_index + nr_pages - 1;

		/* Folios might straddle the range boundaries; only count covered pages. */
		if (folio_first_index < first_index)
			nr_pages -= first_index - folio_first_index;

		if (folio_last_index > last_index)
			nr_pages -= folio_last_index - last_index;
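
		/*
		 * Worked example (hypothetical numbers): an order-2 folio
		 * covers indices [4, 7].  A query of [5, 6] starts with
		 * nr_pages == 4, trims one page at the front (5 - 4) and
		 * one at the back (7 - 6), and counts the two covered
		 * pages.
		 */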

		if (xa_is_value(folio)) {
			/* page is evicted */
			void *shadow = (void *)folio;
			bool workingset; /* not used */

			cs->nr_evicted += nr_pages;

#ifdef CONFIG_SWAP /* implies CONFIG_MMU */
			if (shmem_mapping(mapping)) {
				/* shmem file - in swap cache */
				swp_entry_t swp = radix_to_swp_entry(folio);

				shadow = get_shadow_from_swap_cache(swp);
			}
#endif
			if (workingset_test_recent(shadow, true, &workingset))
				cs->nr_recently_evicted += nr_pages;

			goto resched;
		}

		/* page is in cache */
		cs->nr_cache += nr_pages;

		if (xas_get_mark(&xas, PAGECACHE_TAG_DIRTY))
			cs->nr_dirty += nr_pages;

		if (xas_get_mark(&xas, PAGECACHE_TAG_WRITEBACK))
			cs->nr_writeback += nr_pages;

resched:
		if (need_resched()) {
			xas_pause(&xas);
			cond_resched_rcu();
		}
	}
	rcu_read_unlock();
}

/*
 * The cachestat(2) system call.
 *
 * cachestat() returns the page cache statistics of a file in the
 * byte range specified by `off` and `len`: number of cached pages,
 * number of dirty pages, number of pages marked for writeback,
 * number of evicted pages, and number of recently evicted pages.
 *
 * An evicted page is a page that was previously in the page cache
 * but has been evicted since. A page is recently evicted if its last
 * eviction was recent enough that its reentry to the cache would
 * indicate that it is actively being used by the system, and that
 * there is memory pressure on the system.
 *
 * `off` and `len` must be non-negative integers. If `len` > 0,
 * the queried range is [`off`, `off` + `len`). If `len` == 0,
 * we will query in the range from `off` to the end of the file.
 *
 * The `flags` argument is unused for now, but is included for future
 * extensibility. Users should pass 0 (i.e. no flags specified).
 *
 * Currently, hugetlbfs is not supported.
 *
 * Because the status of a page can change after cachestat() checks it
 * but before it returns to the application, the returned values may
 * contain stale information.
 *
 * return values:
 *  zero        - success
 *  -EFAULT     - cstat or cstat_range points to an illegal address
 *  -EINVAL     - invalid flags
 *  -EBADF      - invalid file descriptor
 *  -EOPNOTSUPP - file descriptor is of a hugetlbfs file
 */
SYSCALL_DEFINE4(cachestat, unsigned int, fd,
		struct cachestat_range __user *, cstat_range,
		struct cachestat __user *, cstat, unsigned int, flags)
{
	struct fd f = fdget(fd);
	struct address_space *mapping;
	struct cachestat_range csr;
	struct cachestat cs;
	pgoff_t first_index, last_index;

	if (!f.file)
		return -EBADF;

	if (copy_from_user(&csr, cstat_range,
			sizeof(struct cachestat_range))) {
		fdput(f);
		return -EFAULT;
	}

	/* hugetlbfs is not supported */
	if (is_file_hugepages(f.file)) {
		fdput(f);
		return -EOPNOTSUPP;
	}

	if (flags != 0) {
		fdput(f);
		return -EINVAL;
	}

	first_index = csr.off >> PAGE_SHIFT;
	last_index =
		csr.len == 0 ? ULONG_MAX : (csr.off + csr.len - 1) >> PAGE_SHIFT;
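	/*
	 * Example (assuming 4KiB pages): off == 4096 and len == 8192
	 * query the byte range [4096, 12288), i.e. page indices 1 and 2:
	 * first_index == 1, last_index == (12287 >> 12) == 2.
	 */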
	memset(&cs, 0, sizeof(struct cachestat));
	mapping = f.file->f_mapping;
	filemap_cachestat(mapping, first_index, last_index, &cs);
	fdput(f);

	if (copy_to_user(cstat, &cs, sizeof(struct cachestat)))
		return -EFAULT;

	return 0;
}
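
/*
 * Userspace sketch (illustration only, not part of this file): calling
 * cachestat(2) directly via syscall(2).  Assumes a kernel and uapi
 * headers new enough to provide struct cachestat{,_range} and
 * __NR_cachestat; glibc may not offer a wrapper.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/mman.h>	/* struct cachestat, struct cachestat_range */

int main(void)
{
	struct cachestat_range range = { .off = 0, .len = 0 };	/* whole file */
	struct cachestat cs;
	int fd = open("/etc/hostname", O_RDONLY);

	if (fd < 0 || syscall(__NR_cachestat, fd, &range, &cs, 0))
		return 1;

	printf("cached %llu dirty %llu writeback %llu evicted %llu recent %llu\n",
	       (unsigned long long)cs.nr_cache,
	       (unsigned long long)cs.nr_dirty,
	       (unsigned long long)cs.nr_writeback,
	       (unsigned long long)cs.nr_evicted,
	       (unsigned long long)cs.nr_recently_evicted);
	return 0;
}
#endif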
#endif /* CONFIG_CACHESTAT_SYSCALL */