// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/ext4/readpage.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 * Copyright (C) 2015, Google, Inc.
 *
 * This was originally taken from fs/mpage.c
 *
 * The ext4_mpage_readpages() function here is intended to
 * replace mpage_readahead() in the general case, not just for
 * encrypted files.  It has some limitations (see below), where it
 * will fall back to block_read_full_folio(), but these limitations
 * should only be hit when page_size != block_size.
 *
 * This will allow us to attach a callback function to support ext4
 * encryption.
 *
 * If anything unusual happens, such as:
 *
 * - encountering a page which has buffers
 * - encountering a page which has a non-hole after a hole
 * - encountering a page with non-contiguous blocks
 *
 * then this code just gives up and calls the buffer_head-based read function.
 * It does handle a page which has holes at the end, which is a common case:
 * the end of file on blocksize < PAGE_SIZE setups.
 *
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/kdev_t.h>
#include <linux/gfp.h>
#include <linux/bio.h>
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/highmem.h>
#include <linux/prefetch.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>

#include "ext4.h"

#define NUM_PREALLOC_POST_READ_CTXS	128

static struct kmem_cache *bio_post_read_ctx_cache;
static mempool_t *bio_post_read_ctx_pool;

/* postprocessing steps for read bios */
enum bio_post_read_step {
	STEP_INITIAL = 0,
	STEP_DECRYPT,
	STEP_VERITY,
	STEP_MAX,
};

struct bio_post_read_ctx {
	struct bio *bio;
	struct work_struct work;
	unsigned int cur_step;
	unsigned int enabled_steps;
};

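/*
 * Finish the read on every folio in the bio: on success mark each folio
 * uptodate, on error clear uptodate; unlock them either way, free any
 * attached bio_post_read_ctx, and drop the bio.
 */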
static void __read_end_io(struct bio *bio)
{
	struct folio_iter fi;

	bio_for_each_folio_all(fi, bio) {
		struct folio *folio = fi.folio;

		if (bio->bi_status)
			folio_clear_uptodate(folio);
		else
			folio_mark_uptodate(folio);
		folio_unlock(folio);
	}
	if (bio->bi_private)
		mempool_free(bio->bi_private, bio_post_read_ctx_pool);
	bio_put(bio);
}

static void bio_post_read_processing(struct bio_post_read_ctx *ctx);

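/*
 * Work function for fscrypt: decrypt the bio's data in place, then either
 * continue with the next post-read step, or complete the read if decryption
 * failed.
 */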
static void decrypt_work(struct work_struct *work)
{
	struct bio_post_read_ctx *ctx =
		container_of(work, struct bio_post_read_ctx, work);
	struct bio *bio = ctx->bio;

	if (fscrypt_decrypt_bio(bio))
		bio_post_read_processing(ctx);
	else
		__read_end_io(bio);
}

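/*
 * Work function for fs-verity: verify the bio's data, then complete the
 * read.  The ctx is freed up front; see the comment in the function body
 * about avoiding mempool deadlocks.
 */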
static void verity_work(struct work_struct *work)
{
	struct bio_post_read_ctx *ctx =
		container_of(work, struct bio_post_read_ctx, work);
	struct bio *bio = ctx->bio;

	/*
	 * fsverity_verify_bio() may call readahead() again, and although verity
	 * will be disabled for that, decryption may still be needed, causing
	 * another bio_post_read_ctx to be allocated.  So to guarantee that
	 * mempool_alloc() never deadlocks we must free the current ctx first.
	 * This is safe because verity is the last post-read step.
	 */
	BUILD_BUG_ON(STEP_VERITY + 1 != STEP_MAX);
	mempool_free(ctx, bio_post_read_ctx_pool);
	bio->bi_private = NULL;

	fsverity_verify_bio(bio);

	__read_end_io(bio);
}

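/*
 * Advance to the next enabled post-read step for this bio (decryption, then
 * verity), handing the work off to the appropriate workqueue.  When no steps
 * remain, complete the read.
 */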
static void bio_post_read_processing(struct bio_post_read_ctx *ctx)
{
	/*
	 * We use different work queues for decryption and for verity because
	 * verity may require reading metadata pages that need decryption, and
	 * we shouldn't recurse to the same workqueue.
	 */
	switch (++ctx->cur_step) {
	case STEP_DECRYPT:
		if (ctx->enabled_steps & (1 << STEP_DECRYPT)) {
			INIT_WORK(&ctx->work, decrypt_work);
			fscrypt_enqueue_decrypt_work(&ctx->work);
			return;
		}
		ctx->cur_step++;
		fallthrough;
	case STEP_VERITY:
		if (ctx->enabled_steps & (1 << STEP_VERITY)) {
			INIT_WORK(&ctx->work, verity_work);
			fsverity_enqueue_verify_work(&ctx->work);
			return;
		}
		ctx->cur_step++;
		fallthrough;
	default:
		__read_end_io(ctx->bio);
	}
}

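/*
 * Post-read processing is needed only when the bio completed successfully
 * and a bio_post_read_ctx was attached at submission time.
 */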
static bool bio_post_read_required(struct bio *bio)
{
	return bio->bi_private && !bio->bi_status;
}

/*
 * I/O completion handler for multipage BIOs.
 *
 * The mpage code never puts partial pages into a BIO (except for end-of-file).
 * If a page does not map to a contiguous run of blocks then it simply falls
 * back to block_read_full_folio().
 *
 * Why is this?  If a page's completion depends on a number of different BIOs
 * which can complete in any order (or at the same time) then determining the
 * status of that page is hard.  See end_buffer_async_read() for the details.
 * There is no point in duplicating all that complexity.
 */
static void mpage_end_io(struct bio *bio)
{
	if (bio_post_read_required(bio)) {
		struct bio_post_read_ctx *ctx = bio->bi_private;

		ctx->cur_step = STEP_INITIAL;
		bio_post_read_processing(ctx);
		return;
	}
	__read_end_io(bio);
}

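/*
 * fs-verity only covers the file's data, i.e. pages below i_size.  Pages in
 * the page cache beyond i_size (where ext4 keeps the verity metadata) must
 * not be verified here.
 */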
static inline bool ext4_need_verity(const struct inode *inode, pgoff_t idx)
{
	return fsverity_active(inode) &&
	       idx < DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
}

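/*
 * If the bio's data will need decryption and/or verity verification after
 * the read completes, allocate a bio_post_read_ctx recording the required
 * steps and attach it via bio->bi_private.
 */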
static void ext4_set_bio_post_read_ctx(struct bio *bio,
				       const struct inode *inode,
				       pgoff_t first_idx)
{
	unsigned int post_read_steps = 0;

	if (fscrypt_inode_uses_fs_layer_crypto(inode))
		post_read_steps |= 1 << STEP_DECRYPT;

	if (ext4_need_verity(inode, first_idx))
		post_read_steps |= 1 << STEP_VERITY;

	if (post_read_steps) {
		/* Due to the mempool, this never fails. */
		struct bio_post_read_ctx *ctx =
			mempool_alloc(bio_post_read_ctx_pool, GFP_NOFS);

		ctx->bio = bio;
		ctx->enabled_steps = post_read_steps;
		bio->bi_private = ctx;
	}
}

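/*
 * Reads are normally limited to i_size.  For fs-verity files the limit is
 * raised to s_maxbytes so that the verity metadata stored past EOF can be
 * read through the page cache.
 */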
static inline loff_t ext4_readpage_limit(struct inode *inode)
{
	if (IS_ENABLED(CONFIG_FS_VERITY) && IS_VERITY(inode))
		return inode->i_sb->s_maxbytes;

	return i_size_read(inode);
}

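/*
 * ext4_mpage_readpages - read one folio or a readahead batch of folios.
 * @inode: inode of the file being read
 * @rac: readahead state, or NULL when reading a single folio
 * @folio: folio to read when @rac is NULL (ignored otherwise)
 *
 * Folios backed by a single contiguous run of blocks are batched into large
 * shared bios; anything more complicated falls back to the buffer_head based
 * block_read_full_folio() via the "confused" path.  Always returns 0.
 */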
int ext4_mpage_readpages(struct inode *inode,
		struct readahead_control *rac, struct folio *folio)
{
	struct bio *bio = NULL;
	sector_t last_block_in_bio = 0;

	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
	const unsigned blocksize = 1 << blkbits;
	sector_t next_block;
	sector_t block_in_file;
	sector_t last_block;
	sector_t last_block_in_file;
	sector_t blocks[MAX_BUF_PER_PAGE];
	unsigned page_block;
	struct block_device *bdev = inode->i_sb->s_bdev;
	int length;
	unsigned relative_block = 0;
	struct ext4_map_blocks map;
	unsigned int nr_pages = rac ? readahead_count(rac) : 1;

	map.m_pblk = 0;
	map.m_lblk = 0;
	map.m_len = 0;
	map.m_flags = 0;

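	/*
	 * For each folio, collect the physical blocks backing it in blocks[].
	 * A folio mapping to one contiguous extent is added to the current
	 * bio (or a freshly allocated one); anything unusual jumps to
	 * "confused" and is read with buffer_heads instead.
	 */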
	for (; nr_pages; nr_pages--) {
		int fully_mapped = 1;
		unsigned first_hole = blocks_per_page;

		if (rac)
			folio = readahead_folio(rac);
		prefetchw(&folio->flags);

		if (folio_buffers(folio))
			goto confused;

		block_in_file = next_block =
			(sector_t)folio->index << (PAGE_SHIFT - blkbits);
		last_block = block_in_file + nr_pages * blocks_per_page;
		last_block_in_file = (ext4_readpage_limit(inode) +
				      blocksize - 1) >> blkbits;
		if (last_block > last_block_in_file)
			last_block = last_block_in_file;
		page_block = 0;

		/*
		 * Map blocks using the previous result first.
		 */
		if ((map.m_flags & EXT4_MAP_MAPPED) &&
		    block_in_file > map.m_lblk &&
		    block_in_file < (map.m_lblk + map.m_len)) {
			unsigned map_offset = block_in_file - map.m_lblk;
			unsigned last = map.m_len - map_offset;

			for (relative_block = 0; ; relative_block++) {
				if (relative_block == last) {
					/* needed? */
					map.m_flags &= ~EXT4_MAP_MAPPED;
					break;
				}
				if (page_block == blocks_per_page)
					break;
				blocks[page_block] = map.m_pblk + map_offset +
					relative_block;
				page_block++;
				block_in_file++;
			}
		}

		/*
		 * Then do more ext4_map_blocks() calls until we are
		 * done with this folio.
		 */
		while (page_block < blocks_per_page) {
			if (block_in_file < last_block) {
				map.m_lblk = block_in_file;
				map.m_len = last_block - block_in_file;

				if (ext4_map_blocks(NULL, inode, &map, 0) < 0) {
				set_error_page:
					folio_set_error(folio);
					folio_zero_segment(folio, 0,
							  folio_size(folio));
					folio_unlock(folio);
					goto next_page;
				}
			}
			if ((map.m_flags & EXT4_MAP_MAPPED) == 0) {
				fully_mapped = 0;
				if (first_hole == blocks_per_page)
					first_hole = page_block;
				page_block++;
				block_in_file++;
				continue;
			}
			if (first_hole != blocks_per_page)
				goto confused;		/* hole -> non-hole */

			/* Contiguous blocks? */
			if (page_block && blocks[page_block-1] != map.m_pblk-1)
				goto confused;
			for (relative_block = 0; ; relative_block++) {
				if (relative_block == map.m_len) {
					/* needed? */
					map.m_flags &= ~EXT4_MAP_MAPPED;
					break;
				} else if (page_block == blocks_per_page)
					break;
				blocks[page_block] = map.m_pblk+relative_block;
				page_block++;
				block_in_file++;
			}
		}
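		/*
		 * If the folio ends in a hole, zero the unmapped tail.  If the
		 * whole folio is a hole, there is nothing to read: verify it
		 * (if needed), mark it uptodate and move on without any I/O.
		 */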
		if (first_hole != blocks_per_page) {
			folio_zero_segment(folio, first_hole << blkbits,
					  folio_size(folio));
			if (first_hole == 0) {
				if (ext4_need_verity(inode, folio->index) &&
				    !fsverity_verify_folio(folio))
					goto set_error_page;
				folio_mark_uptodate(folio);
				folio_unlock(folio);
				continue;
			}
		} else if (fully_mapped) {
			folio_set_mappedtodisk(folio);
		}

		/*
		 * This folio will go to BIO.  Do we need to send this
		 * BIO off first?
		 */
		if (bio && (last_block_in_bio != blocks[0] - 1 ||
			    !fscrypt_mergeable_bio(bio, inode, next_block))) {
		submit_and_realloc:
			submit_bio(bio);
			bio = NULL;
		}
		if (bio == NULL) {
			/*
			 * bio_alloc will _always_ be able to allocate a bio if
			 * __GFP_DIRECT_RECLAIM is set, see bio_alloc_bioset().
			 */
			bio = bio_alloc(bdev, bio_max_segs(nr_pages),
					REQ_OP_READ, GFP_KERNEL);
			fscrypt_set_bio_crypt_ctx(bio, inode, next_block,
						  GFP_KERNEL);
			ext4_set_bio_post_read_ctx(bio, inode, folio->index);
			bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9);
			bio->bi_end_io = mpage_end_io;
			if (rac)
				bio->bi_opf |= REQ_RAHEAD;
		}

		length = first_hole << blkbits;
		if (!bio_add_folio(bio, folio, length, 0))
			goto submit_and_realloc;

		if (((map.m_flags & EXT4_MAP_BOUNDARY) &&
		     (relative_block == map.m_len)) ||
		    (first_hole != blocks_per_page)) {
			submit_bio(bio);
			bio = NULL;
		} else
			last_block_in_bio = blocks[blocks_per_page - 1];
		continue;
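	/*
	 * The folio cannot be handled with a single contiguous bio; fall back
	 * to the buffer_head based path.
	 */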
	confused:
		if (bio) {
			submit_bio(bio);
			bio = NULL;
		}
		if (!folio_test_uptodate(folio))
			block_read_full_folio(folio, ext4_get_block);
		else
			folio_unlock(folio);
next_page:
		; /* A label shall be followed by a statement until C23 */
	}
	if (bio)
		submit_bio(bio);
	return 0;
}

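/*
 * Set up the slab cache and mempool used to allocate bio_post_read_ctx
 * structures, preallocating NUM_PREALLOC_POST_READ_CTXS of them so that
 * allocation in the read path never fails.
 */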
int __init ext4_init_post_read_processing(void)
{
	bio_post_read_ctx_cache = KMEM_CACHE(bio_post_read_ctx, SLAB_RECLAIM_ACCOUNT);

	if (!bio_post_read_ctx_cache)
		goto fail;
	bio_post_read_ctx_pool =
		mempool_create_slab_pool(NUM_PREALLOC_POST_READ_CTXS,
					 bio_post_read_ctx_cache);
	if (!bio_post_read_ctx_pool)
		goto fail_free_cache;
	return 0;

fail_free_cache:
	kmem_cache_destroy(bio_post_read_ctx_cache);
fail:
	return -ENOMEM;
}

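/*
 * Tear down the mempool and slab cache created by
 * ext4_init_post_read_processing().
 */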
void ext4_exit_post_read_processing(void)
{
	mempool_destroy(bio_post_read_ctx_pool);
	kmem_cache_destroy(bio_post_read_ctx_cache);
}