// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/ext4/page-io.c
 *
 * This contains the new page_io functions for ext4
 *
 * Written by Theodore Ts'o, 2010.
 */

#include <linux/fs.h>
#include <linux/time.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include <linux/workqueue.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/backing-dev.h>

#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"

static struct kmem_cache *io_end_cachep;
static struct kmem_cache *io_end_vec_cachep;

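/*
 * Set up the slab caches backing the io_end and io_end_vec structures.
 */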
int __init ext4_init_pageio(void)
{
	io_end_cachep = KMEM_CACHE(ext4_io_end, SLAB_RECLAIM_ACCOUNT);
	if (io_end_cachep == NULL)
		return -ENOMEM;

	io_end_vec_cachep = KMEM_CACHE(ext4_io_end_vec, 0);
	if (io_end_vec_cachep == NULL) {
		kmem_cache_destroy(io_end_cachep);
		return -ENOMEM;
	}
	return 0;
}

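/*
 * Tear down the slab caches created by ext4_init_pageio().
 */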
void ext4_exit_pageio(void)
{
	kmem_cache_destroy(io_end_cachep);
	kmem_cache_destroy(io_end_vec_cachep);
}

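/*
 * Allocate a new io_end_vec and link it at the tail of the io_end's
 * vector list.
 */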
struct ext4_io_end_vec *ext4_alloc_io_end_vec(ext4_io_end_t *io_end)
{
	struct ext4_io_end_vec *io_end_vec;

	io_end_vec = kmem_cache_zalloc(io_end_vec_cachep, GFP_NOFS);
	if (!io_end_vec)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&io_end_vec->list);
	list_add_tail(&io_end_vec->list, &io_end->list_vec);
	return io_end_vec;
}

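/*
 * Free all io_end_vec structures attached to the io_end.
 */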
static void ext4_free_io_end_vec(ext4_io_end_t *io_end)
{
	struct ext4_io_end_vec *io_end_vec, *tmp;

	if (list_empty(&io_end->list_vec))
		return;
	list_for_each_entry_safe(io_end_vec, tmp, &io_end->list_vec, list) {
		list_del(&io_end_vec->list);
		kmem_cache_free(io_end_vec_cachep, io_end_vec);
	}
}

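/*
 * Return the most recently added io_end_vec. The list must not be empty.
 */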
struct ext4_io_end_vec *ext4_last_io_end_vec(ext4_io_end_t *io_end)
{
	BUG_ON(list_empty(&io_end->list_vec));
	return list_last_entry(&io_end->list_vec, struct ext4_io_end_vec, list);
}

/*
 * Print a buffer I/O error compatible with fs/buffer.c.  This
 * provides compatibility with dmesg scrapers that look for a specific
 * buffer I/O error message.  We really need a unified error reporting
 * structure to userspace ala Digital Unix's uerf system, but it's
 * probably not going to happen in my lifetime, due to LKML politics...
 */
static void buffer_io_error(struct buffer_head *bh)
{
	printk_ratelimited(KERN_ERR "Buffer I/O error on device %pg, logical block %llu\n",
			   bh->b_bdev,
			   (unsigned long long)bh->b_blocknr);
}

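/*
 * Finish page writeback for all buffers covered by the bio: clear their
 * async_write flags and, once no other async write is in flight on a page,
 * end writeback on it (freeing any fscrypt bounce page first).
 */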
static void ext4_finish_bio(struct bio *bio)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		struct page *page = bvec->bv_page;
		struct page *bounce_page = NULL;
		struct buffer_head *bh, *head;
		unsigned bio_start = bvec->bv_offset;
		unsigned bio_end = bio_start + bvec->bv_len;
		unsigned under_io = 0;
		unsigned long flags;

		if (!page)
			continue;

		if (fscrypt_is_bounce_page(page)) {
			bounce_page = page;
			page = fscrypt_pagecache_page(bounce_page);
		}

		if (bio->bi_status) {
			SetPageError(page);
			mapping_set_error(page->mapping, -EIO);
		}
		bh = head = page_buffers(page);
		/*
		 * We check all buffers in the page under b_uptodate_lock
		 * to avoid races with other end io clearing async_write flags
		 */
		spin_lock_irqsave(&head->b_uptodate_lock, flags);
		do {
			if (bh_offset(bh) < bio_start ||
			    bh_offset(bh) + bh->b_size > bio_end) {
				if (buffer_async_write(bh))
					under_io++;
				continue;
			}
			clear_buffer_async_write(bh);
			if (bio->bi_status) {
				set_buffer_write_io_error(bh);
				buffer_io_error(bh);
			}
		} while ((bh = bh->b_this_page) != head);
		spin_unlock_irqrestore(&head->b_uptodate_lock, flags);
		if (!under_io) {
			fscrypt_free_bounce_page(bounce_page);
			end_page_writeback(page);
		}
	}
}

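/*
 * Drop the final resources of an io_end: finish and free any bios still
 * chained off it, free its io_end_vecs and return it to the slab cache.
 */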
static void ext4_release_io_end(ext4_io_end_t *io_end)
{
	struct bio *bio, *next_bio;

	BUG_ON(!list_empty(&io_end->list));
	BUG_ON(io_end->flag & EXT4_IO_END_UNWRITTEN);
	WARN_ON(io_end->handle);

	for (bio = io_end->bio; bio; bio = next_bio) {
		next_bio = bio->bi_private;
		ext4_finish_bio(bio);
		bio_put(bio);
	}
	ext4_free_io_end_vec(io_end);
	kmem_cache_free(io_end_cachep, io_end);
}

/*
 * Check a range of space and convert unwritten extents to written. Note that
 * we are protected from truncate touching the same part of the extent tree by
 * the fact that truncate code waits for all DIO to finish (thus exclusion from
 * direct IO is achieved) and also waits for PageWriteback bits. Thus we
 * cannot get to ext4_ext_truncate() before all IOs overlapping that range are
 * completed (happens from ext4_free_ioend()).
 */
static int ext4_end_io_end(ext4_io_end_t *io_end)
{
	struct inode *inode = io_end->inode;
	handle_t *handle = io_end->handle;
	int ret = 0;

	ext4_debug("ext4_end_io_nolock: io_end 0x%p from inode %lu,list->next 0x%p,"
		   "list->prev 0x%p\n",
		   io_end, inode->i_ino, io_end->list.next, io_end->list.prev);

	io_end->handle = NULL;	/* Following call will use up the handle */
	ret = ext4_convert_unwritten_io_end_vec(handle, io_end);
	if (ret < 0 && !ext4_forced_shutdown(EXT4_SB(inode->i_sb))) {
		ext4_msg(inode->i_sb, KERN_EMERG,
			 "failed to convert unwritten extents to written "
			 "extents -- potential data loss!  "
			 "(inode %lu, error %d)", inode->i_ino, ret);
	}
	ext4_clear_io_unwritten_flag(io_end);
	ext4_release_io_end(io_end);
	return ret;
}

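/*
 * Dump the inode's list of completed io_ends (debug builds only).
 */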
static void dump_completed_IO(struct inode *inode, struct list_head *head)
{
#ifdef	EXT4FS_DEBUG
	struct list_head *cur, *before, *after;
	ext4_io_end_t *io_end, *io_end0, *io_end1;

	if (list_empty(head))
		return;

	ext4_debug("Dump inode %lu completed io list\n", inode->i_ino);
	list_for_each_entry(io_end, head, list) {
		cur = &io_end->list;
		before = cur->prev;
		io_end0 = container_of(before, ext4_io_end_t, list);
		after = cur->next;
		io_end1 = container_of(after, ext4_io_end_t, list);

		ext4_debug("io 0x%p from inode %lu,prev 0x%p,next 0x%p\n",
			    io_end, inode->i_ino, io_end0, io_end1);
	}
#endif
}

/* Add the io_end to per-inode completed end_io list. */
static void ext4_add_complete_io(ext4_io_end_t *io_end)
{
	struct ext4_inode_info *ei = EXT4_I(io_end->inode);
	struct ext4_sb_info *sbi = EXT4_SB(io_end->inode->i_sb);
	struct workqueue_struct *wq;
	unsigned long flags;

	/* Only reserved conversions from writeback should enter here */
	WARN_ON(!(io_end->flag & EXT4_IO_END_UNWRITTEN));
	WARN_ON(!io_end->handle && sbi->s_journal);
	spin_lock_irqsave(&ei->i_completed_io_lock, flags);
	wq = sbi->rsv_conversion_wq;
	if (list_empty(&ei->i_rsv_conversion_list))
		queue_work(wq, &ei->i_rsv_conversion_work);
	list_add_tail(&io_end->list, &ei->i_rsv_conversion_list);
	spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
}

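/*
 * Splice the inode's list of completed io_ends onto a private list and
 * process each entry, converting its unwritten extents. Returns the first
 * error encountered, if any.
 */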
static int ext4_do_flush_completed_IO(struct inode *inode,
				      struct list_head *head)
{
	ext4_io_end_t *io_end;
	struct list_head unwritten;
	unsigned long flags;
	struct ext4_inode_info *ei = EXT4_I(inode);
	int err, ret = 0;

	spin_lock_irqsave(&ei->i_completed_io_lock, flags);
	dump_completed_IO(inode, head);
	list_replace_init(head, &unwritten);
	spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);

	while (!list_empty(&unwritten)) {
		io_end = list_entry(unwritten.next, ext4_io_end_t, list);
		BUG_ON(!(io_end->flag & EXT4_IO_END_UNWRITTEN));
		list_del_init(&io_end->list);

		err = ext4_end_io_end(io_end);
		if (unlikely(!ret && err))
			ret = err;
	}
	return ret;
}

/*
 * Work on completed IO, to convert unwritten extents to written extents.
 */
void ext4_end_io_rsv_work(struct work_struct *work)
{
	struct ext4_inode_info *ei = container_of(work, struct ext4_inode_info,
						  i_rsv_conversion_work);
	ext4_do_flush_completed_IO(&ei->vfs_inode, &ei->i_rsv_conversion_list);
}

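/*
 * Allocate and initialize a new io_end with a single reference held by
 * the caller.
 */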
ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags)
{
	ext4_io_end_t *io_end = kmem_cache_zalloc(io_end_cachep, flags);

	if (io_end) {
		io_end->inode = inode;
		INIT_LIST_HEAD(&io_end->list);
		INIT_LIST_HEAD(&io_end->list_vec);
		atomic_set(&io_end->count, 1);
	}
	return io_end;
}

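/*
 * Drop a reference to the io_end. When the last reference is gone, either
 * release it immediately or, if it still needs unwritten extent conversion,
 * defer that work to the reserved conversion workqueue.
 */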
void ext4_put_io_end_defer(ext4_io_end_t *io_end)
{
	if (atomic_dec_and_test(&io_end->count)) {
		if (!(io_end->flag & EXT4_IO_END_UNWRITTEN) ||
				list_empty(&io_end->list_vec)) {
			ext4_release_io_end(io_end);
			return;
		}
		ext4_add_complete_io(io_end);
	}
}

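/*
 * Drop a reference to the io_end, performing any pending unwritten extent
 * conversion synchronously in this context when the last reference is gone.
 */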
int ext4_put_io_end(ext4_io_end_t *io_end)
{
	int err = 0;

	if (atomic_dec_and_test(&io_end->count)) {
		if (io_end->flag & EXT4_IO_END_UNWRITTEN) {
			err = ext4_convert_unwritten_io_end_vec(io_end->handle,
								io_end);
			io_end->handle = NULL;
			ext4_clear_io_unwritten_flag(io_end);
		}
		ext4_release_io_end(io_end);
	}
	return err;
}

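/*
 * Take an additional reference to the io_end.
 */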
ext4_io_end_t *ext4_get_io_end(ext4_io_end_t *io_end)
{
	atomic_inc(&io_end->count);
	return io_end;
}

/* BIO completion function for page writeback */
static void ext4_end_bio(struct bio *bio)
{
	ext4_io_end_t *io_end = bio->bi_private;
	sector_t bi_sector = bio->bi_iter.bi_sector;
	char b[BDEVNAME_SIZE];

	if (WARN_ONCE(!io_end, "io_end is NULL: %s: sector %Lu len %u err %d\n",
		      bio_devname(bio, b),
		      (long long) bio->bi_iter.bi_sector,
		      (unsigned) bio_sectors(bio),
		      bio->bi_status)) {
		ext4_finish_bio(bio);
		bio_put(bio);
		return;
	}
	bio->bi_end_io = NULL;

	if (bio->bi_status) {
		struct inode *inode = io_end->inode;

		ext4_warning(inode->i_sb, "I/O error %d writing to inode %lu "
			     "starting block %llu",
			     bio->bi_status, inode->i_ino,
			     (unsigned long long)
			     bi_sector >> (inode->i_blkbits - 9));
		mapping_set_error(inode->i_mapping,
				blk_status_to_errno(bio->bi_status));
	}

	if (io_end->flag & EXT4_IO_END_UNWRITTEN) {
		/*
		 * Link bio into list hanging from io_end. We have to do it
		 * atomically as bio completions can be racing against each
		 * other.
		 */
		bio->bi_private = xchg(&io_end->bio, bio);
		ext4_put_io_end_defer(io_end);
	} else {
		/*
		 * Drop io_end reference early. Inode can get freed once
		 * we finish the bio.
		 */
		ext4_put_io_end_defer(io_end);
		ext4_finish_bio(bio);
		bio_put(bio);
	}
}

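/*
 * Submit the bio currently being built, if any, and reset io->io_bio so a
 * new one will be started for the next buffer.
 */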
void ext4_io_submit(struct ext4_io_submit *io)
{
	struct bio *bio = io->io_bio;

	if (bio) {
		int io_op_flags = io->io_wbc->sync_mode == WB_SYNC_ALL ?
				  REQ_SYNC : 0;
		io->io_bio->bi_write_hint = io->io_end->inode->i_write_hint;
		bio_set_op_attrs(io->io_bio, REQ_OP_WRITE, io_op_flags);
		submit_bio(io->io_bio);
	}
	io->io_bio = NULL;
}

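/*
 * Initialize an ext4_io_submit context for a writeback pass.
 */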
void ext4_io_submit_init(struct ext4_io_submit *io,
			 struct writeback_control *wbc)
{
	io->io_wbc = wbc;
	io->io_bio = NULL;
	io->io_end = NULL;
}

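/*
 * Allocate a new bio for the buffer's block device, point it at the
 * buffer's starting sector and hook it up to the current io_end.
 */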
static void io_submit_init_bio(struct ext4_io_submit *io,
			       struct buffer_head *bh)
{
	struct bio *bio;

	/*
	 * bio_alloc will _always_ be able to allocate a bio if
	 * __GFP_DIRECT_RECLAIM is set, see comments for bio_alloc_bioset().
	 */
	bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
	fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO);
	bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio_set_dev(bio, bh->b_bdev);
	bio->bi_end_io = ext4_end_bio;
	bio->bi_private = ext4_get_io_end(io->io_end);
	io->io_bio = bio;
	io->io_next_block = bh->b_blocknr;
	wbc_init_bio(io->io_wbc, bio);
}

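/*
 * Add a buffer to the bio being built, starting a new bio first if the
 * buffer is not contiguous with the current one (or cannot share its
 * encryption context), and retrying after a submit if the bio is full.
 */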
static void io_submit_add_bh(struct ext4_io_submit *io,
			     struct inode *inode,
			     struct page *pagecache_page,
			     struct page *bounce_page,
			     struct buffer_head *bh)
{
	int ret;

	if (io->io_bio && (bh->b_blocknr != io->io_next_block ||
			   !fscrypt_mergeable_bio_bh(io->io_bio, bh))) {
submit_and_retry:
		ext4_io_submit(io);
	}
	if (io->io_bio == NULL) {
		io_submit_init_bio(io, bh);
		io->io_bio->bi_write_hint = inode->i_write_hint;
	}
	ret = bio_add_page(io->io_bio, bounce_page ?: pagecache_page,
			   bh->b_size, bh_offset(bh));
	if (ret != bh->b_size)
		goto submit_and_retry;
	wbc_account_cgroup_owner(io->io_wbc, pagecache_page, bh->b_size);
	io->io_next_block++;
}

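/*
 * Write out the dirty buffers of a locked page: mark them async_write,
 * encrypt them into a bounce page if the inode uses fs-layer encryption,
 * and add them to the bio(s) being built. Writeback on the page is ended
 * here only if nothing was submitted.
 */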
int ext4_bio_write_page(struct ext4_io_submit *io,
			struct page *page,
			int len,
			struct writeback_control *wbc,
			bool keep_towrite)
{
	struct page *bounce_page = NULL;
	struct inode *inode = page->mapping->host;
	unsigned block_start;
	struct buffer_head *bh, *head;
	int ret = 0;
	int nr_submitted = 0;
	int nr_to_submit = 0;

	BUG_ON(!PageLocked(page));
	BUG_ON(PageWriteback(page));

	if (keep_towrite)
		set_page_writeback_keepwrite(page);
	else
		set_page_writeback(page);
	ClearPageError(page);

	/*
	 * Comments copied from block_write_full_page:
	 *
	 * The page straddles i_size.  It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped.  "A file is mapped
	 * in multiples of the page size.  For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	if (len < PAGE_SIZE)
		zero_user_segment(page, len, PAGE_SIZE);
	/*
	 * In the first loop we prepare and mark buffers to submit. We have to
	 * mark all buffers in the page before submitting so that
	 * end_page_writeback() cannot be called from ext4_bio_end_io() when IO
	 * on the first buffer finishes and we are still working on submitting
	 * the second buffer.
	 */
	bh = head = page_buffers(page);
	do {
		block_start = bh_offset(bh);
		if (block_start >= len) {
			clear_buffer_dirty(bh);
			set_buffer_uptodate(bh);
			continue;
		}
		if (!buffer_dirty(bh) || buffer_delay(bh) ||
		    !buffer_mapped(bh) || buffer_unwritten(bh)) {
			/* A hole? We can safely clear the dirty bit */
			if (!buffer_mapped(bh))
				clear_buffer_dirty(bh);
			if (io->io_bio)
				ext4_io_submit(io);
			continue;
		}
		if (buffer_new(bh))
			clear_buffer_new(bh);
		set_buffer_async_write(bh);
		nr_to_submit++;
	} while ((bh = bh->b_this_page) != head);

	bh = head = page_buffers(page);

	/*
	 * If any blocks are being written to an encrypted file, encrypt them
	 * into a bounce page.  For simplicity, just encrypt until the last
	 * block which might be needed.  This may cause some unneeded blocks
	 * (e.g. holes) to be unnecessarily encrypted, but this is rare and
	 * can't happen in the common case of blocksize == PAGE_SIZE.
	 */
	if (fscrypt_inode_uses_fs_layer_crypto(inode) && nr_to_submit) {
		gfp_t gfp_flags = GFP_NOFS;
		unsigned int enc_bytes = round_up(len, i_blocksize(inode));

		/*
		 * Since bounce page allocation uses a mempool, we can only use
		 * a waiting mask (i.e. request guaranteed allocation) on the
		 * first page of the bio.  Otherwise it can deadlock.
		 */
		if (io->io_bio)
			gfp_flags = GFP_NOWAIT | __GFP_NOWARN;
	retry_encrypt:
		bounce_page = fscrypt_encrypt_pagecache_blocks(page, enc_bytes,
							       0, gfp_flags);
		if (IS_ERR(bounce_page)) {
			ret = PTR_ERR(bounce_page);
			if (ret == -ENOMEM &&
			    (io->io_bio || wbc->sync_mode == WB_SYNC_ALL)) {
				gfp_flags = GFP_NOFS;
				if (io->io_bio)
					ext4_io_submit(io);
				else
					gfp_flags |= __GFP_NOFAIL;
				congestion_wait(BLK_RW_ASYNC, HZ/50);
				goto retry_encrypt;
			}

			printk_ratelimited(KERN_ERR "%s: ret = %d\n", __func__, ret);
			redirty_page_for_writepage(wbc, page);
			do {
				clear_buffer_async_write(bh);
				bh = bh->b_this_page;
			} while (bh != head);
			goto unlock;
		}
	}

	/* Now submit buffers to write */
	do {
		if (!buffer_async_write(bh))
			continue;
		io_submit_add_bh(io, inode, page, bounce_page, bh);
		nr_submitted++;
		clear_buffer_dirty(bh);
	} while ((bh = bh->b_this_page) != head);

unlock:
	unlock_page(page);
	/* Nothing submitted - we have to end page writeback */
	if (!nr_submitted)
		end_page_writeback(page);
	return ret;
}