xref: /kernel/linux/linux-6.6/fs/buffer.c (revision 62306a36)
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 *  linux/fs/buffer.c
4 *
5 *  Copyright (C) 1991, 1992, 2002  Linus Torvalds
6 */
7
8/*
9 * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
10 *
11 * Removed a lot of unnecessary code and simplified things now that
12 * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
13 *
14 * Speed up hash, lru, and free list operations.  Use gfp() for allocating
15 * hash table, use SLAB cache for buffer heads. SMP threading.  -DaveM
16 *
17 * Added 32k buffer block sizes - these are required for older ARM systems. - RMK
18 *
19 * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
20 */
21
22#include <linux/kernel.h>
23#include <linux/sched/signal.h>
24#include <linux/syscalls.h>
25#include <linux/fs.h>
26#include <linux/iomap.h>
27#include <linux/mm.h>
28#include <linux/percpu.h>
29#include <linux/slab.h>
30#include <linux/capability.h>
31#include <linux/blkdev.h>
32#include <linux/file.h>
33#include <linux/quotaops.h>
34#include <linux/highmem.h>
35#include <linux/export.h>
36#include <linux/backing-dev.h>
37#include <linux/writeback.h>
38#include <linux/hash.h>
39#include <linux/suspend.h>
40#include <linux/buffer_head.h>
41#include <linux/task_io_accounting_ops.h>
42#include <linux/bio.h>
43#include <linux/cpu.h>
44#include <linux/bitops.h>
45#include <linux/mpage.h>
46#include <linux/bit_spinlock.h>
47#include <linux/pagevec.h>
48#include <linux/sched/mm.h>
49#include <trace/events/block.h>
50#include <linux/fscrypt.h>
51#include <linux/fsverity.h>
52#include <linux/sched/isolation.h>
53
54#include "internal.h"
55
56static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
57static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh,
58			  struct writeback_control *wbc);
59
60#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
61
62inline void touch_buffer(struct buffer_head *bh)
63{
64	trace_block_touch_buffer(bh);
65	folio_mark_accessed(bh->b_folio);
66}
67EXPORT_SYMBOL(touch_buffer);
68
69void __lock_buffer(struct buffer_head *bh)
70{
71	wait_on_bit_lock_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
72}
73EXPORT_SYMBOL(__lock_buffer);
74
75void unlock_buffer(struct buffer_head *bh)
76{
77	clear_bit_unlock(BH_Lock, &bh->b_state);
78	smp_mb__after_atomic();
79	wake_up_bit(&bh->b_state, BH_Lock);
80}
81EXPORT_SYMBOL(unlock_buffer);
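/*
 * Editorial sketch (not from the original file): the canonical pairing of
 * these primitives.  my_zero_block() is a hypothetical helper; everything
 * it calls is the real buffer API.
 *
 *	static void my_zero_block(struct buffer_head *bh)
 *	{
 *		lock_buffer(bh);		(may sleep in __lock_buffer())
 *		memset(bh->b_data, 0, bh->b_size);
 *		set_buffer_uptodate(bh);
 *		mark_buffer_dirty(bh);
 *		unlock_buffer(bh);		(wakes any BH_Lock waiters)
 *	}
 */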
82
83/*
84 * Returns whether the folio has dirty or writeback buffers. If all the
85 * buffers are unlocked and clean then the folio_test_dirty information is
86 * stale. If any of the buffers are locked, it is assumed they are locked for I/O.
87 */
88void buffer_check_dirty_writeback(struct folio *folio,
89				     bool *dirty, bool *writeback)
90{
91	struct buffer_head *head, *bh;
92	*dirty = false;
93	*writeback = false;
94
95	BUG_ON(!folio_test_locked(folio));
96
97	head = folio_buffers(folio);
98	if (!head)
99		return;
100
101	if (folio_test_writeback(folio))
102		*writeback = true;
103
104	bh = head;
105	do {
106		if (buffer_locked(bh))
107			*writeback = true;
108
109		if (buffer_dirty(bh))
110			*dirty = true;
111
112		bh = bh->b_this_page;
113	} while (bh != head);
114}
115
116/*
117 * Block until a buffer comes unlocked.  This doesn't stop it
118 * from becoming locked again - you have to lock it yourself
119 * if you want to preserve its state.
120 */
121void __wait_on_buffer(struct buffer_head * bh)
122{
123	wait_on_bit_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
124}
125EXPORT_SYMBOL(__wait_on_buffer);
126
127static void buffer_io_error(struct buffer_head *bh, char *msg)
128{
129	if (!test_bit(BH_Quiet, &bh->b_state))
130		printk_ratelimited(KERN_ERR
131			"Buffer I/O error on dev %pg, logical block %llu%s\n",
132			bh->b_bdev, (unsigned long long)bh->b_blocknr, msg);
133}
134
135/*
136 * End-of-IO handler helper function which does not touch the bh after
137 * unlocking it.
138 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
139 * a race there is benign: unlock_buffer() only uses the bh's address for
140 * hashing after unlocking the buffer, so it doesn't actually touch the bh
141 * itself.
142 */
143static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
144{
145	if (uptodate) {
146		set_buffer_uptodate(bh);
147	} else {
148		/* This happens, due to failed read-ahead attempts. */
149		clear_buffer_uptodate(bh);
150	}
151	unlock_buffer(bh);
152}
153
154/*
155 * Default synchronous end-of-I/O handler.  Just mark it up-to-date and
156 * unlock the buffer.
157 */
158void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
159{
160	__end_buffer_read_notouch(bh, uptodate);
161	put_bh(bh);
162}
163EXPORT_SYMBOL(end_buffer_read_sync);
164
165void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
166{
167	if (uptodate) {
168		set_buffer_uptodate(bh);
169	} else {
170		buffer_io_error(bh, ", lost sync page write");
171		mark_buffer_write_io_error(bh);
172		clear_buffer_uptodate(bh);
173	}
174	unlock_buffer(bh);
175	put_bh(bh);
176}
177EXPORT_SYMBOL(end_buffer_write_sync);
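/*
 * Editorial sketch (assumption: a caller wants a one-off synchronous write;
 * my_write_bh_sync() is hypothetical, the calls are real, and the pattern
 * mirrors what sync_dirty_buffer() does):
 *
 *	static int my_write_bh_sync(struct buffer_head *bh)
 *	{
 *		lock_buffer(bh);
 *		get_bh(bh);	(reference dropped by end_buffer_write_sync())
 *		bh->b_end_io = end_buffer_write_sync;
 *		submit_bh(REQ_OP_WRITE | REQ_SYNC, bh);
 *		wait_on_buffer(bh);
 *		return buffer_uptodate(bh) ? 0 : -EIO;
 *	}
 */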
178
179/*
180 * Various filesystems appear to want __find_get_block to be non-blocking.
181 * But it's the page lock which protects the buffers.  To get around this,
182 * we get exclusion from try_to_free_buffers with the blockdev mapping's
183 * private_lock.
184 *
185 * Hack idea: for the blockdev mapping, private_lock contention
186 * may be quite high.  This code could TryLock the page, and if that
187 * succeeds, there is no need to take private_lock.
188 */
189static struct buffer_head *
190__find_get_block_slow(struct block_device *bdev, sector_t block)
191{
192	struct inode *bd_inode = bdev->bd_inode;
193	struct address_space *bd_mapping = bd_inode->i_mapping;
194	struct buffer_head *ret = NULL;
195	pgoff_t index;
196	struct buffer_head *bh;
197	struct buffer_head *head;
198	struct folio *folio;
199	int all_mapped = 1;
200	static DEFINE_RATELIMIT_STATE(last_warned, HZ, 1);
201
202	index = block >> (PAGE_SHIFT - bd_inode->i_blkbits);
203	folio = __filemap_get_folio(bd_mapping, index, FGP_ACCESSED, 0);
204	if (IS_ERR(folio))
205		goto out;
206
207	spin_lock(&bd_mapping->private_lock);
208	head = folio_buffers(folio);
209	if (!head)
210		goto out_unlock;
211	bh = head;
212	do {
213		if (!buffer_mapped(bh))
214			all_mapped = 0;
215		else if (bh->b_blocknr == block) {
216			ret = bh;
217			get_bh(bh);
218			goto out_unlock;
219		}
220		bh = bh->b_this_page;
221	} while (bh != head);
222
223	/* We might be here because some of the buffers on this page are
224	 * not mapped.  This is due to various races between
225	 * file I/O on the block device and getblk.  It gets dealt with
226	 * elsewhere; don't buffer_error if we had some unmapped buffers.
227	 */
228	ratelimit_set_flags(&last_warned, RATELIMIT_MSG_ON_RELEASE);
229	if (all_mapped && __ratelimit(&last_warned)) {
230		printk("__find_get_block_slow() failed. block=%llu, "
231		       "b_blocknr=%llu, b_state=0x%08lx, b_size=%zu, "
232		       "device %pg blocksize: %d\n",
233		       (unsigned long long)block,
234		       (unsigned long long)bh->b_blocknr,
235		       bh->b_state, bh->b_size, bdev,
236		       1 << bd_inode->i_blkbits);
237	}
238out_unlock:
239	spin_unlock(&bd_mapping->private_lock);
240	folio_put(folio);
241out:
242	return ret;
243}
244
245static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
246{
247	unsigned long flags;
248	struct buffer_head *first;
249	struct buffer_head *tmp;
250	struct folio *folio;
251	int folio_uptodate = 1;
252
253	BUG_ON(!buffer_async_read(bh));
254
255	folio = bh->b_folio;
256	if (uptodate) {
257		set_buffer_uptodate(bh);
258	} else {
259		clear_buffer_uptodate(bh);
260		buffer_io_error(bh, ", async page read");
261		folio_set_error(folio);
262	}
263
264	/*
265	 * Be _very_ careful from here on. Bad things can happen if
266	 * two buffer heads end IO at almost the same time and both
267	 * decide that the page is now completely done.
268	 */
269	first = folio_buffers(folio);
270	spin_lock_irqsave(&first->b_uptodate_lock, flags);
271	clear_buffer_async_read(bh);
272	unlock_buffer(bh);
273	tmp = bh;
274	do {
275		if (!buffer_uptodate(tmp))
276			folio_uptodate = 0;
277		if (buffer_async_read(tmp)) {
278			BUG_ON(!buffer_locked(tmp));
279			goto still_busy;
280		}
281		tmp = tmp->b_this_page;
282	} while (tmp != bh);
283	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
284
285	/*
286	 * If all of the buffers are uptodate then we can set the page
287	 * uptodate.
288	 */
289	if (folio_uptodate)
290		folio_mark_uptodate(folio);
291	folio_unlock(folio);
292	return;
293
294still_busy:
295	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
296	return;
297}
298
299struct postprocess_bh_ctx {
300	struct work_struct work;
301	struct buffer_head *bh;
302};
303
304static void verify_bh(struct work_struct *work)
305{
306	struct postprocess_bh_ctx *ctx =
307		container_of(work, struct postprocess_bh_ctx, work);
308	struct buffer_head *bh = ctx->bh;
309	bool valid;
310
311	valid = fsverity_verify_blocks(bh->b_folio, bh->b_size, bh_offset(bh));
312	end_buffer_async_read(bh, valid);
313	kfree(ctx);
314}
315
316static bool need_fsverity(struct buffer_head *bh)
317{
318	struct folio *folio = bh->b_folio;
319	struct inode *inode = folio->mapping->host;
320
321	return fsverity_active(inode) &&
322		/* needed by ext4 */
323		folio->index < DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
324}
325
326static void decrypt_bh(struct work_struct *work)
327{
328	struct postprocess_bh_ctx *ctx =
329		container_of(work, struct postprocess_bh_ctx, work);
330	struct buffer_head *bh = ctx->bh;
331	int err;
332
333	err = fscrypt_decrypt_pagecache_blocks(bh->b_folio, bh->b_size,
334					       bh_offset(bh));
335	if (err == 0 && need_fsverity(bh)) {
336		/*
337		 * We use different work queues for decryption and for verity
338		 * because verity may require reading metadata pages that need
339		 * decryption, and we shouldn't recurse to the same workqueue.
340		 */
341		INIT_WORK(&ctx->work, verify_bh);
342		fsverity_enqueue_verify_work(&ctx->work);
343		return;
344	}
345	end_buffer_async_read(bh, err == 0);
346	kfree(ctx);
347}
348
349/*
350 * I/O completion handler for block_read_full_folio() - pages
351 * which come unlocked at the end of I/O.
352 */
353static void end_buffer_async_read_io(struct buffer_head *bh, int uptodate)
354{
355	struct inode *inode = bh->b_folio->mapping->host;
356	bool decrypt = fscrypt_inode_uses_fs_layer_crypto(inode);
357	bool verify = need_fsverity(bh);
358
359	/* Decrypt (with fscrypt) and/or verify (with fsverity) if needed. */
360	if (uptodate && (decrypt || verify)) {
361		struct postprocess_bh_ctx *ctx =
362			kmalloc(sizeof(*ctx), GFP_ATOMIC);
363
364		if (ctx) {
365			ctx->bh = bh;
366			if (decrypt) {
367				INIT_WORK(&ctx->work, decrypt_bh);
368				fscrypt_enqueue_decrypt_work(&ctx->work);
369			} else {
370				INIT_WORK(&ctx->work, verify_bh);
371				fsverity_enqueue_verify_work(&ctx->work);
372			}
373			return;
374		}
375		uptodate = 0;
376	}
377	end_buffer_async_read(bh, uptodate);
378}
379
380/*
381 * Completion handler for block_write_full_page() - pages which are unlocked
382 * during I/O, and which have PageWriteback cleared upon I/O completion.
383 */
384void end_buffer_async_write(struct buffer_head *bh, int uptodate)
385{
386	unsigned long flags;
387	struct buffer_head *first;
388	struct buffer_head *tmp;
389	struct folio *folio;
390
391	BUG_ON(!buffer_async_write(bh));
392
393	folio = bh->b_folio;
394	if (uptodate) {
395		set_buffer_uptodate(bh);
396	} else {
397		buffer_io_error(bh, ", lost async page write");
398		mark_buffer_write_io_error(bh);
399		clear_buffer_uptodate(bh);
400		folio_set_error(folio);
401	}
402
403	first = folio_buffers(folio);
404	spin_lock_irqsave(&first->b_uptodate_lock, flags);
405
406	clear_buffer_async_write(bh);
407	unlock_buffer(bh);
408	tmp = bh->b_this_page;
409	while (tmp != bh) {
410		if (buffer_async_write(tmp)) {
411			BUG_ON(!buffer_locked(tmp));
412			goto still_busy;
413		}
414		tmp = tmp->b_this_page;
415	}
416	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
417	folio_end_writeback(folio);
418	return;
419
420still_busy:
421	spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
422	return;
423}
424EXPORT_SYMBOL(end_buffer_async_write);
425
426/*
427 * If a page's buffers are under async read-in (end_buffer_async_read
428 * completion) then there is a possibility that another thread of
429 * control could lock one of the buffers after it has completed
430 * but while some of the other buffers have not completed.  This
431 * locked buffer would confuse end_buffer_async_read() into not unlocking
432 * the page.  So the absence of BH_Async_Read tells end_buffer_async_read()
433 * that this buffer is not under async I/O.
434 *
435 * The page comes unlocked when it has no locked buffer_async buffers
436 * left.
437 *
438 * PageLocked prevents anyone starting new async I/O reads any of
439 * the buffers.
440 *
441 * PageWriteback is used to prevent simultaneous writeout of the same
442 * page.
443 *
444 * PageLocked prevents anyone from starting writeback of a page which is
445 * under read I/O (PageWriteback is only ever set against a locked page).
446 */
447static void mark_buffer_async_read(struct buffer_head *bh)
448{
449	bh->b_end_io = end_buffer_async_read_io;
450	set_buffer_async_read(bh);
451}
452
453static void mark_buffer_async_write_endio(struct buffer_head *bh,
454					  bh_end_io_t *handler)
455{
456	bh->b_end_io = handler;
457	set_buffer_async_write(bh);
458}
459
460void mark_buffer_async_write(struct buffer_head *bh)
461{
462	mark_buffer_async_write_endio(bh, end_buffer_async_write);
463}
464EXPORT_SYMBOL(mark_buffer_async_write);
465
466
467/*
468 * fs/buffer.c contains helper functions for buffer-backed address space's
469 * fsync functions.  A common requirement for buffer-based filesystems is
470 * that certain data from the backing blockdev needs to be written out for
471 * a successful fsync().  For example, ext2 indirect blocks need to be
472 * written back and waited upon before fsync() returns.
473 *
474 * The functions mark_buffer_dirty_inode(), sync_mapping_buffers(),
475 * inode_has_buffers() and invalidate_inode_buffers() are provided for the
476 * management of a list of dependent buffers at ->i_mapping->private_list.
477 *
478 * Locking is a little subtle: try_to_free_buffers() will remove buffers
479 * from their controlling inode's queue when they are being freed.  But
480 * try_to_free_buffers() will be operating against the *blockdev* mapping
481 * at the time, not against the S_ISREG file which depends on those buffers.
482 * So the locking for private_list is via the private_lock in the address_space
483 * which backs the buffers.  Which is different from the address_space
484 * against which the buffers are listed.  So for a particular address_space,
485 * mapping->private_lock does *not* protect mapping->private_list!  In fact,
486 * mapping->private_list will always be protected by the backing blockdev's
487 * ->private_lock.
488 *
489 * Which introduces a requirement: all buffers on an address_space's
490 * ->private_list must be from the same address_space: the blockdev's.
491 *
492 * address_spaces which do not place buffers at ->private_list via these
493 * utility functions are free to use private_lock and private_list for
494 * whatever they want.  The only requirement is that list_empty(private_list)
495 * be true at clear_inode() time.
496 *
497 * FIXME: clear_inode should not call invalidate_inode_buffers().  The
498 * filesystems should do that.  invalidate_inode_buffers() should just go
499 * BUG_ON(!list_empty).
500 *
501 * FIXME: mark_buffer_dirty_inode() is a data-plane operation.  It should
502 * take an address_space, not an inode.  And it should be called
503 * mark_buffer_dirty_fsync() to clearly define why those buffers are being
504 * queued up.
505 *
506 * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
507 * list if it is already on a list.  Because if the buffer is on a list,
508 * it *must* already be on the right one.  If not, the filesystem is being
509 * silly.  This will save a ton of locking.  But first we have to ensure
510 * that buffers are taken *off* the old inode's list when they are freed
511 * (presumably in truncate).  That requires careful auditing of all
512 * filesystems (do it inside bforget()).  It could also be done by bringing
513 * b_inode back.
514 */
515
516/*
517 * The buffer's backing address_space's private_lock must be held
518 */
519static void __remove_assoc_queue(struct buffer_head *bh)
520{
521	list_del_init(&bh->b_assoc_buffers);
522	WARN_ON(!bh->b_assoc_map);
523	bh->b_assoc_map = NULL;
524}
525
526int inode_has_buffers(struct inode *inode)
527{
528	return !list_empty(&inode->i_data.private_list);
529}
530
531/*
532 * osync is designed to support O_SYNC I/O.  It waits synchronously for
533 * all already-submitted IO to complete, but does not queue any new
534 * writes to the disk.
535 *
536 * To do O_SYNC writes, just queue the buffer writes with write_dirty_buffer
537 * as you dirty the buffers, and then use osync_inode_buffers to wait for
538 * completion.  Any other dirty buffers which are not yet queued for
539 * write will not be flushed to disk by the osync.
540 */
541static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
542{
543	struct buffer_head *bh;
544	struct list_head *p;
545	int err = 0;
546
547	spin_lock(lock);
548repeat:
549	list_for_each_prev(p, list) {
550		bh = BH_ENTRY(p);
551		if (buffer_locked(bh)) {
552			get_bh(bh);
553			spin_unlock(lock);
554			wait_on_buffer(bh);
555			if (!buffer_uptodate(bh))
556				err = -EIO;
557			brelse(bh);
558			spin_lock(lock);
559			goto repeat;
560		}
561	}
562	spin_unlock(lock);
563	return err;
564}
565
566/**
567 * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
568 * @mapping: the mapping which wants those buffers written
569 *
570 * Starts I/O against the buffers at mapping->private_list, and waits upon
571 * that I/O.
572 *
573 * Basically, this is a convenience function for fsync().
574 * @mapping is a file or directory which needs those buffers to be written for
575 * a successful fsync().
576 */
577int sync_mapping_buffers(struct address_space *mapping)
578{
579	struct address_space *buffer_mapping = mapping->private_data;
580
581	if (buffer_mapping == NULL || list_empty(&mapping->private_list))
582		return 0;
583
584	return fsync_buffers_list(&buffer_mapping->private_lock,
585					&mapping->private_list);
586}
587EXPORT_SYMBOL(sync_mapping_buffers);
588
589/**
590 * generic_buffers_fsync_noflush - generic buffer fsync implementation
591 * for simple filesystems with no inode lock
592 *
593 * @file:	file to synchronize
594 * @start:	start offset in bytes
595 * @end:	end offset in bytes (inclusive)
596 * @datasync:	only synchronize essential metadata if true
597 *
598 * This is a generic implementation of the fsync method for simple
599 * filesystems which track all non-inode metadata in the buffers list
600 * hanging off the address_space structure.
601 */
602int generic_buffers_fsync_noflush(struct file *file, loff_t start, loff_t end,
603				  bool datasync)
604{
605	struct inode *inode = file->f_mapping->host;
606	int err;
607	int ret;
608
609	err = file_write_and_wait_range(file, start, end);
610	if (err)
611		return err;
612
613	ret = sync_mapping_buffers(inode->i_mapping);
614	if (!(inode->i_state & I_DIRTY_ALL))
615		goto out;
616	if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
617		goto out;
618
619	err = sync_inode_metadata(inode, 1);
620	if (ret == 0)
621		ret = err;
622
623out:
624	/* check and advance again to catch errors after syncing out buffers */
625	err = file_check_and_advance_wb_err(file);
626	if (ret == 0)
627		ret = err;
628	return ret;
629}
630EXPORT_SYMBOL(generic_buffers_fsync_noflush);
631
632/**
633 * generic_buffers_fsync - generic buffer fsync implementation
634 * for simple filesystems with no inode lock
635 *
636 * @file:	file to synchronize
637 * @start:	start offset in bytes
638 * @end:	end offset in bytes (inclusive)
639 * @datasync:	only synchronize essential metadata if true
640 *
641 * This is a generic implementation of the fsync method for simple
642 * filesystems which track all non-inode metadata in the buffers list
643 * hanging off the address_space structure. This also makes sure that
644 * a device cache flush operation is called at the end.
645 */
646int generic_buffers_fsync(struct file *file, loff_t start, loff_t end,
647			  bool datasync)
648{
649	struct inode *inode = file->f_mapping->host;
650	int ret;
651
652	ret = generic_buffers_fsync_noflush(file, start, end, datasync);
653	if (!ret)
654		ret = blkdev_issue_flush(inode->i_sb->s_bdev);
655	return ret;
656}
657EXPORT_SYMBOL(generic_buffers_fsync);
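/*
 * Editorial sketch: how a simple buffer-backed filesystem might wire the
 * helper above into its file_operations ("myfs" is hypothetical):
 *
 *	static int myfs_fsync(struct file *file, loff_t start, loff_t end,
 *			      int datasync)
 *	{
 *		return generic_buffers_fsync(file, start, end, datasync);
 *	}
 *
 *	const struct file_operations myfs_file_operations = {
 *		.llseek		= generic_file_llseek,
 *		.read_iter	= generic_file_read_iter,
 *		.write_iter	= generic_file_write_iter,
 *		.fsync		= myfs_fsync,
 *	};
 */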
658
659/*
660 * Called when we've recently written block `bblock', and it is known that
661 * `bblock' was for a buffer_boundary() buffer.  This means that the block at
662 * `bblock + 1' is probably a dirty indirect block.  Hunt it down and, if it's
663 * dirty, schedule it for IO.  So that indirects merge nicely with their data.
664 */
665void write_boundary_block(struct block_device *bdev,
666			sector_t bblock, unsigned blocksize)
667{
668	struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
669	if (bh) {
670		if (buffer_dirty(bh))
671			write_dirty_buffer(bh, 0);
672		put_bh(bh);
673	}
674}
675
676void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
677{
678	struct address_space *mapping = inode->i_mapping;
679	struct address_space *buffer_mapping = bh->b_folio->mapping;
680
681	mark_buffer_dirty(bh);
682	if (!mapping->private_data) {
683		mapping->private_data = buffer_mapping;
684	} else {
685		BUG_ON(mapping->private_data != buffer_mapping);
686	}
687	if (!bh->b_assoc_map) {
688		spin_lock(&buffer_mapping->private_lock);
689		list_move_tail(&bh->b_assoc_buffers,
690				&mapping->private_list);
691		bh->b_assoc_map = mapping;
692		spin_unlock(&buffer_mapping->private_lock);
693	}
694}
695EXPORT_SYMBOL(mark_buffer_dirty_inode);
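/*
 * Editorial sketch: the private_list machinery end to end.  A filesystem
 * dirties a metadata block against the inode that depends on it, and its
 * fsync path later flushes the list (hypothetical fragment):
 *
 *	at block-allocation time:
 *		mark_buffer_dirty_inode(indirect_bh, inode);
 *
 *	in the filesystem's fsync, after the data pages are written:
 *		err = sync_mapping_buffers(inode->i_mapping);
 */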
696
697/*
698 * Add a page to the dirty page list.
699 *
700 * It is a sad fact of life that this function is called from several places
701 * deeply under spinlocking.  It may not sleep.
702 *
703 * If the page has buffers, the uptodate buffers are set dirty, to preserve
704 * dirty-state coherency between the page and the buffers.  If the page does
705 * not have buffers then, when they are later attached, they will all be set
706 * dirty.
707 *
708 * The buffers are dirtied before the page is dirtied.  There's a small race
709 * window in which a writepage caller may see the page cleanness but not the
710 * buffer dirtiness.  That's fine.  If this code were to set the page dirty
711 * before the buffers, a concurrent writepage caller could clear the page dirty
712 * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
713 * page on the dirty page list.
714 *
715 * We use private_lock to lock against try_to_free_buffers while using the
716 * page's buffer list.  Also use this to protect against clean buffers being
717 * added to the page after it was set dirty.
718 *
719 * FIXME: may need to call ->reservepage here as well.  That's rather up to the
720 * address_space though.
721 */
722bool block_dirty_folio(struct address_space *mapping, struct folio *folio)
723{
724	struct buffer_head *head;
725	bool newly_dirty;
726
727	spin_lock(&mapping->private_lock);
728	head = folio_buffers(folio);
729	if (head) {
730		struct buffer_head *bh = head;
731
732		do {
733			set_buffer_dirty(bh);
734			bh = bh->b_this_page;
735		} while (bh != head);
736	}
737	/*
738	 * Lock out page's memcg migration to keep PageDirty
739	 * synchronized with per-memcg dirty page counters.
740	 */
741	folio_memcg_lock(folio);
742	newly_dirty = !folio_test_set_dirty(folio);
743	spin_unlock(&mapping->private_lock);
744
745	if (newly_dirty)
746		__folio_mark_dirty(folio, mapping, 1);
747
748	folio_memcg_unlock(folio);
749
750	if (newly_dirty)
751		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
752
753	return newly_dirty;
754}
755EXPORT_SYMBOL(block_dirty_folio);
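/*
 * Editorial sketch: block_dirty_folio() is normally reached through the
 * address_space_operations of a buffer-backed mapping ("myfs_aops" is
 * hypothetical; the callbacks are the real ones from this file):
 *
 *	const struct address_space_operations myfs_aops = {
 *		.dirty_folio		= block_dirty_folio,
 *		.invalidate_folio	= block_invalidate_folio,
 *	};
 */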
756
757/*
758 * Write out and wait upon a list of buffers.
759 *
760 * We have conflicting pressures: we want to make sure that all
761 * initially dirty buffers get waited on, but that any subsequently
762 * dirtied buffers don't.  After all, we don't want fsync to last
763 * forever if somebody is actively writing to the file.
764 *
765 * Do this in two main stages: first we copy dirty buffers to a
766 * temporary inode list, queueing the writes as we go.  Then we clean
767 * up, waiting for those writes to complete.
768 *
769 * During this second stage, any subsequent updates to the file may end
770 * up refiling the buffer on the original inode's dirty list again, so
771 * there is a chance we will end up with a buffer queued for write but
772 * not yet completed on that list.  So, as a final cleanup we go through
773 * the osync code to catch these locked, dirty buffers without requeuing
774 * any newly dirty buffers for write.
775 */
776static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
777{
778	struct buffer_head *bh;
779	struct list_head tmp;
780	struct address_space *mapping;
781	int err = 0, err2;
782	struct blk_plug plug;
783
784	INIT_LIST_HEAD(&tmp);
785	blk_start_plug(&plug);
786
787	spin_lock(lock);
788	while (!list_empty(list)) {
789		bh = BH_ENTRY(list->next);
790		mapping = bh->b_assoc_map;
791		__remove_assoc_queue(bh);
792		/* Avoid race with mark_buffer_dirty_inode() which does
793		 * a lockless check and we rely on seeing the dirty bit */
794		smp_mb();
795		if (buffer_dirty(bh) || buffer_locked(bh)) {
796			list_add(&bh->b_assoc_buffers, &tmp);
797			bh->b_assoc_map = mapping;
798			if (buffer_dirty(bh)) {
799				get_bh(bh);
800				spin_unlock(lock);
801				/*
802				 * Ensure any pending I/O completes so that
803				 * write_dirty_buffer() actually writes the
804				 * current contents - it is a noop if I/O is
805				 * still in flight on potentially older
806				 * contents.
807				 */
808				write_dirty_buffer(bh, REQ_SYNC);
809
810				/*
811				 * Kick off IO for the previous mapping. Note
812				 * that we will not run the very last mapping,
813				 * wait_on_buffer() will do that for us
814				 * through sync_buffer().
815				 */
816				brelse(bh);
817				spin_lock(lock);
818			}
819		}
820	}
821
822	spin_unlock(lock);
823	blk_finish_plug(&plug);
824	spin_lock(lock);
825
826	while (!list_empty(&tmp)) {
827		bh = BH_ENTRY(tmp.prev);
828		get_bh(bh);
829		mapping = bh->b_assoc_map;
830		__remove_assoc_queue(bh);
831		/* Avoid race with mark_buffer_dirty_inode() which does
832		 * a lockless check and we rely on seeing the dirty bit */
833		smp_mb();
834		if (buffer_dirty(bh)) {
835			list_add(&bh->b_assoc_buffers,
836				 &mapping->private_list);
837			bh->b_assoc_map = mapping;
838		}
839		spin_unlock(lock);
840		wait_on_buffer(bh);
841		if (!buffer_uptodate(bh))
842			err = -EIO;
843		brelse(bh);
844		spin_lock(lock);
845	}
846
847	spin_unlock(lock);
848	err2 = osync_buffers_list(lock, list);
849	if (err)
850		return err;
851	else
852		return err2;
853}
854
855/*
856 * Invalidate any and all dirty buffers on a given inode.  We are
857 * probably unmounting the fs, but that doesn't mean we have already
858 * done a sync().  Just drop the buffers from the inode list.
859 *
860 * NOTE: we take the inode's blockdev's mapping's private_lock.  Which
861 * assumes that all the buffers are against the blockdev.  Not true
862 * for reiserfs.
863 */
864void invalidate_inode_buffers(struct inode *inode)
865{
866	if (inode_has_buffers(inode)) {
867		struct address_space *mapping = &inode->i_data;
868		struct list_head *list = &mapping->private_list;
869		struct address_space *buffer_mapping = mapping->private_data;
870
871		spin_lock(&buffer_mapping->private_lock);
872		while (!list_empty(list))
873			__remove_assoc_queue(BH_ENTRY(list->next));
874		spin_unlock(&buffer_mapping->private_lock);
875	}
876}
877EXPORT_SYMBOL(invalidate_inode_buffers);
878
879/*
880 * Remove any clean buffers from the inode's buffer list.  This is called
881 * when we're trying to free the inode itself.  Those buffers can pin it.
882 *
883 * Returns true if all buffers were removed.
884 */
885int remove_inode_buffers(struct inode *inode)
886{
887	int ret = 1;
888
889	if (inode_has_buffers(inode)) {
890		struct address_space *mapping = &inode->i_data;
891		struct list_head *list = &mapping->private_list;
892		struct address_space *buffer_mapping = mapping->private_data;
893
894		spin_lock(&buffer_mapping->private_lock);
895		while (!list_empty(list)) {
896			struct buffer_head *bh = BH_ENTRY(list->next);
897			if (buffer_dirty(bh)) {
898				ret = 0;
899				break;
900			}
901			__remove_assoc_queue(bh);
902		}
903		spin_unlock(&buffer_mapping->private_lock);
904	}
905	return ret;
906}
907
908/*
909 * Create the appropriate buffers when given a folio for the data area and
910 * the size of each buffer.  Use the bh->b_this_page linked list to
911 * follow the buffers created.  Return NULL if unable to create more
912 * buffers.
913 *
914 * The retry flag is used to differentiate async I/O (paging, swapping),
915 * which may not fail, from ordinary buffer allocations.
916 */
917struct buffer_head *folio_alloc_buffers(struct folio *folio, unsigned long size,
918					bool retry)
919{
920	struct buffer_head *bh, *head;
921	gfp_t gfp = GFP_NOFS | __GFP_ACCOUNT;
922	long offset;
923	struct mem_cgroup *memcg, *old_memcg;
924
925	if (retry)
926		gfp |= __GFP_NOFAIL;
927
928	/* The folio lock pins the memcg */
929	memcg = folio_memcg(folio);
930	old_memcg = set_active_memcg(memcg);
931
932	head = NULL;
933	offset = folio_size(folio);
934	while ((offset -= size) >= 0) {
935		bh = alloc_buffer_head(gfp);
936		if (!bh)
937			goto no_grow;
938
939		bh->b_this_page = head;
940		bh->b_blocknr = -1;
941		head = bh;
942
943		bh->b_size = size;
944
945		/* Link the buffer to its folio */
946		folio_set_bh(bh, folio, offset);
947	}
948out:
949	set_active_memcg(old_memcg);
950	return head;
951/*
952 * In case anything failed, we just free everything we got.
953 */
954no_grow:
955	if (head) {
956		do {
957			bh = head;
958			head = head->b_this_page;
959			free_buffer_head(bh);
960		} while (head);
961	}
962
963	goto out;
964}
965EXPORT_SYMBOL_GPL(folio_alloc_buffers);
966
967struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
968				       bool retry)
969{
970	return folio_alloc_buffers(page_folio(page), size, retry);
971}
972EXPORT_SYMBOL_GPL(alloc_page_buffers);
973
974static inline void link_dev_buffers(struct folio *folio,
975		struct buffer_head *head)
976{
977	struct buffer_head *bh, *tail;
978
979	bh = head;
980	do {
981		tail = bh;
982		bh = bh->b_this_page;
983	} while (bh);
984	tail->b_this_page = head;
985	folio_attach_private(folio, head);
986}
987
988static sector_t blkdev_max_block(struct block_device *bdev, unsigned int size)
989{
990	sector_t retval = ~((sector_t)0);
991	loff_t sz = bdev_nr_bytes(bdev);
992
993	if (sz) {
994		unsigned int sizebits = blksize_bits(size);
995		retval = (sz >> sizebits);
996	}
997	return retval;
998}
999
1000/*
1001 * Initialise the state of a blockdev folio's buffers.
1002 */
1003static sector_t folio_init_buffers(struct folio *folio,
1004		struct block_device *bdev, sector_t block, int size)
1005{
1006	struct buffer_head *head = folio_buffers(folio);
1007	struct buffer_head *bh = head;
1008	bool uptodate = folio_test_uptodate(folio);
1009	sector_t end_block = blkdev_max_block(bdev, size);
1010
1011	do {
1012		if (!buffer_mapped(bh)) {
1013			bh->b_end_io = NULL;
1014			bh->b_private = NULL;
1015			bh->b_bdev = bdev;
1016			bh->b_blocknr = block;
1017			if (uptodate)
1018				set_buffer_uptodate(bh);
1019			if (block < end_block)
1020				set_buffer_mapped(bh);
1021		}
1022		block++;
1023		bh = bh->b_this_page;
1024	} while (bh != head);
1025
1026	/*
1027	 * Caller needs to validate requested block against end of device.
1028	 */
1029	return end_block;
1030}
1031
1032/*
1033 * Create the page-cache page that contains the requested block.
1034 *
1035 * This is used purely for blockdev mappings.
1036 */
1037static int
1038grow_dev_page(struct block_device *bdev, sector_t block,
1039	      pgoff_t index, int size, int sizebits, gfp_t gfp)
1040{
1041	struct inode *inode = bdev->bd_inode;
1042	struct folio *folio;
1043	struct buffer_head *bh;
1044	sector_t end_block;
1045	int ret = 0;
1046	gfp_t gfp_mask;
1047
1048	gfp_mask = mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS) | gfp;
1049
1050	/*
1051	 * XXX: __getblk_slow() can not really deal with failure and
1052	 * will endlessly loop on improvised global reclaim.  Prefer
1053	 * looping in the allocator rather than here, at least that
1054	 * code knows what it's doing.
1055	 */
1056	gfp_mask |= __GFP_NOFAIL;
1057
1058	folio = __filemap_get_folio(inode->i_mapping, index,
1059			FGP_LOCK | FGP_ACCESSED | FGP_CREAT, gfp_mask);
1060
1061	bh = folio_buffers(folio);
1062	if (bh) {
1063		if (bh->b_size == size) {
1064			end_block = folio_init_buffers(folio, bdev,
1065					(sector_t)index << sizebits, size);
1066			goto done;
1067		}
1068		if (!try_to_free_buffers(folio))
1069			goto failed;
1070	}
1071
1072	bh = folio_alloc_buffers(folio, size, true);
1073
1074	/*
1075	 * Link the folio to the buffers and initialise them.  Take the
1076	 * lock to be atomic wrt __find_get_block(), which does not
1077	 * run under the folio lock.
1078	 */
1079	spin_lock(&inode->i_mapping->private_lock);
1080	link_dev_buffers(folio, bh);
1081	end_block = folio_init_buffers(folio, bdev,
1082			(sector_t)index << sizebits, size);
1083	spin_unlock(&inode->i_mapping->private_lock);
1084done:
1085	ret = (block < end_block) ? 1 : -ENXIO;
1086failed:
1087	folio_unlock(folio);
1088	folio_put(folio);
1089	return ret;
1090}
1091
1092/*
1093 * Create buffers for the specified block device block's page.  If
1094 * that page was dirty, the buffers are set dirty also.
1095 */
1096static int
1097grow_buffers(struct block_device *bdev, sector_t block, int size, gfp_t gfp)
1098{
1099	pgoff_t index;
1100	int sizebits;
1101
1102	sizebits = PAGE_SHIFT - __ffs(size);
1103	index = block >> sizebits;
1104
1105	/*
1106	 * Check for a block which wants to lie outside our maximum possible
1107	 * pagecache index.  (this comparison is done using sector_t types).
1108	 */
1109	if (unlikely(index != block >> sizebits)) {
1110		printk(KERN_ERR "%s: requested out-of-range block %llu for "
1111			"device %pg\n",
1112			__func__, (unsigned long long)block,
1113			bdev);
1114		return -EIO;
1115	}
1116
1117	/* Create a page with the proper size buffers. */
1118	return grow_dev_page(bdev, block, index, size, sizebits, gfp);
1119}
1120
1121static struct buffer_head *
1122__getblk_slow(struct block_device *bdev, sector_t block,
1123	     unsigned size, gfp_t gfp)
1124{
1125	/* Size must be multiple of hard sectorsize */
1126	if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
1127			(size < 512 || size > PAGE_SIZE))) {
1128		printk(KERN_ERR "getblk(): invalid block size %d requested\n",
1129					size);
1130		printk(KERN_ERR "logical block size: %d\n",
1131					bdev_logical_block_size(bdev));
1132
1133		dump_stack();
1134		return NULL;
1135	}
1136
1137	for (;;) {
1138		struct buffer_head *bh;
1139		int ret;
1140
1141		bh = __find_get_block(bdev, block, size);
1142		if (bh)
1143			return bh;
1144
1145		ret = grow_buffers(bdev, block, size, gfp);
1146		if (ret < 0)
1147			return NULL;
1148	}
1149}
1150
1151/*
1152 * The relationship between dirty buffers and dirty pages:
1153 *
1154 * Whenever a page has any dirty buffers, the page's dirty bit is set, and
1155 * the page is tagged dirty in the page cache.
1156 *
1157 * At all times, the dirtiness of the buffers represents the dirtiness of
1158 * subsections of the page.  If the page has buffers, the page dirty bit is
1159 * merely a hint about the true dirty state.
1160 *
1161 * When a page is set dirty in its entirety, all its buffers are marked dirty
1162 * (if the page has buffers).
1163 *
1164 * When a buffer is marked dirty, its page is dirtied, but the page's other
1165 * buffers are not.
1166 *
1167 * Also, when blockdev buffers are explicitly read with bread(), they
1168 * individually become uptodate.  But their backing page remains not
1169 * uptodate - even if all of its buffers are uptodate.  A subsequent
1170 * block_read_full_folio() against that folio will discover all the uptodate
1171 * buffers, will set the folio uptodate and will perform no I/O.
1172 */
1173
1174/**
1175 * mark_buffer_dirty - mark a buffer_head as needing writeout
1176 * @bh: the buffer_head to mark dirty
1177 *
1178 * mark_buffer_dirty() will set the dirty bit against the buffer, then set
1179 * its backing page dirty, then tag the page as dirty in the page cache
1180 * and then attach the address_space's inode to its superblock's dirty
1181 * inode list.
1182 *
1183 * mark_buffer_dirty() is atomic.  It takes bh->b_folio->mapping->private_lock,
1184 * i_pages lock and mapping->host->i_lock.
1185 */
1186void mark_buffer_dirty(struct buffer_head *bh)
1187{
1188	WARN_ON_ONCE(!buffer_uptodate(bh));
1189
1190	trace_block_dirty_buffer(bh);
1191
1192	/*
1193	 * Very *carefully* optimize the it-is-already-dirty case.
1194	 *
1195	 * Don't let the final "is it dirty" escape to before we
1196	 * perhaps modified the buffer.
1197	 */
1198	if (buffer_dirty(bh)) {
1199		smp_mb();
1200		if (buffer_dirty(bh))
1201			return;
1202	}
1203
1204	if (!test_set_buffer_dirty(bh)) {
1205		struct folio *folio = bh->b_folio;
1206		struct address_space *mapping = NULL;
1207
1208		folio_memcg_lock(folio);
1209		if (!folio_test_set_dirty(folio)) {
1210			mapping = folio->mapping;
1211			if (mapping)
1212				__folio_mark_dirty(folio, mapping, 0);
1213		}
1214		folio_memcg_unlock(folio);
1215		if (mapping)
1216			__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
1217	}
1218}
1219EXPORT_SYMBOL(mark_buffer_dirty);
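/*
 * Editorial sketch: the usual read-modify-write cycle for a metadata block
 * (myfs_touch_super() and the block number are hypothetical; the calls are
 * the real buffer API):
 *
 *	static int myfs_touch_super(struct super_block *sb, sector_t blk)
 *	{
 *		struct buffer_head *bh = sb_bread(sb, blk);
 *
 *		if (!bh)
 *			return -EIO;
 *		lock_buffer(bh);
 *		... modify bh->b_data here ...
 *		unlock_buffer(bh);
 *		mark_buffer_dirty(bh);	(writeback picks it up later)
 *		brelse(bh);
 *		return 0;
 *	}
 */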
1220
1221void mark_buffer_write_io_error(struct buffer_head *bh)
1222{
1223	set_buffer_write_io_error(bh);
1224	/* FIXME: do we need to set this in both places? */
1225	if (bh->b_folio && bh->b_folio->mapping)
1226		mapping_set_error(bh->b_folio->mapping, -EIO);
1227	if (bh->b_assoc_map) {
1228		mapping_set_error(bh->b_assoc_map, -EIO);
1229		errseq_set(&bh->b_assoc_map->host->i_sb->s_wb_err, -EIO);
1230	}
1231}
1232EXPORT_SYMBOL(mark_buffer_write_io_error);
1233
1234/*
1235 * Decrement a buffer_head's reference count.  If all buffers against a page
1236 * have zero reference count, are clean and unlocked, and if the page is clean
1237 * and unlocked then try_to_free_buffers() may strip the buffers from the page
1238 * in preparation for freeing it (sometimes, rarely, buffers are removed from
1239 * a page but it ends up not being freed, and buffers may later be reattached).
1240 */
1241void __brelse(struct buffer_head * buf)
1242{
1243	if (atomic_read(&buf->b_count)) {
1244		put_bh(buf);
1245		return;
1246	}
1247	WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
1248}
1249EXPORT_SYMBOL(__brelse);
1250
1251/*
1252 * bforget() is like brelse(), except it discards any
1253 * potentially dirty data.
1254 */
1255void __bforget(struct buffer_head *bh)
1256{
1257	clear_buffer_dirty(bh);
1258	if (bh->b_assoc_map) {
1259		struct address_space *buffer_mapping = bh->b_folio->mapping;
1260
1261		spin_lock(&buffer_mapping->private_lock);
1262		list_del_init(&bh->b_assoc_buffers);
1263		bh->b_assoc_map = NULL;
1264		spin_unlock(&buffer_mapping->private_lock);
1265	}
1266	__brelse(bh);
1267}
1268EXPORT_SYMBOL(__bforget);
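/*
 * Editorial sketch: bforget() suits the case where the contents no longer
 * matter, e.g. a metadata block that was just freed on disk (hypothetical
 * fragment):
 *
 *	bh = sb_bread(sb, indirect_blk);
 *	... release the blocks it points to ...
 *	bforget(bh);	(don't waste I/O writing back a dead block)
 */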
1269
1270static struct buffer_head *__bread_slow(struct buffer_head *bh)
1271{
1272	lock_buffer(bh);
1273	if (buffer_uptodate(bh)) {
1274		unlock_buffer(bh);
1275		return bh;
1276	} else {
1277		get_bh(bh);
1278		bh->b_end_io = end_buffer_read_sync;
1279		submit_bh(REQ_OP_READ, bh);
1280		wait_on_buffer(bh);
1281		if (buffer_uptodate(bh))
1282			return bh;
1283	}
1284	brelse(bh);
1285	return NULL;
1286}
1287
1288/*
1289 * Per-CPU buffer LRU implementation, used to reduce the cost of __find_get_block().
1290 * The bhs[] array is sorted - newest buffer is at bhs[0].  Buffers have their
1291 * refcount elevated by one when they're in an LRU.  A buffer can only appear
1292 * once in a particular CPU's LRU.  A single buffer can be present in multiple
1293 * CPU's LRUs at the same time.
1294 *
1295 * This is a transparent caching front-end to sb_bread(), sb_getblk() and
1296 * sb_find_get_block().
1297 *
1298 * The LRUs themselves only need locking against invalidate_bh_lrus.  We use
1299 * a local interrupt disable for that.
1300 */
1301
1302#define BH_LRU_SIZE	16
1303
1304struct bh_lru {
1305	struct buffer_head *bhs[BH_LRU_SIZE];
1306};
1307
1308static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
1309
1310#ifdef CONFIG_SMP
1311#define bh_lru_lock()	local_irq_disable()
1312#define bh_lru_unlock()	local_irq_enable()
1313#else
1314#define bh_lru_lock()	preempt_disable()
1315#define bh_lru_unlock()	preempt_enable()
1316#endif
1317
1318static inline void check_irqs_on(void)
1319{
1320#ifdef irqs_disabled
1321	BUG_ON(irqs_disabled());
1322#endif
1323}
1324
1325/*
1326 * Install a buffer_head into this cpu's LRU.  If not already in the LRU, it is
1327 * inserted at the front, and the buffer_head at the back if any is evicted.
1328 * Or, if already in the LRU it is moved to the front.
1329 */
1330static void bh_lru_install(struct buffer_head *bh)
1331{
1332	struct buffer_head *evictee = bh;
1333	struct bh_lru *b;
1334	int i;
1335
1336	check_irqs_on();
1337	bh_lru_lock();
1338
1339	/*
1340	 * The refcount a buffer_head holds while in the bh_lru prevents
1341	 * dropping the attached page (i.e., try_to_free_buffers), so it
1342	 * could cause page migration to fail.
1343	 * Skip putting the upcoming bh into the bh_lru until migration is done.
1344	 */
1345	if (lru_cache_disabled() || cpu_is_isolated(smp_processor_id())) {
1346		bh_lru_unlock();
1347		return;
1348	}
1349
1350	b = this_cpu_ptr(&bh_lrus);
1351	for (i = 0; i < BH_LRU_SIZE; i++) {
1352		swap(evictee, b->bhs[i]);
1353		if (evictee == bh) {
1354			bh_lru_unlock();
1355			return;
1356		}
1357	}
1358
1359	get_bh(bh);
1360	bh_lru_unlock();
1361	brelse(evictee);
1362}
1363
1364/*
1365 * Look up the bh in this cpu's LRU.  If it's there, move it to the head.
1366 */
1367static struct buffer_head *
1368lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
1369{
1370	struct buffer_head *ret = NULL;
1371	unsigned int i;
1372
1373	check_irqs_on();
1374	bh_lru_lock();
1375	if (cpu_is_isolated(smp_processor_id())) {
1376		bh_lru_unlock();
1377		return NULL;
1378	}
1379	for (i = 0; i < BH_LRU_SIZE; i++) {
1380		struct buffer_head *bh = __this_cpu_read(bh_lrus.bhs[i]);
1381
1382		if (bh && bh->b_blocknr == block && bh->b_bdev == bdev &&
1383		    bh->b_size == size) {
1384			if (i) {
1385				while (i) {
1386					__this_cpu_write(bh_lrus.bhs[i],
1387						__this_cpu_read(bh_lrus.bhs[i - 1]));
1388					i--;
1389				}
1390				__this_cpu_write(bh_lrus.bhs[0], bh);
1391			}
1392			get_bh(bh);
1393			ret = bh;
1394			break;
1395		}
1396	}
1397	bh_lru_unlock();
1398	return ret;
1399}
1400
1401/*
1402 * Perform a pagecache lookup for the matching buffer.  If it's there, refresh
1403 * it in the LRU and mark it as accessed.  If it is not present then return
1404 * NULL.
1405 */
1406struct buffer_head *
1407__find_get_block(struct block_device *bdev, sector_t block, unsigned size)
1408{
1409	struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
1410
1411	if (bh == NULL) {
1412		/* __find_get_block_slow will mark the page accessed */
1413		bh = __find_get_block_slow(bdev, block);
1414		if (bh)
1415			bh_lru_install(bh);
1416	} else
1417		touch_buffer(bh);
1418
1419	return bh;
1420}
1421EXPORT_SYMBOL(__find_get_block);
1422
1423/*
1424 * __getblk_gfp() will locate (and, if necessary, create) the buffer_head
1425 * which corresponds to the passed block_device, block and size. The
1426 * returned buffer has its reference count incremented.
1427 *
1428 * __getblk_gfp() will lock up the machine if grow_dev_page's
1429 * try_to_free_buffers() attempt is failing.  FIXME, perhaps?
1430 */
1431struct buffer_head *
1432__getblk_gfp(struct block_device *bdev, sector_t block,
1433	     unsigned size, gfp_t gfp)
1434{
1435	struct buffer_head *bh = __find_get_block(bdev, block, size);
1436
1437	might_sleep();
1438	if (bh == NULL)
1439		bh = __getblk_slow(bdev, block, size, gfp);
1440	return bh;
1441}
1442EXPORT_SYMBOL(__getblk_gfp);
1443
1444/*
1445 * Do async read-ahead on a buffer.
1446 */
1447void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
1448{
1449	struct buffer_head *bh = __getblk(bdev, block, size);
1450	if (likely(bh)) {
1451		bh_readahead(bh, REQ_RAHEAD);
1452		brelse(bh);
1453	}
1454}
1455EXPORT_SYMBOL(__breadahead);
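/*
 * Editorial sketch: a caller walking a chain of blocks can hint the next
 * one before blocking on the current one (block numbers hypothetical):
 *
 *	__breadahead(bdev, blk + 1, blocksize);	(async, result discarded)
 *	bh = __bread(bdev, blk, blocksize);	(blocking read)
 */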
1456
1457/**
1458 *  __bread_gfp() - reads a specified block and returns the bh
1459 *  @bdev: the block_device to read from
1460 *  @block: number of block
1461 *  @size: size (in bytes) to read
1462 *  @gfp: page allocation flag
1463 *
1464 *  Reads a specified block, and returns the buffer_head that contains it.
1465 *  Pass zero for @gfp to have the page cache allocated from the
1466 *  non-movable area, so that the buffer does not hinder page migration.
1467 *  Returns NULL if the block was unreadable.
1468 */
1469struct buffer_head *
1470__bread_gfp(struct block_device *bdev, sector_t block,
1471		   unsigned size, gfp_t gfp)
1472{
1473	struct buffer_head *bh = __getblk_gfp(bdev, block, size, gfp);
1474
1475	if (likely(bh) && !buffer_uptodate(bh))
1476		bh = __bread_slow(bh);
1477	return bh;
1478}
1479EXPORT_SYMBOL(__bread_gfp);
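/*
 * Editorial sketch: a typical metadata read through this interface.
 * bread() and sb_bread() in buffer_head.h reduce to exactly this call:
 *
 *	struct buffer_head *bh = __bread_gfp(bdev, blk, size, __GFP_MOVABLE);
 *
 *	if (!bh)
 *		return -EIO;	(no uptodate buffer could be produced)
 *	... use bh->b_data ...
 *	brelse(bh);
 */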
1480
1481static void __invalidate_bh_lrus(struct bh_lru *b)
1482{
1483	int i;
1484
1485	for (i = 0; i < BH_LRU_SIZE; i++) {
1486		brelse(b->bhs[i]);
1487		b->bhs[i] = NULL;
1488	}
1489}
1490/*
1491 * invalidate_bh_lrus() is called rarely - but not only at unmount.
1492 * This doesn't race because it runs on each CPU either in irq context
1493 * or with preemption disabled.
1494 */
1495static void invalidate_bh_lru(void *arg)
1496{
1497	struct bh_lru *b = &get_cpu_var(bh_lrus);
1498
1499	__invalidate_bh_lrus(b);
1500	put_cpu_var(bh_lrus);
1501}
1502
1503bool has_bh_in_lru(int cpu, void *dummy)
1504{
1505	struct bh_lru *b = per_cpu_ptr(&bh_lrus, cpu);
1506	int i;
1507
1508	for (i = 0; i < BH_LRU_SIZE; i++) {
1509		if (b->bhs[i])
1510			return true;
1511	}
1512
1513	return false;
1514}
1515
1516void invalidate_bh_lrus(void)
1517{
1518	on_each_cpu_cond(has_bh_in_lru, invalidate_bh_lru, NULL, 1);
1519}
1520EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
1521
1522/*
1523 * It's called from workqueue context so we need a bh_lru_lock to close
1524 * the race with preemption/irq.
1525 */
1526void invalidate_bh_lrus_cpu(void)
1527{
1528	struct bh_lru *b;
1529
1530	bh_lru_lock();
1531	b = this_cpu_ptr(&bh_lrus);
1532	__invalidate_bh_lrus(b);
1533	bh_lru_unlock();
1534}
1535
1536void folio_set_bh(struct buffer_head *bh, struct folio *folio,
1537		  unsigned long offset)
1538{
1539	bh->b_folio = folio;
1540	BUG_ON(offset >= folio_size(folio));
1541	if (folio_test_highmem(folio))
1542		/*
1543		 * This catches illegal uses and preserves the offset:
1544		 */
1545		bh->b_data = (char *)(0 + offset);
1546	else
1547		bh->b_data = folio_address(folio) + offset;
1548}
1549EXPORT_SYMBOL(folio_set_bh);
1550
1551/*
1552 * Called when truncating a buffer on a page completely.
1553 */
1554
1555/* Bits that are cleared during an invalidate */
1556#define BUFFER_FLAGS_DISCARD \
1557	(1 << BH_Mapped | 1 << BH_New | 1 << BH_Req | \
1558	 1 << BH_Delay | 1 << BH_Unwritten)
1559
1560static void discard_buffer(struct buffer_head * bh)
1561{
1562	unsigned long b_state;
1563
1564	lock_buffer(bh);
1565	clear_buffer_dirty(bh);
1566	bh->b_bdev = NULL;
1567	b_state = READ_ONCE(bh->b_state);
1568	do {
1569	} while (!try_cmpxchg(&bh->b_state, &b_state,
1570			      b_state & ~BUFFER_FLAGS_DISCARD));
1571	unlock_buffer(bh);
1572}
1573
1574/**
1575 * block_invalidate_folio - Invalidate part or all of a buffer-backed folio.
1576 * @folio: The folio which is affected.
1577 * @offset: start of the range to invalidate
1578 * @length: length of the range to invalidate
1579 *
1580 * block_invalidate_folio() is called when all or part of the folio has been
1581 * invalidated by a truncate operation.
1582 *
1583 * block_invalidate_folio() does not have to release all buffers, but it must
1584 * ensure that no dirty buffer is left outside @offset and that no I/O
1585 * is underway against any of the blocks which are outside the truncation
1586 * point.  Because the caller is about to free (and possibly reuse) those
1587 * blocks on-disk.
1588 */
1589void block_invalidate_folio(struct folio *folio, size_t offset, size_t length)
1590{
1591	struct buffer_head *head, *bh, *next;
1592	size_t curr_off = 0;
1593	size_t stop = length + offset;
1594
1595	BUG_ON(!folio_test_locked(folio));
1596
1597	/*
1598	 * Check for overflow
1599	 */
1600	BUG_ON(stop > folio_size(folio) || stop < length);
1601
1602	head = folio_buffers(folio);
1603	if (!head)
1604		return;
1605
1606	bh = head;
1607	do {
1608		size_t next_off = curr_off + bh->b_size;
1609		next = bh->b_this_page;
1610
1611		/*
1612		 * Are we still fully in range ?
1613		 */
1614		if (next_off > stop)
1615			goto out;
1616
1617		/*
1618		 * is this block fully invalidated?
1619		 */
1620		if (offset <= curr_off)
1621			discard_buffer(bh);
1622		curr_off = next_off;
1623		bh = next;
1624	} while (bh != head);
1625
1626	/*
1627	 * We release buffers only if the entire folio is being invalidated.
1628	 * The get_block cached value has been unconditionally invalidated,
1629	 * so real IO is not possible anymore.
1630	 */
1631	if (length == folio_size(folio))
1632		filemap_release_folio(folio, 0);
1633out:
1634	return;
1635}
1636EXPORT_SYMBOL(block_invalidate_folio);
1637
1638/*
1639 * We attach and possibly dirty the buffers atomically wrt
1640 * block_dirty_folio() via private_lock.  try_to_free_buffers
1641 * is already excluded via the folio lock.
1642 */
1643void folio_create_empty_buffers(struct folio *folio, unsigned long blocksize,
1644				unsigned long b_state)
1645{
1646	struct buffer_head *bh, *head, *tail;
1647
1648	head = folio_alloc_buffers(folio, blocksize, true);
1649	bh = head;
1650	do {
1651		bh->b_state |= b_state;
1652		tail = bh;
1653		bh = bh->b_this_page;
1654	} while (bh);
1655	tail->b_this_page = head;
1656
1657	spin_lock(&folio->mapping->private_lock);
1658	if (folio_test_uptodate(folio) || folio_test_dirty(folio)) {
1659		bh = head;
1660		do {
1661			if (folio_test_dirty(folio))
1662				set_buffer_dirty(bh);
1663			if (folio_test_uptodate(folio))
1664				set_buffer_uptodate(bh);
1665			bh = bh->b_this_page;
1666		} while (bh != head);
1667	}
1668	folio_attach_private(folio, head);
1669	spin_unlock(&folio->mapping->private_lock);
1670}
1671EXPORT_SYMBOL(folio_create_empty_buffers);
1672
1673void create_empty_buffers(struct page *page,
1674			unsigned long blocksize, unsigned long b_state)
1675{
1676	folio_create_empty_buffers(page_folio(page), blocksize, b_state);
1677}
1678EXPORT_SYMBOL(create_empty_buffers);
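/*
 * Editorial sketch: a write_begin-style path ensuring a folio has buffers
 * before mapping them; folio_create_buffers() later in this file does the
 * same check-then-create internally (fragment, error handling omitted):
 *
 *	if (!folio_buffers(folio))
 *		folio_create_empty_buffers(folio,
 *				1 << inode->i_blkbits, 0);
 *	head = folio_buffers(folio);
 */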
1679
1680/**
1681 * clean_bdev_aliases: clean a range of buffers in block device
1682 * @bdev: Block device to clean buffers in
1683 * @block: Start of a range of blocks to clean
1684 * @len: Number of blocks to clean
1685 *
1686 * We are taking a range of blocks for data and we don't want writeback of any
1687 * buffer-cache aliases from the moment this function returns until the
1688 * moment something explicitly marks the buffer dirty (hopefully that
1689 * will not happen until we free that block ;-) We don't even need to mark
1690 * it not-uptodate - nobody can expect anything from a newly allocated buffer
1691 * anyway. We used to use unmap_buffer() for such invalidation, but that was
1692 * wrong. We definitely don't want to mark the alias unmapped, for example - it
1693 * would confuse anyone who might pick it with bread() afterwards...
1694 *
1695 * Also note that bforget() doesn't lock the buffer.  So there can be
1696 * writeout I/O going on against recently-freed buffers.  We don't wait on that
1697 * I/O in bforget() - it's more efficient to wait on the I/O only if we really
1698 * need to.  That happens here.
1699 */
1700void clean_bdev_aliases(struct block_device *bdev, sector_t block, sector_t len)
1701{
1702	struct inode *bd_inode = bdev->bd_inode;
1703	struct address_space *bd_mapping = bd_inode->i_mapping;
1704	struct folio_batch fbatch;
1705	pgoff_t index = block >> (PAGE_SHIFT - bd_inode->i_blkbits);
1706	pgoff_t end;
1707	int i, count;
1708	struct buffer_head *bh;
1709	struct buffer_head *head;
1710
1711	end = (block + len - 1) >> (PAGE_SHIFT - bd_inode->i_blkbits);
1712	folio_batch_init(&fbatch);
1713	while (filemap_get_folios(bd_mapping, &index, end, &fbatch)) {
1714		count = folio_batch_count(&fbatch);
1715		for (i = 0; i < count; i++) {
1716			struct folio *folio = fbatch.folios[i];
1717
1718			if (!folio_buffers(folio))
1719				continue;
1720			/*
1721			 * We use folio lock instead of bd_mapping->private_lock
1722			 * to pin buffers here since we can afford to sleep and
1723			 * it scales better than a global spinlock.
1724			 */
1725			folio_lock(folio);
1726			/* Recheck when the folio is locked which pins bhs */
1727			head = folio_buffers(folio);
1728			if (!head)
1729				goto unlock_page;
1730			bh = head;
1731			do {
1732				if (!buffer_mapped(bh) || (bh->b_blocknr < block))
1733					goto next;
1734				if (bh->b_blocknr >= block + len)
1735					break;
1736				clear_buffer_dirty(bh);
1737				wait_on_buffer(bh);
1738				clear_buffer_req(bh);
1739next:
1740				bh = bh->b_this_page;
1741			} while (bh != head);
1742unlock_page:
1743			folio_unlock(folio);
1744		}
1745		folio_batch_release(&fbatch);
1746		cond_resched();
1747		/* End of range already reached? */
1748		if (index > end || !index)
1749			break;
1750	}
1751}
1752EXPORT_SYMBOL(clean_bdev_aliases);
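/*
 * Editorial sketch: the typical caller is a writeback path that has just
 * mapped a freshly allocated block; __block_write_full_folio() below does
 * literally this, via the one-block wrapper clean_bdev_bh_alias():
 *
 *	if (buffer_new(bh)) {
 *		clear_buffer_new(bh);
 *		clean_bdev_bh_alias(bh);	(clean_bdev_aliases(..., 1))
 *	}
 */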
1753
1754/*
1755 * Size is a power-of-two in the range 512..PAGE_SIZE,
1756 * and the case we care about most is PAGE_SIZE.
1757 *
1758 * So this *could* possibly be written with those
1759 * constraints in mind (relevant mostly if some
1760 * architecture has a slow bit-scan instruction)
1761 */
1762static inline int block_size_bits(unsigned int blocksize)
1763{
1764	return ilog2(blocksize);
1765}
1766
1767static struct buffer_head *folio_create_buffers(struct folio *folio,
1768						struct inode *inode,
1769						unsigned int b_state)
1770{
1771	BUG_ON(!folio_test_locked(folio));
1772
1773	if (!folio_buffers(folio))
1774		folio_create_empty_buffers(folio,
1775					   1 << READ_ONCE(inode->i_blkbits),
1776					   b_state);
1777	return folio_buffers(folio);
1778}
1779
1780/*
1781 * NOTE! All mapped/uptodate combinations are valid:
1782 *
1783 *	Mapped	Uptodate	Meaning
1784 *
1785 *	No	No		"unknown" - must do get_block()
1786 *	No	Yes		"hole" - zero-filled
1787 *	Yes	No		"allocated" - allocated on disk, not read in
1788 *	Yes	Yes		"valid" - allocated and up-to-date in memory.
1789 *
1790 * "Dirty" is valid only with the last case (mapped+uptodate).
1791 */
1792
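/*
 * Editorial sketch of the table above as code: what a read path does per
 * state (hypothetical fragment; the real logic lives in
 * block_read_full_folio()):
 *
 *	if (!buffer_mapped(bh))
 *		err = get_block(inode, iblock, bh, 0);	("unknown")
 *	if (!buffer_mapped(bh))
 *		zero the block and set_buffer_uptodate(bh);	(a hole)
 *	else if (!buffer_uptodate(bh))
 *		submit a read;		(allocated on disk, not read in)
 */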
1793/*
1794 * While block_write_full_page is writing back the dirty buffers under
1795 * the page lock, whoever dirtied the buffers may decide to clean them
1796 * again at any time.  We handle that by only looking at the buffer
1797 * state inside lock_buffer().
1798 *
1799 * If block_write_full_page() is called for regular writeback
1800 * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1801 * locked buffer.  This can only happen if someone has written the buffer
1802 * directly, with submit_bh().  At the address_space level PageWriteback
1803 * prevents this contention from occurring.
1804 *
1805 * If block_write_full_page() is called with wbc->sync_mode ==
1806 * WB_SYNC_ALL, the writes are posted using REQ_SYNC; this
1807 * causes the writes to be flagged as synchronous writes.
1808 */
1809int __block_write_full_folio(struct inode *inode, struct folio *folio,
1810			get_block_t *get_block, struct writeback_control *wbc,
1811			bh_end_io_t *handler)
1812{
1813	int err;
1814	sector_t block;
1815	sector_t last_block;
1816	struct buffer_head *bh, *head;
1817	unsigned int blocksize, bbits;
1818	int nr_underway = 0;
1819	blk_opf_t write_flags = wbc_to_write_flags(wbc);
1820
1821	head = folio_create_buffers(folio, inode,
1822				    (1 << BH_Dirty) | (1 << BH_Uptodate));
1823
1824	/*
1825	 * Be very careful.  We have no exclusion from block_dirty_folio
1826	 * here, and the (potentially unmapped) buffers may become dirty at
1827	 * any time.  If a buffer becomes dirty here after we've inspected it
1828	 * then we just miss that fact, and the folio stays dirty.
1829	 *
1830	 * Buffers outside i_size may be dirtied by block_dirty_folio;
1831	 * handle that here by just cleaning them.
1832	 */
1833
1834	bh = head;
1835	blocksize = bh->b_size;
1836	bbits = block_size_bits(blocksize);
1837
1838	block = (sector_t)folio->index << (PAGE_SHIFT - bbits);
1839	last_block = (i_size_read(inode) - 1) >> bbits;
1840
1841	/*
1842	 * Get all the dirty buffers mapped to disk addresses and
1843	 * handle any aliases from the underlying blockdev's mapping.
1844	 */
1845	do {
1846		if (block > last_block) {
1847			/*
1848			 * Mapped buffers outside i_size will occur, because
1849			 * this folio can be outside i_size when there is a
1850			 * truncate in progress.
1851			 *
1852			 * The buffer was already zeroed by
1853			 * block_write_full_page().
1854			 */
1855			clear_buffer_dirty(bh);
1856			set_buffer_uptodate(bh);
1857		} else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
1858			   buffer_dirty(bh)) {
1859			WARN_ON(bh->b_size != blocksize);
1860			err = get_block(inode, block, bh, 1);
1861			if (err)
1862				goto recover;
1863			clear_buffer_delay(bh);
1864			if (buffer_new(bh)) {
1865				/* blockdev mappings never come here */
1866				clear_buffer_new(bh);
1867				clean_bdev_bh_alias(bh);
1868			}
1869		}
1870		bh = bh->b_this_page;
1871		block++;
1872	} while (bh != head);
1873
1874	do {
1875		if (!buffer_mapped(bh))
1876			continue;
1877		/*
1878		 * If it's a fully non-blocking write attempt and we cannot
1879		 * lock the buffer then redirty the folio.  Note that this can
1880		 * potentially cause a busy-wait loop from writeback threads
1881		 * and kswapd activity, but those code paths have their own
1882		 * higher-level throttling.
1883		 */
1884		if (wbc->sync_mode != WB_SYNC_NONE) {
1885			lock_buffer(bh);
1886		} else if (!trylock_buffer(bh)) {
1887			folio_redirty_for_writepage(wbc, folio);
1888			continue;
1889		}
1890		if (test_clear_buffer_dirty(bh)) {
1891			mark_buffer_async_write_endio(bh, handler);
1892		} else {
1893			unlock_buffer(bh);
1894		}
1895	} while ((bh = bh->b_this_page) != head);
1896
1897	/*
1898	 * The folio and its buffers are protected by the writeback flag,
1899	 * so we can drop the bh refcounts early.
1900	 */
1901	BUG_ON(folio_test_writeback(folio));
1902	folio_start_writeback(folio);
1903
1904	do {
1905		struct buffer_head *next = bh->b_this_page;
1906		if (buffer_async_write(bh)) {
1907			submit_bh_wbc(REQ_OP_WRITE | write_flags, bh, wbc);
1908			nr_underway++;
1909		}
1910		bh = next;
1911	} while (bh != head);
1912	folio_unlock(folio);
1913
1914	err = 0;
1915done:
1916	if (nr_underway == 0) {
1917		/*
1918		 * The folio was marked dirty, but the buffers were
1919		 * clean.  Someone wrote them back by hand with
1920		 * write_dirty_buffer/submit_bh.  A rare case.
1921		 */
1922		folio_end_writeback(folio);
1923
1924		/*
1925		 * The folio and buffer_heads can be released at any time from
1926		 * here on.
1927		 */
1928	}
1929	return err;
1930
1931recover:
1932	/*
1933	 * ENOSPC, or some other error.  We may already have added some
1934	 * blocks to the file, so we need to write these out to avoid
1935	 * exposing stale data.
1936	 * The folio is currently locked and not marked for writeback
1937	 */
1938	bh = head;
1939	/* Recovery: lock and submit the mapped buffers */
1940	do {
1941		if (buffer_mapped(bh) && buffer_dirty(bh) &&
1942		    !buffer_delay(bh)) {
1943			lock_buffer(bh);
1944			mark_buffer_async_write_endio(bh, handler);
1945		} else {
1946			/*
1947			 * The buffer may have been set dirty during
1948			 * attachment to a dirty folio.
1949			 */
1950			clear_buffer_dirty(bh);
1951		}
1952	} while ((bh = bh->b_this_page) != head);
1953	folio_set_error(folio);
1954	BUG_ON(folio_test_writeback(folio));
1955	mapping_set_error(folio->mapping, err);
1956	folio_start_writeback(folio);
1957	do {
1958		struct buffer_head *next = bh->b_this_page;
1959		if (buffer_async_write(bh)) {
1960			clear_buffer_dirty(bh);
1961			submit_bh_wbc(REQ_OP_WRITE | write_flags, bh, wbc);
1962			nr_underway++;
1963		}
1964		bh = next;
1965	} while (bh != head);
1966	folio_unlock(folio);
1967	goto done;
1968}
1969EXPORT_SYMBOL(__block_write_full_folio);
1970
1971/*
1972 * If a folio has any new buffers, zero them out here, and mark them uptodate
1973 * and dirty so they'll be written out (in order to prevent uninitialised
1974 * block data from leaking). And clear the new bit.
1975 */
1976void folio_zero_new_buffers(struct folio *folio, size_t from, size_t to)
1977{
1978	size_t block_start, block_end;
1979	struct buffer_head *head, *bh;
1980
1981	BUG_ON(!folio_test_locked(folio));
1982	head = folio_buffers(folio);
1983	if (!head)
1984		return;
1985
1986	bh = head;
1987	block_start = 0;
1988	do {
1989		block_end = block_start + bh->b_size;
1990
1991		if (buffer_new(bh)) {
1992			if (block_end > from && block_start < to) {
1993				if (!folio_test_uptodate(folio)) {
1994					size_t start, xend;
1995
1996					start = max(from, block_start);
1997					xend = min(to, block_end);
1998
1999					folio_zero_segment(folio, start, xend);
2000					set_buffer_uptodate(bh);
2001				}
2002
2003				clear_buffer_new(bh);
2004				mark_buffer_dirty(bh);
2005			}
2006		}
2007
2008		block_start = block_end;
2009		bh = bh->b_this_page;
2010	} while (bh != head);
2011}
2012EXPORT_SYMBOL(folio_zero_new_buffers);
2013
2014static int
2015iomap_to_bh(struct inode *inode, sector_t block, struct buffer_head *bh,
2016		const struct iomap *iomap)
2017{
2018	loff_t offset = block << inode->i_blkbits;
2019
2020	bh->b_bdev = iomap->bdev;
2021
2022	/*
2023	 * "block" is the file offset we need to map; the iomap gives the
2024	 * offset at which the mapping starts.  If the mapping ends before
2025	 * the current block, do not map the buffer and let the caller
2026	 * handle it.
2027	 */
2028	if (offset >= iomap->offset + iomap->length)
2029		return -EIO;
2030
2031	switch (iomap->type) {
2032	case IOMAP_HOLE:
2033		/*
2034		 * If the buffer is not up to date or beyond the current EOF,
2035		 * we need to mark it as new to ensure sub-block zeroing is
2036		 * executed if necessary.
2037		 */
2038		if (!buffer_uptodate(bh) ||
2039		    (offset >= i_size_read(inode)))
2040			set_buffer_new(bh);
2041		return 0;
2042	case IOMAP_DELALLOC:
2043		if (!buffer_uptodate(bh) ||
2044		    (offset >= i_size_read(inode)))
2045			set_buffer_new(bh);
2046		set_buffer_uptodate(bh);
2047		set_buffer_mapped(bh);
2048		set_buffer_delay(bh);
2049		return 0;
2050	case IOMAP_UNWRITTEN:
2051		/*
2052		 * For unwritten regions, we always need to ensure that regions
2053		 * in the block we are not writing to are zeroed. Mark the
2054		 * buffer as new to ensure this.
2055		 */
2056		set_buffer_new(bh);
2057		set_buffer_unwritten(bh);
2058		fallthrough;
2059	case IOMAP_MAPPED:
2060		if ((iomap->flags & IOMAP_F_NEW) ||
2061		    offset >= i_size_read(inode)) {
2062			/*
2063			 * This can happen if truncating the block device
2064			 * races with the check in the caller, as i_size
2065			 * updates on block devices aren't synchronized by
2066			 * i_rwsem.
2067			 */
2068			if (S_ISBLK(inode->i_mode))
2069				return -EIO;
2070			set_buffer_new(bh);
2071		}
2072		bh->b_blocknr = (iomap->addr + offset - iomap->offset) >>
2073				inode->i_blkbits;
2074		set_buffer_mapped(bh);
2075		return 0;
2076	default:
2077		WARN_ON_ONCE(1);
2078		return -EIO;
2079	}
2080}
2081
2082int __block_write_begin_int(struct folio *folio, loff_t pos, unsigned len,
2083		get_block_t *get_block, const struct iomap *iomap)
2084{
2085	unsigned from = pos & (PAGE_SIZE - 1);
2086	unsigned to = from + len;
2087	struct inode *inode = folio->mapping->host;
2088	unsigned block_start, block_end;
2089	sector_t block;
2090	int err = 0;
2091	unsigned blocksize, bbits;
2092	struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
2093
2094	BUG_ON(!folio_test_locked(folio));
2095	BUG_ON(from > PAGE_SIZE);
2096	BUG_ON(to > PAGE_SIZE);
2097	BUG_ON(from > to);
2098
2099	head = folio_create_buffers(folio, inode, 0);
2100	blocksize = head->b_size;
2101	bbits = block_size_bits(blocksize);
2102
2103	block = (sector_t)folio->index << (PAGE_SHIFT - bbits);
2104
2105	for (bh = head, block_start = 0; bh != head || !block_start;
2106	     block++, block_start = block_end, bh = bh->b_this_page) {
2107		block_end = block_start + blocksize;
2108		if (block_end <= from || block_start >= to) {
2109			if (folio_test_uptodate(folio)) {
2110				if (!buffer_uptodate(bh))
2111					set_buffer_uptodate(bh);
2112			}
2113			continue;
2114		}
2115		if (buffer_new(bh))
2116			clear_buffer_new(bh);
2117		if (!buffer_mapped(bh)) {
2118			WARN_ON(bh->b_size != blocksize);
2119			if (get_block)
2120				err = get_block(inode, block, bh, 1);
2121			else
2122				err = iomap_to_bh(inode, block, bh, iomap);
2123			if (err)
2124				break;
2125
2126			if (buffer_new(bh)) {
2127				clean_bdev_bh_alias(bh);
2128				if (folio_test_uptodate(folio)) {
2129					clear_buffer_new(bh);
2130					set_buffer_uptodate(bh);
2131					mark_buffer_dirty(bh);
2132					continue;
2133				}
2134				if (block_end > to || block_start < from)
2135					folio_zero_segments(folio,
2136						to, block_end,
2137						block_start, from);
2138				continue;
2139			}
2140		}
2141		if (folio_test_uptodate(folio)) {
2142			if (!buffer_uptodate(bh))
2143				set_buffer_uptodate(bh);
2144			continue;
2145		}
2146		if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
2147		    !buffer_unwritten(bh) &&
2148		     (block_start < from || block_end > to)) {
2149			bh_read_nowait(bh, 0);
2150			*wait_bh++ = bh;
2151		}
2152	}
2153	/*
2154	 * If we issued read requests - let them complete.
2155	 */
2156	while (wait_bh > wait) {
2157		wait_on_buffer(*--wait_bh);
2158		if (!buffer_uptodate(*wait_bh))
2159			err = -EIO;
2160	}
2161	if (unlikely(err))
2162		folio_zero_new_buffers(folio, from, to);
2163	return err;
2164}
2165
2166int __block_write_begin(struct page *page, loff_t pos, unsigned len,
2167		get_block_t *get_block)
2168{
2169	return __block_write_begin_int(page_folio(page), pos, len, get_block,
2170				       NULL);
2171}
2172EXPORT_SYMBOL(__block_write_begin);
2173
2174static void __block_commit_write(struct folio *folio, size_t from, size_t to)
2175{
2176	size_t block_start, block_end;
2177	bool partial = false;
2178	unsigned blocksize;
2179	struct buffer_head *bh, *head;
2180
2181	bh = head = folio_buffers(folio);
2182	blocksize = bh->b_size;
2183
2184	block_start = 0;
2185	do {
2186		block_end = block_start + blocksize;
2187		if (block_end <= from || block_start >= to) {
2188			if (!buffer_uptodate(bh))
2189				partial = true;
2190		} else {
2191			set_buffer_uptodate(bh);
2192			mark_buffer_dirty(bh);
2193		}
2194		if (buffer_new(bh))
2195			clear_buffer_new(bh);
2196
2197		block_start = block_end;
2198		bh = bh->b_this_page;
2199	} while (bh != head);
2200
2201	/*
2202	 * If this is a partial write which happened to make all buffers
2203	 * uptodate then we can optimize away a bogus read_folio() for
2204	 * the next read(). Here we 'discover' whether the folio went
2205	 * uptodate as a result of this (potentially partial) write.
2206	 */
2207	if (!partial)
2208		folio_mark_uptodate(folio);
2209}
2210
2211/*
2212 * block_write_begin takes care of the basic task of block allocation and
2213 * bringing partial write blocks uptodate first.
2214 *
2215 * The filesystem needs to handle block truncation upon failure.
2216 */
2217int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
2218		struct page **pagep, get_block_t *get_block)
2219{
2220	pgoff_t index = pos >> PAGE_SHIFT;
2221	struct page *page;
2222	int status;
2223
2224	page = grab_cache_page_write_begin(mapping, index);
2225	if (!page)
2226		return -ENOMEM;
2227
2228	status = __block_write_begin(page, pos, len, get_block);
2229	if (unlikely(status)) {
2230		unlock_page(page);
2231		put_page(page);
2232		page = NULL;
2233	}
2234
2235	*pagep = page;
2236	return status;
2237}
2238EXPORT_SYMBOL(block_write_begin);
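
/*
 * Illustrative usage (a sketch; "myfs_*" names are hypothetical): a
 * filesystem's ->write_begin() is typically a thin wrapper, and per the
 * rule above it must trim any blocks instantiated beyond i_size when
 * this helper fails:
 *
 *	static int myfs_write_begin(struct file *file,
 *			struct address_space *mapping, loff_t pos,
 *			unsigned len, struct page **pagep, void **fsdata)
 *	{
 *		int ret;
 *
 *		ret = block_write_begin(mapping, pos, len, pagep,
 *					myfs_get_block);
 *		if (ret)
 *			myfs_truncate_failed_write(mapping->host);
 *		return ret;
 *	}
 */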
2239
2240int block_write_end(struct file *file, struct address_space *mapping,
2241			loff_t pos, unsigned len, unsigned copied,
2242			struct page *page, void *fsdata)
2243{
2244	struct folio *folio = page_folio(page);
2245	size_t start = pos - folio_pos(folio);
2246
2247	if (unlikely(copied < len)) {
2248		/*
2249		 * The buffers that were written will now be uptodate, so
2250		 * we don't have to worry about a read_folio reading them
2251		 * and overwriting a partial write. However if we have
2252		 * encountered a short write and only partially written
2253		 * into a buffer, it will not be marked uptodate, so a
2254		 * read_folio might come in and destroy our partial write.
2255		 *
2256		 * Do the simplest thing, and just treat any short write to a
2257		 * non uptodate folio as a zero-length write, and force the
2258		 * caller to redo the whole thing.
2259		 */
2260		if (!folio_test_uptodate(folio))
2261			copied = 0;
2262
2263		folio_zero_new_buffers(folio, start+copied, start+len);
2264	}
2265	flush_dcache_folio(folio);
2266
2267	/* This could be a short (even 0-length) commit */
2268	__block_commit_write(folio, start, start + copied);
2269
2270	return copied;
2271}
2272EXPORT_SYMBOL(block_write_end);
2273
2274int generic_write_end(struct file *file, struct address_space *mapping,
2275			loff_t pos, unsigned len, unsigned copied,
2276			struct page *page, void *fsdata)
2277{
2278	struct inode *inode = mapping->host;
2279	loff_t old_size = inode->i_size;
2280	bool i_size_changed = false;
2281
2282	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
2283
2284	/*
2285	 * No need to use i_size_read() here, the i_size cannot change under us
2286	 * because we hold i_rwsem.
2287	 *
2288	 * But it's important to update i_size while still holding page lock:
2289	 * page writeout could otherwise come in and zero beyond i_size.
2290	 */
2291	if (pos + copied > inode->i_size) {
2292		i_size_write(inode, pos + copied);
2293		i_size_changed = true;
2294	}
2295
2296	unlock_page(page);
2297	put_page(page);
2298
2299	if (old_size < pos)
2300		pagecache_isize_extended(inode, old_size, pos);
2301	/*
2302	 * Don't mark the inode dirty under page lock. First, it unnecessarily
2303	 * makes the holding time of page lock longer. Second, it forces lock
2304	 * ordering of page lock and transaction start for journaling
2305	 * filesystems.
2306	 */
2307	if (i_size_changed)
2308		mark_inode_dirty(inode);
2309	return copied;
2310}
2311EXPORT_SYMBOL(generic_write_end);
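
/*
 * Illustrative usage (a sketch; "myfs_*" names are hypothetical): a
 * simple buffer-head based filesystem can assemble most of its buffered
 * I/O path from the generic helpers in this file:
 *
 *	static const struct address_space_operations myfs_aops = {
 *		.dirty_folio	= block_dirty_folio,
 *		.read_folio	= myfs_read_folio,
 *		.write_begin	= myfs_write_begin,
 *		.write_end	= generic_write_end,
 *		.bmap		= myfs_bmap,
 *	};
 */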
2312
2313/*
2314 * block_is_partially_uptodate checks whether buffers within a folio are
2315 * uptodate or not.
2316 *
2317 * Returns true if all buffers which correspond to the specified part
2318 * of the folio are uptodate.
2319 */
2320bool block_is_partially_uptodate(struct folio *folio, size_t from, size_t count)
2321{
2322	unsigned block_start, block_end, blocksize;
2323	unsigned to;
2324	struct buffer_head *bh, *head;
2325	bool ret = true;
2326
2327	head = folio_buffers(folio);
2328	if (!head)
2329		return false;
2330	blocksize = head->b_size;
2331	to = min_t(unsigned, folio_size(folio) - from, count);
2332	to = from + to;
2333	if (from < blocksize && to > folio_size(folio) - blocksize)
2334		return false;
2335
2336	bh = head;
2337	block_start = 0;
2338	do {
2339		block_end = block_start + blocksize;
2340		if (block_end > from && block_start < to) {
2341			if (!buffer_uptodate(bh)) {
2342				ret = false;
2343				break;
2344			}
2345			if (block_end >= to)
2346				break;
2347		}
2348		block_start = block_end;
2349		bh = bh->b_this_page;
2350	} while (bh != head);
2351
2352	return ret;
2353}
2354EXPORT_SYMBOL(block_is_partially_uptodate);
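
/*
 * Illustrative usage: filesystems normally just point the corresponding
 * address_space operation at this helper:
 *
 *	.is_partially_uptodate	= block_is_partially_uptodate,
 */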
2355
2356/*
2357 * Generic "read_folio" function for block devices that have the normal
2358 * get_block functionality. This is most of the block device filesystems.
2359 * Reads the folio asynchronously --- the unlock_buffer() and
2360 * set/clear_buffer_uptodate() functions propagate buffer state into the
2361 * folio once IO has completed.
2362 */
2363int block_read_full_folio(struct folio *folio, get_block_t *get_block)
2364{
2365	struct inode *inode = folio->mapping->host;
2366	sector_t iblock, lblock;
2367	struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
2368	unsigned int blocksize, bbits;
2369	int nr, i;
2370	int fully_mapped = 1;
2371	bool page_error = false;
2372	loff_t limit = i_size_read(inode);
2373
2374	/* This is needed for ext4. */
2375	if (IS_ENABLED(CONFIG_FS_VERITY) && IS_VERITY(inode))
2376		limit = inode->i_sb->s_maxbytes;
2377
2378	VM_BUG_ON_FOLIO(folio_test_large(folio), folio);
2379
2380	head = folio_create_buffers(folio, inode, 0);
2381	blocksize = head->b_size;
2382	bbits = block_size_bits(blocksize);
2383
2384	iblock = (sector_t)folio->index << (PAGE_SHIFT - bbits);
2385	lblock = (limit+blocksize-1) >> bbits;
2386	bh = head;
2387	nr = 0;
2388	i = 0;
2389
2390	do {
2391		if (buffer_uptodate(bh))
2392			continue;
2393
2394		if (!buffer_mapped(bh)) {
2395			int err = 0;
2396
2397			fully_mapped = 0;
2398			if (iblock < lblock) {
2399				WARN_ON(bh->b_size != blocksize);
2400				err = get_block(inode, iblock, bh, 0);
2401				if (err) {
2402					folio_set_error(folio);
2403					page_error = true;
2404				}
2405			}
2406			if (!buffer_mapped(bh)) {
2407				folio_zero_range(folio, i * blocksize,
2408						blocksize);
2409				if (!err)
2410					set_buffer_uptodate(bh);
2411				continue;
2412			}
2413			/*
2414			 * get_block() might have updated the buffer
2415			 * synchronously
2416			 */
2417			if (buffer_uptodate(bh))
2418				continue;
2419		}
2420		arr[nr++] = bh;
2421	} while (i++, iblock++, (bh = bh->b_this_page) != head);
2422
2423	if (fully_mapped)
2424		folio_set_mappedtodisk(folio);
2425
2426	if (!nr) {
2427		/*
2428		 * All buffers are uptodate - we can set the folio uptodate
2429		 * as well. But not if get_block() returned an error.
2430		 */
2431		if (!page_error)
2432			folio_mark_uptodate(folio);
2433		folio_unlock(folio);
2434		return 0;
2435	}
2436
2437	/* Stage two: lock the buffers */
2438	for (i = 0; i < nr; i++) {
2439		bh = arr[i];
2440		lock_buffer(bh);
2441		mark_buffer_async_read(bh);
2442	}
2443
2444	/*
2445	 * Stage 3: start the IO.  Check for uptodateness
2446	 * inside the buffer lock in case another process reading
2447	 * the underlying blockdev brought it uptodate (the sct fix).
2448	 */
2449	for (i = 0; i < nr; i++) {
2450		bh = arr[i];
2451		if (buffer_uptodate(bh))
2452			end_buffer_async_read(bh, 1);
2453		else
2454			submit_bh(REQ_OP_READ, bh);
2455	}
2456	return 0;
2457}
2458EXPORT_SYMBOL(block_read_full_folio);
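
/*
 * Illustrative usage (a sketch; "myfs_get_block" is hypothetical): the
 * usual ->read_folio() built on this function is a one-liner:
 *
 *	static int myfs_read_folio(struct file *file, struct folio *folio)
 *	{
 *		return block_read_full_folio(folio, myfs_get_block);
 *	}
 */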
2459
2460/* Utility function for filesystems that need to do work on expanding
2461 * truncates.  Uses filesystem pagecache writes to allow the filesystem to
2462 * deal with the hole.
2463 */
2464int generic_cont_expand_simple(struct inode *inode, loff_t size)
2465{
2466	struct address_space *mapping = inode->i_mapping;
2467	const struct address_space_operations *aops = mapping->a_ops;
2468	struct page *page;
2469	void *fsdata = NULL;
2470	int err;
2471
2472	err = inode_newsize_ok(inode, size);
2473	if (err)
2474		goto out;
2475
2476	err = aops->write_begin(NULL, mapping, size, 0, &page, &fsdata);
2477	if (err)
2478		goto out;
2479
2480	err = aops->write_end(NULL, mapping, size, 0, 0, page, fsdata);
2481	BUG_ON(err > 0);
2482
2483out:
2484	return err;
2485}
2486EXPORT_SYMBOL(generic_cont_expand_simple);
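
/*
 * Illustrative usage (a sketch, error handling elided): an expanding
 * truncate in a filesystem's ->setattr() might use it like this:
 *
 *	if (attr->ia_size > inode->i_size)
 *		err = generic_cont_expand_simple(inode, attr->ia_size);
 */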
2487
2488static int cont_expand_zero(struct file *file, struct address_space *mapping,
2489			    loff_t pos, loff_t *bytes)
2490{
2491	struct inode *inode = mapping->host;
2492	const struct address_space_operations *aops = mapping->a_ops;
2493	unsigned int blocksize = i_blocksize(inode);
2494	struct page *page;
2495	void *fsdata = NULL;
2496	pgoff_t index, curidx;
2497	loff_t curpos;
2498	unsigned zerofrom, offset, len;
2499	int err = 0;
2500
2501	index = pos >> PAGE_SHIFT;
2502	offset = pos & ~PAGE_MASK;
2503
2504	while (index > (curidx = (curpos = *bytes)>>PAGE_SHIFT)) {
2505		zerofrom = curpos & ~PAGE_MASK;
2506		if (zerofrom & (blocksize-1)) {
2507			*bytes |= (blocksize-1);
2508			(*bytes)++;
2509		}
2510		len = PAGE_SIZE - zerofrom;
2511
2512		err = aops->write_begin(file, mapping, curpos, len,
2513					    &page, &fsdata);
2514		if (err)
2515			goto out;
2516		zero_user(page, zerofrom, len);
2517		err = aops->write_end(file, mapping, curpos, len, len,
2518						page, fsdata);
2519		if (err < 0)
2520			goto out;
2521		BUG_ON(err != len);
2522		err = 0;
2523
2524		balance_dirty_pages_ratelimited(mapping);
2525
2526		if (fatal_signal_pending(current)) {
2527			err = -EINTR;
2528			goto out;
2529		}
2530	}
2531
2532	/* page covers the boundary, find the boundary offset */
2533	if (index == curidx) {
2534		zerofrom = curpos & ~PAGE_MASK;
2535		/* if we are expanding the file, the last block will be filled */
2536		if (offset <= zerofrom) {
2537			goto out;
2538		}
2539		if (zerofrom & (blocksize-1)) {
2540			*bytes |= (blocksize-1);
2541			(*bytes)++;
2542		}
2543		len = offset - zerofrom;
2544
2545		err = aops->write_begin(file, mapping, curpos, len,
2546					    &page, &fsdata);
2547		if (err)
2548			goto out;
2549		zero_user(page, zerofrom, len);
2550		err = aops->write_end(file, mapping, curpos, len, len,
2551						page, fsdata);
2552		if (err < 0)
2553			goto out;
2554		BUG_ON(err != len);
2555		err = 0;
2556	}
2557out:
2558	return err;
2559}
2560
2561/*
2562 * For moronic filesystems that do not allow holes in files.
2563 * We may have to extend the file.
2564 */
2565int cont_write_begin(struct file *file, struct address_space *mapping,
2566			loff_t pos, unsigned len,
2567			struct page **pagep, void **fsdata,
2568			get_block_t *get_block, loff_t *bytes)
2569{
2570	struct inode *inode = mapping->host;
2571	unsigned int blocksize = i_blocksize(inode);
2572	unsigned int zerofrom;
2573	int err;
2574
2575	err = cont_expand_zero(file, mapping, pos, bytes);
2576	if (err)
2577		return err;
2578
2579	zerofrom = *bytes & ~PAGE_MASK;
2580	if (pos+len > *bytes && zerofrom & (blocksize-1)) {
2581		*bytes |= (blocksize-1);
2582		(*bytes)++;
2583	}
2584
2585	return block_write_begin(mapping, pos, len, pagep, get_block);
2586}
2587EXPORT_SYMBOL(cont_write_begin);
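
/*
 * Illustrative usage (a sketch; the "myfs" names are hypothetical): the
 * filesystem passes a pointer to its persistent "zeroed up to here"
 * marker, the role that mmu_private plays for FAT:
 *
 *	static int myfs_write_begin(struct file *file,
 *			struct address_space *mapping, loff_t pos,
 *			unsigned len, struct page **pagep, void **fsdata)
 *	{
 *		return cont_write_begin(file, mapping, pos, len, pagep,
 *					fsdata, myfs_get_block,
 *					&MYFS_I(mapping->host)->valid_size);
 *	}
 */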
2588
2589void block_commit_write(struct page *page, unsigned from, unsigned to)
2590{
2591	struct folio *folio = page_folio(page);
2592	__block_commit_write(folio, from, to);
2593}
2594EXPORT_SYMBOL(block_commit_write);
2595
2596/*
2597 * block_page_mkwrite() is not allowed to change the file size as it gets
2598 * called from a page fault handler when a page is first dirtied. Hence we must
2599 * be careful to check for EOF conditions here. We set the page up correctly
2600 * for a written page, which means we get ENOSPC checking when writing into
2601 * holes and correct delalloc and unwritten extent mapping on filesystems that
2602 * support these features.
2603 *
2604 * We are not allowed to take the i_rwsem here so we have to play games to
2605 * protect against truncate races as the page could now be beyond EOF.  Because
2606 * truncate writes the inode size before removing pages, once we have the
2607 * page lock we can determine safely if the page is beyond EOF. If it is not
2608 * beyond EOF, then the page is guaranteed safe against truncation until we
2609 * unlock the page.
2610 *
2611 * Direct callers of this function should protect against filesystem freezing
2612 * using sb_start_pagefault() - sb_end_pagefault() functions.
2613 */
2614int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
2615			 get_block_t get_block)
2616{
2617	struct folio *folio = page_folio(vmf->page);
2618	struct inode *inode = file_inode(vma->vm_file);
2619	unsigned long end;
2620	loff_t size;
2621	int ret;
2622
2623	folio_lock(folio);
2624	size = i_size_read(inode);
2625	if ((folio->mapping != inode->i_mapping) ||
2626	    (folio_pos(folio) >= size)) {
2627		/* We overload EFAULT to mean page got truncated */
2628		ret = -EFAULT;
2629		goto out_unlock;
2630	}
2631
2632	end = folio_size(folio);
2633	/* folio is wholly or partially inside EOF */
2634	if (folio_pos(folio) + end > size)
2635		end = size - folio_pos(folio);
2636
2637	ret = __block_write_begin_int(folio, 0, end, get_block, NULL);
2638	if (unlikely(ret))
2639		goto out_unlock;
2640
2641	__block_commit_write(folio, 0, end);
2642
2643	folio_mark_dirty(folio);
2644	folio_wait_stable(folio);
2645	return 0;
2646out_unlock:
2647	folio_unlock(folio);
2648	return ret;
2649}
2650EXPORT_SYMBOL(block_page_mkwrite);
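
/*
 * Illustrative usage (a sketch; "myfs_*" names are hypothetical): a
 * direct caller honouring the freeze-protection rule above, using
 * block_page_mkwrite_return() from buffer_head.h to turn the error into
 * a VM_FAULT code:
 *
 *	static vm_fault_t myfs_page_mkwrite(struct vm_fault *vmf)
 *	{
 *		struct super_block *sb = file_inode(vmf->vma->vm_file)->i_sb;
 *		int err;
 *
 *		sb_start_pagefault(sb);
 *		err = block_page_mkwrite(vmf->vma, vmf, myfs_get_block);
 *		sb_end_pagefault(sb);
 *		return block_page_mkwrite_return(err);
 *	}
 */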
2651
2652int block_truncate_page(struct address_space *mapping,
2653			loff_t from, get_block_t *get_block)
2654{
2655	pgoff_t index = from >> PAGE_SHIFT;
2656	unsigned blocksize;
2657	sector_t iblock;
2658	size_t offset, length, pos;
2659	struct inode *inode = mapping->host;
2660	struct folio *folio;
2661	struct buffer_head *bh;
2662	int err = 0;
2663
2664	blocksize = i_blocksize(inode);
2665	length = from & (blocksize - 1);
2666
2667	/* Block boundary? Nothing to do */
2668	if (!length)
2669		return 0;
2670
2671	length = blocksize - length;
2672	iblock = (sector_t)index << (PAGE_SHIFT - inode->i_blkbits);
2673
2674	folio = filemap_grab_folio(mapping, index);
2675	if (IS_ERR(folio))
2676		return PTR_ERR(folio);
2677
2678	bh = folio_buffers(folio);
2679	if (!bh) {
2680		folio_create_empty_buffers(folio, blocksize, 0);
2681		bh = folio_buffers(folio);
2682	}
2683
2684	/* Find the buffer that contains "offset" */
2685	offset = offset_in_folio(folio, from);
2686	pos = blocksize;
2687	while (offset >= pos) {
2688		bh = bh->b_this_page;
2689		iblock++;
2690		pos += blocksize;
2691	}
2692
2693	if (!buffer_mapped(bh)) {
2694		WARN_ON(bh->b_size != blocksize);
2695		err = get_block(inode, iblock, bh, 0);
2696		if (err)
2697			goto unlock;
2698		/* unmapped? It's a hole - nothing to do */
2699		if (!buffer_mapped(bh))
2700			goto unlock;
2701	}
2702
2703	/* Ok, it's mapped. Make sure it's up-to-date */
2704	if (folio_test_uptodate(folio))
2705		set_buffer_uptodate(bh);
2706
2707	if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
2708		err = bh_read(bh, 0);
2709		/* Uhhuh. Read error. Complain and punt. */
2710		if (err < 0)
2711			goto unlock;
2712	}
2713
2714	folio_zero_range(folio, offset, length);
2715	mark_buffer_dirty(bh);
2716
2717unlock:
2718	folio_unlock(folio);
2719	folio_put(folio);
2720
2721	return err;
2722}
2723EXPORT_SYMBOL(block_truncate_page);
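
/*
 * Illustrative usage (a sketch; "myfs_*" names are hypothetical): a
 * truncate path zeroes the partial tail block before trimming the block
 * mapping:
 *
 *	err = block_truncate_page(inode->i_mapping, inode->i_size,
 *				  myfs_get_block);
 *	if (!err)
 *		myfs_free_blocks_beyond(inode, inode->i_size);
 */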
2724
2725/*
2726 * The generic ->writepage function for buffer-backed address_spaces
2727 */
2728int block_write_full_page(struct page *page, get_block_t *get_block,
2729			struct writeback_control *wbc)
2730{
2731	struct folio *folio = page_folio(page);
2732	struct inode * const inode = folio->mapping->host;
2733	loff_t i_size = i_size_read(inode);
2734
2735	/* Is the folio fully inside i_size? */
2736	if (folio_pos(folio) + folio_size(folio) <= i_size)
2737		return __block_write_full_folio(inode, folio, get_block, wbc,
2738					       end_buffer_async_write);
2739
2740	/* Is the folio fully outside i_size? (truncate in progress) */
2741	if (folio_pos(folio) >= i_size) {
2742		folio_unlock(folio);
2743		return 0; /* don't care */
2744	}
2745
2746	/*
2747	 * The folio straddles i_size.  It must be zeroed out on each and every
2748	 * writepage invocation because it may be mmapped.  "A file is mapped
2749	 * in multiples of the page size.  For a file that is not a multiple of
2750	 * the page size, the remaining memory is zeroed when mapped, and
2751	 * writes to that region are not written out to the file."
2752	 */
2753	folio_zero_segment(folio, offset_in_folio(folio, i_size),
2754			folio_size(folio));
2755	return __block_write_full_folio(inode, folio, get_block, wbc,
2756			end_buffer_async_write);
2757}
2758EXPORT_SYMBOL(block_write_full_page);
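
/*
 * Illustrative usage (a sketch; "myfs_get_block" is hypothetical): a
 * ->writepage() that defers entirely to the generic implementation:
 *
 *	static int myfs_writepage(struct page *page,
 *				  struct writeback_control *wbc)
 *	{
 *		return block_write_full_page(page, myfs_get_block, wbc);
 *	}
 */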
2759
2760sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
2761			    get_block_t *get_block)
2762{
2763	struct inode *inode = mapping->host;
2764	struct buffer_head tmp = {
2765		.b_size = i_blocksize(inode),
2766	};
2767
2768	get_block(inode, block, &tmp, 0);
2769	return tmp.b_blocknr;
2770}
2771EXPORT_SYMBOL(generic_block_bmap);
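
/*
 * Illustrative usage (a sketch; "myfs_get_block" is hypothetical): the
 * matching ->bmap() wrapper:
 *
 *	static sector_t myfs_bmap(struct address_space *mapping,
 *				  sector_t block)
 *	{
 *		return generic_block_bmap(mapping, block, myfs_get_block);
 *	}
 */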
2772
2773static void end_bio_bh_io_sync(struct bio *bio)
2774{
2775	struct buffer_head *bh = bio->bi_private;
2776
2777	if (unlikely(bio_flagged(bio, BIO_QUIET)))
2778		set_bit(BH_Quiet, &bh->b_state);
2779
2780	bh->b_end_io(bh, !bio->bi_status);
2781	bio_put(bio);
2782}
2783
2784static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh,
2785			  struct writeback_control *wbc)
2786{
2787	const enum req_op op = opf & REQ_OP_MASK;
2788	struct bio *bio;
2789
2790	BUG_ON(!buffer_locked(bh));
2791	BUG_ON(!buffer_mapped(bh));
2792	BUG_ON(!bh->b_end_io);
2793	BUG_ON(buffer_delay(bh));
2794	BUG_ON(buffer_unwritten(bh));
2795
2796	/*
2797	 * Only clear out a write error when rewriting
2798	 */
2799	if (test_set_buffer_req(bh) && (op == REQ_OP_WRITE))
2800		clear_buffer_write_io_error(bh);
2801
2802	if (buffer_meta(bh))
2803		opf |= REQ_META;
2804	if (buffer_prio(bh))
2805		opf |= REQ_PRIO;
2806
2807	bio = bio_alloc(bh->b_bdev, 1, opf, GFP_NOIO);
2808
2809	fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO);
2810
2811	bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
2812
2813	__bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
2814
2815	bio->bi_end_io = end_bio_bh_io_sync;
2816	bio->bi_private = bh;
2817
2818	/* Take care of bh's that straddle the end of the device */
2819	guard_bio_eod(bio);
2820
2821	if (wbc) {
2822		wbc_init_bio(wbc, bio);
2823		wbc_account_cgroup_owner(wbc, bh->b_page, bh->b_size);
2824	}
2825
2826	submit_bio(bio);
2827}
2828
2829void submit_bh(blk_opf_t opf, struct buffer_head *bh)
2830{
2831	submit_bh_wbc(opf, bh, NULL);
2832}
2833EXPORT_SYMBOL(submit_bh);
2834
2835void write_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags)
2836{
2837	lock_buffer(bh);
2838	if (!test_clear_buffer_dirty(bh)) {
2839		unlock_buffer(bh);
2840		return;
2841	}
2842	bh->b_end_io = end_buffer_write_sync;
2843	get_bh(bh);
2844	submit_bh(REQ_OP_WRITE | op_flags, bh);
2845}
2846EXPORT_SYMBOL(write_dirty_buffer);
2847
2848/*
2849 * For a data-integrity writeout, we need to wait upon any in-progress I/O
2850 * and then start new I/O and wait upon it in turn.  The caller must have a ref on
2851 * the buffer_head.
2852 */
2853int __sync_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags)
2854{
2855	WARN_ON(atomic_read(&bh->b_count) < 1);
2856	lock_buffer(bh);
2857	if (test_clear_buffer_dirty(bh)) {
2858		/*
2859		 * The bh should be mapped, but it might not be if the
2860		 * device was hot-removed. Not much we can do but fail the I/O.
2861		 */
2862		if (!buffer_mapped(bh)) {
2863			unlock_buffer(bh);
2864			return -EIO;
2865		}
2866
2867		get_bh(bh);
2868		bh->b_end_io = end_buffer_write_sync;
2869		submit_bh(REQ_OP_WRITE | op_flags, bh);
2870		wait_on_buffer(bh);
2871		if (!buffer_uptodate(bh))
2872			return -EIO;
2873	} else {
2874		unlock_buffer(bh);
2875	}
2876	return 0;
2877}
2878EXPORT_SYMBOL(__sync_dirty_buffer);
2879
2880int sync_dirty_buffer(struct buffer_head *bh)
2881{
2882	return __sync_dirty_buffer(bh, REQ_SYNC);
2883}
2884EXPORT_SYMBOL(sync_dirty_buffer);
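
/*
 * Illustrative usage (a sketch; "sb" and "block" are placeholders): the
 * classic synchronous metadata update built on this helper:
 *
 *	struct buffer_head *bh = sb_bread(sb, block);
 *
 *	if (!bh)
 *		return -EIO;
 *	... modify bh->b_data ...
 *	mark_buffer_dirty(bh);
 *	err = sync_dirty_buffer(bh);
 *	brelse(bh);
 */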
2885
2886/*
2887 * try_to_free_buffers() checks if all the buffers on this particular folio
2888 * are unused, and releases them if so.
2889 *
2890 * Exclusion against try_to_free_buffers may be obtained by either
2891 * locking the folio or by holding its mapping's private_lock.
2892 *
2893 * If the folio is dirty but all the buffers are clean then we need to
2894 * be sure to mark the folio clean as well.  This is because the folio
2895 * may be against a block device, and a later reattachment of buffers
2896 * to a dirty folio will set *all* buffers dirty.  Which would corrupt
2897 * filesystem data on the same device.
2898 *
2899 * The same applies to regular filesystem folios: if all the buffers are
2900 * clean then we set the folio clean and proceed.  To do that, we require
2901 * total exclusion from block_dirty_folio().  That is obtained with
2902 * private_lock.
2903 *
2904 * try_to_free_buffers() is non-blocking.
2905 */
2906static inline int buffer_busy(struct buffer_head *bh)
2907{
2908	return atomic_read(&bh->b_count) |
2909		(bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
2910}
2911
2912static bool
2913drop_buffers(struct folio *folio, struct buffer_head **buffers_to_free)
2914{
2915	struct buffer_head *head = folio_buffers(folio);
2916	struct buffer_head *bh;
2917
2918	bh = head;
2919	do {
2920		if (buffer_busy(bh))
2921			goto failed;
2922		bh = bh->b_this_page;
2923	} while (bh != head);
2924
2925	do {
2926		struct buffer_head *next = bh->b_this_page;
2927
2928		if (bh->b_assoc_map)
2929			__remove_assoc_queue(bh);
2930		bh = next;
2931	} while (bh != head);
2932	*buffers_to_free = head;
2933	folio_detach_private(folio);
2934	return true;
2935failed:
2936	return false;
2937}
2938
2939bool try_to_free_buffers(struct folio *folio)
2940{
2941	struct address_space * const mapping = folio->mapping;
2942	struct buffer_head *buffers_to_free = NULL;
2943	bool ret = false;
2944
2945	BUG_ON(!folio_test_locked(folio));
2946	if (folio_test_writeback(folio))
2947		return false;
2948
2949	if (mapping == NULL) {		/* can this still happen? */
2950		ret = drop_buffers(folio, &buffers_to_free);
2951		goto out;
2952	}
2953
2954	spin_lock(&mapping->private_lock);
2955	ret = drop_buffers(folio, &buffers_to_free);
2956
2957	/*
2958	 * If the filesystem writes its buffers by hand (eg ext3)
2959	 * then we can have clean buffers against a dirty folio.  We
2960	 * clean the folio here; otherwise the VM will never notice
2961	 * that the filesystem did any IO at all.
2962	 *
2963	 * Also, during truncate, discard_buffer will have marked all
2964	 * the folio's buffers clean.  We discover that here and clean
2965	 * the folio also.
2966	 *
2967	 * private_lock must be held over this entire operation in order
2968	 * to synchronise against block_dirty_folio and prevent the
2969	 * dirty bit from being lost.
2970	 */
2971	if (ret)
2972		folio_cancel_dirty(folio);
2973	spin_unlock(&mapping->private_lock);
2974out:
2975	if (buffers_to_free) {
2976		struct buffer_head *bh = buffers_to_free;
2977
2978		do {
2979			struct buffer_head *next = bh->b_this_page;
2980			free_buffer_head(bh);
2981			bh = next;
2982		} while (bh != buffers_to_free);
2983	}
2984	return ret;
2985}
2986EXPORT_SYMBOL(try_to_free_buffers);
2987
2988/*
2989 * Buffer-head allocation
2990 */
2991static struct kmem_cache *bh_cachep __read_mostly;
2992
2993/*
2994 * Once the number of bh's in the machine exceeds this level, we start
2995 * stripping them in writeback.
2996 */
2997static unsigned long max_buffer_heads;
2998
2999int buffer_heads_over_limit;
3000
3001struct bh_accounting {
3002	int nr;			/* Number of live bh's */
3003	int ratelimit;		/* Limit cacheline bouncing */
3004};
3005
3006static DEFINE_PER_CPU(struct bh_accounting, bh_accounting) = {0, 0};
3007
3008static void recalc_bh_state(void)
3009{
3010	int i;
3011	int tot = 0;
3012
3013	if (__this_cpu_inc_return(bh_accounting.ratelimit) - 1 < 4096)
3014		return;
3015	__this_cpu_write(bh_accounting.ratelimit, 0);
3016	for_each_online_cpu(i)
3017		tot += per_cpu(bh_accounting, i).nr;
3018	buffer_heads_over_limit = (tot > max_buffer_heads);
3019}
3020
3021struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
3022{
3023	struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
3024	if (ret) {
3025		INIT_LIST_HEAD(&ret->b_assoc_buffers);
3026		spin_lock_init(&ret->b_uptodate_lock);
3027		preempt_disable();
3028		__this_cpu_inc(bh_accounting.nr);
3029		recalc_bh_state();
3030		preempt_enable();
3031	}
3032	return ret;
3033}
3034EXPORT_SYMBOL(alloc_buffer_head);
3035
3036void free_buffer_head(struct buffer_head *bh)
3037{
3038	BUG_ON(!list_empty(&bh->b_assoc_buffers));
3039	kmem_cache_free(bh_cachep, bh);
3040	preempt_disable();
3041	__this_cpu_dec(bh_accounting.nr);
3042	recalc_bh_state();
3043	preempt_enable();
3044}
3045EXPORT_SYMBOL(free_buffer_head);
3046
3047static int buffer_exit_cpu_dead(unsigned int cpu)
3048{
3049	int i;
3050	struct bh_lru *b = &per_cpu(bh_lrus, cpu);
3051
3052	for (i = 0; i < BH_LRU_SIZE; i++) {
3053		brelse(b->bhs[i]);
3054		b->bhs[i] = NULL;
3055	}
3056	this_cpu_add(bh_accounting.nr, per_cpu(bh_accounting, cpu).nr);
3057	per_cpu(bh_accounting, cpu).nr = 0;
3058	return 0;
3059}
3060
3061/**
3062 * bh_uptodate_or_lock - Test whether the buffer is uptodate
3063 * @bh: struct buffer_head
3064 *
3065 * Return true if the buffer is up-to-date and false,
3066 * with the buffer locked, if not.
3067 */
3068int bh_uptodate_or_lock(struct buffer_head *bh)
3069{
3070	if (!buffer_uptodate(bh)) {
3071		lock_buffer(bh);
3072		if (!buffer_uptodate(bh))
3073			return 0;
3074		unlock_buffer(bh);
3075	}
3076	return 1;
3077}
3078EXPORT_SYMBOL(bh_uptodate_or_lock);
3079
3080/**
3081 * __bh_read - Submit read for a locked buffer
3082 * @bh: struct buffer_head
3083 * @op_flags: additional REQ_* flags to OR into REQ_OP_READ
3084 * @wait: wait until the read finishes
3085 *
3086 * Returns zero on success (or when not waiting), and -EIO on read error.
3087 */
3088int __bh_read(struct buffer_head *bh, blk_opf_t op_flags, bool wait)
3089{
3090	int ret = 0;
3091
3092	BUG_ON(!buffer_locked(bh));
3093
3094	get_bh(bh);
3095	bh->b_end_io = end_buffer_read_sync;
3096	submit_bh(REQ_OP_READ | op_flags, bh);
3097	if (wait) {
3098		wait_on_buffer(bh);
3099		if (!buffer_uptodate(bh))
3100			ret = -EIO;
3101	}
3102	return ret;
3103}
3104EXPORT_SYMBOL(__bh_read);
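
/*
 * Illustrative usage: combined with bh_uptodate_or_lock() this gives
 * the common synchronous read pattern, essentially what bh_read() in
 * buffer_head.h does:
 *
 *	if (!bh_uptodate_or_lock(bh))
 *		err = __bh_read(bh, 0, true);
 */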
3105
3106/**
3107 * __bh_read_batch - Submit read for a batch of unlocked buffers
3108 * @nr: number of buffers in the batch
3109 * @bhs: a batch of struct buffer_head
3110 * @op_flags: additional REQ_* flags to OR into REQ_OP_READ
3111 * @force_lock: if set, wait for each buffer's lock; otherwise skip
3112 *              any buffer that cannot be locked immediately.
3113 *
3114 * The reads complete asynchronously via end_buffer_read_sync().
3115 */
3116void __bh_read_batch(int nr, struct buffer_head *bhs[],
3117		     blk_opf_t op_flags, bool force_lock)
3118{
3119	int i;
3120
3121	for (i = 0; i < nr; i++) {
3122		struct buffer_head *bh = bhs[i];
3123
3124		if (buffer_uptodate(bh))
3125			continue;
3126
3127		if (force_lock)
3128			lock_buffer(bh);
3129		else
3130			if (!trylock_buffer(bh))
3131				continue;
3132
3133		if (buffer_uptodate(bh)) {
3134			unlock_buffer(bh);
3135			continue;
3136		}
3137
3138		bh->b_end_io = end_buffer_read_sync;
3139		get_bh(bh);
3140		submit_bh(REQ_OP_READ | op_flags, bh);
3141	}
3142}
3143EXPORT_SYMBOL(__bh_read_batch);
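
/*
 * Illustrative usage: opportunistic readahead of a set of metadata
 * buffers, skipping any that are busy; essentially what
 * bh_readahead_batch() in buffer_head.h expands to:
 *
 *	__bh_read_batch(nr, bhs, REQ_RAHEAD, false);
 */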
3144
3145void __init buffer_init(void)
3146{
3147	unsigned long nrpages;
3148	int ret;
3149
3150	bh_cachep = kmem_cache_create("buffer_head",
3151			sizeof(struct buffer_head), 0,
3152				(SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
3153				SLAB_MEM_SPREAD),
3154				NULL);
3155
3156	/*
3157	 * Limit the bh occupancy to 10% of ZONE_NORMAL
3158	 */
3159	nrpages = (nr_free_buffer_pages() * 10) / 100;
3160	max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
3161	ret = cpuhp_setup_state_nocalls(CPUHP_FS_BUFF_DEAD, "fs/buffer:dead",
3162					NULL, buffer_exit_cpu_dead);
3163	WARN_ON(ret < 0);
3164}
3165