Lines matching refs:bh (references to struct buffer_head *bh in fs/buffer.c)

57 static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh,
62 inline void touch_buffer(struct buffer_head *bh)
64 trace_block_touch_buffer(bh);
65 folio_mark_accessed(bh->b_folio);
69 void __lock_buffer(struct buffer_head *bh)
71 wait_on_bit_lock_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
75 void unlock_buffer(struct buffer_head *bh)
77 clear_bit_unlock(BH_Lock, &bh->b_state);
79 wake_up_bit(&bh->b_state, BH_Lock);
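
The __lock_buffer()/unlock_buffer() fragments above are the serialization primitive for buffer_head state; lock_buffer() is the sleeping wrapper and trylock_buffer() the non-blocking variant (see line 1886 further down). A minimal usage sketch, assuming a directly addressable (lowmem) buffer and a hypothetical caller name:

#include <linux/buffer_head.h>
#include <linux/string.h>

/* Hypothetical helper: rewrite a buffer's contents under the buffer lock. */
static void example_update_block(struct buffer_head *bh, const void *data)
{
	lock_buffer(bh);                /* may sleep in __lock_buffer() if contended */
	memcpy(bh->b_data, data, bh->b_size);
	set_buffer_uptodate(bh);
	mark_buffer_dirty(bh);
	unlock_buffer(bh);              /* clears BH_Lock, wakes __wait_on_buffer() waiters */
}
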
91 struct buffer_head *head, *bh;
104 bh = head;
106 if (buffer_locked(bh))
109 if (buffer_dirty(bh))
112 bh = bh->b_this_page;
113 } while (bh != head);
121 void __wait_on_buffer(struct buffer_head * bh)
123 wait_on_bit_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
127 static void buffer_io_error(struct buffer_head *bh, char *msg)
129 if (!test_bit(BH_Quiet, &bh->b_state))
132 bh->b_bdev, (unsigned long long)bh->b_blocknr, msg);
136 * End-of-IO handler helper function which does not touch the bh after
138 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
139 a race there is benign: unlock_buffer() only uses the bh's address for
140 * hashing after unlocking the buffer, so it doesn't actually touch the bh
143 static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
146 set_buffer_uptodate(bh);
149 clear_buffer_uptodate(bh);
151 unlock_buffer(bh);
158 void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
160 __end_buffer_read_notouch(bh, uptodate);
161 put_bh(bh);
165 void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
168 set_buffer_uptodate(bh);
170 buffer_io_error(bh, ", lost sync page write");
171 mark_buffer_write_io_error(bh);
172 clear_buffer_uptodate(bh);
174 unlock_buffer(bh);
175 put_bh(bh);
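
end_buffer_read_sync() and end_buffer_write_sync() are the completion handlers for simple synchronous I/O. The sketch below shows how the read-side handler is wired up, essentially what __bread_slow() (line 1270 below) does; the helper name is hypothetical:

#include <linux/buffer_head.h>

/* Sketch: synchronously read one mapped buffer; returns 0 or -EIO. */
static int example_read_bh_sync(struct buffer_head *bh)
{
	lock_buffer(bh);
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		return 0;
	}
	get_bh(bh);                     /* reference dropped by end_buffer_read_sync() */
	bh->b_end_io = end_buffer_read_sync;
	submit_bh(REQ_OP_READ, bh);
	wait_on_buffer(bh);             /* sleeps until the completion unlocks the buffer */
	return buffer_uptodate(bh) ? 0 : -EIO;
}
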
196 struct buffer_head *bh;
211 bh = head;
213 if (!buffer_mapped(bh))
215 else if (bh->b_blocknr == block) {
216 ret = bh;
217 get_bh(bh);
220 bh = bh->b_this_page;
221 } while (bh != head);
234 (unsigned long long)bh->b_blocknr,
235 bh->b_state, bh->b_size, bdev,
245 static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
253 BUG_ON(!buffer_async_read(bh));
255 folio = bh->b_folio;
257 set_buffer_uptodate(bh);
259 clear_buffer_uptodate(bh);
260 buffer_io_error(bh, ", async page read");
271 clear_buffer_async_read(bh);
272 unlock_buffer(bh);
273 tmp = bh;
282 } while (tmp != bh);
301 struct buffer_head *bh;
308 struct buffer_head *bh = ctx->bh;
311 valid = fsverity_verify_blocks(bh->b_folio, bh->b_size, bh_offset(bh));
312 end_buffer_async_read(bh, valid);
316 static bool need_fsverity(struct buffer_head *bh)
318 struct folio *folio = bh->b_folio;
330 struct buffer_head *bh = ctx->bh;
333 err = fscrypt_decrypt_pagecache_blocks(bh->b_folio, bh->b_size,
334 bh_offset(bh));
335 if (err == 0 && need_fsverity(bh)) {
345 end_buffer_async_read(bh, err == 0);
353 static void end_buffer_async_read_io(struct buffer_head *bh, int uptodate)
355 struct inode *inode = bh->b_folio->mapping->host;
357 bool verify = need_fsverity(bh);
365 ctx->bh = bh;
377 end_buffer_async_read(bh, uptodate);
384 void end_buffer_async_write(struct buffer_head *bh, int uptodate)
391 BUG_ON(!buffer_async_write(bh));
393 folio = bh->b_folio;
395 set_buffer_uptodate(bh);
397 buffer_io_error(bh, ", lost async page write");
398 mark_buffer_write_io_error(bh);
399 clear_buffer_uptodate(bh);
406 clear_buffer_async_write(bh);
407 unlock_buffer(bh);
408 tmp = bh->b_this_page;
409 while (tmp != bh) {
447 static void mark_buffer_async_read(struct buffer_head *bh)
449 bh->b_end_io = end_buffer_async_read_io;
450 set_buffer_async_read(bh);
453 static void mark_buffer_async_write_endio(struct buffer_head *bh,
456 bh->b_end_io = handler;
457 set_buffer_async_write(bh);
460 void mark_buffer_async_write(struct buffer_head *bh)
462 mark_buffer_async_write_endio(bh, end_buffer_async_write);
519 static void __remove_assoc_queue(struct buffer_head *bh)
521 list_del_init(&bh->b_assoc_buffers);
522 WARN_ON(!bh->b_assoc_map);
523 bh->b_assoc_map = NULL;
543 struct buffer_head *bh;
550 bh = BH_ENTRY(p);
551 if (buffer_locked(bh)) {
552 get_bh(bh);
554 wait_on_buffer(bh);
555 if (!buffer_uptodate(bh))
557 brelse(bh);
668 struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
669 if (bh) {
670 if (buffer_dirty(bh))
671 write_dirty_buffer(bh, 0);
672 put_bh(bh);
676 void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
679 struct address_space *buffer_mapping = bh->b_folio->mapping;
681 mark_buffer_dirty(bh);
687 if (!bh->b_assoc_map) {
689 list_move_tail(&bh->b_assoc_buffers,
691 bh->b_assoc_map = mapping;
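
mark_buffer_dirty_inode() dirties the buffer and also links it onto the owning inode's associated-buffer list, which fsync_buffers_list() (lines 778-843 below) later flushes via sync_mapping_buffers(). A hedged sketch of the usual pairing; the myfs_* names are hypothetical:

#include <linux/fs.h>
#include <linux/buffer_head.h>

/* Hypothetical: dirty a metadata block on behalf of an inode. */
static void myfs_dirty_meta(struct inode *inode, struct buffer_head *bh)
{
	mark_buffer_dirty_inode(bh, inode);     /* dirty + queue on i_mapping's assoc list */
}

/* Hypothetical ->fsync: flush data, then the associated metadata buffers.
 * (A real fsync would also write back the inode itself.) */
static int myfs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = file_inode(file);
	int err = file_write_and_wait_range(file, start, end);

	if (err)
		return err;
	return sync_mapping_buffers(inode->i_mapping);
}
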
730 struct buffer_head *bh = head;
733 set_buffer_dirty(bh);
734 bh = bh->b_this_page;
735 } while (bh != head);
778 struct buffer_head *bh;
789 bh = BH_ENTRY(list->next);
790 mapping = bh->b_assoc_map;
791 __remove_assoc_queue(bh);
795 if (buffer_dirty(bh) || buffer_locked(bh)) {
796 list_add(&bh->b_assoc_buffers, &tmp);
797 bh->b_assoc_map = mapping;
798 if (buffer_dirty(bh)) {
799 get_bh(bh);
808 write_dirty_buffer(bh, REQ_SYNC);
816 brelse(bh);
827 bh = BH_ENTRY(tmp.prev);
828 get_bh(bh);
829 mapping = bh->b_assoc_map;
830 __remove_assoc_queue(bh);
834 if (buffer_dirty(bh)) {
835 list_add(&bh->b_assoc_buffers,
837 bh->b_assoc_map = mapping;
840 wait_on_buffer(bh);
841 if (!buffer_uptodate(bh))
843 brelse(bh);
896 struct buffer_head *bh = BH_ENTRY(list->next);
897 if (buffer_dirty(bh)) {
901 __remove_assoc_queue(bh);
910 * the size of each buffer. Use the bh->b_this_page linked list to
920 struct buffer_head *bh, *head;
935 bh = alloc_buffer_head(gfp);
936 if (!bh)
939 bh->b_this_page = head;
940 bh->b_blocknr = -1;
941 head = bh;
943 bh->b_size = size;
946 folio_set_bh(bh, folio, offset);
957 bh = head;
959 free_buffer_head(bh);
977 struct buffer_head *bh, *tail;
979 bh = head;
981 tail = bh;
982 bh = bh->b_this_page;
983 } while (bh);
1007 struct buffer_head *bh = head;
1012 if (!buffer_mapped(bh)) {
1013 bh->b_end_io = NULL;
1014 bh->b_private = NULL;
1015 bh->b_bdev = bdev;
1016 bh->b_blocknr = block;
1018 set_buffer_uptodate(bh);
1020 set_buffer_mapped(bh);
1023 bh = bh->b_this_page;
1024 } while (bh != head);
1043 struct buffer_head *bh;
1061 bh = folio_buffers(folio);
1062 if (bh) {
1063 if (bh->b_size == size) {
1072 bh = folio_alloc_buffers(folio, size, true);
1080 link_dev_buffers(folio, bh);
1138 struct buffer_head *bh;
1141 bh = __find_get_block(bdev, block, size);
1142 if (bh)
1143 return bh;
1176 * @bh: the buffer_head to mark dirty
1183 * mark_buffer_dirty() is atomic. It takes bh->b_folio->mapping->private_lock,
1186 void mark_buffer_dirty(struct buffer_head *bh)
1188 WARN_ON_ONCE(!buffer_uptodate(bh));
1190 trace_block_dirty_buffer(bh);
1198 if (buffer_dirty(bh)) {
1200 if (buffer_dirty(bh))
1204 if (!test_set_buffer_dirty(bh)) {
1205 struct folio *folio = bh->b_folio;
1221 void mark_buffer_write_io_error(struct buffer_head *bh)
1223 set_buffer_write_io_error(bh);
1225 if (bh->b_folio && bh->b_folio->mapping)
1226 mapping_set_error(bh->b_folio->mapping, -EIO);
1227 if (bh->b_assoc_map) {
1228 mapping_set_error(bh->b_assoc_map, -EIO);
1229 errseq_set(&bh->b_assoc_map->host->i_sb->s_wb_err, -EIO);
1255 void __bforget(struct buffer_head *bh)
1257 clear_buffer_dirty(bh);
1258 if (bh->b_assoc_map) {
1259 struct address_space *buffer_mapping = bh->b_folio->mapping;
1262 list_del_init(&bh->b_assoc_buffers);
1263 bh->b_assoc_map = NULL;
1266 __brelse(bh);
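
__bforget() is the slow path of bforget(): unlike brelse(), it throws the buffer's dirty state (and inode association) away instead of letting writeback pick it up. A small sketch of when each release helper fits; the helper name is made up:

#include <linux/buffer_head.h>

/* Sketch: release a metadata buffer.  brelse() lets dirty data reach disk
 * later; bforget() is for blocks whose contents are now meaningless (for
 * example a just-freed indirect block) and must not be written back. */
static void example_release_meta(struct buffer_head *bh, bool block_was_freed)
{
	if (!bh)
		return;
	if (block_was_freed)
		bforget(bh);
	else
		brelse(bh);
}
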
1270 static struct buffer_head *__bread_slow(struct buffer_head *bh)
1272 lock_buffer(bh);
1273 if (buffer_uptodate(bh)) {
1274 unlock_buffer(bh);
1275 return bh;
1277 get_bh(bh);
1278 bh->b_end_io = end_buffer_read_sync;
1279 submit_bh(REQ_OP_READ, bh);
1280 wait_on_buffer(bh);
1281 if (buffer_uptodate(bh))
1282 return bh;
1284 brelse(bh);
1330 static void bh_lru_install(struct buffer_head *bh)
1332 struct buffer_head *evictee = bh;
1343 * Skip putting upcoming bh into bh_lru until migration is done.
1353 if (evictee == bh) {
1359 get_bh(bh);
1365 * Look up the bh in this cpu's LRU. If it's there, move it to the head.
1380 struct buffer_head *bh = __this_cpu_read(bh_lrus.bhs[i]);
1382 if (bh && bh->b_blocknr == block && bh->b_bdev == bdev &&
1383 bh->b_size == size) {
1390 __this_cpu_write(bh_lrus.bhs[0], bh);
1392 get_bh(bh);
1393 ret = bh;
1409 struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
1411 if (bh == NULL) {
1413 bh = __find_get_block_slow(bdev, block);
1414 if (bh)
1415 bh_lru_install(bh);
1417 touch_buffer(bh);
1419 return bh;
1435 struct buffer_head *bh = __find_get_block(bdev, block, size);
1438 if (bh == NULL)
1439 bh = __getblk_slow(bdev, block, size, gfp);
1440 return bh;
1449 struct buffer_head *bh = __getblk(bdev, block, size);
1450 if (likely(bh)) {
1451 bh_readahead(bh, REQ_RAHEAD);
1452 brelse(bh);
1458 * __bread_gfp() - reads a specified block and returns the bh
1473 struct buffer_head *bh = __getblk_gfp(bdev, block, size, gfp);
1475 if (likely(bh) && !buffer_uptodate(bh))
1476 bh = __bread_slow(bh);
1477 return bh;
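
__bread_gfp() is the backend of __bread() and sb_bread(): find or create the buffer in the block device's page cache, read it if it is not uptodate, and return it with an elevated refcount (or NULL on failure). The classic consumer pattern, as a hedged sketch with made-up myfs_* naming; __breadahead() (lines 1449-1452 above) is the non-waiting prefetch companion:

#include <linux/buffer_head.h>
#include <linux/string.h>

/* Hypothetical: read one on-disk metadata block and copy part of it out. */
static int myfs_read_meta(struct super_block *sb, sector_t block,
			  void *out, size_t len)
{
	struct buffer_head *bh = sb_bread(sb, block);   /* __bread(sb->s_bdev, ...) */

	if (!bh)
		return -EIO;            /* allocation failed or the read failed */
	if (len > bh->b_size)
		len = bh->b_size;
	memcpy(out, bh->b_data, len);
	brelse(bh);                     /* drop the reference __bread_gfp() returned */
	return 0;
}
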
1536 void folio_set_bh(struct buffer_head *bh, struct folio *folio,
1539 bh->b_folio = folio;
1545 bh->b_data = (char *)(0 + offset);
1547 bh->b_data = folio_address(folio) + offset;
1560 static void discard_buffer(struct buffer_head * bh)
1564 lock_buffer(bh);
1565 clear_buffer_dirty(bh);
1566 bh->b_bdev = NULL;
1567 b_state = READ_ONCE(bh->b_state);
1569 } while (!try_cmpxchg(&bh->b_state, &b_state,
1571 unlock_buffer(bh);
1591 struct buffer_head *head, *bh, *next;
1606 bh = head;
1608 size_t next_off = curr_off + bh->b_size;
1609 next = bh->b_this_page;
1621 discard_buffer(bh);
1623 bh = next;
1624 } while (bh != head);
1646 struct buffer_head *bh, *head, *tail;
1649 bh = head;
1651 bh->b_state |= b_state;
1652 tail = bh;
1653 bh = bh->b_this_page;
1654 } while (bh);
1659 bh = head;
1662 set_buffer_dirty(bh);
1664 set_buffer_uptodate(bh);
1665 bh = bh->b_this_page;
1666 } while (bh != head);
1708 struct buffer_head *bh;
1730 bh = head;
1732 if (!buffer_mapped(bh) || (bh->b_blocknr < block))
1734 if (bh->b_blocknr >= block + len)
1736 clear_buffer_dirty(bh);
1737 wait_on_buffer(bh);
1738 clear_buffer_req(bh);
1740 bh = bh->b_this_page;
1741 } while (bh != head);
1816 struct buffer_head *bh, *head;
1834 bh = head;
1835 blocksize = bh->b_size;
1855 clear_buffer_dirty(bh);
1856 set_buffer_uptodate(bh);
1857 } else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
1858 buffer_dirty(bh)) {
1859 WARN_ON(bh->b_size != blocksize);
1860 err = get_block(inode, block, bh, 1);
1863 clear_buffer_delay(bh);
1864 if (buffer_new(bh)) {
1866 clear_buffer_new(bh);
1867 clean_bdev_bh_alias(bh);
1870 bh = bh->b_this_page;
1872 } while (bh != head);
1875 if (!buffer_mapped(bh))
1885 lock_buffer(bh);
1886 } else if (!trylock_buffer(bh)) {
1890 if (test_clear_buffer_dirty(bh)) {
1891 mark_buffer_async_write_endio(bh, handler);
1893 unlock_buffer(bh);
1895 } while ((bh = bh->b_this_page) != head);
1899 * so we can drop the bh refcounts early.
1905 struct buffer_head *next = bh->b_this_page;
1906 if (buffer_async_write(bh)) {
1907 submit_bh_wbc(REQ_OP_WRITE | write_flags, bh, wbc);
1910 bh = next;
1911 } while (bh != head);
1938 bh = head;
1941 if (buffer_mapped(bh) && buffer_dirty(bh) &&
1942 !buffer_delay(bh)) {
1943 lock_buffer(bh);
1944 mark_buffer_async_write_endio(bh, handler);
1950 clear_buffer_dirty(bh);
1952 } while ((bh = bh->b_this_page) != head);
1958 struct buffer_head *next = bh->b_this_page;
1959 if (buffer_async_write(bh)) {
1960 clear_buffer_dirty(bh);
1961 submit_bh_wbc(REQ_OP_WRITE | write_flags, bh, wbc);
1964 bh = next;
1965 } while (bh != head);
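
The __block_write_full_folio() fragments above use a two-pass scheme: first lock every dirty buffer and mark it async-write (so end_buffer_async_write() can tell when the whole folio is done), then submit them all. Condensed into a self-contained sketch, assuming the folio is locked, has buffers, and is not already under writeback; the real function additionally handles unmapped/delayed buffers and the case where nothing was dirty:

#include <linux/buffer_head.h>
#include <linux/pagemap.h>

/* Sketch: write back every dirty buffer of a locked folio. */
static void example_write_folio_buffers(struct folio *folio)
{
	struct buffer_head *head = folio_buffers(folio);
	struct buffer_head *bh = head;

	do {                            /* pass 1: claim and mark dirty buffers */
		lock_buffer(bh);
		if (test_clear_buffer_dirty(bh))
			mark_buffer_async_write(bh);
		else
			unlock_buffer(bh);
	} while ((bh = bh->b_this_page) != head);

	folio_start_writeback(folio);   /* last completing buffer ends writeback */
	folio_unlock(folio);

	do {                            /* pass 2: submit the marked buffers */
		struct buffer_head *next = bh->b_this_page;

		if (buffer_async_write(bh))
			submit_bh(REQ_OP_WRITE, bh);
		bh = next;
	} while (bh != head);
}
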
1979 struct buffer_head *head, *bh;
1986 bh = head;
1989 block_end = block_start + bh->b_size;
1991 if (buffer_new(bh)) {
2000 set_buffer_uptodate(bh);
2003 clear_buffer_new(bh);
2004 mark_buffer_dirty(bh);
2009 bh = bh->b_this_page;
2010 } while (bh != head);
2015 iomap_to_bh(struct inode *inode, sector_t block, struct buffer_head *bh,
2020 bh->b_bdev = iomap->bdev;
2038 if (!buffer_uptodate(bh) ||
2040 set_buffer_new(bh);
2043 if (!buffer_uptodate(bh) ||
2045 set_buffer_new(bh);
2046 set_buffer_uptodate(bh);
2047 set_buffer_mapped(bh);
2048 set_buffer_delay(bh);
2056 set_buffer_new(bh);
2057 set_buffer_unwritten(bh);
2070 set_buffer_new(bh);
2072 bh->b_blocknr = (iomap->addr + offset - iomap->offset) >>
2074 set_buffer_mapped(bh);
2092 struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
2105 for(bh = head, block_start = 0; bh != head || !block_start;
2106 block++, block_start=block_end, bh = bh->b_this_page) {
2110 if (!buffer_uptodate(bh))
2111 set_buffer_uptodate(bh);
2115 if (buffer_new(bh))
2116 clear_buffer_new(bh);
2117 if (!buffer_mapped(bh)) {
2118 WARN_ON(bh->b_size != blocksize);
2120 err = get_block(inode, block, bh, 1);
2122 err = iomap_to_bh(inode, block, bh, iomap);
2126 if (buffer_new(bh)) {
2127 clean_bdev_bh_alias(bh);
2129 clear_buffer_new(bh);
2130 set_buffer_uptodate(bh);
2131 mark_buffer_dirty(bh);
2142 if (!buffer_uptodate(bh))
2143 set_buffer_uptodate(bh);
2146 if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
2147 !buffer_unwritten(bh) &&
2149 bh_read_nowait(bh, 0);
2150 *wait_bh++=bh;
2179 struct buffer_head *bh, *head;
2181 bh = head = folio_buffers(folio);
2182 blocksize = bh->b_size;
2188 if (!buffer_uptodate(bh))
2191 set_buffer_uptodate(bh);
2192 mark_buffer_dirty(bh);
2194 if (buffer_new(bh))
2195 clear_buffer_new(bh);
2198 bh = bh->b_this_page;
2199 } while (bh != head);
2324 struct buffer_head *bh, *head;
2336 bh = head;
2341 if (!buffer_uptodate(bh)) {
2349 bh = bh->b_this_page;
2350 } while (bh != head);
2367 struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
2386 bh = head;
2391 if (buffer_uptodate(bh))
2394 if (!buffer_mapped(bh)) {
2399 WARN_ON(bh->b_size != blocksize);
2400 err = get_block(inode, iblock, bh, 0);
2406 if (!buffer_mapped(bh)) {
2410 set_buffer_uptodate(bh);
2417 if (buffer_uptodate(bh))
2420 arr[nr++] = bh;
2421 } while (i++, iblock++, (bh = bh->b_this_page) != head);
2439 bh = arr[i];
2440 lock_buffer(bh);
2441 mark_buffer_async_read(bh);
2450 bh = arr[i];
2451 if (buffer_uptodate(bh))
2452 end_buffer_async_read(bh, 1);
2454 submit_bh(REQ_OP_READ, bh);
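
block_read_full_folio() (the fragments at 2367-2454 above) is designed to be plugged in as an address_space's ->read_folio, with the filesystem supplying only a get_block callback. A minimal, hypothetical hookup; myfs_get_block is assumed to exist and fill in bh_result from the filesystem's block map:

#include <linux/fs.h>
#include <linux/buffer_head.h>

/* Assumed to be provided by the filesystem (get_block_t signature). */
static int myfs_get_block(struct inode *inode, sector_t iblock,
			  struct buffer_head *bh_result, int create);

static int myfs_read_folio(struct file *file, struct folio *folio)
{
	return block_read_full_folio(folio, myfs_get_block);
}

static const struct address_space_operations myfs_aops = {
	.read_folio	  = myfs_read_folio,
	.dirty_folio	  = block_dirty_folio,
	.invalidate_folio = block_invalidate_folio,
};
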
2661 struct buffer_head *bh;
2678 bh = folio_buffers(folio);
2679 if (!bh) {
2681 bh = folio_buffers(folio);
2688 bh = bh->b_this_page;
2693 if (!buffer_mapped(bh)) {
2694 WARN_ON(bh->b_size != blocksize);
2695 err = get_block(inode, iblock, bh, 0);
2699 if (!buffer_mapped(bh))
2705 set_buffer_uptodate(bh);
2707 if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
2708 err = bh_read(bh, 0);
2715 mark_buffer_dirty(bh);
2775 struct buffer_head *bh = bio->bi_private;
2778 set_bit(BH_Quiet, &bh->b_state);
2780 bh->b_end_io(bh, !bio->bi_status);
2784 static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh,
2790 BUG_ON(!buffer_locked(bh));
2791 BUG_ON(!buffer_mapped(bh));
2792 BUG_ON(!bh->b_end_io);
2793 BUG_ON(buffer_delay(bh));
2794 BUG_ON(buffer_unwritten(bh));
2799 if (test_set_buffer_req(bh) && (op == REQ_OP_WRITE))
2800 clear_buffer_write_io_error(bh);
2802 if (buffer_meta(bh))
2804 if (buffer_prio(bh))
2807 bio = bio_alloc(bh->b_bdev, 1, opf, GFP_NOIO);
2809 fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO);
2811 bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
2813 __bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
2816 bio->bi_private = bh;
2818 /* Take care of bh's that straddle the end of the device */
2823 wbc_account_cgroup_owner(wbc, bh->b_page, bh->b_size);
2829 void submit_bh(blk_opf_t opf, struct buffer_head *bh)
2831 submit_bh_wbc(opf, bh, NULL);
2835 void write_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags)
2837 lock_buffer(bh);
2838 if (!test_clear_buffer_dirty(bh)) {
2839 unlock_buffer(bh);
2842 bh->b_end_io = end_buffer_write_sync;
2843 get_bh(bh);
2844 submit_bh(REQ_OP_WRITE | op_flags, bh);
2853 int __sync_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags)
2855 WARN_ON(atomic_read(&bh->b_count) < 1);
2856 lock_buffer(bh);
2857 if (test_clear_buffer_dirty(bh)) {
2859 * The bh should be mapped, but it might not be if the
2862 if (!buffer_mapped(bh)) {
2863 unlock_buffer(bh);
2867 get_bh(bh);
2868 bh->b_end_io = end_buffer_write_sync;
2869 submit_bh(REQ_OP_WRITE | op_flags, bh);
2870 wait_on_buffer(bh);
2871 if (!buffer_uptodate(bh))
2874 unlock_buffer(bh);
2880 int sync_dirty_buffer(struct buffer_head *bh)
2882 return __sync_dirty_buffer(bh, REQ_SYNC);
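
sync_dirty_buffer() is __sync_dirty_buffer() with REQ_SYNC: write this one buffer now and report -EIO if it failed, the usual choice for critical metadata such as a superblock copy. A hedged sketch of the update-then-sync sequence (helper name made up, lowmem buffer assumed):

#include <linux/buffer_head.h>
#include <linux/string.h>

/* Sketch: update a critical on-disk block and wait for it to hit the disk. */
static int example_commit_block(struct buffer_head *bh,
				const void *data, size_t len)
{
	if (len > bh->b_size)
		return -EINVAL;

	lock_buffer(bh);
	memcpy(bh->b_data, data, len);
	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	mark_buffer_dirty(bh);
	return sync_dirty_buffer(bh);   /* submits a REQ_SYNC write and waits */
}
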
2906 static inline int buffer_busy(struct buffer_head *bh)
2908 return atomic_read(&bh->b_count) |
2909 (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
2916 struct buffer_head *bh;
2918 bh = head;
2920 if (buffer_busy(bh))
2922 bh = bh->b_this_page;
2923 } while (bh != head);
2926 struct buffer_head *next = bh->b_this_page;
2928 if (bh->b_assoc_map)
2929 __remove_assoc_queue(bh);
2930 bh = next;
2931 } while (bh != head);
2976 struct buffer_head *bh = buffers_to_free;
2979 struct buffer_head *next = bh->b_this_page;
2980 free_buffer_head(bh);
2981 bh = next;
2982 } while (bh != buffers_to_free);
2994 * Once the number of bh's in the machine exceeds this level, we start
3002 int nr; /* Number of live bh's */
3036 void free_buffer_head(struct buffer_head *bh)
3038 BUG_ON(!list_empty(&bh->b_assoc_buffers));
3039 kmem_cache_free(bh_cachep, bh);
3063 * @bh: struct buffer_head
3068 int bh_uptodate_or_lock(struct buffer_head *bh)
3070 if (!buffer_uptodate(bh)) {
3071 lock_buffer(bh);
3072 if (!buffer_uptodate(bh))
3074 unlock_buffer(bh);
3082 * @bh: struct buffer_head
3088 int __bh_read(struct buffer_head *bh, blk_opf_t op_flags, bool wait)
3092 BUG_ON(!buffer_locked(bh));
3094 get_bh(bh);
3095 bh->b_end_io = end_buffer_read_sync;
3096 submit_bh(REQ_OP_READ | op_flags, bh);
3098 wait_on_buffer(bh);
3099 if (!buffer_uptodate(bh))
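
bh_uptodate_or_lock() and __bh_read() are the pieces bh_read() is built from: take the buffer lock only if I/O is still needed, then issue the read. The open-coded pattern behind bh_read(), shown as a sketch:

#include <linux/buffer_head.h>

/* Sketch: make sure a mapped buffer's contents are valid before using them. */
static int example_ensure_uptodate(struct buffer_head *bh)
{
	if (bh_uptodate_or_lock(bh))    /* already uptodate: no lock taken */
		return 0;
	/* buffer is now locked and not uptodate: read it and wait */
	return __bh_read(bh, 0, true);  /* 0 on success, -EIO on I/O error */
}
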
3122 struct buffer_head *bh = bhs[i];
3124 if (buffer_uptodate(bh))
3128 lock_buffer(bh);
3130 if (!trylock_buffer(bh))
3133 if (buffer_uptodate(bh)) {
3134 unlock_buffer(bh);
3138 bh->b_end_io = end_buffer_read_sync;
3139 get_bh(bh);
3140 submit_bh(REQ_OP_READ | op_flags, bh);
3157 * Limit the bh occupancy to 10% of ZONE_NORMAL