Lines matching refs: bh (references to the struct buffer_head pointer bh in fs/buffer.c)

55 static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
60 inline void touch_buffer(struct buffer_head *bh)
62 trace_block_touch_buffer(bh);
63 mark_page_accessed(bh->b_page);
67 void __lock_buffer(struct buffer_head *bh)
69 wait_on_bit_lock_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
73 void unlock_buffer(struct buffer_head *bh)
75 clear_bit_unlock(BH_Lock, &bh->b_state);
77 wake_up_bit(&bh->b_state, BH_Lock);
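
The __lock_buffer()/unlock_buffer() pair above is the serialization point for a buffer's contents: the locker sleeps on BH_Lock, the unlocker clears the bit and wakes waiters. A minimal sketch of how a caller typically brackets an in-memory update with it (the helper name and the copied payload are illustrative, not taken from the listing):

#include <linux/buffer_head.h>
#include <linux/string.h>

/* Illustrative only: serialize an in-place update of one buffer. */
static void example_update_buffer(struct buffer_head *bh,
                                  const void *data, size_t len)
{
        lock_buffer(bh);                /* may sleep in __lock_buffer() */
        memcpy(bh->b_data, data, len);  /* caller ensures len <= bh->b_size */
        set_buffer_uptodate(bh);
        mark_buffer_dirty(bh);
        unlock_buffer(bh);              /* clear_bit_unlock() + wake_up_bit() */
}
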
89 struct buffer_head *head, *bh;
102 bh = head;
104 if (buffer_locked(bh))
107 if (buffer_dirty(bh))
110 bh = bh->b_this_page;
111 } while (bh != head);
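
The do/while walk above (start at head, follow b_this_page, stop when it wraps back to head) is the standard traversal of every buffer attached to a page. A hedged sketch of the same loop, here only counting dirty buffers; the function name is invented for illustration and the caller is assumed to hold the page lock so the ring cannot be torn down:

#include <linux/buffer_head.h>

/* Illustrative: visit each buffer_head on a locked page via b_this_page. */
static unsigned int example_count_dirty_buffers(struct page *page)
{
        struct buffer_head *head, *bh;
        unsigned int nr = 0;

        if (!page_has_buffers(page))
                return 0;

        head = page_buffers(page);
        bh = head;
        do {
                if (buffer_dirty(bh))
                        nr++;
                bh = bh->b_this_page;
        } while (bh != head);

        return nr;
}
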
120 void __wait_on_buffer(struct buffer_head * bh)
122 wait_on_bit_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
126 static void buffer_io_error(struct buffer_head *bh, char *msg)
128 if (!test_bit(BH_Quiet, &bh->b_state))
131 bh->b_bdev, (unsigned long long)bh->b_blocknr, msg);
135 * End-of-IO handler helper function which does not touch the bh after
137 * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
138 a race there is benign: unlock_buffer() only uses the bh's address for
139 * hashing after unlocking the buffer, so it doesn't actually touch the bh
142 static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
145 set_buffer_uptodate(bh);
148 clear_buffer_uptodate(bh);
150 unlock_buffer(bh);
157 void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
159 __end_buffer_read_notouch(bh, uptodate);
160 put_bh(bh);
164 void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
167 set_buffer_uptodate(bh);
169 buffer_io_error(bh, ", lost sync page write");
170 mark_buffer_write_io_error(bh);
171 clear_buffer_uptodate(bh);
173 unlock_buffer(bh);
174 put_bh(bh);
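
end_buffer_read_sync() and end_buffer_write_sync() are the completion handlers for plain synchronous I/O: the submitter takes an extra reference, points b_end_io at one of them, submits, and sleeps in wait_on_buffer(). A sketch of the read side, mirroring what __bread_slow() (line 1200 below) does, assuming the three-argument submit_bh() shown in this listing:

#include <linux/buffer_head.h>
#include <linux/blk_types.h>
#include <linux/errno.h>

/* Illustrative: synchronously read one buffer that may not be uptodate. */
static int example_read_bh_sync(struct buffer_head *bh)
{
        lock_buffer(bh);
        if (buffer_uptodate(bh)) {
                unlock_buffer(bh);
                return 0;
        }
        get_bh(bh);                     /* dropped by end_buffer_read_sync() */
        bh->b_end_io = end_buffer_read_sync;
        submit_bh(REQ_OP_READ, 0, bh);
        wait_on_buffer(bh);
        return buffer_uptodate(bh) ? 0 : -EIO;
}
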
195 struct buffer_head *bh;
210 bh = head;
212 if (!buffer_mapped(bh))
214 else if (bh->b_blocknr == block) {
215 ret = bh;
216 get_bh(bh);
219 bh = bh->b_this_page;
220 } while (bh != head);
233 (unsigned long long)bh->b_blocknr,
234 bh->b_state, bh->b_size, bdev,
244 static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
252 BUG_ON(!buffer_async_read(bh));
254 page = bh->b_page;
256 set_buffer_uptodate(bh);
258 clear_buffer_uptodate(bh);
259 buffer_io_error(bh, ", async page read");
270 clear_buffer_async_read(bh);
271 unlock_buffer(bh);
272 tmp = bh;
281 } while (tmp != bh);
300 struct buffer_head *bh;
307 struct buffer_head *bh = ctx->bh;
310 err = fscrypt_decrypt_pagecache_blocks(bh->b_page, bh->b_size,
311 bh_offset(bh));
312 end_buffer_async_read(bh, err == 0);
320 static void end_buffer_async_read_io(struct buffer_head *bh, int uptodate)
324 fscrypt_inode_uses_fs_layer_crypto(bh->b_page->mapping->host)) {
329 ctx->bh = bh;
335 end_buffer_async_read(bh, uptodate);
342 void end_buffer_async_write(struct buffer_head *bh, int uptodate)
349 BUG_ON(!buffer_async_write(bh));
351 page = bh->b_page;
353 set_buffer_uptodate(bh);
355 buffer_io_error(bh, ", lost async page write");
356 mark_buffer_write_io_error(bh);
357 clear_buffer_uptodate(bh);
364 clear_buffer_async_write(bh);
365 unlock_buffer(bh);
366 tmp = bh->b_this_page;
367 while (tmp != bh) {
405 static void mark_buffer_async_read(struct buffer_head *bh)
407 bh->b_end_io = end_buffer_async_read_io;
408 set_buffer_async_read(bh);
411 static void mark_buffer_async_write_endio(struct buffer_head *bh,
414 bh->b_end_io = handler;
415 set_buffer_async_write(bh);
418 void mark_buffer_async_write(struct buffer_head *bh)
420 mark_buffer_async_write_endio(bh, end_buffer_async_write);
477 static void __remove_assoc_queue(struct buffer_head *bh)
479 list_del_init(&bh->b_assoc_buffers);
480 WARN_ON(!bh->b_assoc_map);
481 bh->b_assoc_map = NULL;
501 struct buffer_head *bh;
508 bh = BH_ENTRY(p);
509 if (buffer_locked(bh)) {
510 get_bh(bh);
512 wait_on_buffer(bh);
513 if (!buffer_uptodate(bh))
515 brelse(bh);
562 struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
563 if (bh) {
564 if (buffer_dirty(bh))
565 ll_rw_block(REQ_OP_WRITE, 0, 1, &bh);
566 put_bh(bh);
570 void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
573 struct address_space *buffer_mapping = bh->b_page->mapping;
575 mark_buffer_dirty(bh);
581 if (!bh->b_assoc_map) {
583 list_move_tail(&bh->b_assoc_buffers,
585 bh->b_assoc_map = mapping;
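
mark_buffer_dirty_inode() dirties the buffer and also chains it onto the owning inode's association list via b_assoc_buffers/b_assoc_map, so a later fsync of that inode (see fsync_buffers_list() below) writes the metadata block as well. A hedged sketch of a filesystem updating an on-disk field this way; sb_bread() and the offset arithmetic are illustrative assumptions, not taken from the listing:

#include <linux/buffer_head.h>
#include <linux/errno.h>

/* Illustrative: dirty a metadata block and associate it with the inode. */
static int example_touch_inode_block(struct inode *inode, sector_t blk,
                                     unsigned int off, u8 val)
{
        struct buffer_head *bh = sb_bread(inode->i_sb, blk);

        if (!bh)
                return -EIO;
        ((u8 *)bh->b_data)[off] = val;  /* off must stay below bh->b_size */
        mark_buffer_dirty_inode(bh, inode);
        brelse(bh);
        return 0;
}
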
652 struct buffer_head *bh = head;
655 set_buffer_dirty(bh);
656 bh = bh->b_this_page;
657 } while (bh != head);
700 struct buffer_head *bh;
711 bh = BH_ENTRY(list->next);
712 mapping = bh->b_assoc_map;
713 __remove_assoc_queue(bh);
717 if (buffer_dirty(bh) || buffer_locked(bh)) {
718 list_add(&bh->b_assoc_buffers, &tmp);
719 bh->b_assoc_map = mapping;
720 if (buffer_dirty(bh)) {
721 get_bh(bh);
730 write_dirty_buffer(bh, REQ_SYNC);
738 brelse(bh);
749 bh = BH_ENTRY(tmp.prev);
750 get_bh(bh);
751 mapping = bh->b_assoc_map;
752 __remove_assoc_queue(bh);
756 if (buffer_dirty(bh)) {
757 list_add(&bh->b_assoc_buffers,
759 bh->b_assoc_map = mapping;
762 wait_on_buffer(bh);
763 if (!buffer_uptodate(bh))
765 brelse(bh);
818 struct buffer_head *bh = BH_ENTRY(list->next);
819 if (buffer_dirty(bh)) {
823 __remove_assoc_queue(bh);
832 * the size of each buffer. Use the bh->b_this_page linked list to
842 struct buffer_head *bh, *head;
856 bh = alloc_buffer_head(gfp);
857 if (!bh)
860 bh->b_this_page = head;
861 bh->b_blocknr = -1;
862 head = bh;
864 bh->b_size = size;
867 set_bh_page(bh, page, offset);
879 bh = head;
881 free_buffer_head(bh);
892 struct buffer_head *bh, *tail;
894 bh = head;
896 tail = bh;
897 bh = bh->b_this_page;
898 } while (bh);
923 struct buffer_head *bh = head;
928 if (!buffer_mapped(bh)) {
929 bh->b_end_io = NULL;
930 bh->b_private = NULL;
931 bh->b_bdev = bdev;
932 bh->b_blocknr = block;
934 set_buffer_uptodate(bh);
936 set_buffer_mapped(bh);
939 bh = bh->b_this_page;
940 } while (bh != head);
959 struct buffer_head *bh;
979 bh = page_buffers(page);
980 if (bh->b_size == size) {
993 bh = alloc_page_buffers(page, size, true);
1001 link_dev_buffers(page, bh);
1063 struct buffer_head *bh;
1066 bh = __find_get_block(bdev, block, size);
1067 if (bh)
1068 return bh;
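
__getblk()/__getblk_gfp() return a buffer for (bdev, block, size) without issuing any read, which is the right primitive when the block is about to be overwritten in full. A hedged sketch (the zero-fill payload and function name are illustrative):

#include <linux/buffer_head.h>
#include <linux/string.h>
#include <linux/errno.h>

/* Illustrative: obtain a buffer with no read and overwrite it entirely. */
static int example_overwrite_block(struct block_device *bdev,
                                   sector_t block, unsigned int size)
{
        struct buffer_head *bh = __getblk(bdev, block, size);

        if (!bh)
                return -ENOMEM;
        lock_buffer(bh);
        memset(bh->b_data, 0, bh->b_size);
        set_buffer_uptodate(bh);
        unlock_buffer(bh);
        mark_buffer_dirty(bh);
        brelse(bh);
        return 0;
}
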
1101 * @bh: the buffer_head to mark dirty
1108 * mark_buffer_dirty() is atomic. It takes bh->b_page->mapping->private_lock,
1111 void mark_buffer_dirty(struct buffer_head *bh)
1113 WARN_ON_ONCE(!buffer_uptodate(bh));
1115 trace_block_dirty_buffer(bh);
1123 if (buffer_dirty(bh)) {
1125 if (buffer_dirty(bh))
1129 if (!test_set_buffer_dirty(bh)) {
1130 struct page *page = bh->b_page;
1146 void mark_buffer_write_io_error(struct buffer_head *bh)
1150 set_buffer_write_io_error(bh);
1152 if (bh->b_page && bh->b_page->mapping)
1153 mapping_set_error(bh->b_page->mapping, -EIO);
1154 if (bh->b_assoc_map)
1155 mapping_set_error(bh->b_assoc_map, -EIO);
1157 sb = READ_ONCE(bh->b_bdev->bd_super);
1185 void __bforget(struct buffer_head *bh)
1187 clear_buffer_dirty(bh);
1188 if (bh->b_assoc_map) {
1189 struct address_space *buffer_mapping = bh->b_page->mapping;
1192 list_del_init(&bh->b_assoc_buffers);
1193 bh->b_assoc_map = NULL;
1196 __brelse(bh);
1200 static struct buffer_head *__bread_slow(struct buffer_head *bh)
1202 lock_buffer(bh);
1203 if (buffer_uptodate(bh)) {
1204 unlock_buffer(bh);
1205 return bh;
1207 get_bh(bh);
1208 bh->b_end_io = end_buffer_read_sync;
1209 submit_bh(REQ_OP_READ, 0, bh);
1210 wait_on_buffer(bh);
1211 if (buffer_uptodate(bh))
1212 return bh;
1214 brelse(bh);
1260 static void bh_lru_install(struct buffer_head *bh)
1262 struct buffer_head *evictee = bh;
1272 if (evictee == bh) {
1278 get_bh(bh);
1284 * Look up the bh in this cpu's LRU. If it's there, move it to the head.
1295 struct buffer_head *bh = __this_cpu_read(bh_lrus.bhs[i]);
1297 if (bh && bh->b_blocknr == block && bh->b_bdev == bdev &&
1298 bh->b_size == size) {
1305 __this_cpu_write(bh_lrus.bhs[0], bh);
1307 get_bh(bh);
1308 ret = bh;
1324 struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
1326 if (bh == NULL) {
1328 bh = __find_get_block_slow(bdev, block);
1329 if (bh)
1330 bh_lru_install(bh);
1332 touch_buffer(bh);
1334 return bh;
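
__find_get_block() consults the per-CPU LRU (lookup_bh_lru() above) and falls back to the page-cache walk in __find_get_block_slow(); unlike __getblk() it never allocates, so a NULL return simply means the block is not cached. A small sketch of using it as a cache probe:

#include <linux/buffer_head.h>

/* Illustrative: report whether a block is already cached and dirty. */
static bool example_block_cached_dirty(struct block_device *bdev,
                                       sector_t block, unsigned int size)
{
        struct buffer_head *bh = __find_get_block(bdev, block, size);
        bool dirty = false;

        if (bh) {
                dirty = buffer_dirty(bh);
                brelse(bh);     /* drop the reference taken above */
        }
        return dirty;
}
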
1350 struct buffer_head *bh = __find_get_block(bdev, block, size);
1353 if (bh == NULL)
1354 bh = __getblk_slow(bdev, block, size, gfp);
1355 return bh;
1364 struct buffer_head *bh = __getblk(bdev, block, size);
1365 if (likely(bh)) {
1366 ll_rw_block(REQ_OP_READ, REQ_RAHEAD, 1, &bh);
1367 brelse(bh);
1375 struct buffer_head *bh = __getblk_gfp(bdev, block, size, gfp);
1376 if (likely(bh)) {
1377 ll_rw_block(REQ_OP_READ, REQ_RAHEAD, 1, &bh);
1378 brelse(bh);
1384 * __bread_gfp() - reads a specified block and returns the bh
1399 struct buffer_head *bh = __getblk_gfp(bdev, block, size, gfp);
1401 if (likely(bh) && !buffer_uptodate(bh))
1402 bh = __bread_slow(bh);
1403 return bh;
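
__bread_gfp() is the read-and-wait primitive: it gets the buffer, drops into __bread_slow() when it is not uptodate, and returns NULL on I/O failure; __breadahead() (line 1364 above) is the fire-and-forget variant. A hedged sketch of a typical metadata read using the __bread() and __breadahead() wrappers; the prefetch of the next block and the field copy are illustrative:

#include <linux/buffer_head.h>
#include <linux/string.h>
#include <linux/errno.h>

/* Illustrative: read one block synchronously and copy a field out of it. */
static int example_read_field(struct block_device *bdev, sector_t block,
                              unsigned int size, unsigned int off, u32 *out)
{
        struct buffer_head *bh;

        __breadahead(bdev, block + 1, size);    /* hint that the next block follows */
        bh = __bread(bdev, block, size);
        if (!bh)
                return -EIO;
        memcpy(out, bh->b_data + off, sizeof(*out));    /* endian handling omitted */
        brelse(bh);
        return 0;
}
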
1443 void set_bh_page(struct buffer_head *bh,
1446 bh->b_page = page;
1452 bh->b_data = (char *)(0 + offset);
1454 bh->b_data = page_address(page) + offset;
1467 static void discard_buffer(struct buffer_head * bh)
1471 lock_buffer(bh);
1472 clear_buffer_dirty(bh);
1473 bh->b_bdev = NULL;
1474 b_state = bh->b_state;
1476 b_state_old = cmpxchg(&bh->b_state, b_state,
1482 unlock_buffer(bh);
1504 struct buffer_head *head, *bh, *next;
1518 bh = head;
1520 unsigned int next_off = curr_off + bh->b_size;
1521 next = bh->b_this_page;
1533 discard_buffer(bh);
1535 bh = next;
1536 } while (bh != head);
1559 struct buffer_head *bh, *head, *tail;
1562 bh = head;
1564 bh->b_state |= b_state;
1565 tail = bh;
1566 bh = bh->b_this_page;
1567 } while (bh);
1572 bh = head;
1575 set_buffer_dirty(bh);
1577 set_buffer_uptodate(bh);
1578 bh = bh->b_this_page;
1579 } while (bh != head);
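
create_empty_buffers() attaches a ring of unmapped buffer_heads of the given size to a locked page; callers then map each one to a disk block themselves. A hedged sketch using map_bh() from buffer_head.h; the linear block mapping is an illustrative assumption:

#include <linux/buffer_head.h>

/*
 * Illustrative: give a locked page a buffer ring and map the buffers to a
 * contiguous run of blocks starting at first_block.
 */
static void example_attach_and_map(struct page *page, struct super_block *sb,
                                   sector_t first_block)
{
        struct buffer_head *head, *bh;
        sector_t block = first_block;

        if (!page_has_buffers(page))
                create_empty_buffers(page, sb->s_blocksize, 0);

        head = page_buffers(page);
        bh = head;
        do {
                if (!buffer_mapped(bh))
                        map_bh(bh, sb, block);
                block++;
                bh = bh->b_this_page;
        } while (bh != head);
}
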
1614 struct buffer_head *bh;
1636 bh = head;
1638 if (!buffer_mapped(bh) || (bh->b_blocknr < block))
1640 if (bh->b_blocknr >= block + len)
1642 clear_buffer_dirty(bh);
1643 wait_on_buffer(bh);
1644 clear_buffer_req(bh);
1646 bh = bh->b_this_page;
1647 } while (bh != head);
1719 struct buffer_head *bh, *head;
1737 bh = head;
1738 blocksize = bh->b_size;
1758 clear_buffer_dirty(bh);
1759 set_buffer_uptodate(bh);
1760 } else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
1761 buffer_dirty(bh)) {
1762 WARN_ON(bh->b_size != blocksize);
1763 err = get_block(inode, block, bh, 1);
1766 clear_buffer_delay(bh);
1767 if (buffer_new(bh)) {
1769 clear_buffer_new(bh);
1770 clean_bdev_bh_alias(bh);
1773 bh = bh->b_this_page;
1775 } while (bh != head);
1778 if (!buffer_mapped(bh))
1788 lock_buffer(bh);
1789 } else if (!trylock_buffer(bh)) {
1793 if (test_clear_buffer_dirty(bh)) {
1794 mark_buffer_async_write_endio(bh, handler);
1796 unlock_buffer(bh);
1798 } while ((bh = bh->b_this_page) != head);
1802 * drop the bh refcounts early.
1808 struct buffer_head *next = bh->b_this_page;
1809 if (buffer_async_write(bh)) {
1810 submit_bh_wbc(REQ_OP_WRITE, write_flags, bh,
1814 bh = next;
1815 } while (bh != head);
1842 bh = head;
1845 if (buffer_mapped(bh) && buffer_dirty(bh) &&
1846 !buffer_delay(bh)) {
1847 lock_buffer(bh);
1848 mark_buffer_async_write_endio(bh, handler);
1854 clear_buffer_dirty(bh);
1856 } while ((bh = bh->b_this_page) != head);
1862 struct buffer_head *next = bh->b_this_page;
1863 if (buffer_async_write(bh)) {
1864 clear_buffer_dirty(bh);
1865 submit_bh_wbc(REQ_OP_WRITE, write_flags, bh,
1869 bh = next;
1870 } while (bh != head);
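
The loop above is the core of __block_write_full_page(): dirty, mapped buffers are tagged with mark_buffer_async_write_endio() and then streamed out through submit_bh_wbc(). Filesystems normally reach this path from their ->writepage method via block_write_full_page() with a get_block_t callback; a hedged sketch, where examplefs_get_block is assumed to be provided by the filesystem:

#include <linux/buffer_head.h>
#include <linux/writeback.h>

/* Assumed to exist in the filesystem; maps a file block to a disk block. */
extern int examplefs_get_block(struct inode *inode, sector_t iblock,
                               struct buffer_head *bh_result, int create);

/* Illustrative ->writepage built on the wrapper around __block_write_full_page(). */
static int examplefs_writepage(struct page *page, struct writeback_control *wbc)
{
        return block_write_full_page(page, examplefs_get_block, wbc);
}
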
1884 struct buffer_head *head, *bh;
1890 bh = head = page_buffers(page);
1893 block_end = block_start + bh->b_size;
1895 if (buffer_new(bh)) {
1904 set_buffer_uptodate(bh);
1907 clear_buffer_new(bh);
1908 mark_buffer_dirty(bh);
1913 bh = bh->b_this_page;
1914 } while (bh != head);
1919 iomap_to_bh(struct inode *inode, sector_t block, struct buffer_head *bh,
1924 bh->b_bdev = iomap->bdev;
1941 if (!buffer_uptodate(bh) ||
1943 set_buffer_new(bh);
1946 if (!buffer_uptodate(bh) ||
1948 set_buffer_new(bh);
1949 set_buffer_uptodate(bh);
1950 set_buffer_mapped(bh);
1951 set_buffer_delay(bh);
1959 set_buffer_new(bh);
1960 set_buffer_unwritten(bh);
1965 set_buffer_new(bh);
1966 bh->b_blocknr = (iomap->addr + offset - iomap->offset) >>
1968 set_buffer_mapped(bh);
1983 struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
1996 for(bh = head, block_start = 0; bh != head || !block_start;
1997 block++, block_start=block_end, bh = bh->b_this_page) {
2001 if (!buffer_uptodate(bh))
2002 set_buffer_uptodate(bh);
2006 if (buffer_new(bh))
2007 clear_buffer_new(bh);
2008 if (!buffer_mapped(bh)) {
2009 WARN_ON(bh->b_size != blocksize);
2011 err = get_block(inode, block, bh, 1);
2015 iomap_to_bh(inode, block, bh, iomap);
2018 if (buffer_new(bh)) {
2019 clean_bdev_bh_alias(bh);
2021 clear_buffer_new(bh);
2022 set_buffer_uptodate(bh);
2023 mark_buffer_dirty(bh);
2034 if (!buffer_uptodate(bh))
2035 set_buffer_uptodate(bh);
2038 if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
2039 !buffer_unwritten(bh) &&
2041 ll_rw_block(REQ_OP_READ, 0, 1, &bh);
2042 *wait_bh++=bh;
2071 struct buffer_head *bh, *head;
2073 bh = head = page_buffers(page);
2074 blocksize = bh->b_size;
2080 if (!buffer_uptodate(bh))
2083 set_buffer_uptodate(bh);
2084 mark_buffer_dirty(bh);
2086 clear_buffer_new(bh);
2089 bh = bh->b_this_page;
2090 } while (bh != head);
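
__block_write_begin_int() and __block_commit_write() above are what block_write_begin() and generic_write_end() run for buffer-backed pages: map the affected buffers, read the partially overwritten ones, then mark everything dirty and uptodate on commit. A hedged sketch of address_space write_begin/write_end methods wired through them, assuming the flags-taking block_write_begin() signature of this kernel generation and an examplefs_get_block provided by the filesystem:

#include <linux/buffer_head.h>
#include <linux/fs.h>

extern int examplefs_get_block(struct inode *inode, sector_t iblock,
                               struct buffer_head *bh_result, int create);

static int examplefs_write_begin(struct file *file, struct address_space *mapping,
                                 loff_t pos, unsigned int len, unsigned int flags,
                                 struct page **pagep, void **fsdata)
{
        return block_write_begin(mapping, pos, len, flags, pagep,
                                 examplefs_get_block);
}

static int examplefs_write_end(struct file *file, struct address_space *mapping,
                               loff_t pos, unsigned int len, unsigned int copied,
                               struct page *page, void *fsdata)
{
        return generic_write_end(file, mapping, pos, len, copied, page, fsdata);
}
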
2219 struct buffer_head *bh, *head;
2232 bh = head;
2237 if (!buffer_uptodate(bh)) {
2245 bh = bh->b_this_page;
2246 } while (bh != head);
2263 struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
2274 bh = head;
2279 if (buffer_uptodate(bh))
2282 if (!buffer_mapped(bh)) {
2287 WARN_ON(bh->b_size != blocksize);
2288 err = get_block(inode, iblock, bh, 0);
2292 if (!buffer_mapped(bh)) {
2295 set_buffer_uptodate(bh);
2302 if (buffer_uptodate(bh))
2305 arr[nr++] = bh;
2306 } while (i++, iblock++, (bh = bh->b_this_page) != head);
2324 bh = arr[i];
2325 lock_buffer(bh);
2326 mark_buffer_async_read(bh);
2335 bh = arr[i];
2336 if (buffer_uptodate(bh))
2337 end_buffer_async_read(bh, 1);
2339 submit_bh(REQ_OP_READ, 0, bh);
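
block_read_full_page() above gathers the mapped, not-uptodate buffers into arr[], marks them async-read, and submits them; holes are zeroed and already valid buffers are skipped. It is typically installed as a filesystem's ->readpage method; a hedged sketch with an assumed examplefs_get_block:

#include <linux/buffer_head.h>

extern int examplefs_get_block(struct inode *inode, sector_t iblock,
                               struct buffer_head *bh_result, int create);

/* Illustrative ->readpage of this kernel generation. */
static int examplefs_readpage(struct file *file, struct page *page)
{
        return block_read_full_page(page, examplefs_get_block);
}
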
2541 * handler which does not touch the bh after unlocking it.
2543 static void end_buffer_read_nobh(struct buffer_head *bh, int uptodate)
2545 __end_buffer_read_notouch(bh, uptodate);
2555 struct buffer_head *bh;
2560 bh = head;
2563 set_buffer_dirty(bh);
2564 if (!bh->b_this_page)
2565 bh->b_this_page = head;
2566 bh = bh->b_this_page;
2567 } while (bh != head);
2585 struct buffer_head *head, *bh;
2638 for (block_start = 0, block_in_page = 0, bh = head;
2640 block_in_page++, block_start += blocksize, bh = bh->b_this_page) {
2644 bh->b_state = 0;
2649 bh, create);
2652 if (!buffer_mapped(bh))
2654 if (buffer_new(bh))
2655 clean_bdev_bh_alias(bh);
2657 set_buffer_uptodate(bh);
2660 if (buffer_new(bh) || !buffer_mapped(bh)) {
2665 if (buffer_uptodate(bh))
2668 lock_buffer(bh);
2669 bh->b_end_io = end_buffer_read_nobh;
2670 submit_bh(REQ_OP_READ, 0, bh);
2681 for (bh = head; bh; bh = bh->b_this_page) {
2682 wait_on_buffer(bh);
2683 if (!buffer_uptodate(bh))
2724 struct buffer_head *bh;
2744 bh = head;
2746 free_buffer_head(bh);
2883 struct buffer_head *bh;
2905 bh = page_buffers(page);
2908 bh = bh->b_this_page;
2914 if (!buffer_mapped(bh)) {
2915 WARN_ON(bh->b_size != blocksize);
2916 err = get_block(inode, iblock, bh, 0);
2920 if (!buffer_mapped(bh))
2926 set_buffer_uptodate(bh);
2928 if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
2930 ll_rw_block(REQ_OP_READ, 0, 1, &bh);
2931 wait_on_buffer(bh);
2933 if (!buffer_uptodate(bh))
2938 mark_buffer_dirty(bh);
3000 struct buffer_head *bh = bio->bi_private;
3003 set_bit(BH_Quiet, &bh->b_state);
3005 bh->b_end_io(bh, !bio->bi_status);
3009 static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
3014 BUG_ON(!buffer_locked(bh));
3015 BUG_ON(!buffer_mapped(bh));
3016 BUG_ON(!bh->b_end_io);
3017 BUG_ON(buffer_delay(bh));
3018 BUG_ON(buffer_unwritten(bh));
3023 if (test_set_buffer_req(bh) && (op == REQ_OP_WRITE))
3024 clear_buffer_write_io_error(bh);
3028 fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO);
3030 bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
3031 bio_set_dev(bio, bh->b_bdev);
3034 bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
3035 BUG_ON(bio->bi_iter.bi_size != bh->b_size);
3038 bio->bi_private = bh;
3040 if (buffer_meta(bh))
3042 if (buffer_prio(bh))
3046 /* Take care of bh's that straddle the end of the device */
3051 wbc_account_cgroup_owner(wbc, bh->b_page, bh->b_size);
3058 int submit_bh(int op, int op_flags, struct buffer_head *bh)
3060 return submit_bh_wbc(op, op_flags, bh, 0, NULL);
3095 struct buffer_head *bh = bhs[i];
3097 if (!trylock_buffer(bh))
3100 if (test_clear_buffer_dirty(bh)) {
3101 bh->b_end_io = end_buffer_write_sync;
3102 get_bh(bh);
3103 submit_bh(op, op_flags, bh);
3107 if (!buffer_uptodate(bh)) {
3108 bh->b_end_io = end_buffer_read_sync;
3109 get_bh(bh);
3110 submit_bh(op, op_flags, bh);
3114 unlock_buffer(bh);
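
ll_rw_block() is a best-effort batch submitter: it skips buffers it cannot trylock and, for reads, buffers that are already uptodate, so callers that must have the data wait on and re-check every buffer afterwards. A sketch of that read-and-verify pattern, using the four-argument signature shown in this listing:

#include <linux/buffer_head.h>
#include <linux/blk_types.h>
#include <linux/errno.h>

/* Illustrative: batch-read an array of buffers, then verify each one. */
static int example_read_many(struct buffer_head **bhs, int nr)
{
        int i;

        ll_rw_block(REQ_OP_READ, 0, nr, bhs);
        for (i = 0; i < nr; i++) {
                wait_on_buffer(bhs[i]);
                if (!buffer_uptodate(bhs[i]))
                        return -EIO;
        }
        return 0;
}
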
3119 void write_dirty_buffer(struct buffer_head *bh, int op_flags)
3121 lock_buffer(bh);
3122 if (!test_clear_buffer_dirty(bh)) {
3123 unlock_buffer(bh);
3126 bh->b_end_io = end_buffer_write_sync;
3127 get_bh(bh);
3128 submit_bh(REQ_OP_WRITE, op_flags, bh);
3137 int __sync_dirty_buffer(struct buffer_head *bh, int op_flags)
3141 WARN_ON(atomic_read(&bh->b_count) < 1);
3142 lock_buffer(bh);
3143 if (test_clear_buffer_dirty(bh)) {
3145 * The bh should be mapped, but it might not be if the
3148 if (!buffer_mapped(bh)) {
3149 unlock_buffer(bh);
3153 get_bh(bh);
3154 bh->b_end_io = end_buffer_write_sync;
3155 ret = submit_bh(REQ_OP_WRITE, op_flags, bh);
3156 wait_on_buffer(bh);
3157 if (!ret && !buffer_uptodate(bh))
3160 unlock_buffer(bh);
3166 int sync_dirty_buffer(struct buffer_head *bh)
3168 return __sync_dirty_buffer(bh, REQ_SYNC);
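
sync_dirty_buffer() is the synchronous flush for a single buffer: lock it, clear the dirty bit, submit with REQ_SYNC, wait, and return -EIO if the write did not leave it uptodate. A hedged sketch of the dirty-then-flush pattern for a critical metadata update; sb_bread() and the single-byte update are illustrative:

#include <linux/buffer_head.h>
#include <linux/errno.h>

/* Illustrative: update one byte of a metadata block and flush it to disk. */
static int example_update_and_flush(struct super_block *sb, sector_t block,
                                    unsigned int off, u8 val)
{
        struct buffer_head *bh = sb_bread(sb, block);
        int err;

        if (!bh)
                return -EIO;
        lock_buffer(bh);
        ((u8 *)bh->b_data)[off] = val;
        unlock_buffer(bh);
        mark_buffer_dirty(bh);
        err = sync_dirty_buffer(bh);    /* waits for the write to complete */
        brelse(bh);
        return err;
}
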
3192 static inline int buffer_busy(struct buffer_head *bh)
3194 return atomic_read(&bh->b_count) |
3195 (bh->b_state & ((1 << BH_Dirty) | (1 << BH_Lock)));
3202 struct buffer_head *bh;
3204 bh = head;
3206 if (buffer_busy(bh))
3208 bh = bh->b_this_page;
3209 } while (bh != head);
3212 struct buffer_head *next = bh->b_this_page;
3214 if (bh->b_assoc_map)
3215 __remove_assoc_queue(bh);
3216 bh = next;
3217 } while (bh != head);
3262 struct buffer_head *bh = buffers_to_free;
3265 struct buffer_head *next = bh->b_this_page;
3266 free_buffer_head(bh);
3267 bh = next;
3268 } while (bh != buffers_to_free);
3307 * Once the number of bh's in the machine exceeds this level, we start
3315 int nr; /* Number of live bh's */
3349 void free_buffer_head(struct buffer_head *bh)
3351 BUG_ON(!list_empty(&bh->b_assoc_buffers));
3352 kmem_cache_free(bh_cachep, bh);
3376 * @bh: struct buffer_head
3381 int bh_uptodate_or_lock(struct buffer_head *bh)
3383 if (!buffer_uptodate(bh)) {
3384 lock_buffer(bh);
3385 if (!buffer_uptodate(bh))
3387 unlock_buffer(bh);
3395 * @bh: struct buffer_head
3399 int bh_submit_read(struct buffer_head *bh)
3401 BUG_ON(!buffer_locked(bh));
3403 if (buffer_uptodate(bh)) {
3404 unlock_buffer(bh);
3408 get_bh(bh);
3409 bh->b_end_io = end_buffer_read_sync;
3410 submit_bh(REQ_OP_READ, 0, bh);
3411 wait_on_buffer(bh);
3412 if (buffer_uptodate(bh))
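
bh_uptodate_or_lock() and bh_submit_read() form a pair: the first returns nonzero if the buffer is already uptodate and otherwise hands it back locked, and the second issues the read on a locked, non-uptodate buffer and waits for it. A small sketch of the combined idiom:

#include <linux/buffer_head.h>

/* Illustrative: make sure a buffer's contents are valid, reading if needed. */
static int example_ensure_uptodate(struct buffer_head *bh)
{
        if (bh_uptodate_or_lock(bh))
                return 0;               /* already uptodate, nothing to do */
        /* bh is now locked and not uptodate: submit the read and wait. */
        return bh_submit_read(bh);      /* 0 on success, -EIO on failure */
}
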
3430 * Limit the bh occupancy to 10% of ZONE_NORMAL