Lines matching refs:bp (a caller-side usage sketch follows the listing)
54 static int __xfs_buf_submit(struct xfs_buf *bp, bool wait);
58 struct xfs_buf *bp)
60 return __xfs_buf_submit(bp, !(bp->b_flags & XBF_ASYNC));
65 struct xfs_buf *bp)
72 * to be both for b_addr and bp->b_page_count > 1.
74 return bp->b_addr && bp->b_page_count > 1;
79 struct xfs_buf *bp)
81 return (bp->b_page_count * PAGE_SIZE);
99 struct xfs_buf *bp)
101 if (bp->b_flags & XBF_NO_IOACCT)
104 ASSERT(bp->b_flags & XBF_ASYNC);
105 spin_lock(&bp->b_lock);
106 if (!(bp->b_state & XFS_BSTATE_IN_FLIGHT)) {
107 bp->b_state |= XFS_BSTATE_IN_FLIGHT;
108 percpu_counter_inc(&bp->b_target->bt_io_count);
110 spin_unlock(&bp->b_lock);
119 struct xfs_buf *bp)
121 lockdep_assert_held(&bp->b_lock);
123 if (bp->b_state & XFS_BSTATE_IN_FLIGHT) {
124 bp->b_state &= ~XFS_BSTATE_IN_FLIGHT;
125 percpu_counter_dec(&bp->b_target->bt_io_count);
131 struct xfs_buf *bp)
133 spin_lock(&bp->b_lock);
134 __xfs_buf_ioacct_dec(bp);
135 spin_unlock(&bp->b_lock);
148 struct xfs_buf *bp)
150 ASSERT(xfs_buf_islocked(bp));
152 bp->b_flags |= XBF_STALE;
159 bp->b_flags &= ~_XBF_DELWRI_Q;
167 spin_lock(&bp->b_lock);
168 __xfs_buf_ioacct_dec(bp);
170 atomic_set(&bp->b_lru_ref, 0);
171 if (!(bp->b_state & XFS_BSTATE_DISPOSE) &&
172 (list_lru_del(&bp->b_target->bt_lru, &bp->b_lru)))
173 atomic_dec(&bp->b_hold);
175 ASSERT(atomic_read(&bp->b_hold) >= 1);
176 spin_unlock(&bp->b_lock);
181 struct xfs_buf *bp,
184 ASSERT(bp->b_maps == NULL);
185 bp->b_map_count = map_count;
188 bp->b_maps = &bp->__b_map;
192 bp->b_maps = kmem_zalloc(map_count * sizeof(struct xfs_buf_map),
194 if (!bp->b_maps)
204 struct xfs_buf *bp)
206 if (bp->b_maps != &bp->__b_map) {
207 kmem_free(bp->b_maps);
208 bp->b_maps = NULL;
220 struct xfs_buf *bp;
225 bp = kmem_cache_zalloc(xfs_buf_cache, GFP_NOFS | __GFP_NOFAIL);
233 atomic_set(&bp->b_hold, 1);
234 atomic_set(&bp->b_lru_ref, 1);
235 init_completion(&bp->b_iowait);
236 INIT_LIST_HEAD(&bp->b_lru);
237 INIT_LIST_HEAD(&bp->b_list);
238 INIT_LIST_HEAD(&bp->b_li_list);
239 sema_init(&bp->b_sema, 0); /* held, no waiters */
240 spin_lock_init(&bp->b_lock);
241 bp->b_target = target;
242 bp->b_mount = target->bt_mount;
243 bp->b_flags = flags;
250 error = xfs_buf_get_maps(bp, nmaps);
252 kmem_cache_free(xfs_buf_cache, bp);
256 bp->b_rhash_key = map[0].bm_bn;
257 bp->b_length = 0;
259 bp->b_maps[i].bm_bn = map[i].bm_bn;
260 bp->b_maps[i].bm_len = map[i].bm_len;
261 bp->b_length += map[i].bm_len;
264 atomic_set(&bp->b_pin_count, 0);
265 init_waitqueue_head(&bp->b_waiters);
267 XFS_STATS_INC(bp->b_mount, xb_create);
268 trace_xfs_buf_init(bp, _RET_IP_);
270 *bpp = bp;
276 struct xfs_buf *bp)
280 ASSERT(bp->b_flags & _XBF_PAGES);
282 if (xfs_buf_is_vmapped(bp))
283 vm_unmap_ram(bp->b_addr, bp->b_page_count);
285 for (i = 0; i < bp->b_page_count; i++) {
286 if (bp->b_pages[i])
287 __free_page(bp->b_pages[i]);
289 mm_account_reclaimed_pages(bp->b_page_count);
291 if (bp->b_pages != bp->b_page_array)
292 kmem_free(bp->b_pages);
293 bp->b_pages = NULL;
294 bp->b_flags &= ~_XBF_PAGES;
301 struct xfs_buf *bp = container_of(cb, struct xfs_buf, b_rcu);
303 xfs_buf_free_maps(bp);
304 kmem_cache_free(xfs_buf_cache, bp);
309 struct xfs_buf *bp)
311 trace_xfs_buf_free(bp, _RET_IP_);
313 ASSERT(list_empty(&bp->b_lru));
315 if (bp->b_flags & _XBF_PAGES)
316 xfs_buf_free_pages(bp);
317 else if (bp->b_flags & _XBF_KMEM)
318 kmem_free(bp->b_addr);
320 call_rcu(&bp->b_rcu, xfs_buf_free_callback);
325 struct xfs_buf *bp,
329 size_t size = BBTOB(bp->b_length);
335 bp->b_addr = kmem_alloc(size, kmflag_mask);
336 if (!bp->b_addr)
339 if (((unsigned long)(bp->b_addr + size - 1) & PAGE_MASK) !=
340 ((unsigned long)bp->b_addr & PAGE_MASK)) {
342 kmem_free(bp->b_addr);
343 bp->b_addr = NULL;
346 bp->b_offset = offset_in_page(bp->b_addr);
347 bp->b_pages = bp->b_page_array;
348 bp->b_pages[0] = kmem_to_page(bp->b_addr);
349 bp->b_page_count = 1;
350 bp->b_flags |= _XBF_KMEM;
356 struct xfs_buf *bp,
368 bp->b_page_count = DIV_ROUND_UP(BBTOB(bp->b_length), PAGE_SIZE);
369 if (bp->b_page_count <= XB_PAGES) {
370 bp->b_pages = bp->b_page_array;
372 bp->b_pages = kzalloc(sizeof(struct page *) * bp->b_page_count,
374 if (!bp->b_pages)
377 bp->b_flags |= _XBF_PAGES;
391 filled = alloc_pages_bulk_array(gfp_mask, bp->b_page_count,
392 bp->b_pages);
393 if (filled == bp->b_page_count) {
394 XFS_STATS_INC(bp->b_mount, xb_page_found);
402 xfs_buf_free_pages(bp);
406 XFS_STATS_INC(bp->b_mount, xb_page_retries);
417 struct xfs_buf *bp,
420 ASSERT(bp->b_flags & _XBF_PAGES);
421 if (bp->b_page_count == 1) {
423 bp->b_addr = page_address(bp->b_pages[0]);
425 bp->b_addr = NULL;
440 bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count,
442 if (bp->b_addr)
448 if (!bp->b_addr)
464 const struct xfs_buf *bp = obj;
472 if (bp->b_rhash_key != map->bm_bn)
475 if (unlikely(bp->b_length != map->bm_len)) {
485 ASSERT(bp->b_flags & XBF_STALE);
544 struct xfs_buf *bp,
548 if (!xfs_buf_trylock(bp)) {
549 XFS_STATS_INC(bp->b_mount, xb_busy_locked);
553 xfs_buf_lock(bp);
554 XFS_STATS_INC(bp->b_mount, xb_get_locked_waited);
562 if (bp->b_flags & XBF_STALE) {
564 xfs_buf_unlock(bp);
567 ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
568 bp->b_flags &= _XBF_KMEM | _XBF_PAGES;
569 bp->b_ops = NULL;
581 struct xfs_buf *bp;
585 bp = rhashtable_lookup(&pag->pag_buf_hash, map, xfs_buf_hash_params);
586 if (!bp || !atomic_inc_not_zero(&bp->b_hold)) {
592 error = xfs_buf_find_lock(bp, flags);
594 xfs_buf_rele(bp);
598 trace_xfs_buf_find(bp, flags, _RET_IP_);
599 *bpp = bp;
618 struct xfs_buf *bp;
639 bp = rhashtable_lookup_get_insert_fast(&pag->pag_buf_hash,
641 if (IS_ERR(bp)) {
642 error = PTR_ERR(bp);
646 if (bp) {
648 atomic_inc(&bp->b_hold);
650 error = xfs_buf_find_lock(bp, flags);
652 xfs_buf_rele(bp);
654 *bpp = bp;
685 struct xfs_buf *bp = NULL;
702 error = xfs_buf_lookup(pag, &cmap, flags, &bp);
707 if (unlikely(!bp)) {
715 flags, &bp);
724 if (!bp->b_addr) {
725 error = _xfs_buf_map_pages(bp, flags);
729 bp->b_page_count);
730 xfs_buf_relse(bp);
740 xfs_buf_ioerror(bp, 0);
743 trace_xfs_buf_get(bp, flags, _RET_IP_);
744 *bpp = bp;
754 struct xfs_buf *bp,
758 ASSERT(bp->b_maps[0].bm_bn != XFS_BUF_DADDR_NULL);
760 bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_READ_AHEAD | XBF_DONE);
761 bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | XBF_READ_AHEAD);
763 return xfs_buf_submit(bp);
785 struct xfs_buf *bp,
788 ASSERT(bp->b_flags & XBF_DONE);
789 ASSERT(bp->b_error == 0);
791 if (!ops || bp->b_ops)
794 bp->b_ops = ops;
795 bp->b_ops->verify_read(bp);
796 if (bp->b_error)
797 bp->b_flags &= ~XBF_DONE;
798 return bp->b_error;
811 struct xfs_buf *bp;
817 error = xfs_buf_get_map(target, map, nmaps, flags, &bp);
821 trace_xfs_buf_read(bp, flags, _RET_IP_);
823 if (!(bp->b_flags & XBF_DONE)) {
826 bp->b_ops = ops;
827 error = _xfs_buf_read(bp, flags);
834 error = xfs_buf_reverify(bp, ops);
838 xfs_buf_relse(bp);
843 bp->b_flags &= ~XBF_READ;
844 ASSERT(bp->b_ops != NULL || ops == NULL);
866 xfs_buf_ioerror_alert(bp, fa);
868 bp->b_flags &= ~XBF_DONE;
869 xfs_buf_stale(bp);
870 xfs_buf_relse(bp);
878 *bpp = bp;
893 struct xfs_buf *bp;
896 XBF_TRYLOCK | XBF_ASYNC | XBF_READ_AHEAD, &bp, ops,
915 struct xfs_buf *bp;
920 error = xfs_buf_get_uncached(target, numblks, flags, &bp);
925 ASSERT(bp->b_map_count == 1);
926 bp->b_rhash_key = XFS_BUF_DADDR_NULL;
927 bp->b_maps[0].bm_bn = daddr;
928 bp->b_flags |= XBF_READ;
929 bp->b_ops = ops;
931 xfs_buf_submit(bp);
932 if (bp->b_error) {
933 error = bp->b_error;
934 xfs_buf_relse(bp);
938 *bpp = bp;
950 struct xfs_buf *bp;
956 error = _xfs_buf_alloc(target, &map, 1, flags & XBF_NO_IOACCT, &bp);
960 error = xfs_buf_alloc_pages(bp, flags);
964 error = _xfs_buf_map_pages(bp, 0);
971 trace_xfs_buf_get_uncached(bp, _RET_IP_);
972 *bpp = bp;
976 xfs_buf_free(bp);
987 struct xfs_buf *bp)
989 trace_xfs_buf_hold(bp, _RET_IP_);
990 atomic_inc(&bp->b_hold);
999 struct xfs_buf *bp)
1001 struct xfs_perag *pag = bp->b_pag;
1005 trace_xfs_buf_rele(bp, _RET_IP_);
1008 ASSERT(list_empty(&bp->b_lru));
1009 if (atomic_dec_and_test(&bp->b_hold)) {
1010 xfs_buf_ioacct_dec(bp);
1011 xfs_buf_free(bp);
1016 ASSERT(atomic_read(&bp->b_hold) > 0);
1023 * reference until we take bp->b_lock. Hence if we don't grab b_lock
1028 spin_lock(&bp->b_lock);
1029 release = atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock);
1037 if ((atomic_read(&bp->b_hold) == 1) && !list_empty(&bp->b_lru))
1038 __xfs_buf_ioacct_dec(bp);
1043 __xfs_buf_ioacct_dec(bp);
1044 if (!(bp->b_flags & XBF_STALE) && atomic_read(&bp->b_lru_ref)) {
1050 if (list_lru_add(&bp->b_target->bt_lru, &bp->b_lru)) {
1051 bp->b_state &= ~XFS_BSTATE_DISPOSE;
1052 atomic_inc(&bp->b_hold);
1062 if (!(bp->b_state & XFS_BSTATE_DISPOSE)) {
1063 list_lru_del(&bp->b_target->bt_lru, &bp->b_lru);
1065 ASSERT(list_empty(&bp->b_lru));
1068 ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
1069 rhashtable_remove_fast(&pag->pag_buf_hash, &bp->b_rhash_head,
1077 spin_unlock(&bp->b_lock);
1080 xfs_buf_free(bp);
1097 struct xfs_buf *bp)
1101 locked = down_trylock(&bp->b_sema) == 0;
1103 trace_xfs_buf_trylock(bp, _RET_IP_);
1105 trace_xfs_buf_trylock_fail(bp, _RET_IP_);
1120 struct xfs_buf *bp)
1122 trace_xfs_buf_lock(bp, _RET_IP_);
1124 if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
1125 xfs_log_force(bp->b_mount, 0);
1126 down(&bp->b_sema);
1128 trace_xfs_buf_lock_done(bp, _RET_IP_);
1133 struct xfs_buf *bp)
1135 ASSERT(xfs_buf_islocked(bp));
1137 up(&bp->b_sema);
1138 trace_xfs_buf_unlock(bp, _RET_IP_);
1143 struct xfs_buf *bp)
1147 if (atomic_read(&bp->b_pin_count) == 0)
1150 add_wait_queue(&bp->b_waiters, &wait);
1153 if (atomic_read(&bp->b_pin_count) == 0)
1157 remove_wait_queue(&bp->b_waiters, &wait);
1163 struct xfs_buf *bp)
1168 if (bp->b_target != lasttarg ||
1171 xfs_buf_ioerror_alert(bp, __this_address);
1173 lasttarg = bp->b_target;
1182 struct xfs_buf *bp,
1185 struct xfs_mount *mp = bp->b_mount;
1188 ++bp->b_retries > cfg->max_retries)
1191 time_after(jiffies, cfg->retry_timeout + bp->b_first_retry_time))
1203 * caller handle the error in bp->b_error appropriately.
1220 struct xfs_buf *bp)
1222 struct xfs_mount *mp = bp->b_mount;
1232 xfs_buf_ioerror_alert_ratelimited(bp);
1238 if (bp->b_flags & _XBF_LOGRECOVERY) {
1246 if (!(bp->b_flags & XBF_ASYNC))
1249 trace_xfs_buf_iodone_async(bp, _RET_IP_);
1251 cfg = xfs_error_get_cfg(mp, XFS_ERR_METADATA, bp->b_error);
1252 if (bp->b_last_error != bp->b_error ||
1253 !(bp->b_flags & (XBF_STALE | XBF_WRITE_FAIL))) {
1254 bp->b_last_error = bp->b_error;
1256 !bp->b_first_retry_time)
1257 bp->b_first_retry_time = jiffies;
1265 if (xfs_buf_ioerror_permanent(bp, cfg)) {
1271 if (bp->b_flags & _XBF_INODES)
1272 xfs_buf_inode_io_fail(bp);
1273 else if (bp->b_flags & _XBF_DQUOTS)
1274 xfs_buf_dquot_io_fail(bp);
1276 ASSERT(list_empty(&bp->b_li_list));
1277 xfs_buf_ioerror(bp, 0);
1278 xfs_buf_relse(bp);
1282 xfs_buf_ioerror(bp, 0);
1283 bp->b_flags |= (XBF_DONE | XBF_WRITE_FAIL);
1284 xfs_buf_submit(bp);
1287 xfs_buf_stale(bp);
1288 bp->b_flags |= XBF_DONE;
1289 bp->b_flags &= ~XBF_WRITE;
1290 trace_xfs_buf_error_relse(bp, _RET_IP_);
1296 struct xfs_buf *bp)
1298 trace_xfs_buf_iodone(bp, _RET_IP_);
1304 if (!bp->b_error && bp->b_io_error)
1305 xfs_buf_ioerror(bp, bp->b_io_error);
1307 if (bp->b_flags & XBF_READ) {
1308 if (!bp->b_error && bp->b_ops)
1309 bp->b_ops->verify_read(bp);
1310 if (!bp->b_error)
1311 bp->b_flags |= XBF_DONE;
1313 if (!bp->b_error) {
1314 bp->b_flags &= ~XBF_WRITE_FAIL;
1315 bp->b_flags |= XBF_DONE;
1318 if (unlikely(bp->b_error) && xfs_buf_ioend_handle_error(bp))
1322 bp->b_last_error = 0;
1323 bp->b_retries = 0;
1324 bp->b_first_retry_time = 0;
1331 if (bp->b_log_item)
1332 xfs_buf_item_done(bp);
1334 if (bp->b_flags & _XBF_INODES)
1335 xfs_buf_inode_iodone(bp);
1336 else if (bp->b_flags & _XBF_DQUOTS)
1337 xfs_buf_dquot_iodone(bp);
1341 bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD |
1344 if (bp->b_flags & XBF_ASYNC)
1345 xfs_buf_relse(bp);
1347 complete(&bp->b_iowait);
1354 struct xfs_buf *bp =
1357 xfs_buf_ioend(bp);
1362 struct xfs_buf *bp)
1364 INIT_WORK(&bp->b_ioend_work, xfs_buf_ioend_work);
1365 queue_work(bp->b_mount->m_buf_workqueue, &bp->b_ioend_work);
1370 struct xfs_buf *bp,
1375 bp->b_error = error;
1376 trace_xfs_buf_ioerror(bp, error, failaddr);
1381 struct xfs_buf *bp,
1384 xfs_buf_alert_ratelimited(bp, "XFS: metadata IO error",
1386 func, (uint64_t)xfs_buf_daddr(bp),
1387 bp->b_length, -bp->b_error);
1398 struct xfs_buf *bp)
1400 bp->b_flags &= ~XBF_DONE;
1401 xfs_buf_stale(bp);
1402 xfs_buf_ioerror(bp, -EIO);
1403 xfs_buf_ioend(bp);
1408 struct xfs_buf *bp)
1412 ASSERT(xfs_buf_islocked(bp));
1414 bp->b_flags |= XBF_WRITE;
1415 bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q |
1418 error = xfs_buf_submit(bp);
1420 xfs_force_shutdown(bp->b_mount, SHUTDOWN_META_IO_ERROR);
1428 struct xfs_buf *bp = (struct xfs_buf *)bio->bi_private;
1431 (bp->b_flags & XBF_WRITE) && (bp->b_flags & XBF_ASYNC) &&
1432 XFS_TEST_ERROR(false, bp->b_mount, XFS_ERRTAG_BUF_IOERROR))
1442 cmpxchg(&bp->b_io_error, 0, error);
1445 if (!bp->b_error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
1446 invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp));
1448 if (atomic_dec_and_test(&bp->b_io_remaining) == 1)
1449 xfs_buf_ioend_async(bp);
1455 struct xfs_buf *bp,
1462 unsigned int total_nr_pages = bp->b_page_count;
1465 sector_t sector = bp->b_maps[map].bm_bn;
1481 size = min_t(int, BBTOB(bp->b_maps[map].bm_len), *count);
1486 atomic_inc(&bp->b_io_remaining);
1489 bio = bio_alloc(bp->b_target->bt_bdev, nr_pages, op, GFP_NOIO);
1492 bio->bi_private = bp;
1500 rbytes = bio_add_page(bio, bp->b_pages[page_index], nbytes,
1512 if (xfs_buf_is_vmapped(bp)) {
1513 flush_kernel_vmap_range(bp->b_addr,
1514 xfs_buf_vmap_len(bp));
1524 atomic_dec(&bp->b_io_remaining);
1525 xfs_buf_ioerror(bp, -EIO);
1533 struct xfs_buf *bp)
1545 bp->b_error = 0;
1547 if (bp->b_flags & XBF_WRITE) {
1555 if (bp->b_ops) {
1556 bp->b_ops->verify_write(bp);
1557 if (bp->b_error) {
1558 xfs_force_shutdown(bp->b_mount,
1562 } else if (bp->b_rhash_key != XFS_BUF_DADDR_NULL) {
1563 struct xfs_mount *mp = bp->b_mount;
1572 __func__, xfs_buf_daddr(bp),
1573 bp->b_length);
1574 xfs_hex_dump(bp->b_addr,
1581 if (bp->b_flags & XBF_READ_AHEAD)
1594 offset = bp->b_offset;
1595 size = BBTOB(bp->b_length);
1597 for (i = 0; i < bp->b_map_count; i++) {
1598 xfs_buf_ioapply_map(bp, i, &offset, &size, op);
1599 if (bp->b_error)
1612 struct xfs_buf *bp)
1614 ASSERT(!(bp->b_flags & XBF_ASYNC));
1616 trace_xfs_buf_iowait(bp, _RET_IP_);
1617 wait_for_completion(&bp->b_iowait);
1618 trace_xfs_buf_iowait_done(bp, _RET_IP_);
1620 return bp->b_error;
1631 struct xfs_buf *bp,
1636 trace_xfs_buf_submit(bp, _RET_IP_);
1638 ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
1655 if (bp->b_mount->m_log &&
1656 xlog_is_shutdown(bp->b_mount->m_log)) {
1657 xfs_buf_ioend_fail(bp);
1666 xfs_buf_hold(bp);
1668 if (bp->b_flags & XBF_WRITE)
1669 xfs_buf_wait_unpin(bp);
1672 bp->b_io_error = 0;
1679 atomic_set(&bp->b_io_remaining, 1);
1680 if (bp->b_flags & XBF_ASYNC)
1681 xfs_buf_ioacct_inc(bp);
1682 _xfs_buf_ioapply(bp);
1689 if (atomic_dec_and_test(&bp->b_io_remaining) == 1) {
1690 if (bp->b_error || !(bp->b_flags & XBF_ASYNC))
1691 xfs_buf_ioend(bp);
1693 xfs_buf_ioend_async(bp);
1697 error = xfs_buf_iowait(bp);
1704 xfs_buf_rele(bp);
1710 struct xfs_buf *bp,
1715 if (bp->b_addr)
1716 return bp->b_addr + offset;
1718 page = bp->b_pages[offset >> PAGE_SHIFT];
1724 struct xfs_buf *bp,
1735 page_index = (boff + bp->b_offset) >> PAGE_SHIFT;
1736 page_offset = (boff + bp->b_offset) & ~PAGE_MASK;
1737 page = bp->b_pages[page_index];
1739 BBTOB(bp->b_length) - boff);
1762 struct xfs_buf *bp,
1765 ASSERT(bp->b_flags & XBF_DONE);
1767 xfs_buf_corruption_error(bp, fa);
1768 xfs_buf_stale(bp);
1788 struct xfs_buf *bp = container_of(item, struct xfs_buf, b_lru);
1791 if (atomic_read(&bp->b_hold) > 1) {
1793 trace_xfs_buf_drain_buftarg(bp, _RET_IP_);
1796 if (!spin_trylock(&bp->b_lock))
1803 atomic_set(&bp->b_lru_ref, 0);
1804 bp->b_state |= XFS_BSTATE_DISPOSE;
1806 spin_unlock(&bp->b_lock);
1850 struct xfs_buf *bp;
1851 bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
1852 list_del_init(&bp->b_lru);
1853 if (bp->b_flags & XBF_WRITE_FAIL) {
1855 xfs_buf_alert_ratelimited(bp,
1858 (long long)xfs_buf_daddr(bp));
1860 xfs_buf_rele(bp);
1886 struct xfs_buf *bp = container_of(item, struct xfs_buf, b_lru);
1890 * we are inverting the lru lock/bp->b_lock here, so use a trylock.
1893 if (!spin_trylock(&bp->b_lock))
1900 if (atomic_add_unless(&bp->b_lru_ref, -1, 0)) {
1901 spin_unlock(&bp->b_lock);
1905 bp->b_state |= XFS_BSTATE_DISPOSE;
1907 spin_unlock(&bp->b_lock);
1925 struct xfs_buf *bp;
1926 bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
1927 list_del_init(&bp->b_lru);
1928 xfs_buf_rele(bp);
2062 struct xfs_buf *bp;
2065 bp = list_first_entry(list, struct xfs_buf, b_list);
2067 xfs_buf_lock(bp);
2068 bp->b_flags &= ~_XBF_DELWRI_Q;
2069 list_del_init(&bp->b_list);
2070 xfs_buf_relse(bp);
2087 struct xfs_buf *bp,
2090 ASSERT(xfs_buf_islocked(bp));
2091 ASSERT(!(bp->b_flags & XBF_READ));
2098 if (bp->b_flags & _XBF_DELWRI_Q) {
2099 trace_xfs_buf_delwri_queued(bp, _RET_IP_);
2103 trace_xfs_buf_delwri_queue(bp, _RET_IP_);
2113 bp->b_flags |= _XBF_DELWRI_Q;
2114 if (list_empty(&bp->b_list)) {
2115 atomic_inc(&bp->b_hold);
2116 list_add_tail(&bp->b_list, list);
2134 struct xfs_buf *bp = container_of(b, struct xfs_buf, b_list);
2137 diff = ap->b_maps[0].bm_bn - bp->b_maps[0].bm_bn;
2157 struct xfs_buf *bp, *n;
2164 list_for_each_entry_safe(bp, n, buffer_list, b_list) {
2166 if (!xfs_buf_trylock(bp))
2168 if (xfs_buf_ispinned(bp)) {
2169 xfs_buf_unlock(bp);
2174 xfs_buf_lock(bp);
2183 if (!(bp->b_flags & _XBF_DELWRI_Q)) {
2184 list_del_init(&bp->b_list);
2185 xfs_buf_relse(bp);
2189 trace_xfs_buf_delwri_split(bp, _RET_IP_);
2197 bp->b_flags &= ~_XBF_DELWRI_Q;
2198 bp->b_flags |= XBF_WRITE;
2200 bp->b_flags &= ~XBF_ASYNC;
2201 list_move_tail(&bp->b_list, wait_list);
2203 bp->b_flags |= XBF_ASYNC;
2204 list_del_init(&bp->b_list);
2206 __xfs_buf_submit(bp, false);
2250 struct xfs_buf *bp;
2256 bp = list_first_entry(&wait_list, struct xfs_buf, b_list);
2258 list_del_init(&bp->b_list);
2264 error2 = xfs_buf_iowait(bp);
2265 xfs_buf_relse(bp);
2290 struct xfs_buf *bp,
2296 ASSERT(bp->b_flags & _XBF_DELWRI_Q);
2298 trace_xfs_buf_delwri_pushbuf(bp, _RET_IP_);
2304 xfs_buf_lock(bp);
2305 list_move(&bp->b_list, &submit_list);
2306 xfs_buf_unlock(bp);
2321 error = xfs_buf_iowait(bp);
2322 bp->b_flags |= _XBF_DELWRI_Q;
2323 xfs_buf_unlock(bp);
2328 void xfs_buf_set_ref(struct xfs_buf *bp, int lru_ref)
2335 if (XFS_TEST_ERROR(false, bp->b_mount, XFS_ERRTAG_BUF_LRU_REF))
2338 atomic_set(&bp->b_lru_ref, lru_ref);
2348 struct xfs_buf *bp,
2351 struct xfs_mount *mp = bp->b_mount;
2355 if (WARN_ON(!bp->b_ops || !bp->b_ops->magic[idx]))
2357 return dmagic == bp->b_ops->magic[idx];
2366 struct xfs_buf *bp,
2369 struct xfs_mount *mp = bp->b_mount;
2373 if (WARN_ON(!bp->b_ops || !bp->b_ops->magic16[idx]))
2375 return dmagic == bp->b_ops->magic16[idx];
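The helpers that recur in the listing (xfs_buf_read_map, xfs_buf_relse, xfs_buf_delwri_queue, xfs_buf_delwri_submit, and the __this_address failure-address marker) imply the usual caller-side buffer lifecycle: read or find a locked, referenced buffer, modify it, queue it on a local delayed-write list, then flush that list in one batch. The sketch below is an illustration only, not code from the file: the function name example_read_and_requeue and its parameters are invented, and because these are kernel-internal XFS interfaces it only builds inside fs/xfs/.

/*
 * Caller-side sketch (assumption, not from the listing above): read a
 * buffer, queue it for delayed write, and flush the batch.  The function
 * name and parameters are hypothetical.
 */
#include "xfs.h"	/* pulls in xfs_linux.h and, via it, xfs_buf.h */
#include "xfs_buf.h"

static int
example_read_and_requeue(
	struct xfs_buftarg		*target,
	xfs_daddr_t			daddr,
	int				numblks,
	const struct xfs_buf_ops	*ops)
{
	struct xfs_buf_map	map = { .bm_bn = daddr, .bm_len = numblks };
	LIST_HEAD(buffer_list);
	struct xfs_buf		*bp;
	int			error;

	/* Returns a locked, referenced buffer; the read verifier runs at I/O completion. */
	error = xfs_buf_read_map(target, &map, 1, 0, &bp, ops, __this_address);
	if (error)
		return error;

	/* ... modify the contents at bp->b_addr while the buffer is still locked ... */

	/* Put the buffer on a local delayed-write list, then drop our lock and reference. */
	xfs_buf_delwri_queue(bp, &buffer_list);
	xfs_buf_relse(bp);

	/* Submit everything on the list for write and wait for completion. */
	return xfs_buf_delwri_submit(&buffer_list);
}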