Lines Matching refs:bp — a cross-reference listing of the lines that reference the buffer pointer bp in the XFS buffer cache implementation (fs/xfs/xfs_buf.c); the number at the start of each entry is that line's position in the file.

55 static int __xfs_buf_submit(struct xfs_buf *bp, bool wait);
59 struct xfs_buf *bp)
61 return __xfs_buf_submit(bp, !(bp->b_flags & XBF_ASYNC));
66 struct xfs_buf *bp)
73 * to be both for b_addr and bp->b_page_count > 1.
75 return bp->b_addr && bp->b_page_count > 1;
80 struct xfs_buf *bp)
82 return (bp->b_page_count * PAGE_SIZE) - bp->b_offset;
100 struct xfs_buf *bp)
102 if (bp->b_flags & XBF_NO_IOACCT)
105 ASSERT(bp->b_flags & XBF_ASYNC);
106 spin_lock(&bp->b_lock);
107 if (!(bp->b_state & XFS_BSTATE_IN_FLIGHT)) {
108 bp->b_state |= XFS_BSTATE_IN_FLIGHT;
109 percpu_counter_inc(&bp->b_target->bt_io_count);
111 spin_unlock(&bp->b_lock);
120 struct xfs_buf *bp)
122 lockdep_assert_held(&bp->b_lock);
124 if (bp->b_state & XFS_BSTATE_IN_FLIGHT) {
125 bp->b_state &= ~XFS_BSTATE_IN_FLIGHT;
126 percpu_counter_dec(&bp->b_target->bt_io_count);
132 struct xfs_buf *bp)
134 spin_lock(&bp->b_lock);
135 __xfs_buf_ioacct_dec(bp);
136 spin_unlock(&bp->b_lock);
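
The three helpers above account for buffers with I/O in flight so that unmount can wait for bt_io_count to drain before tearing down the buftarg. A minimal sketch of how they pair up, mirroring the __xfs_buf_submit() and xfs_buf_rele() paths further down; the helpers are static to xfs_buf.c, so this is illustrative only:

        /* submission side: only async buffers are accounted (cf. lines 1687-1689) */
        if (bp->b_flags & XBF_ASYNC)
                xfs_buf_ioacct_inc(bp); /* first submit sets XFS_BSTATE_IN_FLIGHT */
        _xfs_buf_ioapply(bp);

        /*
         * Release side: xfs_buf_rele() and xfs_buf_stale() call
         * __xfs_buf_ioacct_dec() under b_lock, so bt_io_count balances
         * once the buffer falls idle.
         */
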
149 struct xfs_buf *bp)
151 ASSERT(xfs_buf_islocked(bp));
153 bp->b_flags |= XBF_STALE;
160 bp->b_flags &= ~_XBF_DELWRI_Q;
168 spin_lock(&bp->b_lock);
169 __xfs_buf_ioacct_dec(bp);
171 atomic_set(&bp->b_lru_ref, 0);
172 if (!(bp->b_state & XFS_BSTATE_DISPOSE) &&
173 (list_lru_del(&bp->b_target->bt_lru, &bp->b_lru)))
174 atomic_dec(&bp->b_hold);
176 ASSERT(atomic_read(&bp->b_hold) >= 1);
177 spin_unlock(&bp->b_lock);
182 struct xfs_buf *bp,
185 ASSERT(bp->b_maps == NULL);
186 bp->b_map_count = map_count;
189 bp->b_maps = &bp->__b_map;
193 bp->b_maps = kmem_zalloc(map_count * sizeof(struct xfs_buf_map),
195 if (!bp->b_maps)
205 struct xfs_buf *bp)
207 if (bp->b_maps != &bp->__b_map) {
208 kmem_free(bp->b_maps);
209 bp->b_maps = NULL;
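
xfs_buf_get_maps()/xfs_buf_free_maps() manage the bp->b_maps array describing which disk ranges a (possibly discontiguous) buffer covers. A sketch of how callers build those maps, assuming the xfs_buf_map declarations from xfs_buf.h; blkno, numblks, daddr0/1, len0/1, target, ops and bp are placeholders:

        /* the common case: one contiguous range */
        DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);

        /* discontiguous buffer, e.g. a multi-extent directory block */
        struct xfs_buf_map      vec[2] = {
                { .bm_bn = daddr0, .bm_len = len0 },
                { .bm_bn = daddr1, .bm_len = len1 },
        };

        error = xfs_buf_read_map(target, vec, 2, 0, &bp, ops, __this_address);
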
221 struct xfs_buf *bp;
226 bp = kmem_cache_zalloc(xfs_buf_zone, GFP_NOFS | __GFP_NOFAIL);
234 atomic_set(&bp->b_hold, 1);
235 atomic_set(&bp->b_lru_ref, 1);
236 init_completion(&bp->b_iowait);
237 INIT_LIST_HEAD(&bp->b_lru);
238 INIT_LIST_HEAD(&bp->b_list);
239 INIT_LIST_HEAD(&bp->b_li_list);
240 sema_init(&bp->b_sema, 0); /* held, no waiters */
241 spin_lock_init(&bp->b_lock);
242 bp->b_target = target;
243 bp->b_mount = target->bt_mount;
244 bp->b_flags = flags;
251 error = xfs_buf_get_maps(bp, nmaps);
253 kmem_cache_free(xfs_buf_zone, bp);
257 bp->b_bn = map[0].bm_bn;
258 bp->b_length = 0;
260 bp->b_maps[i].bm_bn = map[i].bm_bn;
261 bp->b_maps[i].bm_len = map[i].bm_len;
262 bp->b_length += map[i].bm_len;
265 atomic_set(&bp->b_pin_count, 0);
266 init_waitqueue_head(&bp->b_waiters);
268 XFS_STATS_INC(bp->b_mount, xb_create);
269 trace_xfs_buf_init(bp, _RET_IP_);
271 *bpp = bp;
281 xfs_buf_t *bp,
285 if (bp->b_pages == NULL) {
286 bp->b_page_count = page_count;
288 bp->b_pages = bp->b_page_array;
290 bp->b_pages = kmem_alloc(sizeof(struct page *) *
292 if (bp->b_pages == NULL)
295 memset(bp->b_pages, 0, sizeof(struct page *) * page_count);
305 xfs_buf_t *bp)
307 if (bp->b_pages != bp->b_page_array) {
308 kmem_free(bp->b_pages);
309 bp->b_pages = NULL;
322 xfs_buf_t *bp)
324 trace_xfs_buf_free(bp, _RET_IP_);
326 ASSERT(list_empty(&bp->b_lru));
328 if (bp->b_flags & _XBF_PAGES) {
331 if (xfs_buf_is_vmapped(bp))
332 vm_unmap_ram(bp->b_addr - bp->b_offset,
333 bp->b_page_count);
335 for (i = 0; i < bp->b_page_count; i++) {
336 struct page *page = bp->b_pages[i];
342 bp->b_page_count;
343 } else if (bp->b_flags & _XBF_KMEM)
344 kmem_free(bp->b_addr);
345 _xfs_buf_free_pages(bp);
346 xfs_buf_free_maps(bp);
347 kmem_cache_free(xfs_buf_zone, bp);
355 xfs_buf_t *bp,
379 size = BBTOB(bp->b_length);
381 int align_mask = xfs_buftarg_dma_alignment(bp->b_target);
382 bp->b_addr = kmem_alloc_io(size, align_mask,
384 if (!bp->b_addr) {
389 if (((unsigned long)(bp->b_addr + size - 1) & PAGE_MASK) !=
390 ((unsigned long)bp->b_addr & PAGE_MASK)) {
392 kmem_free(bp->b_addr);
393 bp->b_addr = NULL;
396 bp->b_offset = offset_in_page(bp->b_addr);
397 bp->b_pages = bp->b_page_array;
398 bp->b_pages[0] = kmem_to_page(bp->b_addr);
399 bp->b_page_count = 1;
400 bp->b_flags |= _XBF_KMEM;
405 start = BBTOB(bp->b_maps[0].bm_bn) >> PAGE_SHIFT;
406 end = (BBTOB(bp->b_maps[0].bm_bn + bp->b_length) + PAGE_SIZE - 1)
409 error = _xfs_buf_get_pages(bp, page_count);
413 offset = bp->b_offset;
414 bp->b_flags |= _XBF_PAGES;
416 for (i = 0; i < bp->b_page_count; i++) {
423 bp->b_page_count = i;
440 XFS_STATS_INC(bp->b_mount, xb_page_retries);
445 XFS_STATS_INC(bp->b_mount, xb_page_found);
449 bp->b_pages[i] = page;
455 for (i = 0; i < bp->b_page_count; i++)
456 __free_page(bp->b_pages[i]);
457 bp->b_flags &= ~_XBF_PAGES;
466 xfs_buf_t *bp,
469 ASSERT(bp->b_flags & _XBF_PAGES);
470 if (bp->b_page_count == 1) {
472 bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
474 bp->b_addr = NULL;
489 bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count,
491 if (bp->b_addr)
497 if (!bp->b_addr)
499 bp->b_addr += bp->b_offset;
514 const struct xfs_buf *bp = obj;
522 if (bp->b_bn != map->bm_bn)
525 if (unlikely(bp->b_length != map->bm_len)) {
534 ASSERT(bp->b_flags & XBF_STALE);
593 xfs_buf_t *bp;
624 bp = rhashtable_lookup_fast(&pag->pag_buf_hash, &cmap,
626 if (bp) {
627 atomic_inc(&bp->b_hold);
651 if (!xfs_buf_trylock(bp)) {
653 xfs_buf_rele(bp);
657 xfs_buf_lock(bp);
666 if (bp->b_flags & XBF_STALE) {
667 ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
668 bp->b_flags &= _XBF_KMEM | _XBF_PAGES;
669 bp->b_ops = NULL;
672 trace_xfs_buf_find(bp, flags, _RET_IP_);
674 *found_bp = bp;
685 struct xfs_buf *bp;
689 error = xfs_buf_find(target, &map, 1, flags, NULL, &bp);
692 return bp;
708 struct xfs_buf *bp;
713 error = xfs_buf_find(target, map, nmaps, flags, NULL, &bp);
729 error = xfs_buf_find(target, map, nmaps, flags, new_bp, &bp);
735 if (bp != new_bp)
739 if (!bp->b_addr) {
740 error = _xfs_buf_map_pages(bp, flags);
744 bp->b_page_count);
745 xfs_buf_relse(bp);
755 xfs_buf_ioerror(bp, 0);
758 trace_xfs_buf_get(bp, flags, _RET_IP_);
759 *bpp = bp;
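
A usage sketch for the lookup/allocation path above, using the single-map xfs_buf_get() wrapper from xfs_buf.h. The buffer comes back locked and held, and no disk read is performed, so the caller is expected to initialise the contents; mp, blkno, numblks and example_buf_ops are placeholders:

        struct xfs_buf  *bp;
        int             error;

        error = xfs_buf_get(mp->m_ddev_targp, blkno, numblks, &bp);
        if (error)
                return error;

        /* locked + held; contents are undefined until we fill them in */
        memset(bp->b_addr, 0, BBTOB(bp->b_length));
        bp->b_ops = &example_buf_ops;

        /*
         * A real caller would log the new contents in a transaction or
         * write them out (see xfs_bwrite() below) before letting go.
         */
        xfs_buf_relse(bp);              /* drop lock and hold */
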
765 xfs_buf_t *bp,
769 ASSERT(bp->b_maps[0].bm_bn != XFS_BUF_DADDR_NULL);
771 bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_READ_AHEAD | XBF_DONE);
772 bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | XBF_READ_AHEAD);
774 return xfs_buf_submit(bp);
796 struct xfs_buf *bp,
799 ASSERT(bp->b_flags & XBF_DONE);
800 ASSERT(bp->b_error == 0);
802 if (!ops || bp->b_ops)
805 bp->b_ops = ops;
806 bp->b_ops->verify_read(bp);
807 if (bp->b_error)
808 bp->b_flags &= ~XBF_DONE;
809 return bp->b_error;
822 struct xfs_buf *bp;
828 error = xfs_buf_get_map(target, map, nmaps, flags, &bp);
832 trace_xfs_buf_read(bp, flags, _RET_IP_);
834 if (!(bp->b_flags & XBF_DONE)) {
837 bp->b_ops = ops;
838 error = _xfs_buf_read(bp, flags);
845 error = xfs_buf_reverify(bp, ops);
849 xfs_buf_relse(bp);
854 bp->b_flags &= ~XBF_READ;
855 ASSERT(bp->b_ops != NULL || ops == NULL);
869 xfs_buf_ioerror_alert(bp, fa);
871 bp->b_flags &= ~XBF_DONE;
872 xfs_buf_stale(bp);
873 xfs_buf_relse(bp);
881 *bpp = bp;
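
A usage sketch for the read path above, via the single-map xfs_buf_read() wrapper. On success the buffer is locked, held, XBF_DONE and already checked by ops->verify_read(); on failure no buffer is handed back, because the error path at lines 869-873 stales and releases it internally. mp, blkno, numblks, example_buf_ops and example_parse() are placeholders:

        struct xfs_buf  *bp;
        int             error;

        error = xfs_buf_read(mp->m_ddev_targp, blkno, numblks, 0, &bp,
                        &example_buf_ops);
        if (error)
                return error;           /* e.g. -EIO or -EFSCORRUPTED */

        example_parse(bp->b_addr);      /* contents are verified at this point */
        xfs_buf_relse(bp);
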
896 struct xfs_buf *bp;
902 XBF_TRYLOCK | XBF_ASYNC | XBF_READ_AHEAD, &bp, ops,
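
xfs_buf_readahead_map() issues a speculative, asynchronous read (XBF_TRYLOCK | XBF_ASYNC | XBF_READ_AHEAD) and deliberately ignores the outcome. A sketch using the single-block wrapper from xfs_buf.h; mp, blkno, numblks and example_buf_ops are placeholders:

        /*
         * Fire and forget: no buffer is returned and I/O errors are
         * dropped; a later xfs_buf_read() finds the cached copy if the
         * readahead completed.
         */
        xfs_buf_readahead(mp->m_ddev_targp, blkno, numblks, &example_buf_ops);
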
919 struct xfs_buf *bp;
924 error = xfs_buf_get_uncached(target, numblks, flags, &bp);
929 ASSERT(bp->b_map_count == 1);
930 bp->b_bn = XFS_BUF_DADDR_NULL; /* always null for uncached buffers */
931 bp->b_maps[0].bm_bn = daddr;
932 bp->b_flags |= XBF_READ;
933 bp->b_ops = ops;
935 xfs_buf_submit(bp);
936 if (bp->b_error) {
937 error = bp->b_error;
938 xfs_buf_relse(bp);
942 *bpp = bp;
955 struct xfs_buf *bp;
961 error = _xfs_buf_alloc(target, &map, 1, flags & XBF_NO_IOACCT, &bp);
966 error = _xfs_buf_get_pages(bp, page_count);
971 bp->b_pages[i] = alloc_page(xb_to_gfp(flags));
972 if (!bp->b_pages[i]) {
977 bp->b_flags |= _XBF_PAGES;
979 error = _xfs_buf_map_pages(bp, 0);
986 trace_xfs_buf_get_uncached(bp, _RET_IP_);
987 *bpp = bp;
992 __free_page(bp->b_pages[i]);
993 _xfs_buf_free_pages(bp);
995 xfs_buf_free_maps(bp);
996 kmem_cache_free(xfs_buf_zone, bp);
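
The uncached paths above build one-off buffers that never enter the per-AG cache (b_bn stays XFS_BUF_DADDR_NULL). A usage sketch modelled on callers such as the mount-time device size probes; mp, daddr, numblks and example_inspect() are placeholders:

        struct xfs_buf  *bp;
        int             error;

        error = xfs_buf_read_uncached(mp->m_ddev_targp, daddr, numblks,
                        0, &bp, NULL);
        if (error)
                return error;

        example_inspect(bp->b_addr);    /* single use: nothing is cached */
        xfs_buf_relse(bp);
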
1008 xfs_buf_t *bp)
1010 trace_xfs_buf_hold(bp, _RET_IP_);
1011 atomic_inc(&bp->b_hold);
1020 xfs_buf_t *bp)
1022 struct xfs_perag *pag = bp->b_pag;
1026 trace_xfs_buf_rele(bp, _RET_IP_);
1029 ASSERT(list_empty(&bp->b_lru));
1030 if (atomic_dec_and_test(&bp->b_hold)) {
1031 xfs_buf_ioacct_dec(bp);
1032 xfs_buf_free(bp);
1037 ASSERT(atomic_read(&bp->b_hold) > 0);
1044 * reference until we take bp->b_lock. Hence if we don't grab b_lock
1049 spin_lock(&bp->b_lock);
1050 release = atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock);
1058 if ((atomic_read(&bp->b_hold) == 1) && !list_empty(&bp->b_lru))
1059 __xfs_buf_ioacct_dec(bp);
1064 __xfs_buf_ioacct_dec(bp);
1065 if (!(bp->b_flags & XBF_STALE) && atomic_read(&bp->b_lru_ref)) {
1071 if (list_lru_add(&bp->b_target->bt_lru, &bp->b_lru)) {
1072 bp->b_state &= ~XFS_BSTATE_DISPOSE;
1073 atomic_inc(&bp->b_hold);
1083 if (!(bp->b_state & XFS_BSTATE_DISPOSE)) {
1084 list_lru_del(&bp->b_target->bt_lru, &bp->b_lru);
1086 ASSERT(list_empty(&bp->b_lru));
1089 ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
1090 rhashtable_remove_fast(&pag->pag_buf_hash, &bp->b_rhash_head,
1098 spin_unlock(&bp->b_lock);
1101 xfs_buf_free(bp);
1118 struct xfs_buf *bp)
1122 locked = down_trylock(&bp->b_sema) == 0;
1124 trace_xfs_buf_trylock(bp, _RET_IP_);
1126 trace_xfs_buf_trylock_fail(bp, _RET_IP_);
1141 struct xfs_buf *bp)
1143 trace_xfs_buf_lock(bp, _RET_IP_);
1145 if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
1146 xfs_log_force(bp->b_mount, 0);
1147 down(&bp->b_sema);
1149 trace_xfs_buf_lock_done(bp, _RET_IP_);
1154 struct xfs_buf *bp)
1156 ASSERT(xfs_buf_islocked(bp));
1158 up(&bp->b_sema);
1159 trace_xfs_buf_unlock(bp, _RET_IP_);
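
The lock helpers above wrap b_sema; note that xfs_buf_lock() pushes the log first when the buffer is pinned and stale so the blocked lock can make progress. A sketch of the non-blocking pattern mirrored from the XBF_TRYLOCK handling in xfs_buf_find() (lines 651-653), assuming bp already carries a hold:

        if (!xfs_buf_trylock(bp)) {
                xfs_buf_rele(bp);       /* back off instead of sleeping */
                return -EAGAIN;
        }

        /* ... use the locked buffer ... */
        xfs_buf_relse(bp);              /* xfs_buf_unlock() + xfs_buf_rele() */
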
1164 xfs_buf_t *bp)
1168 if (atomic_read(&bp->b_pin_count) == 0)
1171 add_wait_queue(&bp->b_waiters, &wait);
1174 if (atomic_read(&bp->b_pin_count) == 0)
1178 remove_wait_queue(&bp->b_waiters, &wait);
1184 struct xfs_buf *bp)
1189 if (bp->b_target != lasttarg ||
1192 xfs_buf_ioerror_alert(bp, __this_address);
1194 lasttarg = bp->b_target;
1203 struct xfs_buf *bp,
1206 struct xfs_mount *mp = bp->b_mount;
1209 ++bp->b_retries > cfg->max_retries)
1212 time_after(jiffies, cfg->retry_timeout + bp->b_first_retry_time))
1224 * caller handle the error in bp->b_error appropriately.
1241 struct xfs_buf *bp)
1243 struct xfs_mount *mp = bp->b_mount;
1253 xfs_buf_ioerror_alert_ratelimited(bp);
1259 if (bp->b_flags & _XBF_LOGRECOVERY) {
1267 if (!(bp->b_flags & XBF_ASYNC))
1270 trace_xfs_buf_iodone_async(bp, _RET_IP_);
1272 cfg = xfs_error_get_cfg(mp, XFS_ERR_METADATA, bp->b_error);
1273 if (bp->b_last_error != bp->b_error ||
1274 !(bp->b_flags & (XBF_STALE | XBF_WRITE_FAIL))) {
1275 bp->b_last_error = bp->b_error;
1277 !bp->b_first_retry_time)
1278 bp->b_first_retry_time = jiffies;
1286 if (xfs_buf_ioerror_permanent(bp, cfg)) {
1292 if (bp->b_flags & _XBF_INODES)
1293 xfs_buf_inode_io_fail(bp);
1294 else if (bp->b_flags & _XBF_DQUOTS)
1295 xfs_buf_dquot_io_fail(bp);
1297 ASSERT(list_empty(&bp->b_li_list));
1298 xfs_buf_ioerror(bp, 0);
1299 xfs_buf_relse(bp);
1303 xfs_buf_ioerror(bp, 0);
1304 bp->b_flags |= (XBF_DONE | XBF_WRITE_FAIL);
1305 xfs_buf_submit(bp);
1308 xfs_buf_stale(bp);
1309 bp->b_flags |= XBF_DONE;
1310 bp->b_flags &= ~XBF_WRITE;
1311 trace_xfs_buf_error_relse(bp, _RET_IP_);
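
The async write error handling above decides between resubmitting a failed buffer and failing it permanently. A sketch restating the "permanent" test visible at lines 1209-1212, with the limits coming from the per-error-class configuration returned by xfs_error_get_cfg(); the function name is illustrative only:

static bool
example_ioerror_permanent(
        struct xfs_buf          *bp,
        struct xfs_error_cfg    *cfg)
{
        if (cfg->max_retries != XFS_ERR_RETRY_FOREVER &&
            ++bp->b_retries > cfg->max_retries)
                return true;            /* retried too many times */
        if (cfg->retry_timeout != XFS_ERR_RETRY_FOREVER &&
            time_after(jiffies, cfg->retry_timeout + bp->b_first_retry_time))
                return true;            /* retried for too long */
        return false;                   /* keep resubmitting with XBF_WRITE_FAIL */
}
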
1317 struct xfs_buf *bp)
1319 trace_xfs_buf_iodone(bp, _RET_IP_);
1325 if (!bp->b_error && bp->b_io_error)
1326 xfs_buf_ioerror(bp, bp->b_io_error);
1328 if (bp->b_flags & XBF_READ) {
1329 if (!bp->b_error && bp->b_ops)
1330 bp->b_ops->verify_read(bp);
1331 if (!bp->b_error)
1332 bp->b_flags |= XBF_DONE;
1334 if (!bp->b_error) {
1335 bp->b_flags &= ~XBF_WRITE_FAIL;
1336 bp->b_flags |= XBF_DONE;
1339 if (unlikely(bp->b_error) && xfs_buf_ioend_handle_error(bp))
1343 bp->b_last_error = 0;
1344 bp->b_retries = 0;
1345 bp->b_first_retry_time = 0;
1352 if (bp->b_log_item)
1353 xfs_buf_item_done(bp);
1355 if (bp->b_flags & _XBF_INODES)
1356 xfs_buf_inode_iodone(bp);
1357 else if (bp->b_flags & _XBF_DQUOTS)
1358 xfs_buf_dquot_iodone(bp);
1362 bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD |
1365 if (bp->b_flags & XBF_ASYNC)
1366 xfs_buf_relse(bp);
1368 complete(&bp->b_iowait);
1375 struct xfs_buf *bp =
1378 xfs_buf_ioend(bp);
1383 struct xfs_buf *bp)
1385 INIT_WORK(&bp->b_ioend_work, xfs_buf_ioend_work);
1386 queue_work(bp->b_mount->m_buf_workqueue, &bp->b_ioend_work);
1391 xfs_buf_t *bp,
1396 bp->b_error = error;
1397 trace_xfs_buf_ioerror(bp, error, failaddr);
1402 struct xfs_buf *bp,
1405 xfs_buf_alert_ratelimited(bp, "XFS: metadata IO error",
1407 func, (uint64_t)XFS_BUF_ADDR(bp),
1408 bp->b_length, -bp->b_error);
1419 struct xfs_buf *bp)
1421 bp->b_flags &= ~XBF_DONE;
1422 xfs_buf_stale(bp);
1423 xfs_buf_ioerror(bp, -EIO);
1424 xfs_buf_ioend(bp);
1429 struct xfs_buf *bp)
1433 ASSERT(xfs_buf_islocked(bp));
1435 bp->b_flags |= XBF_WRITE;
1436 bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q |
1439 error = xfs_buf_submit(bp);
1441 xfs_force_shutdown(bp->b_mount, SHUTDOWN_META_IO_ERROR);
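
xfs_bwrite() is the synchronous write path: the caller must hold the buffer lock, the write waits for completion, and a write error shuts the filesystem down. The buffer stays locked and held afterwards, so the caller still releases it. A minimal sketch:

        /* bp is locked and held, e.g. straight from xfs_buf_get()/xfs_buf_read() */
        error = xfs_bwrite(bp);         /* sets XBF_WRITE, clears XBF_ASYNC, waits */
        xfs_buf_relse(bp);
        if (error)
                return error;
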
1449 struct xfs_buf *bp = (struct xfs_buf *)bio->bi_private;
1452 (bp->b_flags & XBF_WRITE) && (bp->b_flags & XBF_ASYNC) &&
1453 XFS_TEST_ERROR(false, bp->b_mount, XFS_ERRTAG_BUF_IOERROR))
1463 cmpxchg(&bp->b_io_error, 0, error);
1466 if (!bp->b_error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
1467 invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp));
1469 if (atomic_dec_and_test(&bp->b_io_remaining) == 1)
1470 xfs_buf_ioend_async(bp);
1476 struct xfs_buf *bp,
1483 int total_nr_pages = bp->b_page_count;
1486 sector_t sector = bp->b_maps[map].bm_bn;
1502 size = min_t(int, BBTOB(bp->b_maps[map].bm_len), *count);
1507 atomic_inc(&bp->b_io_remaining);
1511 bio_set_dev(bio, bp->b_target->bt_bdev);
1514 bio->bi_private = bp;
1523 rbytes = bio_add_page(bio, bp->b_pages[page_index], nbytes,
1535 if (xfs_buf_is_vmapped(bp)) {
1536 flush_kernel_vmap_range(bp->b_addr,
1537 xfs_buf_vmap_len(bp));
1547 atomic_dec(&bp->b_io_remaining);
1548 xfs_buf_ioerror(bp, -EIO);
1556 struct xfs_buf *bp)
1568 bp->b_error = 0;
1570 if (bp->b_flags & XBF_WRITE) {
1578 if (bp->b_ops) {
1579 bp->b_ops->verify_write(bp);
1580 if (bp->b_error) {
1581 xfs_force_shutdown(bp->b_mount,
1585 } else if (bp->b_bn != XFS_BUF_DADDR_NULL) {
1586 struct xfs_mount *mp = bp->b_mount;
1595 __func__, bp->b_bn, bp->b_length);
1596 xfs_hex_dump(bp->b_addr,
1603 if (bp->b_flags & XBF_READ_AHEAD)
1616 offset = bp->b_offset;
1617 size = BBTOB(bp->b_length);
1619 for (i = 0; i < bp->b_map_count; i++) {
1620 xfs_buf_ioapply_map(bp, i, &offset, &size, op);
1621 if (bp->b_error)
1634 struct xfs_buf *bp)
1636 ASSERT(!(bp->b_flags & XBF_ASYNC));
1638 trace_xfs_buf_iowait(bp, _RET_IP_);
1639 wait_for_completion(&bp->b_iowait);
1640 trace_xfs_buf_iowait_done(bp, _RET_IP_);
1642 return bp->b_error;
1653 struct xfs_buf *bp,
1658 trace_xfs_buf_submit(bp, _RET_IP_);
1660 ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
1663 if (XFS_FORCED_SHUTDOWN(bp->b_mount)) {
1664 xfs_buf_ioend_fail(bp);
1673 xfs_buf_hold(bp);
1675 if (bp->b_flags & XBF_WRITE)
1676 xfs_buf_wait_unpin(bp);
1679 bp->b_io_error = 0;
1686 atomic_set(&bp->b_io_remaining, 1);
1687 if (bp->b_flags & XBF_ASYNC)
1688 xfs_buf_ioacct_inc(bp);
1689 _xfs_buf_ioapply(bp);
1696 if (atomic_dec_and_test(&bp->b_io_remaining) == 1) {
1697 if (bp->b_error || !(bp->b_flags & XBF_ASYNC))
1698 xfs_buf_ioend(bp);
1700 xfs_buf_ioend_async(bp);
1704 error = xfs_buf_iowait(bp);
1711 xfs_buf_rele(bp);
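
__xfs_buf_submit() is the common submission path; the xfs_buf_submit() wrapper near line 55 derives the wait argument from XBF_ASYNC. Buffer ownership differs between the two modes, roughly as sketched below:

        /*
         * Synchronous: the caller keeps its lock and hold and can look at
         * bp->b_error (also returned here) after submission completes.
         */
        bp->b_flags &= ~XBF_ASYNC;
        error = xfs_buf_submit(bp);

        /*
         * Asynchronous: completion (xfs_buf_ioend) releases the buffer,
         * so the caller must not touch bp after submitting it.
         */
        bp->b_flags |= XBF_ASYNC;
        xfs_buf_submit(bp);
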
1717 struct xfs_buf *bp,
1722 if (bp->b_addr)
1723 return bp->b_addr + offset;
1725 offset += bp->b_offset;
1726 page = bp->b_pages[offset >> PAGE_SHIFT];
1732 struct xfs_buf *bp,
1743 page_index = (boff + bp->b_offset) >> PAGE_SHIFT;
1744 page_offset = (boff + bp->b_offset) & ~PAGE_MASK;
1745 page = bp->b_pages[page_index];
1747 BBTOB(bp->b_length) - boff);
1770 struct xfs_buf *bp,
1773 ASSERT(bp->b_flags & XBF_DONE);
1775 xfs_buf_corruption_error(bp, fa);
1776 xfs_buf_stale(bp);
1796 struct xfs_buf *bp = container_of(item, struct xfs_buf, b_lru);
1799 if (atomic_read(&bp->b_hold) > 1) {
1801 trace_xfs_buf_wait_buftarg(bp, _RET_IP_);
1804 if (!spin_trylock(&bp->b_lock))
1811 atomic_set(&bp->b_lru_ref, 0);
1812 bp->b_state |= XFS_BSTATE_DISPOSE;
1814 spin_unlock(&bp->b_lock);
1848 struct xfs_buf *bp;
1849 bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
1850 list_del_init(&bp->b_lru);
1851 if (bp->b_flags & XBF_WRITE_FAIL) {
1853 xfs_buf_alert_ratelimited(bp,
1856 (long long)bp->b_bn);
1858 xfs_buf_rele(bp);
1884 struct xfs_buf *bp = container_of(item, struct xfs_buf, b_lru);
1888 * we are inverting the lru lock/bp->b_lock here, so use a trylock.
1891 if (!spin_trylock(&bp->b_lock))
1898 if (atomic_add_unless(&bp->b_lru_ref, -1, 0)) {
1899 spin_unlock(&bp->b_lock);
1903 bp->b_state |= XFS_BSTATE_DISPOSE;
1905 spin_unlock(&bp->b_lock);
1923 struct xfs_buf *bp;
1924 bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
1925 list_del_init(&bp->b_lru);
1926 xfs_buf_rele(bp);
2050 struct xfs_buf *bp;
2053 bp = list_first_entry(list, struct xfs_buf, b_list);
2055 xfs_buf_lock(bp);
2056 bp->b_flags &= ~_XBF_DELWRI_Q;
2057 list_del_init(&bp->b_list);
2058 xfs_buf_relse(bp);
2075 struct xfs_buf *bp,
2078 ASSERT(xfs_buf_islocked(bp));
2079 ASSERT(!(bp->b_flags & XBF_READ));
2086 if (bp->b_flags & _XBF_DELWRI_Q) {
2087 trace_xfs_buf_delwri_queued(bp, _RET_IP_);
2091 trace_xfs_buf_delwri_queue(bp, _RET_IP_);
2101 bp->b_flags |= _XBF_DELWRI_Q;
2102 if (list_empty(&bp->b_list)) {
2103 atomic_inc(&bp->b_hold);
2104 list_add_tail(&bp->b_list, list);
2122 struct xfs_buf *bp = container_of(b, struct xfs_buf, b_list);
2125 diff = ap->b_maps[0].bm_bn - bp->b_maps[0].bm_bn;
2145 struct xfs_buf *bp, *n;
2152 list_for_each_entry_safe(bp, n, buffer_list, b_list) {
2154 if (xfs_buf_ispinned(bp)) {
2158 if (!xfs_buf_trylock(bp))
2161 xfs_buf_lock(bp);
2170 if (!(bp->b_flags & _XBF_DELWRI_Q)) {
2171 list_del_init(&bp->b_list);
2172 xfs_buf_relse(bp);
2176 trace_xfs_buf_delwri_split(bp, _RET_IP_);
2184 bp->b_flags &= ~_XBF_DELWRI_Q;
2185 bp->b_flags |= XBF_WRITE;
2187 bp->b_flags &= ~XBF_ASYNC;
2188 list_move_tail(&bp->b_list, wait_list);
2190 bp->b_flags |= XBF_ASYNC;
2191 list_del_init(&bp->b_list);
2193 __xfs_buf_submit(bp, false);
2237 struct xfs_buf *bp;
2243 bp = list_first_entry(&wait_list, struct xfs_buf, b_list);
2245 list_del_init(&bp->b_list);
2251 error2 = xfs_buf_iowait(bp);
2252 xfs_buf_relse(bp);
2277 struct xfs_buf *bp,
2283 ASSERT(bp->b_flags & _XBF_DELWRI_Q);
2285 trace_xfs_buf_delwri_pushbuf(bp, _RET_IP_);
2291 xfs_buf_lock(bp);
2292 list_move(&bp->b_list, &submit_list);
2293 xfs_buf_unlock(bp);
2308 error = xfs_buf_iowait(bp);
2309 bp->b_flags |= _XBF_DELWRI_Q;
2310 xfs_buf_unlock(bp);
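
The delwri machinery batches deferred writes: xfs_buf_delwri_queue() takes a hold and tags the buffer _XBF_DELWRI_Q, xfs_buf_delwri_submit_buffers() sorts and issues the batch, and the submit/submit_nowait front ends decide whether to wait. The canonical caller pattern, as used for example by the inode and dquot flushing paths:

        LIST_HEAD(buffer_list);
        int     error;

        /* queue under the buffer lock; the list takes its own hold */
        xfs_buf_lock(bp);
        xfs_buf_delwri_queue(bp, &buffer_list); /* false if already queued */
        xfs_buf_unlock(bp);

        /* ... queue more buffers ... */

        error = xfs_buf_delwri_submit(&buffer_list);    /* write and wait */
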
2338 void xfs_buf_set_ref(struct xfs_buf *bp, int lru_ref)
2345 if (XFS_TEST_ERROR(false, bp->b_mount, XFS_ERRTAG_BUF_LRU_REF))
2348 atomic_set(&bp->b_lru_ref, lru_ref);
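
xfs_buf_set_ref() biases how long a buffer survives on the LRU: b_lru_ref is what the shrinker isolate callback above decrements before a buffer becomes reclaimable. Callers typically apply a per-structure reference constant right after reading a buffer, for example (assuming the usual XFS_AGF_REF constant):

        /* keep AG free space headers around longer than ordinary metadata */
        xfs_buf_set_ref(bp, XFS_AGF_REF);
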
2358 struct xfs_buf *bp,
2361 struct xfs_mount *mp = bp->b_mount;
2365 if (WARN_ON(!bp->b_ops || !bp->b_ops->magic[idx]))
2367 return dmagic == bp->b_ops->magic[idx];
2376 struct xfs_buf *bp,
2379 struct xfs_mount *mp = bp->b_mount;
2383 if (WARN_ON(!bp->b_ops || !bp->b_ops->magic16[idx]))
2385 return dmagic == bp->b_ops->magic16[idx];
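
xfs_verify_magic()/xfs_verify_magic16() select the expected magic number from b_ops according to whether the superblock has CRCs enabled (index 0 is the non-CRC variant, index 1 the CRC variant). A sketch of how a verifier wires this up; everything prefixed example_, including the magic values, is hypothetical, but the shape mirrors the existing btree and AG header verifiers:

struct example_hdr {
        __be32                  magic;
        /* ... */
};

static xfs_failaddr_t
example_verify(
        struct xfs_buf          *bp)
{
        struct example_hdr      *hdr = bp->b_addr;

        if (!xfs_verify_magic(bp, hdr->magic))
                return __this_address;
        return NULL;
}

static void
example_read_verify(
        struct xfs_buf          *bp)
{
        xfs_failaddr_t          fa = example_verify(bp);

        if (fa)
                xfs_verifier_error(bp, -EFSCORRUPTED, fa);
}

static void
example_write_verify(
        struct xfs_buf          *bp)
{
        xfs_failaddr_t          fa = example_verify(bp);

        if (fa)
                xfs_verifier_error(bp, -EFSCORRUPTED, fa);
}

const struct xfs_buf_ops example_buf_ops = {
        .name           = "example",
        .magic          = { cpu_to_be32(0x45584d50),    /* !hascrc */
                            cpu_to_be32(0x45584d43) },  /* hascrc */
        .verify_read    = example_read_verify,
        .verify_write   = example_write_verify,
};
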