Lines Matching defs:bio
20 #include <linux/bio.h>
45 * For REQ_POLLED fs bio, this flag is set if we link mapped underlying
46 * dm_io into one list, and reuse bio->bi_private as the list head. Before
47 * ending this fs bio, we will restore its ->bi_private.
80 * One of these is allocated (on-stack) per original bio.
84 struct bio *bio;
92 static inline struct dm_target_io *clone_to_tio(struct bio *clone)
97 void *dm_per_bio_data(struct bio *bio, size_t data_size)
99 if (!dm_tio_flagged(clone_to_tio(bio), DM_TIO_INSIDE_DM_IO))
100 return (char *)bio - DM_TARGET_IO_BIO_OFFSET - data_size;
101 return (char *)bio - DM_IO_BIO_OFFSET - data_size;
105 struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size)
110 return (struct bio *)((char *)io + DM_IO_BIO_OFFSET);
112 return (struct bio *)((char *)io + DM_TARGET_IO_BIO_OFFSET);
116 unsigned int dm_bio_get_target_bio_nr(const struct bio *bio)
118 return container_of(bio, struct dm_target_io, clone)->target_bio_nr;
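dm_per_bio_data() and dm_bio_from_per_bio_data() are the target-facing accessors for the per-bio data area that DM core front-pads onto each clone, so targets get per-I/O context without a separate allocation. A minimal sketch of how a hypothetical target might use them; example_per_bio, example_ctr and example_map are illustrative names, not code from dm.c:

#include <linux/device-mapper.h>

/* Hypothetical per-bio context; DM reserves it via ti->per_io_data_size. */
struct example_per_bio {
	sector_t start_sector;
};

static int example_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	ti->per_io_data_size = sizeof(struct example_per_bio);
	return 0;
}

static int example_map(struct dm_target *ti, struct bio *bio)
{
	struct example_per_bio *pb = dm_per_bio_data(bio, sizeof(*pb));

	pb->start_sector = bio->bi_iter.bi_sector;
	/* The inverse accessor round-trips back to the same clone bio. */
	WARN_ON_ONCE(dm_bio_from_per_bio_data(pb, sizeof(*pb)) != bio);

	/* Remapping to an underlying device is omitted in this sketch. */
	return DM_MAPIO_REMAPPED;
}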
484 u64 dm_start_time_ns_from_clone(struct bio *bio)
486 return jiffies_to_nsecs(clone_to_tio(bio)->io->start_time);
490 static inline bool bio_is_flush_with_data(struct bio *bio)
492 return ((bio->bi_opf & REQ_PREFLUSH) && bio->bi_iter.bi_size);
495 static inline unsigned int dm_io_sectors(struct dm_io *io, struct bio *bio)
501 if (bio_is_flush_with_data(bio))
505 return bio_sectors(bio);
510 struct bio *bio = io->orig_bio;
514 bdev_start_io_acct(bio->bi_bdev, bio_op(bio),
517 bdev_end_io_acct(bio->bi_bdev, bio_op(bio),
518 dm_io_sectors(io, bio),
527 sector = bio_end_sector(bio) - io->sector_offset;
529 sector = bio->bi_iter.bi_sector;
531 dm_stats_account_io(&io->md->stats, bio_data_dir(bio),
532 sector, dm_io_sectors(io, bio),
542 static void dm_start_io_acct(struct dm_io *io, struct bio *clone)
573 static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio)
577 struct bio *clone;
579 clone = bio_alloc_clone(NULL, bio, GFP_NOIO, &md->mempools->io_bs);
592 io->orig_bio = bio;
612 static struct bio *alloc_tio(struct clone_info *ci, struct dm_target *ti,
617 struct bio *clone;
625 clone = bio_alloc_clone(NULL, ci->bio, gfp_mask,
658 static void free_tio(struct bio *clone)
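alloc_io() and alloc_tio() build their clones out of the mapped device's biosets with bio_alloc_clone(). The same block-layer pattern in isolation, assuming a hypothetical driver-owned bio_set (example_bs) rather than md->mempools:

#include <linux/bio.h>

static struct bio_set example_bs;	/* hypothetical driver bioset */

static int example_init(void)
{
	/*
	 * 16 reserved entries, no front padding in this sketch; clones share
	 * the original's bvec table, so BIOSET_NEED_BVECS is not required.
	 */
	return bioset_init(&example_bs, 16, 0, 0);
}

static struct bio *example_clone(struct bio *orig)
{
	/*
	 * GFP_NOIO lets the mempool guarantee forward progress even under
	 * memory pressure on the writeback path.
	 */
	return bio_alloc_clone(orig->bi_bdev, orig, GFP_NOIO, &example_bs);
}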
666 * Add the bio to the list of deferred io.
668 static void queue_io(struct mapped_device *md, struct bio *bio)
673 bio_list_add(&md->deferred, bio);
879 * Return true if the dm_io's original bio is requeued.
884 struct bio *bio = io->orig_bio;
887 (bio->bi_opf & REQ_POLLED));
894 if (bio->bi_opf & REQ_POLLED) {
896 * The upper layer won't help us poll a split bio
900 bio_clear_polled(bio);
909 !WARN_ON_ONCE(dm_is_zone_write(md, bio))) ||
931 struct bio *bio = io->orig_bio;
959 /* Return early if the original bio was requeued */
963 if (bio_is_flush_with_data(bio)) {
968 bio->bi_opf &= ~REQ_PREFLUSH;
969 queue_io(md, bio);
973 bio->bi_status = io_error;
974 bio_endio(bio);
1006 * 1) io->orig_bio points to the real original bio, and the part mapped to
1007 * this io must be requeued, instead of other parts of the original bio.
1009 * 2) io->orig_bio points to new cloned bio which matches the requeued dm_io.
1017 * we may run into a long bio clone chain during suspend and OOM could
1032 * Decrements the number of outstanding ios that a bio has been
1087 static bool swap_bios_limit(struct dm_target *ti, struct bio *bio)
1089 return unlikely((bio->bi_opf & REQ_SWAP) != 0) && unlikely(ti->limit_swap_bios);
1092 static void clone_endio(struct bio *bio)
1094 blk_status_t error = bio->bi_status;
1095 struct dm_target_io *tio = clone_to_tio(bio);
1102 if (bio_op(bio) == REQ_OP_DISCARD &&
1103 !bdev_max_discard_sectors(bio->bi_bdev))
1105 else if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
1106 !bdev_write_zeroes_sectors(bio->bi_bdev))
1111 unlikely(bdev_is_zoned(bio->bi_bdev)))
1112 dm_zone_endio(io, bio);
1115 int r = endio(ti, bio, &error);
1125 if (WARN_ON_ONCE(dm_is_zone_write(md, bio)))
1144 unlikely(swap_bios_limit(ti, bio)))
1147 free_tio(bio);
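clone_endio() is where a target's optional ->end_io hook is consulted and its DM_ENDIO_* return value acted on. A hedged sketch of what such a hook can look like for a hypothetical target (example_end_io is not a function in the tree):

static int example_end_io(struct dm_target *ti, struct bio *bio,
			  blk_status_t *error)
{
	/* Ask DM core to requeue a transient failure instead of failing it. */
	if (*error == BLK_STS_RESOURCE)
		return DM_ENDIO_REQUEUE;

	/* DM_ENDIO_DONE lets clone_endio() finish the io normally. */
	return DM_ENDIO_DONE;
}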
1295 * allowed for all bio types except REQ_PREFLUSH, REQ_OP_ZONE_* zone management
1296 * operations, REQ_OP_ZONE_APPEND (zone append writes) and any bio serviced by
1300 * additional n_sectors sectors of the bio and the rest of the data should be
1301 * sent in the next bio.
1314 * Region 2 is the remaining bio size that the target wants to process.
1317 * The target requires that region 3 be sent in the next bio.
1319 * If the target wants to receive multiple copies of the bio (via num_*bios, etc),
1321 * copies of the bio.
1323 void dm_accept_partial_bio(struct bio *bio, unsigned int n_sectors)
1325 struct dm_target_io *tio = clone_to_tio(bio);
1327 unsigned int bio_sectors = bio_sectors(bio);
1330 BUG_ON(op_is_zone_mgmt(bio_op(bio)));
1331 BUG_ON(bio_op(bio) == REQ_OP_ZONE_APPEND);
1336 bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT;
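dm_accept_partial_bio() is called from a target's .map when a bio straddles a boundary the target cannot service in one piece; DM core then processes only the accepted front and resubmits the remainder. A sketch for a hypothetical boundary-limited target; example_ctx, example_map and the power-of-two boundary are assumptions, not code from the tree:

struct example_ctx {			/* hypothetical target context */
	struct dm_dev *dev;
	unsigned int boundary_sectors;	/* power of two */
};

static int example_map(struct dm_target *ti, struct bio *bio)
{
	struct example_ctx *c = ti->private;
	sector_t offset = bio->bi_iter.bi_sector & (c->boundary_sectors - 1);
	unsigned int max = c->boundary_sectors - offset;

	/* Accept only what fits before the boundary; DM resubmits the rest. */
	if (bio_sectors(bio) > max)
		dm_accept_partial_bio(bio, max);

	bio_set_dev(bio, c->dev->bdev);
	return DM_MAPIO_REMAPPED;
}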
1349 * @clone: clone bio that DM core passed to target's .map function
1350 * @tgt_clone: clone of @clone bio that target needs submitted
1357 void dm_submit_bio_remap(struct bio *clone, struct bio *tgt_clone)
1362 /* establish bio that will get submitted */
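dm_submit_bio_remap() is for targets that take ownership of the clone in .map, return DM_MAPIO_SUBMITTED, and want DM core to start accounting only when their own remapped bio is actually issued; the target also needs ti->accounts_remapped_io set in its constructor. A sketch with a hypothetical helper, example_build_clone(), standing in for whatever the target does to build its bio:

static int example_map(struct dm_target *ti, struct bio *bio)
{
	struct bio *remapped;

	/* Build our own clone aimed at the underlying device (details omitted). */
	remapped = example_build_clone(ti, bio);	/* hypothetical helper */
	if (!remapped)
		return DM_MAPIO_KILL;

	/* DM core handles accounting and submission of the remapped bio. */
	dm_submit_bio_remap(bio, remapped);
	return DM_MAPIO_SUBMITTED;
}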
1394 static void __map_bio(struct bio *clone)
1468 io->sector_offset = bio_sectors(ci->bio);
1476 struct bio *bio;
1485 bio = alloc_tio(ci, ti, bio_nr, len,
1487 if (!bio)
1490 bio_list_add(blist, bio);
1497 while ((bio = bio_list_pop(blist)))
1498 free_tio(bio);
1506 struct bio *clone;
1538 struct bio flush_bio;
1541 * Use an on-stack bio for this; it's safe since we don't
1548 ci->bio = &flush_bio;
1567 bio_uninit(ci->bio);
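The on-stack flush bio above is only used as a clone template and never submitted itself, but the underlying bio_init()/bio_uninit() idiom is the same one blkdev_issue_flush() relies on. A sketch of that related pattern, assuming a caller-provided block device:

static int example_issue_flush(struct block_device *bdev)
{
	struct bio flush_bio;
	int ret;

	/* No data pages, just a preflush; safe on-stack because we wait. */
	bio_init(&flush_bio, bdev, NULL, 0,
		 REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC);
	ret = submit_bio_wait(&flush_bio);
	bio_uninit(&flush_bio);
	return ret;
}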
1592 static bool is_abnormal_io(struct bio *bio)
1594 enum req_op op = bio_op(bio);
1618 switch (bio_op(ci->bio)) {
1657 * associated with this bio, and this bio's bi_private needs to be
1660 * bio->bi_private is owned by fs or upper layer, so block layer won't
1662 * bio is submitted. So this reuse is safe.
1664 static inline struct dm_io **dm_poll_list_head(struct bio *bio)
1666 return (struct dm_io **)&bio->bi_private;
1669 static void dm_queue_poll_io(struct bio *bio, struct dm_io *io)
1671 struct dm_io **head = dm_poll_list_head(bio);
1673 if (!(bio->bi_opf & REQ_DM_POLL_LIST)) {
1674 bio->bi_opf |= REQ_DM_POLL_LIST;
1679 io->data = bio->bi_private;
1682 bio->bi_cookie = ~BLK_QC_T_NONE;
1687 * bio recursed due to split, reuse original poll list,
1688 * and save bio->bi_private too.
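Taken together, dm_poll_list_head() and dm_queue_poll_io() reuse bio->bi_private as the head of a singly linked dm_io list, stashing the owner's original pointer so it can be put back before the bio completes. A simplified model of that pattern; example_poll_node, example_push and example_restore are illustrative, not the real dm_io plumbing:

#include <linux/bio.h>

#define REQ_DM_POLL_LIST REQ_DRV	/* private alias, as in dm.c */

/* Each node remembers the pointer value it displaced from bi_private. */
struct example_poll_node {
	struct example_poll_node *next;
	void *saved_private;
};

static void example_push(struct bio *bio, struct example_poll_node *node)
{
	struct example_poll_node **head =
		(struct example_poll_node **)&bio->bi_private;

	if (!(bio->bi_opf & REQ_DM_POLL_LIST)) {
		bio->bi_opf |= REQ_DM_POLL_LIST;
		node->saved_private = bio->bi_private;	/* stash owner's pointer */
		node->next = NULL;
	} else {
		node->saved_private = (*head)->saved_private;
		node->next = *head;			/* splice onto existing list */
	}
	*head = node;
}

static void example_restore(struct bio *bio)
{
	struct example_poll_node *list = bio->bi_private;

	bio->bi_opf &= ~REQ_DM_POLL_LIST;
	bio->bi_private = list->saved_private;		/* give bi_private back */
}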
1698 * Select the correct strategy for processing a non-flush bio.
1702 struct bio *clone;
1710 if (unlikely((ci->bio->bi_opf & REQ_NOWAIT) != 0) &&
1718 * Only support bio polling for normal IO, and the target io is
1721 ci->submit_as_polled = !!(ci->bio->bi_opf & REQ_POLLED);
1735 struct dm_table *map, struct bio *bio, bool is_abnormal)
1738 ci->io = alloc_io(md, bio);
1739 ci->bio = bio;
1742 ci->sector = bio->bi_iter.bi_sector;
1743 ci->sector_count = bio_sectors(bio);
1747 WARN_ON_ONCE(op_is_zone_mgmt(bio_op(bio)) && ci->sector_count))
1752 * Entry point to split a bio into clones and submit them to the targets.
1755 struct dm_table *map, struct bio *bio)
1762 is_abnormal = is_abnormal_io(bio);
1768 bio = bio_split_to_limits(bio);
1769 if (!bio)
1773 init_clone_info(&ci, md, map, bio, is_abnormal);
1776 if (bio->bi_opf & REQ_PREFLUSH) {
1789 bio_trim(bio, io->sectors, ci.sector_count);
1790 trace_block_split(bio, bio->bi_iter.bi_sector);
1791 bio_inc_remaining(bio);
1792 submit_bio_noacct(bio);
1795 * Drop the extra reference count for non-POLLED bio, and hold one
1796 * reference for POLLED bio, which will be released in dm_poll_bio
1799 * in bio->bi_private, so that dm_poll_bio can poll them all.
1810 dm_queue_poll_io(bio, io);
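The bio_trim()/bio_inc_remaining()/submit_bio_noacct() sequence above is how dm_split_and_process_bio() hands back the part of the original bio that the first pass did not consume. The same idiom in isolation, with hypothetical parameter names:

static void example_resubmit_remainder(struct bio *orig,
				       unsigned int done_sectors,
				       unsigned int remaining_sectors)
{
	/* Shrink the original bio down to the sectors nobody has handled yet. */
	bio_trim(orig, done_sectors, remaining_sectors);
	/* dm.c also emits trace_block_split() at this point. */

	/*
	 * The already-accepted front will complete 'orig' through its own
	 * bio_endio(), so take an extra remaining reference before letting
	 * the trimmed remainder recurse through the submission path.
	 */
	bio_inc_remaining(orig);
	submit_bio_noacct(orig);
}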
1813 static void dm_submit_bio(struct bio *bio)
1815 struct mapped_device *md = bio->bi_bdev->bd_disk->private_data;
1824 if (bio->bi_opf & REQ_NOWAIT)
1825 bio_wouldblock_error(bio);
1826 else if (bio->bi_opf & REQ_RAHEAD)
1827 bio_io_error(bio);
1829 queue_io(md, bio);
1833 dm_split_and_process_bio(md, map, bio);
1851 static int dm_poll_bio(struct bio *bio, struct io_comp_batch *iob,
1854 struct dm_io **head = dm_poll_list_head(bio);
1859 /* Only poll normal bio which was marked as REQ_DM_POLL_LIST */
1860 if (!(bio->bi_opf & REQ_DM_POLL_LIST))
1868 * bio_poll() is only possible once @bio has been completely
1873 bio->bi_opf &= ~REQ_DM_POLL_LIST;
1874 bio->bi_private = list->data;
1892 bio->bi_opf |= REQ_DM_POLL_LIST;
1893 /* Reset bio->bi_private to dm_io list head */
2070 * default to bio-based until DM table is loaded and md->type
2218 * requests in the queue may refer to bio from the old bioset,
2575 struct bio *bio;
2579 bio = bio_list_pop(&md->deferred);
2582 if (!bio)
2585 submit_bio_noacct(bio);
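queue_io() earlier in the listing and the worker loop shown just above form a classic deferred-I/O pattern: producers append to a spinlock-protected bio_list, and a workqueue later drains it back into submit_bio_noacct(). A self-contained sketch of that pattern with hypothetical names (example_deferral, example_queue_io, example_work); setup of the lock, list and work item is omitted:

#include <linux/bio.h>
#include <linux/workqueue.h>

struct example_deferral {
	spinlock_t lock;		/* spin_lock_init() in setup */
	struct bio_list deferred;	/* bio_list_init() in setup */
	struct work_struct work;	/* INIT_WORK() in setup */
};

static void example_queue_io(struct example_deferral *d, struct bio *bio)
{
	unsigned long flags;

	spin_lock_irqsave(&d->lock, flags);
	bio_list_add(&d->deferred, bio);	/* FIFO append */
	spin_unlock_irqrestore(&d->lock, flags);
	queue_work(system_wq, &d->work);
}

static void example_work(struct work_struct *work)
{
	struct example_deferral *d =
		container_of(work, struct example_deferral, work);
	struct bio *bio;

	for (;;) {
		spin_lock_irq(&d->lock);
		bio = bio_list_pop(&d->deferred);
		spin_unlock_irq(&d->lock);

		if (!bio)
			break;
		submit_bio_noacct(bio);	/* re-enter the normal submission path */
	}
}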
3493 MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools");