Lines matching 'bio' in drivers/md/dm.c (Linux device-mapper core)
18 #include <linux/bio.h>
66 * One of these is allocated (on-stack) per original bio.
70 struct bio *bio;
77 * One of these is allocated per clone bio.
87 struct bio clone;
91 * One of these is allocated per original bio.
100 struct bio *orig_bio;
104 /* last member of dm_target_io is 'struct bio' */
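To make the container_of() arithmetic in the helpers below concrete, here is a minimal sketch of the embedding those comments describe. All example_* names are illustrative stand-ins, not the real dm.c structures: the clone bio sits at the end of the per-clone struct, which is itself embedded at the end of the per-original-bio struct, so container_of() on a clone bio recovers both owners.

#include <linux/atomic.h>
#include <linux/bio.h>
#include <linux/kernel.h>

struct example_io;

struct example_target_io {			/* one per clone bio */
	struct example_io *io;
	unsigned target_bio_nr;
	struct bio clone;			/* must stay the last member */
};

struct example_io {				/* one per original bio */
	blk_status_t status;			/* first failure seen, if any */
	atomic_t io_count;			/* outstanding clones + submitter */
	struct bio *orig_bio;
	unsigned long start_time;		/* jiffies from bio_start_io_acct() */
	struct example_target_io tio;		/* embedded clone for the first target */
};

/* Walk back from a clone bio to its owning per-clone struct. */
static inline struct example_target_io *clone_to_tio(struct bio *clone)
{
	return container_of(clone, struct example_target_io, clone);
}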
108 void *dm_per_bio_data(struct bio *bio, size_t data_size)
110 struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
112 return (char *)bio - offsetof(struct dm_target_io, clone) - data_size;
113 return (char *)bio - offsetof(struct dm_target_io, clone) - offsetof(struct dm_io, tio) - data_size;
117 struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size)
121 return (struct bio *)((char *)io + offsetof(struct dm_io, tio) + offsetof(struct dm_target_io, clone));
123 return (struct bio *)((char *)io + offsetof(struct dm_target_io, clone));
127 unsigned dm_bio_get_target_bio_nr(const struct bio *bio)
129 return container_of(bio, struct dm_target_io, clone)->target_bio_nr;
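The three helpers above are the interface DM targets use for their per-bio scratch space. Below is a hedged sketch of a hypothetical pass-through target ("example-pbd", with made-up example_* names) that sets ti->per_io_data_size in its constructor and then reads the same slot back via dm_per_bio_data() in both ->map() and ->end_io(). It illustrates the API usage; it is not code from dm.c.

#define DM_MSG_PREFIX "example_pbd"

#include <linux/bio.h>
#include <linux/device-mapper.h>
#include <linux/module.h>
#include <linux/slab.h>

struct example_ctx {
	struct dm_dev *dev;
};

struct example_per_bio {
	sector_t orig_sector;		/* survives from ->map() to ->end_io() */
};

/* ctr: <dev path> (argument checking kept minimal for the sketch) */
static int example_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
	struct example_ctx *ec = kzalloc(sizeof(*ec), GFP_KERNEL);

	if (!ec)
		return -ENOMEM;
	if (argc != 1 ||
	    dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &ec->dev)) {
		kfree(ec);
		ti->error = "Requires exactly one device argument";
		return -EINVAL;
	}

	/* Hook into the machinery above: DM core reserves this many bytes in
	 * front of every clone bio it allocates for this target. */
	ti->per_io_data_size = sizeof(struct example_per_bio);
	ti->private = ec;
	return 0;
}

static void example_dtr(struct dm_target *ti)
{
	struct example_ctx *ec = ti->private;

	dm_put_device(ti, ec->dev);
	kfree(ec);
}

static int example_map(struct dm_target *ti, struct bio *bio)
{
	struct example_ctx *ec = ti->private;
	struct example_per_bio *pb = dm_per_bio_data(bio, sizeof(*pb));

	pb->orig_sector = bio->bi_iter.bi_sector;	/* stash per-bio state */

	bio_set_dev(bio, ec->dev->bdev);
	bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
	return DM_MAPIO_REMAPPED;
}

static int example_end_io(struct dm_target *ti, struct bio *bio,
			  blk_status_t *error)
{
	struct example_per_bio *pb = dm_per_bio_data(bio, sizeof(*pb));

	/* The same slot written in ->map() is still valid here. */
	if (*error)
		DMERR_LIMIT("I/O error at original sector %llu",
			    (unsigned long long)pb->orig_sector);
	return DM_ENDIO_DONE;
}

static struct target_type example_target = {
	.name	 = "example-pbd",
	.version = {1, 0, 0},
	.module	 = THIS_MODULE,
	.ctr	 = example_ctr,
	.dtr	 = example_dtr,
	.map	 = example_map,
	.end_io	 = example_end_io,
};

static int __init example_init(void)
{
	return dm_register_target(&example_target);
}

static void __exit example_exit(void)
{
	dm_unregister_target(&example_target);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");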
588 u64 dm_start_time_ns_from_clone(struct bio *bio)
590 struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
600 struct bio *bio = io->orig_bio;
602 io->start_time = bio_start_io_acct(bio);
604 dm_stats_account_io(&md->stats, bio_data_dir(bio),
605 bio->bi_iter.bi_sector, bio_sectors(bio),
609 static void end_io_acct(struct mapped_device *md, struct bio *bio,
615 dm_stats_account_io(&md->stats, bio_data_dir(bio),
616 bio->bi_iter.bi_sector, bio_sectors(bio),
621 bio_end_io_acct(bio, start_time);
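start_io_acct()/end_io_acct() above pair the generic block-layer accounting helpers around the original bio (plus the dm-stats calls, not shown here). A minimal sketch of that pairing, reusing struct example_io from the layout sketch earlier:

#include <linux/blkdev.h>

static void example_start_io_acct(struct example_io *io)
{
	/* Charges the I/O to the top-level disk's stats and snapshots jiffies. */
	io->start_time = bio_start_io_acct(io->orig_bio);
}

static void example_end_io_acct(struct example_io *io)
{
	/* Must be paired with the call above, using the same timestamp. */
	bio_end_io_acct(io->orig_bio, io->start_time);
}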
628 static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio)
632 struct bio *clone;
646 io->orig_bio = bio;
669 struct bio *clone = bio_alloc_bioset(gfp_mask, 0, &ci->io->md->bs);
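alloc_io()/alloc_tio() get their clones from a bioset whose front_pad covers the embedding structs and the per-bio data, which is what makes the pointer arithmetic in dm_per_bio_data() legal. A sketch of that setup, assuming struct example_target_io from the layout sketch above; real dm.c additionally rounds the pad for alignment and accounts for the enclosing per-original-bio struct.

#include <linux/bio.h>
#include <linux/kernel.h>

static struct bio_set example_bs;

static int example_bioset_init(size_t per_bio_data_size)
{
	/* Every bio handed out by this set is preceded in memory by
	 * front_pad bytes from the same allocation. */
	unsigned front_pad = per_bio_data_size +
			     offsetof(struct example_target_io, clone);

	/* 16 reserved entries in the mempool; flags 0 since clones share
	 * the original's bvecs. */
	return bioset_init(&example_bs, 16, front_pad, 0);
}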
693 * Add the bio to the list of deferred io.
695 static void queue_io(struct mapped_device *md, struct bio *bio)
700 bio_list_add(&md->deferred, bio);
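queue_io() parks bios that cannot be processed right now (for example while the device is suspended); a workqueue item later pops and resubmits them (see dm_wq_work further down in this listing). A self-contained sketch of that pattern with hypothetical example_* names; initialisation of the struct (spin_lock_init(), bio_list_init(), INIT_WORK()) is omitted.

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct example_dev {
	unsigned long flags;		/* bit 0 = suspended; used in the submit sketch below */
	spinlock_t deferred_lock;
	struct bio_list deferred;
	struct work_struct work;
};

static void example_queue_io(struct example_dev *ed, struct bio *bio)
{
	unsigned long irqflags;

	spin_lock_irqsave(&ed->deferred_lock, irqflags);
	bio_list_add(&ed->deferred, bio);
	spin_unlock_irqrestore(&ed->deferred_lock, irqflags);
	queue_work(system_wq, &ed->work);
}

static void example_deferred_work(struct work_struct *work)
{
	struct example_dev *ed = container_of(work, struct example_dev, work);
	struct bio *bio;

	for (;;) {
		spin_lock_irq(&ed->deferred_lock);
		bio = bio_list_pop(&ed->deferred);
		spin_unlock_irq(&ed->deferred_lock);

		if (!bio)
			break;
		/* Hand it back to the block layer; it re-enters ->submit_bio(). */
		submit_bio_noacct(bio);
	}
}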
897 * Decrements the number of outstanding ios that a bio has been
904 struct bio *bio;
933 bio = io->orig_bio;
937 end_io_acct(md, bio, start_time, &stats_aux);
942 if ((bio->bi_opf & REQ_PREFLUSH) && bio->bi_iter.bi_size) {
947 bio->bi_opf &= ~REQ_PREFLUSH;
948 queue_io(md, bio);
952 bio->bi_status = io_error;
953 bio_endio(bio);
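dec_pending() implements the usual "one original bio, many clones" completion scheme: each clone holds a reference on the per-original-bio struct, and only the final drop is allowed to end the original bio. A sketch of that pattern, reusing struct example_io from the layout sketch above:

#include <linux/bio.h>

static void example_dec_pending(struct example_io *io, blk_status_t error)
{
	if (error)
		io->status = error;	/* dm.c guards this update with a spinlock */

	/* One reference per clone plus one for the submitter: only the last
	 * drop may touch the original bio. */
	if (atomic_dec_and_test(&io->io_count)) {
		struct bio *bio = io->orig_bio;

		bio->bi_status = io->status;
		bio_endio(bio);
		/* Freeing 'io' is elided; dm returns it to a bioset-backed
		 * mempool rather than kfree()ing it. */
	}
}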
983 static bool swap_bios_limit(struct dm_target *ti, struct bio *bio)
985 return unlikely((bio->bi_opf & REQ_SWAP) != 0) && unlikely(ti->limit_swap_bios);
988 static void clone_endio(struct bio *bio)
990 blk_status_t error = bio->bi_status;
991 struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
995 struct bio *orig_bio = io->orig_bio;
998 if (bio_op(bio) == REQ_OP_DISCARD &&
999 !bio->bi_disk->queue->limits.max_discard_sectors)
1001 else if (bio_op(bio) == REQ_OP_WRITE_SAME &&
1002 !bio->bi_disk->queue->limits.max_write_same_sectors)
1004 else if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
1005 !bio->bi_disk->queue->limits.max_write_zeroes_sectors)
1011 * sector and add that to the original bio sector pos.
1014 sector_t written_sector = bio->bi_iter.bi_sector;
1022 int r = endio(tio->ti, bio, &error);
1038 if (unlikely(swap_bios_limit(tio->ti, bio))) {
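When a clone fails an operation that the underlying queue now advertises as unsupported (the relevant limit reads back as zero), DM turns that operation off on its own queue so the block layer stops issuing it. A hypothetical helper in that spirit (not the dm.c one), shown for the discard case:

#include <linux/blkdev.h>

static void example_disable_discard(struct request_queue *q)
{
	/* A zero limit makes the block layer stop sending discards to this
	 * queue instead of letting them fail over and over. */
	blk_queue_max_discard_sectors(q, 0);
	blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
}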
1237 * allowed for all bio types except REQ_PREFLUSH, REQ_OP_ZONE_* zone management
1241 * additional n_sectors sectors of the bio and the rest of the data should be
1242 * sent in a next bio.
1255 * Region 2 is the remaining bio size that the target wants to process.
1258 * The target requires that region 3 is to be sent in the next bio.
1260 * If the target wants to receive multiple copies of the bio (via num_*bios, etc),
1262 * copies of the bio.
1264 void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
1266 struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
1267 unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT;
1269 BUG_ON(bio->bi_opf & REQ_PREFLUSH);
1270 BUG_ON(op_is_zone_mgmt(bio_op(bio)));
1271 BUG_ON(bio_op(bio) == REQ_OP_ZONE_APPEND);
1276 bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT;
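dm_accept_partial_bio() lets a target's ->map() keep only the leading n_sectors of the clone (regions 1+2 in the comment above) and have DM core resubmit the rest as a follow-up bio. A hedged sketch of a map function that refuses to cross a hypothetical chunk boundary; EXAMPLE_CHUNK_SECTORS and the ti->private backing device are assumptions of the sketch, not dm.c code.

#include <linux/bio.h>
#include <linux/device-mapper.h>

#define EXAMPLE_CHUNK_SECTORS	128	/* illustrative 64 KiB chunks (power of two) */

static int example_chunked_map(struct dm_target *ti, struct bio *bio)
{
	struct dm_dev *dev = ti->private;	/* assumed set up in the ctr */
	sector_t offset = dm_target_offset(ti, bio->bi_iter.bi_sector);
	unsigned remaining = EXAMPLE_CHUNK_SECTORS -
			     (offset & (EXAMPLE_CHUNK_SECTORS - 1));

	/* Keep only what fits in this chunk; DM core sends the remainder
	 * (region 3 in the comment above) as a separate bio later. */
	if (bio_sectors(bio) > remaining)
		dm_accept_partial_bio(bio, remaining);

	bio_set_dev(bio, dev->bdev);
	bio->bi_iter.bi_sector = offset;	/* linear pass-through */
	return DM_MAPIO_REMAPPED;
}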
1300 struct bio *clone = &tio->clone;
1328 /* the bio has been remapped so dispatch it */
1357 static void bio_setup_sector(struct bio *bio, sector_t sector, unsigned len)
1359 bio->bi_iter.bi_sector = sector;
1360 bio->bi_iter.bi_size = to_bytes(len);
1364 * Creates a bio that consists of range of complete bvecs.
1366 static int clone_bio(struct dm_target_io *tio, struct bio *bio,
1369 struct bio *clone = &tio->clone;
1372 __bio_clone_fast(clone, bio);
1374 r = bio_crypt_clone(clone, bio, GFP_NOIO);
1378 if (bio_integrity(bio)) {
1387 r = bio_integrity_clone(clone, bio, GFP_NOIO);
1395 if (bio_integrity(bio))
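clone_bio() produces a clone that covers only part of the original: a fast clone shares the original's bvec array, then the iterator is advanced and trimmed. A sketch of that sequence, reusing the example_bs bio_set from the bioset sketch above; as the lines above show, real dm.c also clones the crypt context and integrity payload.

#include <linux/bio.h>
#include <linux/blkdev.h>

static struct bio *example_clone_range(struct bio *orig, sector_t sector,
				       unsigned len_sectors)
{
	/* GFP_NOIO allocations from a mempool-backed bio_set do not fail. */
	struct bio *clone = bio_alloc_bioset(GFP_NOIO, 0, &example_bs);

	__bio_clone_fast(clone, orig);		/* share bvecs, copy the iterator */

	/* Real dm.c clones the crypt context and integrity payload here
	 * (bio_crypt_clone()/bio_integrity_clone()). */

	/* Skip everything before 'sector', then cap the size. */
	bio_advance(clone, (sector - orig->bi_iter.bi_sector) << SECTOR_SHIFT);
	clone->bi_iter.bi_size = len_sectors << SECTOR_SHIFT;
	return clone;
}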
1418 struct bio *bio;
1434 while ((bio = bio_list_pop(blist))) {
1435 tio = container_of(bio, struct dm_target_io, clone);
1444 struct bio *clone = &tio->clone;
1448 __bio_clone_fast(clone, ci->bio);
1459 struct bio *bio;
1464 while ((bio = bio_list_pop(&blist))) {
1465 tio = container_of(bio, struct dm_target_io, clone);
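The duplicate-bio machinery above exists because a target can request several copies of certain bios via the num_*_bios fields; each copy can then be told apart in ->map() with dm_bio_get_target_bio_nr(). A tiny illustrative constructor fragment for a hypothetical two-leg target:

#include <linux/device-mapper.h>

static int example_dup_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
	/* Ask DM core to clone every flush and discard twice; ->map() uses
	 * dm_bio_get_target_bio_nr() to route copy 0 and copy 1. */
	ti->num_flush_bios = 2;
	ti->num_discard_bios = 2;
	return 0;
}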
1474 struct bio flush_bio;
1477 * Use an on-stack bio for this, it's safe since we don't
1483 ci->bio = &flush_bio;
1487 * Empty flush uses a statically initialized bio, as the base for
1493 bio_set_dev(ci->bio, ci->io->md->bdev);
1495 BUG_ON(bio_has_data(ci->bio));
1499 bio_uninit(ci->bio);
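__send_empty_flush() relies on the fact that a bio whose lifetime is bounded by the current call can live on the stack. The same pattern as a self-contained sketch that issues a synchronous empty flush to a given block device:

#include <linux/bio.h>
#include <linux/blkdev.h>

static int example_issue_flush(struct block_device *bdev)
{
	struct bio flush_bio;
	int ret;

	bio_init(&flush_bio, NULL, 0);		/* no data pages needed */
	bio_set_dev(&flush_bio, bdev);
	flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;

	ret = submit_bio_wait(&flush_bio);	/* blocks until the flush completes */
	bio_uninit(&flush_bio);
	return ret;
}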
1506 struct bio *bio = ci->bio;
1512 r = clone_bio(tio, bio, sector, *len);
1547 static bool is_abnormal_io(struct bio *bio)
1551 switch (bio_op(bio)) {
1566 struct bio *bio = ci->bio;
1569 switch (bio_op(bio)) {
1591 * Select the correct strategy for processing a non-flush bio.
1619 struct dm_table *map, struct bio *bio)
1622 ci->io = alloc_io(md, bio);
1623 ci->sector = bio->bi_iter.bi_sector;
1630 * Entry point to split a bio into clones and submit them to the targets.
1633 struct dm_table *map, struct bio *bio)
1639 init_clone_info(&ci, md, map, bio);
1641 if (bio->bi_opf & REQ_PREFLUSH) {
1644 } else if (op_is_zone_mgmt(bio_op(bio))) {
1645 ci.bio = bio;
1649 ci.bio = bio;
1650 ci.sector_count = bio_sectors(bio);
1662 struct bio *b = bio_split(bio, bio_sectors(bio) - ci.sector_count,
1670 * significant refactoring of DM core's bio splitting
1675 sectors[op_stat_group(bio_op(bio))], ci.sector_count);
1678 bio_chain(b, bio);
1679 trace_block_split(md->queue, b, bio->bi_iter.bi_sector);
1680 ret = submit_bio_noacct(bio);
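__split_and_process_bio() handles oversized bios by processing the front portion now and handing the chained remainder back to the block layer, which delivers it again later. A sketch of that split-and-requeue step; the bio_set is whichever set the caller clones from.

#include <linux/bio.h>
#include <linux/blkdev.h>

static struct bio *example_split_front(struct bio *bio, unsigned max_sectors,
				       struct bio_set *bs)
{
	struct bio *front;

	if (bio_sectors(bio) <= max_sectors)
		return bio;

	/* bio_split() returns the first max_sectors; 'bio' is advanced to the
	 * remainder. Chaining makes the remainder's completion wait for the
	 * front, mirroring what the lines above do. */
	front = bio_split(bio, max_sectors, GFP_NOIO, bs);
	bio_chain(front, bio);
	submit_bio_noacct(bio);		/* let the block layer re-deliver the rest */
	return front;			/* caller processes this part now */
}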
1691 static blk_qc_t dm_submit_bio(struct bio *bio)
1693 struct mapped_device *md = bio->bi_disk->private_data;
1703 if (bio->bi_opf & REQ_NOWAIT)
1704 bio_wouldblock_error(bio);
1705 else if (bio->bi_opf & REQ_RAHEAD)
1706 bio_io_error(bio);
1708 queue_io(md, bio);
1716 if (is_abnormal_io(bio))
1717 blk_queue_split(&bio);
1719 ret = __split_and_process_bio(md, map, bio);
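dm_submit_bio() is the bio-based ->submit_bio() entry point; while the device cannot take I/O it fails REQ_NOWAIT and readahead bios immediately and defers everything else. A sketch of the same decision tree, reusing example_dev/example_queue_io() from the deferred-queue sketch above; example_handle_bio() stands in for the normal clone-and-dispatch path and is only declared here.

#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>

#define EXAMPLE_SUSPENDED	0	/* bit in example_dev->flags */

static void example_handle_bio(struct example_dev *ed, struct bio *bio);	/* assumed */

static blk_qc_t example_submit_bio(struct bio *bio)
{
	struct example_dev *ed = bio->bi_disk->private_data;

	if (unlikely(test_bit(EXAMPLE_SUSPENDED, &ed->flags))) {
		if (bio->bi_opf & REQ_NOWAIT)
			bio_wouldblock_error(bio);	/* caller refused to block */
		else if (bio->bi_opf & REQ_RAHEAD)
			bio_io_error(bio);		/* readahead is best-effort */
		else
			example_queue_io(ed, bio);	/* park until resumed */
		return BLK_QC_T_NONE;
	}

	example_handle_bio(ed, bio);	/* split, clone and dispatch (see above) */
	return BLK_QC_T_NONE;
}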
1865 * default to bio-based until DM table is loaded and md->type
1975 * to bio from the old bioset, so you must walk
2377 struct bio *bio;
2381 bio = bio_list_pop(&md->deferred);
2384 if (!bio)
2387 submit_bio_noacct(bio);
3179 MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools");