Lines Matching defs:bio

9 #include "dm-bio-prison-v2.h"
10 #include "dm-bio-record.h"
83 void (*issue_op)(struct bio *bio, void *context);
106 struct bio *bio;
132 while ((bio = bio_list_pop(&bios))) {
134 bio->bi_status = r;
135 bio_endio(bio);
137 b->issue_op(bio, b->issue_context);
144 void (*issue_op)(struct bio *bio, void *),
182 static void issue_after_commit(struct batcher *b, struct bio *bio)
188 bio_list_add(&b->bios, bio);
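The batcher above parks bios behind a metadata commit: issue_after_commit() queues them, and once the commit resolves the whole batch is either issued via the issue_op callback or failed with the commit's error. A minimal userspace sketch of that pattern (fake_bio, batch_add and batch_complete are illustrative names, not the kernel's):

#include <stddef.h>

struct fake_bio {
	struct fake_bio *next;
	int status;
};

struct fake_batcher {
	struct fake_bio *head;                      /* bios awaiting commit */
	void (*issue_op)(struct fake_bio *, void *);
	void *issue_context;
};

static void batch_add(struct fake_batcher *b, struct fake_bio *bio)
{
	bio->next = b->head;                        /* park behind the commit */
	b->head = bio;
}

static void batch_complete(struct fake_batcher *b, int commit_error)
{
	struct fake_bio *bio = b->head;

	b->head = NULL;
	while (bio) {
		struct fake_bio *next = bio->next;

		if (commit_error)
			bio->status = commit_error;         /* fail the whole batch */
		else
			b->issue_op(bio, b->issue_context); /* commit ok: issue */
		bio = next;
	}
}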
212 * There are a couple of places where we let a bio run, but want to do some work before calling its endio function. We do this by temporarily changing the endio fn.
220 static void dm_hook_bio(struct dm_hook_info *h, struct bio *bio,
223 h->bi_end_io = bio->bi_end_io;
225 bio->bi_end_io = bi_end_io;
226 bio->bi_private = bi_private;
229 static void dm_unhook_bio(struct dm_hook_info *h, struct bio *bio)
231 bio->bi_end_io = h->bi_end_io;
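dm_hook_bio()/dm_unhook_bio() implement the interposition that comment describes: save the bio's completion callback and private data, substitute your own, then restore the originals before finally completing the bio. A userspace model of the save/swap/restore dance (stand-in types only):

struct fake_bio {
	void (*end_io)(struct fake_bio *);
	void *private;
};

struct hook_info {
	void (*saved_end_io)(struct fake_bio *);
	void *saved_private;
};

static void hook_bio(struct hook_info *h, struct fake_bio *bio,
		     void (*end_io)(struct fake_bio *), void *private)
{
	h->saved_end_io = bio->end_io;    /* remember the original endio */
	h->saved_private = bio->private;
	bio->end_io = end_io;             /* interpose our own */
	bio->private = private;
}

static void unhook_bio(struct hook_info *h, struct fake_bio *bio)
{
	bio->end_io = h->saved_end_io;    /* put things back before endio runs */
	bio->private = h->saved_private;
}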
424 struct bio *overwrite_bio;
521 static unsigned int lock_level(struct bio *bio)
523 return bio_data_dir(bio) == WRITE ?
530 * Per bio data
534 static struct per_bio_data *get_per_bio_data(struct bio *bio)
536 struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
542 static struct per_bio_data *init_per_bio_data(struct bio *bio)
544 struct per_bio_data *pb = get_per_bio_data(bio);
547 pb->req_nr = dm_bio_get_target_bio_nr(bio);
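dm_per_bio_data() hands the target a scratch struct carved out of the same allocation as the bio, recovered by pointer arithmetic rather than any lookup. A rough model, assuming the scratch area simply sits inside the bio object (the kernel actually computes a fixed offset from the bio pointer):

#include <stddef.h>

struct per_bio_data {
	int tick;
	unsigned int req_nr;
	size_t len;
};

struct fake_bio {
	int opf;
	struct per_bio_data pb;    /* scratch co-allocated with the bio */
};

static struct per_bio_data *per_bio_data(struct fake_bio *bio)
{
	return &bio->pb;           /* constant offset: no allocation, no lock */
}

static struct per_bio_data *init_per_bio_data(struct fake_bio *bio,
					      unsigned int target_bio_nr)
{
	struct per_bio_data *pb = per_bio_data(bio);

	pb->tick = 0;
	pb->req_nr = target_bio_nr;
	pb->len = 0;
	return pb;
}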
556 static void defer_bio(struct cache *cache, struct bio *bio)
559 bio_list_add(&cache->deferred_bios, bio);
577 static bool bio_detain_shared(struct cache *cache, dm_oblock_t oblock, struct bio *bio)
588 r = dm_cell_get_v2(cache->prison, &key, lock_level(bio), bio, cell_prealloc, &cell);
600 pb = get_per_bio_data(bio);
719 static void remap_to_origin(struct cache *cache, struct bio *bio)
721 bio_set_dev(bio, cache->origin_dev->bdev);
724 static void remap_to_cache(struct cache *cache, struct bio *bio,
727 sector_t bi_sector = bio->bi_iter.bi_sector;
730 bio_set_dev(bio, cache->cache_dev->bdev);
732 bio->bi_iter.bi_sector =
736 bio->bi_iter.bi_sector =
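remap_to_cache() rewrites bi_sector so the cache-block index replaces the high bits while the offset within the block survives; the same split (shift/mask when the block size is a power of two, divide/modulo otherwise) underlies get_bio_block() below. A worked sketch with a hypothetical 128-sector block (shift 7):

#include <stdint.h>
#include <stdio.h>

#define SECTORS_PER_BLOCK       128u
#define SECTORS_PER_BLOCK_SHIFT 7

static uint64_t remap_sector_pow2(uint64_t sector, uint64_t cache_block)
{
	return (cache_block << SECTORS_PER_BLOCK_SHIFT) |
	       (sector & (SECTORS_PER_BLOCK - 1));
}

static uint64_t remap_sector_generic(uint64_t sector, uint64_t cache_block)
{
	/* non-power-of-two block sizes fall back to multiply and modulo */
	return cache_block * SECTORS_PER_BLOCK + sector % SECTORS_PER_BLOCK;
}

int main(void)
{
	/* origin sector 1000 keeps its in-block offset (1000 % 128 == 104)
	 * inside whichever cache block the policy picked, say cblock 3 */
	printf("%llu\n", (unsigned long long)remap_sector_pow2(1000, 3));
	printf("%llu\n", (unsigned long long)remap_sector_generic(1000, 3));
	return 0;                  /* both print 488 */
}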
741 static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio)
746 if (cache->need_tick_bio && !op_is_flush(bio->bi_opf) &&
747 bio_op(bio) != REQ_OP_DISCARD) {
748 pb = get_per_bio_data(bio);
755 static void remap_to_origin_clear_discard(struct cache *cache, struct bio *bio,
759 check_if_tick_bio_needed(cache, bio);
760 remap_to_origin(cache, bio);
761 if (bio_data_dir(bio) == WRITE)
765 static void remap_to_cache_dirty(struct cache *cache, struct bio *bio,
768 check_if_tick_bio_needed(cache, bio);
769 remap_to_cache(cache, bio, cblock);
770 if (bio_data_dir(bio) == WRITE) {
776 static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio)
778 sector_t block_nr = bio->bi_iter.bi_sector;
788 static bool accountable_bio(struct cache *cache, struct bio *bio)
790 return bio_op(bio) != REQ_OP_DISCARD;
793 static void accounted_begin(struct cache *cache, struct bio *bio)
797 if (accountable_bio(cache, bio)) {
798 pb = get_per_bio_data(bio);
799 pb->len = bio_sectors(bio);
804 static void accounted_complete(struct cache *cache, struct bio *bio)
806 struct per_bio_data *pb = get_per_bio_data(bio);
811 static void accounted_request(struct cache *cache, struct bio *bio)
813 accounted_begin(cache, bio);
814 dm_submit_bio_remap(bio, NULL);
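accounted_begin() stashes bio_sectors() in per-bio data before charging it to the cache's in-flight tracker; the stash matters because the block layer advances bi_iter as the bio progresses, so reading the size again at completion would under-report. The begin/complete pair reduces to a balanced counter, roughly (the real tracker in dm-cache-target.c also timestamps transitions so the policy can tell busy from idle):

#include <stdint.h>

struct io_tracker {
	uint64_t in_flight;        /* sectors currently outstanding */
};

static void io_begin(struct io_tracker *t, uint64_t sectors)
{
	t->in_flight += sectors;   /* accounted_begin(): charge */
}

static void io_end(struct io_tracker *t, uint64_t sectors)
{
	t->in_flight -= sectors;   /* accounted_complete(): credit back */
}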
817 static void issue_op(struct bio *bio, void *context)
821 accounted_request(cache, bio);
826 * When running in writethrough mode we need to send every write to both the cache and origin devices. Clone the bio and send them in parallel.
828 static void remap_to_origin_and_cache(struct cache *cache, struct bio *bio,
831 struct bio *origin_bio = bio_alloc_clone(cache->origin_dev->bdev, bio,
836 bio_chain(origin_bio, bio);
842 remap_to_cache(cache, bio, cblock);
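remap_to_origin_and_cache() clones the write, points the clone at the origin, chains it to the original, and remaps the original to the cache; bio_chain() makes the parent's completion wait for the clone. The chaining semantics boil down to a remaining-count, sketched here with stand-in types (each bio starts with remaining == 1 for itself):

struct fake_bio {
	struct fake_bio *parent;              /* set by fake_bio_chain() */
	int remaining;                        /* starts at 1 for the bio itself */
	void (*end_io)(struct fake_bio *);
};

static void fake_bio_endio(struct fake_bio *bio)
{
	if (--bio->remaining)
		return;                       /* a chained leg is still in flight */
	if (bio->end_io)
		bio->end_io(bio);
	if (bio->parent)
		fake_bio_endio(bio->parent);  /* propagate completion upward */
}

static void fake_bio_chain(struct fake_bio *child, struct fake_bio *parent)
{
	child->parent = parent;
	parent->remaining++;                  /* parent now also waits on child */
}

With the clone chained to the original, the original's endio fires only after both the origin write and the cache write have ended, which is exactly what writethrough requires.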
1004 static bool discard_or_flush(struct bio *bio)
1006 return bio_op(bio) == REQ_OP_DISCARD || op_is_flush(bio->bi_opf);
1009 static void calc_discard_block_range(struct cache *cache, struct bio *bio,
1012 sector_t sb = bio->bi_iter.bi_sector;
1013 sector_t se = bio_end_sector(bio);
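calc_discard_block_range() converts the bio's sector range into whole discard blocks: the start rounds up and the end rounds down, so a block is only marked discarded when the bio covers every sector of it. The arithmetic, with hypothetical sizes (discard_block_size in sectors):

#include <stdint.h>
#include <stdio.h>

static void discard_range(uint64_t sb, uint64_t se, uint64_t block_size,
			  uint64_t *b, uint64_t *e)
{
	*b = (sb + block_size - 1) / block_size;  /* first wholly covered block */
	*e = se / block_size;                     /* one past the last whole one */
	if (*e < *b)
		*e = *b;                          /* bio spans no whole block */
}

int main(void)
{
	uint64_t b, e;

	/* sectors [100, 900) with 256-sector discard blocks: only
	 * block 1 ([256, 512)) and block 2 ([512, 768)) qualify */
	discard_range(100, 900, 256, &b, &e);
	printf("blocks [%llu, %llu)\n",
	       (unsigned long long)b, (unsigned long long)e);  /* [1, 3) */
	return 0;
}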
1059 static bool bio_writes_complete_block(struct cache *cache, struct bio *bio)
1061 return (bio_data_dir(bio) == WRITE) &&
1062 (bio->bi_iter.bi_size == (cache->sectors_per_block << SECTOR_SHIFT));
1065 static bool optimisable_bio(struct cache *cache, struct bio *bio, dm_oblock_t block)
1068 (is_discarded_oblock(cache, block) || bio_writes_complete_block(cache, bio));
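optimisable_bio() asks whether a promotion can skip the copy from the origin: either the block is already discarded (its contents don't matter) or the bio is a WRITE overwriting the entire cache block. The size test, assuming 512-byte sectors (SECTOR_SHIFT 9):

#include <stdbool.h>
#include <stdint.h>

#define SECTOR_SHIFT 9

static bool writes_complete_block(bool is_write, uint32_t bi_size,
				  uint64_t sectors_per_block)
{
	/* e.g. 128 sectors/block -> bi_size must equal 128 << 9 == 65536 */
	return is_write && bi_size == (sectors_per_block << SECTOR_SHIFT);
}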
1114 static void bio_drop_shared_lock(struct cache *cache, struct bio *bio)
1116 struct per_bio_data *pb = get_per_bio_data(bio);
1123 static void overwrite_endio(struct bio *bio)
1125 struct dm_cache_migration *mg = bio->bi_private;
1127 struct per_bio_data *pb = get_per_bio_data(bio);
1129 dm_unhook_bio(&pb->hook_info, bio);
1131 if (bio->bi_status)
1132 mg->k.input = bio->bi_status;
1140 struct bio *bio = mg->overwrite_bio;
1141 struct per_bio_data *pb = get_per_bio_data(bio);
1143 dm_hook_bio(&pb->hook_info, bio, overwrite_endio, mg);
1146 * The overwrite bio is part of the copy operation, as such it does not set/clear discard or dirty flags.
1150 remap_to_cache(mg->cache, bio, mg->op->cblock);
1152 remap_to_origin(mg->cache, bio);
1155 accounted_request(mg->cache, bio);
1163 * 3) copy or issue overwrite bio
1363 * No exclusive lock was held when we last checked if the bio was optimisable, so we have to check again in case things have changed.
1404 * Unless we're using an overwrite bio, in which case we lock everything.
1428 static int mg_start(struct cache *cache, struct policy_work *op, struct bio *bio)
1440 mg->overwrite_bio = bio;
1442 if (!bio)
1560 dm_oblock_t oblock, struct bio *bio)
1569 mg->overwrite_bio = bio;
1578 * bio processing
1599 static void inc_hit_counter(struct cache *cache, struct bio *bio)
1601 atomic_inc(bio_data_dir(bio) == READ ?
1605 static void inc_miss_counter(struct cache *cache, struct bio *bio)
1607 atomic_inc(bio_data_dir(bio) == READ ?
1613 static int map_bio(struct cache *cache, struct bio *bio, dm_oblock_t block,
1622 rb = bio_detain_shared(cache, block, bio);
1634 data_dir = bio_data_dir(bio);
1636 if (optimisable_bio(cache, bio, block)) {
1643 bio_io_error(bio);
1648 bio_drop_shared_lock(cache, bio);
1650 mg_start(cache, op, bio);
1658 bio_io_error(bio);
1667 struct per_bio_data *pb = get_per_bio_data(bio);
1672 inc_miss_counter(cache, bio);
1674 accounted_begin(cache, bio);
1675 remap_to_origin_clear_discard(cache, bio, block);
1681 bio_endio(bio);
1688 inc_hit_counter(cache, bio);
1695 if (bio_data_dir(bio) == WRITE) {
1696 bio_drop_shared_lock(cache, bio);
1698 invalidate_start(cache, cblock, block, bio);
1700 remap_to_origin_clear_discard(cache, bio, block);
1702 if (bio_data_dir(bio) == WRITE && writethrough_mode(cache) &&
1704 remap_to_origin_and_cache(cache, bio, block, cblock);
1705 accounted_begin(cache, bio);
1707 remap_to_cache_dirty(cache, bio, block, cblock);
1714 if (bio->bi_opf & REQ_FUA) {
1719 accounted_complete(cache, bio);
1720 issue_after_commit(&cache->committer, bio);
1728 static bool process_bio(struct cache *cache, struct bio *bio)
1732 if (map_bio(cache, bio, get_bio_block(cache, bio), &commit_needed) == DM_MAPIO_REMAPPED)
1733 dm_submit_bio_remap(bio, NULL);
1771 static bool process_flush_bio(struct cache *cache, struct bio *bio)
1773 struct per_bio_data *pb = get_per_bio_data(bio);
1776 remap_to_origin(cache, bio);
1778 remap_to_cache(cache, bio, 0);
1780 issue_after_commit(&cache->committer, bio);
1784 static bool process_discard_bio(struct cache *cache, struct bio *bio)
1793 calc_discard_block_range(cache, bio, &b, &e);
1800 remap_to_origin(cache, bio);
1801 dm_submit_bio_remap(bio, NULL);
1803 bio_endio(bio);
1814 struct bio *bio;
1823 while ((bio = bio_list_pop(&bios))) {
1824 if (bio->bi_opf & REQ_PREFLUSH)
1825 commit_needed = process_flush_bio(cache, bio) || commit_needed;
1827 else if (bio_op(bio) == REQ_OP_DISCARD)
1828 commit_needed = process_discard_bio(cache, bio) || commit_needed;
1831 commit_needed = process_bio(cache, bio) || commit_needed;
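The loop above drains the deferred list once and classifies each bio (flush, discard, or normal), OR-ing the handlers' results so a single metadata commit covers the whole batch. Schematically (handle() stands in for the three process_*_bio() helpers):

#include <stdbool.h>
#include <stddef.h>

struct fake_bio {
	struct fake_bio *next;
	unsigned int opf;
};

/* returns true if the bio now waits on the committer */
static bool handle(struct fake_bio *bio)
{
	(void)bio;
	return true;
}

static bool drain_deferred(struct fake_bio *list)
{
	bool commit_needed = false;

	for (struct fake_bio *bio = list; bio; bio = bio->next)
		commit_needed = handle(bio) || commit_needed;

	return commit_needed;      /* caller schedules one commit for all */
}

Note the operand order in the accumulation: the handler sits on the left of ||, so short-circuit evaluation can never skip processing a bio once commit_needed is already true.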
1846 struct bio *bio;
1853 while ((bio = bio_list_pop(&bios))) {
1854 bio->bi_status = BLK_STS_DM_REQUEUE;
1855 bio_endio(bio);
2528 *error = "could not create bio prison";
2631 static int cache_map(struct dm_target *ti, struct bio *bio)
2637 dm_oblock_t block = get_bio_block(cache, bio);
2639 init_per_bio_data(bio);
2646 remap_to_origin(cache, bio);
2647 accounted_begin(cache, bio);
2651 if (discard_or_flush(bio)) {
2652 defer_bio(cache, bio);
2656 r = map_bio(cache, bio, block, &commit_needed);
2663 static int cache_end_io(struct dm_target *ti, struct bio *bio, blk_status_t *error)
2667 struct per_bio_data *pb = get_per_bio_data(bio);
2677 bio_drop_shared_lock(cache, bio);
2678 accounted_complete(cache, bio);