Lines matching defs:bio (bio handling in the device-mapper cache target, drivers/md/dm-cache-target.c); each entry below is prefixed with its line number in that file.
8 #include "dm-bio-prison-v2.h"
9 #include "dm-bio-record.h"
152 void (*issue_op)(struct bio *bio, void *context);
175 struct bio *bio;
201 while ((bio = bio_list_pop(&bios))) {
203 bio->bi_status = r;
204 bio_endio(bio);
206 b->issue_op(bio, b->issue_context);
213 void (*issue_op)(struct bio *bio, void *),
251 static void issue_after_commit(struct batcher *b, struct bio *bio)
257 bio_list_add(&b->bios, bio);
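
The lines above are the batcher: bios that must not be visible before the next metadata commit (FUA writes, flushes) are queued with issue_after_commit(), and once the commit finishes the queued list is drained, with each bio either failed with the commit's status or handed to the issue_op callback. A minimal sketch of that drain, assuming a simplified batcher struct; the sketch_ names are illustrative, not the driver's.

/* Minimal sketch of the issue-after-commit pattern. */
#include <linux/bio.h>
#include <linux/spinlock.h>

struct sketch_batcher {
	spinlock_t lock;
	struct bio_list bios;		/* bios waiting on the next commit */
	void (*issue_op)(struct bio *bio, void *context);
	void *issue_context;
};

/* Queue a bio until the metadata commit it depends on has landed. */
static void sketch_issue_after_commit(struct sketch_batcher *b, struct bio *bio)
{
	unsigned long flags;

	spin_lock_irqsave(&b->lock, flags);
	bio_list_add(&b->bios, bio);
	spin_unlock_irqrestore(&b->lock, flags);
	/* the real batcher also schedules the commit work here */
}

/* Called once the commit has finished with status r. */
static void sketch_commit_done(struct sketch_batcher *b, blk_status_t r)
{
	struct bio *bio;
	struct bio_list bios;
	unsigned long flags;

	bio_list_init(&bios);
	spin_lock_irqsave(&b->lock, flags);
	bio_list_merge(&bios, &b->bios);
	bio_list_init(&b->bios);
	spin_unlock_irqrestore(&b->lock, flags);

	while ((bio = bio_list_pop(&bios))) {
		if (r) {		/* commit failed: fail the bio */
			bio->bi_status = r;
			bio_endio(bio);
		} else			/* commit ok: hand it to the issuer */
			b->issue_op(bio, b->issue_context);
	}
}
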
281 * There are a couple of places where we let a bio run, but want to do some
289 static void dm_hook_bio(struct dm_hook_info *h, struct bio *bio,
292 h->bi_end_io = bio->bi_end_io;
294 bio->bi_end_io = bi_end_io;
295 bio->bi_private = bi_private;
298 static void dm_unhook_bio(struct dm_hook_info *h, struct bio *bio)
300 bio->bi_end_io = h->bi_end_io;
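
dm_hook_bio()/dm_unhook_bio() implement the "let the bio run, but do some work before its endio" idea from the comment above: the original bi_end_io is stashed in a dm_hook_info, a substitute completion (plus a private pointer) is installed, and the hook is removed again inside that substitute. A sketch of a consumer built on the helpers listed above; my_intercept_endio and my_submit_hooked are made-up names, and the original completion is assumed not to depend on bi_private, since only bi_end_io is restored.

/* Sketch: intercept a bio's completion using the hook helpers above. */
static void my_intercept_endio(struct bio *bio)
{
	struct dm_hook_info *h = bio->bi_private;	/* set by dm_hook_bio() */

	dm_unhook_bio(h, bio);		/* put the original bi_end_io back */

	/* ... bookkeeping that had to happen before the real completion ... */

	bio_endio(bio);			/* now run the original completion */
}

static void my_submit_hooked(struct dm_hook_info *h, struct bio *bio)
{
	dm_hook_bio(h, bio, my_intercept_endio, h);
	submit_bio_noacct(bio);
}
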
493 struct bio *overwrite_bio;
590 static unsigned lock_level(struct bio *bio)
592 return bio_data_dir(bio) == WRITE ?
598 * Per bio data
601 static struct per_bio_data *get_per_bio_data(struct bio *bio)
603 struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
608 static struct per_bio_data *init_per_bio_data(struct bio *bio)
610 struct per_bio_data *pb = get_per_bio_data(bio);
613 pb->req_nr = dm_bio_get_target_bio_nr(bio);
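
get_per_bio_data() is just dm_per_bio_data() over the scratch space device-mapper reserves in front of every bio sent to the target; the target asks for that space by setting ti->per_io_data_size in its constructor. A sketch for a hypothetical target, with struct my_pb standing in for the driver's struct per_bio_data.

#include <linux/device-mapper.h>

/* Hypothetical per-bio payload; the real struct per_bio_data carries
 * things like the tick flag, req_nr, the prison cell and an I/O length. */
struct my_pb {
	bool tick;
	unsigned req_nr;
};

static int my_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
	/* reserve room for struct my_pb in every bio mapped to this target */
	ti->per_io_data_size = sizeof(struct my_pb);
	return 0;
}

static int my_map(struct dm_target *ti, struct bio *bio)
{
	struct my_pb *pb = dm_per_bio_data(bio, sizeof(struct my_pb));

	pb->tick = false;
	pb->req_nr = dm_bio_get_target_bio_nr(bio);

	/* actual remapping elided */
	return DM_MAPIO_REMAPPED;
}
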
622 static void defer_bio(struct cache *cache, struct bio *bio)
625 bio_list_add(&cache->deferred_bios, bio);
643 static bool bio_detain_shared(struct cache *cache, dm_oblock_t oblock, struct bio *bio)
654 r = dm_cell_get_v2(cache->prison, &key, lock_level(bio), bio, cell_prealloc, &cell);
666 pb = get_per_bio_data(bio);
781 static void remap_to_origin(struct cache *cache, struct bio *bio)
783 bio_set_dev(bio, cache->origin_dev->bdev);
786 static void remap_to_cache(struct cache *cache, struct bio *bio,
789 sector_t bi_sector = bio->bi_iter.bi_sector;
792 bio_set_dev(bio, cache->cache_dev->bdev);
794 bio->bi_iter.bi_sector =
798 bio->bi_iter.bi_sector =
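
Remapping only changes where the bio is pointed: bio_set_dev() switches the destination block device, and for the cache device the sector is rebuilt from the cache block number plus the bio's offset within that block (a shift and mask when the block size is a power of two, sector_div() otherwise). A sketch of the power-of-two case; cache_bdev and sectors_per_block_shift are passed in explicitly here rather than read from the cache struct.

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Sketch: remap a bio onto a given block of the cache device, assuming a
 * power-of-two block size (sectors_per_block_shift is its log2). */
static void sketch_remap_to_cache(struct block_device *cache_bdev,
				  struct bio *bio, sector_t cache_block,
				  unsigned sectors_per_block_shift)
{
	sector_t bi_sector = bio->bi_iter.bi_sector;
	sector_t sectors_per_block = (sector_t)1 << sectors_per_block_shift;

	bio_set_dev(bio, cache_bdev);
	bio->bi_iter.bi_sector =
		(cache_block << sectors_per_block_shift) |
		(bi_sector & (sectors_per_block - 1));
}
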
803 static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio)
808 if (cache->need_tick_bio && !op_is_flush(bio->bi_opf) &&
809 bio_op(bio) != REQ_OP_DISCARD) {
810 pb = get_per_bio_data(bio);
817 static void __remap_to_origin_clear_discard(struct cache *cache, struct bio *bio,
821 check_if_tick_bio_needed(cache, bio);
822 remap_to_origin(cache, bio);
823 if (bio_data_dir(bio) == WRITE)
827 static void remap_to_origin_clear_discard(struct cache *cache, struct bio *bio,
831 __remap_to_origin_clear_discard(cache, bio, oblock, true);
834 static void remap_to_cache_dirty(struct cache *cache, struct bio *bio,
837 check_if_tick_bio_needed(cache, bio);
838 remap_to_cache(cache, bio, cblock);
839 if (bio_data_dir(bio) == WRITE) {
845 static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio)
847 sector_t block_nr = bio->bi_iter.bi_sector;
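
get_bio_block() is the inverse calculation: from the bio's starting sector on the origin device to the origin block it falls in, again shifting when the block size is a power of two. A small sketch under the same power-of-two assumption.

/* Sketch: which origin block does this bio start in? */
static sector_t sketch_get_bio_block(struct bio *bio,
				     unsigned sectors_per_block_shift)
{
	return bio->bi_iter.bi_sector >> sectors_per_block_shift;
}
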
857 static bool accountable_bio(struct cache *cache, struct bio *bio)
859 return bio_op(bio) != REQ_OP_DISCARD;
862 static void accounted_begin(struct cache *cache, struct bio *bio)
866 if (accountable_bio(cache, bio)) {
867 pb = get_per_bio_data(bio);
868 pb->len = bio_sectors(bio);
873 static void accounted_complete(struct cache *cache, struct bio *bio)
875 struct per_bio_data *pb = get_per_bio_data(bio);
880 static void accounted_request(struct cache *cache, struct bio *bio)
882 accounted_begin(cache, bio);
883 submit_bio_noacct(bio);
886 static void issue_op(struct bio *bio, void *context)
889 accounted_request(cache, bio);
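
accounted_begin()/accounted_complete() bracket every bio that should appear in the target's I/O statistics: the length in sectors is noted when the bio is issued and reported back when it completes, and accounted_request() plus the issue_op() callback above are thin wrappers that add this bookkeeping around submit_bio_noacct(). A sketch that keeps a plain in-flight sector counter instead of the driver's tracker; the saved_len argument stands in for the length stored in per-bio data.

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/atomic.h>

/* Sketch: track in-flight sectors around bio submission. */
static atomic_t sketch_in_flight_sectors = ATOMIC_INIT(0);

static void sketch_accounted_begin(struct bio *bio, unsigned *saved_len)
{
	*saved_len = bio_sectors(bio);	/* remember the length at issue time */
	atomic_add(*saved_len, &sketch_in_flight_sectors);
}

static void sketch_accounted_complete(unsigned saved_len)
{
	atomic_sub(saved_len, &sketch_in_flight_sectors);
}

static void sketch_accounted_request(struct bio *bio, unsigned *saved_len)
{
	sketch_accounted_begin(bio, saved_len);
	submit_bio_noacct(bio);
}
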
894 * to both the cache and origin devices. Clone the bio and send them in parallel.
896 static void remap_to_origin_and_cache(struct cache *cache, struct bio *bio,
899 struct bio *origin_bio = bio_clone_fast(bio, GFP_NOIO, &cache->bs);
903 bio_chain(origin_bio, bio);
911 remap_to_cache(cache, bio, cblock);
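
Writethrough writes have to reach both devices, and the pattern here is clone-and-chain: bio_clone_fast() makes a clone for the origin, bio_chain() ties it to the original so the original cannot complete until the clone has, then the clone goes to the origin and the remapped original to the cache. A sketch of that shape using the interfaces seen above (newer kernels spell the clone call bio_alloc_clone()); error handling for a failed clone is elided.

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Sketch: issue one write to both the origin and the cache device.
 * bs is a bio_set the target reserved for cloning. */
static void sketch_write_both(struct bio *bio,
			      struct block_device *origin_bdev,
			      struct block_device *cache_bdev,
			      sector_t cache_sector, struct bio_set *bs)
{
	struct bio *origin_bio = bio_clone_fast(bio, GFP_NOIO, bs);

	/* the original bio now completes only after origin_bio does */
	bio_chain(origin_bio, bio);

	bio_set_dev(origin_bio, origin_bdev);
	submit_bio_noacct(origin_bio);

	bio_set_dev(bio, cache_bdev);
	bio->bi_iter.bi_sector = cache_sector;
	submit_bio_noacct(bio);
}
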
1070 static bool discard_or_flush(struct bio *bio)
1072 return bio_op(bio) == REQ_OP_DISCARD || op_is_flush(bio->bi_opf);
1075 static void calc_discard_block_range(struct cache *cache, struct bio *bio,
1078 sector_t sb = bio->bi_iter.bi_sector;
1079 sector_t se = bio_end_sector(bio);
1125 static bool bio_writes_complete_block(struct cache *cache, struct bio *bio)
1127 return (bio_data_dir(bio) == WRITE) &&
1128 (bio->bi_iter.bi_size == (cache->sectors_per_block << SECTOR_SHIFT));
1131 static bool optimisable_bio(struct cache *cache, struct bio *bio, dm_oblock_t block)
1134 (is_discarded_oblock(cache, block) || bio_writes_complete_block(cache, bio));
1179 static void bio_drop_shared_lock(struct cache *cache, struct bio *bio)
1181 struct per_bio_data *pb = get_per_bio_data(bio);
1188 static void overwrite_endio(struct bio *bio)
1190 struct dm_cache_migration *mg = bio->bi_private;
1192 struct per_bio_data *pb = get_per_bio_data(bio);
1194 dm_unhook_bio(&pb->hook_info, bio);
1196 if (bio->bi_status)
1197 mg->k.input = bio->bi_status;
1205 struct bio *bio = mg->overwrite_bio;
1206 struct per_bio_data *pb = get_per_bio_data(bio);
1208 dm_hook_bio(&pb->hook_info, bio, overwrite_endio, mg);
1211 * The overwrite bio is part of the copy operation, as such it does
1215 remap_to_cache(mg->cache, bio, mg->op->cblock);
1217 remap_to_origin(mg->cache, bio);
1220 accounted_request(mg->cache, bio);
1228 * 3) copy or issue overwrite bio
1427 * No exclusive lock was held when we last checked if the bio
1467 * Unless we're using an overwrite bio, in which case we lock
1491 static int mg_start(struct cache *cache, struct policy_work *op, struct bio *bio)
1503 mg->overwrite_bio = bio;
1505 if (!bio)
1618 dm_oblock_t oblock, struct bio *bio)
1627 mg->overwrite_bio = bio;
1635 * bio processing
1655 static void inc_hit_counter(struct cache *cache, struct bio *bio)
1657 atomic_inc(bio_data_dir(bio) == READ ?
1661 static void inc_miss_counter(struct cache *cache, struct bio *bio)
1663 atomic_inc(bio_data_dir(bio) == READ ?
1669 static int map_bio(struct cache *cache, struct bio *bio, dm_oblock_t block,
1678 rb = bio_detain_shared(cache, block, bio);
1690 data_dir = bio_data_dir(bio);
1692 if (optimisable_bio(cache, bio, block)) {
1699 bio_io_error(bio);
1704 bio_drop_shared_lock(cache, bio);
1706 mg_start(cache, op, bio);
1714 bio_io_error(bio);
1723 struct per_bio_data *pb = get_per_bio_data(bio);
1728 inc_miss_counter(cache, bio);
1730 accounted_begin(cache, bio);
1731 remap_to_origin_clear_discard(cache, bio, block);
1737 bio_endio(bio);
1744 inc_hit_counter(cache, bio);
1751 if (bio_data_dir(bio) == WRITE) {
1752 bio_drop_shared_lock(cache, bio);
1754 invalidate_start(cache, cblock, block, bio);
1756 remap_to_origin_clear_discard(cache, bio, block);
1758 if (bio_data_dir(bio) == WRITE && writethrough_mode(cache) &&
1760 remap_to_origin_and_cache(cache, bio, block, cblock);
1761 accounted_begin(cache, bio);
1763 remap_to_cache_dirty(cache, bio, block, cblock);
1770 if (bio->bi_opf & REQ_FUA) {
1775 accounted_complete(cache, bio);
1776 issue_after_commit(&cache->committer, bio);
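
map_bio() is where the listing's remap helpers come together: detain a shared cell on the block, ask the policy whether the block is cached, remap misses to the origin, remap writethrough write hits to both devices, remap other hits to the cache, and route REQ_FUA bios through the batcher so they only complete after a metadata commit. A heavily simplified sketch of that decision; struct sketch_cache, sketch_policy_lookup() and the other sketch_ helpers are stand-ins, and the migration (promotion/demotion), dirty-block and invalidation paths are elided.

/* Heavily simplified sketch of the hit/miss decision in map_bio(). */
static int sketch_map_bio(struct sketch_cache *c, struct bio *bio,
			  bool *commit_needed)
{
	uint32_t cblock;
	bool hit = sketch_policy_lookup(c, bio, &cblock);	/* stand-in */

	*commit_needed = false;

	if (!hit) {
		/* miss: serve the bio from the origin device */
		sketch_remap_to_origin(c, bio);
	} else if (bio_data_dir(bio) == WRITE && sketch_writethrough(c)) {
		/* writethrough write hit: send it to both devices */
		sketch_remap_to_origin_and_cache(c, bio, cblock);
	} else {
		/* hit: serve the bio from the cache device */
		sketch_remap_to_cache_block(c, bio, cblock);
	}

	if (bio->bi_opf & REQ_FUA) {
		/* FUA must not complete before the metadata commit lands */
		sketch_batch_until_commit(c, bio);	/* queue on the batcher */
		*commit_needed = true;
		return DM_MAPIO_SUBMITTED;
	}

	return DM_MAPIO_REMAPPED;
}
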
1784 static bool process_bio(struct cache *cache, struct bio *bio)
1788 if (map_bio(cache, bio, get_bio_block(cache, bio), &commit_needed) == DM_MAPIO_REMAPPED)
1789 submit_bio_noacct(bio);
1827 static bool process_flush_bio(struct cache *cache, struct bio *bio)
1829 struct per_bio_data *pb = get_per_bio_data(bio);
1832 remap_to_origin(cache, bio);
1834 remap_to_cache(cache, bio, 0);
1836 issue_after_commit(&cache->committer, bio);
1840 static bool process_discard_bio(struct cache *cache, struct bio *bio)
1847 calc_discard_block_range(cache, bio, &b, &e);
1854 remap_to_origin(cache, bio);
1855 submit_bio_noacct(bio);
1857 bio_endio(bio);
1868 struct bio *bio;
1877 while ((bio = bio_list_pop(&bios))) {
1878 if (bio->bi_opf & REQ_PREFLUSH)
1879 commit_needed = process_flush_bio(cache, bio) || commit_needed;
1881 else if (bio_op(bio) == REQ_OP_DISCARD)
1882 commit_needed = process_discard_bio(cache, bio) || commit_needed;
1885 commit_needed = process_bio(cache, bio) || commit_needed;
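
The worker drains the deferred list under the lock into a local list and dispatches each bio by kind: preflush bios and discards get their own handlers, everything else goes through process_bio()/map_bio(), and each handler reports whether a metadata commit is now needed. A sketch of that dispatch loop; the sketch_process_*() calls stand in for the real handlers and sketch_schedule_commit() for kicking the committer.

#include <linux/bio.h>
#include <linux/spinlock.h>

struct sketch_cache {
	spinlock_t lock;
	struct bio_list deferred_bios;
};

/* Stand-ins for the real per-kind handlers; each returns true if a
 * metadata commit is needed as a result of handling the bio. */
static bool sketch_process_flush(struct sketch_cache *c, struct bio *bio);
static bool sketch_process_discard(struct sketch_cache *c, struct bio *bio);
static bool sketch_process_bio(struct sketch_cache *c, struct bio *bio);
static void sketch_schedule_commit(struct sketch_cache *c);

static void sketch_process_deferred_bios(struct sketch_cache *c)
{
	bool commit_needed = false;
	struct bio *bio;
	struct bio_list bios;

	bio_list_init(&bios);

	spin_lock_irq(&c->lock);
	bio_list_merge(&bios, &c->deferred_bios);
	bio_list_init(&c->deferred_bios);
	spin_unlock_irq(&c->lock);

	while ((bio = bio_list_pop(&bios))) {
		if (bio->bi_opf & REQ_PREFLUSH)
			commit_needed = sketch_process_flush(c, bio) || commit_needed;
		else if (bio_op(bio) == REQ_OP_DISCARD)
			commit_needed = sketch_process_discard(c, bio) || commit_needed;
		else
			commit_needed = sketch_process_bio(c, bio) || commit_needed;
	}

	if (commit_needed)
		sketch_schedule_commit(c);
}
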
1899 struct bio *bio;
1906 while ((bio = bio_list_pop(&bios))) {
1907 bio->bi_status = BLK_STS_DM_REQUEUE;
1908 bio_endio(bio);
2579 *error = "could not create bio prison";
2682 static int cache_map(struct dm_target *ti, struct bio *bio)
2688 dm_oblock_t block = get_bio_block(cache, bio);
2690 init_per_bio_data(bio);
2697 remap_to_origin(cache, bio);
2698 accounted_begin(cache, bio);
2702 if (discard_or_flush(bio)) {
2703 defer_bio(cache, bio);
2707 r = map_bio(cache, bio, block, &commit_needed);
2714 static int cache_end_io(struct dm_target *ti, struct bio *bio, blk_status_t *error)
2718 struct per_bio_data *pb = get_per_bio_data(bio);
2728 bio_drop_shared_lock(cache, bio);
2729 accounted_complete(cache, bio);
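
Taken together, cache_map() is the target's entry point: initialise the per-bio data, send bios that fall past the end of the origin straight to the origin device, defer flushes and discards to the worker, and map everything else inline via map_bio(); cache_end_io() then undoes what mapping set up, dropping the shared prison cell and closing the accounting window. A sketch of that map/end_io shape for a hypothetical target; all sketch_ helpers are illustrative.

#include <linux/device-mapper.h>

static int sketch_map(struct dm_target *ti, struct bio *bio)
{
	struct sketch_cache *c = ti->private;	/* target context */
	bool commit_needed;
	int r;

	sketch_init_per_bio_data(bio);

	if (op_is_flush(bio->bi_opf) || bio_op(bio) == REQ_OP_DISCARD) {
		sketch_defer_bio(c, bio);	/* handled by the worker */
		return DM_MAPIO_SUBMITTED;
	}

	r = sketch_map_bio(c, bio, &commit_needed);
	if (commit_needed)
		sketch_schedule_commit(c);

	return r;
}

static int sketch_end_io(struct dm_target *ti, struct bio *bio,
			 blk_status_t *error)
{
	struct sketch_cache *c = ti->private;

	sketch_drop_shared_lock(c, bio);	/* release the prison cell */
	sketch_accounted_end(c, bio);		/* close the accounting window */

	return DM_ENDIO_DONE;
}
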