Lines matching defs:bio in drivers/md/dm-thin.c (device-mapper thin provisioning target)

8 #include "dm-bio-prison-v1.h"
223 typedef void (*process_bio_fn)(struct thin_c *tc, struct bio *bio);
286 struct bio flush_bio;
383 struct bio *parent_bio;
384 struct bio *bio;
387 static void begin_discard(struct discard_op *op, struct thin_c *tc, struct bio *parent)
394 op->bio = NULL;
404 GFP_NOWAIT, 0, &op->bio);
409 if (op->bio) {
414 bio_chain(op->bio, op->parent_bio);
415 bio_set_op_attrs(op->bio, REQ_OP_DISCARD, 0);
416 submit_bio(op->bio);
443 static int bio_detain(struct pool *pool, struct dm_cell_key *key, struct bio *bio,
455 r = dm_bio_detain(pool->prison, key, bio, cell_prealloc, cell_result);
602 struct bio *bio;
604 while ((bio = bio_list_pop(bios))) {
605 bio->bi_status = error;
606 bio_endio(bio);
677 static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio)
680 sector_t block_nr = bio->bi_iter.bi_sector;
691 * Returns the _complete_ blocks that this bio covers.
693 static void get_bio_block_range(struct thin_c *tc, struct bio *bio,
697 sector_t b = bio->bi_iter.bi_sector;
698 sector_t e = b + (bio->bi_iter.bi_size >> SECTOR_SHIFT);
711 /* Can happen if the bio is within a single block. */
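
The get_bio_block()/get_bio_block_range() entries above (lines 677-711) turn a bio's sector range into thin-block numbers; the range variant keeps only the blocks the bio covers completely. A minimal userspace sketch of that rounding, assuming a power-of-two block size (the function name and example values below are illustrative, not taken from the source; the kernel also handles non-power-of-two sizes via sector_div()):

#include <stdint.h>
#include <stdio.h>

#define SECTOR_SHIFT 9

/*
 * Sketch of the rounding done by get_bio_block_range(): the begin
 * block is rounded up and the end block rounded down, so only blocks
 * completely covered by the bio fall inside [*begin, *end).
 */
static void bio_block_range(uint64_t bi_sector, uint32_t bi_size_bytes,
			    uint64_t sectors_per_block,
			    uint64_t *begin, uint64_t *end)
{
	uint64_t b = bi_sector;
	uint64_t e = b + (bi_size_bytes >> SECTOR_SHIFT);

	b += sectors_per_block - 1;	/* round the start up */
	b /= sectors_per_block;
	e /= sectors_per_block;		/* the end rounds down */

	if (e < b)
		e = b;			/* bio sits inside a single block */

	*begin = b;
	*end = e;
}

int main(void)
{
	uint64_t begin, end;

	/* 256 KiB discard at sector 64, with 128-sector (64 KiB) blocks. */
	bio_block_range(64, 256 * 1024, 128, &begin, &end);
	printf("complete blocks: [%llu, %llu)\n",
	       (unsigned long long)begin, (unsigned long long)end);	/* [1, 4) */
	return 0;
}
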
718 static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block)
721 sector_t bi_sector = bio->bi_iter.bi_sector;
723 bio_set_dev(bio, tc->pool_dev->bdev);
725 bio->bi_iter.bi_sector =
729 bio->bi_iter.bi_sector = (block * pool->sectors_per_block) +
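
remap() (lines 718-731) redirects the bio to the pool's data device, keeping its offset within the block but substituting the allocated data block. A sketch of the power-of-two arithmetic with stand-in names (the kernel falls back to sector_div() for other block sizes):

#include <stdint.h>
#include <stdio.h>

/* Keep the in-block offset, move it to the allocated data block. */
static uint64_t remap_sector(uint64_t bi_sector, uint64_t data_block,
			     unsigned sectors_per_block_shift)
{
	uint64_t sectors_per_block = 1ULL << sectors_per_block_shift;
	uint64_t offset_in_block = bi_sector & (sectors_per_block - 1);

	return (data_block << sectors_per_block_shift) | offset_in_block;
}

int main(void)
{
	/* Virtual sector 300 with 128-sector blocks, mapped to data block 7. */
	printf("remapped sector: %llu\n",
	       (unsigned long long)remap_sector(300, 7, 7));	/* 940 */
	return 0;
}
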
733 static void remap_to_origin(struct thin_c *tc, struct bio *bio)
735 bio_set_dev(bio, tc->origin_dev->bdev);
738 static int bio_triggers_commit(struct thin_c *tc, struct bio *bio)
740 return op_is_flush(bio->bi_opf) &&
744 static void inc_all_io_entry(struct pool *pool, struct bio *bio)
748 if (bio_op(bio) == REQ_OP_DISCARD)
751 h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
755 static void issue(struct thin_c *tc, struct bio *bio)
759 if (!bio_triggers_commit(tc, bio)) {
760 submit_bio_noacct(bio);
765 * Complete bio with an error if earlier I/O caused changes to
770 bio_io_error(bio);
779 bio_list_add(&pool->deferred_flush_bios, bio);
783 static void remap_to_origin_and_issue(struct thin_c *tc, struct bio *bio)
785 remap_to_origin(tc, bio);
786 issue(tc, bio);
789 static void remap_and_issue(struct thin_c *tc, struct bio *bio,
792 remap(tc, bio, block);
793 issue(tc, bio);
821 * If the bio covers the whole area of a block then we can avoid
822 * zeroing or copying. Instead this bio is hooked. The bio will
824 * the bio twice.
826 struct bio *bio;
858 static void overwrite_endio(struct bio *bio)
860 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
863 bio->bi_end_io = m->saved_bi_end_io;
865 m->status = bio->bi_status;
898 static void thin_defer_bio(struct thin_c *tc, struct bio *bio);
910 struct bio *bio;
912 while ((bio = bio_list_pop(&cell->bios))) {
913 if (op_is_flush(bio->bi_opf) || bio_op(bio) == REQ_OP_DISCARD)
914 bio_list_add(&info->defer_bios, bio);
916 inc_all_io_entry(info->tc->pool, bio);
919 * We can't issue the bios with the bio prison lock
923 bio_list_add(&info->issue_bios, bio);
932 struct bio *bio;
947 while ((bio = bio_list_pop(&info.defer_bios)))
948 thin_defer_bio(tc, bio);
950 while ((bio = bio_list_pop(&info.issue_bios)))
951 remap_and_issue(info.tc, bio, block);
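
inc_remap_and_issue_cell() and its helper (lines 898-951) visit every bio parked in a prison cell while the prison lock is held, but only sort them into defer/issue lists there; as the comment at line 919 notes, the bios cannot be issued with the lock held, so submission happens after the visit returns. A stand-in userspace sketch of that collect-under-lock, act-after-unlock shape (a mutex and arrays stand in for the prison lock and bio lists):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t prison_lock = PTHREAD_MUTEX_INITIALIZER;

struct cell {
	int needs_defer[4];	/* 1 = hand to worker, 0 = remap and issue */
	int nr;
};

/* Sort the cell's bios into two local lists; do no I/O under the lock. */
static void visit_cell(struct cell *c, int *defer, int *ndefer,
		       int *issue, int *nissue)
{
	int i;

	pthread_mutex_lock(&prison_lock);
	for (i = 0; i < c->nr; i++) {
		if (c->needs_defer[i])
			defer[(*ndefer)++] = i;
		else
			issue[(*nissue)++] = i;
	}
	c->nr = 0;
	pthread_mutex_unlock(&prison_lock);
}

int main(void)
{
	struct cell c = { .needs_defer = { 0, 1, 0, 0 }, .nr = 4 };
	int defer[4], issue[4], ndefer = 0, nissue = 0, i;

	visit_cell(&c, defer, &ndefer, issue, &nissue);

	/* Only now, with the lock dropped, is it safe to do the real work. */
	for (i = 0; i < ndefer; i++)
		printf("bio %d deferred to the worker\n", defer[i]);
	for (i = 0; i < nissue; i++)
		printf("bio %d remapped and issued\n", issue[i]);
	return 0;
}
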
961 static void complete_overwrite_bio(struct thin_c *tc, struct bio *bio)
966 * If the bio has the REQ_FUA flag set we must commit the metadata
969 if (!bio_triggers_commit(tc, bio)) {
970 bio_endio(bio);
975 * Complete bio with an error if earlier I/O caused changes to the
980 bio_io_error(bio);
989 bio_list_add(&pool->deferred_flush_completions, bio);
997 struct bio *bio = m->bio;
1019 * If we are processing a write bio that completely covers the block,
1023 if (bio) {
1025 complete_overwrite_bio(tc, bio);
1049 bio_io_error(m->bio);
1055 bio_endio(m->bio);
1067 bio_io_error(m->bio);
1069 bio_endio(m->bio);
1078 struct bio *discard_parent)
1137 static void passdown_endio(struct bio *bio)
1143 queue_passdown_pt2(bio->bi_private);
1144 bio_put(bio);
1152 struct bio *discard_parent;
1163 bio_io_error(m->bio);
1176 bio_io_error(m->bio);
1184 DMWARN("%s: unable to allocate top level discard bio for passdown. Skipping passdown.",
1218 bio_io_error(m->bio);
1220 bio_endio(m->bio);
1242 * Deferred bio jobs.
1244 static int io_overlaps_block(struct pool *pool, struct bio *bio)
1246 return bio->bi_iter.bi_size ==
1250 static int io_overwrites_block(struct pool *pool, struct bio *bio)
1252 return (bio_data_dir(bio) == WRITE) &&
1253 io_overlaps_block(pool, bio);
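
io_overlaps_block()/io_overwrites_block() (lines 1244-1253) implement the optimisation described at lines 821-824: a WRITE whose payload is exactly one block long can be remapped straight onto the newly allocated data block, skipping the zero/copy step. A hedged userspace sketch with stand-in types:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SECTOR_SHIFT 9

struct fake_bio {
	bool		is_write;
	uint32_t	bi_size;	/* payload size in bytes */
};

/* True when the write's payload is exactly one thin block long. */
static bool overwrites_whole_block(const struct fake_bio *bio,
				   uint64_t sectors_per_block)
{
	return bio->is_write &&
	       bio->bi_size == (sectors_per_block << SECTOR_SHIFT);
}

int main(void)
{
	struct fake_bio full = { .is_write = true, .bi_size = 64 * 1024 };
	struct fake_bio partial = { .is_write = true, .bi_size = 4 * 1024 };

	/* 128-sector (64 KiB) blocks. */
	printf("full block write:    %d\n", overwrites_whole_block(&full, 128));
	printf("partial block write: %d\n", overwrites_whole_block(&partial, 128));
	return 0;
}
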
1256 static void save_and_set_endio(struct bio *bio, bio_end_io_t **save,
1259 *save = bio->bi_end_io;
1260 bio->bi_end_io = fn;
1281 m->bio = NULL;
1300 static void remap_and_issue_overwrite(struct thin_c *tc, struct bio *bio,
1305 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
1308 m->bio = bio;
1309 save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
1310 inc_all_io_entry(pool, bio);
1311 remap_and_issue(tc, bio, data_begin);
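
remap_and_issue_overwrite() (lines 1300-1311), together with save_and_set_endio() and overwrite_endio() (lines 858-865, 1256-1260), temporarily replaces the bio's completion callback so the mapping can capture the write's status before the original callback runs. A stand-in userspace sketch of that save/replace/restore pattern (the real code finds its mapping through dm_per_bio_data() rather than a global, and queues it for the pool worker):

#include <stdio.h>

struct fake_bio;
typedef void (*endio_fn)(struct fake_bio *);

struct fake_bio {
	int		status;
	endio_fn	bi_end_io;
};

struct fake_mapping {
	int		status;		/* captured from the bio */
	endio_fn	saved_bi_end_io;
} mapping;

static void caller_endio(struct fake_bio *bio)
{
	printf("original endio sees status %d\n", bio->status);
}

/* Temporary hook: record the status, put the original callback back. */
static void overwrite_endio_sketch(struct fake_bio *bio)
{
	bio->bi_end_io = mapping.saved_bi_end_io;
	mapping.status = bio->status;
	/* the real code now hands the mapping to the pool worker */
}

static void hook_endio(struct fake_bio *bio, struct fake_mapping *m)
{
	m->saved_bi_end_io = bio->bi_end_io;	/* save_and_set_endio() */
	bio->bi_end_io = overwrite_endio_sketch;
}

int main(void)
{
	struct fake_bio bio = { .status = 0, .bi_end_io = caller_endio };

	hook_endio(&bio, &mapping);
	bio.bi_end_io(&bio);	/* the data write "completes": hook runs */
	bio.bi_end_io(&bio);	/* later, the original callback runs */
	return 0;
}
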
1320 struct dm_bio_prison_cell *cell, struct bio *bio,
1346 * bio immediately. Otherwise we use kcopyd to clone the data first.
1348 if (io_overwrites_block(pool, bio))
1349 remap_and_issue_overwrite(tc, bio, data_dest, m);
1380 struct dm_bio_prison_cell *cell, struct bio *bio)
1383 data_origin, data_dest, cell, bio,
1389 struct bio *bio)
1403 * zeroing pre-existing data, we can issue the bio immediately.
1407 if (io_overwrites_block(pool, bio))
1408 remap_and_issue_overwrite(tc, bio, data_block, m);
1418 struct dm_bio_prison_cell *cell, struct bio *bio)
1426 virt_block, data_dest, cell, bio,
1431 virt_block, data_dest, cell, bio,
1435 schedule_zero(tc, virt_block, data_dest, cell, bio);
1589 static void retry_on_resume(struct bio *bio)
1591 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
1595 bio_list_add(&tc->retry_on_resume_list, bio);
1606 DMERR_LIMIT("bio unserviceable, yet pool is in PM_WRITE mode");
1618 DMERR_LIMIT("bio unserviceable, yet pool has an unknown mode");
1623 static void handle_unserviceable_bio(struct pool *pool, struct bio *bio)
1628 bio->bi_status = error;
1629 bio_endio(bio);
1631 retry_on_resume(bio);
1636 struct bio *bio;
1649 while ((bio = bio_list_pop(&bios)))
1650 retry_on_resume(bio);
1667 m->bio = virt_cell->holder;
1674 struct bio *bio)
1718 m->bio = bio;
1721 * The parent bio must not complete before sub discard bios are
1728 bio_inc_remaining(bio);
1738 struct bio *bio = virt_cell->holder;
1739 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
1742 * The virt_cell will only get freed once the origin bio completes.
1747 break_up_discard_bio(tc, virt_cell->key.block_begin, virt_cell->key.block_end, bio);
1750 * We complete the bio now, knowing that the bi_remaining field
1754 bio_endio(bio);
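
break_up_discard_bio() and process_discard_cell_passdown() (lines 1674-1754) lean on the bio's remaining count: bio_inc_remaining() at line 1728 takes one extra reference per sub-discard, so the bio_endio() at line 1754 cannot complete the parent until every sub-discard has also dropped its reference. A stand-in sketch of that counting, with a plain int in place of the kernel's atomic counter:

#include <stdio.h>

struct fake_bio {
	int remaining;
	int completed;
};

static void inc_remaining(struct fake_bio *bio)
{
	bio->remaining++;
}

static void endio(struct fake_bio *bio)
{
	if (--bio->remaining == 0) {
		bio->completed = 1;
		printf("parent discard completed\n");
	}
}

int main(void)
{
	struct fake_bio parent = { .remaining = 1, .completed = 0 };
	int i;

	/* break_up_discard_bio(): one extra reference per sub-discard. */
	for (i = 0; i < 3; i++)
		inc_remaining(&parent);

	/* process_discard_cell_passdown(): drop the submitter's reference. */
	endio(&parent);

	/* Each sub-discard drops its reference as it finishes. */
	for (i = 0; i < 3; i++)
		endio(&parent);

	return 0;
}
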
1757 static void process_discard_bio(struct thin_c *tc, struct bio *bio)
1763 get_bio_block_range(tc, bio, &begin, &end);
1768 bio_endio(bio);
1773 if (bio_detain(tc->pool, &virt_key, bio, &virt_cell))
1786 static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
1799 data_block, cell, bio);
1818 struct bio *bio;
1820 while ((bio = bio_list_pop(&cell->bios))) {
1821 if (bio_data_dir(bio) == WRITE || op_is_flush(bio->bi_opf) ||
1822 bio_op(bio) == REQ_OP_DISCARD)
1823 bio_list_add(&info->defer_bios, bio);
1825 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
1828 inc_all_io_entry(info->tc->pool, bio);
1829 bio_list_add(&info->issue_bios, bio);
1838 struct bio *bio;
1848 while ((bio = bio_list_pop(&info.defer_bios)))
1849 thin_defer_bio(tc, bio);
1851 while ((bio = bio_list_pop(&info.issue_bios)))
1852 remap_and_issue(tc, bio, block);
1855 static void process_shared_bio(struct thin_c *tc, struct bio *bio,
1869 if (bio_detain(pool, &key, bio, &data_cell)) {
1874 if (bio_data_dir(bio) == WRITE && bio->bi_iter.bi_size) {
1875 break_sharing(tc, bio, block, &key, lookup_result, data_cell);
1878 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
1881 inc_all_io_entry(pool, bio);
1882 remap_and_issue(tc, bio, lookup_result->block);
1889 static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block,
1899 if (!bio->bi_iter.bi_size) {
1900 inc_all_io_entry(pool, bio);
1903 remap_and_issue(tc, bio, 0);
1910 if (bio_data_dir(bio) == READ) {
1911 zero_fill_bio(bio);
1913 bio_endio(bio);
1921 schedule_external_copy(tc, block, data_block, cell, bio);
1923 schedule_zero(tc, block, data_block, cell, bio);
1942 struct bio *bio = cell->holder;
1943 dm_block_t block = get_bio_block(tc, bio);
1955 process_shared_bio(tc, bio, block, &lookup_result, cell);
1957 inc_all_io_entry(pool, bio);
1958 remap_and_issue(tc, bio, lookup_result.block);
1964 if (bio_data_dir(bio) == READ && tc->origin_dev) {
1965 inc_all_io_entry(pool, bio);
1968 if (bio_end_sector(bio) <= tc->origin_size)
1969 remap_to_origin_and_issue(tc, bio);
1971 else if (bio->bi_iter.bi_sector < tc->origin_size) {
1972 zero_fill_bio(bio);
1973 bio->bi_iter.bi_size = (tc->origin_size - bio->bi_iter.bi_sector) << SECTOR_SHIFT;
1974 remap_to_origin_and_issue(tc, bio);
1977 zero_fill_bio(bio);
1978 bio_endio(bio);
1981 provision_block(tc, bio, block, cell);
1988 bio_io_error(bio);
1993 static void process_bio(struct thin_c *tc, struct bio *bio)
1996 dm_block_t block = get_bio_block(tc, bio);
2005 if (bio_detain(pool, &key, bio, &cell))
2011 static void __process_bio_read_only(struct thin_c *tc, struct bio *bio,
2015 int rw = bio_data_dir(bio);
2016 dm_block_t block = get_bio_block(tc, bio);
2022 if (lookup_result.shared && (rw == WRITE) && bio->bi_iter.bi_size) {
2023 handle_unserviceable_bio(tc->pool, bio);
2027 inc_all_io_entry(tc->pool, bio);
2028 remap_and_issue(tc, bio, lookup_result.block);
2038 handle_unserviceable_bio(tc->pool, bio);
2043 inc_all_io_entry(tc->pool, bio);
2044 remap_to_origin_and_issue(tc, bio);
2048 zero_fill_bio(bio);
2049 bio_endio(bio);
2057 bio_io_error(bio);
2062 static void process_bio_read_only(struct thin_c *tc, struct bio *bio)
2064 __process_bio_read_only(tc, bio, NULL);
2072 static void process_bio_success(struct thin_c *tc, struct bio *bio)
2074 bio_endio(bio);
2077 static void process_bio_fail(struct thin_c *tc, struct bio *bio)
2079 bio_io_error(bio);
2105 static void __thin_bio_rb_add(struct thin_c *tc, struct bio *bio)
2109 sector_t bi_sector = bio->bi_iter.bi_sector;
2123 pbd = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
2132 struct bio *bio;
2136 bio = thin_bio(pbd);
2138 bio_list_add(&tc->deferred_bio_list, bio);
2147 struct bio *bio;
2155 while ((bio = bio_list_pop(&bios)))
2156 __thin_bio_rb_add(tc, bio);
2169 struct bio *bio;
2197 while ((bio = bio_list_pop(&bios))) {
2200 * this bio might require one, we pause until there are some
2205 bio_list_add(&tc->deferred_bio_list, bio);
2211 if (bio_op(bio) == REQ_OP_DISCARD)
2212 pool->process_discard(tc, bio);
2214 pool->process_bio(tc, bio);
2285 * this bio might require one, we pause until there are some
2348 struct bio *bio;
2381 while ((bio = bio_list_pop(&bios)))
2382 bio_io_error(bio);
2387 while ((bio = bio_list_pop(&bio_completions)))
2388 bio_endio(bio);
2390 while ((bio = bio_list_pop(&bios))) {
2395 if (bio->bi_opf & REQ_PREFLUSH)
2396 bio_endio(bio);
2398 submit_bio_noacct(bio);
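
Lines 2348-2398 drain the bios that issue() (line 779) and complete_overwrite_bio() (line 989) parked while a metadata commit was pending: if the commit failed, everything is errored; bios on the completions list already reached the data device and are simply ended; the remaining flush bios are either ended (an empty REQ_PREFLUSH is redundant once the commit has flushed the data device) or resubmitted. A stand-in sketch of that decision:

#include <stdio.h>

struct fake_bio {
	int is_empty_flush;	/* stands in for bi_opf & REQ_PREFLUSH */
};

static const char *drain_one(struct fake_bio *bio, int on_completion_list,
			     int commit_failed)
{
	if (commit_failed)
		return "bio_io_error()";	/* commit failed: error all */
	if (on_completion_list)
		return "bio_endio()";		/* data already written */
	if (bio->is_empty_flush)
		return "bio_endio()";		/* commit already flushed */
	return "submit_bio_noacct()";		/* still needs the data dev */
}

int main(void)
{
	struct fake_bio overwrite = { 0 }, flush = { 1 }, fua_write = { 0 };

	printf("completion-list bio -> %s\n", drain_one(&overwrite, 1, 0));
	printf("empty flush         -> %s\n", drain_one(&flush, 0, 0));
	printf("FUA data write      -> %s\n", drain_one(&fua_write, 0, 0));
	return 0;
}
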
2659 * Called only while mapping a thin bio to hand it over to the workqueue.
2661 static void thin_defer_bio(struct thin_c *tc, struct bio *bio)
2666 bio_list_add(&tc->deferred_bio_list, bio);
2672 static void thin_defer_bio_with_throttle(struct thin_c *tc, struct bio *bio)
2677 thin_defer_bio(tc, bio);
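
thin_defer_bio() (lines 2659-2677) shows the hand-off used throughout the target: the mapping path only appends the bio to a per-thin list under a lock and wakes the pool worker, which later drains the list (see the deferred-bio loop at lines 2169-2214). A minimal pthread sketch of that producer/worker shape, with a counter standing in for the bio list and a condition variable for the workqueue wakeup:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wake = PTHREAD_COND_INITIALIZER;
static int deferred;	/* stands in for tc->deferred_bio_list */
static int stopping;

static void defer_bio(void)
{
	pthread_mutex_lock(&lock);
	deferred++;			/* bio_list_add() */
	pthread_mutex_unlock(&lock);
	pthread_cond_signal(&wake);	/* wake_worker() */
}

static void *worker(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	for (;;) {
		while (deferred) {
			deferred--;
			pthread_mutex_unlock(&lock);
			printf("worker processed a deferred bio\n");
			pthread_mutex_lock(&lock);
		}
		if (stopping)
			break;
		pthread_cond_wait(&wake, &lock);
	}
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;
	int i;

	pthread_create(&t, NULL, worker, NULL);
	for (i = 0; i < 4; i++)
		defer_bio();

	pthread_mutex_lock(&lock);
	stopping = 1;
	pthread_mutex_unlock(&lock);
	pthread_cond_signal(&wake);

	pthread_join(t, NULL);
	return 0;
}
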
2694 static void thin_hook_bio(struct thin_c *tc, struct bio *bio)
2696 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
2708 static int thin_bio_map(struct dm_target *ti, struct bio *bio)
2712 dm_block_t block = get_bio_block(tc, bio);
2718 thin_hook_bio(tc, bio);
2721 bio->bi_status = BLK_STS_DM_REQUEUE;
2722 bio_endio(bio);
2727 bio_io_error(bio);
2731 if (op_is_flush(bio->bi_opf) || bio_op(bio) == REQ_OP_DISCARD) {
2732 thin_defer_bio_with_throttle(tc, bio);
2741 if (bio_detain(tc->pool, &key, bio, &virt_cell))
2771 if (bio_detain(tc->pool, &key, bio, &data_cell)) {
2776 inc_all_io_entry(tc->pool, bio);
2780 remap(tc, bio, result.block);
2794 bio_io_error(bio);
2963 *error = "Error creating pool's bio prison";
3208 struct bio *flush_bio = &pool->flush_bio;
3435 static int pool_map(struct dm_target *ti, struct bio *bio)
3445 bio_set_dev(bio, pt->data_dev->bdev);
3589 * bio is in deferred list. Therefore need to return 0
4318 static int thin_map(struct dm_target *ti, struct bio *bio)
4320 bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
4322 return thin_bio_map(ti, bio);
4325 static int thin_endio(struct dm_target *ti, struct bio *bio,
4329 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));