Lines matching defs:bio in drivers/md/dm-thin.c (device-mapper thin provisioning target)

9 #include "dm-bio-prison-v1.h"
226 typedef void (*process_bio_fn)(struct thin_c *tc, struct bio *bio);
384 struct bio *parent_bio;
385 struct bio *bio;
388 static void begin_discard(struct discard_op *op, struct thin_c *tc, struct bio *parent)
395 op->bio = NULL;
404 return __blkdev_issue_discard(tc->pool_dev->bdev, s, len, GFP_NOIO, &op->bio);
409 if (op->bio) {
414 bio_chain(op->bio, op->parent_bio);
415 op->bio->bi_opf = REQ_OP_DISCARD;
416 submit_bio(op->bio);
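The begin/issue/end discard helpers above accumulate a discard bio via __blkdev_issue_discard() and then bio_chain() it to the caller's parent bio, so the parent cannot complete until the chained discard has. Below is a minimal userspace sketch of that remaining-count idea, using invented stand-ins (struct fake_bio, chain(), endio()) rather than the real block-layer API:

    #include <stdio.h>

    /*
     * Toy model of bio_chain()/bio_inc_remaining(): a bio completes only
     * once its own completion and every chained child have both arrived.
     */
    struct fake_bio {
        const char *name;
        int remaining;           /* like __bi_remaining */
        struct fake_bio *parent; /* completion is forwarded here */
    };

    static void endio(struct fake_bio *b)
    {
        if (--b->remaining > 0)
            return;              /* still waiting on chained I/O */
        printf("%s complete\n", b->name);
        if (b->parent)
            endio(b->parent);    /* propagate, as bio_chain() arranges */
    }

    static void chain(struct fake_bio *child, struct fake_bio *parent)
    {
        child->parent = parent;
        parent->remaining++;     /* cf. bio_inc_remaining(parent) */
    }

    int main(void)
    {
        struct fake_bio parent = { "parent discard", 1, NULL };
        struct fake_bio child  = { "sub-discard", 1, NULL };

        chain(&child, &parent);
        endio(&parent);  /* caller ends the parent: still pending */
        endio(&child);   /* chained discard finishes -> parent completes */
        return 0;
    }

The same counter is what lets break_up_discard_bio() bump the original discard with bio_inc_remaining() (line 1735 below) while the passdown path ends that bio early at line 1765, knowing the remaining count keeps it alive.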
443 static int bio_detain(struct pool *pool, struct dm_cell_key *key, struct bio *bio,
455 r = dm_bio_detain(pool->prison, key, bio, cell_prealloc, cell_result);
602 struct bio *bio;
604 while ((bio = bio_list_pop(bios))) {
605 bio->bi_status = error;
606 bio_endio(bio);
677 static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio)
680 sector_t block_nr = bio->bi_iter.bi_sector;
691 * Returns the _complete_ blocks that this bio covers.
693 static void get_bio_block_range(struct thin_c *tc, struct bio *bio,
697 sector_t b = bio->bi_iter.bi_sector;
698 sector_t e = b + (bio->bi_iter.bi_size >> SECTOR_SHIFT);
711 /* Can happen if the bio is within a single block. */
718 static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block)
721 sector_t bi_sector = bio->bi_iter.bi_sector;
723 bio_set_dev(bio, tc->pool_dev->bdev);
725 bio->bi_iter.bi_sector =
729 bio->bi_iter.bi_sector = (block * pool->sectors_per_block) +
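get_bio_block(), get_bio_block_range() and remap() are plain sector arithmetic: divide the bio's start sector by the pool's block size, shrink the range so only fully covered blocks remain, and rebuild the data-device sector as block * sectors_per_block plus the offset within the block. A standalone sketch of that math follows, assuming 512-byte sectors; the helper names (bio_block, covered_block_range, remap_sector) are hypothetical, and the driver's power-of-two shift fast path is folded into ordinary division:

    #include <stdio.h>
    #include <stdint.h>

    #define SECTOR_SHIFT 9u   /* 512-byte sectors, as in the kernel */

    /* Block containing the bio's first sector (cf. get_bio_block). */
    static uint64_t bio_block(uint64_t bi_sector, uint64_t sectors_per_block)
    {
        return bi_sector / sectors_per_block;
    }

    /*
     * Blocks *completely* covered by a bio of bi_size bytes starting at
     * bi_sector (cf. get_bio_block_range): round the start up, the end
     * down, and clamp when the bio sits inside a single block.
     */
    static void covered_block_range(uint64_t bi_sector, uint32_t bi_size,
                                    uint64_t sectors_per_block,
                                    uint64_t *begin, uint64_t *end)
    {
        uint64_t b = bi_sector + sectors_per_block - 1;   /* round up */
        uint64_t e = bi_sector + (bi_size >> SECTOR_SHIFT);

        b /= sectors_per_block;
        e /= sectors_per_block;
        if (e < b)
            e = b;   /* can happen if the bio is within a single block */

        *begin = b;
        *end = e;
    }

    /* Sector on the pool's data device (cf. remap). */
    static uint64_t remap_sector(uint64_t bi_sector, uint64_t data_block,
                                 uint64_t sectors_per_block)
    {
        return data_block * sectors_per_block +
               bi_sector % sectors_per_block;
    }

    int main(void)
    {
        uint64_t spb = 128;              /* 64KiB thin-pool blocks */
        uint64_t sector = 200, begin, end;
        uint32_t size = 64 * 1024;       /* a 64KiB bio */

        covered_block_range(sector, size, spb, &begin, &end);
        printf("virtual block %llu, complete blocks [%llu, %llu), "
               "data sector %llu\n",
               (unsigned long long)bio_block(sector, spb),
               (unsigned long long)begin,
               (unsigned long long)end,
               (unsigned long long)remap_sector(sector, 5, spb));
        return 0;
    }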
733 static void remap_to_origin(struct thin_c *tc, struct bio *bio)
735 bio_set_dev(bio, tc->origin_dev->bdev);
738 static int bio_triggers_commit(struct thin_c *tc, struct bio *bio)
740 return op_is_flush(bio->bi_opf) &&
744 static void inc_all_io_entry(struct pool *pool, struct bio *bio)
748 if (bio_op(bio) == REQ_OP_DISCARD)
751 h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
755 static void issue(struct thin_c *tc, struct bio *bio)
759 if (!bio_triggers_commit(tc, bio)) {
760 dm_submit_bio_remap(bio, NULL);
765 * Complete bio with an error if earlier I/O caused changes to
770 bio_io_error(bio);
779 bio_list_add(&pool->deferred_flush_bios, bio);
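issue() submits most bios straight to the pool device, but a bio that triggers a commit (flush/FUA while metadata changes are pending) is parked on pool->deferred_flush_bios until the metadata has been committed; the real issue() also fails the bio outright if the pool has since left write mode, a branch omitted here. A small sketch of that park-until-commit idea, with invented types (struct req, commit_and_release()):

    #include <stdio.h>
    #include <stdbool.h>

    /*
     * Toy model of issue(): ordinary bios go straight to the data device,
     * but a bio with FLUSH/FUA semantics is held until the thin-pool
     * metadata has been committed.
     */
    struct req {                        /* stand-in for struct bio */
        int id;
        bool needs_commit;              /* cf. bio_triggers_commit() */
        struct req *next;
    };

    static struct req *deferred_flush;  /* cf. pool->deferred_flush_bios */

    static void submit(struct req *r)
    {
        printf("req %d submitted to the data device\n", r->id);
    }

    static void issue(struct req *r)
    {
        if (!r->needs_commit) {
            submit(r);
            return;
        }
        r->next = deferred_flush;       /* park until the next commit */
        deferred_flush = r;
    }

    /* cf. process_deferred_bios(): commit, then release the queue */
    static void commit_and_release(void)
    {
        printf("metadata committed\n");
        while (deferred_flush) {
            struct req *r = deferred_flush;

            deferred_flush = r->next;
            submit(r);
        }
    }

    int main(void)
    {
        struct req a = { 1, false }, b = { 2, true };

        issue(&a);             /* no commit needed: submitted at once */
        issue(&b);             /* FUA/flush with dirty metadata: held */
        commit_and_release();  /* only now is req 2 safe to submit */
        return 0;
    }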
783 static void remap_to_origin_and_issue(struct thin_c *tc, struct bio *bio)
785 remap_to_origin(tc, bio);
786 issue(tc, bio);
789 static void remap_and_issue(struct thin_c *tc, struct bio *bio,
792 remap(tc, bio, block);
793 issue(tc, bio);
821 * If the bio covers the whole area of a block then we can avoid
822 * zeroing or copying. Instead this bio is hooked. The bio will
824 * the bio twice.
826 struct bio *bio;
858 static void overwrite_endio(struct bio *bio)
860 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
863 bio->bi_end_io = m->saved_bi_end_io;
865 m->status = bio->bi_status;
900 static void thin_defer_bio(struct thin_c *tc, struct bio *bio);
912 struct bio *bio;
914 while ((bio = bio_list_pop(&cell->bios))) {
915 if (op_is_flush(bio->bi_opf) || bio_op(bio) == REQ_OP_DISCARD)
916 bio_list_add(&info->defer_bios, bio);
918 inc_all_io_entry(info->tc->pool, bio);
921 * We can't issue the bios with the bio prison lock
925 bio_list_add(&info->issue_bios, bio);
934 struct bio *bio;
949 while ((bio = bio_list_pop(&info.defer_bios)))
950 thin_defer_bio(tc, bio);
952 while ((bio = bio_list_pop(&info.issue_bios)))
953 remap_and_issue(info.tc, bio, block);
963 static void complete_overwrite_bio(struct thin_c *tc, struct bio *bio)
968 * If the bio has the REQ_FUA flag set we must commit the metadata
971 if (!bio_triggers_commit(tc, bio)) {
972 bio_endio(bio);
977 * Complete bio with an error if earlier I/O caused changes to the
982 bio_io_error(bio);
991 bio_list_add(&pool->deferred_flush_completions, bio);
999 struct bio *bio = m->bio;
1021 * If we are processing a write bio that completely covers the block,
1025 if (bio) {
1027 complete_overwrite_bio(tc, bio);
1052 bio_io_error(m->bio);
1058 bio_endio(m->bio);
1070 bio_io_error(m->bio);
1072 bio_endio(m->bio);
1081 struct bio *discard_parent)
1140 static void passdown_endio(struct bio *bio)
1146 queue_passdown_pt2(bio->bi_private);
1147 bio_put(bio);
1155 struct bio *discard_parent;
1166 bio_io_error(m->bio);
1179 bio_io_error(m->bio);
1213 bio_io_error(m->bio);
1215 bio_endio(m->bio);
1237 * Deferred bio jobs.
1239 static int io_overlaps_block(struct pool *pool, struct bio *bio)
1241 return bio->bi_iter.bi_size ==
1245 static int io_overwrites_block(struct pool *pool, struct bio *bio)
1247 return (bio_data_dir(bio) == WRITE) &&
1248 io_overlaps_block(pool, bio);
1251 static void save_and_set_endio(struct bio *bio, bio_end_io_t **save,
1254 *save = bio->bi_end_io;
1255 bio->bi_end_io = fn;
1276 m->bio = NULL;
1295 static void remap_and_issue_overwrite(struct thin_c *tc, struct bio *bio,
1300 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
1303 m->bio = bio;
1304 save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
1305 inc_all_io_entry(pool, bio);
1306 remap_and_issue(tc, bio, data_begin);
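For a WRITE that covers a whole block (io_overwrites_block()), remap_and_issue_overwrite() hijacks the bio's completion: save_and_set_endio() stashes the original bi_end_io, overwrite_endio() records the status in the dm_thin_new_mapping and restores the saved callback, and the bio is only ended for real once the new mapping has been inserted. A minimal function-pointer sketch of that save/override/restore pattern, using invented stand-ins for struct bio and the mapping:

    #include <stdio.h>

    struct fake_bio;
    typedef void (*end_io_fn)(struct fake_bio *);

    struct fake_bio {               /* stand-in for struct bio */
        int status;
        end_io_fn end_io;           /* like bio->bi_end_io */
    };

    struct fake_mapping {           /* stand-in for dm_thin_new_mapping */
        int status;
        end_io_fn saved_end_io;
        struct fake_bio *bio;
    };

    static struct fake_mapping m;   /* one in-flight mapping, for brevity */

    /* cf. save_and_set_endio(): remember the old callback, install ours */
    static void save_and_set_endio(struct fake_bio *bio, end_io_fn *save,
                                   end_io_fn fn)
    {
        *save = bio->end_io;
        bio->end_io = fn;
    }

    /* cf. overwrite_endio(): capture the status, restore the original */
    static void overwrite_endio(struct fake_bio *bio)
    {
        bio->end_io = m.saved_end_io;
        m.status = bio->status;
        printf("overwrite done, status %d; original endio restored\n",
               m.status);
    }

    static void original_endio(struct fake_bio *bio)
    {
        printf("original completion runs, status %d\n", bio->status);
    }

    int main(void)
    {
        struct fake_bio bio = { 0, original_endio };

        m.bio = &bio;
        save_and_set_endio(&bio, &m.saved_end_io, overwrite_endio);

        bio.end_io(&bio);   /* data I/O completes: the hook runs first */
        bio.end_io(&bio);   /* later, the worker ends the bio for real */
        return 0;
    }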
1315 struct dm_bio_prison_cell *cell, struct bio *bio,
1341 * bio immediately. Otherwise we use kcopyd to clone the data first.
1343 if (io_overwrites_block(pool, bio))
1344 remap_and_issue_overwrite(tc, bio, data_dest, m);
1375 struct dm_bio_prison_cell *cell, struct bio *bio)
1378 data_origin, data_dest, cell, bio,
1384 struct bio *bio)
1398 * zeroing pre-existing data, we can issue the bio immediately.
1402 if (io_overwrites_block(pool, bio))
1403 remap_and_issue_overwrite(tc, bio, data_block, m);
1413 struct dm_bio_prison_cell *cell, struct bio *bio)
1421 virt_block, data_dest, cell, bio,
1426 virt_block, data_dest, cell, bio,
1430 schedule_zero(tc, virt_block, data_dest, cell, bio);
1584 static void retry_on_resume(struct bio *bio)
1586 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
1590 bio_list_add(&tc->retry_on_resume_list, bio);
1601 DMERR_LIMIT("bio unserviceable, yet pool is in PM_WRITE mode");
1613 DMERR_LIMIT("bio unserviceable, yet pool has an unknown mode");
1618 static void handle_unserviceable_bio(struct pool *pool, struct bio *bio)
1623 bio->bi_status = error;
1624 bio_endio(bio);
1626 retry_on_resume(bio);
1631 struct bio *bio;
1644 while ((bio = bio_list_pop(&bios)))
1645 retry_on_resume(bio);
1662 m->bio = virt_cell->holder;
1669 struct bio *bio)
1695 * Make sure the data region obeys the bio prison restrictions.
1725 m->bio = bio;
1728 * The parent bio must not complete before sub discard bios are
1735 bio_inc_remaining(bio);
1749 struct bio *bio = virt_cell->holder;
1750 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
1753 * The virt_cell will only get freed once the origin bio completes.
1758 break_up_discard_bio(tc, virt_cell->key.block_begin, virt_cell->key.block_end, bio);
1761 * We complete the bio now, knowing that the bi_remaining field
1765 bio_endio(bio);
1768 static void process_discard_bio(struct thin_c *tc, struct bio *bio)
1774 get_bio_block_range(tc, bio, &begin, &end);
1779 bio_endio(bio);
1784 DMERR_LIMIT("Discard doesn't respect bio prison limits");
1785 bio_endio(bio);
1789 if (bio_detain(tc->pool, &virt_key, bio, &virt_cell)) {
1803 static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
1816 data_block, cell, bio);
1835 struct bio *bio;
1837 while ((bio = bio_list_pop(&cell->bios))) {
1838 if (bio_data_dir(bio) == WRITE || op_is_flush(bio->bi_opf) ||
1839 bio_op(bio) == REQ_OP_DISCARD)
1840 bio_list_add(&info->defer_bios, bio);
1842 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
1845 inc_all_io_entry(info->tc->pool, bio);
1846 bio_list_add(&info->issue_bios, bio);
1855 struct bio *bio;
1865 while ((bio = bio_list_pop(&info.defer_bios)))
1866 thin_defer_bio(tc, bio);
1868 while ((bio = bio_list_pop(&info.issue_bios)))
1869 remap_and_issue(tc, bio, block);
1872 static void process_shared_bio(struct thin_c *tc, struct bio *bio,
1886 if (bio_detain(pool, &key, bio, &data_cell)) {
1891 if (bio_data_dir(bio) == WRITE && bio->bi_iter.bi_size) {
1892 break_sharing(tc, bio, block, &key, lookup_result, data_cell);
1895 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
1898 inc_all_io_entry(pool, bio);
1899 remap_and_issue(tc, bio, lookup_result->block);
1906 static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block,
1916 if (!bio->bi_iter.bi_size) {
1917 inc_all_io_entry(pool, bio);
1920 remap_and_issue(tc, bio, 0);
1927 if (bio_data_dir(bio) == READ) {
1928 zero_fill_bio(bio);
1930 bio_endio(bio);
1938 schedule_external_copy(tc, block, data_block, cell, bio);
1940 schedule_zero(tc, block, data_block, cell, bio);
1959 struct bio *bio = cell->holder;
1960 dm_block_t block = get_bio_block(tc, bio);
1972 process_shared_bio(tc, bio, block, &lookup_result, cell);
1974 inc_all_io_entry(pool, bio);
1975 remap_and_issue(tc, bio, lookup_result.block);
1981 if (bio_data_dir(bio) == READ && tc->origin_dev) {
1982 inc_all_io_entry(pool, bio);
1985 if (bio_end_sector(bio) <= tc->origin_size)
1986 remap_to_origin_and_issue(tc, bio);
1988 else if (bio->bi_iter.bi_sector < tc->origin_size) {
1989 zero_fill_bio(bio);
1990 bio->bi_iter.bi_size = (tc->origin_size - bio->bi_iter.bi_sector) << SECTOR_SHIFT;
1991 remap_to_origin_and_issue(tc, bio);
1994 zero_fill_bio(bio);
1995 bio_endio(bio);
1998 provision_block(tc, bio, block, cell);
2005 bio_io_error(bio);
2010 static void process_bio(struct thin_c *tc, struct bio *bio)
2013 dm_block_t block = get_bio_block(tc, bio);
2022 if (bio_detain(pool, &key, bio, &cell))
2028 static void __process_bio_read_only(struct thin_c *tc, struct bio *bio,
2032 int rw = bio_data_dir(bio);
2033 dm_block_t block = get_bio_block(tc, bio);
2039 if (lookup_result.shared && (rw == WRITE) && bio->bi_iter.bi_size) {
2040 handle_unserviceable_bio(tc->pool, bio);
2044 inc_all_io_entry(tc->pool, bio);
2045 remap_and_issue(tc, bio, lookup_result.block);
2055 handle_unserviceable_bio(tc->pool, bio);
2060 inc_all_io_entry(tc->pool, bio);
2061 remap_to_origin_and_issue(tc, bio);
2065 zero_fill_bio(bio);
2066 bio_endio(bio);
2074 bio_io_error(bio);
2079 static void process_bio_read_only(struct thin_c *tc, struct bio *bio)
2081 __process_bio_read_only(tc, bio, NULL);
2089 static void process_bio_success(struct thin_c *tc, struct bio *bio)
2091 bio_endio(bio);
2094 static void process_bio_fail(struct thin_c *tc, struct bio *bio)
2096 bio_io_error(bio);
2122 static void __thin_bio_rb_add(struct thin_c *tc, struct bio *bio)
2126 sector_t bi_sector = bio->bi_iter.bi_sector;
2140 pbd = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
2149 struct bio *bio;
2153 bio = thin_bio(pbd);
2155 bio_list_add(&tc->deferred_bio_list, bio);
2164 struct bio *bio;
2172 while ((bio = bio_list_pop(&bios)))
2173 __thin_bio_rb_add(tc, bio);
2186 struct bio *bio;
2214 while ((bio = bio_list_pop(&bios))) {
2217 * this bio might require one, we pause until there are some
2222 bio_list_add(&tc->deferred_bio_list, bio);
2228 if (bio_op(bio) == REQ_OP_DISCARD)
2229 pool->process_discard(tc, bio);
2231 pool->process_bio(tc, bio);
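process_bio(), process_bio_read_only(), process_bio_success() and process_bio_fail() all match the process_bio_fn typedef at line 226 of the listing, and the deferred-bio loop simply calls whichever handlers the pool's current mode has installed in pool->process_bio / pool->process_discard. A small sketch of that function-pointer dispatch, with invented handler bodies and an enter_fail_mode() stand-in for the driver's mode switching:

    #include <stdio.h>

    struct thin_c;                  /* opaque, as in the handlers here */
    struct fake_bio { int id; };    /* stand-in for struct bio */

    /* mirrors the process_bio_fn typedef at line 226 of the listing */
    typedef void (*process_fn)(struct thin_c *tc, struct fake_bio *bio);

    struct fake_pool {
        process_fn process_bio;
        process_fn process_discard;
    };

    /* stand-in for process_bio(): the full read/write path */
    static void handle_rw(struct thin_c *tc, struct fake_bio *bio)
    {
        (void)tc;
        printf("bio %d: normal read/write handling\n", bio->id);
    }

    /* stand-in for process_bio_fail(): every bio is errored */
    static void handle_fail(struct thin_c *tc, struct fake_bio *bio)
    {
        (void)tc;
        printf("bio %d: pool failed, erroring bio\n", bio->id);
    }

    /* the driver swaps handlers when the pool changes mode */
    static void enter_fail_mode(struct fake_pool *pool)
    {
        pool->process_bio = handle_fail;
        pool->process_discard = handle_fail;
    }

    int main(void)
    {
        struct fake_pool pool = { handle_rw, handle_rw };
        struct fake_bio a = { 1 }, b = { 2 };

        pool.process_bio(NULL, &a);   /* healthy, PM_WRITE-like mode */
        enter_fail_mode(&pool);       /* e.g. after a metadata failure */
        pool.process_bio(NULL, &b);   /* same call site, new behaviour */
        return 0;
    }

Installing behaviour per mode keeps the hot dispatch at lines 2228-2231 free of per-bio mode checks.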
2302 * this bio might require one, we pause until there are some
2365 struct bio *bio;
2398 while ((bio = bio_list_pop(&bios)))
2399 bio_io_error(bio);
2404 while ((bio = bio_list_pop(&bio_completions)))
2405 bio_endio(bio);
2407 while ((bio = bio_list_pop(&bios))) {
2412 if (bio->bi_opf & REQ_PREFLUSH)
2413 bio_endio(bio);
2415 dm_submit_bio_remap(bio, NULL);
2674 * Called only while mapping a thin bio to hand it over to the workqueue.
2676 static void thin_defer_bio(struct thin_c *tc, struct bio *bio)
2681 bio_list_add(&tc->deferred_bio_list, bio);
2687 static void thin_defer_bio_with_throttle(struct thin_c *tc, struct bio *bio)
2692 thin_defer_bio(tc, bio);
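thin_defer_bio() appends the bio to tc->deferred_bio_list under the thin device's spinlock and wakes the pool worker; the throttled variant only adds throttle accounting around it. A compact pthread-based model of that defer-to-worker handoff, with invented names (defer_item(), worker()) standing in for the kernel's spinlock and workqueue machinery, and a LIFO list for brevity:

    #include <pthread.h>
    #include <stdio.h>

    /*
     * Toy model of thin_defer_bio(): producers push work under a lock and
     * wake a single worker, which drains the list (cf. wake_worker()).
     */
    struct item {
        int id;
        struct item *next;
    };

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t wake = PTHREAD_COND_INITIALIZER;
    static struct item *deferred;   /* like tc->deferred_bio_list */
    static int stopping;

    static void defer_item(struct item *it)
    {
        pthread_mutex_lock(&lock);  /* spin_lock_irqsave() in the driver */
        it->next = deferred;
        deferred = it;
        pthread_mutex_unlock(&lock);
        pthread_cond_signal(&wake); /* wake_worker() */
    }

    static void *worker(void *arg)
    {
        (void)arg;
        pthread_mutex_lock(&lock);
        for (;;) {
            while (deferred) {
                struct item *it = deferred;

                deferred = it->next;
                pthread_mutex_unlock(&lock);
                printf("worker processing deferred item %d\n", it->id);
                pthread_mutex_lock(&lock);
            }
            if (stopping)
                break;
            pthread_cond_wait(&wake, &lock);
        }
        pthread_mutex_unlock(&lock);
        return NULL;
    }

    int main(void)
    {
        pthread_t t;
        struct item a = { 1 }, b = { 2 };

        pthread_create(&t, NULL, worker, NULL);
        defer_item(&a);
        defer_item(&b);

        pthread_mutex_lock(&lock);
        stopping = 1;
        pthread_mutex_unlock(&lock);
        pthread_cond_signal(&wake);

        pthread_join(t, NULL);
        return 0;
    }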
2709 static void thin_hook_bio(struct thin_c *tc, struct bio *bio)
2711 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
2723 static int thin_bio_map(struct dm_target *ti, struct bio *bio)
2727 dm_block_t block = get_bio_block(tc, bio);
2733 thin_hook_bio(tc, bio);
2736 bio->bi_status = BLK_STS_DM_REQUEUE;
2737 bio_endio(bio);
2742 bio_io_error(bio);
2746 if (op_is_flush(bio->bi_opf) || bio_op(bio) == REQ_OP_DISCARD) {
2747 thin_defer_bio_with_throttle(tc, bio);
2756 if (bio_detain(tc->pool, &key, bio, &virt_cell))
2786 if (bio_detain(tc->pool, &key, bio, &data_cell)) {
2791 inc_all_io_entry(tc->pool, bio);
2795 remap(tc, bio, result.block);
2809 bio_io_error(bio);
2973 *error = "Error creating pool's bio prison";
3442 static int pool_map(struct dm_target *ti, struct bio *bio)
3451 bio_set_dev(bio, pt->data_dev->bdev);
3594 * bio is in deferred list. Therefore need to return 0
4330 static int thin_map(struct dm_target *ti, struct bio *bio)
4332 bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
4334 return thin_bio_map(ti, bio);
4337 static int thin_endio(struct dm_target *ti, struct bio *bio,
4341 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));