Lines Matching defs:bio in drivers/md/dm-snap.c
204 * in a bio list
229 struct bio *full_bio;
245 static void init_tracked_chunk(struct bio *bio)
247 struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
252 static bool is_bio_tracked(struct bio *bio)
254 struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
259 static void track_chunk(struct dm_snapshot *s, struct bio *bio, chunk_t chunk)
261 struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
271 static void stop_tracking_chunk(struct dm_snapshot *s, struct bio *bio)
273 struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
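The four helpers above (init_tracked_chunk, is_bio_tracked, track_chunk, stop_tracking_chunk) keep a small per-bio record in the data area device-mapper reserves next to each bio via dm_per_bio_data(), so the snapshot can tell which chunks still have I/O in flight against them. A minimal userspace sketch of the same idea follows; the type and field names (struct io, per_io_data, tracked) are stand-ins for illustration, not the kernel's types.

/*
 * Illustrative sketch only: each in-flight I/O carries a fixed-size scratch
 * record, the way dm_per_bio_data() returns the per-bio area allocated
 * alongside the bio.  Names are hypothetical stand-ins.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t chunk_t;

struct tracked_chunk {          /* plays the role of dm_snap_tracked_chunk */
        chunk_t chunk;
        bool tracked;
};

struct io {                     /* plays the role of struct bio */
        uint64_t sector;
        struct tracked_chunk per_io_data;   /* the dm_per_bio_data() area */
};

static struct tracked_chunk *per_io_data(struct io *io)
{
        return &io->per_io_data;
}

static void init_tracked_chunk(struct io *io)
{
        per_io_data(io)->tracked = false;
}

static void track_chunk(struct io *io, chunk_t chunk)
{
        struct tracked_chunk *c = per_io_data(io);

        c->chunk = chunk;
        c->tracked = true;      /* dm-snap instead hashes the record by chunk */
}

static bool is_io_tracked(struct io *io)
{
        return per_io_data(io)->tracked;
}

static void stop_tracking_chunk(struct io *io)
{
        per_io_data(io)->tracked = false;
}

int main(void)
{
        struct io io = { .sector = 1024 };

        init_tracked_chunk(&io);
        track_chunk(&io, io.sector >> 4);   /* e.g. 16-sector chunks */
        printf("tracked=%d chunk=%llu\n", is_io_tracked(&io),
               (unsigned long long)per_io_data(&io)->chunk);
        stop_tracking_chunk(&io);
        return 0;
}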
926 static struct bio *__release_queued_bios_after_merge(struct dm_snapshot *s)
984 static void flush_bios(struct bio *bio);
988 struct bio *b = NULL;
1124 static void error_bios(struct bio *bio);
1129 struct bio *b = NULL;
1569 static void flush_bios(struct bio *bio)
1571 struct bio *n;
1573 while (bio) {
1574 n = bio->bi_next;
1575 bio->bi_next = NULL;
1576 submit_bio_noacct(bio);
1577 bio = n;
1581 static int do_origin(struct dm_dev *origin, struct bio *bio, bool limit);
1586 static void retry_origin_bios(struct dm_snapshot *s, struct bio *bio)
1588 struct bio *n;
1591 while (bio) {
1592 n = bio->bi_next;
1593 bio->bi_next = NULL;
1594 r = do_origin(s->origin, bio, false);
1596 submit_bio_noacct(bio);
1597 bio = n;
1604 static void error_bios(struct bio *bio)
1606 struct bio *n;
1608 while (bio) {
1609 n = bio->bi_next;
1610 bio->bi_next = NULL;
1611 bio_io_error(bio);
1612 bio = n;
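flush_bios(), retry_origin_bios() and error_bios() all drain the same structure: a NULL-terminated chain of bios linked through bi_next. Each iteration detaches the head and clears its bi_next before resubmitting or failing it, so the bio is handed on in a clean state. A compact standalone sketch of the pattern, with stand-in types and a print instead of a real submission:

/*
 * Sketch of the drain pattern used by flush_bios()/retry_origin_bios()/
 * error_bios().  fake_bio and submit() are stand-ins for illustration.
 */
#include <stdio.h>
#include <stddef.h>

struct fake_bio {
        int id;
        struct fake_bio *bi_next;   /* intrusive singly linked chain */
};

static void submit(struct fake_bio *bio)
{
        printf("submitting bio %d\n", bio->id);
}

static void flush_bios(struct fake_bio *bio)
{
        struct fake_bio *n;

        while (bio) {
                n = bio->bi_next;       /* remember the rest of the chain */
                bio->bi_next = NULL;    /* detach before handing it off */
                submit(bio);
                bio = n;
        }
}

int main(void)
{
        struct fake_bio c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };

        flush_bios(&a);
        return 0;
}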
1646 struct bio *origin_bios = NULL;
1647 struct bio *snapshot_bios = NULL;
1648 struct bio *full_bio = NULL;
1814 static void full_bio_end_io(struct bio *bio)
1816 void *callback_data = bio->bi_private;
1818 dm_kcopyd_do_callback(callback_data, 0, bio->bi_status ? 1 : 0);
1822 struct bio *bio)
1827 pe->full_bio = bio;
1828 pe->full_bio_end_io = bio->bi_end_io;
1834 bio->bi_end_io = full_bio_end_io;
1835 bio->bi_private = callback_data;
1837 submit_bio_noacct(bio);
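start_full_bio() reuses the original whole-chunk write to populate the COW chunk: the bio's bi_end_io and bi_private are saved in the pending exception and replaced so that completion is routed into the kcopyd callback (dm_kcopyd_do_callback() in the kernel), and only then is the bio submitted. A hedged userspace sketch of that completion-hijack pattern, with stand-in names (fake_bio, copy_engine_callback) and the submission simulated by calling the callback directly:

/*
 * Sketch only: save and override the I/O completion callback so that a
 * completion drives the copy engine's notifier.  Stand-in types/names.
 */
#include <stdio.h>

struct fake_bio {
        void (*bi_end_io)(struct fake_bio *);
        void *bi_private;
};

struct pending_exception {
        struct fake_bio *full_bio;
        void (*full_bio_end_io)(struct fake_bio *);   /* saved original */
};

static void copy_engine_callback(void *context, int err)
{
        printf("copy complete, err=%d\n", err);
}

static void full_bio_end_io(struct fake_bio *bio)
{
        /* route the bio's completion into the copy engine's notifier */
        copy_engine_callback(bio->bi_private, 0);
}

static void original_end_io(struct fake_bio *bio)
{
        printf("original completion\n");
}

static void start_full_bio(struct pending_exception *pe, struct fake_bio *bio,
                           void *callback_data)
{
        pe->full_bio = bio;
        pe->full_bio_end_io = bio->bi_end_io;   /* saved, restored on completion */
        bio->bi_end_io = full_bio_end_io;
        bio->bi_private = callback_data;
        bio->bi_end_io(bio);    /* submit_bio_noacct(bio) in the kernel */
}

int main(void)
{
        struct fake_bio bio = { original_end_io, NULL };
        struct pending_exception pe;

        start_full_bio(&pe, &bio, NULL);
        return 0;
}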
1906 struct bio *bio, chunk_t chunk)
1908 bio_set_dev(bio, s->cow->bdev);
1909 bio->bi_iter.bi_sector =
1912 (bio->bi_iter.bi_sector & s->store->chunk_mask);
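remap_exception() points the bio at the COW device and recomputes its sector as the start of the exception's new chunk plus the original offset within the chunk (chunk_mask is chunk_size - 1 for the power-of-two chunk size in sectors). The worked example below simplifies to a single-chunk exception and uses made-up numbers:

/*
 * Sketch of the remap arithmetic: chunk_to_sector(new_chunk) plus the
 * offset of the original sector within its chunk.
 */
#include <stdio.h>
#include <stdint.h>

typedef uint64_t sector_t;
typedef uint64_t chunk_t;

int main(void)
{
        sector_t chunk_size = 16;           /* sectors per chunk */
        sector_t chunk_mask = chunk_size - 1;
        sector_t bi_sector = 100;           /* original bio sector */
        chunk_t new_chunk = 7;              /* chunk allocated in the COW */

        /* start of the new chunk + offset within the chunk (100 & 15 == 4) */
        sector_t remapped = (new_chunk * chunk_size) + (bi_sector & chunk_mask);

        printf("sector %llu -> COW sector %llu\n",
               (unsigned long long)bi_sector, (unsigned long long)remapped);
        return 0;
}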
1917 struct bio *bio = context;
1918 struct dm_snapshot *s = bio->bi_private;
1921 bio->bi_status = write_err ? BLK_STS_IOERR : 0;
1922 bio_endio(bio);
1926 struct bio *bio, chunk_t chunk)
1931 dest.sector = bio->bi_iter.bi_sector;
1935 WARN_ON_ONCE(bio->bi_private);
1936 bio->bi_private = s;
1937 dm_kcopyd_zero(s->kcopyd_client, 1, &dest, 0, zero_callback, bio);
1940 static bool io_overlaps_chunk(struct dm_snapshot *s, struct bio *bio)
1942 return bio->bi_iter.bi_size ==
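io_overlaps_chunk() is the whole-chunk test used to decide whether a discard can be zeroed in place or a write can be turned into a full-chunk copy: the bio's byte length must equal the chunk size, which is kept in 512-byte sectors, hence a shift by SECTOR_SHIFT (9). A small standalone illustration:

/*
 * Sketch of the whole-chunk test: bio length in bytes equals the chunk
 * size (in sectors) converted to bytes.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SECTOR_SHIFT 9

static bool io_overlaps_chunk(uint32_t bi_size_bytes, uint64_t chunk_size_sectors)
{
        return bi_size_bytes == (chunk_size_sectors << SECTOR_SHIFT);
}

int main(void)
{
        /* a 16-sector chunk is 8 KiB; an 8 KiB bio overlaps it exactly */
        printf("%d\n", io_overlaps_chunk(8192, 16));    /* 1 */
        printf("%d\n", io_overlaps_chunk(4096, 16));    /* 0 */
        return 0;
}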
1946 static int snapshot_map(struct dm_target *ti, struct bio *bio)
1955 init_tracked_chunk(bio);
1957 if (bio->bi_opf & REQ_PREFLUSH) {
1958 bio_set_dev(bio, s->cow->bdev);
1962 chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);
1970 if (bio_data_dir(bio) == WRITE) {
1979 bio_data_dir(bio) == WRITE)) {
1984 if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) {
1985 if (s->discard_passdown_origin && dm_bio_get_target_bio_nr(bio)) {
1992 bio_set_dev(bio, s->origin->bdev);
1993 track_chunk(s, bio, chunk);
2002 remap_exception(s, e, bio, chunk);
2003 if (unlikely(bio_op(bio) == REQ_OP_DISCARD) &&
2004 io_overlaps_chunk(s, bio)) {
2007 zero_exception(s, e, bio, chunk);
2014 if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) {
2019 bio_endio(bio);
2029 if (bio_data_dir(bio) == WRITE) {
2039 remap_exception(s, e, bio, chunk);
2064 remap_exception(s, &pe->e, bio, chunk);
2068 if (!pe->started && io_overlaps_chunk(s, bio)) {
2074 start_full_bio(pe, bio);
2078 bio_list_add(&pe->snapshot_bios, bio);
2091 bio_set_dev(bio, s->origin->bdev);
2092 track_chunk(s, bio, chunk);
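Leaving aside flushes, discards, merging and the error paths, snapshot_map() resolves each bio roughly as follows: if a completed exception already exists for the chunk, the bio is remapped to the COW device; a write to a not-yet-copied chunk allocates a pending exception, starts the copy and queues the bio; a read of a not-yet-copied chunk is remapped to the origin and tracked. The condensed decision sketch below uses stand-in enum names and omits everything else:

/*
 * Very condensed sketch of the snapshot_map() decision for a plain
 * read/write bio; pending-exception handling and locking are omitted.
 */
#include <stdbool.h>
#include <stdio.h>

enum outcome {
        REMAP_TO_COW,           /* exception exists: read/write the COW chunk */
        START_COPY_AND_QUEUE,   /* write to an uncopied chunk: trigger COW */
        REMAP_TO_ORIGIN,        /* read of an uncopied chunk: go to the origin */
};

static enum outcome snapshot_map_decision(bool exception_complete, bool is_write)
{
        if (exception_complete)
                return REMAP_TO_COW;
        if (is_write)
                return START_COPY_AND_QUEUE;
        return REMAP_TO_ORIGIN;
}

int main(void)
{
        printf("%d %d %d\n",
               snapshot_map_decision(true, false),      /* REMAP_TO_COW */
               snapshot_map_decision(false, true),      /* START_COPY_AND_QUEUE */
               snapshot_map_decision(false, false));    /* REMAP_TO_ORIGIN */
        return 0;
}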
2114 static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
2121 init_tracked_chunk(bio);
2123 if (bio->bi_opf & REQ_PREFLUSH) {
2124 if (!dm_bio_get_target_bio_nr(bio))
2125 bio_set_dev(bio, s->origin->bdev);
2127 bio_set_dev(bio, s->cow->bdev);
2131 if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) {
2133 bio_endio(bio);
2137 chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);
2149 if (bio_data_dir(bio) == WRITE &&
2153 bio_set_dev(bio, s->origin->bdev);
2154 bio_list_add(&s->bios_queued_during_merge, bio);
2159 remap_exception(s, e, bio, chunk);
2161 if (bio_data_dir(bio) == WRITE)
2162 track_chunk(s, bio, chunk);
2167 bio_set_dev(bio, s->origin->bdev);
2169 if (bio_data_dir(bio) == WRITE) {
2171 return do_origin(s->origin, bio, false);
2180 static int snapshot_end_io(struct dm_target *ti, struct bio *bio,
2185 if (is_bio_tracked(bio))
2186 stop_tracking_chunk(s, bio);
2424 * supplied bio was ignored. The caller may submit it immediately.
2429 * and any supplied bio is added to a list to be submitted once all
2433 struct bio *bio)
2512 * If an origin bio was supplied, queue it to wait for the
2516 if (bio) {
2517 bio_list_add(&pe->origin_bios, bio);
2518 bio = NULL;
2542 * Submit the exception against which the bio is queued last,
2554 static int do_origin(struct dm_dev *origin, struct bio *bio, bool limit)
2571 r = __origin_write(&o->snapshots, bio->bi_iter.bi_sector, bio);
2665 static int origin_map(struct dm_target *ti, struct bio *bio)
2670 bio_set_dev(bio, o->dev->bdev);
2672 if (unlikely(bio->bi_opf & REQ_PREFLUSH))
2675 if (bio_data_dir(bio) != WRITE)
2679 ((unsigned int)bio->bi_iter.bi_sector & (o->split_boundary - 1));
2681 if (bio_sectors(bio) > available_sectors)
2682 dm_accept_partial_bio(bio, available_sectors);
2685 return do_origin(o->dev, bio, true);
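origin_map() makes sure a write never straddles a chunk boundary: split_boundary holds the smallest chunk size (in sectors, a power of two) among the snapshots attached to the origin, so the distance to the next boundary comes from a mask, and dm_accept_partial_bio() trims the bio to it before do_origin() is called. A worked example with made-up numbers:

/*
 * Sketch of the split arithmetic in origin_map(); the values are
 * illustrative only.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        unsigned int split_boundary = 16;   /* sectors, power of two */
        uint64_t bi_sector = 100;
        unsigned int bio_sectors = 24;

        unsigned int available = split_boundary -
                ((unsigned int)bi_sector & (split_boundary - 1));

        if (bio_sectors > available)
                bio_sectors = available;    /* dm_accept_partial_bio() trims here */

        /* sector 100 is 4 sectors into its 16-sector chunk -> 12 remain */
        printf("mapped %u sectors starting at %llu\n",
               bio_sectors, (unsigned long long)bi_sector);
        return 0;
}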