Lines Matching defs:bio (drivers/md/dm-snap.c, the Linux device-mapper snapshot target)

148 	struct bio flush_bio;
208 * in a bio list
233 struct bio *full_bio;
249 static void init_tracked_chunk(struct bio *bio)
251 struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
255 static bool is_bio_tracked(struct bio *bio)
257 struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
261 static void track_chunk(struct dm_snapshot *s, struct bio *bio, chunk_t chunk)
263 struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
273 static void stop_tracking_chunk(struct dm_snapshot *s, struct bio *bio)
275 struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
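
The four tracking helpers above (source lines 249-275) all reach the same per-bio payload through dm_per_bio_data(). A minimal sketch of that pattern, assuming dm_snap_tracked_chunk carries little more than the chunk number (the real hash table and locking around track_chunk()/stop_tracking_chunk() are not visible in these hits):

	/* Sketch only: every field except 'chunk' is an assumption. */
	struct dm_snap_tracked_chunk {
		struct hlist_node node;
		chunk_t chunk;
	};

	static void track_chunk(struct dm_snapshot *s, struct bio *bio, chunk_t chunk)
	{
		struct dm_snap_tracked_chunk *c =
			dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));

		c->chunk = chunk;
		/* the real helper also links c into a per-snapshot hash under a lock */
	}
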
926 static struct bio *__release_queued_bios_after_merge(struct dm_snapshot *s)
986 static void flush_bios(struct bio *bio);
990 struct bio *b = NULL;
1127 static void error_bios(struct bio *bio);
1131 struct bio *flush_bio = &s->flush_bio;
1143 struct bio *b = NULL;
1589 static void flush_bios(struct bio *bio)
1591 struct bio *n;
1593 while (bio) {
1594 n = bio->bi_next;
1595 bio->bi_next = NULL;
1596 submit_bio_noacct(bio);
1597 bio = n;
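
flush_bios() (source lines 1589-1597) walks a singly linked chain through bi_next and resubmits each bio with submit_bio_noacct(). Elsewhere in this file such chains are detached from a struct bio_list in one go; a sketch of that producer/consumer pairing, assuming the queue was built with the standard bio_list helpers:

	/* Drain a queued bio_list and resubmit every bio on it. */
	static void submit_queued(struct bio_list *queued)
	{
		struct bio *bio = bio_list_get(queued);	/* detaches the chain, empties the list */
		struct bio *n;

		while (bio) {
			n = bio->bi_next;
			bio->bi_next = NULL;		/* submit each bio on its own */
			submit_bio_noacct(bio);
			bio = n;
		}
	}
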
1601 static int do_origin(struct dm_dev *origin, struct bio *bio, bool limit);
1606 static void retry_origin_bios(struct dm_snapshot *s, struct bio *bio)
1608 struct bio *n;
1611 while (bio) {
1612 n = bio->bi_next;
1613 bio->bi_next = NULL;
1614 r = do_origin(s->origin, bio, false);
1616 submit_bio_noacct(bio);
1617 bio = n;
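
retry_origin_bios() (source lines 1606-1617) is the same walk, but each bio is first pushed back through do_origin(); the line between that call and submit_bio_noacct() is not a hit and presumably checks the mapping result. A hedged reconstruction of the loop body:

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		r = do_origin(s->origin, bio, false);
		if (r == DM_MAPIO_REMAPPED)	/* assumed: only remapped bios are resubmitted here */
			submit_bio_noacct(bio);
		bio = n;
	}
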
1624 static void error_bios(struct bio *bio)
1626 struct bio *n;
1628 while (bio) {
1629 n = bio->bi_next;
1630 bio->bi_next = NULL;
1631 bio_io_error(bio);
1632 bio = n;
1666 struct bio *origin_bios = NULL;
1667 struct bio *snapshot_bios = NULL;
1668 struct bio *full_bio = NULL;
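
The three NULL-initialised pointers at source lines 1666-1668 sit in the completion path for a pending exception: queued bios are detached into local chains while the snapshot is still locked, then resubmitted afterwards. A sketch of that pattern, assuming pe->origin_bios and pe->snapshot_bios are the bio_lists seen at lines 2098 and 2529:

	/* under the snapshot lock: detach everything queued on the pending exception */
	origin_bios = bio_list_get(&pe->origin_bios);
	snapshot_bios = bio_list_get(&pe->snapshot_bios);
	/* pe->full_bio, if set, gets its saved bi_end_io restored here (not shown) */

	/* after dropping the lock: resubmit */
	retry_origin_bios(s, origin_bios);
	flush_bios(snapshot_bios);
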
1834 static void full_bio_end_io(struct bio *bio)
1836 void *callback_data = bio->bi_private;
1838 dm_kcopyd_do_callback(callback_data, 0, bio->bi_status ? 1 : 0);
1842 struct bio *bio)
1847 pe->full_bio = bio;
1848 pe->full_bio_end_io = bio->bi_end_io;
1854 bio->bi_end_io = full_bio_end_io;
1855 bio->bi_private = callback_data;
1857 submit_bio_noacct(bio);
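
full_bio_end_io() and start_full_bio() (source lines 1834-1857) are the usual end_io hijack: the bio's original completion is saved on the pending exception, a trampoline is installed, and the trampoline reports the outcome to kcopyd. A sketch of both halves; copy_callback, pe->snap and the dm_kcopyd_prepare_callback() wiring are assumptions based on how kcopyd callbacks are normally prepared:

	static void full_bio_end_io(struct bio *bio)
	{
		void *callback_data = bio->bi_private;

		/* hand the result to kcopyd as a write-error count (0 or 1) */
		dm_kcopyd_do_callback(callback_data, 0, bio->bi_status ? 1 : 0);
	}

	static void start_full_bio(struct dm_snap_pending_exception *pe,
				   struct bio *bio)
	{
		struct dm_snapshot *s = pe->snap;	/* assumed field name */
		void *callback_data;

		pe->full_bio = bio;
		pe->full_bio_end_io = bio->bi_end_io;	/* remembered for later restore */

		/* copy_callback: assumed kcopyd completion handler for this exception */
		callback_data = dm_kcopyd_prepare_callback(s->kcopyd_client,
							   copy_callback, pe);

		bio->bi_end_io = full_bio_end_io;
		bio->bi_private = callback_data;

		submit_bio_noacct(bio);
	}
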
1926 struct bio *bio, chunk_t chunk)
1928 bio_set_dev(bio, s->cow->bdev);
1929 bio->bi_iter.bi_sector =
1932 (bio->bi_iter.bi_sector & s->store->chunk_mask);
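
remap_exception() (source lines 1926-1932) points the bio at the COW device and rewrites its sector; the two non-matching lines in between presumably translate the exception's destination chunk back to a sector. A hedged reconstruction, using the exception-store helpers chunk_to_sector() and dm_chunk_number():

	bio_set_dev(bio, s->cow->bdev);
	bio->bi_iter.bi_sector =
		chunk_to_sector(s->store, dm_chunk_number(e->new_chunk) +
				(chunk - e->old_chunk)) +
		(bio->bi_iter.bi_sector & s->store->chunk_mask);
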
1937 struct bio *bio = context;
1938 struct dm_snapshot *s = bio->bi_private;
1941 bio->bi_status = write_err ? BLK_STS_IOERR : 0;
1942 bio_endio(bio);
1946 struct bio *bio, chunk_t chunk)
1951 dest.sector = bio->bi_iter.bi_sector;
1955 WARN_ON_ONCE(bio->bi_private);
1956 bio->bi_private = s;
1957 dm_kcopyd_zero(s->kcopyd_client, 1, &dest, 0, zero_callback, bio);
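
zero_exception() (source lines 1946-1957) builds one dm_io_region on the COW device and asks kcopyd to zero it; zero_callback() (1937-1942) then completes the original discard bio with the outcome. A sketch of the region setup, with the bdev and count fields assumed from context:

	struct dm_io_region dest;

	dest.bdev = s->cow->bdev;		/* assumed: zero the COW copy of this chunk */
	dest.sector = bio->bi_iter.bi_sector;
	dest.count = s->store->chunk_size;

	WARN_ON_ONCE(bio->bi_private);
	bio->bi_private = s;			/* zero_callback() recovers the snapshot from here */
	dm_kcopyd_zero(s->kcopyd_client, 1, &dest, 0, zero_callback, bio);
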
1960 static bool io_overlaps_chunk(struct dm_snapshot *s, struct bio *bio)
1962 return bio->bi_iter.bi_size ==
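
io_overlaps_chunk() (source lines 1960-1962) tests whether the bio covers an entire chunk; its continuation line is not a hit, but the comparison is presumably against the chunk size expressed in bytes:

	static bool io_overlaps_chunk(struct dm_snapshot *s, struct bio *bio)
	{
		return bio->bi_iter.bi_size ==
			(s->store->chunk_size << SECTOR_SHIFT);
	}
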
1966 static int snapshot_map(struct dm_target *ti, struct bio *bio)
1975 init_tracked_chunk(bio);
1977 if (bio->bi_opf & REQ_PREFLUSH) {
1978 bio_set_dev(bio, s->cow->bdev);
1982 chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);
1990 if (bio_data_dir(bio) == WRITE) {
1999 bio_data_dir(bio) == WRITE)) {
2004 if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) {
2005 if (s->discard_passdown_origin && dm_bio_get_target_bio_nr(bio)) {
2012 bio_set_dev(bio, s->origin->bdev);
2013 track_chunk(s, bio, chunk);
2022 remap_exception(s, e, bio, chunk);
2023 if (unlikely(bio_op(bio) == REQ_OP_DISCARD) &&
2024 io_overlaps_chunk(s, bio)) {
2027 zero_exception(s, e, bio, chunk);
2034 if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) {
2039 bio_endio(bio);
2049 if (bio_data_dir(bio) == WRITE) {
2059 remap_exception(s, e, bio, chunk);
2084 remap_exception(s, &pe->e, bio, chunk);
2088 if (!pe->started && io_overlaps_chunk(s, bio)) {
2094 start_full_bio(pe, bio);
2098 bio_list_add(&pe->snapshot_bios, bio);
2111 bio_set_dev(bio, s->origin->bdev);
2112 track_chunk(s, bio, chunk);
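
Taken together, the snapshot_map() hits (source lines 1966-2112) trace the snapshot target's decision tree. A commented skeleton of that flow, reconstructed from the hits above; locking, error paths, the discard-passdown case and the pending-exception allocation are deliberately omitted, and dm_lookup_exception() is an assumed name for the lookup:

	if (bio->bi_opf & REQ_PREFLUSH) {
		bio_set_dev(bio, s->cow->bdev);		/* flushes go to the COW device */
		return DM_MAPIO_REMAPPED;
	}

	chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);
	e = dm_lookup_exception(&s->complete, chunk);

	if (e) {
		/* chunk already copied: redirect to the COW store */
		remap_exception(s, e, bio, chunk);
		if (unlikely(bio_op(bio) == REQ_OP_DISCARD) &&
		    io_overlaps_chunk(s, bio)) {
			zero_exception(s, e, bio, chunk);	/* whole-chunk discard */
			return DM_MAPIO_SUBMITTED;
		}
	} else if (bio_data_dir(bio) == WRITE) {
		/* first write to this chunk: allocate a pending exception (omitted),
		 * remap, and either let the bio itself be the copy or queue it
		 * until kcopyd has copied the chunk */
		remap_exception(s, &pe->e, bio, chunk);
		if (!pe->started && io_overlaps_chunk(s, bio))
			start_full_bio(pe, bio);
		else
			bio_list_add(&pe->snapshot_bios, bio);
		return DM_MAPIO_SUBMITTED;
	} else {
		/* read of an uncopied chunk: pass through to the origin */
		bio_set_dev(bio, s->origin->bdev);
		track_chunk(s, bio, chunk);
	}
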
2134 static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
2141 init_tracked_chunk(bio);
2143 if (bio->bi_opf & REQ_PREFLUSH) {
2144 if (!dm_bio_get_target_bio_nr(bio))
2145 bio_set_dev(bio, s->origin->bdev);
2147 bio_set_dev(bio, s->cow->bdev);
2151 if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) {
2153 bio_endio(bio);
2157 chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);
2169 if (bio_data_dir(bio) == WRITE &&
2173 bio_set_dev(bio, s->origin->bdev);
2174 bio_list_add(&s->bios_queued_during_merge, bio);
2179 remap_exception(s, e, bio, chunk);
2181 if (bio_data_dir(bio) == WRITE)
2182 track_chunk(s, bio, chunk);
2187 bio_set_dev(bio, s->origin->bdev);
2189 if (bio_data_dir(bio) == WRITE) {
2191 return do_origin(s->origin, bio, false);
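
The snapshot_merge_map() hits (source lines 2134-2191) show the merge target's variant of the same routing: flushes are fanned out to origin and COW by target bio number, discards are completed immediately, reads of already-merged chunks are remapped to the COW store, and writes that collide with the region being merged back are parked on bios_queued_during_merge or pushed through do_origin(). A sketch of the two unambiguous branches:

	if (bio->bi_opf & REQ_PREFLUSH) {
		/* one flush per underlying device, selected by the target bio number */
		if (!dm_bio_get_target_bio_nr(bio))
			bio_set_dev(bio, s->origin->bdev);
		else
			bio_set_dev(bio, s->cow->bdev);
		return DM_MAPIO_REMAPPED;
	}

	if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) {
		bio_endio(bio);			/* nothing for a discard to do during a merge */
		return DM_MAPIO_SUBMITTED;
	}
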
2200 static int snapshot_end_io(struct dm_target *ti, struct bio *bio,
2205 if (is_bio_tracked(bio))
2206 stop_tracking_chunk(s, bio);
2436 * supplied bio was ignored. The caller may submit it immediately.
2441 * and any supplied bio is added to a list to be submitted once all
2445 struct bio *bio)
2524 * If an origin bio was supplied, queue it to wait for the
2528 if (bio) {
2529 bio_list_add(&pe->origin_bios, bio);
2530 bio = NULL;
2554 * Submit the exception against which the bio is queued last,
2566 static int do_origin(struct dm_dev *origin, struct bio *bio, bool limit)
2582 r = __origin_write(&o->snapshots, bio->bi_iter.bi_sector, bio);
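
The comments and queueing at source lines 2436-2530 spell out __origin_write()'s contract: DM_MAPIO_REMAPPED means the supplied bio was not taken and the caller submits it; DM_MAPIO_SUBMITTED means it now sits on a pending exception's origin_bios list and is resubmitted once the exceptions it depends on are written. A sketch of the queueing side of that contract:

	/* assumed: r was initialised to DM_MAPIO_REMAPPED at the top of __origin_write() */
	if (bio) {
		bio_list_add(&pe->origin_bios, bio);
		bio = NULL;
		r = DM_MAPIO_SUBMITTED;		/* caller must not touch the bio any more */
	}
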
2676 static int origin_map(struct dm_target *ti, struct bio *bio)
2681 bio_set_dev(bio, o->dev->bdev);
2683 if (unlikely(bio->bi_opf & REQ_PREFLUSH))
2686 if (bio_data_dir(bio) != WRITE)
2690 ((unsigned)bio->bi_iter.bi_sector & (o->split_boundary - 1));
2692 if (bio_sectors(bio) > available_sectors)
2693 dm_accept_partial_bio(bio, available_sectors);
2696 return do_origin(o->dev, bio, true);
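
origin_map() (source lines 2676-2696) caps each write so it never crosses the smallest snapshot chunk boundary (o->split_boundary); the truncated expression at line 2690 presumably subtracts the offset within the current boundary. A hedged reconstruction of the mapping path:

	unsigned int available_sectors;

	bio_set_dev(bio, o->dev->bdev);

	if (unlikely(bio->bi_opf & REQ_PREFLUSH))
		return DM_MAPIO_REMAPPED;

	if (bio_data_dir(bio) != WRITE)
		return DM_MAPIO_REMAPPED;	/* reads pass straight through to the origin */

	available_sectors = o->split_boundary -
		((unsigned int)bio->bi_iter.bi_sector & (o->split_boundary - 1));

	if (bio_sectors(bio) > available_sectors)
		dm_accept_partial_bio(bio, available_sectors);	/* the tail is resubmitted as a new bio */

	/* only writes need the copy-on-write machinery in do_origin() */
	return do_origin(o->dev, bio, true);
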