Lines matching refs: bio — drivers/md/bcache/request.c (the leading numbers are that file's own line numbers)
40 static void bio_csum(struct bio *bio, struct bkey *k)
46 bio_for_each_segment(bv, bio, iter) {
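Matches 40 and 46 come from bio_csum(), which folds every data segment of the bio into the key's checksum. A hedged reconstruction around the matched lines (bch_crc64_update() is bcache's incremental CRC64 helper):

    static void bio_csum(struct bio *bio, struct bkey *k)
    {
            struct bio_vec bv;
            struct bvec_iter iter;
            uint64_t csum = 0;

            /* Visit each segment the bio's iterator covers; bv is a value copy. */
            bio_for_each_segment(bv, bio, iter) {
                    void *d = kmap(bv.bv_page) + bv.bv_offset;

                    csum = bch_crc64_update(csum, d, bv.bv_len);
                    kunmap(bv.bv_page);
            }

            /* Stash the checksum in the key's spare pointer slot. */
            k->ptr[KEY_PTRS(k)] = csum & (~0ULL >> 1);
    }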
111 struct bio *bio = op->bio;
114 bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector);
116 while (bio_sectors(bio)) {
117 unsigned int sectors = min(bio_sectors(bio),
123 bio->bi_iter.bi_sector += sectors;
124 bio->bi_iter.bi_size -= sectors << 9;
128 bio->bi_iter.bi_sector,
134 bio_put(bio);
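Matches 111-134 belong to bch_data_invalidate(): on bypass the data is not cached, so the covered range is invalidated instead. A hedged sketch of the core loop, built around the matched lines (keylist helper names as in bcache's own API):

    while (bio_sectors(bio)) {
            unsigned int sectors = min(bio_sectors(bio),
                                       1U << (KEY_SIZE_BITS - 1));

            /* Back off if the keylist can't grow right now. */
            if (bch_keylist_realloc(&op->insert_keys, 2, op->c))
                    goto out;

            /* Consume the chunk by advancing the iterator by hand... */
            bio->bi_iter.bi_sector += sectors;
            bio->bi_iter.bi_size   -= sectors << 9;

            /* ...and queue a key that invalidates that range. */
            bch_keylist_add(&op->insert_keys,
                            &KEY(op->inode, bio->bi_iter.bi_sector, sectors));
    }

    op->insert_data_done = true;
    bio_put(bio);   /* match 134: drops the ref bch_data_insert() took */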
169 static void bch_data_insert_endio(struct bio *bio)
171 struct closure *cl = bio->bi_private;
174 if (bio->bi_status) {
177 op->status = bio->bi_status;
184 bch_bbio_endio(op->c, bio, bio->bi_status, "writing data to cache");
190 struct bio *bio = op->bio, *n;
195 if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0)
202 bio->bi_opf &= ~(REQ_PREFLUSH|REQ_FUA);
220 SET_KEY_OFFSET(k, bio->bi_iter.bi_sector);
222 if (!bch_alloc_sectors(op->c, k, bio_sectors(bio),
227 n = bio_next_split(bio, KEY_SIZE(k), GFP_NOIO, split);
249 } while (n != bio);
279 bio_put(bio);
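Matches 190-279 are bch_data_insert_start(), which carves the bio into one chunk per allocated key. The loop's exit condition is the part worth calling out; a hedged sketch:

    do {
            /* ... build key k, sized by bch_alloc_sectors() (match 222) ... */
            n = bio_next_split(bio, KEY_SIZE(k), GFP_NOIO, split);

            n->bi_end_io  = bch_data_insert_endio;
            n->bi_private = cl;

            /* bcache submits through bch_submit_bbio(n, op->c, k, 0);
             * only the loop shape matters here. */
    } while (n != bio);   /* bio_next_split() returns bio itself on the final chunk */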
302 * It inserts the data in op->bio; bi_sector is used for the key offset,
306 * region of the cache represented by op->bio and op->inode.
312 trace_bcache_write(op->c, op->inode, op->bio,
316 bio_get(op->bio);
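Matches 302-316 are bch_data_insert() and its doc comment: data lands at op->inode + bi_sector, or the same range is invalidated when bypassing. The body is short; a hedged sketch of how it pins the bio before starting the state machine:

    void bch_data_insert(struct closure *cl)
    {
            struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);

            trace_bcache_write(op->c, op->inode, op->bio,
                               op->writeback, op->bypass);

            bch_keylist_init(&op->insert_keys);
            bio_get(op->bio);   /* match 316: the insert path does its own bio_put() */
            bch_data_insert_start(cl);
    }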
363 static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
373 (bio_op(bio) == REQ_OP_DISCARD))
378 op_is_write(bio_op(bio))))
382 * If the bio is for read-ahead or background IO, bypass it or
391 if ((bio->bi_opf & (REQ_RAHEAD|REQ_BACKGROUND))) {
392 if (!(bio->bi_opf & (REQ_META|REQ_PRIO)) &&
397 if (bio->bi_iter.bi_sector & (c->cache->sb.block_size - 1) ||
398 bio_sectors(bio) & (c->cache->sb.block_size - 1)) {
416 hlist_for_each_entry(i, iohash(dc, bio->bi_iter.bi_sector), hash)
417 if (i->last == bio->bi_iter.bi_sector &&
426 if (i->sequential + bio->bi_iter.bi_size > i->sequential)
427 i->sequential += bio->bi_iter.bi_size;
429 i->last = bio_end_sector(bio);
444 trace_bcache_bypass_sequential(bio);
449 trace_bcache_bypass_congested(bio);
454 bch_rescale_priorities(c, bio_sectors(bio));
457 bch_mark_sectors_bypassed(c, dc, bio_sectors(bio));
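Matches 363-457 are check_should_bypass(). One of its tests, on 397-398, bypasses any IO that is not aligned to the cache's block size; restated as a self-contained helper (the helper name is ours, not bcache's):

    /* True iff the bio starts and ends on cache-block boundaries. */
    static bool bio_block_aligned(struct bio *bio, unsigned int block_sectors)
    {
            return !(bio->bi_iter.bi_sector & (block_sectors - 1)) &&
                   !(bio_sectors(bio) & (block_sectors - 1));
    }

Matches 416-429 implement the sequential-IO detector: recent streams are hashed by the sector where they ended, and a bio that starts exactly there extends that stream's i->sequential byte count; the check on 426 guards the addition against overflow.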
467 struct bbio bio;
468 struct bio *orig_bio;
469 struct bio *cache_miss;
485 static void bch_cache_read_endio(struct bio *bio)
487 struct bbio *b = container_of(bio, struct bbio, bio);
488 struct closure *cl = bio->bi_private;
492 * If the bucket was reused while our bio was in flight, we might have
498 if (bio->bi_status)
499 s->iop.status = bio->bi_status;
506 bch_bbio_endio(s->iop.c, bio, bio->bi_status, "reading from cache");
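Matches 467-469 and 485-506 depend on struct bbio embedding its struct bio as the final member, which is what makes the container_of() on 487 work. A trimmed sketch of the pattern (bcache's real struct carries more fields):

    struct bbio {
            struct bkey  key;
            struct bio   bio;   /* must be last: struct bio is variable-length */
    };

    static void example_endio(struct bio *bio)
    {
            struct bbio *b = container_of(bio, struct bbio, bio);
            struct closure *cl = bio->bi_private;

            /* b->key records where in the cache this IO went; cl resumes
             * the search that issued it (matches 487-488). */
    }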
511 * the middle of the bio
516 struct bio *n, *bio = &s->bio.bio;
520 if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0)) <= 0)
524 KEY_START(k) > bio->bi_iter.bi_sector) {
525 unsigned int bio_sectors = bio_sectors(bio);
528 KEY_START(k) - bio->bi_iter.bi_sector)
530 int ret = s->d->cache_miss(b, s, bio, sectors);
550 n = bio_next_split(bio, min_t(uint64_t, INT_MAX,
551 KEY_OFFSET(k) - bio->bi_iter.bi_sector),
554 bio_key = &container_of(n, struct bbio, bio)->key;
564 * The bucket we're reading from might be reused while our bio
575 return n == bio ? MAP_DONE : MAP_CONTINUE;
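Matches 511-575 are the per-extent lookup callback. Its return value leans on the same bio_next_split() convention as the insert path; annotated:

    /* bio_next_split() hands back the original bio once the remaining
     * range fits inside this extent, so:
     *   n == bio  -> request fully covered: MAP_DONE
     *   n != bio  -> more extents needed:   MAP_CONTINUE   (match 575) */
    return n == bio ? MAP_DONE : MAP_CONTINUE;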
581 struct bio *bio = &s->bio.bio;
588 &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0),
602 * before we submit s->bio.bio
621 static void request_endio(struct bio *bio)
623 struct closure *cl = bio->bi_private;
625 if (bio->bi_status) {
628 s->iop.status = bio->bi_status;
633 bio_put(bio);
637 static void backing_request_endio(struct bio *bio)
639 struct closure *cl = bio->bi_private;
641 if (bio->bi_status) {
646 * If a bio has REQ_PREFLUSH for writeback mode, it is
653 bio->bi_opf & REQ_PREFLUSH)) {
655 dc->backing_dev_name, bio->bi_status);
658 s->iop.status = bio->bi_status;
662 bch_count_backing_io_errors(dc, bio);
665 bio_put(bio);
683 struct bio *orig_bio,
686 struct bio *bio = &s->bio.bio;
688 bio_init(bio, NULL, 0);
689 __bio_clone_fast(bio, orig_bio);
696 bio->bi_end_io = end_io_fn;
697 bio->bi_private = &s->cl;
699 bio_cnt_set(bio, 3);
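Matches 683-699 are do_bio_hook(), which re-initializes the search's embedded bio as a cheap clone of the original and redirects its completion into the search closure. An annotated restatement; the rationale for the count of 3 is our hedged reading, not a quote from the source:

    bio_init(bio, NULL, 0);
    __bio_clone_fast(bio, orig_bio);   /* share orig_bio's pages, don't copy */
    bio->bi_end_io  = end_io_fn;
    bio->bi_private = &s->cl;
    /* Several paths bio_put() this embedded bio before the search is torn
     * down, so the refcount is pre-loaded rather than left at 1. */
    bio_cnt_set(bio, 3);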
708 if (s->iop.bio)
709 bio_put(s->iop.bio);
716 static inline struct search *search_alloc(struct bio *bio,
724 do_bio_hook(s, bio, request_endio);
727 s->orig_bio = bio;
732 s->write = op_is_write(bio_op(bio));
735 s->start_time = part_start_io_acct(d->disk, &s->part, bio);
737 s->iop.bio = NULL;
743 s->iop.flush_journal = op_is_flush(bio->bi_opf);
769 if (s->iop.bio)
770 bio_free_pages(s->iop.bio);
778 struct bio *bio = &s->bio.bio;
797 closure_bio_submit(s->iop.c, bio, cl);
811 if (s->iop.bio)
812 bio_free_pages(s->iop.bio);
828 * to the buffers the original bio pointed to:
831 if (s->iop.bio) {
832 bio_reset(s->iop.bio);
833 s->iop.bio->bi_iter.bi_sector =
835 bio_copy_dev(s->iop.bio, s->cache_miss);
836 s->iop.bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
837 bch_bio_map(s->iop.bio, NULL);
839 bio_copy_data(s->cache_miss, s->iop.bio);
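Matches 828-839 are from cached_dev_read_done(): the scratch bio that carried the miss data is reset and remapped over the whole miss region so it can be reused to populate the cache, after its payload is copied back into the buffers the original bio pointed to; annotated:

    if (s->iop.bio) {
            bio_reset(s->iop.bio);
            s->iop.bio->bi_iter.bi_sector = s->cache_miss->bi_iter.bi_sector;
            bio_copy_dev(s->iop.bio, s->cache_miss);
            s->iop.bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
            bch_bio_map(s->iop.bio, NULL);   /* re-derive the bvec layout;
                                              * pages were attached at miss time */

            bio_copy_data(s->cache_miss, s->iop.bio);   /* scratch -> caller's buffers */
    }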
851 if (s->iop.bio &&
871 else if (s->iop.bio || verify(dc))
878 struct bio *bio, unsigned int sectors)
883 struct bio *miss, *cache_bio;
888 miss = bio_next_split(bio, sectors, GFP_NOIO, &s->d->bio_split);
889 ret = miss == bio ? MAP_DONE : MAP_CONTINUE;
893 if (!(bio->bi_opf & REQ_RAHEAD) &&
894 !(bio->bi_opf & (REQ_META|REQ_PRIO)) &&
897 get_capacity(bio->bi_disk) - bio_end_sector(bio));
899 s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada);
902 bio->bi_iter.bi_sector + s->insert_bio_sectors,
911 miss = bio_next_split(bio, sectors, GFP_NOIO, &s->d->bio_split);
914 ret = miss == bio ? MAP_DONE : -EINTR;
937 s->iop.bio = cache_bio;
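Matches 878-937 are cached_dev_cache_miss(). The readahead sizing on 893-899 pads only ordinary reads (not readahead, metadata, or high-priority IO) and clamps the padding so it never runs past the end of the device. A hedged sketch; the gc_stats cutoff is our recollection of the surrounding condition:

    unsigned int reada = 0;

    if (!(bio->bi_opf & REQ_RAHEAD) &&
        !(bio->bi_opf & (REQ_META | REQ_PRIO)) &&
        s->iop.c->gc_stats.in_use < CUTOFF_CACHE_READA)
            reada = min_t(sector_t, dc->readahead >> 9,
                          get_capacity(bio->bi_disk) - bio_end_sector(bio));

    s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada);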
975 struct bio *bio = &s->bio.bio;
976 struct bkey start = KEY(dc->disk.id, bio->bi_iter.bi_sector, 0);
977 struct bkey end = KEY(dc->disk.id, bio_end_sector(bio), 0);
998 if (bio_op(bio) == REQ_OP_DISCARD)
1009 s->iop.bio = s->orig_bio;
1010 bio_get(s->iop.bio);
1012 if (bio_op(bio) == REQ_OP_DISCARD &&
1017 bio->bi_end_io = backing_request_endio;
1018 closure_bio_submit(s->iop.c, bio, cl);
1022 s->iop.bio = bio;
1024 if (bio->bi_opf & REQ_PREFLUSH) {
1029 struct bio *flush;
1037 bio_copy_dev(flush, bio);
1045 s->iop.bio = bio_clone_fast(bio, GFP_NOIO, &dc->disk.bio_split);
1047 bio->bi_end_io = backing_request_endio;
1048 closure_bio_submit(s->iop.c, bio, cl);
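Matches 975-1048 are cached_dev_write(), which picks between bypass (invalidate the cached range and go straight to the backing device), writeback (cache only), and writethrough (cache plus backing device). In writeback mode a REQ_PREFLUSH cannot simply be swallowed, since the data never touches the backing device; matches 1024-1037 send it a dedicated empty flush instead. A hedged sketch:

    if (bio->bi_opf & REQ_PREFLUSH) {
            struct bio *flush = bio_alloc_bioset(GFP_NOIO, 0,
                                                 &dc->disk.bio_split);
            if (!flush) {
                    s->iop.status = BLK_STS_RESOURCE;
            } else {
                    bio_copy_dev(flush, bio);   /* match 1037 */
                    flush->bi_opf     = REQ_OP_WRITE | REQ_PREFLUSH;
                    flush->bi_end_io  = backing_request_endio;
                    flush->bi_private = cl;
                    closure_bio_submit(s->iop.c, flush, cl);
            }
    }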
1059 struct bio *bio = &s->bio.bio;
1065 bio->bi_end_io = backing_request_endio;
1066 closure_bio_submit(s->iop.c, bio, cl);
1079 static void detached_dev_end_io(struct bio *bio)
1083 ddip = bio->bi_private;
1084 bio->bi_end_io = ddip->bi_end_io;
1085 bio->bi_private = ddip->bi_private;
1088 part_end_io_acct(ddip->part, bio, ddip->start_time);
1090 if (bio->bi_status) {
1094 bch_count_backing_io_errors(dc, bio);
1098 bio->bi_end_io(bio);
1101 static void detached_dev_do_request(struct bcache_device *d, struct bio *bio)
1113 bio->bi_status = BLK_STS_RESOURCE;
1114 bio->bi_end_io(bio);
1120 ddip->start_time = part_start_io_acct(d->disk, &ddip->part, bio);
1121 ddip->bi_end_io = bio->bi_end_io;
1122 ddip->bi_private = bio->bi_private;
1123 bio->bi_end_io = detached_dev_end_io;
1124 bio->bi_private = ddip;
1126 if ((bio_op(bio) == REQ_OP_DISCARD) &&
1128 bio->bi_end_io(bio);
1130 submit_bio_noacct(bio);
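Matches 1079-1130 implement the detached (pass-through) path. It uses the classic endio-chaining idiom: stash the caller's completion in a small per-IO struct, substitute our own handler (matches 1123-1124), and restore before completing (matches 1084-1085). A trimmed sketch of the private struct, with the field set hedged against the matched lines:

    struct detached_dev_io_private {
            struct bcache_device *d;
            unsigned long        start_time;   /* for IO accounting */
            bio_end_io_t         *bi_end_io;   /* caller's completion */
            void                 *bi_private;  /* caller's context */
    };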
1173 blk_qc_t cached_dev_submit_bio(struct bio *bio)
1176 struct bcache_device *d = bio->bi_disk->private_data;
1178 int rw = bio_data_dir(bio);
1182 bio->bi_status = BLK_STS_IOERR;
1183 bio_endio(bio);
1202 bio_set_dev(bio, dc->bdev);
1203 bio->bi_iter.bi_sector += dc->sb.data_offset;
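The remap on 1202-1203 retargets attached IO at the raw backing bdev and shifts it past the bcache superblock; dc->sb.data_offset is the sector where the cached data actually begins. Annotated:

    bio_set_dev(bio, dc->bdev);                   /* aim at the raw backing dev */
    bio->bi_iter.bi_sector += dc->sb.data_offset; /* skip the bcache superblock */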
1206 s = search_alloc(bio, d);
1207 trace_bcache_request_start(s->d, bio);
1209 if (!bio->bi_iter.bi_size) {
1218 s->iop.bypass = check_should_bypass(dc, bio);
1227 detached_dev_do_request(d, bio);
1252 struct bio *bio, unsigned int sectors)
1254 unsigned int bytes = min(sectors, bio_sectors(bio)) << 9;
1256 swap(bio->bi_iter.bi_size, bytes);
1257 zero_fill_bio(bio);
1258 swap(bio->bi_iter.bi_size, bytes);
1260 bio_advance(bio, bytes);
1262 if (!bio->bi_iter.bi_size)
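Matches 1252-1262 are flash_dev_cache_miss(), and 1256-1260 use a neat trick to zero just the missing byte range: temporarily shrink the bio's view, zero it, restore, then advance; annotated:

    unsigned int bytes = min(sectors, bio_sectors(bio)) << 9;

    swap(bio->bi_iter.bi_size, bytes);   /* bi_size = just the bytes to zero */
    zero_fill_bio(bio);                  /* zeroes only that window          */
    swap(bio->bi_iter.bi_size, bytes);   /* restore the full size            */
    bio_advance(bio, bytes);             /* step past the zeroed window      */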
1278 blk_qc_t flash_dev_submit_bio(struct bio *bio)
1282 struct bcache_device *d = bio->bi_disk->private_data;
1285 bio->bi_status = BLK_STS_IOERR;
1286 bio_endio(bio);
1290 s = search_alloc(bio, d);
1292 bio = &s->bio.bio;
1294 trace_bcache_request_start(s->d, bio);
1296 if (!bio->bi_iter.bi_size) {
1304 } else if (bio_data_dir(bio)) {
1306 &KEY(d->id, bio->bi_iter.bi_sector, 0),
1307 &KEY(d->id, bio_end_sector(bio), 0));
1309 s->iop.bypass = (bio_op(bio) == REQ_OP_DISCARD) != 0;
1311 s->iop.bio = bio;