Lines Matching defs:bio

353  * call has finished, the bio has been linked into some internal structure
356 static bool is_suspended(struct mddev *mddev, struct bio *bio)
360 if (bio_data_dir(bio) != WRITE)
364 if (bio->bi_iter.bi_sector >= mddev->suspend_hi)
366 if (bio_end_sector(bio) < mddev->suspend_lo)
371 void md_handle_request(struct mddev *mddev, struct bio *bio)
374 if (is_suspended(mddev, bio)) {
376 /* Bail out if REQ_NOWAIT is set for the bio */
377 if (bio->bi_opf & REQ_NOWAIT) {
378 bio_wouldblock_error(bio);
384 if (!is_suspended(mddev, bio))
393 if (!mddev->pers->make_request(mddev, bio)) {
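The fragments from 356 to 393 are the suspend gate in front of the personality's make_request hook. A minimal sketch of the flow they imply, written against the drivers/md/md.c context: the whole-array suspend test, the sleep on sb_wait, and the retry after a refused bio fall between the matched lines and are assumptions here, not verbatim source.

    static bool is_suspended(struct mddev *mddev, struct bio *bio)
    {
            /* Reads are never held back by a suspended sector range. */
            if (bio_data_dir(bio) != WRITE)
                    return false;
            /* A write entirely outside [suspend_lo, suspend_hi) may pass. */
            if (bio->bi_iter.bi_sector >= mddev->suspend_hi)
                    return false;
            if (bio_end_sector(bio) < mddev->suspend_lo)
                    return false;
            return true;
    }

    void md_handle_request(struct mddev *mddev, struct bio *bio)
    {
    retry:
            if (is_suspended(mddev, bio)) {
                    /* Bail out if REQ_NOWAIT is set for the bio */
                    if (bio->bi_opf & REQ_NOWAIT) {
                            bio_wouldblock_error(bio);
                            return;
                    }
                    /* Assumption: sleep until the range is resumed. */
                    wait_event(mddev->sb_wait, !is_suspended(mddev, bio));
            }
            /* The personality may refuse the bio (e.g. a race with a fresh
             * suspend); re-run the whole check in that case. */
            if (!mddev->pers->make_request(mddev, bio))
                    goto retry;
    }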
402 static void md_submit_bio(struct bio *bio)
404 const int rw = bio_data_dir(bio);
405 struct mddev *mddev = bio->bi_bdev->bd_disk->private_data;
408 bio_io_error(bio);
413 bio_io_error(bio);
417 bio = bio_split_to_limits(bio);
418 if (!bio)
422 if (bio_sectors(bio) != 0)
423 bio->bi_status = BLK_STS_IOERR;
424 bio_endio(bio);
428 /* the bio may become mergeable once passed to the underlying layer */
429 bio->bi_opf &= ~REQ_NOMERGE;
431 md_handle_request(mddev, bio);
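The md_submit_bio fragments (402 to 431) show the queue entry point: validate that an active array sits behind the gendisk, split the bio to the device limits, reject writes the array cannot take, clear REQ_NOMERGE, and hand off to md_handle_request. A sketch; the MD_BROKEN and read-only guards are assumptions reconstructed around the matched error paths.

    static void md_submit_bio(struct bio *bio)
    {
            const int rw = bio_data_dir(bio);
            struct mddev *mddev = bio->bi_bdev->bd_disk->private_data;

            /* No array behind this gendisk, or no active personality. */
            if (!mddev || !mddev->pers) {
                    bio_io_error(bio);
                    return;
            }

            /* Assumption: writes to an array marked broken fail at once. */
            if (rw == WRITE && test_bit(MD_BROKEN, &mddev->flags)) {
                    bio_io_error(bio);
                    return;
            }

            bio = bio_split_to_limits(bio);
            if (!bio)
                    return;         /* the split consumed the bio */

            /* Assumption: a write to a read-only array gets an error status
             * unless it is an empty flush. */
            if (rw == WRITE && !md_is_rdwr(mddev)) {
                    if (bio_sectors(bio) != 0)
                            bio->bi_status = BLK_STS_IOERR;
                    bio_endio(bio);
                    return;
            }

            /* the bio may become mergeable once passed to the underlying layer */
            bio->bi_opf &= ~REQ_NOMERGE;

            md_handle_request(mddev, bio);
    }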
487 static void md_end_flush(struct bio *bio)
489 struct md_rdev *rdev = bio->bi_private;
492 bio_put(bio);
519 struct bio *bi;
544 struct bio *bio = mddev->flush_bio;
558 if (bio->bi_iter.bi_size == 0) {
560 bio_endio(bio);
562 bio->bi_opf &= ~REQ_PREFLUSH;
563 md_handle_request(mddev, bio);
569 * a bio with REQ_PREFLUSH. Returns true if the bio is finished or is
571 * complete but still needs the I/O portion of the bio to be processed.
573 bool md_flush_request(struct mddev *mddev, struct bio *bio)
599 mddev->flush_bio = bio;
600 bio = NULL;
604 if (!bio) {
608 /* flush was performed for some other bio while we waited. */
609 if (bio->bi_iter.bi_size == 0)
611 bio_endio(bio);
613 bio->bi_opf &= ~REQ_PREFLUSH;
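The flush fragments (487 to 613) describe a coalescing scheme: one PREFLUSH bio at a time owns mddev->flush_bio, a work item sends flushes to every member device (md_end_flush dropping one reference per completion, md_submit_flush_data finishing or re-submitting the data part), and requests that arrive while a flush is in flight reuse its result. A sketch of the md_flush_request handoff; the timestamp bookkeeping (prev_flush_start) and the workqueue submission are reconstructions of the unmatched lines.

    bool md_flush_request(struct mddev *mddev, struct bio *bio)
    {
            ktime_t req_start = ktime_get_boottime();

            spin_lock_irq(&mddev->lock);
            /* Sleep until the slot is free or a newer flush has completed. */
            wait_event_lock_irq(mddev->sb_wait,
                                !mddev->flush_bio ||
                                ktime_before(req_start, mddev->prev_flush_start),
                                mddev->lock);
            /* No flush finished after we arrived: own this flush round. */
            if (ktime_after(req_start, mddev->prev_flush_start)) {
                    mddev->flush_bio = bio;
                    bio = NULL;
            }
            spin_unlock_irq(&mddev->lock);

            if (!bio) {
                    /* Assumption: per-rdev PREFLUSH bios are issued from a
                     * work item and complete in md_end_flush. */
                    INIT_WORK(&mddev->flush_work, submit_flushes);
                    queue_work(md_wq, &mddev->flush_work);
                    return true;
            }

            /* flush was performed for some other bio while we waited. */
            if (bio->bi_iter.bi_size == 0) {
                    bio_endio(bio);         /* empty barrier: all done */
                    return true;
            }
            bio->bi_opf &= ~REQ_PREFLUSH;   /* the data part remains */
            return false;
    }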
904 static void super_written(struct bio *bio)
906 struct md_rdev *rdev = bio->bi_private;
909 if (bio->bi_status) {
911 blk_status_to_errno(bio->bi_status));
914 && (bio->bi_opf & MD_FAILFAST)) {
921 bio_put(bio);
938 struct bio *bio;
946 bio = bio_alloc_bioset(rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev,
954 bio->bi_iter.bi_sector = sector;
955 __bio_add_page(bio, page, size, 0);
956 bio->bi_private = rdev;
957 bio->bi_end_io = super_written;
962 bio->bi_opf |= MD_FAILFAST;
965 submit_bio(bio);
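The superblock write path (904 to 965) pairs md_super_write with its completion handler super_written. Sketch below; the failfast eligibility test, the error handling in the completion, and the pending-write accounting are assumptions layered around the matched lines.

    static void super_written(struct bio *bio)
    {
            struct md_rdev *rdev = bio->bi_private;
            struct mddev *mddev = rdev->mddev;

            if (bio->bi_status) {
                    pr_err("md: %s gets error=%d\n", __func__,
                           blk_status_to_errno(bio->bi_status));
                    /* Assumption: a failed MD_FAILFAST write is retried
                     * without failfast before the device is failed. */
                    md_error(mddev, rdev);
            }

            bio_put(bio);

            /* Assumption: wake waiters once all sb writes have landed. */
            if (atomic_dec_and_test(&mddev->pending_writes))
                    wake_up(&mddev->sb_wait);
            rdev_dec_pending(rdev, mddev);
    }

    void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
                        sector_t sector, int size, struct page *page)
    {
            struct bio *bio;

            /* Metadata may live on a separate device (rdev->meta_bdev). */
            bio = bio_alloc_bioset(rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev,
                                   1, REQ_OP_WRITE | REQ_PREFLUSH | REQ_FUA,
                                   GFP_NOIO, &mddev->sync_set);

            bio->bi_iter.bi_sector = sector;
            __bio_add_page(bio, page, size, 0);
            bio->bi_private = rdev;
            bio->bi_end_io = super_written;

            if (test_bit(FailFast, &rdev->flags))   /* assumed eligibility */
                    bio->bi_opf |= MD_FAILFAST;

            atomic_inc(&mddev->pending_writes);
            submit_bio(bio);
    }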
980 struct bio bio;
984 bio_init(&bio, rdev->meta_bdev, &bvec, 1, opf);
986 bio_init(&bio, rdev->bdev, &bvec, 1, opf);
989 bio.bi_iter.bi_sector = sector + rdev->sb_start;
993 bio.bi_iter.bi_sector = sector + rdev->new_data_offset;
995 bio.bi_iter.bi_sector = sector + rdev->data_offset;
996 __bio_add_page(&bio, page, size, 0);
998 submit_bio_wait(&bio);
1000 return !bio.bi_status;
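sync_page_io (980 to 1000) is the synchronous single-page path: an on-stack bio with one on-stack bio_vec, a start sector that depends on whether the I/O targets the metadata area, the post-reshape layout, or the current data area, then submit_bio_wait. The fragments pin down almost all of it; only the reshape condition is an assumption here.

    int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
                     struct page *page, blk_opf_t opf, bool metadata_op)
    {
            struct bio bio;
            struct bio_vec bvec;

            if (metadata_op && rdev->meta_bdev)
                    bio_init(&bio, rdev->meta_bdev, &bvec, 1, opf);
            else
                    bio_init(&bio, rdev->bdev, &bvec, 1, opf);

            if (metadata_op)
                    bio.bi_iter.bi_sector = sector + rdev->sb_start;
            else if (rdev->mddev->reshape_position != MaxSector)
                    /* Assumption: during a reshape the new offset applies
                     * to the already-reshaped region. */
                    bio.bi_iter.bi_sector = sector + rdev->new_data_offset;
            else
                    bio.bi_iter.bi_sector = sector + rdev->data_offset;
            __bio_add_page(&bio, page, size, 0);

            submit_bio_wait(&bio);

            return !bio.bi_status;
    }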
8620 bool md_write_start(struct mddev *mddev, struct bio *bi)
8677 void md_write_inc(struct mddev *mddev, struct bio *bi)
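Only the prototypes of md_write_start (8620) and md_write_inc (8677) match here; both do write accounting so the array can be marked in_sync only when no writes are pending. A plausible sketch of the simpler md_write_inc, assuming the per-CPU writes_pending reference counter in struct mddev; md_write_start is considerably more involved (it may mark the array dirty and wait for the superblock write).

    void md_write_inc(struct mddev *mddev, struct bio *bi)
    {
            /* Reads take no write reference. */
            if (bio_data_dir(bi) != WRITE)
                    return;
            /* Assumption: the caller already holds a reference via
             * md_write_start, so the array cannot be in_sync here. */
            percpu_ref_get(&mddev->writes_pending);
    }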
8705 struct bio *bio, sector_t start, sector_t size)
8707 struct bio *discard_bio = NULL;
8713 bio_chain(discard_bio, bio);
8714 bio_clone_blkg_association(discard_bio, bio);
8718 bio->bi_iter.bi_sector);
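md_submit_discard_bio (8705 to 8718) builds a discard for one member device and chains it to the parent bio so completions and errors aggregate on the original. Sketch; the __blkdev_issue_discard call and the trace arguments are reconstructions of the unmatched lines.

    void md_submit_discard_bio(struct mddev *mddev, struct md_rdev *rdev,
                               struct bio *bio, sector_t start, sector_t size)
    {
            struct bio *discard_bio = NULL;

            /* Assumption: the block core builds the discard bio for us. */
            if (__blkdev_issue_discard(rdev->bdev, start, size, GFP_NOIO,
                                       &discard_bio) || !discard_bio)
                    return;

            /* Tie the member-device discard to the parent's completion. */
            bio_chain(discard_bio, bio);
            bio_clone_blkg_association(discard_bio, bio);
            if (mddev->gendisk)
                    trace_block_bio_remap(discard_bio,
                                          disk_devt(mddev->gendisk),
                                          bio->bi_iter.bi_sector);
            submit_bio(discard_bio);
    }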
8723 static void md_end_clone_io(struct bio *bio)
8725 struct md_io_clone *md_io_clone = bio->bi_private;
8726 struct bio *orig_bio = md_io_clone->orig_bio;
8729 if (bio->bi_status && !orig_bio->bi_status)
8730 orig_bio->bi_status = bio->bi_status;
8735 bio_put(bio);
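md_end_clone_io (8723 to 8735) is the completion side of the accounting clone: propagate the first error to the original bio, close the I/O-stats interval, free the clone, and complete the original. Sketch; the mddev back-pointer and the active_io drop are assumed from context.

    static void md_end_clone_io(struct bio *bio)
    {
            struct md_io_clone *md_io_clone = bio->bi_private;
            struct bio *orig_bio = md_io_clone->orig_bio;
            struct mddev *mddev = md_io_clone->mddev;

            /* Keep the first error seen; never overwrite an earlier one. */
            if (bio->bi_status && !orig_bio->bi_status)
                    orig_bio->bi_status = bio->bi_status;

            if (md_io_clone->start_time)
                    bio_end_io_acct(orig_bio, md_io_clone->start_time);

            bio_put(bio);
            bio_endio(orig_bio);
            /* Assumption: drop the reference taken in md_account_bio. */
            percpu_ref_put(&mddev->active_io);
    }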
8740 static void md_clone_bio(struct mddev *mddev, struct bio **bio)
8742 struct block_device *bdev = (*bio)->bi_bdev;
8744 struct bio *clone =
8745 bio_alloc_clone(bdev, *bio, GFP_NOIO, &mddev->io_clone_set);
8748 md_io_clone->orig_bio = *bio;
8751 md_io_clone->start_time = bio_start_io_acct(*bio);
8755 *bio = clone;
8758 void md_account_bio(struct mddev *mddev, struct bio **bio)
8761 md_clone_bio(mddev, bio);
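md_clone_bio (8740 to 8755) swaps the caller's bio pointer for a clone from mddev->io_clone_set so that all personality I/O is accounted through the clone and completes via md_end_clone_io; md_account_bio (8758 to 8761) is the public wrapper. Sketch; the container_of recovery of struct md_io_clone and the io-stat gating are assumptions around the matched lines.

    static void md_clone_bio(struct mddev *mddev, struct bio **bio)
    {
            struct block_device *bdev = (*bio)->bi_bdev;
            struct md_io_clone *md_io_clone;
            struct bio *clone =
                    bio_alloc_clone(bdev, *bio, GFP_NOIO, &mddev->io_clone_set);

            /* Assumption: the bioset embeds each clone in a md_io_clone. */
            md_io_clone = container_of(clone, struct md_io_clone, bio_clone);
            md_io_clone->orig_bio = *bio;
            md_io_clone->mddev = mddev;
            if (blk_queue_io_stat(bdev->bd_disk->queue))
                    md_io_clone->start_time = bio_start_io_acct(*bio);

            clone->bi_end_io = md_end_clone_io;
            clone->bi_private = md_io_clone;
            *bio = clone;           /* the caller now works on the clone */
    }

    void md_account_bio(struct mddev *mddev, struct bio **bio)
    {
            /* Assumption: pin the array's active_io count for this bio. */
            percpu_ref_get(&mddev->active_io);
            md_clone_bio(mddev, bio);
    }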