Lines Matching defs:mp_bh
51 static void multipath_reschedule_retry (struct multipath_bh *mp_bh)
54 struct mddev *mddev = mp_bh->mddev;
58 list_add(&mp_bh->retry_list, &conf->retry_list);
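The matches above and below are all uses of the mp_bh bookkeeping structure in the md multipath driver. The first three (source lines 51, 54, 58) belong to the retry path. A minimal sketch of how they fit together, assuming the stock driver and its md.h / md-multipath.h definitions of struct mpconf and struct multipath_bh; the locking and thread wakeup around the matched lines are reconstructed and may differ slightly in the exact revision being indexed:

static void multipath_reschedule_retry (struct multipath_bh *mp_bh)
{
	unsigned long flags;
	struct mddev *mddev = mp_bh->mddev;
	struct mpconf *conf = mddev->private;

	/* queue the failed request for multipathd and kick the md thread */
	spin_lock_irqsave(&conf->device_lock, flags);
	list_add(&mp_bh->retry_list, &conf->retry_list);
	spin_unlock_irqrestore(&conf->device_lock, flags);
	md_wakeup_thread(mddev->thread);
}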
68 static void multipath_end_bh_io(struct multipath_bh *mp_bh, blk_status_t status)
70 struct bio *bio = mp_bh->master_bio;
71 struct mpconf *conf = mp_bh->mddev->private;
75 mempool_free(mp_bh, &conf->pool);
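The next group (68-75) is the completion helper that hands the final status back to the original submitter and returns the multipath_bh to its mempool. A sketch under the same assumptions:

static void multipath_end_bh_io(struct multipath_bh *mp_bh, blk_status_t status)
{
	struct bio *bio = mp_bh->master_bio;
	struct mpconf *conf = mp_bh->mddev->private;

	/* propagate the status to the original bio and complete it */
	bio->bi_status = status;
	bio_endio(bio);
	mempool_free(mp_bh, &conf->pool);
}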
80 struct multipath_bh *mp_bh = bio->bi_private;
81 struct mpconf *conf = mp_bh->mddev->private;
82 struct md_rdev *rdev = conf->multipaths[mp_bh->path].rdev;
85 multipath_end_bh_io(mp_bh, 0);
91 md_error (mp_bh->mddev, rdev);
95 multipath_reschedule_retry(mp_bh);
97 multipath_end_bh_io(mp_bh, bio->bi_status);
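Matches 80-97 fall in the per-path bio completion callback: success completes the master bio, a failure on anything but read-ahead marks the path faulty and reschedules the request on another path, and a failed read-ahead is simply completed with its error. A sketch with the driver's log message omitted:

static void multipath_end_request(struct bio *bio)
{
	struct multipath_bh *mp_bh = bio->bi_private;
	struct mpconf *conf = mp_bh->mddev->private;
	struct md_rdev *rdev = conf->multipaths[mp_bh->path].rdev;

	if (!bio->bi_status)
		/* this path worked; complete the master bio successfully */
		multipath_end_bh_io(mp_bh, 0);
	else if (!(bio->bi_opf & REQ_RAHEAD)) {
		/* real I/O error: fail this path and retry on another one */
		md_error (mp_bh->mddev, rdev);
		multipath_reschedule_retry(mp_bh);
	} else
		/* failed read-ahead: not worth retrying */
		multipath_end_bh_io(mp_bh, bio->bi_status);
	rdev_dec_pending(rdev, conf->mddev);
}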
104 struct multipath_bh * mp_bh;
111 mp_bh = mempool_alloc(&conf->pool, GFP_NOIO);
113 mp_bh->master_bio = bio;
114 mp_bh->mddev = mddev;
116 mp_bh->path = multipath_map(conf);
117 if (mp_bh->path < 0) {
119 mempool_free(mp_bh, &conf->pool);
122 multipath = conf->multipaths + mp_bh->path;
124 bio_init(&mp_bh->bio, NULL, 0);
125 __bio_clone_fast(&mp_bh->bio, bio);
127 mp_bh->bio.bi_iter.bi_sector += multipath->rdev->data_offset;
128 bio_set_dev(&mp_bh->bio, multipath->rdev->bdev);
129 mp_bh->bio.bi_opf |= REQ_FAILFAST_TRANSPORT;
130 mp_bh->bio.bi_end_io = multipath_end_request;
131 mp_bh->bio.bi_private = mp_bh;
132 mddev_check_writesame(mddev, &mp_bh->bio);
133 mddev_check_write_zeroes(mddev, &mp_bh->bio);
134 submit_bio_noacct(&mp_bh->bio);
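The largest cluster (104-134) is the make_request path: allocate a multipath_bh from the mempool, pick a working path with multipath_map(), clone the incoming bio onto that path's rdev and submit it. A sketch assembled around the matched lines; the driver's REQ_PREFLUSH fast path and error logging are omitted here, and details outside the matches are reconstructed:

static bool multipath_make_request(struct mddev *mddev, struct bio *bio)
{
	struct mpconf *conf = mddev->private;
	struct multipath_bh *mp_bh;
	struct multipath_info *multipath;

	mp_bh = mempool_alloc(&conf->pool, GFP_NOIO);

	mp_bh->master_bio = bio;
	mp_bh->mddev = mddev;

	mp_bh->path = multipath_map(conf);
	if (mp_bh->path < 0) {
		/* no working path left: fail the bio and free the bookkeeping */
		bio_io_error(bio);
		mempool_free(mp_bh, &conf->pool);
		return true;
	}
	multipath = conf->multipaths + mp_bh->path;

	/* clone the master bio and redirect it at the chosen path's rdev */
	bio_init(&mp_bh->bio, NULL, 0);
	__bio_clone_fast(&mp_bh->bio, bio);

	mp_bh->bio.bi_iter.bi_sector += multipath->rdev->data_offset;
	bio_set_dev(&mp_bh->bio, multipath->rdev->bdev);
	mp_bh->bio.bi_opf |= REQ_FAILFAST_TRANSPORT;
	mp_bh->bio.bi_end_io = multipath_end_request;
	mp_bh->bio.bi_private = mp_bh;
	mddev_check_writesame(mddev, &mp_bh->bio);
	mddev_check_write_zeroes(mddev, &mp_bh->bio);
	submit_bio_noacct(&mp_bh->bio);
	return true;
}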
294 struct multipath_bh *mp_bh;
306 mp_bh = list_entry(head->prev, struct multipath_bh, retry_list);
310 bio = &mp_bh->bio;
311 bio->bi_iter.bi_sector = mp_bh->master_bio->bi_iter.bi_sector;
313 if ((mp_bh->path = multipath_map (conf))<0) {
317 multipath_end_bh_io(mp_bh, BLK_STS_IOERR);
322 *bio = *(mp_bh->master_bio);
324 conf->multipaths[mp_bh->path].rdev->data_offset;
325 bio_set_dev(bio, conf->multipaths[mp_bh->path].rdev->bdev);
328 bio->bi_private = mp_bh;
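The final matches (294-328) are from multipathd, the md thread callback that drains conf->retry_list: each queued multipath_bh is re-mapped with multipath_map(), then either failed with BLK_STS_IOERR when no path is left or reinitialised from the master bio and resubmitted to the newly chosen path. A sketch of that loop, again assuming the stock driver; the list locking and log messages around the matched lines are reconstructed:

static void multipathd(struct md_thread *thread)
{
	struct mddev *mddev = thread->mddev;
	struct multipath_bh *mp_bh;
	struct bio *bio;
	unsigned long flags;
	struct mpconf *conf = mddev->private;
	struct list_head *head = &conf->retry_list;

	md_check_recovery(mddev);
	for (;;) {
		spin_lock_irqsave(&conf->device_lock, flags);
		if (list_empty(head))
			break;
		mp_bh = list_entry(head->prev, struct multipath_bh, retry_list);
		list_del(head->prev);
		spin_unlock_irqrestore(&conf->device_lock, flags);

		bio = &mp_bh->bio;
		bio->bi_iter.bi_sector = mp_bh->master_bio->bi_iter.bi_sector;

		if ((mp_bh->path = multipath_map (conf)) < 0) {
			/* no working path remains: fail the original request */
			multipath_end_bh_io(mp_bh, BLK_STS_IOERR);
		} else {
			/* restart from the master bio and aim it at the new path */
			*bio = *(mp_bh->master_bio);
			bio->bi_iter.bi_sector +=
				conf->multipaths[mp_bh->path].rdev->data_offset;
			bio_set_dev(bio, conf->multipaths[mp_bh->path].rdev->bdev);
			bio->bi_opf |= REQ_FAILFAST_TRANSPORT;
			bio->bi_end_io = multipath_end_request;
			bio->bi_private = mp_bh;
			submit_bio_noacct(bio);
		}
	}
	spin_unlock_irqrestore(&conf->device_lock, flags);
}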