Lines Matching defs:bio in drivers/md/raid10.c

74 static void end_reshape_write(struct bio *bio);
98 * for resync bio, r10bio pointer can be retrieved from the per-bio
101 static inline struct r10bio *get_resync_r10bio(struct bio *bio)
103 return get_resync_pages(bio)->raid_bio;
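
The fragment at 98-103 shows how a resync bio finds its owning r10bio: the bio's ->bi_private points at a per-bio resync_pages, which carries a back-pointer to the r10bio. A minimal sketch of that layout and lookup, assuming the struct resync_pages definition shared through drivers/md/raid1-10.c:

/* Per-bio bookkeeping for resync/recovery bios; ->bi_private points here. */
struct resync_pages {
        void *raid_bio;                    /* back-pointer to the owning r10bio */
        struct page *pages[RESYNC_PAGES];  /* data pages shared by the copies */
};

static inline struct resync_pages *get_resync_pages(struct bio *bio)
{
        return bio->bi_private;
}

static inline struct r10bio *get_resync_r10bio(struct bio *bio)
{
        return get_resync_pages(bio)->raid_bio;
}
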
135 struct bio *bio;
163 bio = bio_kmalloc(RESYNC_PAGES, gfp_flags);
164 if (!bio)
166 bio_init(bio, NULL, bio->bi_inline_vecs, RESYNC_PAGES, 0);
167 r10_bio->devs[j].bio = bio;
170 bio = bio_kmalloc(RESYNC_PAGES, gfp_flags);
171 if (!bio)
173 bio_init(bio, NULL, bio->bi_inline_vecs, RESYNC_PAGES, 0);
174 r10_bio->devs[j].repl_bio = bio;
181 struct bio *rbio = r10_bio->devs[j].repl_bio;
188 bio = r10_bio->devs[j].bio;
200 bio->bi_private = rp;
216 if (r10_bio->devs[j].bio)
217 bio_uninit(r10_bio->devs[j].bio);
218 kfree(r10_bio->devs[j].bio);
237 struct bio *bio = r10bio->devs[j].bio;
239 if (bio) {
240 rp = get_resync_pages(bio);
242 bio_uninit(bio);
243 kfree(bio);
246 bio = r10bio->devs[j].repl_bio;
247 if (bio) {
248 bio_uninit(bio);
249 kfree(bio);
253 /* resync pages array stored in the 1st bio's .bi_private */
264 struct bio **bio = & r10_bio->devs[i].bio;
265 if (!BIO_SPECIAL(*bio))
266 bio_put(*bio);
267 *bio = NULL;
268 bio = &r10_bio->devs[i].repl_bio;
269 if (r10_bio->read_slot < 0 && !BIO_SPECIAL(*bio))
270 bio_put(*bio);
271 *bio = NULL;
322 struct bio *bio = r10_bio->master_bio;
326 bio->bi_status = BLK_STS_IOERR;
328 bio_endio(bio);
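
Lines 322-328 belong to raid_end_bio_io(), which completes the master bio once every per-device copy has finished. A hedged sketch of that completion path (the allow_barrier()/free_r10bio() tail is filled in from how the surrounding code reads and may differ in detail):

static void raid_end_bio_io(struct r10bio *r10_bio)
{
        struct bio *bio = r10_bio->master_bio;
        struct r10conf *conf = r10_bio->mddev->private;

        /* if no copy succeeded, R10BIO_Uptodate was never set */
        if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
                bio->bi_status = BLK_STS_IOERR;

        bio_endio(bio);
        allow_barrier(conf);    /* let resync/reshape make progress again */
        free_r10bio(r10_bio);
}
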
350 * Find the disk number which triggered given bio
353 struct bio *bio, int *slotp, int *replp)
359 if (r10_bio->devs[slot].bio == bio)
361 if (r10_bio->devs[slot].repl_bio == bio) {
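
Lines 350-361 are from find_bio_disk(), which maps a completed bio back to the copy slot it was issued for and reports whether it was the replacement device. A sketch of the matching loop (the conf->copies bound and the devnum return are assumptions based on the struct layout visible elsewhere in this listing):

static int find_bio_disk(struct r10conf *conf, struct r10bio *r10_bio,
                         struct bio *bio, int *slotp, int *replp)
{
        int slot, repl = 0;

        for (slot = 0; slot < conf->copies; slot++) {
                if (r10_bio->devs[slot].bio == bio)
                        break;
                if (r10_bio->devs[slot].repl_bio == bio) {
                        repl = 1;
                        break;
                }
        }
        if (slotp)
                *slotp = slot;
        if (replp)
                *replp = repl;
        return r10_bio->devs[slot].devnum;
}
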
376 static void raid10_end_read_request(struct bio *bio)
378 int uptodate = !bio->bi_status;
379 struct r10bio *r10_bio = bio->bi_private;
393 * Set R10BIO_Uptodate in our master bio, so that
399 * wait for the 'master' bio.
453 static void raid10_end_write_request(struct bio *bio)
455 struct r10bio *r10_bio = bio->bi_private;
461 struct bio *to_put = NULL;
464 discard_error = bio->bi_status && bio_op(bio) == REQ_OP_DISCARD;
466 dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
478 if (bio->bi_status && !discard_error) {
492 (bio->bi_opf & MD_FAILFAST)) {
505 r10_bio->devs[slot].bio = NULL;
506 to_put = bio;
512 * Set R10BIO_Uptodate in our master bio, so that
518 * wait for the 'master' bio.
540 bio_put(bio);
544 r10_bio->devs[slot].bio = IO_MADE_GOOD;
775 if (r10_bio->devs[slot].bio == IO_BLOCKED)
894 struct bio *bio;
896 bio = bio_list_get(&conf->pending_bio_list);
914 while (bio) { /* submit pending writes */
915 struct bio *next = bio->bi_next;
917 raid1_submit_write(bio);
918 bio = next;
989 * rising completely), and the pre-process bio queue isn't empty, then
1113 struct bio *bio;
1126 bio = bio_list_get(&plug->pending);
1130 while (bio) { /* submit pending writes */
1131 struct bio *next = bio->bi_next;
1133 raid1_submit_write(bio);
1134 bio = next;
1147 struct bio *bio, sector_t sectors)
1149 /* Bail out if REQ_NOWAIT is set for the bio */
1150 if (!wait_barrier(conf, bio->bi_opf & REQ_NOWAIT)) {
1151 bio_wouldblock_error(bio);
1155 bio->bi_iter.bi_sector < conf->reshape_progress &&
1156 bio->bi_iter.bi_sector + sectors > conf->reshape_progress) {
1158 if (bio->bi_opf & REQ_NOWAIT) {
1159 bio_wouldblock_error(bio);
1164 conf->reshape_progress <= bio->bi_iter.bi_sector ||
1165 conf->reshape_progress >= bio->bi_iter.bi_sector +
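
Lines 1147-1165 are from regular_request_wait(): a REQ_NOWAIT bio is failed immediately with bio_wouldblock_error(), while other bios wait whenever they straddle the point a reshape is currently relocating. The conflict test is plain interval logic; a small sketch (the helper name is hypothetical, raid10.c open-codes the comparison):

/* Hypothetical helper: does [sector, sector + sectors) straddle the
 * region the reshape is working on right now? */
static bool straddles_reshape(struct r10conf *conf, sector_t sector,
                              sector_t sectors)
{
        return sector < conf->reshape_progress &&
               sector + sectors > conf->reshape_progress;
}
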
1172 static void raid10_read_request(struct mddev *mddev, struct bio *bio,
1176 struct bio *read_bio;
1177 const enum req_op op = bio_op(bio);
1178 const blk_opf_t do_sync = bio->bi_opf & REQ_SYNC;
1214 if (!regular_request_wait(mddev, conf, bio, r10_bio->sectors))
1231 if (max_sectors < bio_sectors(bio)) {
1232 struct bio *split = bio_split(bio, max_sectors,
1234 bio_chain(split, bio);
1236 submit_bio_noacct(bio);
1238 bio = split;
1239 r10_bio->master_bio = bio;
1245 md_account_bio(mddev, &bio);
1246 r10_bio->master_bio = bio;
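
Lines 1231-1246 show the read path capping a request at max_sectors (for example because of a bad-block range): the bio is split, the remainder is chained so it also waits for the split piece, the remainder is resubmitted, and the split piece becomes the new master bio; otherwise the original bio is simply accounted with md_account_bio(). A sketch of the split-and-chain pattern (GFP flags and the barrier bookkeeping around submit_bio_noacct() are omitted/assumed):

if (max_sectors < bio_sectors(bio)) {
        struct bio *split = bio_split(bio, max_sectors, GFP_NOIO,
                                      &conf->bio_split);

        bio_chain(split, bio);          /* 'bio' now also waits on 'split' */
        submit_bio_noacct(bio);         /* requeue the tail through md */
        bio = split;                    /* handle the front piece now */
        r10_bio->master_bio = bio;
        r10_bio->sectors = max_sectors;
}
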
1248 read_bio = bio_alloc_clone(rdev->bdev, bio, gfp, &mddev->bio_set);
1250 r10_bio->devs[slot].bio = read_bio;
1270 struct bio *bio, bool replacement,
1273 const enum req_op op = bio_op(bio);
1274 const blk_opf_t do_sync = bio->bi_opf & REQ_SYNC;
1275 const blk_opf_t do_fua = bio->bi_opf & REQ_FUA;
1280 struct bio *mbio;
1292 mbio = bio_alloc_clone(rdev->bdev, bio, GFP_NOIO, &mddev->bio_set);
1296 r10_bio->devs[n_copy].bio = mbio;
1407 static void raid10_write_request(struct mddev *mddev, struct bio *bio,
1417 bio->bi_iter.bi_sector,
1418 bio_end_sector(bio)))) {
1420 /* Bail out if REQ_NOWAIT is set for the bio */
1421 if (bio->bi_opf & REQ_NOWAIT) {
1422 bio_wouldblock_error(bio);
1429 bio->bi_iter.bi_sector, bio_end_sector(bio)))
1437 if (!regular_request_wait(mddev, conf, bio, sectors))
1441 ? (bio->bi_iter.bi_sector < conf->reshape_safe &&
1442 bio->bi_iter.bi_sector + sectors > conf->reshape_progress)
1443 : (bio->bi_iter.bi_sector + sectors > conf->reshape_safe &&
1444 bio->bi_iter.bi_sector < conf->reshape_progress))) {
1450 if (bio->bi_opf & REQ_NOWAIT) {
1452 bio_wouldblock_error(bio);
1464 * bios[x] to bio
1490 r10_bio->devs[i].bio = NULL;
1530 r10_bio->devs[i].bio = bio;
1534 r10_bio->devs[i].repl_bio = bio;
1543 if (r10_bio->sectors < bio_sectors(bio)) {
1544 struct bio *split = bio_split(bio, r10_bio->sectors,
1546 bio_chain(split, bio);
1548 submit_bio_noacct(bio);
1550 bio = split;
1551 r10_bio->master_bio = bio;
1554 md_account_bio(mddev, &bio);
1555 r10_bio->master_bio = bio;
1560 if (r10_bio->devs[i].bio)
1561 raid10_write_one_disk(mddev, r10_bio, bio, false, i);
1563 raid10_write_one_disk(mddev, r10_bio, bio, true, i);
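
Lines 1530-1563: once the target copies are recorded in devs[i].bio / devs[i].repl_bio, the write path clones the (possibly split) master bio once per copy. A hedged sketch of that submission loop (the repl_bio guard and the closing one_write_done() call are assumptions about code not captured by this listing):

for (i = 0; i < conf->copies; i++) {
        if (r10_bio->devs[i].bio)
                raid10_write_one_disk(mddev, r10_bio, bio, false, i);
        if (r10_bio->devs[i].repl_bio)
                raid10_write_one_disk(mddev, r10_bio, bio, true, i);
}
one_write_done(r10_bio);        /* completes the master bio if all clones finished */
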
1568 static void __make_request(struct mddev *mddev, struct bio *bio, int sectors)
1575 r10_bio->master_bio = bio;
1579 r10_bio->sector = bio->bi_iter.bi_sector;
1585 if (bio_data_dir(bio) == READ)
1586 raid10_read_request(mddev, bio, r10_bio, true);
1588 raid10_write_request(mddev, bio, r10_bio);
1613 static void raid10_end_discard_request(struct bio *bio)
1615 struct r10bio *r10_bio = bio->bi_private;
1622 * We don't care the return value of discard bio
1627 dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
1645 * There are some limitations to handle discard bio
1647 * 2st, if the discard bio spans reshape progress, we use the old way to
1648 * handle discard bio
1650 static int raid10_handle_discard(struct mddev *mddev, struct bio *bio)
1657 struct bio *split;
1674 if (WARN_ON_ONCE(bio->bi_opf & REQ_NOWAIT)) {
1675 bio_wouldblock_error(bio);
1695 bio_start = bio->bi_iter.bi_sector;
1696 bio_end = bio_end_sector(bio);
1699 * Maybe one discard bio is smaller than strip size or across one
1702 * size, there is hole when we submit discard bio to member disk.
1703 * For simplicity, we only handle discard bio which discard region
1706 if (bio_sectors(bio) < stripe_size*2)
1710 * Keep bio aligned with strip size.
1715 split = bio_split(bio, split_size, GFP_NOIO, &conf->bio_split);
1716 bio_chain(split, bio);
1724 split_size = bio_sectors(bio) - remainder;
1725 split = bio_split(bio, split_size, GFP_NOIO, &conf->bio_split);
1726 bio_chain(split, bio);
1729 submit_bio_noacct(bio);
1730 bio = split;
1734 bio_start = bio->bi_iter.bi_sector;
1735 bio_end = bio_end_sector(bio);
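
For illustration (all numbers hypothetical): suppose one stripe covers 2048 sectors (say four disks with a 512-sector chunk). A discard covering sectors [1000, 9000) first has its unaligned 1048-sector head (2048 - 1000 % 2048) split off and resubmitted through the regular path, then its unaligned 808-sector tail (9000 % 2048), leaving the stripe-aligned middle [2048, 8192) for the optimized per-disk discard handled below.
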
1771 * to record the discard bio. Other r10bio->master_bio record the first
1773 * The discard bio returns only first r10bio finishes
1776 r10_bio->master_bio = bio;
1781 r10_bio->master_bio = (struct bio *)first_r10bio;
1786 * bios[x] to bio
1793 r10_bio->devs[disk].bio = NULL;
1804 r10_bio->devs[disk].bio = bio;
1808 r10_bio->devs[disk].repl_bio = bio;
1817 struct bio *mbio, *rbio = NULL;
1846 * It only handles discard bio which size is >= stripe size, so
1851 if (r10_bio->devs[disk].bio) {
1853 mbio = bio_alloc_clone(bio->bi_bdev, bio, GFP_NOIO,
1857 r10_bio->devs[disk].bio = mbio;
1867 rbio = bio_alloc_clone(bio->bi_bdev, bio, GFP_NOIO,
1900 static bool raid10_make_request(struct mddev *mddev, struct bio *bio)
1905 int sectors = bio_sectors(bio);
1907 if (unlikely(bio->bi_opf & REQ_PREFLUSH)
1908 && md_flush_request(mddev, bio))
1911 if (!md_write_start(mddev, bio))
1914 if (unlikely(bio_op(bio) == REQ_OP_DISCARD))
1915 if (!raid10_handle_discard(mddev, bio))
1922 if (unlikely((bio->bi_iter.bi_sector & chunk_mask) +
1928 (bio->bi_iter.bi_sector &
1930 __make_request(mddev, bio, sectors);
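
Lines 1900-1930 are raid10_make_request(): flushes and discards are handled separately, and a regular request that crosses a chunk boundary is clamped so __make_request() only sees the part up to the boundary (the rest is split off and resubmitted inside the read/write request functions). A sketch of the clamp (the extra near_copies condition that also gates it is left out):

sector_t chunk_mask = conf->geo.chunk_mask & conf->prev.chunk_mask;
int chunk_sects = chunk_mask + 1;
int sectors = bio_sectors(bio);

/* only handle up to the chunk boundary in this pass */
if (unlikely((bio->bi_iter.bi_sector & chunk_mask) + sectors > chunk_sects))
        sectors = chunk_sects -
                  (bio->bi_iter.bi_sector & (chunk_sects - 1));
__make_request(mddev, bio, sectors);
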
2278 static void __end_sync_read(struct r10bio *r10_bio, struct bio *bio, int d)
2282 if (!bio->bi_status)
2304 static void end_sync_read(struct bio *bio)
2306 struct r10bio *r10_bio = get_resync_r10bio(bio);
2308 int d = find_bio_disk(conf, r10_bio, bio, NULL, NULL);
2310 __end_sync_read(r10_bio, bio, d);
2313 static void end_reshape_read(struct bio *bio)
2315 /* reshape read bio isn't allocated from r10buf_pool */
2316 struct r10bio *r10_bio = bio->bi_private;
2318 __end_sync_read(r10_bio, bio, r10_bio->read_slot);
2348 static void end_sync_write(struct bio *bio)
2350 struct r10bio *r10_bio = get_resync_r10bio(bio);
2360 d = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
2366 if (bio->bi_status) {
2407 struct bio *tbio, *fbio;
2415 if (!r10_bio->devs[i].bio->bi_status)
2422 fbio = r10_bio->devs[i].bio;
2434 tbio = r10_bio->devs[i].bio;
2444 if (!r10_bio->devs[i].bio->bi_status) {
2471 /* Ok, we need to write this bio, either to correct an
2507 if (r10_bio->devs[i].bio->bi_end_io != end_sync_write
2508 && r10_bio->devs[i].bio != fbio)
2545 struct bio *bio = r10_bio->devs[0].bio;
2551 struct page **pages = get_resync_pages(bio)->pages;
2621 struct bio *wbio = r10_bio->devs[1].bio;
2622 struct bio *wbio2 = r10_bio->devs[1].repl_bio;
2641 * share the pages with the first bio
2753 r10_bio->devs[slot].bio = IO_BLOCKED;
2811 r10_bio->devs[slot].bio
2905 struct bio *bio = r10_bio->master_bio;
2909 /* bio has the data to be written to slot 'i' where
2911 * We repeatedly clone the bio and trim down to one block,
2914 * It is conceivable that the bio doesn't exactly align with
2937 struct bio *wbio;
2942 wbio = bio_alloc_clone(rdev->bdev, bio, GFP_NOIO,
2944 bio_trim(wbio, sector - bio->bi_iter.bi_sector, sectors);
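
Lines 2905-2944 come from narrow_write_error(): after a failed write, the master bio is repeatedly cloned and trimmed to one bad-block-sized slice, each slice is retried synchronously, and slices that still fail are recorded as bad blocks. A sketch of one iteration (the per-device address 'dev_sector' and the bad-block bookkeeping are assumptions filled in around the lines above):

/* retry the slice [sector, sector + sectors) of the master bio */
struct bio *wbio = bio_alloc_clone(rdev->bdev, bio, GFP_NOIO,
                                   &mddev->bio_set);

bio_trim(wbio, sector - bio->bi_iter.bi_sector, sectors);
wbio->bi_iter.bi_sector = dev_sector + choose_data_offset(r10_bio, rdev);
wbio->bi_opf = REQ_OP_WRITE;
if (submit_bio_wait(wbio) < 0)
        /* still failing: remember this range as bad on the rdev */
        ok = rdev_set_badblocks(rdev, dev_sector, sectors, 0) && ok;
bio_put(wbio);
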
2967 struct bio *bio;
2979 bio = r10_bio->devs[slot].bio;
2980 bio_put(bio);
2981 r10_bio->devs[slot].bio = NULL;
2984 r10_bio->devs[slot].bio = IO_BLOCKED;
3018 if (r10_bio->devs[m].bio == NULL ||
3019 r10_bio->devs[m].bio->bi_end_io == NULL)
3021 if (!r10_bio->devs[m].bio->bi_status) {
3056 struct bio *bio = r10_bio->devs[m].bio;
3058 if (bio == IO_MADE_GOOD) {
3064 } else if (bio != NULL && bio->bi_status) {
3073 bio = r10_bio->devs[m].repl_bio;
3075 if (rdev && bio == IO_MADE_GOOD) {
3199 struct bio *bio;
3210 bio = r10bio->devs[i].bio;
3211 rp = bio->bi_private;
3212 bio_reset(bio, NULL, 0);
3213 bio->bi_private = rp;
3214 bio = r10bio->devs[i].repl_bio;
3215 if (bio) {
3216 rp = bio->bi_private;
3217 bio_reset(bio, NULL, 0);
3218 bio->bi_private = rp;
3282 * As we setup these structures, we collect all bio's together into a list
3298 struct bio *biolist = NULL, *bio;
3513 r10_bio->master_bio = (struct bio*)rb2;
3566 bio = r10_bio->devs[0].bio;
3567 bio->bi_next = biolist;
3568 biolist = bio;
3569 bio->bi_end_io = end_sync_read;
3570 bio->bi_opf = REQ_OP_READ;
3572 bio->bi_opf |= MD_FAILFAST;
3574 bio->bi_iter.bi_sector = from_addr +
3576 bio_set_dev(bio, rdev->bdev);
3591 bio = r10_bio->devs[1].bio;
3592 bio->bi_next = biolist;
3593 biolist = bio;
3594 bio->bi_end_io = end_sync_write;
3595 bio->bi_opf = REQ_OP_WRITE;
3596 bio->bi_iter.bi_sector = to_addr
3598 bio_set_dev(bio, mrdev->bdev);
3601 r10_bio->devs[1].bio->bi_end_io = NULL;
3604 bio = r10_bio->devs[1].repl_bio;
3605 if (bio)
3606 bio->bi_end_io = NULL;
3607 /* Note: if replace is not NULL, then bio
3613 bio->bi_next = biolist;
3614 biolist = bio;
3615 bio->bi_end_io = end_sync_write;
3616 bio->bi_opf = REQ_OP_WRITE;
3617 bio->bi_iter.bi_sector = to_addr +
3619 bio_set_dev(bio, mreplace->bdev);
3673 if (r10_bio->devs[0].bio->bi_opf & MD_FAILFAST) {
3687 r10_bio->devs[0].bio->bi_opf
3748 bio = r10_bio->devs[i].bio;
3749 bio->bi_status = BLK_STS_IOERR;
3771 bio->bi_next = biolist;
3772 biolist = bio;
3773 bio->bi_end_io = end_sync_read;
3774 bio->bi_opf = REQ_OP_READ;
3776 bio->bi_opf |= MD_FAILFAST;
3777 bio->bi_iter.bi_sector = sector + rdev->data_offset;
3778 bio_set_dev(bio, rdev->bdev);
3789 bio = r10_bio->devs[i].repl_bio;
3790 bio->bi_status = BLK_STS_IOERR;
3793 bio->bi_next = biolist;
3794 biolist = bio;
3795 bio->bi_end_io = end_sync_write;
3796 bio->bi_opf = REQ_OP_WRITE;
3798 bio->bi_opf |= MD_FAILFAST;
3799 bio->bi_iter.bi_sector = sector + rdev->data_offset;
3800 bio_set_dev(bio, rdev->bdev);
3808 if (r10_bio->devs[i].bio->bi_end_io)
3833 for (bio= biolist ; bio ; bio=bio->bi_next) {
3834 struct resync_pages *rp = get_resync_pages(bio);
3836 if (WARN_ON(!bio_add_page(bio, page, len, 0))) {
3837 bio->bi_status = BLK_STS_RESOURCE;
3838 bio_endio(bio);
3894 bio = biolist;
3897 bio->bi_next = NULL;
3898 r10_bio = get_resync_r10bio(bio);
3901 if (bio->bi_end_io == end_sync_read) {
3902 md_sync_acct_bio(bio, nr_sectors);
3903 bio->bi_status = 0;
3904 submit_bio_noacct(bio);
4809 * We store the read-in bio in ->master_bio and the others in
4810 * ->devs[x].bio and ->devs[x].repl_bio.
4820 struct bio *blist;
4821 struct bio *bio, *read_bio;
4982 struct bio *b;
4990 b = r10_bio->devs[s/2].bio;
5007 pages = get_resync_pages(r10_bio->devs[0].bio)->pages;
5013 for (bio = blist; bio ; bio = bio->bi_next) {
5014 if (WARN_ON(!bio_add_page(bio, page, len, 0))) {
5015 bio->bi_status = BLK_STS_RESOURCE;
5016 bio_endio(bio);
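
Lines 4982-5016 are from reshape_request(): the read bio and all write bios for one reshape window are collected on 'blist', and every bio is pointed at the same pages, which belong to devs[0].bio's resync_pages (see the comment at 5146). A sketch of the page-attach loop (the locals 's', 'len', 'max_sectors' and 'sectors_done' are assumptions based on the surrounding code):

struct page **pages = get_resync_pages(r10_bio->devs[0].bio)->pages;
int s, len;

for (s = 0; s < max_sectors; s += len >> 9) {
        struct page *page = pages[s / (PAGE_SIZE >> 9)];

        len = min_t(int, (max_sectors - s) << 9, PAGE_SIZE);
        /* attach the same page to every bio in this batch */
        for (bio = blist; bio; bio = bio->bi_next) {
                if (WARN_ON(!bio_add_page(bio, page, len, 0))) {
                        bio->bi_status = BLK_STS_RESOURCE;
                        bio_endio(bio);
                        return sectors_done;    /* give up on this window */
                }
        }
}
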
5073 struct bio *b;
5082 b = r10_bio->devs[s/2].bio;
5146 /* reshape IOs share pages from .devs[0].bio */
5147 pages = get_resync_pages(r10_bio->devs[0].bio)->pages;
5204 static void end_reshape_write(struct bio *bio)
5206 struct r10bio *r10_bio = get_resync_r10bio(bio);
5214 d = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
5222 if (bio->bi_status) {