Lines matching refs: r10_bio (drivers/md/raid10.c)
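For orientation while reading the listing, below is a rough sketch of the r10bio structure, reconstructed only from the fields these matched lines touch (master_bio, sector, sectors, state, read_slot, remaining, retry_list, and the per-copy devs[] entries). It is a sketch, not a copy of the header; the authoritative definition is struct r10bio in drivers/md/raid10.h and may differ in detail.

struct r10bio {
	atomic_t	remaining;	/* outstanding per-copy bios; request finishes when this drops to 0 */
	sector_t	sector;		/* virtual (array) sector of the request */
	int		sectors;	/* request length in sectors */
	unsigned long	state;		/* R10BIO_* flag bits (Uptodate, WriteError, MadeGood, ...) */
	struct mddev	*mddev;
	struct bio	*master_bio;	/* original bio submitted to the array (borrowed as a chain link during recovery) */
	int		read_slot;	/* devs[] slot a read was issued to, or -1 */
	struct list_head retry_list;	/* queued on conf->retry_list / bio_end_io_list for raid10d */
	struct r10dev {
		struct bio	*bio;		/* per-copy data/resync bio */
		union {
			struct bio	*repl_bio;	/* writes/resync: bio to the replacement device */
			struct md_rdev	*rdev;		/* reads: rdev the read was sent to */
		};
		sector_t	addr;		/* physical sector on that device */
		int		devnum;		/* index into conf->mirrors[] */
	} devs[];			/* one entry per copy */
};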
73 static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio);
134 struct r10bio *r10_bio;
140 r10_bio = r10bio_pool_alloc(gfp_flags, conf);
141 if (!r10_bio)
167 r10_bio->devs[j].bio = bio;
174 r10_bio->devs[j].repl_bio = bio;
181 struct bio *rbio = r10_bio->devs[j].repl_bio;
188 bio = r10_bio->devs[j].bio;
199 rp->raid_bio = r10_bio;
207 return r10_bio;
216 if (r10_bio->devs[j].bio)
217 bio_uninit(r10_bio->devs[j].bio);
218 kfree(r10_bio->devs[j].bio);
219 if (r10_bio->devs[j].repl_bio)
220 bio_uninit(r10_bio->devs[j].repl_bio);
221 kfree(r10_bio->devs[j].repl_bio);
225 rbio_pool_free(r10_bio, conf);
259 static void put_all_bios(struct r10conf *conf, struct r10bio *r10_bio)
264 struct bio **bio = & r10_bio->devs[i].bio;
268 bio = &r10_bio->devs[i].repl_bio;
269 if (r10_bio->read_slot < 0 && !BIO_SPECIAL(*bio))
275 static void free_r10bio(struct r10bio *r10_bio)
277 struct r10conf *conf = r10_bio->mddev->private;
279 put_all_bios(conf, r10_bio);
280 mempool_free(r10_bio, &conf->r10bio_pool);
283 static void put_buf(struct r10bio *r10_bio)
285 struct r10conf *conf = r10_bio->mddev->private;
287 mempool_free(r10_bio, &conf->r10buf_pool);
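The two free paths listed at 275-287 pair with two distinct allocators seen further down. As a hedged sketch of that pairing (assumed from the listing, not a quote of the source): normal read/write r10_bios come from conf->r10bio_pool and are released with free_r10bio(), which drops the attached bios first, while resync/recovery/reshape buffers come from raid10_alloc_init_r10buf() backed by conf->r10buf_pool and are released with put_buf().

	struct r10bio *rb;

	/* Regular I/O path (cf. line 1573): r10bio_pool in, free_r10bio() out. */
	rb = mempool_alloc(&conf->r10bio_pool, GFP_NOIO);
	/* ... submit, complete ... */
	free_r10bio(rb);		/* put_all_bios() then mempool_free to r10bio_pool */

	/* Resync/recovery path (cf. line 3508): r10buf_pool in, put_buf() out. */
	rb = raid10_alloc_init_r10buf(conf);
	/* ... sync I/O ... */
	put_buf(rb);			/* mempool_free to r10buf_pool */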
298 static void reschedule_retry(struct r10bio *r10_bio)
301 struct mddev *mddev = r10_bio->mddev;
305 list_add(&r10_bio->retry_list, &conf->retry_list);
320 static void raid_end_bio_io(struct r10bio *r10_bio)
322 struct bio *bio = r10_bio->master_bio;
323 struct r10conf *conf = r10_bio->mddev->private;
325 if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
335 free_r10bio(r10_bio);
341 static inline void update_head_pos(int slot, struct r10bio *r10_bio)
343 struct r10conf *conf = r10_bio->mddev->private;
345 conf->mirrors[r10_bio->devs[slot].devnum].head_position =
346 r10_bio->devs[slot].addr + (r10_bio->sectors);
352 static int find_bio_disk(struct r10conf *conf, struct r10bio *r10_bio,
359 if (r10_bio->devs[slot].bio == bio)
361 if (r10_bio->devs[slot].repl_bio == bio) {
367 update_head_pos(slot, r10_bio);
373 return r10_bio->devs[slot].devnum;
379 struct r10bio *r10_bio = bio->bi_private;
382 struct r10conf *conf = r10_bio->mddev->private;
384 slot = r10_bio->read_slot;
385 rdev = r10_bio->devs[slot].rdev;
389 update_head_pos(slot, r10_bio);
401 set_bit(R10BIO_Uptodate, &r10_bio->state);
408 if (!_enough(conf, test_bit(R10BIO_Previous, &r10_bio->state),
413 raid_end_bio_io(r10_bio);
422 (unsigned long long)r10_bio->sector);
423 set_bit(R10BIO_ReadError, &r10_bio->state);
424 reschedule_retry(r10_bio);
428 static void close_write(struct r10bio *r10_bio)
431 md_bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector,
432 r10_bio->sectors,
433 !test_bit(R10BIO_Degraded, &r10_bio->state),
435 md_write_end(r10_bio->mddev);
438 static void one_write_done(struct r10bio *r10_bio)
440 if (atomic_dec_and_test(&r10_bio->remaining)) {
441 if (test_bit(R10BIO_WriteError, &r10_bio->state))
442 reschedule_retry(r10_bio);
444 close_write(r10_bio);
445 if (test_bit(R10BIO_MadeGood, &r10_bio->state))
446 reschedule_retry(r10_bio);
448 raid_end_bio_io(r10_bio);
455 struct r10bio *r10_bio = bio->bi_private;
458 struct r10conf *conf = r10_bio->mddev->private;
466 dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
501 set_bit(R10BIO_WriteError, &r10_bio->state);
504 set_bit(R10BIO_Degraded, &r10_bio->state);
505 r10_bio->devs[slot].bio = NULL;
533 set_bit(R10BIO_Uptodate, &r10_bio->state);
537 r10_bio->devs[slot].addr,
538 r10_bio->sectors,
542 r10_bio->devs[slot].repl_bio = IO_MADE_GOOD;
544 r10_bio->devs[slot].bio = IO_MADE_GOOD;
546 set_bit(R10BIO_MadeGood, &r10_bio->state);
555 one_write_done(r10_bio);
730 struct r10bio *r10_bio,
733 const sector_t this_sector = r10_bio->sector;
735 int sectors = r10_bio->sectors;
745 raid10_find_phys(conf, r10_bio);
754 clear_bit(R10BIO_FailFast, &r10_bio->state);
775 if (r10_bio->devs[slot].bio == IO_BLOCKED)
777 disk = r10_bio->devs[slot].devnum;
780 r10_bio->devs[slot].addr + sectors >
794 r10_bio->devs[slot].addr + sectors > rdev->recovery_offset)
797 dev_sector = r10_bio->devs[slot].addr;
843 set_bit(R10BIO_FailFast, &r10_bio->state);
853 new_distance = r10_bio->devs[slot].addr;
855 new_distance = abs(r10_bio->devs[slot].addr -
876 r10_bio->read_slot = slot;
1098 static sector_t choose_data_offset(struct r10bio *r10_bio,
1102 test_bit(R10BIO_Previous, &r10_bio->state))
1173 struct r10bio *r10_bio, bool io_accounting)
1182 int slot = r10_bio->read_slot;
1186 if (slot >= 0 && r10_bio->devs[slot].rdev) {
1189 * safely dereference the rdev in the r10_bio,
1202 disk = r10_bio->devs[slot].devnum;
1209 err_rdev = r10_bio->devs[slot].rdev;
1214 if (!regular_request_wait(mddev, conf, bio, r10_bio->sectors))
1216 rdev = read_balance(conf, r10_bio, &max_sectors);
1221 (unsigned long long)r10_bio->sector);
1223 raid_end_bio_io(r10_bio);
1230 (unsigned long long)r10_bio->sector);
1239 r10_bio->master_bio = bio;
1240 r10_bio->sectors = max_sectors;
1242 slot = r10_bio->read_slot;
1246 r10_bio->master_bio = bio;
1250 r10_bio->devs[slot].bio = read_bio;
1251 r10_bio->devs[slot].rdev = rdev;
1253 read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr +
1254 choose_data_offset(r10_bio, rdev);
1258 test_bit(R10BIO_FailFast, &r10_bio->state))
1260 read_bio->bi_private = r10_bio;
1264 r10_bio->sector);
1269 static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio,
1279 int devnum = r10_bio->devs[n_copy].devnum;
1294 r10_bio->devs[n_copy].repl_bio = mbio;
1296 r10_bio->devs[n_copy].bio = mbio;
1298 mbio->bi_iter.bi_sector = (r10_bio->devs[n_copy].addr +
1299 choose_data_offset(r10_bio, rdev));
1306 mbio->bi_private = r10_bio;
1310 r10_bio->sector);
1314 atomic_inc(&r10_bio->remaining);
1343 static void wait_blocked_dev(struct mddev *mddev, struct r10bio *r10_bio)
1369 sector_t dev_sector = r10_bio->devs[i].addr;
1377 if (!r10_bio->sectors)
1380 is_bad = is_badblock(rdev, dev_sector, r10_bio->sectors,
1408 struct r10bio *r10_bio)
1436 sectors = r10_bio->sectors;
1469 * gets its own r10_bio with a set of bios attached.
1472 r10_bio->read_slot = -1; /* make sure repl_bio gets freed */
1473 raid10_find_phys(conf, r10_bio);
1475 wait_blocked_dev(mddev, r10_bio);
1478 max_sectors = r10_bio->sectors;
1481 int d = r10_bio->devs[i].devnum;
1490 r10_bio->devs[i].bio = NULL;
1491 r10_bio->devs[i].repl_bio = NULL;
1494 set_bit(R10BIO_Degraded, &r10_bio->state);
1499 sector_t dev_sector = r10_bio->devs[i].addr;
1530 r10_bio->devs[i].bio = bio;
1534 r10_bio->devs[i].repl_bio = bio;
1540 if (max_sectors < r10_bio->sectors)
1541 r10_bio->sectors = max_sectors;
1543 if (r10_bio->sectors < bio_sectors(bio)) {
1544 struct bio *split = bio_split(bio, r10_bio->sectors,
1551 r10_bio->master_bio = bio;
1555 r10_bio->master_bio = bio;
1556 atomic_set(&r10_bio->remaining, 1);
1557 md_bitmap_startwrite(mddev->bitmap, r10_bio->sector, r10_bio->sectors, 0);
1560 if (r10_bio->devs[i].bio)
1561 raid10_write_one_disk(mddev, r10_bio, bio, false, i);
1562 if (r10_bio->devs[i].repl_bio)
1563 raid10_write_one_disk(mddev, r10_bio, bio, true, i);
1565 one_write_done(r10_bio);
1571 struct r10bio *r10_bio;
1573 r10_bio = mempool_alloc(&conf->r10bio_pool, GFP_NOIO);
1575 r10_bio->master_bio = bio;
1576 r10_bio->sectors = sectors;
1578 r10_bio->mddev = mddev;
1579 r10_bio->sector = bio->bi_iter.bi_sector;
1580 r10_bio->state = 0;
1581 r10_bio->read_slot = -1;
1582 memset(r10_bio->devs, 0, sizeof(r10_bio->devs[0]) *
1586 raid10_read_request(mddev, bio, r10_bio, true);
1588 raid10_write_request(mddev, bio, r10_bio);
1615 struct r10bio *r10_bio = bio->bi_private;
1616 struct r10conf *conf = r10_bio->mddev->private;
1624 if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
1625 set_bit(R10BIO_Uptodate, &r10_bio->state);
1627 dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
1640 raid_end_discard_bio(r10_bio);
1656 struct r10bio *r10_bio, *first_r10bio;
1761 r10_bio = mempool_alloc(&conf->r10bio_pool, GFP_NOIO);
1762 r10_bio->mddev = mddev;
1763 r10_bio->state = 0;
1764 r10_bio->sectors = 0;
1765 memset(r10_bio->devs, 0, sizeof(r10_bio->devs[0]) * geo->raid_disks);
1766 wait_blocked_dev(mddev, r10_bio);
1776 r10_bio->master_bio = bio;
1777 set_bit(R10BIO_Discard, &r10_bio->state);
1779 first_r10bio = r10_bio;
1781 r10_bio->master_bio = (struct bio *)first_r10bio;
1793 r10_bio->devs[disk].bio = NULL;
1794 r10_bio->devs[disk].repl_bio = NULL;
1804 r10_bio->devs[disk].bio = bio;
1808 r10_bio->devs[disk].repl_bio = bio;
1814 atomic_set(&r10_bio->remaining, 1);
1851 if (r10_bio->devs[disk].bio) {
1856 mbio->bi_private = r10_bio;
1857 r10_bio->devs[disk].bio = mbio;
1858 r10_bio->devs[disk].devnum = disk;
1859 atomic_inc(&r10_bio->remaining);
1861 dev_start + choose_data_offset(r10_bio, rdev),
1865 if (r10_bio->devs[disk].repl_bio) {
1870 rbio->bi_private = r10_bio;
1871 r10_bio->devs[disk].repl_bio = rbio;
1872 r10_bio->devs[disk].devnum = disk;
1873 atomic_inc(&r10_bio->remaining);
1875 dev_start + choose_data_offset(r10_bio, rrdev),
1887 raid_end_discard_bio(r10_bio);
1892 raid_end_discard_bio(r10_bio);
2278 static void __end_sync_read(struct r10bio *r10_bio, struct bio *bio, int d)
2280 struct r10conf *conf = r10_bio->mddev->private;
2283 set_bit(R10BIO_Uptodate, &r10_bio->state);
2288 atomic_add(r10_bio->sectors,
2295 if (test_bit(R10BIO_IsRecover, &r10_bio->state) ||
2296 atomic_dec_and_test(&r10_bio->remaining)) {
2300 reschedule_retry(r10_bio);
2306 struct r10bio *r10_bio = get_resync_r10bio(bio);
2307 struct r10conf *conf = r10_bio->mddev->private;
2308 int d = find_bio_disk(conf, r10_bio, bio, NULL, NULL);
2310 __end_sync_read(r10_bio, bio, d);
2316 struct r10bio *r10_bio = bio->bi_private;
2318 __end_sync_read(r10_bio, bio, r10_bio->read_slot);
2321 static void end_sync_request(struct r10bio *r10_bio)
2323 struct mddev *mddev = r10_bio->mddev;
2325 while (atomic_dec_and_test(&r10_bio->remaining)) {
2326 if (r10_bio->master_bio == NULL) {
2328 sector_t s = r10_bio->sectors;
2329 if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
2330 test_bit(R10BIO_WriteError, &r10_bio->state))
2331 reschedule_retry(r10_bio);
2333 put_buf(r10_bio);
2337 struct r10bio *r10_bio2 = (struct r10bio *)r10_bio->master_bio;
2338 if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
2339 test_bit(R10BIO_WriteError, &r10_bio->state))
2340 reschedule_retry(r10_bio);
2342 put_buf(r10_bio);
2343 r10_bio = r10_bio2;
2350 struct r10bio *r10_bio = get_resync_r10bio(bio);
2351 struct mddev *mddev = r10_bio->mddev;
2360 d = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
2374 set_bit(R10BIO_WriteError, &r10_bio->state);
2377 r10_bio->devs[slot].addr,
2378 r10_bio->sectors,
2380 set_bit(R10BIO_MadeGood, &r10_bio->state);
2384 end_sync_request(r10_bio);
2403 static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
2411 atomic_set(&r10_bio->remaining, 1);
2415 if (!r10_bio->devs[i].bio->bi_status)
2422 fbio = r10_bio->devs[i].bio;
2423 fbio->bi_iter.bi_size = r10_bio->sectors << 9;
2427 vcnt = (r10_bio->sectors + (PAGE_SIZE >> 9) - 1) >> (PAGE_SHIFT - 9);
2434 tbio = r10_bio->devs[i].bio;
2442 d = r10_bio->devs[i].devnum;
2444 if (!r10_bio->devs[i].bio->bi_status) {
2449 int sectors = r10_bio->sectors;
2462 atomic64_add(r10_bio->sectors, &mddev->resync_mismatches);
2481 rp->raid_bio = r10_bio;
2483 tbio->bi_iter.bi_sector = r10_bio->devs[i].addr;
2489 atomic_inc(&r10_bio->remaining);
2504 tbio = r10_bio->devs[i].repl_bio;
2507 if (r10_bio->devs[i].bio->bi_end_io != end_sync_write
2508 && r10_bio->devs[i].bio != fbio)
2510 d = r10_bio->devs[i].devnum;
2511 atomic_inc(&r10_bio->remaining);
2518 if (atomic_dec_and_test(&r10_bio->remaining)) {
2519 md_done_sync(mddev, r10_bio->sectors, 1);
2520 put_buf(r10_bio);
2529 * There is a separate r10_bio for each non-in_sync drive.
2534 static void fix_recovery_read_error(struct r10bio *r10_bio)
2543 struct mddev *mddev = r10_bio->mddev;
2545 struct bio *bio = r10_bio->devs[0].bio;
2547 int sectors = r10_bio->sectors;
2549 int dr = r10_bio->devs[0].devnum;
2550 int dw = r10_bio->devs[1].devnum;
2563 addr = r10_bio->devs[0].addr + sect,
2571 addr = r10_bio->devs[1].addr + sect;
2595 addr = r10_bio->devs[1].addr + sect;
2617 static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio)
2621 struct bio *wbio = r10_bio->devs[1].bio;
2622 struct bio *wbio2 = r10_bio->devs[1].repl_bio;
2631 if (!test_bit(R10BIO_Uptodate, &r10_bio->state)) {
2632 fix_recovery_read_error(r10_bio);
2634 end_sync_request(r10_bio);
2636 end_sync_request(r10_bio);
2644 d = r10_bio->devs[1].devnum;
2726 static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10bio *r10_bio)
2728 int sect = 0; /* Offset from r10_bio->sector */
2729 int sectors = r10_bio->sectors, slot = r10_bio->read_slot;
2732 int d = r10_bio->devs[slot].devnum;
2753 r10_bio->devs[slot].bio = IO_BLOCKED;
2771 d = r10_bio->devs[sl].devnum;
2776 is_badblock(rdev, r10_bio->devs[sl].addr + sect, s,
2781 r10_bio->devs[sl].addr +
2802 int dn = r10_bio->devs[slot].devnum;
2807 r10_bio->devs[slot].addr
2811 r10_bio->devs[slot].bio
2824 d = r10_bio->devs[sl].devnum;
2834 r10_bio->devs[sl].addr +
2843 choose_data_offset(r10_bio,
2858 d = r10_bio->devs[sl].devnum;
2868 r10_bio->devs[sl].addr +
2877 choose_data_offset(r10_bio, rdev)),
2888 choose_data_offset(r10_bio, rdev)),
2903 static int narrow_write_error(struct r10bio *r10_bio, int i)
2905 struct bio *bio = r10_bio->master_bio;
2906 struct mddev *mddev = r10_bio->mddev;
2908 struct md_rdev *rdev = conf->mirrors[r10_bio->devs[i].devnum].rdev;
2923 int sect_to_write = r10_bio->sectors;
2931 sector = r10_bio->sector;
2932 sectors = ((r10_bio->sector + block_sectors)
2945 wsector = r10_bio->devs[i].addr + (sector - r10_bio->sector);
2947 choose_data_offset(r10_bio, rdev);
2964 static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio)
2966 int slot = r10_bio->read_slot;
2969 struct md_rdev *rdev = r10_bio->devs[slot].rdev;
2979 bio = r10_bio->devs[slot].bio;
2981 r10_bio->devs[slot].bio = NULL;
2984 r10_bio->devs[slot].bio = IO_BLOCKED;
2987 fix_read_error(conf, mddev, r10_bio);
2993 r10_bio->state = 0;
2994 raid10_read_request(mddev, r10_bio->master_bio, r10_bio, false);
3002 static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
3013 if (test_bit(R10BIO_IsSync, &r10_bio->state) ||
3014 test_bit(R10BIO_IsRecover, &r10_bio->state)) {
3016 int dev = r10_bio->devs[m].devnum;
3018 if (r10_bio->devs[m].bio == NULL ||
3019 r10_bio->devs[m].bio->bi_end_io == NULL)
3021 if (!r10_bio->devs[m].bio->bi_status) {
3024 r10_bio->devs[m].addr,
3025 r10_bio->sectors, 0);
3029 r10_bio->devs[m].addr,
3030 r10_bio->sectors, 0))
3034 if (r10_bio->devs[m].repl_bio == NULL ||
3035 r10_bio->devs[m].repl_bio->bi_end_io == NULL)
3038 if (!r10_bio->devs[m].repl_bio->bi_status) {
3041 r10_bio->devs[m].addr,
3042 r10_bio->sectors, 0);
3046 r10_bio->devs[m].addr,
3047 r10_bio->sectors, 0))
3051 put_buf(r10_bio);
3055 int dev = r10_bio->devs[m].devnum;
3056 struct bio *bio = r10_bio->devs[m].bio;
3061 r10_bio->devs[m].addr,
3062 r10_bio->sectors, 0);
3066 if (!narrow_write_error(r10_bio, m)) {
3069 &r10_bio->state);
3073 bio = r10_bio->devs[m].repl_bio;
3078 r10_bio->devs[m].addr,
3079 r10_bio->sectors, 0);
3085 list_add(&r10_bio->retry_list, &conf->bio_end_io_list);
3096 &r10_bio->state))
3097 close_write(r10_bio);
3098 raid_end_bio_io(r10_bio);
3106 struct r10bio *r10_bio;
3126 r10_bio = list_first_entry(&tmp, struct r10bio,
3128 list_del(&r10_bio->retry_list);
3130 set_bit(R10BIO_Degraded, &r10_bio->state);
3133 &r10_bio->state))
3134 close_write(r10_bio);
3135 raid_end_bio_io(r10_bio);
3149 r10_bio = list_entry(head->prev, struct r10bio, retry_list);
3154 mddev = r10_bio->mddev;
3156 if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
3157 test_bit(R10BIO_WriteError, &r10_bio->state))
3158 handle_write_completed(conf, r10_bio);
3159 else if (test_bit(R10BIO_IsReshape, &r10_bio->state))
3160 reshape_request_write(mddev, r10_bio);
3161 else if (test_bit(R10BIO_IsSync, &r10_bio->state))
3162 sync_request_write(mddev, r10_bio);
3163 else if (test_bit(R10BIO_IsRecover, &r10_bio->state))
3164 recovery_request_write(mddev, r10_bio);
3165 else if (test_bit(R10BIO_ReadError, &r10_bio->state))
3166 handle_read_error(mddev, r10_bio);
3281 * a number of r10_bio structures, one for each out-of-sync device.
3286 * The r10_bio structures are linked using a borrowed master_bio pointer.
3287 * This link is counted in ->remaining. When the r10_bio that points to NULL
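The comment above describes how recovery builds one r10_bio per out-of-sync device and links them through a borrowed master_bio pointer. A consolidated, hedged sketch of what lines 3513 and 3692-3694 in this listing do (building the chain, then unwinding it on the error path):

	/* Build the chain: each new recovery r10_bio points its master_bio
	 * at the r10_bio created in the previous iteration (rb2, or NULL). */
	r10_bio->master_bio = (struct bio *)rb2;
	atomic_set(&r10_bio->remaining, 0);

	/* Unwind on error: walk the borrowed links and return each buffer
	 * r10_bio to r10buf_pool via put_buf(). */
	while (r10_bio) {
		struct r10bio *rb2 = r10_bio;
		r10_bio = (struct r10bio *)rb2->master_bio;
		rb2->master_bio = NULL;
		put_buf(rb2);
	}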
3297 struct r10bio *r10_bio;
3447 r10_bio = NULL;
3475 rb2 = r10_bio;
3508 r10_bio = raid10_alloc_init_r10buf(conf);
3509 r10_bio->state = 0;
3511 atomic_set(&r10_bio->remaining, 0);
3513 r10_bio->master_bio = (struct bio*)rb2;
3516 r10_bio->mddev = mddev;
3517 set_bit(R10BIO_IsRecover, &r10_bio->state);
3518 r10_bio->sector = sect;
3520 raid10_find_phys(conf, r10_bio);
3541 int d = r10_bio->devs[j].devnum;
3552 sector = r10_bio->devs[j].addr;
3566 bio = r10_bio->devs[0].bio;
3573 from_addr = r10_bio->devs[j].addr;
3581 if (r10_bio->devs[k].devnum == i)
3584 to_addr = r10_bio->devs[k].addr;
3585 r10_bio->devs[0].devnum = d;
3586 r10_bio->devs[0].addr = from_addr;
3587 r10_bio->devs[1].devnum = i;
3588 r10_bio->devs[1].addr = to_addr;
3591 bio = r10_bio->devs[1].bio;
3599 atomic_inc(&r10_bio->remaining);
3601 r10_bio->devs[1].bio->bi_end_io = NULL;
3604 bio = r10_bio->devs[1].repl_bio;
3620 atomic_inc(&r10_bio->remaining);
3633 if (r10_bio->devs[k].devnum == i)
3639 r10_bio->devs[k].addr,
3645 r10_bio->devs[k].addr,
3659 put_buf(r10_bio);
3662 r10_bio = rb2;
3673 if (r10_bio->devs[0].bio->bi_opf & MD_FAILFAST) {
3680 int d = r10_bio->devs[j].devnum;
3687 r10_bio->devs[0].bio->bi_opf
3692 while (r10_bio) {
3693 struct r10bio *rb2 = r10_bio;
3694 r10_bio = (struct r10bio*) rb2->master_bio;
3725 r10_bio = raid10_alloc_init_r10buf(conf);
3726 r10_bio->state = 0;
3728 r10_bio->mddev = mddev;
3729 atomic_set(&r10_bio->remaining, 0);
3733 r10_bio->master_bio = NULL;
3734 r10_bio->sector = sector_nr;
3735 set_bit(R10BIO_IsSync, &r10_bio->state);
3736 raid10_find_phys(conf, r10_bio);
3737 r10_bio->sectors = (sector_nr | chunk_mask) - sector_nr + 1;
3740 int d = r10_bio->devs[i].devnum;
3745 if (r10_bio->devs[i].repl_bio)
3746 r10_bio->devs[i].repl_bio->bi_end_io = NULL;
3748 bio = r10_bio->devs[i].bio;
3756 sector = r10_bio->devs[i].addr;
3770 atomic_inc(&r10_bio->remaining);
3789 bio = r10_bio->devs[i].repl_bio;
3792 sector = r10_bio->devs[i].addr;
3807 int d = r10_bio->devs[i].devnum;
3808 if (r10_bio->devs[i].bio->bi_end_io)
3811 if (r10_bio->devs[i].repl_bio &&
3812 r10_bio->devs[i].repl_bio->bi_end_io)
3817 put_buf(r10_bio);
3845 r10_bio->sectors = nr_sectors;
3898 r10_bio = get_resync_r10bio(bio);
3899 r10_bio->sectors = nr_sectors;
4813 struct r10bio *r10_bio;
4918 r10_bio = raid10_alloc_init_r10buf(conf);
4919 r10_bio->state = 0;
4921 atomic_set(&r10_bio->remaining, 0);
4922 r10_bio->mddev = mddev;
4923 r10_bio->sector = sector_nr;
4924 set_bit(R10BIO_IsReshape, &r10_bio->state);
4925 r10_bio->sectors = last - sector_nr + 1;
4926 rdev = read_balance(conf, r10_bio, &max_sectors);
4927 BUG_ON(!test_bit(R10BIO_Previous, &r10_bio->state));
4934 mempool_free(r10_bio, &conf->r10buf_pool);
4941 read_bio->bi_iter.bi_sector = (r10_bio->devs[r10_bio->read_slot].addr
4943 read_bio->bi_private = r10_bio;
4945 r10_bio->master_bio = read_bio;
4946 r10_bio->read_slot = r10_bio->devs[r10_bio->read_slot].devnum;
4975 __raid10_find_phys(&conf->geo, r10_bio);
4983 int d = r10_bio->devs[s/2].devnum;
4987 b = r10_bio->devs[s/2].repl_bio;
4990 b = r10_bio->devs[s/2].bio;
4996 b->bi_iter.bi_sector = r10_bio->devs[s/2].addr +
5007 pages = get_resync_pages(r10_bio->devs[0].bio)->pages;
5024 r10_bio->sectors = nr_sectors;
5027 md_sync_acct_bio(read_bio, r10_bio->sectors);
5028 atomic_inc(&r10_bio->remaining);
5048 static void end_reshape_request(struct r10bio *r10_bio);
5050 struct r10bio *r10_bio);
5051 static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio)
5061 if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
5062 if (handle_reshape_read_error(mddev, r10_bio) < 0) {
5064 md_done_sync(mddev, r10_bio->sectors, 0);
5071 atomic_set(&r10_bio->remaining, 1);
5074 int d = r10_bio->devs[s/2].devnum;
5079 b = r10_bio->devs[s/2].repl_bio;
5082 b = r10_bio->devs[s/2].bio;
5090 md_sync_acct_bio(b, r10_bio->sectors);
5091 atomic_inc(&r10_bio->remaining);
5095 end_reshape_request(r10_bio);
5130 struct r10bio *r10_bio)
5133 int sectors = r10_bio->sectors;
5147 pages = get_resync_pages(r10_bio->devs[0].bio)->pages;
5149 r10b->sector = r10_bio->sector;
5206 struct r10bio *r10_bio = get_resync_r10bio(bio);
5207 struct mddev *mddev = r10_bio->mddev;
5214 d = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
5228 end_reshape_request(r10_bio);
5231 static void end_reshape_request(struct r10bio *r10_bio)
5233 if (!atomic_dec_and_test(&r10_bio->remaining))
5235 md_done_sync(r10_bio->mddev, r10_bio->sectors, 1);
5236 bio_put(r10_bio->master_bio);
5237 put_buf(r10_bio);