Lines matching refs: r10_bio
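
The matches below all reference struct r10bio, the per-request tracking structure of the MD RAID10 driver; the line numbers and excerpts appear to come from drivers/md/raid10.c. For orientation, here is a sketch of the fields those references imply. It is an approximation of the definition in drivers/md/raid10.h, not an authoritative copy, and the exact layout varies between kernel versions:

    struct r10bio {
            atomic_t          remaining;    /* outstanding per-copy bios; request completes at 0 */
            sector_t          sector;       /* virtual (array-relative) sector of the request */
            int               sectors;      /* request length in sectors */
            unsigned long     state;        /* R10BIO_* flag bits (Uptodate, ReadError, ...) */
            struct mddev      *mddev;
            struct bio        *master_bio;  /* original bio submitted to the array */
            int               read_slot;    /* copy a read was issued to, or -1 for writes */
            struct list_head  retry_list;   /* linkage onto conf->retry_list for raid10d */
            struct r10dev {
                    struct bio       *bio;        /* per-copy bio */
                    union {
                            struct bio       *repl_bio;  /* write to the replacement device */
                            struct md_rdev   *rdev;      /* rdev a read was issued to */
                    };
                    sector_t         addr;        /* physical sector on that device */
                    int              devnum;      /* index into conf->mirrors[] */
            } devs[];           /* one slot per copy (conf->copies entries) */
    };
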
73 static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio);
119 struct r10bio *r10_bio;
125 r10_bio = r10bio_pool_alloc(gfp_flags, conf);
126 if (!r10_bio)
151 r10_bio->devs[j].bio = bio;
157 r10_bio->devs[j].repl_bio = bio;
164 struct bio *rbio = r10_bio->devs[j].repl_bio;
171 bio = r10_bio->devs[j].bio;
182 rp->raid_bio = r10_bio;
190 return r10_bio;
199 if (r10_bio->devs[j].bio)
200 bio_put(r10_bio->devs[j].bio);
201 if (r10_bio->devs[j].repl_bio)
202 bio_put(r10_bio->devs[j].repl_bio);
206 rbio_pool_free(r10_bio, conf);
237 static void put_all_bios(struct r10conf *conf, struct r10bio *r10_bio)
242 struct bio **bio = & r10_bio->devs[i].bio;
246 bio = &r10_bio->devs[i].repl_bio;
247 if (r10_bio->read_slot < 0 && !BIO_SPECIAL(*bio))
253 static void free_r10bio(struct r10bio *r10_bio)
255 struct r10conf *conf = r10_bio->mddev->private;
257 put_all_bios(conf, r10_bio);
258 mempool_free(r10_bio, &conf->r10bio_pool);
261 static void put_buf(struct r10bio *r10_bio)
263 struct r10conf *conf = r10_bio->mddev->private;
265 mempool_free(r10_bio, &conf->r10buf_pool);
270 static void reschedule_retry(struct r10bio *r10_bio)
273 struct mddev *mddev = r10_bio->mddev;
277 list_add(&r10_bio->retry_list, &conf->retry_list);
292 static void raid_end_bio_io(struct r10bio *r10_bio)
294 struct bio *bio = r10_bio->master_bio;
295 struct r10conf *conf = r10_bio->mddev->private;
297 if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
307 free_r10bio(r10_bio);
313 static inline void update_head_pos(int slot, struct r10bio *r10_bio)
315 struct r10conf *conf = r10_bio->mddev->private;
317 conf->mirrors[r10_bio->devs[slot].devnum].head_position =
318 r10_bio->devs[slot].addr + (r10_bio->sectors);
324 static int find_bio_disk(struct r10conf *conf, struct r10bio *r10_bio,
331 if (r10_bio->devs[slot].bio == bio)
333 if (r10_bio->devs[slot].repl_bio == bio) {
340 update_head_pos(slot, r10_bio);
346 return r10_bio->devs[slot].devnum;
352 struct r10bio *r10_bio = bio->bi_private;
355 struct r10conf *conf = r10_bio->mddev->private;
357 slot = r10_bio->read_slot;
358 rdev = r10_bio->devs[slot].rdev;
362 update_head_pos(slot, r10_bio);
374 set_bit(R10BIO_Uptodate, &r10_bio->state);
381 if (!_enough(conf, test_bit(R10BIO_Previous, &r10_bio->state),
386 raid_end_bio_io(r10_bio);
396 (unsigned long long)r10_bio->sector);
397 set_bit(R10BIO_ReadError, &r10_bio->state);
398 reschedule_retry(r10_bio);
402 static void close_write(struct r10bio *r10_bio)
405 md_bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector,
406 r10_bio->sectors,
407 !test_bit(R10BIO_Degraded, &r10_bio->state),
409 md_write_end(r10_bio->mddev);
412 static void one_write_done(struct r10bio *r10_bio)
414 if (atomic_dec_and_test(&r10_bio->remaining)) {
415 if (test_bit(R10BIO_WriteError, &r10_bio->state))
416 reschedule_retry(r10_bio);
418 close_write(r10_bio);
419 if (test_bit(R10BIO_MadeGood, &r10_bio->state))
420 reschedule_retry(r10_bio);
422 raid_end_bio_io(r10_bio);
429 struct r10bio *r10_bio = bio->bi_private;
432 struct r10conf *conf = r10_bio->mddev->private;
440 dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
475 set_bit(R10BIO_WriteError, &r10_bio->state);
478 set_bit(R10BIO_Degraded, &r10_bio->state);
479 r10_bio->devs[slot].bio = NULL;
507 set_bit(R10BIO_Uptodate, &r10_bio->state);
511 r10_bio->devs[slot].addr,
512 r10_bio->sectors,
516 r10_bio->devs[slot].repl_bio = IO_MADE_GOOD;
518 r10_bio->devs[slot].bio = IO_MADE_GOOD;
520 set_bit(R10BIO_MadeGood, &r10_bio->state);
529 one_write_done(r10_bio);
704 struct r10bio *r10_bio,
707 const sector_t this_sector = r10_bio->sector;
709 int sectors = r10_bio->sectors;
719 raid10_find_phys(conf, r10_bio);
728 clear_bit(R10BIO_FailFast, &r10_bio->state);
749 if (r10_bio->devs[slot].bio == IO_BLOCKED)
751 disk = r10_bio->devs[slot].devnum;
754 r10_bio->devs[slot].addr + sectors >
768 r10_bio->devs[slot].addr + sectors > rdev->recovery_offset)
771 dev_sector = r10_bio->devs[slot].addr;
817 set_bit(R10BIO_FailFast, &r10_bio->state);
827 new_distance = r10_bio->devs[slot].addr;
829 new_distance = abs(r10_bio->devs[slot].addr -
850 r10_bio->read_slot = slot;
1045 static sector_t choose_data_offset(struct r10bio *r10_bio,
1049 test_bit(R10BIO_Previous, &r10_bio->state))
1128 struct r10bio *r10_bio)
1137 int slot = r10_bio->read_slot;
1141 if (slot >= 0 && r10_bio->devs[slot].rdev) {
1144 * safely dereference the rdev in the r10_bio,
1157 disk = r10_bio->devs[slot].devnum;
1164 err_rdev = r10_bio->devs[slot].rdev;
1169 regular_request_wait(mddev, conf, bio, r10_bio->sectors);
1170 rdev = read_balance(conf, r10_bio, &max_sectors);
1175 (unsigned long long)r10_bio->sector);
1177 raid_end_bio_io(r10_bio);
1184 (unsigned long long)r10_bio->sector);
1193 r10_bio->master_bio = bio;
1194 r10_bio->sectors = max_sectors;
1196 slot = r10_bio->read_slot;
1200 r10_bio->devs[slot].bio = read_bio;
1201 r10_bio->devs[slot].rdev = rdev;
1203 read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr +
1204 choose_data_offset(r10_bio, rdev);
1209 test_bit(R10BIO_FailFast, &r10_bio->state))
1211 read_bio->bi_private = r10_bio;
1216 r10_bio->sector);
1221 static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio,
1233 int devnum = r10_bio->devs[n_copy].devnum;
1248 r10_bio->devs[n_copy].repl_bio = mbio;
1250 r10_bio->devs[n_copy].bio = mbio;
1252 mbio->bi_iter.bi_sector = (r10_bio->devs[n_copy].addr +
1253 choose_data_offset(r10_bio, rdev));
1261 mbio->bi_private = r10_bio;
1266 r10_bio->sector);
1270 atomic_inc(&r10_bio->remaining);
1290 struct r10bio *r10_bio)
1314 sectors = r10_bio->sectors;
1347 * gets its own r10_bio with a set of bios attached.
1350 r10_bio->read_slot = -1; /* make sure repl_bio gets freed */
1351 raid10_find_phys(conf, r10_bio);
1355 max_sectors = r10_bio->sectors;
1358 int d = r10_bio->devs[i].devnum;
1385 r10_bio->devs[i].bio = NULL;
1386 r10_bio->devs[i].repl_bio = NULL;
1389 set_bit(R10BIO_Degraded, &r10_bio->state);
1394 sector_t dev_sector = r10_bio->devs[i].addr;
1434 r10_bio->devs[i].bio = bio;
1438 r10_bio->devs[i].repl_bio = bio;
1450 if (r10_bio->devs[j].bio) {
1451 d = r10_bio->devs[j].devnum;
1454 if (r10_bio->devs[j].repl_bio) {
1456 d = r10_bio->devs[j].devnum;
1473 if (max_sectors < r10_bio->sectors)
1474 r10_bio->sectors = max_sectors;
1476 if (r10_bio->sectors < bio_sectors(bio)) {
1477 struct bio *split = bio_split(bio, r10_bio->sectors,
1484 r10_bio->master_bio = bio;
1487 atomic_set(&r10_bio->remaining, 1);
1488 md_bitmap_startwrite(mddev->bitmap, r10_bio->sector, r10_bio->sectors, 0);
1491 if (r10_bio->devs[i].bio)
1492 raid10_write_one_disk(mddev, r10_bio, bio, false, i);
1493 if (r10_bio->devs[i].repl_bio)
1494 raid10_write_one_disk(mddev, r10_bio, bio, true, i);
1496 one_write_done(r10_bio);
1502 struct r10bio *r10_bio;
1504 r10_bio = mempool_alloc(&conf->r10bio_pool, GFP_NOIO);
1506 r10_bio->master_bio = bio;
1507 r10_bio->sectors = sectors;
1509 r10_bio->mddev = mddev;
1510 r10_bio->sector = bio->bi_iter.bi_sector;
1511 r10_bio->state = 0;
1512 r10_bio->read_slot = -1;
1513 memset(r10_bio->devs, 0, sizeof(r10_bio->devs[0]) * conf->copies);
1516 raid10_read_request(mddev, bio, r10_bio);
1518 raid10_write_request(mddev, bio, r10_bio);
1886 static void __end_sync_read(struct r10bio *r10_bio, struct bio *bio, int d)
1888 struct r10conf *conf = r10_bio->mddev->private;
1891 set_bit(R10BIO_Uptodate, &r10_bio->state);
1896 atomic_add(r10_bio->sectors,
1903 if (test_bit(R10BIO_IsRecover, &r10_bio->state) ||
1904 atomic_dec_and_test(&r10_bio->remaining)) {
1908 reschedule_retry(r10_bio);
1914 struct r10bio *r10_bio = get_resync_r10bio(bio);
1915 struct r10conf *conf = r10_bio->mddev->private;
1916 int d = find_bio_disk(conf, r10_bio, bio, NULL, NULL);
1918 __end_sync_read(r10_bio, bio, d);
1924 struct r10bio *r10_bio = bio->bi_private;
1926 __end_sync_read(r10_bio, bio, r10_bio->read_slot);
1929 static void end_sync_request(struct r10bio *r10_bio)
1931 struct mddev *mddev = r10_bio->mddev;
1933 while (atomic_dec_and_test(&r10_bio->remaining)) {
1934 if (r10_bio->master_bio == NULL) {
1936 sector_t s = r10_bio->sectors;
1937 if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
1938 test_bit(R10BIO_WriteError, &r10_bio->state))
1939 reschedule_retry(r10_bio);
1941 put_buf(r10_bio);
1945 struct r10bio *r10_bio2 = (struct r10bio *)r10_bio->master_bio;
1946 if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
1947 test_bit(R10BIO_WriteError, &r10_bio->state))
1948 reschedule_retry(r10_bio);
1950 put_buf(r10_bio);
1951 r10_bio = r10_bio2;
1958 struct r10bio *r10_bio = get_resync_r10bio(bio);
1959 struct mddev *mddev = r10_bio->mddev;
1968 d = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
1982 set_bit(R10BIO_WriteError, &r10_bio->state);
1985 r10_bio->devs[slot].addr,
1986 r10_bio->sectors,
1988 set_bit(R10BIO_MadeGood, &r10_bio->state);
1992 end_sync_request(r10_bio);
2011 static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
2019 atomic_set(&r10_bio->remaining, 1);
2023 if (!r10_bio->devs[i].bio->bi_status)
2030 fbio = r10_bio->devs[i].bio;
2031 fbio->bi_iter.bi_size = r10_bio->sectors << 9;
2035 vcnt = (r10_bio->sectors + (PAGE_SIZE >> 9) - 1) >> (PAGE_SHIFT - 9);
2042 tbio = r10_bio->devs[i].bio;
2050 d = r10_bio->devs[i].devnum;
2052 if (!r10_bio->devs[i].bio->bi_status) {
2057 int sectors = r10_bio->sectors;
2070 atomic64_add(r10_bio->sectors, &mddev->resync_mismatches);
2089 rp->raid_bio = r10_bio;
2091 tbio->bi_iter.bi_sector = r10_bio->devs[i].addr;
2098 atomic_inc(&r10_bio->remaining);
2114 tbio = r10_bio->devs[i].repl_bio;
2117 if (r10_bio->devs[i].bio->bi_end_io != end_sync_write
2118 && r10_bio->devs[i].bio != fbio)
2120 d = r10_bio->devs[i].devnum;
2121 atomic_inc(&r10_bio->remaining);
2128 if (atomic_dec_and_test(&r10_bio->remaining)) {
2129 md_done_sync(mddev, r10_bio->sectors, 1);
2130 put_buf(r10_bio);
2139 * There is a separate r10_bio for each non-in_sync drive.
2144 static void fix_recovery_read_error(struct r10bio *r10_bio)
2153 struct mddev *mddev = r10_bio->mddev;
2155 struct bio *bio = r10_bio->devs[0].bio;
2157 int sectors = r10_bio->sectors;
2159 int dr = r10_bio->devs[0].devnum;
2160 int dw = r10_bio->devs[1].devnum;
2173 addr = r10_bio->devs[0].addr + sect,
2181 addr = r10_bio->devs[1].addr + sect;
2205 addr = r10_bio->devs[1].addr + sect;
2227 static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio)
2231 struct bio *wbio = r10_bio->devs[1].bio;
2232 struct bio *wbio2 = r10_bio->devs[1].repl_bio;
2241 if (!test_bit(R10BIO_Uptodate, &r10_bio->state)) {
2242 fix_recovery_read_error(r10_bio);
2244 end_sync_request(r10_bio);
2246 end_sync_request(r10_bio);
2254 d = r10_bio->devs[1].devnum;
2336 static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10bio *r10_bio)
2338 int sect = 0; /* Offset from r10_bio->sector */
2339 int sectors = r10_bio->sectors;
2342 int d = r10_bio->devs[r10_bio->read_slot].devnum;
2366 r10_bio->devs[r10_bio->read_slot].bio = IO_BLOCKED;
2372 int sl = r10_bio->read_slot;
2384 d = r10_bio->devs[sl].devnum;
2389 is_badblock(rdev, r10_bio->devs[sl].addr + sect, s,
2394 r10_bio->devs[sl].addr +
2407 } while (!success && sl != r10_bio->read_slot);
2415 int dn = r10_bio->devs[r10_bio->read_slot].devnum;
2420 r10_bio->devs[r10_bio->read_slot].addr
2424 r10_bio->devs[r10_bio->read_slot].bio
2433 while (sl != r10_bio->read_slot) {
2439 d = r10_bio->devs[sl].devnum;
2449 r10_bio->devs[sl].addr +
2458 choose_data_offset(r10_bio,
2469 while (sl != r10_bio->read_slot) {
2475 d = r10_bio->devs[sl].devnum;
2485 r10_bio->devs[sl].addr +
2495 choose_data_offset(r10_bio, rdev)),
2506 choose_data_offset(r10_bio, rdev)),
2521 static int narrow_write_error(struct r10bio *r10_bio, int i)
2523 struct bio *bio = r10_bio->master_bio;
2524 struct mddev *mddev = r10_bio->mddev;
2526 struct md_rdev *rdev = conf->mirrors[r10_bio->devs[i].devnum].rdev;
2541 int sect_to_write = r10_bio->sectors;
2549 sector = r10_bio->sector;
2550 sectors = ((r10_bio->sector + block_sectors)
2562 wsector = r10_bio->devs[i].addr + (sector - r10_bio->sector);
2564 choose_data_offset(r10_bio, rdev);
2582 static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio)
2584 int slot = r10_bio->read_slot;
2587 struct md_rdev *rdev = r10_bio->devs[slot].rdev;
2597 bio = r10_bio->devs[slot].bio;
2599 r10_bio->devs[slot].bio = NULL;
2602 r10_bio->devs[slot].bio = IO_BLOCKED;
2605 fix_read_error(conf, mddev, r10_bio);
2612 r10_bio->state = 0;
2613 raid10_read_request(mddev, r10_bio->master_bio, r10_bio);
2616 static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
2627 if (test_bit(R10BIO_IsSync, &r10_bio->state) ||
2628 test_bit(R10BIO_IsRecover, &r10_bio->state)) {
2630 int dev = r10_bio->devs[m].devnum;
2632 if (r10_bio->devs[m].bio == NULL ||
2633 r10_bio->devs[m].bio->bi_end_io == NULL)
2635 if (!r10_bio->devs[m].bio->bi_status) {
2638 r10_bio->devs[m].addr,
2639 r10_bio->sectors, 0);
2643 r10_bio->devs[m].addr,
2644 r10_bio->sectors, 0))
2648 if (r10_bio->devs[m].repl_bio == NULL ||
2649 r10_bio->devs[m].repl_bio->bi_end_io == NULL)
2652 if (!r10_bio->devs[m].repl_bio->bi_status) {
2655 r10_bio->devs[m].addr,
2656 r10_bio->sectors, 0);
2660 r10_bio->devs[m].addr,
2661 r10_bio->sectors, 0))
2665 put_buf(r10_bio);
2669 int dev = r10_bio->devs[m].devnum;
2670 struct bio *bio = r10_bio->devs[m].bio;
2675 r10_bio->devs[m].addr,
2676 r10_bio->sectors, 0);
2680 if (!narrow_write_error(r10_bio, m)) {
2683 &r10_bio->state);
2687 bio = r10_bio->devs[m].repl_bio;
2692 r10_bio->devs[m].addr,
2693 r10_bio->sectors, 0);
2699 list_add(&r10_bio->retry_list, &conf->bio_end_io_list);
2710 &r10_bio->state))
2711 close_write(r10_bio);
2712 raid_end_bio_io(r10_bio);
2720 struct r10bio *r10_bio;
2740 r10_bio = list_first_entry(&tmp, struct r10bio,
2742 list_del(&r10_bio->retry_list);
2744 set_bit(R10BIO_Degraded, &r10_bio->state);
2747 &r10_bio->state))
2748 close_write(r10_bio);
2749 raid_end_bio_io(r10_bio);
2763 r10_bio = list_entry(head->prev, struct r10bio, retry_list);
2768 mddev = r10_bio->mddev;
2770 if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
2771 test_bit(R10BIO_WriteError, &r10_bio->state))
2772 handle_write_completed(conf, r10_bio);
2773 else if (test_bit(R10BIO_IsReshape, &r10_bio->state))
2774 reshape_request_write(mddev, r10_bio);
2775 else if (test_bit(R10BIO_IsSync, &r10_bio->state))
2776 sync_request_write(mddev, r10_bio);
2777 else if (test_bit(R10BIO_IsRecover, &r10_bio->state))
2778 recovery_request_write(mddev, r10_bio);
2779 else if (test_bit(R10BIO_ReadError, &r10_bio->state))
2780 handle_read_error(mddev, r10_bio);
2895 * a number of r10_bio structures, one for each out-of-sync device.
2900 * The r10_bio structures are linked using a borrowed master_bio pointer.
2901 * This link is counted in ->remaining. When the r10_bio that points to NULL
2911 struct r10bio *r10_bio;
3047 r10_bio = NULL;
3077 rb2 = r10_bio;
3109 r10_bio = raid10_alloc_init_r10buf(conf);
3110 r10_bio->state = 0;
3112 atomic_set(&r10_bio->remaining, 0);
3114 r10_bio->master_bio = (struct bio*)rb2;
3117 r10_bio->mddev = mddev;
3118 set_bit(R10BIO_IsRecover, &r10_bio->state);
3119 r10_bio->sector = sect;
3121 raid10_find_phys(conf, r10_bio);
3142 int d = r10_bio->devs[j].devnum;
3153 sector = r10_bio->devs[j].addr;
3167 bio = r10_bio->devs[0].bio;
3174 from_addr = r10_bio->devs[j].addr;
3182 if (r10_bio->devs[k].devnum == i)
3185 to_addr = r10_bio->devs[k].addr;
3186 r10_bio->devs[0].devnum = d;
3187 r10_bio->devs[0].addr = from_addr;
3188 r10_bio->devs[1].devnum = i;
3189 r10_bio->devs[1].addr = to_addr;
3192 bio = r10_bio->devs[1].bio;
3200 atomic_inc(&r10_bio->remaining);
3202 r10_bio->devs[1].bio->bi_end_io = NULL;
3205 bio = r10_bio->devs[1].repl_bio;
3221 atomic_inc(&r10_bio->remaining);
3234 if (r10_bio->devs[k].devnum == i)
3240 r10_bio->devs[k].addr,
3246 r10_bio->devs[k].addr,
3258 put_buf(r10_bio);
3261 r10_bio = rb2;
3270 if (r10_bio->devs[0].bio->bi_opf & MD_FAILFAST) {
3277 int d = r10_bio->devs[j].devnum;
3284 r10_bio->devs[0].bio->bi_opf
3289 while (r10_bio) {
3290 struct r10bio *rb2 = r10_bio;
3291 r10_bio = (struct r10bio*) rb2->master_bio;
3322 r10_bio = raid10_alloc_init_r10buf(conf);
3323 r10_bio->state = 0;
3325 r10_bio->mddev = mddev;
3326 atomic_set(&r10_bio->remaining, 0);
3330 r10_bio->master_bio = NULL;
3331 r10_bio->sector = sector_nr;
3332 set_bit(R10BIO_IsSync, &r10_bio->state);
3333 raid10_find_phys(conf, r10_bio);
3334 r10_bio->sectors = (sector_nr | chunk_mask) - sector_nr + 1;
3337 int d = r10_bio->devs[i].devnum;
3342 if (r10_bio->devs[i].repl_bio)
3343 r10_bio->devs[i].repl_bio->bi_end_io = NULL;
3345 bio = r10_bio->devs[i].bio;
3353 sector = r10_bio->devs[i].addr;
3367 atomic_inc(&r10_bio->remaining);
3386 bio = r10_bio->devs[i].repl_bio;
3389 sector = r10_bio->devs[i].addr;
3404 int d = r10_bio->devs[i].devnum;
3405 if (r10_bio->devs[i].bio->bi_end_io)
3408 if (r10_bio->devs[i].repl_bio &&
3409 r10_bio->devs[i].repl_bio->bi_end_io)
3414 put_buf(r10_bio);
3442 r10_bio->sectors = nr_sectors;
3495 r10_bio = get_resync_r10bio(bio);
3496 r10_bio->sectors = nr_sectors;
4425 struct r10bio *r10_bio;
4530 r10_bio = raid10_alloc_init_r10buf(conf);
4531 r10_bio->state = 0;
4533 atomic_set(&r10_bio->remaining, 0);
4534 r10_bio->mddev = mddev;
4535 r10_bio->sector = sector_nr;
4536 set_bit(R10BIO_IsReshape, &r10_bio->state);
4537 r10_bio->sectors = last - sector_nr + 1;
4538 rdev = read_balance(conf, r10_bio, &max_sectors);
4539 BUG_ON(!test_bit(R10BIO_Previous, &r10_bio->state));
4546 mempool_free(r10_bio, &conf->r10buf_pool);
4554 read_bio->bi_iter.bi_sector = (r10_bio->devs[r10_bio->read_slot].addr
4556 read_bio->bi_private = r10_bio;
4563 r10_bio->master_bio = read_bio;
4564 r10_bio->read_slot = r10_bio->devs[r10_bio->read_slot].devnum;
4593 __raid10_find_phys(&conf->geo, r10_bio);
4601 int d = r10_bio->devs[s/2].devnum;
4605 b = r10_bio->devs[s/2].repl_bio;
4608 b = r10_bio->devs[s/2].bio;
4614 b->bi_iter.bi_sector = r10_bio->devs[s/2].addr +
4625 pages = get_resync_pages(r10_bio->devs[0].bio)->pages;
4642 r10_bio->sectors = nr_sectors;
4645 md_sync_acct_bio(read_bio, r10_bio->sectors);
4646 atomic_inc(&r10_bio->remaining);
4666 static void end_reshape_request(struct r10bio *r10_bio);
4668 struct r10bio *r10_bio);
4669 static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio)
4679 if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
4680 if (handle_reshape_read_error(mddev, r10_bio) < 0) {
4682 md_done_sync(mddev, r10_bio->sectors, 0);
4689 atomic_set(&r10_bio->remaining, 1);
4692 int d = r10_bio->devs[s/2].devnum;
4697 b = r10_bio->devs[s/2].repl_bio;
4700 b = r10_bio->devs[s/2].bio;
4708 md_sync_acct_bio(b, r10_bio->sectors);
4709 atomic_inc(&r10_bio->remaining);
4713 end_reshape_request(r10_bio);
4748 struct r10bio *r10_bio)
4751 int sectors = r10_bio->sectors;
4765 pages = get_resync_pages(r10_bio->devs[0].bio)->pages;
4767 r10b->sector = r10_bio->sector;
4824 struct r10bio *r10_bio = get_resync_r10bio(bio);
4825 struct mddev *mddev = r10_bio->mddev;
4832 d = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
4846 end_reshape_request(r10_bio);
4849 static void end_reshape_request(struct r10bio *r10_bio)
4851 if (!atomic_dec_and_test(&r10_bio->remaining))
4853 md_done_sync(r10_bio->mddev, r10_bio->sectors, 1);
4854 bio_put(r10_bio->master_bio);
4855 put_buf(r10_bio);