Lines Matching defs:mddev (drivers/md/raid10.c)

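Orientation for the matches that follow: struct mddev and the RAID10 private data point at each other, and nearly every hit below is one side of that pairing being dereferenced. The sketch below is illustrative only; both structs are trimmed to the handful of fields these matches actually touch (the real definitions live in drivers/md/md.h and drivers/md/raid10.h), and the comments cite the listing lines that establish the links.

    /* Illustrative sketch, not the full kernel definitions. */
    struct md_thread;
    struct raid10_info;

    struct mddev {
            void *private;            /* -> struct r10conf for a RAID10 array   */
            struct md_thread *thread; /* raid10d; woken via md_wakeup_thread()  */
            unsigned long recovery;   /* MD_RECOVERY_* state bits               */
            /* ... many more fields ... */
    };

    struct r10conf {
            struct mddev *mddev;           /* back-pointer to the owning array  */
            struct raid10_info *mirrors;   /* per-device state                  */
            /* ... */
    };

    /* The two assignments that tie them together appear below at line 3725
     * (setup_conf: conf->mddev = mddev) and line 3761 (raid10_run:
     * mddev->private = conf).  Most other matches dereference one side of
     * that pair, e.g. line 1130: struct r10conf *conf = mddev->private;    */
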
71 static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
73 static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio);
129 if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery) ||
130 test_bit(MD_RECOVERY_RESHAPE, &conf->mddev->recovery))
174 &conf->mddev->recovery)) {
255 struct r10conf *conf = r10_bio->mddev->private;
263 struct r10conf *conf = r10_bio->mddev->private;
273 struct mddev *mddev = r10_bio->mddev;
274 struct r10conf *conf = mddev->private;
284 md_wakeup_thread(mddev->thread);
295 struct r10conf *conf = r10_bio->mddev->private;
315 struct r10conf *conf = r10_bio->mddev->private;
355 struct r10conf *conf = r10_bio->mddev->private;
387 rdev_dec_pending(rdev, conf->mddev);
394 mdname(conf->mddev),
405 md_bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector,
409 md_write_end(r10_bio->mddev);
432 struct r10conf *conf = r10_bio->mddev->private;
457 md_error(rdev->mddev, rdev);
462 &rdev->mddev->recovery);
467 md_error(rdev->mddev, rdev);
531 rdev_dec_pending(rdev, conf->mddev);
630 conf->mddev->reshape_backwards)) {
735 if ((conf->mddev->recovery_cp < MaxSector
737 (mddev_is_clustered(conf->mddev) &&
738 md_cluster_ops->area_resyncing(conf->mddev, READ, this_sector,
888 md_bitmap_unplug(conf->mddev->bitmap);
978 raid10_log(conf->mddev, "wait barrier");
988 (conf->mddev->thread->tsk == current &&
990 &conf->mddev->recovery) &&
1048 if (!test_bit(MD_RECOVERY_RESHAPE, &rdev->mddev->recovery) ||
1065 struct mddev *mddev = plug->cb.data;
1066 struct r10conf *conf = mddev->private;
1075 md_wakeup_thread(mddev->thread);
1082 md_bitmap_unplug(mddev->bitmap);
1110 static void regular_request_wait(struct mddev *mddev, struct r10conf *conf,
1114 while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
1117 raid10_log(conf->mddev, "wait reshape");
1127 static void raid10_read_request(struct mddev *mddev, struct bio *bio,
1130 struct r10conf *conf = mddev->private;
1169 regular_request_wait(mddev, conf, bio, r10_bio->sectors);
1174 mdname(mddev), b,
1182 mdname(mddev),
1198 read_bio = bio_clone_fast(bio, gfp, &mddev->bio_set);
1213 if (mddev->gendisk)
1215 read_bio, disk_devt(mddev->gendisk),
1221 static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio,
1231 struct r10conf *conf = mddev->private;
1246 mbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set);
1263 if (conf->mddev->gendisk)
1265 mbio, disk_devt(conf->mddev->gendisk),
1272 cb = blk_check_plugged(raid10_unplug, mddev, sizeof(*plug));
1285 md_wakeup_thread(mddev->thread);
1289 static void raid10_write_request(struct mddev *mddev, struct bio *bio,
1292 struct r10conf *conf = mddev->private;
1298 if ((mddev_is_clustered(mddev) &&
1299 md_cluster_ops->area_resyncing(mddev, WRITE,
1306 if (!md_cluster_ops->area_resyncing(mddev, WRITE,
1315 regular_request_wait(mddev, conf, bio, sectors);
1316 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
1317 (mddev->reshape_backwards
1323 mddev->reshape_position = conf->reshape_progress;
1324 set_mask_bits(&mddev->sb_flags, 0,
1326 md_wakeup_thread(mddev->thread);
1327 raid10_log(conf->mddev, "wait reshape metadata");
1328 wait_event(mddev->sb_wait,
1329 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
1331 conf->reshape_safe = mddev->reshape_position;
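Lines 1316 through 1331 above are the write path's handshake with an in-progress reshape: when a write targets a region the reshape has not yet covered (the direction depends on mddev->reshape_backwards), raid10_write_request pushes mddev->reshape_position out, asks the md thread to write the superblock, and sleeps until the metadata update lands. The fragment below is a condensed, hedged restatement of those listed lines for orientation only; region_not_yet_reshaped() is a made-up placeholder for the sector-range comparison against conf->reshape_progress in the real code, and the snippet is not compilable on its own.

    /* Condensed restatement of lines 1316-1331; not verbatim kernel code. */
    if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
        region_not_yet_reshaped(conf, bio)) {            /* placeholder condition */
            mddev->reshape_position = conf->reshape_progress;
            set_mask_bits(&mddev->sb_flags, 0,
                          BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
            md_wakeup_thread(mddev->thread);             /* have md write the sb */
            wait_event(mddev->sb_wait,
                       !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
            conf->reshape_safe = mddev->reshape_position;
    }
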
1335 md_wakeup_thread(mddev->thread);
1336 raid10_log(mddev, "wait queued");
1452 rdev_dec_pending(conf->mirrors[d].rdev, mddev);
1463 rdev_dec_pending(rdev, mddev);
1467 raid10_log(conf->mddev, "wait rdev %d blocked", blocked_rdev->raid_disk);
1468 md_wait_for_blocked_rdev(blocked_rdev, mddev);
1488 md_bitmap_startwrite(mddev->bitmap, r10_bio->sector, r10_bio->sectors, 0);
1492 raid10_write_one_disk(mddev, r10_bio, bio, false, i);
1494 raid10_write_one_disk(mddev, r10_bio, bio, true, i);
1499 static void __make_request(struct mddev *mddev, struct bio *bio, int sectors)
1501 struct r10conf *conf = mddev->private;
1509 r10_bio->mddev = mddev;
1516 raid10_read_request(mddev, bio, r10_bio);
1518 raid10_write_request(mddev, bio, r10_bio);
1521 static bool raid10_make_request(struct mddev *mddev, struct bio *bio)
1523 struct r10conf *conf = mddev->private;
1529 && md_flush_request(mddev, bio))
1532 if (!md_write_start(mddev, bio))
1547 __make_request(mddev, bio, sectors);
1554 static void raid10_status(struct seq_file *seq, struct mddev *mddev)
1556 struct r10conf *conf = mddev->private;
1560 seq_printf(seq, " %dK chunks", mddev->chunk_sectors / 2);
1572 conf->geo.raid_disks - mddev->degraded);
1634 static void raid10_error(struct mddev *mddev, struct md_rdev *rdev)
1637 struct r10conf *conf = mddev->private;
1647 if (test_bit(In_sync, &rdev->flags) && !mddev->fail_last_dev
1656 mddev->degraded++;
1660 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
1663 set_mask_bits(&mddev->sb_flags, 0,
1668 mdname(mddev), bdevname(rdev->bdev, b),
1669 mdname(mddev), conf->geo.raid_disks - mddev->degraded);
1682 pr_debug(" --- wd:%d rd:%d\n", conf->geo.raid_disks - conf->mddev->degraded,
1706 static int raid10_spare_active(struct mddev *mddev)
1709 struct r10conf *conf = mddev->private;
1747 mddev->degraded -= count;
1754 static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
1756 struct r10conf *conf = mddev->private;
1762 if (mddev->recovery_cp < MaxSector)
1770 if (md_integrity_add_rdev(rdev, mddev))
1784 if (p->recovery_disabled == mddev->recovery_disabled)
1794 if (mddev->gendisk)
1795 disk_stack_limits(mddev->gendisk, rdev->bdev,
1802 if (mddev->gendisk)
1803 disk_stack_limits(mddev->gendisk, rdev->bdev,
1807 p->recovery_disabled = mddev->recovery_disabled - 1;
1815 if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
1816 blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue);
1822 static int raid10_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
1824 struct r10conf *conf = mddev->private;
1831 if (unlikely(number >= mddev->raid_disks))
1850 mddev->recovery_disabled != p->recovery_disabled &&
1878 err = md_integrity_register(mddev);
1888 struct r10conf *conf = r10_bio->mddev->private;
1902 rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev);
1915 struct r10conf *conf = r10_bio->mddev->private;
1931 struct mddev *mddev = r10_bio->mddev;
1942 md_done_sync(mddev, s, 1);
1959 struct mddev *mddev = r10_bio->mddev;
1960 struct r10conf *conf = mddev->private;
1976 md_error(mddev, rdev);
1981 &rdev->mddev->recovery);
1990 rdev_dec_pending(rdev, mddev);
2011 static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
2013 struct r10conf *conf = mddev->private;
2070 atomic64_add(r10_bio->sectors, &mddev->resync_mismatches);
2071 if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
2076 md_error(rdev->mddev, rdev);
2129 md_done_sync(mddev, r10_bio->sectors, 1);
2153 struct mddev *mddev = r10_bio->mddev;
2154 struct r10conf *conf = mddev->private;
2192 &rdev->mddev->recovery);
2210 mdname(mddev));
2213 = mddev->recovery_disabled;
2215 &mddev->recovery);
2227 static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio)
2229 struct r10conf *conf = mddev->private;
2274 static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
2320 &rdev->mddev->recovery);
2324 md_error(rdev->mddev, rdev);
2336 static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10bio *r10_bio)
2341 int max_read_errors = atomic_read(&mddev->max_corr_read_errors);
2354 check_decay_read_errors(mddev, rdev);
2361 mdname(mddev), b,
2364 mdname(mddev), b);
2365 md_error(mddev, rdev);
2399 rdev_dec_pending(rdev, mddev);
2423 md_error(mddev, rdev);
2455 mdname(mddev), s,
2462 mdname(mddev),
2465 rdev_dec_pending(rdev, mddev);
2492 mdname(mddev), s,
2498 mdname(mddev),
2503 mdname(mddev), s,
2511 rdev_dec_pending(rdev, mddev);
2524 struct mddev *mddev = r10_bio->mddev;
2525 struct r10conf *conf = mddev->private;
2560 wbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set);
2582 static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio)
2586 struct r10conf *conf = mddev->private;
2601 if (mddev->ro)
2605 fix_read_error(conf, mddev, r10_bio);
2608 md_error(mddev, rdev);
2610 rdev_dec_pending(rdev, mddev);
2613 raid10_read_request(mddev, r10_bio->master_bio, r10_bio);
2645 md_error(conf->mddev, rdev);
2662 md_error(conf->mddev, rdev);
2677 rdev_dec_pending(rdev, conf->mddev);
2681 md_error(conf->mddev, rdev);
2685 rdev_dec_pending(rdev, conf->mddev);
2694 rdev_dec_pending(rdev, conf->mddev);
2707 md_wakeup_thread(conf->mddev->thread);
2719 struct mddev *mddev = thread->mddev;
2722 struct r10conf *conf = mddev->private;
2726 md_check_recovery(mddev);
2729 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
2732 if (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
2743 if (mddev->degraded)
2768 mddev = r10_bio->mddev;
2769 conf = mddev->private;
2774 reshape_request_write(mddev, r10_bio);
2776 sync_request_write(mddev, r10_bio);
2778 recovery_request_write(mddev, r10_bio);
2780 handle_read_error(mddev, r10_bio);
2785 if (mddev->sb_flags & ~(1<<MD_SB_CHANGE_PENDING))
2786 md_check_recovery(mddev);
2817 if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery) ||
2818 test_bit(MD_RECOVERY_RESHAPE, &conf->mddev->recovery))
2864 window_size = (chunks + extra_chunk) * conf->mddev->chunk_sectors;
2885 * We differentiate by looking at MD_RECOVERY_SYNC in mddev->recovery.
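The comment captured at line 2885 refers to how raid10_sync_request distinguishes a resync pass from a recovery pass via the MD_RECOVERY_SYNC bit. A simplified sketch of that distinction is shown below; it is not the kernel's exact control flow, just a reading of the matches at lines 2943-2945 and 3044 further down.

    /* Simplified sketch of the distinction described by the comment at 2885. */
    if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
        test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
            max_sector = mddev->resync_max_sectors;      /* lines 2943-2945 */

    if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
            /* recovery: rebuild the copies that live on replaced/spare devices */
    } else {
            /* resync (or a requested check/repair): read all copies, make them
             * agree, and count mismatches when MD_RECOVERY_CHECK is set        */
    }
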
2907 static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
2910 struct r10conf *conf = mddev->private;
2926 if (mddev->bitmap == NULL &&
2927 mddev->recovery_cp == MaxSector &&
2928 mddev->reshape_position == MaxSector &&
2929 !test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
2930 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
2931 !test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
2934 return mddev->dev_sectors - sector_nr;
2942 max_sector = mddev->dev_sectors;
2943 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
2944 test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
2945 max_sector = mddev->resync_max_sectors;
2955 * mddev->curr_resync, but for recovery,
2959 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
2965 if (mddev->curr_resync < max_sector) { /* aborted */
2966 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
2967 md_bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
2971 raid10_find_virt(conf, mddev->curr_resync, i);
2972 md_bitmap_end_sync(mddev->bitmap, sect,
2977 if ((!mddev->bitmap || conf->fullsync)
2979 && test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
2994 md_bitmap_close_sync(mddev->bitmap);
3000 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
3001 return reshape_request(mddev, sector_nr, skipped);
3011 if (max_sector > mddev->resync_max)
3012 max_sector = mddev->resync_max; /* Don't do IO beyond here */
3044 if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
3079 if (sect >= mddev->resync_max_sectors) {
3090 must_sync = md_bitmap_start_sync(mddev->bitmap, sect,
3117 r10_bio->mddev = mddev;
3136 must_sync = md_bitmap_start_sync(mddev->bitmap, sect,
3252 &mddev->recovery))
3254 mdname(mddev));
3256 = mddev->recovery_disabled;
3262 rdev_dec_pending(mrdev, mddev);
3264 rdev_dec_pending(mreplace, mddev);
3267 rdev_dec_pending(mrdev, mddev);
3269 rdev_dec_pending(mreplace, mddev);
3308 md_bitmap_cond_end_sync(mddev->bitmap, sector_nr,
3309 mddev_is_clustered(mddev) &&
3312 if (!md_bitmap_start_sync(mddev->bitmap, sector_nr,
3313 &sync_blocks, mddev->degraded) &&
3315 &mddev->recovery)) {
3325 r10_bio->mddev = mddev;
3407 mddev);
3412 mddev);
3444 if (mddev_is_clustered(mddev) &&
3445 test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
3448 conf->cluster_sync_low = mddev->curr_resync_completed;
3451 md_cluster_ops->resync_info_update(mddev,
3455 } else if (mddev_is_clustered(mddev)) {
3475 mddev->curr_resync_completed, i);
3484 md_cluster_ops->resync_info_update(mddev,
3509 md_done_sync(mddev, sectors_skipped, 1);
3527 raid10_size(struct mddev *mddev, sector_t sectors, int raid_disks)
3530 struct r10conf *conf = mddev->private;
3577 static int setup_geo(struct geom *geo, struct mddev *mddev, enum geo_type new)
3583 layout = mddev->layout;
3584 chunk = mddev->chunk_sectors;
3585 disks = mddev->raid_disks - mddev->delta_disks;
3588 layout = mddev->new_layout;
3589 chunk = mddev->new_chunk_sectors;
3590 disks = mddev->raid_disks;
3595 layout = mddev->new_layout;
3596 chunk = mddev->new_chunk_sectors;
3597 disks = mddev->raid_disks + mddev->delta_disks;
3647 static struct r10conf *setup_conf(struct mddev *mddev)
3654 copies = setup_geo(&geo, mddev, geo_new);
3658 mdname(mddev), PAGE_SIZE);
3662 if (copies < 2 || copies > mddev->raid_disks) {
3664 mdname(mddev), mddev->new_layout);
3674 conf->mirrors = kcalloc(mddev->raid_disks + max(0, -mddev->delta_disks),
3695 calc_sectors(conf, mddev->dev_sectors);
3696 if (mddev->reshape_position == MaxSector) {
3700 if (setup_geo(&conf->prev, mddev, geo_old) != conf->copies) {
3704 conf->reshape_progress = mddev->reshape_position;
3721 conf->thread = md_register_thread(raid10d, mddev, "raid10");
3725 conf->mddev = mddev;
3739 blk_queue_io_opt(conf->mddev->queue, (conf->mddev->chunk_sectors << 9) *
3743 static int raid10_run(struct mddev *mddev)
3754 if (mddev_init_writes_pending(mddev) < 0)
3757 if (mddev->private == NULL) {
3758 conf = setup_conf(mddev);
3761 mddev->private = conf;
3763 conf = mddev->private;
3767 mddev->thread = conf->thread;
3770 if (mddev_is_clustered(conf->mddev)) {
3773 fc = (mddev->layout >> 8) & 255;
3774 fo = mddev->layout & (1<<16);
3782 if (mddev->queue) {
3783 blk_queue_max_discard_sectors(mddev->queue,
3784 mddev->chunk_sectors);
3785 blk_queue_max_write_same_sectors(mddev->queue, 0);
3786 blk_queue_max_write_zeroes_sectors(mddev->queue, 0);
3787 blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
3791 rdev_for_each(rdev, mddev) {
3812 if (!mddev->reshape_backwards)
3819 if (mddev->gendisk)
3820 disk_stack_limits(mddev->gendisk, rdev->bdev,
3830 if (mddev->queue) {
3833 mddev->queue);
3836 mddev->queue);
3841 mdname(mddev));
3855 mddev->degraded = 0;
3873 mddev->degraded++;
3885 disk->recovery_disabled = mddev->recovery_disabled - 1;
3888 if (mddev->recovery_cp != MaxSector)
3890 mdname(mddev));
3892 mdname(mddev), conf->geo.raid_disks - mddev->degraded,
3897 mddev->dev_sectors = conf->dev_sectors;
3898 size = raid10_size(mddev, 0, 0);
3899 md_set_array_sectors(mddev, size);
3900 mddev->resync_max_sectors = size;
3901 set_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);
3903 if (md_integrity_register(mddev))
3921 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
3922 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
3923 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
3924 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
3925 mddev->sync_thread = md_register_thread(md_do_sync, mddev,
3927 if (!mddev->sync_thread)
3934 md_unregister_thread(&mddev->thread);
3936 mddev->private = NULL;
3941 static void raid10_free(struct mddev *mddev, void *priv)
3946 static void raid10_quiesce(struct mddev *mddev, int quiesce)
3948 struct r10conf *conf = mddev->private;
3956 static int raid10_resize(struct mddev *mddev, sector_t sectors)
3970 struct r10conf *conf = mddev->private;
3973 if (mddev->reshape_position != MaxSector)
3979 oldsize = raid10_size(mddev, 0, 0);
3980 size = raid10_size(mddev, sectors, 0);
3981 if (mddev->external_size &&
3982 mddev->array_sectors > size)
3984 if (mddev->bitmap) {
3985 int ret = md_bitmap_resize(mddev->bitmap, size, 0, 0);
3989 md_set_array_sectors(mddev, size);
3990 if (sectors > mddev->dev_sectors &&
3991 mddev->recovery_cp > oldsize) {
3992 mddev->recovery_cp = oldsize;
3993 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3996 mddev->dev_sectors = conf->dev_sectors;
3997 mddev->resync_max_sectors = size;
4001 static void *raid10_takeover_raid0(struct mddev *mddev, sector_t size, int devs)
4006 if (mddev->degraded > 0) {
4008 mdname(mddev));
4014 mddev->new_level = 10;
4016 mddev->new_layout = (1<<8) + 2;
4017 mddev->new_chunk_sectors = mddev->chunk_sectors;
4018 mddev->delta_disks = mddev->raid_disks;
4019 mddev->raid_disks *= 2;
4021 mddev->recovery_cp = MaxSector;
4022 mddev->dev_sectors = size;
4024 conf = setup_conf(mddev);
4026 rdev_for_each(rdev, mddev)
4037 static void *raid10_takeover(struct mddev *mddev)
4044 if (mddev->level == 0) {
4046 raid0_conf = mddev->private;
4049 mdname(mddev));
4052 return raid10_takeover_raid0(mddev,
4059 static int raid10_check_reshape(struct mddev *mddev)
4075 struct r10conf *conf = mddev->private;
4081 if (setup_geo(&geo, mddev, geo_start) != conf->copies)
4088 if (mddev->array_sectors & geo.chunk_mask)
4097 if (mddev->delta_disks > 0) {
4100 kcalloc(mddev->raid_disks + mddev->delta_disks,
4166 static int raid10_start_reshape(struct mddev *mddev)
4182 struct r10conf *conf = mddev->private;
4187 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4190 if (setup_geo(&new, mddev, geo_start) != conf->copies)
4198 rdev_for_each(rdev, mddev) {
4205 if (!mddev->reshape_backwards)
4218 if (spares < mddev->delta_disks)
4232 setup_geo(&conf->geo, mddev, geo_start);
4234 if (mddev->reshape_backwards) {
4235 sector_t size = raid10_size(mddev, 0, 0);
4236 if (size < mddev->array_sectors) {
4239 mdname(mddev));
4242 mddev->resync_max_sectors = size;
4249 if (mddev->delta_disks && mddev->bitmap) {
4253 oldsize = raid10_size(mddev, 0, 0);
4254 newsize = raid10_size(mddev, 0, conf->geo.raid_disks);
4256 if (!mddev_is_clustered(mddev)) {
4257 ret = md_bitmap_resize(mddev->bitmap, newsize, 0, 0);
4264 rdev_for_each(rdev, mddev) {
4279 ret = md_bitmap_resize(mddev->bitmap, newsize, 0, 0);
4283 ret = md_cluster_ops->resize_bitmaps(mddev, newsize, oldsize);
4285 md_bitmap_resize(mddev->bitmap, oldsize, 0, 0);
4290 if (mddev->delta_disks > 0) {
4291 rdev_for_each(rdev, mddev)
4294 if (raid10_add_disk(mddev, rdev) == 0) {
4302 sysfs_link_rdev(mddev, rdev);
4315 mddev->degraded = calc_degraded(conf);
4317 mddev->raid_disks = conf->geo.raid_disks;
4318 mddev->reshape_position = conf->reshape_progress;
4319 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
4321 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
4322 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
4323 clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
4324 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
4325 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
4327 mddev->sync_thread = md_register_thread(md_do_sync, mddev,
4329 if (!mddev->sync_thread) {
4334 md_wakeup_thread(mddev->sync_thread);
4335 md_new_event(mddev);
4339 mddev->recovery = 0;
4342 mddev->raid_disks = conf->geo.raid_disks;
4343 rdev_for_each(rdev, mddev)
4348 mddev->reshape_position = MaxSector;
4384 static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
4391 * a chunk, but that is not a problem as mddev->reshape_position
4424 struct r10conf *conf = mddev->private;
4439 if (mddev->reshape_backwards &&
4440 conf->reshape_progress < raid10_size(mddev, 0, 0)) {
4441 sector_nr = (raid10_size(mddev, 0, 0)
4443 } else if (!mddev->reshape_backwards &&
4447 mddev->curr_resync_completed = sector_nr;
4448 sysfs_notify_dirent_safe(mddev->sysfs_completed);
4458 if (mddev->reshape_backwards) {
4508 mddev->reshape_position = conf->reshape_progress;
4509 if (mddev->reshape_backwards)
4510 mddev->curr_resync_completed = raid10_size(mddev, 0, 0)
4513 mddev->curr_resync_completed = conf->reshape_progress;
4515 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
4516 md_wakeup_thread(mddev->thread);
4517 wait_event(mddev->sb_wait, mddev->sb_flags == 0 ||
4518 test_bit(MD_RECOVERY_INTR, &mddev->recovery));
4519 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
4523 conf->reshape_safe = mddev->reshape_position;
4534 r10_bio->mddev = mddev;
4547 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
4551 read_bio = bio_alloc_mddev(GFP_KERNEL, RESYNC_PAGES, mddev);
4570 if (mddev_is_clustered(mddev) && conf->cluster_sync_high <= sector_nr) {
4588 md_cluster_ops->resync_info_update(mddev, conf->cluster_sync_low,
4658 if (mddev->reshape_backwards)
4667 static int handle_reshape_read_error(struct mddev *mddev,
4669 static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio)
4676 struct r10conf *conf = mddev->private;
4680 if (handle_reshape_read_error(mddev, r10_bio) < 0) {
4682 md_done_sync(mddev, r10_bio->sectors, 0);
4718 if (test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery))
4723 md_finish_reshape(conf->mddev);
4729 if (conf->mddev->queue)
4734 static void raid10_update_reshape_pos(struct mddev *mddev)
4736 struct r10conf *conf = mddev->private;
4739 md_cluster_ops->resync_info_get(mddev, &lo, &hi);
4740 if (((mddev->reshape_position <= hi) && (mddev->reshape_position >= lo))
4741 || mddev->reshape_position == MaxSector)
4742 conf->reshape_progress = mddev->reshape_position;
4747 static int handle_reshape_read_error(struct mddev *mddev,
4752 struct r10conf *conf = mddev->private;
4760 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
4796 rdev_dec_pending(rdev, mddev);
4811 &mddev->recovery);
4825 struct mddev *mddev = r10_bio->mddev;
4826 struct r10conf *conf = mddev->private;
4842 md_error(mddev, rdev);
4845 rdev_dec_pending(rdev, mddev);
4853 md_done_sync(r10_bio->mddev, r10_bio->sectors, 1);
4858 static void raid10_finish_reshape(struct mddev *mddev)
4860 struct r10conf *conf = mddev->private;
4862 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
4865 if (mddev->delta_disks > 0) {
4866 if (mddev->recovery_cp > mddev->resync_max_sectors) {
4867 mddev->recovery_cp = mddev->resync_max_sectors;
4868 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4870 mddev->resync_max_sectors = mddev->array_sectors;
4875 d < conf->geo.raid_disks - mddev->delta_disks;
4886 mddev->layout = mddev->new_layout;
4887 mddev->chunk_sectors = 1 << conf->geo.chunk_shift;
4888 mddev->reshape_position = MaxSector;
4889 mddev->delta_disks = 0;
4890 mddev->reshape_backwards = 0;