Lines matching defs:mddev in drivers/md/raid10.c
71 static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
73 static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio);
144 if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery) ||
145 test_bit(MD_RECOVERY_RESHAPE, &conf->mddev->recovery))
191 &conf->mddev->recovery)) {
277 struct r10conf *conf = r10_bio->mddev->private;
285 struct r10conf *conf = r10_bio->mddev->private;
301 struct mddev *mddev = r10_bio->mddev;
302 struct r10conf *conf = mddev->private;
312 md_wakeup_thread(mddev->thread);
323 struct r10conf *conf = r10_bio->mddev->private;
343 struct r10conf *conf = r10_bio->mddev->private;
382 struct r10conf *conf = r10_bio->mddev->private;
414 rdev_dec_pending(rdev, conf->mddev);
420 mdname(conf->mddev),
431 md_bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector,
435 md_write_end(r10_bio->mddev);
458 struct r10conf *conf = r10_bio->mddev->private;
483 md_error(rdev->mddev, rdev);
488 &rdev->mddev->recovery);
493 md_error(rdev->mddev, rdev);
557 rdev_dec_pending(rdev, conf->mddev);
656 conf->mddev->reshape_backwards)) {
761 if ((conf->mddev->recovery_cp < MaxSector
763 (mddev_is_clustered(conf->mddev) &&
764 md_cluster_ops->area_resyncing(conf->mddev, READ, this_sector,
911 raid1_prepare_flush_writes(conf->mddev->bitmap);
998 thread = rcu_dereference_protected(conf->mddev->thread, true);
1043 raid10_log(conf->mddev, "wait barrier");
1101 if (!test_bit(MD_RECOVERY_RESHAPE, &rdev->mddev->recovery) ||
1111 struct mddev *mddev = plug->cb.data;
1112 struct r10conf *conf = mddev->private;
1120 md_wakeup_thread(mddev->thread);
1127 raid1_prepare_flush_writes(mddev->bitmap);
1146 static bool regular_request_wait(struct mddev *mddev, struct r10conf *conf,
1154 while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
1162 raid10_log(conf->mddev, "wait reshape");
1172 static void raid10_read_request(struct mddev *mddev, struct bio *bio,
1175 struct r10conf *conf = mddev->private;
1214 if (!regular_request_wait(mddev, conf, bio, r10_bio->sectors))
1220 mdname(mddev), b,
1228 mdname(mddev),
1245 md_account_bio(mddev, &bio);
1248 read_bio = bio_alloc_clone(rdev->bdev, bio, gfp, &mddev->bio_set);
1262 if (mddev->gendisk)
1263 trace_block_bio_remap(read_bio, disk_devt(mddev->gendisk),
1269 static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio,
1277 struct r10conf *conf = mddev->private;
1292 mbio = bio_alloc_clone(rdev->bdev, bio, GFP_NOIO, &mddev->bio_set);
1308 if (conf->mddev->gendisk)
1309 trace_block_bio_remap(mbio, disk_devt(conf->mddev->gendisk),
1316 if (!raid1_add_bio_to_plug(mddev, mbio, raid10_unplug, conf->copies)) {
1320 md_wakeup_thread(mddev->thread);
1343 static void wait_blocked_dev(struct mddev *mddev, struct r10bio *r10_bio)
1346 struct r10conf *conf = mddev->private;
1399 raid10_log(conf->mddev, "%s wait rdev %d blocked",
1401 md_wait_for_blocked_rdev(blocked_rdev, mddev);
1407 static void raid10_write_request(struct mddev *mddev, struct bio *bio,
1410 struct r10conf *conf = mddev->private;
1415 if ((mddev_is_clustered(mddev) &&
1416 md_cluster_ops->area_resyncing(mddev, WRITE,
1428 if (!md_cluster_ops->area_resyncing(mddev, WRITE,
1437 if (!regular_request_wait(mddev, conf, bio, sectors))
1439 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
1440 (mddev->reshape_backwards
1446 mddev->reshape_position = conf->reshape_progress;
1447 set_mask_bits(&mddev->sb_flags, 0,
1449 md_wakeup_thread(mddev->thread);
1455 raid10_log(conf->mddev, "wait reshape metadata");
1456 wait_event(mddev->sb_wait,
1457 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
1459 conf->reshape_safe = mddev->reshape_position;
1475 wait_blocked_dev(mddev, r10_bio);
1554 md_account_bio(mddev, &bio);
1557 md_bitmap_startwrite(mddev->bitmap, r10_bio->sector, r10_bio->sectors, 0);
1561 raid10_write_one_disk(mddev, r10_bio, bio, false, i);
1563 raid10_write_one_disk(mddev, r10_bio, bio, true, i);
1568 static void __make_request(struct mddev *mddev, struct bio *bio, int sectors)
1570 struct r10conf *conf = mddev->private;
1578 r10_bio->mddev = mddev;
1586 raid10_read_request(mddev, bio, r10_bio, true);
1588 raid10_write_request(mddev, bio, r10_bio);
1593 struct r10conf *conf = r10bio->mddev->private;
1605 md_write_end(r10bio->mddev);
1616 struct r10conf *conf = r10_bio->mddev->private;
1641 rdev_dec_pending(rdev, conf->mddev);
1650 static int raid10_handle_discard(struct mddev *mddev, struct bio *bio)
1652 struct r10conf *conf = mddev->private;
1671 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
1684 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
1762 r10_bio->mddev = mddev;
1766 wait_blocked_dev(mddev, r10_bio);
1832 dev_start = (first_stripe_index + 1) * mddev->chunk_sectors;
1834 dev_start = first_stripe_index * mddev->chunk_sectors;
1839 dev_end = (last_stripe_index + 1) * mddev->chunk_sectors;
1841 dev_end = last_stripe_index * mddev->chunk_sectors;
1854 &mddev->bio_set);
1860 md_submit_discard_bio(mddev, rdev, mbio,
1868 &mddev->bio_set);
1874 md_submit_discard_bio(mddev, rrdev, rbio,
1900 static bool raid10_make_request(struct mddev *mddev, struct bio *bio)
1902 struct r10conf *conf = mddev->private;
1908 && md_flush_request(mddev, bio))
1911 if (!md_write_start(mddev, bio))
1915 if (!raid10_handle_discard(mddev, bio))
1930 __make_request(mddev, bio, sectors);
1937 static void raid10_status(struct seq_file *seq, struct mddev *mddev)
1939 struct r10conf *conf = mddev->private;
1943 seq_printf(seq, " %dK chunks", mddev->chunk_sectors / 2);
1955 conf->geo.raid_disks - mddev->degraded);
2019 * @mddev: affected md device.
2022 * The routine acknowledges &rdev failure and determines new @mddev state.
2024 * - &MD_BROKEN flag is set in &mddev->flags.
2027 * - &mddev->degraded is bumped.
2030 * &mddev->fail_last_dev is off.
2032 static void raid10_error(struct mddev *mddev, struct md_rdev *rdev)
2034 struct r10conf *conf = mddev->private;
2040 set_bit(MD_BROKEN, &mddev->flags);
2042 if (!mddev->fail_last_dev) {
2048 mddev->degraded++;
2050 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
2053 set_mask_bits(&mddev->sb_flags, 0,
2058 mdname(mddev), rdev->bdev,
2059 mdname(mddev), conf->geo.raid_disks - mddev->degraded);
2072 pr_debug(" --- wd:%d rd:%d\n", conf->geo.raid_disks - conf->mddev->degraded,
2095 static int raid10_spare_active(struct mddev *mddev)
2098 struct r10conf *conf = mddev->private;
2136 mddev->degraded -= count;
2143 static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
2145 struct r10conf *conf = mddev->private;
2152 if (mddev->recovery_cp < MaxSector)
2160 if (md_integrity_add_rdev(rdev, mddev))
2174 if (p->recovery_disabled == mddev->recovery_disabled)
2183 if (mddev->gendisk)
2184 disk_stack_limits(mddev->gendisk, rdev->bdev,
2188 p->recovery_disabled = mddev->recovery_disabled - 1;
2203 if (mddev->gendisk)
2204 disk_stack_limits(mddev->gendisk, rdev->bdev,
2214 static int raid10_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
2216 struct r10conf *conf = mddev->private;
2223 if (unlikely(number >= mddev->raid_disks))
2242 mddev->recovery_disabled != p->recovery_disabled &&
2270 err = md_integrity_register(mddev);
2280 struct r10conf *conf = r10_bio->mddev->private;
2294 rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev);
2307 struct r10conf *conf = r10_bio->mddev->private;
2323 struct mddev *mddev = r10_bio->mddev;
2334 md_done_sync(mddev, s, 1);
2351 struct mddev *mddev = r10_bio->mddev;
2352 struct r10conf *conf = mddev->private;
2368 md_error(mddev, rdev);
2373 &rdev->mddev->recovery);
2382 rdev_dec_pending(rdev, mddev);
2403 static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
2405 struct r10conf *conf = mddev->private;
2462 atomic64_add(r10_bio->sectors, &mddev->resync_mismatches);
2463 if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
2468 md_error(rdev->mddev, rdev);
2519 md_done_sync(mddev, r10_bio->sectors, 1);
2543 struct mddev *mddev = r10_bio->mddev;
2544 struct r10conf *conf = mddev->private;
2582 &rdev->mddev->recovery);
2600 mdname(mddev));
2603 = mddev->recovery_disabled;
2605 &mddev->recovery);
2617 static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio)
2619 struct r10conf *conf = mddev->private;
2664 static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
2710 &rdev->mddev->recovery);
2714 md_error(rdev->mddev, rdev);
2726 static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10bio *r10_bio)
2731 int max_read_errors = atomic_read(&mddev->max_corr_read_errors);
2744 check_decay_read_errors(mddev, rdev);
2748 mdname(mddev), rdev->bdev,
2751 mdname(mddev), rdev->bdev);
2752 md_error(mddev, rdev);
2786 rdev_dec_pending(rdev, mddev);
2810 md_error(mddev, rdev);
2840 mdname(mddev), s,
2847 mdname(mddev),
2850 rdev_dec_pending(rdev, mddev);
2874 mdname(mddev), s,
2880 mdname(mddev),
2885 mdname(mddev), s,
2893 rdev_dec_pending(rdev, mddev);
2906 struct mddev *mddev = r10_bio->mddev;
2907 struct r10conf *conf = mddev->private;
2943 &mddev->bio_set);
2964 static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio)
2968 struct r10conf *conf = mddev->private;
2983 if (mddev->ro)
2987 fix_read_error(conf, mddev, r10_bio);
2990 md_error(mddev, rdev);
2992 rdev_dec_pending(rdev, mddev);
2994 raid10_read_request(mddev, r10_bio->master_bio, r10_bio, false);
3031 md_error(conf->mddev, rdev);
3048 md_error(conf->mddev, rdev);
3063 rdev_dec_pending(rdev, conf->mddev);
3067 md_error(conf->mddev, rdev);
3071 rdev_dec_pending(rdev, conf->mddev);
3080 rdev_dec_pending(rdev, conf->mddev);
3093 md_wakeup_thread(conf->mddev->thread);
3105 struct mddev *mddev = thread->mddev;
3108 struct r10conf *conf = mddev->private;
3112 md_check_recovery(mddev);
3115 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
3118 if (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
3129 if (mddev->degraded)
3154 mddev = r10_bio->mddev;
3155 conf = mddev->private;
3160 reshape_request_write(mddev, r10_bio);
3162 sync_request_write(mddev, r10_bio);
3164 recovery_request_write(mddev, r10_bio);
3166 handle_read_error(mddev, r10_bio);
3171 if (mddev->sb_flags & ~(1<<MD_SB_CHANGE_PENDING))
3172 md_check_recovery(mddev);
3203 if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery) ||
3204 test_bit(MD_RECOVERY_RESHAPE, &conf->mddev->recovery))
3250 window_size = (chunks + extra_chunk) * conf->mddev->chunk_sectors;
3271 * We differentiate by looking at MD_RECOVERY_SYNC in mddev->recovery.
3293 static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
3296 struct r10conf *conf = mddev->private;
3313 if (mddev->bitmap == NULL &&
3314 mddev->recovery_cp == MaxSector &&
3315 mddev->reshape_position == MaxSector &&
3316 !test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
3317 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
3318 !test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
3321 return mddev->dev_sectors - sector_nr;
3329 max_sector = mddev->dev_sectors;
3330 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
3331 test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
3332 max_sector = mddev->resync_max_sectors;
3342 * mddev->curr_resync, but for recovery,
3346 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
3352 if (mddev->curr_resync < max_sector) { /* aborted */
3353 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
3354 md_bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
3358 raid10_find_virt(conf, mddev->curr_resync, i);
3359 md_bitmap_end_sync(mddev->bitmap, sect,
3364 if ((!mddev->bitmap || conf->fullsync)
3366 && test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
3381 md_bitmap_close_sync(mddev->bitmap);
3387 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
3388 return reshape_request(mddev, sector_nr, skipped);
3391 pr_err("md/raid10:%s: %s fails\n", mdname(mddev),
3392 test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ? "resync" : "recovery");
3394 !test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
3400 mddev->recovery_disabled;
3411 if (max_sector > mddev->resync_max)
3412 max_sector = mddev->resync_max; /* Don't do IO beyond here */
3444 if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
3477 if (sect >= mddev->resync_max_sectors) {
3488 must_sync = md_bitmap_start_sync(mddev->bitmap, sect,
3516 r10_bio->mddev = mddev;
3535 must_sync = md_bitmap_start_sync(mddev->bitmap, sect,
3651 &mddev->recovery))
3653 mdname(mddev));
3655 = mddev->recovery_disabled;
3664 rdev_dec_pending(mrdev, mddev);
3666 rdev_dec_pending(mreplace, mddev);
3670 rdev_dec_pending(mrdev, mddev);
3672 rdev_dec_pending(mreplace, mddev);
3711 md_bitmap_cond_end_sync(mddev->bitmap, sector_nr,
3712 mddev_is_clustered(mddev) &&
3715 if (!md_bitmap_start_sync(mddev->bitmap, sector_nr,
3716 &sync_blocks, mddev->degraded) &&
3718 &mddev->recovery)) {
3728 r10_bio->mddev = mddev;
3810 mddev);
3815 mddev);
3847 if (mddev_is_clustered(mddev) &&
3848 test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
3851 conf->cluster_sync_low = mddev->curr_resync_completed;
3854 md_cluster_ops->resync_info_update(mddev,
3858 } else if (mddev_is_clustered(mddev)) {
3878 mddev->curr_resync_completed, i);
3887 md_cluster_ops->resync_info_update(mddev,
3912 md_done_sync(mddev, sectors_skipped, 1);
3930 raid10_size(struct mddev *mddev, sector_t sectors, int raid_disks)
3933 struct r10conf *conf = mddev->private;
3980 static int setup_geo(struct geom *geo, struct mddev *mddev, enum geo_type new)
3986 layout = mddev->layout;
3987 chunk = mddev->chunk_sectors;
3988 disks = mddev->raid_disks - mddev->delta_disks;
3991 layout = mddev->new_layout;
3992 chunk = mddev->new_chunk_sectors;
3993 disks = mddev->raid_disks;
3998 layout = mddev->new_layout;
3999 chunk = mddev->new_chunk_sectors;
4000 disks = mddev->raid_disks + mddev->delta_disks;
4050 static struct r10conf *setup_conf(struct mddev *mddev)
4057 copies = setup_geo(&geo, mddev, geo_new);
4061 mdname(mddev), PAGE_SIZE);
4065 if (copies < 2 || copies > mddev->raid_disks) {
4067 mdname(mddev), mddev->new_layout);
4077 conf->mirrors = kcalloc(mddev->raid_disks + max(0, -mddev->delta_disks),
4098 calc_sectors(conf, mddev->dev_sectors);
4099 if (mddev->reshape_position == MaxSector) {
4103 if (setup_geo(&conf->prev, mddev, geo_old) != conf->copies) {
4107 conf->reshape_progress = mddev->reshape_position;
4125 md_register_thread(raid10d, mddev, "raid10"));
4129 conf->mddev = mddev;
4143 blk_queue_io_opt(conf->mddev->queue, (conf->mddev->chunk_sectors << 9) *
4147 static int raid10_run(struct mddev *mddev)
4157 if (mddev_init_writes_pending(mddev) < 0)
4160 if (mddev->private == NULL) {
4161 conf = setup_conf(mddev);
4164 mddev->private = conf;
4166 conf = mddev->private;
4170 rcu_assign_pointer(mddev->thread, conf->thread);
4173 if (mddev_is_clustered(conf->mddev)) {
4176 fc = (mddev->layout >> 8) & 255;
4177 fo = mddev->layout & (1<<16);
4185 if (mddev->queue) {
4186 blk_queue_max_write_zeroes_sectors(mddev->queue, 0);
4187 blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
4191 rdev_for_each(rdev, mddev) {
4212 if (!mddev->reshape_backwards)
4219 if (mddev->gendisk)
4220 disk_stack_limits(mddev->gendisk, rdev->bdev,
4230 mdname(mddev));
4244 mddev->degraded = 0;
4262 mddev->degraded++;
4274 disk->recovery_disabled = mddev->recovery_disabled - 1;
4277 if (mddev->recovery_cp != MaxSector)
4279 mdname(mddev));
4281 mdname(mddev), conf->geo.raid_disks - mddev->degraded,
4286 mddev->dev_sectors = conf->dev_sectors;
4287 size = raid10_size(mddev, 0, 0);
4288 md_set_array_sectors(mddev, size);
4289 mddev->resync_max_sectors = size;
4290 set_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);
4292 if (md_integrity_register(mddev))
4310 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
4311 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
4312 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
4313 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
4314 rcu_assign_pointer(mddev->sync_thread,
4315 md_register_thread(md_do_sync, mddev, "reshape"));
4316 if (!mddev->sync_thread)
4323 md_unregister_thread(mddev, &mddev->thread);
4325 mddev->private = NULL;
4330 static void raid10_free(struct mddev *mddev, void *priv)
4335 static void raid10_quiesce(struct mddev *mddev, int quiesce)
4337 struct r10conf *conf = mddev->private;
4345 static int raid10_resize(struct mddev *mddev, sector_t sectors)
4359 struct r10conf *conf = mddev->private;
4362 if (mddev->reshape_position != MaxSector)
4368 oldsize = raid10_size(mddev, 0, 0);
4369 size = raid10_size(mddev, sectors, 0);
4370 if (mddev->external_size &&
4371 mddev->array_sectors > size)
4373 if (mddev->bitmap) {
4374 int ret = md_bitmap_resize(mddev->bitmap, size, 0, 0);
4378 md_set_array_sectors(mddev, size);
4379 if (sectors > mddev->dev_sectors &&
4380 mddev->recovery_cp > oldsize) {
4381 mddev->recovery_cp = oldsize;
4382 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4385 mddev->dev_sectors = conf->dev_sectors;
4386 mddev->resync_max_sectors = size;
4390 static void *raid10_takeover_raid0(struct mddev *mddev, sector_t size, int devs)
4395 if (mddev->degraded > 0) {
4397 mdname(mddev));
4403 mddev->new_level = 10;
4405 mddev->new_layout = (1<<8) + 2;
4406 mddev->new_chunk_sectors = mddev->chunk_sectors;
4407 mddev->delta_disks = mddev->raid_disks;
4408 mddev->raid_disks *= 2;
4410 mddev->recovery_cp = MaxSector;
4411 mddev->dev_sectors = size;
4413 conf = setup_conf(mddev);
4415 rdev_for_each(rdev, mddev)
4425 static void *raid10_takeover(struct mddev *mddev)
4432 if (mddev->level == 0) {
4434 raid0_conf = mddev->private;
4437 mdname(mddev));
4440 return raid10_takeover_raid0(mddev,
4447 static int raid10_check_reshape(struct mddev *mddev)
4463 struct r10conf *conf = mddev->private;
4469 if (setup_geo(&geo, mddev, geo_start) != conf->copies)
4476 if (mddev->array_sectors & geo.chunk_mask)
4485 if (mddev->delta_disks > 0) {
4488 kcalloc(mddev->raid_disks + mddev->delta_disks,
4554 static int raid10_start_reshape(struct mddev *mddev)
4570 struct r10conf *conf = mddev->private;
4575 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
4578 if (setup_geo(&new, mddev, geo_start) != conf->copies)
4586 rdev_for_each(rdev, mddev) {
4593 if (!mddev->reshape_backwards)
4606 if (spares < mddev->delta_disks)
4620 setup_geo(&conf->geo, mddev, geo_start);
4622 if (mddev->reshape_backwards) {
4623 sector_t size = raid10_size(mddev, 0, 0);
4624 if (size < mddev->array_sectors) {
4627 mdname(mddev));
4630 mddev->resync_max_sectors = size;
4637 if (mddev->delta_disks && mddev->bitmap) {
4641 oldsize = raid10_size(mddev, 0, 0);
4642 newsize = raid10_size(mddev, 0, conf->geo.raid_disks);
4644 if (!mddev_is_clustered(mddev)) {
4645 ret = md_bitmap_resize(mddev->bitmap, newsize, 0, 0);
4652 rdev_for_each(rdev, mddev) {
4667 ret = md_bitmap_resize(mddev->bitmap, newsize, 0, 0);
4671 ret = md_cluster_ops->resize_bitmaps(mddev, newsize, oldsize);
4673 md_bitmap_resize(mddev->bitmap, oldsize, 0, 0);
4678 if (mddev->delta_disks > 0) {
4679 rdev_for_each(rdev, mddev)
4682 if (raid10_add_disk(mddev, rdev) == 0) {
4690 sysfs_link_rdev(mddev, rdev);
4703 mddev->degraded = calc_degraded(conf);
4705 mddev->raid_disks = conf->geo.raid_disks;
4706 mddev->reshape_position = conf->reshape_progress;
4707 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
4709 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
4710 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
4711 clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
4712 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
4713 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
4715 rcu_assign_pointer(mddev->sync_thread,
4716 md_register_thread(md_do_sync, mddev, "reshape"));
4717 if (!mddev->sync_thread) {
4722 md_wakeup_thread(mddev->sync_thread);
4727 mddev->recovery = 0;
4730 mddev->raid_disks = conf->geo.raid_disks;
4731 rdev_for_each(rdev, mddev)
4736 mddev->reshape_position = MaxSector;
4772 static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
4779 * a chunk, but that is not a problem as mddev->reshape_position
4812 struct r10conf *conf = mddev->private;
4827 if (mddev->reshape_backwards &&
4828 conf->reshape_progress < raid10_size(mddev, 0, 0)) {
4829 sector_nr = (raid10_size(mddev, 0, 0)
4831 } else if (!mddev->reshape_backwards &&
4835 mddev->curr_resync_completed = sector_nr;
4836 sysfs_notify_dirent_safe(mddev->sysfs_completed);
4846 if (mddev->reshape_backwards) {
4896 mddev->reshape_position = conf->reshape_progress;
4897 if (mddev->reshape_backwards)
4898 mddev->curr_resync_completed = raid10_size(mddev, 0, 0)
4901 mddev->curr_resync_completed = conf->reshape_progress;
4903 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
4904 md_wakeup_thread(mddev->thread);
4905 wait_event(mddev->sb_wait, mddev->sb_flags == 0 ||
4906 test_bit(MD_RECOVERY_INTR, &mddev->recovery));
4907 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
4911 conf->reshape_safe = mddev->reshape_position;
4922 r10_bio->mddev = mddev;
4935 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
4940 GFP_KERNEL, &mddev->bio_set);
4952 if (mddev_is_clustered(mddev) && conf->cluster_sync_high <= sector_nr) {
4970 md_cluster_ops->resync_info_update(mddev, conf->cluster_sync_low,
5040 if (mddev->reshape_backwards)
5049 static int handle_reshape_read_error(struct mddev *mddev,
5051 static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio)
5058 struct r10conf *conf = mddev->private;
5062 if (handle_reshape_read_error(mddev, r10_bio) < 0) {
5064 md_done_sync(mddev, r10_bio->sectors, 0);
5100 if (test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery))
5105 md_finish_reshape(conf->mddev);
5111 if (conf->mddev->queue)
5116 static void raid10_update_reshape_pos(struct mddev *mddev)
5118 struct r10conf *conf = mddev->private;
5121 md_cluster_ops->resync_info_get(mddev, &lo, &hi);
5122 if (((mddev->reshape_position <= hi) && (mddev->reshape_position >= lo))
5123 || mddev->reshape_position == MaxSector)
5124 conf->reshape_progress = mddev->reshape_position;
5129 static int handle_reshape_read_error(struct mddev *mddev,
5134 struct r10conf *conf = mddev->private;
5142 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
5178 rdev_dec_pending(rdev, mddev);
5193 &mddev->recovery);
5207 struct mddev *mddev = r10_bio->mddev;
5208 struct r10conf *conf = mddev->private;
5224 md_error(mddev, rdev);
5227 rdev_dec_pending(rdev, mddev);
5235 md_done_sync(r10_bio->mddev, r10_bio->sectors, 1);
5240 static void raid10_finish_reshape(struct mddev *mddev)
5242 struct r10conf *conf = mddev->private;
5244 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
5247 if (mddev->delta_disks > 0) {
5248 if (mddev->recovery_cp > mddev->resync_max_sectors) {
5249 mddev->recovery_cp = mddev->resync_max_sectors;
5250 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5252 mddev->resync_max_sectors = mddev->array_sectors;
5257 d < conf->geo.raid_disks - mddev->delta_disks;
5268 mddev->layout = mddev->new_layout;
5269 mddev->chunk_sectors = 1 << conf->geo.chunk_shift;
5270 mddev->reshape_position = MaxSector;
5271 mddev->delta_disks = 0;
5272 mddev->reshape_backwards = 0;
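
Taken together, the matches above trace one recurring ownership pattern: struct mddev holds the personality state via mddev->private, the RAID10 personality keeps a back-pointer in conf->mddev, and every in-flight struct r10bio carries r10_bio->mddev so completion and daemon paths can recover both. The following is a minimal sketch of that linkage only; the struct layouts are heavily simplified stand-ins (the real definitions live in the kernel's drivers/md/md.h and drivers/md/raid10.h), and the two example_* functions are hypothetical names used purely for illustration, not functions from the driver.

/*
 * Simplified sketch of the pointer relationships visible in the matches
 * above. Only the linkage shown by the grep hits is reproduced; all other
 * fields of the real structures are omitted.
 */
struct mddev;

struct r10conf {
	struct mddev	*mddev;		/* back-pointer: conf->mddev->recovery, mdname(conf->mddev), ... */
	/* ... geometry, mirrors, barrier state ... */
};

struct mddev {
	void		*private;	/* raid10 stores its struct r10conf here (lines 302, 1175, ...) */
	/* ... recovery flags, thread, bitmap, sb_flags ... */
};

struct r10bio {
	struct mddev	*mddev;		/* set per request, e.g. line 1578 in __make_request() */
	/* ... sector, sectors, devs[] ... */
};

/* Typical entry-point pattern (cf. raid10_make_request, raid10_run): */
static void example_personality_op(struct mddev *mddev)
{
	struct r10conf *conf = mddev->private;	/* recover per-array state from the md device */

	(void)conf;	/* operate on conf, test mddev->recovery bits, wake mddev->thread */
}

/* Typical completion-side pattern (cf. the r10_bio->mddev->private hits around lines 277-458): */
static void example_endio(struct r10bio *r10_bio)
{
	struct r10conf *conf = r10_bio->mddev->private;	/* bio -> owning array -> conf */

	(void)conf;
}

In the driver itself the same chain runs in both directions: entry points start from mddev and reach conf, while endio handlers and raid10d start from an r10bio and reach the mddev (and from there the conf), which is why nearly every hit in the listing is one of mddev->private, conf->mddev or r10_bio->mddev.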