Lines matching defs:mddev in drivers/md/raid5.c (struct mddev references in the md RAID4/5/6 personality)

190 md_wakeup_thread(conf->mddev->thread);
262 md_wakeup_thread(conf->mddev->thread);
268 md_wakeup_thread(conf->mddev->thread);
349 md_wakeup_thread(conf->mddev->thread);
395 if (unlikely(!conf->mddev->thread) ||
400 md_wakeup_thread(conf->mddev->thread);
691 int degraded = conf->mddev->degraded;
693 if (test_bit(MD_BROKEN, &conf->mddev->flags))
696 if (conf->mddev->reshape_position != MaxSector)
909 md_wakeup_thread(conf->mddev->thread);
1145 if (!conf->mddev->external &&
1146 conf->mddev->sb_flags) {
1151 md_check_recovery(conf->mddev);
1159 md_wait_for_blocked_rdev(rdev, conf->mddev);
1162 rdev_dec_pending(rdev, conf->mddev);
1225 if (conf->mddev->gendisk)
1227 bi, disk_devt(conf->mddev->gendisk),
1275 if (conf->mddev->gendisk)
1277 rbi, disk_devt(conf->mddev->gendisk),
2362 if (conf->mddev->gendisk)
2364 "raid%d-%s", conf->level, mdname(conf->mddev));
2367 "raid%d-%p", conf->level, conf->mddev);
2439 mddev_suspend(conf->mddev);
2453 mddev_resume(conf->mddev);
2494 md_allow_write(conf->mddev);
2707 mdname(conf->mddev), RAID5_STRIPE_SECTORS(conf),
2736 mdname(conf->mddev),
2739 else if (conf->mddev->degraded >= conf->max_degraded) {
2743 mdname(conf->mddev),
2751 mdname(conf->mddev),
2758 mdname(conf->mddev),
2762 mdname(conf->mddev), bdn);
2784 md_error(conf->mddev, rdev);
2787 rdev_dec_pending(rdev, conf->mddev);
2833 md_error(conf->mddev, rdev);
2845 &rdev->mddev->recovery);
2858 rdev_dec_pending(rdev, conf->mddev);
2873 static void raid5_error(struct mddev *mddev, struct md_rdev *rdev)
2876 struct r5conf *conf = mddev->private;
2881 mdname(mddev), bdevname(rdev->bdev, b));
2886 mddev->degraded = raid5_calc_degraded(conf);
2889 set_bit(MD_BROKEN, &conf->mddev->flags);
2890 conf->recovery_disabled = mddev->recovery_disabled;
2893 mdname(mddev), mddev->degraded, conf->raid_disks);
2896 mdname(mddev), conf->raid_disks - mddev->degraded);
2900 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
2903 set_mask_bits(&mddev->sb_flags, 0,
2905 r5c_update_on_rdev_error(mddev, rdev);
3228 mdname(conf->mddev));
3472 md_write_inc(conf->mddev, bi);
3493 if (conf->mddev->bitmap && firstwrite) {
3508 md_bitmap_startwrite(conf->mddev->bitmap, sh->sector,
3572 md_error(conf->mddev, rdev);
3573 rdev_dec_pending(rdev, conf->mddev);
3594 md_write_end(conf->mddev);
3599 md_bitmap_endwrite(conf->mddev->bitmap, sh->sector,
3615 md_write_end(conf->mddev);
3645 md_bitmap_endwrite(conf->mddev->bitmap, sh->sector,
3657 md_wakeup_thread(conf->mddev->thread);
3680 if (test_bit(MD_RECOVERY_RECOVER, &conf->mddev->recovery)) {
3704 conf->mddev->recovery_disabled;
3706 md_done_sync(conf->mddev, RAID5_STRIPE_SECTORS(conf), !abort);
3720 || rdev->mddev->recovery_cp <= sh->sector))
3813 sh->sector < sh->raid_conf->mddev->recovery_cp)
3993 md_write_end(conf->mddev);
3997 md_bitmap_endwrite(conf->mddev->bitmap, sh->sector,
4054 md_wakeup_thread(conf->mddev->thread);
4081 sector_t recovery_cp = conf->mddev->recovery_cp;
4132 if (conf->mddev->queue)
4133 blk_add_trace_msg(conf->mddev->queue,
4212 if (rcw && conf->mddev->queue)
4213 blk_add_trace_msg(conf->mddev->queue, "raid5 rcw %llu %d %d %d",
4301 atomic64_add(RAID5_STRIPE_SECTORS(conf), &conf->mddev->resync_mismatches);
4302 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) {
4306 "%llu-%llu\n", mdname(conf->mddev),
4428 mdname(conf->mddev),
4466 atomic64_add(RAID5_STRIPE_SECTORS(conf), &conf->mddev->resync_mismatches);
4467 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) {
4471 "%llu-%llu\n", mdname(conf->mddev),
4767 sh->sector >= conf->mddev->recovery_cp ||
4768 test_bit(MD_RECOVERY_REQUESTED, &(conf->mddev->recovery)))
4935 test_bit(MD_SB_CHANGE_PENDING, &conf->mddev->sb_flags)) {
4947 rdev_dec_pending(s.blocked_rdev, conf->mddev);
5135 md_done_sync(conf->mddev, RAID5_STRIPE_SECTORS(conf), 1);
5144 if (s.failed <= conf->max_degraded && !conf->mddev->ro)
5200 md_done_sync(conf->mddev, RAID5_STRIPE_SECTORS(conf), 1);
5210 if (conf->mddev->external)
5212 conf->mddev);
5219 conf->mddev);
5231 md_error(conf->mddev, rdev);
5232 rdev_dec_pending(rdev, conf->mddev);
5238 rdev_dec_pending(rdev, conf->mddev);
5247 rdev_dec_pending(rdev, conf->mddev);
5264 md_wakeup_thread(conf->mddev->thread);
5304 static int in_chunk_boundary(struct mddev *mddev, struct bio *bio)
5306 struct r5conf *conf = mddev->private;
5332 md_wakeup_thread(conf->mddev->thread);
5365 struct mddev *mddev;
5374 mddev = rdev->mddev;
5375 conf = mddev->private;
5377 rdev_dec_pending(rdev, conf->mddev);
5391 static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio)
5393 struct r5conf *conf = mddev->private;
5399 if (!in_chunk_boundary(mddev, raid_bio)) {
5406 align_bi = bio_clone_fast(raid_bio, GFP_NOIO, &mddev->bio_set);
5454 rdev_dec_pending(rdev, mddev);
5468 if (mddev->gendisk)
5470 align_bi, disk_devt(mddev->gendisk),
5481 static struct bio *chunk_aligned_read(struct mddev *mddev, struct bio *raid_bio)
5485 unsigned chunk_sects = mddev->chunk_sectors;
5489 struct r5conf *conf = mddev->private;
5496 if (!raid5_read_one_chunk(mddev, raid_bio))
5615 struct mddev *mddev = cb->cb.data;
5616 struct r5conf *conf = mddev->private;
5644 if (mddev->queue)
5645 trace_block_unplug(mddev->queue, cnt, !from_schedule);
5649 static void release_stripe_plug(struct mddev *mddev,
5653 raid5_unplug, mddev,
5677 static void make_discard_request(struct mddev *mddev, struct bio *bi)
5679 struct r5conf *conf = mddev->private;
5684 if (mddev->reshape_position != MaxSector)
5738 md_write_inc(mddev, bi);
5742 if (conf->mddev->bitmap) {
5746 md_bitmap_startwrite(mddev->bitmap,
5758 release_stripe_plug(mddev, sh);
5764 static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
5766 struct r5conf *conf = mddev->private;
5782 if (md_flush_request(mddev, bi))
5793 if (!md_write_start(mddev, bi))
5800 if (rw == READ && mddev->degraded == 0 &&
5801 mddev->reshape_position == MaxSector) {
5802 bi = chunk_aligned_read(mddev, bi);
5808 make_discard_request(mddev, bi);
5809 md_write_end(mddev);
5839 if (mddev->reshape_backwards
5844 if (mddev->reshape_backwards
5877 if (mddev->reshape_backwards
5904 md_wakeup_thread(mddev->thread);
5922 release_stripe_plug(mddev, sh);
5932 md_write_end(mddev);
5937 static sector_t raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks);
5939 static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *skipped)
5950 struct r5conf *conf = mddev->private;
5967 if (mddev->reshape_backwards &&
5968 conf->reshape_progress < raid5_size(mddev, 0, 0)) {
5969 sector_nr = raid5_size(mddev, 0, 0)
5971 } else if (mddev->reshape_backwards &&
5975 } else if (!mddev->reshape_backwards &&
5980 mddev->curr_resync_completed = sector_nr;
5981 sysfs_notify_dirent_safe(mddev->sysfs_completed);
6007 if (mddev->reshape_backwards) {
6025 if (mddev->reshape_backwards) {
6028 BUG_ON((mddev->dev_sectors &
6063 if ((mddev->reshape_backwards
6070 || test_bit(MD_RECOVERY_INTR, &mddev->recovery));
6073 mddev->reshape_position = conf->reshape_progress;
6074 mddev->curr_resync_completed = sector_nr;
6075 if (!mddev->reshape_backwards)
6077 rdev_for_each(rdev, mddev)
6085 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
6086 md_wakeup_thread(mddev->thread);
6087 wait_event(mddev->sb_wait, mddev->sb_flags == 0 ||
6088 test_bit(MD_RECOVERY_INTR, &mddev->recovery));
6089 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
6092 conf->reshape_safe = mddev->reshape_position;
6095 sysfs_notify_dirent_safe(mddev->sysfs_completed);
6116 if (s < raid5_size(mddev, 0, 0)) {
6131 if (mddev->reshape_backwards)
6148 if (last_sector >= mddev->dev_sectors)
6149 last_sector = mddev->dev_sectors - 1;
6171 if (mddev->curr_resync_completed > mddev->resync_max ||
6172 (sector_nr - mddev->curr_resync_completed) * 2
6173 >= mddev->resync_max - mddev->curr_resync_completed) {
6177 || test_bit(MD_RECOVERY_INTR, &mddev->recovery));
6180 mddev->reshape_position = conf->reshape_progress;
6181 mddev->curr_resync_completed = sector_nr;
6182 if (!mddev->reshape_backwards)
6184 rdev_for_each(rdev, mddev)
6191 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
6192 md_wakeup_thread(mddev->thread);
6193 wait_event(mddev->sb_wait,
6194 !test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags)
6195 || test_bit(MD_RECOVERY_INTR, &mddev->recovery));
6196 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
6199 conf->reshape_safe = mddev->reshape_position;
6202 sysfs_notify_dirent_safe(mddev->sysfs_completed);
6208 static inline sector_t raid5_sync_request(struct mddev *mddev, sector_t sector_nr,
6211 struct r5conf *conf = mddev->private;
6213 sector_t max_sector = mddev->dev_sectors;
6221 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
6226 if (mddev->curr_resync < max_sector) /* aborted */
6227 md_bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
6231 md_bitmap_close_sync(mddev->bitmap);
6239 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
6240 return reshape_request(mddev, sector_nr, skipped);
6252 if (mddev->degraded >= conf->max_degraded &&
6253 test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
6254 sector_t rv = mddev->dev_sectors - sector_nr;
6258 if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
6260 !md_bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
6269 md_bitmap_cond_end_sync(mddev->bitmap, sector_nr, false);
6292 md_bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded);
6421 struct mddev *mddev = conf->mddev;
6442 wait_event_lock_irq(mddev->sb_wait,
6443 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags),
6469 struct mddev *mddev = thread->mddev;
6470 struct r5conf *conf = mddev->private;
6476 md_check_recovery(mddev);
6495 md_bitmap_unplug(mddev->bitmap);
6518 if (mddev->sb_flags & ~(1 << MD_SB_CHANGE_PENDING)) {
6520 md_check_recovery(mddev);
6548 raid5_show_stripe_cache_size(struct mddev *mddev, char *page)
6552 spin_lock(&mddev->lock);
6553 conf = mddev->private;
6556 spin_unlock(&mddev->lock);
6561 raid5_set_cache_size(struct mddev *mddev, int size)
6564 struct r5conf *conf = mddev->private;
6576 md_allow_write(mddev);
6592 raid5_store_stripe_cache_size(struct mddev *mddev, const char *page, size_t len)
6602 err = mddev_lock(mddev);
6605 conf = mddev->private;
6609 err = raid5_set_cache_size(mddev, new);
6610 mddev_unlock(mddev);
6621 raid5_show_rmw_level(struct mddev *mddev, char *page)
6623 struct r5conf *conf = mddev->private;
6631 raid5_store_rmw_level(struct mddev *mddev, const char *page, size_t len)
6633 struct r5conf *conf = mddev->private;
6663 raid5_show_stripe_size(struct mddev *mddev, char *page)
6668 spin_lock(&mddev->lock);
6669 conf = mddev->private;
6672 spin_unlock(&mddev->lock);
6678 raid5_store_stripe_size(struct mddev *mddev, const char *page, size_t len)
6700 err = mddev_lock(mddev);
6704 conf = mddev->private;
6716 if (mddev->sync_thread ||
6717 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
6718 mddev->reshape_position != MaxSector ||
6719 mddev->sysfs_active) {
6724 mddev_suspend(mddev);
6735 mdname(mddev));
6739 mddev_resume(mddev);
6742 mddev_unlock(mddev);
6758 raid5_show_preread_threshold(struct mddev *mddev, char *page)
6762 spin_lock(&mddev->lock);
6763 conf = mddev->private;
6766 spin_unlock(&mddev->lock);
6771 raid5_store_preread_threshold(struct mddev *mddev, const char *page, size_t len)
6782 err = mddev_lock(mddev);
6785 conf = mddev->private;
6792 mddev_unlock(mddev);
6803 raid5_show_skip_copy(struct mddev *mddev, char *page)
6807 spin_lock(&mddev->lock);
6808 conf = mddev->private;
6811 spin_unlock(&mddev->lock);
6816 raid5_store_skip_copy(struct mddev *mddev, const char *page, size_t len)
6828 err = mddev_lock(mddev);
6831 conf = mddev->private;
6835 struct request_queue *q = mddev->queue;
6837 mddev_suspend(mddev);
6843 mddev_resume(mddev);
6845 mddev_unlock(mddev);
6855 stripe_cache_active_show(struct mddev *mddev, char *page)
6857 struct r5conf *conf = mddev->private;
6868 raid5_show_group_thread_cnt(struct mddev *mddev, char *page)
6872 spin_lock(&mddev->lock);
6873 conf = mddev->private;
6876 spin_unlock(&mddev->lock);
6884 raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len)
6900 err = mddev_lock(mddev);
6903 conf = mddev->private;
6907 mddev_suspend(mddev);
6925 mddev_resume(mddev);
6927 mddev_unlock(mddev);
7008 raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks)
7010 struct r5conf *conf = mddev->private;
7013 sectors = mddev->dev_sectors;
7152 static struct r5conf *setup_conf(struct mddev *mddev)
7164 if (mddev->new_level != 5
7165 && mddev->new_level != 4
7166 && mddev->new_level != 6) {
7168 mdname(mddev), mddev->new_level);
7171 if ((mddev->new_level == 5
7172 && !algorithm_valid_raid5(mddev->new_layout)) ||
7173 (mddev->new_level == 6
7174 && !algorithm_valid_raid6(mddev->new_layout))) {
7176 mdname(mddev), mddev->new_layout);
7179 if (mddev->new_level == 6 && mddev->raid_disks < 4) {
7181 mdname(mddev), mddev->raid_disks);
7185 if (!mddev->new_chunk_sectors ||
7186 (mddev->new_chunk_sectors << 9) % PAGE_SIZE ||
7187 !is_power_of_2(mddev->new_chunk_sectors)) {
7189 mdname(mddev), mddev->new_chunk_sectors << 9);
7235 rdev_for_each(rdev, mddev) {
7245 conf->recovery_disabled = mddev->recovery_disabled - 1;
7247 conf->raid_disks = mddev->raid_disks;
7248 if (mddev->reshape_position == MaxSector)
7249 conf->previous_raid_disks = mddev->raid_disks;
7251 conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks;
7269 conf->mddev = mddev;
7296 conf->level = mddev->new_level;
7297 conf->chunk_sectors = mddev->new_chunk_sectors;
7301 pr_debug("raid456: run(%s) called.\n", mdname(mddev));
7303 rdev_for_each(rdev, mddev) {
7323 mdname(mddev), bdevname(rdev->bdev, b), raid_disk);
7329 conf->level = mddev->new_level;
7340 conf->algorithm = mddev->new_layout;
7341 conf->reshape_progress = mddev->reshape_position;
7343 conf->prev_chunk_sectors = mddev->chunk_sectors;
7344 conf->prev_algo = mddev->layout;
7351 if (mddev->reshape_position != MaxSector) {
7353 ((mddev->chunk_sectors << 9) / RAID5_STRIPE_SIZE(conf)) * 4,
7354 ((mddev->new_chunk_sectors << 9) / RAID5_STRIPE_SIZE(conf)) * 4);
7358 mdname(mddev), conf->min_nr_stripes);
7365 mdname(mddev), memory);
7368 pr_debug("md/raid:%s: allocated %dkB\n", mdname(mddev), memory);
7381 mdname(mddev));
7385 sprintf(pers_name, "raid%d", mddev->new_level);
7386 conf->thread = md_register_thread(raid5d, mddev, pers_name);
7389 mdname(mddev));
7431 blk_queue_io_opt(conf->mddev->queue, (conf->chunk_sectors << 9) *
7435 static int raid5_run(struct mddev *mddev)
7447 if (mddev_init_writes_pending(mddev) < 0)
7450 if (mddev->recovery_cp != MaxSector)
7452 mdname(mddev));
7454 rdev_for_each(rdev, mddev) {
7467 } else if (mddev->reshape_backwards &&
7470 else if (!mddev->reshape_backwards &&
7475 if ((test_bit(MD_HAS_JOURNAL, &mddev->flags) || journal_dev) &&
7476 (mddev->bitmap_info.offset || mddev->bitmap_info.file)) {
7478 mdname(mddev));
7482 if (mddev->reshape_position != MaxSector) {
7497 int max_degraded = (mddev->level == 6 ? 2 : 1);
7503 mdname(mddev));
7507 if (mddev->new_level != mddev->level) {
7509 mdname(mddev));
7512 old_disks = mddev->raid_disks - mddev->delta_disks;
7520 here_new = mddev->reshape_position;
7521 chunk_sectors = max(mddev->chunk_sectors, mddev->new_chunk_sectors);
7522 new_data_disks = mddev->raid_disks - max_degraded;
7525 mdname(mddev));
7530 here_old = mddev->reshape_position;
7534 if (mddev->delta_disks == 0) {
7542 if (abs(min_offset_diff) >= mddev->chunk_sectors &&
7543 abs(min_offset_diff) >= mddev->new_chunk_sectors)
7545 else if (mddev->ro == 0) {
7547 mdname(mddev));
7550 } else if (mddev->reshape_backwards
7557 mdname(mddev));
7560 pr_debug("md/raid:%s: reshape will continue\n", mdname(mddev));
7563 BUG_ON(mddev->level != mddev->new_level);
7564 BUG_ON(mddev->layout != mddev->new_layout);
7565 BUG_ON(mddev->chunk_sectors != mddev->new_chunk_sectors);
7566 BUG_ON(mddev->delta_disks != 0);
7569 if (test_bit(MD_HAS_JOURNAL, &mddev->flags) &&
7570 test_bit(MD_HAS_PPL, &mddev->flags)) {
7572 mdname(mddev));
7573 clear_bit(MD_HAS_PPL, &mddev->flags);
7574 clear_bit(MD_HAS_MULTIPLE_PPLS, &mddev->flags);
7577 if (mddev->private == NULL)
7578 conf = setup_conf(mddev);
7580 conf = mddev->private;
7585 if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) {
7588 mdname(mddev));
7589 mddev->ro = 1;
7590 set_disk_ro(mddev->gendisk, 1);
7591 } else if (mddev->recovery_cp == MaxSector)
7592 set_bit(MD_JOURNAL_CLEAN, &mddev->flags);
7596 mddev->thread = conf->thread;
7598 mddev->private = conf;
7631 if (mddev->major_version == 0 &&
7632 mddev->minor_version > 90)
7654 mddev->degraded = raid5_calc_degraded(conf);
7658 mdname(mddev), mddev->degraded, conf->raid_disks);
7663 mddev->dev_sectors &= ~(mddev->chunk_sectors - 1);
7664 mddev->resync_max_sectors = mddev->dev_sectors;
7666 if (mddev->degraded > dirty_parity_disks &&
7667 mddev->recovery_cp != MaxSector) {
7668 if (test_bit(MD_HAS_PPL, &mddev->flags))
7670 mdname(mddev));
7671 else if (mddev->ok_start_degraded)
7673 mdname(mddev));
7676 mdname(mddev));
7682 mdname(mddev), conf->level,
7683 mddev->raid_disks-mddev->degraded, mddev->raid_disks,
7684 mddev->new_layout);
7691 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
7692 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
7693 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
7694 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
7695 mddev->sync_thread = md_register_thread(md_do_sync, mddev,
7697 if (!mddev->sync_thread)
7702 if (mddev->to_remove == &raid5_attrs_group)
7703 mddev->to_remove = NULL;
7704 else if (mddev->kobj.sd &&
7705 sysfs_create_group(&mddev->kobj, &raid5_attrs_group))
7707 mdname(mddev));
7708 md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
7710 if (mddev->queue) {
7718 ((mddev->chunk_sectors << 9) / PAGE_SIZE);
7720 chunk_size = mddev->chunk_sectors << 9;
7721 blk_queue_io_min(mddev->queue, chunk_size);
7723 mddev->queue->limits.raid_partial_stripes_expensive = 1;
7733 mddev->queue->limits.discard_alignment = stripe;
7734 mddev->queue->limits.discard_granularity = stripe;
7736 blk_queue_max_write_same_sectors(mddev->queue, 0);
7737 blk_queue_max_write_zeroes_sectors(mddev->queue, 0);
7739 rdev_for_each(rdev, mddev) {
7740 disk_stack_limits(mddev->gendisk, rdev->bdev,
7742 disk_stack_limits(mddev->gendisk, rdev->bdev,
7762 mddev->queue->limits.max_discard_sectors >= (stripe >> 9) &&
7763 mddev->queue->limits.discard_granularity >= stripe)
7765 mddev->queue);
7768 mddev->queue);
7770 blk_queue_max_hw_sectors(mddev->queue, UINT_MAX);
7778 md_unregister_thread(&mddev->thread);
7781 mddev->private = NULL;
7782 pr_warn("md/raid:%s: failed to run raid set.\n", mdname(mddev));
7786 static void raid5_free(struct mddev *mddev, void *priv)
7791 mddev->to_remove = &raid5_attrs_group;
7794 static void raid5_status(struct seq_file *seq, struct mddev *mddev)
7796 struct r5conf *conf = mddev->private;
7799 seq_printf(seq, " level %d, %dk chunk, algorithm %d", mddev->level,
7800 conf->chunk_sectors / 2, mddev->layout);
7801 seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded);
7823 conf->raid_disks - conf->mddev->degraded);
7835 static int raid5_spare_active(struct mddev *mddev)
7838 struct r5conf *conf = mddev->private;
7872 mddev->degraded = raid5_calc_degraded(conf);
7878 static int raid5_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
7880 struct r5conf *conf = mddev->private;
7922 mddev->recovery_disabled != conf->recovery_disabled &&
7963 static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev)
7965 struct r5conf *conf = mddev->private;
7991 if (mddev->recovery_disabled == conf->recovery_disabled)
8043 static int raid5_resize(struct mddev *mddev, sector_t sectors)
8053 struct r5conf *conf = mddev->private;
8058 newsize = raid5_size(mddev, sectors, mddev->raid_disks);
8059 if (mddev->external_size &&
8060 mddev->array_sectors > newsize)
8062 if (mddev->bitmap) {
8063 int ret = md_bitmap_resize(mddev->bitmap, sectors, 0, 0);
8067 md_set_array_sectors(mddev, newsize);
8068 if (sectors > mddev->dev_sectors &&
8069 mddev->recovery_cp > mddev->dev_sectors) {
8070 mddev->recovery_cp = mddev->dev_sectors;
8071 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
8073 mddev->dev_sectors = sectors;
8074 mddev->resync_max_sectors = sectors;
8078 static int check_stripe_cache(struct mddev *mddev)
8088 struct r5conf *conf = mddev->private;
8089 if (((mddev->chunk_sectors << 9) / RAID5_STRIPE_SIZE(conf)) * 4
8091 ((mddev->new_chunk_sectors << 9) / RAID5_STRIPE_SIZE(conf)) * 4
8094 mdname(mddev),
8095 ((max(mddev->chunk_sectors, mddev->new_chunk_sectors) << 9)
8102 static int check_reshape(struct mddev *mddev)
8104 struct r5conf *conf = mddev->private;
8108 if (mddev->delta_disks == 0 &&
8109 mddev->new_layout == mddev->layout &&
8110 mddev->new_chunk_sectors == mddev->chunk_sectors)
8114 if (mddev->delta_disks < 0 && mddev->reshape_position == MaxSector) {
8121 if (mddev->level == 6)
8123 if (mddev->raid_disks + mddev->delta_disks < min)
8127 if (!check_stripe_cache(mddev))
8130 if (mddev->new_chunk_sectors > mddev->chunk_sectors ||
8131 mddev->delta_disks > 0)
8134 + max(0, mddev->delta_disks),
8135 max(mddev->new_chunk_sectors,
8136 mddev->chunk_sectors)
8140 if (conf->previous_raid_disks + mddev->delta_disks <= conf->pool_size)
8143 + mddev->delta_disks));
8146 static int raid5_start_reshape(struct mddev *mddev)
8148 struct r5conf *conf = mddev->private;
8153 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
8156 if (!check_stripe_cache(mddev))
8162 rdev_for_each(rdev, mddev) {
8168 if (spares - mddev->degraded < mddev->delta_disks - conf->max_degraded)
8178 if (raid5_size(mddev, 0, conf->raid_disks + mddev->delta_disks)
8179 < mddev->array_sectors) {
8181 mdname(mddev));
8189 conf->raid_disks += mddev->delta_disks;
8191 conf->chunk_sectors = mddev->new_chunk_sectors;
8193 conf->algorithm = mddev->new_layout;
8199 if (mddev->reshape_backwards)
8200 conf->reshape_progress = raid5_size(mddev, 0, 0);
8211 mddev_suspend(mddev);
8212 mddev_resume(mddev);
8221 if (mddev->delta_disks >= 0) {
8222 rdev_for_each(rdev, mddev)
8225 if (raid5_add_disk(mddev, rdev) == 0) {
8233 sysfs_link_rdev(mddev, rdev);
8246 mddev->degraded = raid5_calc_degraded(conf);
8249 mddev->raid_disks = conf->raid_disks;
8250 mddev->reshape_position = conf->reshape_progress;
8251 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
8253 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
8254 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
8255 clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
8256 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
8257 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
8258 mddev->sync_thread = md_register_thread(md_do_sync, mddev,
8260 if (!mddev->sync_thread) {
8261 mddev->recovery = 0;
8264 mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks;
8265 mddev->new_chunk_sectors =
8267 mddev->new_layout = conf->algorithm = conf->prev_algo;
8268 rdev_for_each(rdev, mddev)
8273 mddev->reshape_position = MaxSector;
8279 md_wakeup_thread(mddev->sync_thread);
8280 md_new_event(mddev);
8290 if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {
8295 md_finish_reshape(conf->mddev);
8298 conf->mddev->reshape_position = MaxSector;
8299 rdev_for_each(rdev, conf->mddev)
8307 if (conf->mddev->queue)
8315 static void raid5_finish_reshape(struct mddev *mddev)
8317 struct r5conf *conf = mddev->private;
8319 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
8321 if (mddev->delta_disks <= 0) {
8324 mddev->degraded = raid5_calc_degraded(conf);
8327 d < conf->raid_disks - mddev->delta_disks;
8337 mddev->layout = conf->algorithm;
8338 mddev->chunk_sectors = conf->chunk_sectors;
8339 mddev->reshape_position = MaxSector;
8340 mddev->delta_disks = 0;
8341 mddev->reshape_backwards = 0;
8345 static void raid5_quiesce(struct mddev *mddev, int quiesce)
8347 struct r5conf *conf = mddev->private;
8377 static void *raid45_takeover_raid0(struct mddev *mddev, int level)
8379 struct r0conf *raid0_conf = mddev->private;
8385 mdname(mddev));
8391 mddev->dev_sectors = sectors;
8392 mddev->new_level = level;
8393 mddev->new_layout = ALGORITHM_PARITY_N;
8394 mddev->new_chunk_sectors = mddev->chunk_sectors;
8395 mddev->raid_disks += 1;
8396 mddev->delta_disks = 1;
8398 mddev->recovery_cp = MaxSector;
8400 return setup_conf(mddev);
8403 static void *raid5_takeover_raid1(struct mddev *mddev)
8408 if (mddev->raid_disks != 2 ||
8409 mddev->degraded > 1)
8417 while (chunksect && (mddev->array_sectors & (chunksect-1)))
8420 if ((chunksect<<9) < RAID5_STRIPE_SIZE((struct r5conf *)mddev->private))
8424 mddev->new_level = 5;
8425 mddev->new_layout = ALGORITHM_LEFT_SYMMETRIC;
8426 mddev->new_chunk_sectors = chunksect;
8428 ret = setup_conf(mddev);
8430 mddev_clear_unsupported_flags(mddev,
8435 static void *raid5_takeover_raid6(struct mddev *mddev)
8439 switch (mddev->layout) {
8461 mddev->new_level = 5;
8462 mddev->new_layout = new_layout;
8463 mddev->delta_disks = -1;
8464 mddev->raid_disks -= 1;
8465 return setup_conf(mddev);
8468 static int raid5_check_reshape(struct mddev *mddev)
8475 struct r5conf *conf = mddev->private;
8476 int new_chunk = mddev->new_chunk_sectors;
8478 if (mddev->new_layout >= 0 && !algorithm_valid_raid5(mddev->new_layout))
8485 if (mddev->array_sectors & (new_chunk-1))
8492 if (mddev->raid_disks == 2) {
8494 if (mddev->new_layout >= 0) {
8495 conf->algorithm = mddev->new_layout;
8496 mddev->layout = mddev->new_layout;
8500 mddev->chunk_sectors = new_chunk;
8502 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
8503 md_wakeup_thread(mddev->thread);
8505 return check_reshape(mddev);
8508 static int raid6_check_reshape(struct mddev *mddev)
8510 int new_chunk = mddev->new_chunk_sectors;
8512 if (mddev->new_layout >= 0 && !algorithm_valid_raid6(mddev->new_layout))
8519 if (mddev->array_sectors & (new_chunk-1))
8525 return check_reshape(mddev);
8528 static void *raid5_takeover(struct mddev *mddev)
8536 if (mddev->level == 0)
8537 return raid45_takeover_raid0(mddev, 5);
8538 if (mddev->level == 1)
8539 return raid5_takeover_raid1(mddev);
8540 if (mddev->level == 4) {
8541 mddev->new_layout = ALGORITHM_PARITY_N;
8542 mddev->new_level = 5;
8543 return setup_conf(mddev);
8545 if (mddev->level == 6)
8546 return raid5_takeover_raid6(mddev);
8551 static void *raid4_takeover(struct mddev *mddev)
8557 if (mddev->level == 0)
8558 return raid45_takeover_raid0(mddev, 4);
8559 if (mddev->level == 5 &&
8560 mddev->layout == ALGORITHM_PARITY_N) {
8561 mddev->new_layout = 0;
8562 mddev->new_level = 4;
8563 return setup_conf(mddev);
8570 static void *raid6_takeover(struct mddev *mddev)
8578 if (mddev->pers != &raid5_personality)
8580 if (mddev->degraded > 1)
8582 if (mddev->raid_disks > 253)
8584 if (mddev->raid_disks < 3)
8587 switch (mddev->layout) {
8609 mddev->new_level = 6;
8610 mddev->new_layout = new_layout;
8611 mddev->delta_disks = 1;
8612 mddev->raid_disks += 1;
8613 return setup_conf(mddev);
8616 static int raid5_change_consistency_policy(struct mddev *mddev, const char *buf)
8621 err = mddev_lock(mddev);
8624 conf = mddev->private;
8626 mddev_unlock(mddev);
8643 mddev_suspend(mddev);
8645 mddev_resume(mddev);
8647 } else if (test_bit(MD_HAS_JOURNAL, &conf->mddev->flags) &&
8652 rdev_for_each(rdev, mddev)
8659 mddev_suspend(mddev);
8660 clear_bit(MD_HAS_JOURNAL, &mddev->flags);
8661 mddev_resume(mddev);
8671 md_update_sb(mddev, 1);
8673 mddev_unlock(mddev);
8678 static int raid5_start(struct mddev *mddev)
8680 struct r5conf *conf = mddev->private;
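
For orientation, nearly every match above follows one linkage: the personality's private state (struct r5conf) keeps a back-pointer in conf->mddev, set by setup_conf() ("conf->mddev = mddev;", line 7269), while the array's struct mddev keeps the conf in mddev->private ("mddev->private = conf;", line 7598). The sketch below is a minimal, non-authoritative illustration of that relationship only; it is not the kernel's actual struct layout, and apart from the names mddev, private and conf->mddev everything in it (including the r5conf_of() helper) is an assumption added for illustration.

/*
 * Minimal sketch (not the kernel's real definitions) of the back-pointer
 * pair the listing shows.  Only the names mddev, private and conf->mddev
 * come from the matches above; the rest is illustrative filler.
 */
struct mddev;

struct r5conf {
	struct mddev *mddev;	/* owning array: conf->mddev->thread, conf->mddev->flags, ... */
	/* ... RAID4/5/6-specific state elided ... */
};

struct mddev {
	void *private;		/* personality data; for raid456 this holds the struct r5conf */
	/* ... array-wide md state elided ... */
};

/*
 * The accessor idiom used by most personality methods and sysfs handlers
 * in the listing, e.g. "struct r5conf *conf = mddev->private;" in
 * raid5_read_one_chunk() and raid5_run().  The helper name is hypothetical.
 */
static inline struct r5conf *r5conf_of(struct mddev *mddev)
{
	return mddev->private;
}

The void *private field is what keeps the generic md core personality-agnostic, which is why almost every function in the listing starts by dereferencing either mddev->private or conf->mddev before doing any real work.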