Lines Matching refs:conf (drivers/md/raid5.c)
22 * conf->seq_write is the number of the last batch successfully written.
23 * conf->seq_flush is the number of the last batch that was closed to
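The comment at lines 22-23 breaks off here only because the next source line no longer contains "conf"; the rest of the bitmap batching handshake can be pieced together from the matches further down (3614, 5859, 254-255 and 6817-6822). A condensed sketch of that handshake follows; the STRIPE_BIT_DELAY test, the md_bitmap_unplug() call and the device_lock drop around it are filled in from the surrounding code rather than quoted from these matches:

	sh->bm_seq = conf->seq_flush + 1;	/* lines 3614, 5859: stripe joins the open batch */

	/* do_release_stripe(), lines 254-255: hold the stripe back until its
	 * batch has actually been written (the STRIPE_BIT_DELAY test is
	 * assumed, it is not among the matches).
	 */
	if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
	    sh->bm_seq - conf->seq_write > 0)
		list_add_tail(&sh->lru, &conf->bitmap_list);

	/* raid5d(), lines 6817-6822: close the batch, flush the bitmap, then
	 * mark the batch written and release the delayed stripes.  The
	 * md_bitmap_unplug() call and the device_lock drop/reacquire around
	 * it (lines 6818-6820) are paraphrased, not quoted.
	 */
	conf->seq_flush++;
	md_bitmap_unplug(conf->mddev->bitmap);
	conf->seq_write = conf->seq_flush;
	activate_bit_delay(conf, conf->temp_inactive_list);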
73 static inline struct hlist_head *stripe_hash(struct r5conf *conf, sector_t sect)
75 int hash = (sect >> RAID5_STRIPE_SHIFT(conf)) & HASH_MASK;
76 return &conf->stripe_hashtbl[hash];
79 static inline int stripe_hash_locks_hash(struct r5conf *conf, sector_t sect)
81 return (sect >> RAID5_STRIPE_SHIFT(conf)) & STRIPE_HASH_LOCKS_MASK;
84 static inline void lock_device_hash_lock(struct r5conf *conf, int hash)
85 __acquires(&conf->device_lock)
87 spin_lock_irq(conf->hash_locks + hash);
88 spin_lock(&conf->device_lock);
91 static inline void unlock_device_hash_lock(struct r5conf *conf, int hash)
92 __releases(&conf->device_lock)
94 spin_unlock(&conf->device_lock);
95 spin_unlock_irq(conf->hash_locks + hash);
98 static inline void lock_all_device_hash_locks_irq(struct r5conf *conf)
99 __acquires(&conf->device_lock)
102 spin_lock_irq(conf->hash_locks);
104 spin_lock_nest_lock(conf->hash_locks + i, conf->hash_locks);
105 spin_lock(&conf->device_lock);
108 static inline void unlock_all_device_hash_locks_irq(struct r5conf *conf)
109 __releases(&conf->device_lock)
112 spin_unlock(&conf->device_lock);
114 spin_unlock(conf->hash_locks + i);
115 spin_unlock_irq(conf->hash_locks);
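Lines 84-115 establish the lock ordering used throughout the file: a per-bucket hash lock is always taken first, with interrupts disabled, and conf->device_lock is nested inside it. The loop bodies of the two "all buckets" helpers are not among the matches; a reconstruction follows, with the NR_STRIPE_HASH_LOCKS bound inferred from the matches at 2418, 2624-2625 and 7620-7622 rather than quoted from these lines:

	static inline void lock_all_device_hash_locks_irq(struct r5conf *conf)
		__acquires(&conf->device_lock)
	{
		int i;

		spin_lock_irq(conf->hash_locks);		/* bucket 0, irqs off */
		for (i = 1; i < NR_STRIPE_HASH_LOCKS; i++)	/* nest the remaining buckets */
			spin_lock_nest_lock(conf->hash_locks + i,
					    conf->hash_locks);	/* under bucket 0 for lockdep */
		spin_lock(&conf->device_lock);			/* device_lock is innermost */
	}

	static inline void unlock_all_device_hash_locks_irq(struct r5conf *conf)
		__releases(&conf->device_lock)
	{
		int i;

		spin_unlock(&conf->device_lock);
		for (i = NR_STRIPE_HASH_LOCKS - 1; i; i--)
			spin_unlock(conf->hash_locks + i);
		spin_unlock_irq(conf->hash_locks);		/* bucket 0 last, irqs back on */
	}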
157 static void print_raid5_conf (struct r5conf *conf);
176 struct r5conf *conf = sh->raid_conf;
188 group = conf->worker_groups + cpu_to_group(cpu);
197 if (conf->worker_cnt_per_group == 0) {
198 md_wakeup_thread(conf->mddev->thread);
202 group = conf->worker_groups + cpu_to_group(sh->cpu);
210 for (i = 1; i < conf->worker_cnt_per_group && thread_cnt > 0; i++) {
220 static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh,
222 __must_hold(&conf->device_lock)
228 BUG_ON(atomic_read(&conf->active_stripes)==0);
230 if (r5c_is_writeback(conf->log))
242 (conf->quiesce && r5c_is_writeback(conf->log) &&
252 list_add_tail(&sh->lru, &conf->delayed_list);
254 sh->bm_seq - conf->seq_write > 0)
255 list_add_tail(&sh->lru, &conf->bitmap_list);
259 if (conf->worker_cnt_per_group == 0) {
262 &conf->loprio_list);
265 &conf->handle_list);
271 md_wakeup_thread(conf->mddev->thread);
275 if (atomic_dec_return(&conf->preread_active_stripes)
277 md_wakeup_thread(conf->mddev->thread);
278 atomic_dec(&conf->active_stripes);
280 if (!r5c_is_writeback(conf->log))
286 else if (injournal == conf->raid_disks - conf->max_degraded) {
289 atomic_inc(&conf->r5c_cached_full_stripes);
291 atomic_dec(&conf->r5c_cached_partial_stripes);
292 list_add_tail(&sh->lru, &conf->r5c_full_stripe_list);
293 r5c_check_cached_full_stripe(conf);
300 list_add_tail(&sh->lru, &conf->r5c_partial_stripe_list);
306 static void __release_stripe(struct r5conf *conf, struct stripe_head *sh,
308 __must_hold(&conf->device_lock)
311 do_release_stripe(conf, sh, temp_inactive_list);
321 static void release_inactive_stripe_list(struct r5conf *conf,
342 spin_lock_irqsave(conf->hash_locks + hash, flags);
343 if (list_empty(conf->inactive_list + hash) &&
345 atomic_dec(&conf->empty_inactive_list_nr);
346 list_splice_tail_init(list, conf->inactive_list + hash);
348 spin_unlock_irqrestore(conf->hash_locks + hash, flags);
355 wake_up(&conf->wait_for_stripe);
356 if (atomic_read(&conf->active_stripes) == 0)
357 wake_up(&conf->wait_for_quiescent);
358 if (conf->retry_read_aligned)
359 md_wakeup_thread(conf->mddev->thread);
363 static int release_stripe_list(struct r5conf *conf,
365 __must_hold(&conf->device_lock)
371 head = llist_del_all(&conf->released_stripes);
385 __release_stripe(conf, sh, &temp_inactive_list[hash]);
394 struct r5conf *conf = sh->raid_conf;
405 if (unlikely(!conf->mddev->thread) ||
408 wakeup = llist_add(&sh->release_list, &conf->released_stripes);
410 md_wakeup_thread(conf->mddev->thread);
414 if (atomic_dec_and_lock_irqsave(&sh->count, &conf->device_lock, flags)) {
417 do_release_stripe(conf, sh, &list);
418 spin_unlock_irqrestore(&conf->device_lock, flags);
419 release_inactive_stripe_list(conf, &list, hash);
431 static inline void insert_hash(struct r5conf *conf, struct stripe_head *sh)
433 struct hlist_head *hp = stripe_hash(conf, sh->sector);
442 static struct stripe_head *get_free_stripe(struct r5conf *conf, int hash)
447 if (list_empty(conf->inactive_list + hash))
449 first = (conf->inactive_list + hash)->next;
453 atomic_inc(&conf->active_stripes);
455 if (list_empty(conf->inactive_list + hash))
456 atomic_inc(&conf->empty_inactive_list_nr);
500 init_stripe_shared_pages(struct stripe_head *sh, struct r5conf *conf, int disks)
507 /* Each of the sh->dev[i] need one conf->stripe_size */
508 cnt = PAGE_SIZE / conf->stripe_size;
572 static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous,
577 struct r5conf *conf = sh->raid_conf;
588 seq = read_seqcount_begin(&conf->gen_lock);
589 sh->generation = conf->generation - previous;
590 sh->disks = previous ? conf->previous_raid_disks : conf->raid_disks;
592 stripe_set_idx(sector, conf, previous, sh);
609 if (read_seqcount_retry(&conf->gen_lock, seq))
612 insert_hash(conf, sh);
617 static struct stripe_head *__find_stripe(struct r5conf *conf, sector_t sector,
623 hlist_for_each_entry(sh, stripe_hash(conf, sector), hash)
630 static struct stripe_head *find_get_stripe(struct r5conf *conf,
636 sh = __find_stripe(conf, sector, generation);
649 spin_lock(&conf->device_lock);
652 atomic_inc(&conf->active_stripes);
656 if (!list_empty(conf->inactive_list + hash))
659 if (list_empty(conf->inactive_list + hash) &&
661 atomic_inc(&conf->empty_inactive_list_nr);
668 spin_unlock(&conf->device_lock);
686 * Most calls to this function hold &conf->device_lock. Calls
690 int raid5_calc_degraded(struct r5conf *conf)
697 for (i = 0; i < conf->previous_raid_disks; i++) {
698 struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev);
700 rdev = rcu_dereference(conf->disks[i].replacement);
715 if (conf->raid_disks >= conf->previous_raid_disks)
719 if (conf->raid_disks == conf->previous_raid_disks)
723 for (i = 0; i < conf->raid_disks; i++) {
724 struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev);
726 rdev = rcu_dereference(conf->disks[i].replacement);
737 if (conf->raid_disks <= conf->previous_raid_disks)
746 static bool has_failed(struct r5conf *conf)
748 int degraded = conf->mddev->degraded;
750 if (test_bit(MD_BROKEN, &conf->mddev->flags))
753 if (conf->mddev->reshape_position != MaxSector)
754 degraded = raid5_calc_degraded(conf);
756 return degraded > conf->max_degraded;
791 static bool is_inactive_blocked(struct r5conf *conf, int hash)
793 if (list_empty(conf->inactive_list + hash))
796 if (!test_bit(R5_INACTIVE_BLOCKED, &conf->cache_state))
799 return (atomic_read(&conf->active_stripes) <
800 (conf->max_nr_stripes * 3 / 4));
803 struct stripe_head *raid5_get_active_stripe(struct r5conf *conf,
808 int hash = stripe_hash_locks_hash(conf, sector);
813 spin_lock_irq(conf->hash_locks + hash);
816 if (!(flags & R5_GAS_NOQUIESCE) && conf->quiesce) {
829 wait_event_lock_irq(conf->wait_for_quiescent,
830 !conf->quiesce,
831 *(conf->hash_locks + hash));
834 sh = find_get_stripe(conf, sector, conf->generation - previous,
839 if (!test_bit(R5_INACTIVE_BLOCKED, &conf->cache_state)) {
840 sh = get_free_stripe(conf, hash);
842 r5c_check_stripe_cache_usage(conf);
848 if (!test_bit(R5_DID_ALLOC, &conf->cache_state))
849 set_bit(R5_ALLOC_MORE, &conf->cache_state);
855 set_bit(R5_INACTIVE_BLOCKED, &conf->cache_state);
856 r5l_wake_reclaim(conf->log, 0);
864 wait_event_lock_irq(conf->wait_for_stripe,
865 is_inactive_blocked(conf, hash),
866 *(conf->hash_locks + hash));
867 clear_bit(R5_INACTIVE_BLOCKED, &conf->cache_state);
870 spin_unlock_irq(conf->hash_locks + hash);
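Taken together, the matches at 803-870 trace the whole acquisition path of raid5_get_active_stripe(): take the bucket's hash lock, optionally wait out a quiesce, look the stripe up, otherwise try to pull one off the inactive list, and as a last resort mark the cache blocked and sleep until enough stripes are free again. A condensed paraphrase of that loop follows; the R5_GAS_NOBLOCK early exit, the init/refcount bookkeeping on a freshly allocated stripe and the batch_last release before waiting on quiesce are omitted, and the loop structure and the "previous" local are inferred rather than quoted:

	struct stripe_head *sh;
	int hash = stripe_hash_locks_hash(conf, sector);	/* line 808 */
	int previous = !!(flags & R5_GAS_PREVIOUS);		/* assumed; only its use at 834 matches */

	spin_lock_irq(conf->hash_locks + hash);
	for (;;) {
		if (!(flags & R5_GAS_NOQUIESCE) && conf->quiesce)
			wait_event_lock_irq(conf->wait_for_quiescent, !conf->quiesce,
					    *(conf->hash_locks + hash));

		sh = find_get_stripe(conf, sector, conf->generation - previous, hash);
		if (sh)
			break;

		if (!test_bit(R5_INACTIVE_BLOCKED, &conf->cache_state)) {
			sh = get_free_stripe(conf, hash);
			if (sh) {
				r5c_check_stripe_cache_usage(conf);
				break;
			}
			if (!test_bit(R5_DID_ALLOC, &conf->cache_state))
				set_bit(R5_ALLOC_MORE, &conf->cache_state);
		}

		set_bit(R5_INACTIVE_BLOCKED, &conf->cache_state);
		r5l_wake_reclaim(conf->log, 0);
		wait_event_lock_irq(conf->wait_for_stripe,
				    is_inactive_blocked(conf, hash),
				    *(conf->hash_locks + hash));
		clear_bit(R5_INACTIVE_BLOCKED, &conf->cache_state);
	}
	spin_unlock_irq(conf->hash_locks + hash);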
904 struct r5conf *conf = sh->raid_conf;
906 if (raid5_has_log(conf) || raid5_has_ppl(conf))
914 static void stripe_add_to_batch_list(struct r5conf *conf,
924 if (!sector_div(tmp_sec, conf->chunk_sectors))
926 head_sector = sh->sector - RAID5_STRIPE_SECTORS(conf);
932 hash = stripe_hash_locks_hash(conf, head_sector);
933 spin_lock_irq(conf->hash_locks + hash);
934 head = find_get_stripe(conf, head_sector, conf->generation,
936 spin_unlock_irq(conf->hash_locks + hash);
989 if (atomic_dec_return(&conf->preread_active_stripes)
991 md_wakeup_thread(conf->mddev->thread);
1012 static int use_new_offset(struct r5conf *conf, struct stripe_head *sh)
1014 sector_t progress = conf->reshape_progress;
1016 * of conf->generation, or ->data_offset that was set before
1022 if (sh->generation == conf->generation - 1)
1052 static void dispatch_defer_bios(struct r5conf *conf, int target,
1059 if (conf->pending_data_cnt == 0)
1062 list_sort(NULL, &conf->pending_list, cmp_stripe);
1064 first = conf->pending_list.next;
1067 if (conf->next_pending_data)
1068 list_move_tail(&conf->pending_list,
1069 &conf->next_pending_data->sibling);
1071 while (!list_empty(&conf->pending_list)) {
1072 data = list_first_entry(&conf->pending_list,
1079 list_move(&data->sibling, &conf->free_list);
1084 conf->pending_data_cnt -= cnt;
1085 BUG_ON(conf->pending_data_cnt < 0 || cnt < target);
1087 if (next != &conf->pending_list)
1088 conf->next_pending_data = list_entry(next,
1091 conf->next_pending_data = NULL;
1093 if (first != &conf->pending_list)
1094 list_move_tail(&conf->pending_list, first);
1097 static void flush_deferred_bios(struct r5conf *conf)
1101 if (conf->pending_data_cnt == 0)
1104 spin_lock(&conf->pending_bios_lock);
1105 dispatch_defer_bios(conf, conf->pending_data_cnt, &tmp);
1106 BUG_ON(conf->pending_data_cnt != 0);
1107 spin_unlock(&conf->pending_bios_lock);
1112 static void defer_issue_bios(struct r5conf *conf, sector_t sector,
1118 spin_lock(&conf->pending_bios_lock);
1119 ent = list_first_entry(&conf->free_list, struct r5pending_data,
1121 list_move_tail(&ent->sibling, &conf->pending_list);
1125 conf->pending_data_cnt++;
1126 if (conf->pending_data_cnt >= PENDING_IO_MAX)
1127 dispatch_defer_bios(conf, PENDING_IO_ONE_FLUSH, &tmp);
1129 spin_unlock(&conf->pending_bios_lock);
1141 struct r5conf *conf = sh->raid_conf;
1153 should_defer = conf->batch_bio_dispatch && conf->group_cnt;
1186 rrdev = rcu_dereference(conf->disks[i].replacement);
1188 rdev = rcu_dereference(conf->disks[i].rdev);
1223 int bad = is_badblock(rdev, sh->sector, RAID5_STRIPE_SECTORS(conf),
1230 if (!conf->mddev->external &&
1231 conf->mddev->sb_flags) {
1236 md_check_recovery(conf->mddev);
1244 md_wait_for_blocked_rdev(rdev, conf->mddev);
1247 rdev_dec_pending(rdev, conf->mddev);
1255 md_sync_acct(rdev->bdev, RAID5_STRIPE_SECTORS(conf));
1271 if (use_new_offset(conf, sh))
1294 bi->bi_io_vec[0].bv_len = RAID5_STRIPE_SIZE(conf);
1296 bi->bi_iter.bi_size = RAID5_STRIPE_SIZE(conf);
1306 if (conf->mddev->gendisk)
1308 disk_devt(conf->mddev->gendisk),
1318 md_sync_acct(rrdev->bdev, RAID5_STRIPE_SECTORS(conf));
1334 if (use_new_offset(conf, sh))
1344 rbi->bi_io_vec[0].bv_len = RAID5_STRIPE_SIZE(conf);
1346 rbi->bi_iter.bi_size = RAID5_STRIPE_SIZE(conf);
1353 if (conf->mddev->gendisk)
1355 disk_devt(conf->mddev->gendisk),
1380 defer_issue_bios(conf, head_sh->sector, &pending_bios);
1394 struct r5conf *conf = sh->raid_conf;
1416 if (len > 0 && page_offset + len > RAID5_STRIPE_SIZE(conf))
1417 clen = RAID5_STRIPE_SIZE(conf) - page_offset;
1425 if (conf->skip_copy &&
1427 clen == RAID5_STRIPE_SIZE(conf) &&
1452 struct r5conf *conf = sh->raid_conf;
1473 dev->sector + RAID5_STRIPE_SECTORS(conf)) {
1474 rbi2 = r5_next_bio(conf, rbi, dev->sector);
1491 struct r5conf *conf = sh->raid_conf;
1506 dev->sector + RAID5_STRIPE_SECTORS(conf)) {
1510 rbi = r5_next_bio(conf, rbi, dev->sector);
1936 struct r5conf *conf = sh->raid_conf;
1969 dev->sector + RAID5_STRIPE_SECTORS(conf)) {
1980 r5c_is_writeback(conf->log));
1982 !r5c_is_writeback(conf->log)) {
1988 wbi = r5_next_bio(conf, wbi, dev->sector);
2293 struct r5conf *conf = sh->raid_conf;
2294 int level = conf->level;
2297 local_lock(&conf->percpu->lock);
2298 percpu = this_cpu_ptr(conf->percpu);
2358 local_unlock(&conf->percpu->lock);
2372 int disks, struct r5conf *conf)
2385 sh->raid_conf = conf;
2388 if (raid5_has_ppl(conf)) {
2396 if (init_stripe_shared_pages(sh, conf, disks)) {
2404 static int grow_one_stripe(struct r5conf *conf, gfp_t gfp)
2408 sh = alloc_stripe(conf->slab_cache, gfp, conf->pool_size, conf);
2414 free_stripe(conf->slab_cache, sh);
2418 conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS;
2420 atomic_inc(&conf->active_stripes);
2423 WRITE_ONCE(conf->max_nr_stripes, conf->max_nr_stripes + 1);
2427 static int grow_stripes(struct r5conf *conf, int num)
2430 size_t namelen = sizeof(conf->cache_name[0]);
2431 int devs = max(conf->raid_disks, conf->previous_raid_disks);
2433 if (conf->mddev->gendisk)
2434 snprintf(conf->cache_name[0], namelen,
2435 "raid%d-%s", conf->level, mdname(conf->mddev));
2437 snprintf(conf->cache_name[0], namelen,
2438 "raid%d-%p", conf->level, conf->mddev);
2439 snprintf(conf->cache_name[1], namelen, "%.27s-alt", conf->cache_name[0]);
2441 conf->active_name = 0;
2442 sc = kmem_cache_create(conf->cache_name[conf->active_name],
2447 conf->slab_cache = sc;
2448 conf->pool_size = devs;
2450 if (!grow_one_stripe(conf, GFP_KERNEL))
2497 static int resize_chunks(struct r5conf *conf, int new_disks, int new_sectors)
2507 if (conf->scribble_disks >= new_disks &&
2508 conf->scribble_sectors >= new_sectors)
2510 mddev_suspend(conf->mddev);
2516 percpu = per_cpu_ptr(conf->percpu, cpu);
2518 new_sectors / RAID5_STRIPE_SECTORS(conf));
2524 mddev_resume(conf->mddev);
2526 conf->scribble_disks = new_disks;
2527 conf->scribble_sectors = new_sectors;
2532 static int resize_stripes(struct r5conf *conf, int newsize)
2546 * 3/ reallocate conf->disks to be suitable bigger. If this fails,
2565 md_allow_write(conf->mddev);
2568 sc = kmem_cache_create(conf->cache_name[1-conf->active_name],
2575 mutex_lock(&conf->cache_size_mutex);
2577 for (i = conf->max_nr_stripes; i; i--) {
2578 nsh = alloc_stripe(sc, GFP_KERNEL, newsize, conf);
2592 mutex_unlock(&conf->cache_size_mutex);
2602 lock_device_hash_lock(conf, hash);
2603 wait_event_cmd(conf->wait_for_stripe,
2604 !list_empty(conf->inactive_list + hash),
2605 unlock_device_hash_lock(conf, hash),
2606 lock_device_hash_lock(conf, hash));
2607 osh = get_free_stripe(conf, hash);
2608 unlock_device_hash_lock(conf, hash);
2616 for(i=0; i<conf->pool_size; i++) {
2622 free_stripe(conf->slab_cache, osh);
2624 if (cnt >= conf->max_nr_stripes / NR_STRIPE_HASH_LOCKS +
2625 !!((conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS) > hash)) {
2630 kmem_cache_destroy(conf->slab_cache);
2635 * conf->disks and the scribble region
2639 for (i = 0; i < conf->pool_size; i++)
2640 ndisks[i] = conf->disks[i];
2642 for (i = conf->pool_size; i < newsize; i++) {
2649 for (i = conf->pool_size; i < newsize; i++)
2654 kfree(conf->disks);
2655 conf->disks = ndisks;
2660 conf->slab_cache = sc;
2661 conf->active_name = 1-conf->active_name;
2677 for (i = conf->raid_disks; i < newsize; i++) {
2685 for (i=conf->raid_disks; i < newsize; i++)
2700 conf->pool_size = newsize;
2701 mutex_unlock(&conf->cache_size_mutex);
2706 static int drop_one_stripe(struct r5conf *conf)
2709 int hash = (conf->max_nr_stripes - 1) & STRIPE_HASH_LOCKS_MASK;
2711 spin_lock_irq(conf->hash_locks + hash);
2712 sh = get_free_stripe(conf, hash);
2713 spin_unlock_irq(conf->hash_locks + hash);
2718 free_stripe(conf->slab_cache, sh);
2719 atomic_dec(&conf->active_stripes);
2720 WRITE_ONCE(conf->max_nr_stripes, conf->max_nr_stripes - 1);
2724 static void shrink_stripes(struct r5conf *conf)
2726 while (conf->max_nr_stripes &&
2727 drop_one_stripe(conf))
2730 kmem_cache_destroy(conf->slab_cache);
2731 conf->slab_cache = NULL;
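A small point worth pulling out of the matches at 2418, 2423 and 2709: grow_one_stripe() assigns the new stripe to bucket max_nr_stripes % NR_STRIPE_HASH_LOCKS and then increments the counter, while drop_one_stripe() shrinks bucket (max_nr_stripes - 1) & STRIPE_HASH_LOCKS_MASK. Assuming STRIPE_HASH_LOCKS_MASK is NR_STRIPE_HASH_LOCKS - 1 with a power-of-two count (that is how raid5.h defines them, but neither value appears in these matches), the two expressions name the same bucket, so the cache grows and shrinks one bucket at a time in round-robin order and the per-bucket inactive lists stay evenly loaded:

	/* Illustration only, not quoted code. */
	int grow_bucket = conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS;		/* line 2418 */
	/* ... grow_one_stripe() then bumps max_nr_stripes by one (line 2423) ... */
	int drop_bucket = (conf->max_nr_stripes - 1) & STRIPE_HASH_LOCKS_MASK;	/* line 2709 */
	/* With the mask equal to the count minus one, drop_bucket == grow_bucket:
	 * drop_one_stripe() always takes back the bucket that the most recent
	 * grow_one_stripe() filled.
	 */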
2759 struct r5conf *conf = sh->raid_conf;
2781 rdev = rdev_pend_deref(conf->disks[i].replacement);
2783 rdev = rdev_pend_deref(conf->disks[i].rdev);
2785 if (use_new_offset(conf, sh))
2798 mdname(conf->mddev), RAID5_STRIPE_SECTORS(conf),
2801 atomic_add(RAID5_STRIPE_SECTORS(conf), &rdev->corrected_errors);
2826 mdname(conf->mddev),
2829 else if (conf->mddev->degraded >= conf->max_degraded) {
2833 mdname(conf->mddev),
2841 mdname(conf->mddev),
2845 > conf->max_nr_stripes) {
2848 mdname(conf->mddev),
2850 conf->max_nr_stripes);
2852 mdname(conf->mddev), rdev->bdev);
2873 rdev, sh->sector, RAID5_STRIPE_SECTORS(conf), 0)))
2874 md_error(conf->mddev, rdev);
2877 rdev_dec_pending(rdev, conf->mddev);
2887 struct r5conf *conf = sh->raid_conf;
2896 rdev = rdev_pend_deref(conf->disks[i].rdev);
2900 rdev = rdev_pend_deref(conf->disks[i].replacement);
2908 rdev = rdev_pend_deref(conf->disks[i].rdev);
2922 md_error(conf->mddev, rdev);
2924 RAID5_STRIPE_SECTORS(conf),
2936 RAID5_STRIPE_SECTORS(conf),
2947 rdev_dec_pending(rdev, conf->mddev);
2964 struct r5conf *conf = mddev->private;
2971 spin_lock_irqsave(&conf->device_lock, flags);
2974 mddev->degraded = raid5_calc_degraded(conf);
2976 if (has_failed(conf)) {
2977 set_bit(MD_BROKEN, &conf->mddev->flags);
2978 conf->recovery_disabled = mddev->recovery_disabled;
2981 mdname(mddev), mddev->degraded, conf->raid_disks);
2984 mdname(mddev), conf->raid_disks - mddev->degraded);
2987 spin_unlock_irqrestore(&conf->device_lock, flags);
3000 sector_t raid5_compute_sector(struct r5conf *conf, sector_t r_sector,
3010 int algorithm = previous ? conf->prev_algo
3011 : conf->algorithm;
3012 int sectors_per_chunk = previous ? conf->prev_chunk_sectors
3013 : conf->chunk_sectors;
3014 int raid_disks = previous ? conf->previous_raid_disks
3015 : conf->raid_disks;
3016 int data_disks = raid_disks - conf->max_degraded;
3036 switch(conf->level) {
3204 struct r5conf *conf = sh->raid_conf;
3206 int data_disks = raid_disks - conf->max_degraded;
3208 int sectors_per_chunk = previous ? conf->prev_chunk_sectors
3209 : conf->chunk_sectors;
3210 int algorithm = previous ? conf->prev_algo
3211 : conf->algorithm;
3224 switch(conf->level) {
3311 check = raid5_compute_sector(conf, r_sector,
3316 mdname(conf->mddev));
3343 * stripe, we need to reserve (conf->raid_disk + 1) pages per stripe
3345 * operation, we only need (conf->max_degraded + 1) pages per stripe.
3360 static inline bool delay_towrite(struct r5conf *conf,
3369 if (test_bit(R5C_LOG_CRITICAL, &conf->cache_state) &&
3383 struct r5conf *conf = sh->raid_conf;
3384 int level = conf->level;
3398 if (dev->towrite && !delay_towrite(conf, dev, s)) {
3424 if (s->locked + conf->max_degraded == disks)
3426 atomic_inc(&conf->pending_full_writes);
3490 struct r5conf *conf = sh->raid_conf;
3514 if (forwrite && raid5_has_ppl(conf)) {
3540 if (first + conf->chunk_sectors * (count - 1) != last)
3550 struct r5conf *conf = sh->raid_conf;
3573 md_write_inc(conf->mddev, bi);
3579 sector < sh->dev[dd_idx].sector + RAID5_STRIPE_SECTORS(conf) &&
3581 bi = r5_next_bio(conf, bi, sh->dev[dd_idx].sector)) {
3585 if (sector >= sh->dev[dd_idx].sector + RAID5_STRIPE_SECTORS(conf))
3594 if (conf->mddev->bitmap && firstwrite) {
3609 md_bitmap_startwrite(conf->mddev->bitmap, sh->sector,
3610 RAID5_STRIPE_SECTORS(conf), 0);
3614 sh->bm_seq = conf->seq_flush+1;
3641 static void end_reshape(struct r5conf *conf);
3643 static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous,
3647 previous ? conf->prev_chunk_sectors : conf->chunk_sectors;
3650 int disks = previous ? conf->previous_raid_disks : conf->raid_disks;
3652 raid5_compute_sector(conf,
3653 stripe * (disks - conf->max_degraded)
3660 handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
3672 rdev = rcu_dereference(conf->disks[i].rdev);
3683 RAID5_STRIPE_SECTORS(conf), 0))
3684 md_error(conf->mddev, rdev);
3685 rdev_dec_pending(rdev, conf->mddev);
3700 wake_up(&conf->wait_for_overlap);
3703 sh->dev[i].sector + RAID5_STRIPE_SECTORS(conf)) {
3704 struct bio *nextbi = r5_next_bio(conf, bi, sh->dev[i].sector);
3706 md_write_end(conf->mddev);
3711 md_bitmap_endwrite(conf->mddev->bitmap, sh->sector,
3712 RAID5_STRIPE_SECTORS(conf), 0, 0);
3724 sh->dev[i].sector + RAID5_STRIPE_SECTORS(conf)) {
3725 struct bio *bi2 = r5_next_bio(conf, bi, sh->dev[i].sector);
3727 md_write_end(conf->mddev);
3736 s->failed > conf->max_degraded &&
3744 wake_up(&conf->wait_for_overlap);
3748 sh->dev[i].sector + RAID5_STRIPE_SECTORS(conf)) {
3750 r5_next_bio(conf, bi, sh->dev[i].sector);
3757 md_bitmap_endwrite(conf->mddev->bitmap, sh->sector,
3758 RAID5_STRIPE_SECTORS(conf), 0, 0);
3768 if (atomic_dec_and_test(&conf->pending_full_writes))
3769 md_wakeup_thread(conf->mddev->thread);
3773 handle_failed_sync(struct r5conf *conf, struct stripe_head *sh,
3782 wake_up(&conf->wait_for_overlap);
3792 if (test_bit(MD_RECOVERY_RECOVER, &conf->mddev->recovery)) {
3797 for (i = 0; i < conf->raid_disks; i++) {
3798 struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev);
3803 RAID5_STRIPE_SECTORS(conf), 0))
3805 rdev = rcu_dereference(conf->disks[i].replacement);
3810 RAID5_STRIPE_SECTORS(conf), 0))
3815 conf->recovery_disabled =
3816 conf->mddev->recovery_disabled;
3818 md_done_sync(conf->mddev, RAID5_STRIPE_SECTORS(conf), !abort);
4072 static void handle_stripe_clean_event(struct r5conf *conf,
4103 dev->sector + RAID5_STRIPE_SECTORS(conf)) {
4104 wbi2 = r5_next_bio(conf, wbi, dev->sector);
4105 md_write_end(conf->mddev);
4109 md_bitmap_endwrite(conf->mddev->bitmap, sh->sector,
4110 RAID5_STRIPE_SECTORS(conf),
4148 spin_lock_irq(conf->hash_locks + hash);
4150 spin_unlock_irq(conf->hash_locks + hash);
4165 if (atomic_dec_and_test(&conf->pending_full_writes))
4166 md_wakeup_thread(conf->mddev->thread);
4187 static int handle_stripe_dirtying(struct r5conf *conf,
4193 sector_t recovery_cp = conf->mddev->recovery_cp;
4202 if (conf->rmw_level == PARITY_DISABLE_RMW ||
4210 conf->rmw_level, (unsigned long long)recovery_cp,
4215 if (((dev->towrite && !delay_towrite(conf, dev, s)) ||
4242 if ((rmw < rcw || (rmw == rcw && conf->rmw_level == PARITY_PREFER_RMW)) && rmw > 0) {
4244 if (conf->mddev->queue)
4245 blk_add_trace_msg(conf->mddev->queue,
4266 &conf->cache_state)) {
4280 if (((dev->towrite && !delay_towrite(conf, dev, s)) ||
4299 if ((rcw < rmw || (rcw == rmw && conf->rmw_level != PARITY_PREFER_RMW)) && rcw > 0) {
4324 if (rcw && conf->mddev->queue)
4325 blk_add_trace_msg(conf->mddev->queue, "raid5 rcw %llu %d %d %d",
4351 static void handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh,
4413 atomic64_add(RAID5_STRIPE_SECTORS(conf), &conf->mddev->resync_mismatches);
4414 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) {
4418 "%llu-%llu\n", mdname(conf->mddev),
4421 RAID5_STRIPE_SECTORS(conf));
4444 static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh,
4540 mdname(conf->mddev),
4578 atomic64_add(RAID5_STRIPE_SECTORS(conf), &conf->mddev->resync_mismatches);
4579 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) {
4583 "%llu-%llu\n", mdname(conf->mddev),
4586 RAID5_STRIPE_SECTORS(conf));
4621 static void handle_stripe_expansion(struct r5conf *conf, struct stripe_head *sh)
4638 sector_t s = raid5_compute_sector(conf, bn, 0,
4640 sh2 = raid5_get_active_stripe(conf, NULL, s,
4659 sh->dev[i].offset, RAID5_STRIPE_SIZE(conf),
4664 for (j = 0; j < conf->raid_disks; j++)
4669 if (j == conf->raid_disks) {
4696 struct r5conf *conf = sh->raid_conf;
4708 s->log_failed = r5l_log_disk_error(conf);
4756 rdev = rcu_dereference(conf->disks[i].replacement);
4758 rdev->recovery_offset >= sh->sector + RAID5_STRIPE_SECTORS(conf) &&
4759 !is_badblock(rdev, sh->sector, RAID5_STRIPE_SECTORS(conf),
4767 rdev = rcu_dereference(conf->disks[i].rdev);
4773 is_bad = is_badblock(rdev, sh->sector, RAID5_STRIPE_SECTORS(conf),
4800 else if (sh->sector + RAID5_STRIPE_SECTORS(conf) <= rdev->recovery_offset)
4815 conf->disks[i].rdev);
4828 conf->disks[i].rdev);
4837 conf->disks[i].replacement);
4859 conf->disks[i].replacement);
4880 sh->sector >= conf->mddev->recovery_cp ||
4881 test_bit(MD_RECOVERY_REQUESTED, &(conf->mddev->recovery)))
4991 struct r5conf *conf = sh->raid_conf;
5048 test_bit(MD_SB_CHANGE_PENDING, &conf->mddev->sb_flags)) {
5060 rdev_dec_pending(s.blocked_rdev, conf->mddev);
5080 if (s.failed > conf->max_degraded ||
5086 handle_failed_stripe(conf, sh, &s, disks);
5088 handle_failed_sync(conf, sh, &s);
5141 || conf->level < 6;
5152 handle_stripe_clean_event(conf, sh, disks);
5155 r5c_handle_cached_data_endio(conf, sh, disks);
5174 r5c_finish_stripe_write_out(conf, sh, &s);
5186 if (!r5c_is_writeback(conf->log)) {
5188 handle_stripe_dirtying(conf, sh, &s, disks);
5194 ret = r5c_try_caching_write(conf, sh, &s,
5207 ret = handle_stripe_dirtying(conf, sh, &s,
5224 if (conf->level == 6)
5225 handle_parity_checks6(conf, sh, &s, disks);
5227 handle_parity_checks5(conf, sh, &s, disks);
5234 for (i = 0; i < conf->raid_disks; i++)
5248 md_done_sync(conf->mddev, RAID5_STRIPE_SECTORS(conf), 1);
5251 wake_up(&conf->wait_for_overlap);
5257 if (s.failed <= conf->max_degraded && !conf->mddev->ro)
5278 = raid5_get_active_stripe(conf, NULL, sh->sector,
5289 atomic_inc(&conf->preread_active_stripes);
5298 for (i = conf->raid_disks; i--; ) {
5308 sh->disks = conf->raid_disks;
5309 stripe_set_idx(sh->sector, conf, 0, sh);
5313 atomic_dec(&conf->reshape_stripes);
5314 wake_up(&conf->wait_for_overlap);
5315 md_done_sync(conf->mddev, RAID5_STRIPE_SECTORS(conf), 1);
5320 handle_stripe_expansion(conf, sh);
5325 if (conf->mddev->external)
5327 conf->mddev);
5334 conf->mddev);
5343 rdev = rdev_pend_deref(conf->disks[i].rdev);
5345 RAID5_STRIPE_SECTORS(conf), 0))
5346 md_error(conf->mddev, rdev);
5347 rdev_dec_pending(rdev, conf->mddev);
5350 rdev = rdev_pend_deref(conf->disks[i].rdev);
5352 RAID5_STRIPE_SECTORS(conf), 0);
5353 rdev_dec_pending(rdev, conf->mddev);
5356 rdev = rdev_pend_deref(conf->disks[i].replacement);
5359 rdev = rdev_pend_deref(conf->disks[i].rdev);
5361 RAID5_STRIPE_SECTORS(conf), 0);
5362 rdev_dec_pending(rdev, conf->mddev);
5376 atomic_dec(&conf->preread_active_stripes);
5377 if (atomic_read(&conf->preread_active_stripes) <
5379 md_wakeup_thread(conf->mddev->thread);
5385 static void raid5_activate_delayed(struct r5conf *conf)
5386 __must_hold(&conf->device_lock)
5388 if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) {
5389 while (!list_empty(&conf->delayed_list)) {
5390 struct list_head *l = conf->delayed_list.next;
5396 atomic_inc(&conf->preread_active_stripes);
5397 list_add_tail(&sh->lru, &conf->hold_list);
5403 static void activate_bit_delay(struct r5conf *conf,
5405 __must_hold(&conf->device_lock)
5408 list_add(&head, &conf->bitmap_list);
5409 list_del_init(&conf->bitmap_list);
5416 __release_stripe(conf, sh, &temp_inactive_list[hash]);
5422 struct r5conf *conf = mddev->private;
5427 chunk_sectors = min(conf->chunk_sectors, conf->prev_chunk_sectors);
5436 static void add_bio_to_retry(struct bio *bi,struct r5conf *conf)
5440 spin_lock_irqsave(&conf->device_lock, flags);
5442 bi->bi_next = conf->retry_read_aligned_list;
5443 conf->retry_read_aligned_list = bi;
5445 spin_unlock_irqrestore(&conf->device_lock, flags);
5446 md_wakeup_thread(conf->mddev->thread);
5449 static struct bio *remove_bio_from_retry(struct r5conf *conf,
5454 bi = conf->retry_read_aligned;
5456 *offset = conf->retry_read_offset;
5457 conf->retry_read_aligned = NULL;
5460 bi = conf->retry_read_aligned_list;
5462 conf->retry_read_aligned_list = bi->bi_next;
5481 struct r5conf *conf = mddev->private;
5486 rdev_dec_pending(rdev, conf->mddev);
5490 if (atomic_dec_and_test(&conf->active_aligned_reads))
5491 wake_up(&conf->wait_for_quiescent);
5497 add_bio_to_retry(raid_bi, conf);
5502 struct r5conf *conf = mddev->private;
5514 sector = raid5_compute_sector(conf, raid_bio->bi_iter.bi_sector, 0,
5519 if (r5c_big_stripe_cached(conf, sector))
5522 rdev = rcu_dereference(conf->disks[dd_idx].replacement);
5525 rdev = rcu_dereference(conf->disks[dd_idx].rdev);
5556 if (conf->quiesce == 0) {
5557 atomic_inc(&conf->active_aligned_reads);
5561 if (!did_inc || smp_load_acquire(&conf->quiesce) != 0) {
5565 if (did_inc && atomic_dec_and_test(&conf->active_aligned_reads))
5566 wake_up(&conf->wait_for_quiescent);
5567 spin_lock_irq(&conf->device_lock);
5568 wait_event_lock_irq(conf->wait_for_quiescent, conf->quiesce == 0,
5569 conf->device_lock);
5570 atomic_inc(&conf->active_aligned_reads);
5571 spin_unlock_irq(&conf->device_lock);
5593 struct r5conf *conf = mddev->private;
5594 split = bio_split(raid_bio, sectors, GFP_NOIO, &conf->bio_split);
5616 static struct stripe_head *__get_priority_stripe(struct r5conf *conf, int group)
5617 __must_hold(&conf->device_lock)
5622 bool second_try = !r5c_is_writeback(conf->log) &&
5623 !r5l_log_disk_error(conf);
5624 bool try_loprio = test_bit(R5C_LOG_TIGHT, &conf->cache_state) ||
5625 r5l_log_disk_error(conf);
5630 if (conf->worker_cnt_per_group == 0) {
5631 handle_list = try_loprio ? &conf->loprio_list :
5632 &conf->handle_list;
5634 handle_list = try_loprio ? &conf->worker_groups[group].loprio_list :
5635 &conf->worker_groups[group].handle_list;
5636 wg = &conf->worker_groups[group];
5639 for (i = 0; i < conf->group_cnt; i++) {
5640 handle_list = try_loprio ? &conf->worker_groups[i].loprio_list :
5641 &conf->worker_groups[i].handle_list;
5642 wg = &conf->worker_groups[i];
5651 list_empty(&conf->hold_list) ? "empty" : "busy",
5652 atomic_read(&conf->pending_full_writes), conf->bypass_count);
5657 if (list_empty(&conf->hold_list))
5658 conf->bypass_count = 0;
5660 if (conf->hold_list.next == conf->last_hold)
5661 conf->bypass_count++;
5663 conf->last_hold = conf->hold_list.next;
5664 conf->bypass_count -= conf->bypass_threshold;
5665 if (conf->bypass_count < 0)
5666 conf->bypass_count = 0;
5669 } else if (!list_empty(&conf->hold_list) &&
5670 ((conf->bypass_threshold &&
5671 conf->bypass_count > conf->bypass_threshold) ||
5672 atomic_read(&conf->pending_full_writes) == 0)) {
5674 list_for_each_entry(tmp, &conf->hold_list, lru) {
5675 if (conf->worker_cnt_per_group == 0 ||
5685 conf->bypass_count -= conf->bypass_threshold;
5686 if (conf->bypass_count < 0)
5687 conf->bypass_count = 0;
5721 struct r5conf *conf = mddev->private;
5726 spin_lock_irq(&conf->device_lock);
5742 __release_stripe(conf, sh, &cb->temp_inactive_list[hash]);
5745 spin_unlock_irq(&conf->device_lock);
5747 release_inactive_stripe_list(conf, cb->temp_inactive_list,
5784 struct r5conf *conf = mddev->private;
5797 logical_sector = bi->bi_iter.bi_sector & ~((sector_t)RAID5_STRIPE_SECTORS(conf)-1);
5802 stripe_sectors = conf->chunk_sectors *
5803 (conf->raid_disks - conf->max_degraded);
5808 logical_sector *= conf->chunk_sectors;
5809 last_sector *= conf->chunk_sectors;
5812 logical_sector += RAID5_STRIPE_SECTORS(conf)) {
5816 sh = raid5_get_active_stripe(conf, NULL, logical_sector, 0);
5817 prepare_to_wait(&conf->wait_for_overlap, &w,
5827 for (d = 0; d < conf->raid_disks; d++) {
5839 finish_wait(&conf->wait_for_overlap, &w);
5841 for (d = 0; d < conf->raid_disks; d++) {
5851 if (conf->mddev->bitmap) {
5853 d < conf->raid_disks - conf->max_degraded;
5857 RAID5_STRIPE_SECTORS(conf),
5859 sh->bm_seq = conf->seq_flush + 1;
5866 atomic_inc(&conf->preread_active_stripes);
5887 static bool stripe_ahead_of_reshape(struct mddev *mddev, struct r5conf *conf,
5902 spin_lock_irq(&conf->device_lock);
5905 conf->reshape_progress))
5909 spin_unlock_irq(&conf->device_lock);
5914 static int add_all_stripe_bios(struct r5conf *conf,
5955 RAID5_STRIPE_SHIFT(conf), ctx->sectors_to_do);
5977 struct r5conf *conf, struct stripe_request_ctx *ctx,
5987 seq = read_seqcount_begin(&conf->gen_lock);
5989 if (unlikely(conf->reshape_progress != MaxSector)) {
5999 spin_lock_irq(&conf->device_lock);
6001 conf->reshape_progress)) {
6005 conf->reshape_safe)) {
6006 spin_unlock_irq(&conf->device_lock);
6011 spin_unlock_irq(&conf->device_lock);
6014 new_sector = raid5_compute_sector(conf, logical_sector, previous,
6023 sh = raid5_get_active_stripe(conf, ctx, new_sector, flags);
6031 stripe_ahead_of_reshape(mddev, conf, sh)) {
6044 if (read_seqcount_retry(&conf->gen_lock, seq)) {
6051 !add_all_stripe_bios(conf, ctx, sh, bi, rw, previous)) {
6062 stripe_add_to_batch_list(conf, sh, ctx->batch_last);
6080 atomic_inc(&conf->preread_active_stripes);
6103 static sector_t raid5_bio_lowest_chunk_sector(struct r5conf *conf,
6106 int sectors_per_chunk = conf->chunk_sectors;
6107 int raid_disks = conf->raid_disks;
6111 sector_t r_sector = bi->bi_iter.bi_sector & ~((sector_t)RAID5_STRIPE_SECTORS(conf)-1);
6115 sector = raid5_compute_sector(conf, r_sector, 0, &dd_idx, &sh);
6134 struct r5conf *conf = mddev->private;
6142 int ret = log_handle_flush_request(conf, bi);
6178 logical_sector = bi->bi_iter.bi_sector & ~((sector_t)RAID5_STRIPE_SECTORS(conf)-1);
6184 RAID5_STRIPE_SECTORS(conf));
6192 (conf->reshape_progress != MaxSector) &&
6193 !ahead_of_reshape(mddev, logical_sector, conf->reshape_progress) &&
6194 ahead_of_reshape(mddev, logical_sector, conf->reshape_safe)) {
6209 if (likely(conf->reshape_progress == MaxSector))
6210 logical_sector = raid5_bio_lowest_chunk_sector(conf, bi);
6211 s = (logical_sector - ctx.first_sector) >> RAID5_STRIPE_SHIFT(conf);
6213 add_wait_queue(&conf->wait_for_overlap, &wait);
6215 res = make_stripe_request(mddev, conf, &ctx, logical_sector,
6246 (s << RAID5_STRIPE_SHIFT(conf));
6248 remove_wait_queue(&conf->wait_for_overlap, &wait);
6272 struct r5conf *conf = mddev->private;
6276 int raid_disks = conf->previous_raid_disks;
6277 int data_disks = raid_disks - conf->max_degraded;
6278 int new_data_disks = conf->raid_disks - conf->max_degraded;
6290 conf->reshape_progress < raid5_size(mddev, 0, 0)) {
6292 - conf->reshape_progress;
6294 conf->reshape_progress == MaxSector) {
6298 conf->reshape_progress > 0)
6299 sector_nr = conf->reshape_progress;
6315 reshape_sectors = max(conf->chunk_sectors, conf->prev_chunk_sectors);
6323 writepos = conf->reshape_progress;
6325 readpos = conf->reshape_progress;
6327 safepos = conf->reshape_safe;
6348 BUG_ON(conf->reshape_progress == 0);
6379 if (conf->min_offset_diff < 0) {
6380 safepos += -conf->min_offset_diff;
6381 readpos += -conf->min_offset_diff;
6383 writepos += conf->min_offset_diff;
6388 time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) {
6390 wait_event(conf->wait_for_overlap,
6391 atomic_read(&conf->reshape_stripes)==0
6393 if (atomic_read(&conf->reshape_stripes) != 0)
6395 mddev->reshape_position = conf->reshape_progress;
6406 conf->reshape_checkpoint = jiffies;
6413 spin_lock_irq(&conf->device_lock);
6414 conf->reshape_safe = mddev->reshape_position;
6415 spin_unlock_irq(&conf->device_lock);
6416 wake_up(&conf->wait_for_overlap);
6421 for (i = 0; i < reshape_sectors; i += RAID5_STRIPE_SECTORS(conf)) {
6424 sh = raid5_get_active_stripe(conf, NULL, stripe_addr+i,
6427 atomic_inc(&conf->reshape_stripes);
6435 if (conf->level == 6 &&
6443 memset(page_address(sh->dev[j].page), 0, RAID5_STRIPE_SIZE(conf));
6453 spin_lock_irq(&conf->device_lock);
6455 conf->reshape_progress -= reshape_sectors * new_data_disks;
6457 conf->reshape_progress += reshape_sectors * new_data_disks;
6458 spin_unlock_irq(&conf->device_lock);
6465 raid5_compute_sector(conf, stripe_addr*(new_data_disks),
6468 raid5_compute_sector(conf, ((stripe_addr+reshape_sectors)
6474 sh = raid5_get_active_stripe(conf, NULL, first_sector,
6479 first_sector += RAID5_STRIPE_SECTORS(conf);
6499 wait_event(conf->wait_for_overlap,
6500 atomic_read(&conf->reshape_stripes) == 0
6502 if (atomic_read(&conf->reshape_stripes) != 0)
6504 mddev->reshape_position = conf->reshape_progress;
6514 conf->reshape_checkpoint = jiffies;
6522 spin_lock_irq(&conf->device_lock);
6523 conf->reshape_safe = mddev->reshape_position;
6524 spin_unlock_irq(&conf->device_lock);
6525 wake_up(&conf->wait_for_overlap);
6535 struct r5conf *conf = mddev->private;
6546 end_reshape(conf);
6554 conf->fullsync = 0;
6561 wait_event(conf->wait_for_overlap, conf->quiesce != 2);
6576 if (mddev->degraded >= conf->max_degraded &&
6583 !conf->fullsync &&
6585 sync_blocks >= RAID5_STRIPE_SECTORS(conf)) {
6587 do_div(sync_blocks, RAID5_STRIPE_SECTORS(conf));
6590 return sync_blocks * RAID5_STRIPE_SECTORS(conf);
6595 sh = raid5_get_active_stripe(conf, NULL, sector_nr,
6598 sh = raid5_get_active_stripe(conf, NULL, sector_nr, 0);
6609 for (i = 0; i < conf->raid_disks; i++) {
6610 struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev);
6624 return RAID5_STRIPE_SECTORS(conf);
6627 static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio,
6647 ~((sector_t)RAID5_STRIPE_SECTORS(conf)-1);
6648 sector = raid5_compute_sector(conf, logical_sector,
6653 logical_sector += RAID5_STRIPE_SECTORS(conf),
6654 sector += RAID5_STRIPE_SECTORS(conf),
6661 sh = raid5_get_active_stripe(conf, NULL, sector,
6665 conf->retry_read_aligned = raid_bio;
6666 conf->retry_read_offset = scnt;
6672 conf->retry_read_aligned = raid_bio;
6673 conf->retry_read_offset = scnt;
6685 if (atomic_dec_and_test(&conf->active_aligned_reads))
6686 wake_up(&conf->wait_for_quiescent);
6690 static int handle_active_stripes(struct r5conf *conf, int group,
6693 __must_hold(&conf->device_lock)
6700 (sh = __get_priority_stripe(conf, group)) != NULL)
6708 spin_unlock_irq(&conf->device_lock);
6709 log_flush_stripe_to_raid(conf);
6710 spin_lock_irq(&conf->device_lock);
6715 spin_unlock_irq(&conf->device_lock);
6717 release_inactive_stripe_list(conf, temp_inactive_list,
6720 r5l_flush_stripe_to_raid(conf->log);
6722 spin_lock_irq(&conf->device_lock);
6728 log_write_stripe_run(conf);
6732 spin_lock_irq(&conf->device_lock);
6735 __release_stripe(conf, batch[i], &temp_inactive_list[hash]);
6744 struct r5conf *conf = group->conf;
6745 struct mddev *mddev = conf->mddev;
6746 int group_id = group - conf->worker_groups;
6754 spin_lock_irq(&conf->device_lock);
6758 released = release_stripe_list(conf, worker->temp_inactive_list);
6760 batch_size = handle_active_stripes(conf, group_id, worker,
6768 conf->device_lock);
6772 spin_unlock_irq(&conf->device_lock);
6774 flush_deferred_bios(conf);
6776 r5l_flush_stripe_to_raid(conf->log);
6794 struct r5conf *conf = mddev->private;
6804 spin_lock_irq(&conf->device_lock);
6810 released = release_stripe_list(conf, conf->temp_inactive_list);
6812 clear_bit(R5_DID_ALLOC, &conf->cache_state);
6815 !list_empty(&conf->bitmap_list)) {
6817 conf->seq_flush++;
6818 spin_unlock_irq(&conf->device_lock);
6820 spin_lock_irq(&conf->device_lock);
6821 conf->seq_write = conf->seq_flush;
6822 activate_bit_delay(conf, conf->temp_inactive_list);
6824 raid5_activate_delayed(conf);
6826 while ((bio = remove_bio_from_retry(conf, &offset))) {
6828 spin_unlock_irq(&conf->device_lock);
6829 ok = retry_aligned_read(conf, bio, offset);
6830 spin_lock_irq(&conf->device_lock);
6836 batch_size = handle_active_stripes(conf, ANY_GROUP, NULL,
6837 conf->temp_inactive_list);
6843 spin_unlock_irq(&conf->device_lock);
6845 spin_lock_irq(&conf->device_lock);
6857 conf->device_lock);
6861 spin_unlock_irq(&conf->device_lock);
6862 if (test_and_clear_bit(R5_ALLOC_MORE, &conf->cache_state) &&
6863 mutex_trylock(&conf->cache_size_mutex)) {
6864 grow_one_stripe(conf, __GFP_NOWARN);
6868 set_bit(R5_DID_ALLOC, &conf->cache_state);
6869 mutex_unlock(&conf->cache_size_mutex);
6872 flush_deferred_bios(conf);
6874 r5l_flush_stripe_to_raid(conf->log);
6885 struct r5conf *conf;
6888 conf = mddev->private;
6889 if (conf)
6890 ret = sprintf(page, "%d\n", conf->min_nr_stripes);
6899 struct r5conf *conf = mddev->private;
6904 WRITE_ONCE(conf->min_nr_stripes, size);
6905 mutex_lock(&conf->cache_size_mutex);
6906 while (size < conf->max_nr_stripes &&
6907 drop_one_stripe(conf))
6909 mutex_unlock(&conf->cache_size_mutex);
6913 mutex_lock(&conf->cache_size_mutex);
6914 while (size > conf->max_nr_stripes)
6915 if (!grow_one_stripe(conf, GFP_KERNEL)) {
6916 WRITE_ONCE(conf->min_nr_stripes, conf->max_nr_stripes);
6920 mutex_unlock(&conf->cache_size_mutex);
6929 struct r5conf *conf;
6940 conf = mddev->private;
6941 if (!conf)
6958 struct r5conf *conf = mddev->private;
6959 if (conf)
6960 return sprintf(page, "%d\n", conf->rmw_level);
6968 struct r5conf *conf = mddev->private;
6971 if (!conf)
6988 conf->rmw_level = new;
7000 struct r5conf *conf;
7004 conf = mddev->private;
7005 if (conf)
7006 ret = sprintf(page, "%lu\n", RAID5_STRIPE_SIZE(conf));
7015 struct r5conf *conf;
7039 conf = mddev->private;
7040 if (!conf) {
7045 if (new == conf->stripe_size)
7049 conf->stripe_size, new);
7060 mutex_lock(&conf->cache_size_mutex);
7061 size = conf->max_nr_stripes;
7063 shrink_stripes(conf);
7065 conf->stripe_size = new;
7066 conf->stripe_shift = ilog2(new) - 9;
7067 conf->stripe_sectors = new >> 9;
7068 if (grow_stripes(conf, size)) {
7073 mutex_unlock(&conf->cache_size_mutex);
7095 struct r5conf *conf;
7098 conf = mddev->private;
7099 if (conf)
7100 ret = sprintf(page, "%d\n", conf->bypass_threshold);
7108 struct r5conf *conf;
7120 conf = mddev->private;
7121 if (!conf)
7123 else if (new > conf->min_nr_stripes)
7126 conf->bypass_threshold = new;
7140 struct r5conf *conf;
7143 conf = mddev->private;
7144 if (conf)
7145 ret = sprintf(page, "%d\n", conf->skip_copy);
7153 struct r5conf *conf;
7166 conf = mddev->private;
7167 if (!conf)
7169 else if (new != conf->skip_copy) {
7173 conf->skip_copy = new;
7192 struct r5conf *conf = mddev->private;
7193 if (conf)
7194 return sprintf(page, "%d\n", atomic_read(&conf->active_stripes));
7205 struct r5conf *conf;
7208 conf = mddev->private;
7209 if (conf)
7210 ret = sprintf(page, "%d\n", conf->worker_cnt_per_group);
7215 static int alloc_thread_groups(struct r5conf *conf, int cnt,
7221 struct r5conf *conf;
7238 conf = mddev->private;
7239 if (!conf)
7241 else if (new != conf->worker_cnt_per_group) {
7244 old_groups = conf->worker_groups;
7248 err = alloc_thread_groups(conf, new, &group_cnt, &new_groups);
7250 spin_lock_irq(&conf->device_lock);
7251 conf->group_cnt = group_cnt;
7252 conf->worker_cnt_per_group = new;
7253 conf->worker_groups = new_groups;
7254 spin_unlock_irq(&conf->device_lock);
7289 static int alloc_thread_groups(struct r5conf *conf, int cnt, int *group_cnt,
7318 group->conf = conf;
7334 static void free_thread_groups(struct r5conf *conf)
7336 if (conf->worker_groups)
7337 kfree(conf->worker_groups[0].workers);
7338 kfree(conf->worker_groups);
7339 conf->worker_groups = NULL;
7345 struct r5conf *conf = mddev->private;
7351 raid_disks = min(conf->raid_disks, conf->previous_raid_disks);
7353 sectors &= ~((sector_t)conf->chunk_sectors - 1);
7354 sectors &= ~((sector_t)conf->prev_chunk_sectors - 1);
7355 return sectors * (raid_disks - conf->max_degraded);
7358 static void free_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu)
7366 static int alloc_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu)
7368 if (conf->level == 6 && !percpu->spare_page) {
7375 max(conf->raid_disks,
7376 conf->previous_raid_disks),
7377 max(conf->chunk_sectors,
7378 conf->prev_chunk_sectors)
7379 / RAID5_STRIPE_SECTORS(conf))) {
7380 free_scratch_buffer(conf, percpu);
7390 struct r5conf *conf = hlist_entry_safe(node, struct r5conf, node);
7392 free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
7396 static void raid5_free_percpu(struct r5conf *conf)
7398 if (!conf->percpu)
7401 cpuhp_state_remove_instance(CPUHP_MD_RAID5_PREPARE, &conf->node);
7402 free_percpu(conf->percpu);
7405 static void free_conf(struct r5conf *conf)
7409 log_exit(conf);
7411 unregister_shrinker(&conf->shrinker);
7412 free_thread_groups(conf);
7413 shrink_stripes(conf);
7414 raid5_free_percpu(conf);
7415 for (i = 0; i < conf->pool_size; i++)
7416 if (conf->disks[i].extra_page)
7417 put_page(conf->disks[i].extra_page);
7418 kfree(conf->disks);
7419 bioset_exit(&conf->bio_split);
7420 kfree(conf->stripe_hashtbl);
7421 kfree(conf->pending_data);
7422 kfree(conf);
7427 struct r5conf *conf = hlist_entry_safe(node, struct r5conf, node);
7428 struct raid5_percpu *percpu = per_cpu_ptr(conf->percpu, cpu);
7430 if (alloc_scratch_buffer(conf, percpu)) {
7438 static int raid5_alloc_percpu(struct r5conf *conf)
7442 conf->percpu = alloc_percpu(struct raid5_percpu);
7443 if (!conf->percpu)
7446 err = cpuhp_state_add_instance(CPUHP_MD_RAID5_PREPARE, &conf->node);
7448 conf->scribble_disks = max(conf->raid_disks,
7449 conf->previous_raid_disks);
7450 conf->scribble_sectors = max(conf->chunk_sectors,
7451 conf->prev_chunk_sectors);
7459 struct r5conf *conf = container_of(shrink, struct r5conf, shrinker);
7462 if (mutex_trylock(&conf->cache_size_mutex)) {
7465 conf->max_nr_stripes > conf->min_nr_stripes) {
7466 if (drop_one_stripe(conf) == 0) {
7472 mutex_unlock(&conf->cache_size_mutex);
7480 struct r5conf *conf = container_of(shrink, struct r5conf, shrinker);
7481 int max_stripes = READ_ONCE(conf->max_nr_stripes);
7482 int min_stripes = READ_ONCE(conf->min_nr_stripes);
7492 struct r5conf *conf;
7531 conf = kzalloc(sizeof(struct r5conf), GFP_KERNEL);
7532 if (conf == NULL)
7536 conf->stripe_size = DEFAULT_STRIPE_SIZE;
7537 conf->stripe_shift = ilog2(DEFAULT_STRIPE_SIZE) - 9;
7538 conf->stripe_sectors = DEFAULT_STRIPE_SIZE >> 9;
7540 INIT_LIST_HEAD(&conf->free_list);
7541 INIT_LIST_HEAD(&conf->pending_list);
7542 conf->pending_data = kcalloc(PENDING_IO_MAX,
7545 if (!conf->pending_data)
7548 list_add(&conf->pending_data[i].sibling, &conf->free_list);
7550 if (!alloc_thread_groups(conf, 0, &group_cnt, &new_group)) {
7551 conf->group_cnt = group_cnt;
7552 conf->worker_cnt_per_group = 0;
7553 conf->worker_groups = new_group;
7556 spin_lock_init(&conf->device_lock);
7557 seqcount_spinlock_init(&conf->gen_lock, &conf->device_lock);
7558 mutex_init(&conf->cache_size_mutex);
7560 init_waitqueue_head(&conf->wait_for_quiescent);
7561 init_waitqueue_head(&conf->wait_for_stripe);
7562 init_waitqueue_head(&conf->wait_for_overlap);
7563 INIT_LIST_HEAD(&conf->handle_list);
7564 INIT_LIST_HEAD(&conf->loprio_list);
7565 INIT_LIST_HEAD(&conf->hold_list);
7566 INIT_LIST_HEAD(&conf->delayed_list);
7567 INIT_LIST_HEAD(&conf->bitmap_list);
7568 init_llist_head(&conf->released_stripes);
7569 atomic_set(&conf->active_stripes, 0);
7570 atomic_set(&conf->preread_active_stripes, 0);
7571 atomic_set(&conf->active_aligned_reads, 0);
7572 spin_lock_init(&conf->pending_bios_lock);
7573 conf->batch_bio_dispatch = true;
7578 conf->batch_bio_dispatch = false;
7583 conf->bypass_threshold = BYPASS_THRESHOLD;
7584 conf->recovery_disabled = mddev->recovery_disabled - 1;
7586 conf->raid_disks = mddev->raid_disks;
7588 conf->previous_raid_disks = mddev->raid_disks;
7590 conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks;
7591 max_disks = max(conf->raid_disks, conf->previous_raid_disks);
7593 conf->disks = kcalloc(max_disks, sizeof(struct disk_info),
7596 if (!conf->disks)
7600 conf->disks[i].extra_page = alloc_page(GFP_KERNEL);
7601 if (!conf->disks[i].extra_page)
7605 ret = bioset_init(&conf->bio_split, BIO_POOL_SIZE, 0, 0);
7608 conf->mddev = mddev;
7611 conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL);
7612 if (!conf->stripe_hashtbl)
7620 spin_lock_init(conf->hash_locks);
7622 spin_lock_init(conf->hash_locks + i);
7625 INIT_LIST_HEAD(conf->inactive_list + i);
7628 INIT_LIST_HEAD(conf->temp_inactive_list + i);
7630 atomic_set(&conf->r5c_cached_full_stripes, 0);
7631 INIT_LIST_HEAD(&conf->r5c_full_stripe_list);
7632 atomic_set(&conf->r5c_cached_partial_stripes, 0);
7633 INIT_LIST_HEAD(&conf->r5c_partial_stripe_list);
7634 atomic_set(&conf->r5c_flushing_full_stripes, 0);
7635 atomic_set(&conf->r5c_flushing_partial_stripes, 0);
7637 conf->level = mddev->new_level;
7638 conf->chunk_sectors = mddev->new_chunk_sectors;
7639 ret = raid5_alloc_percpu(conf);
7651 disk = conf->disks + raid_disk;
7668 conf->fullsync = 1;
7671 conf->level = mddev->new_level;
7672 if (conf->level == 6) {
7673 conf->max_degraded = 2;
7675 conf->rmw_level = PARITY_ENABLE_RMW;
7677 conf->rmw_level = PARITY_DISABLE_RMW;
7679 conf->max_degraded = 1;
7680 conf->rmw_level = PARITY_ENABLE_RMW;
7682 conf->algorithm = mddev->new_layout;
7683 conf->reshape_progress = mddev->reshape_position;
7684 if (conf->reshape_progress != MaxSector) {
7685 conf->prev_chunk_sectors = mddev->chunk_sectors;
7686 conf->prev_algo = mddev->layout;
7688 conf->prev_chunk_sectors = conf->chunk_sectors;
7689 conf->prev_algo = conf->algorithm;
7692 conf->min_nr_stripes = NR_STRIPES;
7695 ((mddev->chunk_sectors << 9) / RAID5_STRIPE_SIZE(conf)) * 4,
7696 ((mddev->new_chunk_sectors << 9) / RAID5_STRIPE_SIZE(conf)) * 4);
7697 conf->min_nr_stripes = max(NR_STRIPES, stripes);
7698 if (conf->min_nr_stripes != NR_STRIPES)
7700 mdname(mddev), conf->min_nr_stripes);
7702 memory = conf->min_nr_stripes * (sizeof(struct stripe_head) +
7704 atomic_set(&conf->empty_inactive_list_nr, NR_STRIPE_HASH_LOCKS);
7705 if (grow_stripes(conf, conf->min_nr_stripes)) {
7717 conf->shrinker.seeks = DEFAULT_SEEKS * conf->raid_disks * 4;
7718 conf->shrinker.scan_objects = raid5_cache_scan;
7719 conf->shrinker.count_objects = raid5_cache_count;
7720 conf->shrinker.batch = 128;
7721 conf->shrinker.flags = 0;
7722 ret = register_shrinker(&conf->shrinker, "md-raid5:%s", mdname(mddev));
7730 rcu_assign_pointer(conf->thread,
7732 if (!conf->thread) {
7739 return conf;
7742 if (conf)
7743 free_conf(conf);
7773 static void raid5_set_io_opt(struct r5conf *conf)
7775 blk_queue_io_opt(conf->mddev->queue, (conf->chunk_sectors << 9) *
7776 (conf->raid_disks - conf->max_degraded));
7781 struct r5conf *conf;
7921 conf = setup_conf(mddev);
7923 conf = mddev->private;
7925 if (IS_ERR(conf))
7926 return PTR_ERR(conf);
7938 conf->min_offset_diff = min_offset_diff;
7939 rcu_assign_pointer(mddev->thread, conf->thread);
7940 rcu_assign_pointer(conf->thread, NULL);
7941 mddev->private = conf;
7943 for (i = 0; i < conf->raid_disks && conf->previous_raid_disks;
7945 rdev = rdev_mdlock_deref(mddev, conf->disks[i].rdev);
7946 if (!rdev && conf->disks[i].replacement) {
7949 conf->disks[i].replacement);
7950 conf->disks[i].replacement = NULL;
7952 rcu_assign_pointer(conf->disks[i].rdev, rdev);
7956 if (rcu_access_pointer(conf->disks[i].replacement) &&
7957 conf->reshape_progress != MaxSector) {
7980 conf->algorithm,
7981 conf->raid_disks,
7982 conf->max_degraded))
7986 conf->prev_algo,
7987 conf->previous_raid_disks,
7988 conf->max_degraded))
7996 mddev->degraded = raid5_calc_degraded(conf);
7998 if (has_failed(conf)) {
8000 mdname(mddev), mddev->degraded, conf->raid_disks);
8024 mdname(mddev), conf->level,
8028 print_raid5_conf(conf);
8030 if (conf->reshape_progress != MaxSector) {
8031 conf->reshape_safe = conf->reshape_progress;
8032 atomic_set(&conf->reshape_stripes, 0);
8058 int data_disks = conf->previous_raid_disks - conf->max_degraded;
8064 raid5_set_io_opt(conf);
8108 RAID5_MAX_REQ_STRIPES << RAID5_STRIPE_SHIFT(conf));
8114 if (log_init(conf, journal_dev, raid5_has_ppl(conf)))
8120 print_raid5_conf(conf);
8121 free_conf(conf);
8129 struct r5conf *conf = priv;
8131 free_conf(conf);
8137 struct r5conf *conf = mddev->private;
8141 conf->chunk_sectors / 2, mddev->layout);
8142 seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded);
8144 for (i = 0; i < conf->raid_disks; i++) {
8145 struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev);
8152 static void print_raid5_conf (struct r5conf *conf)
8157 pr_debug("RAID conf printout:\n");
8158 if (!conf) {
8159 pr_debug("(conf==NULL)\n");
8162 pr_debug(" --- level:%d rd:%d wd:%d\n", conf->level,
8163 conf->raid_disks,
8164 conf->raid_disks - conf->mddev->degraded);
8167 for (i = 0; i < conf->raid_disks; i++) {
8168 rdev = rcu_dereference(conf->disks[i].rdev);
8180 struct r5conf *conf = mddev->private;
8185 for (i = 0; i < conf->raid_disks; i++) {
8186 rdev = rdev_mdlock_deref(mddev, conf->disks[i].rdev);
8188 conf->disks[i].replacement);
8215 spin_lock_irqsave(&conf->device_lock, flags);
8216 mddev->degraded = raid5_calc_degraded(conf);
8217 spin_unlock_irqrestore(&conf->device_lock, flags);
8218 print_raid5_conf(conf);
8224 struct r5conf *conf = mddev->private;
8231 print_raid5_conf(conf);
8232 if (test_bit(Journal, &rdev->flags) && conf->log) {
8239 if (atomic_read(&conf->active_stripes) ||
8240 atomic_read(&conf->r5c_cached_full_stripes) ||
8241 atomic_read(&conf->r5c_cached_partial_stripes)) {
8244 log_exit(conf);
8247 if (unlikely(number >= conf->pool_size))
8249 p = conf->disks + number;
8257 if (number >= conf->raid_disks &&
8258 conf->reshape_progress == MaxSector)
8270 mddev->recovery_disabled != conf->recovery_disabled &&
8271 !has_failed(conf) &&
8274 number < conf->raid_disks) {
8289 err = log_modify(conf, rdev, false);
8305 err = log_modify(conf, tmp, true);
8311 print_raid5_conf(conf);
8317 struct r5conf *conf = mddev->private;
8323 int last = conf->raid_disks - 1;
8326 if (conf->log)
8334 ret = log_init(conf, rdev, false);
8338 ret = r5l_start(conf->log);
8344 if (mddev->recovery_disabled == conf->recovery_disabled)
8347 if (rdev->saved_raid_disk < 0 && has_failed(conf))
8360 conf->disks[rdev->saved_raid_disk].rdev == NULL)
8364 p = conf->disks + disk;
8369 conf->fullsync = 1;
8372 err = log_modify(conf, rdev, true);
8378 p = conf->disks + disk;
8387 conf->fullsync = 1;
8393 print_raid5_conf(conf);
8407 struct r5conf *conf = mddev->private;
8409 if (raid5_has_log(conf) || raid5_has_ppl(conf))
8411 sectors &= ~((sector_t)conf->chunk_sectors - 1);
8442 struct r5conf *conf = mddev->private;
8443 if (((mddev->chunk_sectors << 9) / RAID5_STRIPE_SIZE(conf)) * 4
8444 > conf->min_nr_stripes ||
8445 ((mddev->new_chunk_sectors << 9) / RAID5_STRIPE_SIZE(conf)) * 4
8446 > conf->min_nr_stripes) {
8450 / RAID5_STRIPE_SIZE(conf))*4);
8458 struct r5conf *conf = mddev->private;
8460 if (raid5_has_log(conf) || raid5_has_ppl(conf))
8466 if (has_failed(conf))
8486 if (resize_chunks(conf,
8487 conf->previous_raid_disks
8494 if (conf->previous_raid_disks + mddev->delta_disks <= conf->pool_size)
8496 return resize_stripes(conf, (conf->previous_raid_disks
8502 struct r5conf *conf = mddev->private;
8514 if (has_failed(conf))
8520 for (i = 0; i < conf->raid_disks; i++)
8521 if (rdev_mdlock_deref(mddev, conf->disks[i].replacement))
8530 if (spares - mddev->degraded < mddev->delta_disks - conf->max_degraded)
8540 if (raid5_size(mddev, 0, conf->raid_disks + mddev->delta_disks)
8547 atomic_set(&conf->reshape_stripes, 0);
8548 spin_lock_irq(&conf->device_lock);
8549 write_seqcount_begin(&conf->gen_lock);
8550 conf->previous_raid_disks = conf->raid_disks;
8551 conf->raid_disks += mddev->delta_disks;
8552 conf->prev_chunk_sectors = conf->chunk_sectors;
8553 conf->chunk_sectors = mddev->new_chunk_sectors;
8554 conf->prev_algo = conf->algorithm;
8555 conf->algorithm = mddev->new_layout;
8556 conf->generation++;
8562 conf->reshape_progress = raid5_size(mddev, 0, 0);
8564 conf->reshape_progress = 0;
8565 conf->reshape_safe = conf->reshape_progress;
8566 write_seqcount_end(&conf->gen_lock);
8567 spin_unlock_irq(&conf->device_lock);
8589 >= conf->previous_raid_disks)
8597 } else if (rdev->raid_disk >= conf->previous_raid_disks
8607 spin_lock_irqsave(&conf->device_lock, flags);
8608 mddev->degraded = raid5_calc_degraded(conf);
8609 spin_unlock_irqrestore(&conf->device_lock, flags);
8611 mddev->raid_disks = conf->raid_disks;
8612 mddev->reshape_position = conf->reshape_progress;
8624 spin_lock_irq(&conf->device_lock);
8625 write_seqcount_begin(&conf->gen_lock);
8626 mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks;
8628 conf->chunk_sectors = conf->prev_chunk_sectors;
8629 mddev->new_layout = conf->algorithm = conf->prev_algo;
8633 conf->generation --;
8634 conf->reshape_progress = MaxSector;
8636 write_seqcount_end(&conf->gen_lock);
8637 spin_unlock_irq(&conf->device_lock);
8640 conf->reshape_checkpoint = jiffies;
8647 * changes needed in 'conf'
8649 static void end_reshape(struct r5conf *conf)
8652 if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {
8655 spin_lock_irq(&conf->device_lock);
8656 conf->previous_raid_disks = conf->raid_disks;
8657 md_finish_reshape(conf->mddev);
8659 conf->reshape_progress = MaxSector;
8660 conf->mddev->reshape_position = MaxSector;
8661 rdev_for_each(rdev, conf->mddev)
8666 spin_unlock_irq(&conf->device_lock);
8667 wake_up(&conf->wait_for_overlap);
8669 if (conf->mddev->queue)
8670 raid5_set_io_opt(conf);
8679 struct r5conf *conf = mddev->private;
8686 spin_lock_irq(&conf->device_lock);
8687 mddev->degraded = raid5_calc_degraded(conf);
8688 spin_unlock_irq(&conf->device_lock);
8689 for (d = conf->raid_disks ;
8690 d < conf->raid_disks - mddev->delta_disks;
8693 conf->disks[d].rdev);
8697 conf->disks[d].replacement);
8702 mddev->layout = conf->algorithm;
8703 mddev->chunk_sectors = conf->chunk_sectors;
8712 struct r5conf *conf = mddev->private;
8716 lock_all_device_hash_locks_irq(conf);
8720 r5c_flush_cache(conf, INT_MAX);
8724 smp_store_release(&conf->quiesce, 2);
8725 wait_event_cmd(conf->wait_for_quiescent,
8726 atomic_read(&conf->active_stripes) == 0 &&
8727 atomic_read(&conf->active_aligned_reads) == 0,
8728 unlock_all_device_hash_locks_irq(conf),
8729 lock_all_device_hash_locks_irq(conf));
8730 conf->quiesce = 1;
8731 unlock_all_device_hash_locks_irq(conf);
8733 wake_up(&conf->wait_for_overlap);
8736 lock_all_device_hash_locks_irq(conf);
8737 conf->quiesce = 0;
8738 wake_up(&conf->wait_for_quiescent);
8739 wake_up(&conf->wait_for_overlap);
8740 unlock_all_device_hash_locks_irq(conf);
8742 log_quiesce(conf, quiesce);
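The quiesce matches at 8712-8742 pair with the aligned-read matches at 5556-5571: raid5_quiesce() publishes conf->quiesce = 2 with smp_store_release() and then waits for active_stripes and active_aligned_reads to drain, while the read path optimistically bumps active_aligned_reads when quiesce reads as zero, re-checks it with smp_load_acquire(), and backs the increment out if a quiesce has begun. A condensed paraphrase of the reader side, with the surrounding bio setup omitted:

	bool did_inc = false;

	if (conf->quiesce == 0) {
		atomic_inc(&conf->active_aligned_reads);
		did_inc = true;
	}
	/* Re-check after the increment: if a quiesce began concurrently,
	 * undo the increment and wait for it to finish (lines 5561-5570).
	 */
	if (!did_inc || smp_load_acquire(&conf->quiesce) != 0) {
		if (did_inc && atomic_dec_and_test(&conf->active_aligned_reads))
			wake_up(&conf->wait_for_quiescent);
		spin_lock_irq(&conf->device_lock);
		wait_event_lock_irq(conf->wait_for_quiescent, conf->quiesce == 0,
				    conf->device_lock);
		atomic_inc(&conf->active_aligned_reads);
		spin_unlock_irq(&conf->device_lock);
	}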
8843 struct r5conf *conf = mddev->private;
8863 conf->algorithm = mddev->new_layout;
8867 conf->chunk_sectors = new_chunk ;
8986 struct r5conf *conf;
8992 conf = mddev->private;
8993 if (!conf) {
9000 if (!raid5_has_ppl(conf) && conf->level == 5) {
9001 err = log_init(conf, NULL, true);
9003 err = resize_stripes(conf, conf->pool_size);
9006 log_exit(conf);
9013 if (raid5_has_ppl(conf)) {
9015 log_exit(conf);
9017 err = resize_stripes(conf, conf->pool_size);
9018 } else if (test_bit(MD_HAS_JOURNAL, &conf->mddev->flags) &&
9019 r5l_log_disk_error(conf)) {
9051 struct r5conf *conf = mddev->private;
9053 return r5l_start(conf->log);
9058 struct r5conf *conf = mddev->private;
9069 wake_up(&conf->wait_for_overlap);