Lines matching refs:conf (cross-reference of struct r10conf uses in drivers/md/raid10.c)

67 static void allow_barrier(struct r10conf *conf);
68 static void lower_barrier(struct r10conf *conf);
69 static int _enough(struct r10conf *conf, int previous, int ignore);
70 static int enough(struct r10conf *conf, int ignore);
75 static void end_reshape(struct r10conf *conf);
93 struct r10conf *conf = data;
94 int size = offsetof(struct r10bio, devs[conf->copies]);
118 struct r10conf *conf = data;
125 r10_bio = r10bio_pool_alloc(gfp_flags, conf);
129 if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery) ||
130 test_bit(MD_RECOVERY_RESHAPE, &conf->mddev->recovery))
131 nalloc = conf->copies; /* resync */
136 if (!conf->have_replacement)
152 if (!conf->have_replacement)
174 &conf->mddev->recovery)) {
206 rbio_pool_free(r10_bio, conf);
212 struct r10conf *conf = data;
217 for (j = conf->copies; j--; ) {
234 rbio_pool_free(r10bio, conf);
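The r10bio_pool_alloc()/r10buf_pool_alloc() references above size each bio record with offsetof(struct r10bio, devs[conf->copies]), i.e. a header plus a per-copy tail array allocated as one block. A minimal user-space sketch of that sizing idiom, using hypothetical demo_* stand-in types rather than the kernel structs:

#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

/* Hypothetical stand-ins for the kernel structs; only the shape matters. */
struct dev_slot {
    int devnum;
    void *bio;
};

struct demo_r10bio {
    long sector;
    int copies;
    struct dev_slot devs[];      /* flexible array member, one slot per copy */
};

static struct demo_r10bio *demo_alloc(int copies)
{
    /* Header plus 'copies' tail entries in one allocation; equivalent to the
     * kernel's offsetof(struct r10bio, devs[conf->copies]) sizing. */
    size_t size = offsetof(struct demo_r10bio, devs) +
                  (size_t)copies * sizeof(struct dev_slot);
    struct demo_r10bio *b = calloc(1, size);

    if (b)
        b->copies = copies;
    return b;
}

int main(void)
{
    struct demo_r10bio *b = demo_alloc(2);    /* e.g. two copies (near=2) */

    if (b)
        printf("allocated room for %d dev slots\n", b->copies);
    free(b);
    return 0;
}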
237 static void put_all_bios(struct r10conf *conf, struct r10bio *r10_bio)
241 for (i = 0; i < conf->copies; i++) {
255 struct r10conf *conf = r10_bio->mddev->private;
257 put_all_bios(conf, r10_bio);
258 mempool_free(r10_bio, &conf->r10bio_pool);
263 struct r10conf *conf = r10_bio->mddev->private;
265 mempool_free(r10_bio, &conf->r10buf_pool);
267 lower_barrier(conf);
274 struct r10conf *conf = mddev->private;
276 spin_lock_irqsave(&conf->device_lock, flags);
277 list_add(&r10_bio->retry_list, &conf->retry_list);
278 conf->nr_queued ++;
279 spin_unlock_irqrestore(&conf->device_lock, flags);
282 wake_up(&conf->wait_barrier);
295 struct r10conf *conf = r10_bio->mddev->private;
305 allow_barrier(conf);
315 struct r10conf *conf = r10_bio->mddev->private;
317 conf->mirrors[r10_bio->devs[slot].devnum].head_position =
324 static int find_bio_disk(struct r10conf *conf, struct r10bio *r10_bio,
330 for (slot = 0; slot < conf->copies; slot++) {
339 BUG_ON(slot == conf->copies);
355 struct r10conf *conf = r10_bio->mddev->private;
381 if (!_enough(conf, test_bit(R10BIO_Previous, &r10_bio->state),
387 rdev_dec_pending(rdev, conf->mddev);
394 mdname(conf->mddev),
432 struct r10conf *conf = r10_bio->mddev->private;
440 dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
443 rdev = conf->mirrors[dev].replacement;
447 rdev = conf->mirrors[dev].rdev;
531 rdev_dec_pending(rdev, conf->mddev);
624 static void raid10_find_phys(struct r10conf *conf, struct r10bio *r10bio)
626 struct geom *geo = &conf->geo;
628 if (conf->reshape_progress != MaxSector &&
629 ((r10bio->sector >= conf->reshape_progress) !=
630 conf->mddev->reshape_backwards)) {
632 geo = &conf->prev;
639 static sector_t raid10_find_virt(struct r10conf *conf, sector_t sector, int dev)
642 /* Never use conf->prev as this is only called during resync
645 struct geom *geo = &conf->geo;
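raid10_find_phys()/raid10_find_virt() above translate between the array's virtual sectors and per-device (disk, sector) pairs using the geometry in conf->geo (or conf->prev during a reshape). A simplified model of the forward mapping, assuming a near-copies-only layout (far_copies == 1) with raid_disks divisible by near_copies; this illustrates the arithmetic and is not the kernel's __raid10_find_phys():

#include <stdio.h>
#include <stdint.h>

struct demo_geo { int raid_disks, near_copies, chunk_shift; };

static void find_phys(const struct demo_geo *g, uint64_t vsector,
                      int dev_out[], uint64_t sect_out[])
{
    uint64_t chunk  = vsector >> g->chunk_shift;            /* logical chunk    */
    uint64_t offset = vsector & ((1ULL << g->chunk_shift) - 1);
    uint64_t idx    = chunk * g->near_copies;               /* spread the copies */
    uint64_t stripe = idx / g->raid_disks;                  /* physical stripe   */
    int      dev    = idx % g->raid_disks;                  /* first device      */
    uint64_t psect  = (stripe << g->chunk_shift) + offset;

    for (int n = 0; n < g->near_copies; n++) {
        dev_out[n]  = (dev + n) % g->raid_disks;
        sect_out[n] = psect;             /* same device offset for each copy */
    }
}

int main(void)
{
    struct demo_geo g = { .raid_disks = 4, .near_copies = 2, .chunk_shift = 7 };
    int dev[2]; uint64_t sect[2];

    find_phys(&g, 1000, dev, sect);      /* 64K chunks (128 sectors)         */
    for (int n = 0; n < 2; n++)
        printf("copy %d -> disk %d, sector %llu\n",
               n, dev[n], (unsigned long long)sect[n]);
    return 0;
}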
703 static struct md_rdev *read_balance(struct r10conf *conf,
717 struct geom *geo = &conf->geo;
719 raid10_find_phys(conf, r10_bio);
735 if ((conf->mddev->recovery_cp < MaxSector
736 && (this_sector + sectors >= conf->next_resync)) ||
737 (mddev_is_clustered(conf->mddev) &&
738 md_cluster_ops->area_resyncing(conf->mddev, READ, this_sector,
742 for (slot = 0; slot < conf->copies ; slot++) {
752 rdev = rcu_dereference(conf->mirrors[disk].replacement);
762 rdev = rcu_dereference(conf->mirrors[disk].rdev);
830 conf->mirrors[disk].head_position);
838 if (slot >= conf->copies) {
859 static void flush_pending_writes(struct r10conf *conf)
864 spin_lock_irq(&conf->device_lock);
866 if (conf->pending_bio_list.head) {
870 bio = bio_list_get(&conf->pending_bio_list);
871 conf->pending_count = 0;
872 spin_unlock_irq(&conf->device_lock);
888 md_bitmap_unplug(conf->mddev->bitmap);
889 wake_up(&conf->wait_barrier);
909 spin_unlock_irq(&conf->device_lock);
934 static void raise_barrier(struct r10conf *conf, int force)
936 BUG_ON(force && !conf->barrier);
937 spin_lock_irq(&conf->resync_lock);
940 wait_event_lock_irq(conf->wait_barrier, force || !conf->nr_waiting,
941 conf->resync_lock);
944 conf->barrier++;
947 wait_event_lock_irq(conf->wait_barrier,
948 !atomic_read(&conf->nr_pending) && conf->barrier < RESYNC_DEPTH,
949 conf->resync_lock);
951 spin_unlock_irq(&conf->resync_lock);
954 static void lower_barrier(struct r10conf *conf)
957 spin_lock_irqsave(&conf->resync_lock, flags);
958 conf->barrier--;
959 spin_unlock_irqrestore(&conf->resync_lock, flags);
960 wake_up(&conf->wait_barrier);
963 static void wait_barrier(struct r10conf *conf)
965 spin_lock_irq(&conf->resync_lock);
966 if (conf->barrier) {
968 conf->nr_waiting++;
978 raid10_log(conf->mddev, "wait barrier");
979 wait_event_lock_irq(conf->wait_barrier,
980 !conf->barrier ||
981 (atomic_read(&conf->nr_pending) &&
988 (conf->mddev->thread->tsk == current &&
990 &conf->mddev->recovery) &&
991 conf->nr_queued > 0),
992 conf->resync_lock);
993 conf->nr_waiting--;
994 if (!conf->nr_waiting)
995 wake_up(&conf->wait_barrier);
997 atomic_inc(&conf->nr_pending);
998 spin_unlock_irq(&conf->resync_lock);
1001 static void allow_barrier(struct r10conf *conf)
1003 if ((atomic_dec_and_test(&conf->nr_pending)) ||
1004 (conf->array_freeze_pending))
1005 wake_up(&conf->wait_barrier);
1008 static void freeze_array(struct r10conf *conf, int extra)
1022 spin_lock_irq(&conf->resync_lock);
1023 conf->array_freeze_pending++;
1024 conf->barrier++;
1025 conf->nr_waiting++;
1026 wait_event_lock_irq_cmd(conf->wait_barrier,
1027 atomic_read(&conf->nr_pending) == conf->nr_queued+extra,
1028 conf->resync_lock,
1029 flush_pending_writes(conf));
1031 conf->array_freeze_pending--;
1032 spin_unlock_irq(&conf->resync_lock);
1035 static void unfreeze_array(struct r10conf *conf)
1038 spin_lock_irq(&conf->resync_lock);
1039 conf->barrier--;
1040 conf->nr_waiting--;
1041 wake_up(&conf->wait_barrier);
1042 spin_unlock_irq(&conf->resync_lock);
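The raise/lower/wait/allow_barrier and freeze/unfreeze_array references above implement the exclusion between resync/recovery and regular I/O: resync raises conf->barrier and waits for the conf->nr_pending in-flight requests to drain, while regular I/O waits for the barrier to drop before bumping nr_pending. A minimal user-space pthread model of that idea (the kernel version additionally tracks nr_waiting, nr_queued and RESYNC_DEPTH, and uses wait_event_lock_irq()):

#include <pthread.h>

struct barrier_ctl {
    pthread_mutex_t lock;
    pthread_cond_t  wait;     /* plays the role of conf->wait_barrier    */
    int barrier;              /* raised by resync, like conf->barrier    */
    int nr_pending;           /* in-flight regular I/O, conf->nr_pending */
};

/* Regular I/O entry: wait until no barrier is raised, then count ourselves. */
static void io_wait_barrier(struct barrier_ctl *c)
{
    pthread_mutex_lock(&c->lock);
    while (c->barrier)
        pthread_cond_wait(&c->wait, &c->lock);
    c->nr_pending++;
    pthread_mutex_unlock(&c->lock);
}

/* Regular I/O exit: drop our pending count and wake a waiting resync. */
static void io_allow_barrier(struct barrier_ctl *c)
{
    pthread_mutex_lock(&c->lock);
    if (--c->nr_pending == 0)
        pthread_cond_broadcast(&c->wait);
    pthread_mutex_unlock(&c->lock);
}

/* Resync entry: raise the barrier, then wait for in-flight I/O to drain. */
static void resync_raise_barrier(struct barrier_ctl *c)
{
    pthread_mutex_lock(&c->lock);
    c->barrier++;
    while (c->nr_pending)
        pthread_cond_wait(&c->wait, &c->lock);
    pthread_mutex_unlock(&c->lock);
}

/* Resync exit: lower the barrier and let regular I/O through again. */
static void resync_lower_barrier(struct barrier_ctl *c)
{
    pthread_mutex_lock(&c->lock);
    c->barrier--;
    pthread_cond_broadcast(&c->wait);
    pthread_mutex_unlock(&c->lock);
}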
1066 struct r10conf *conf = mddev->private;
1070 spin_lock_irq(&conf->device_lock);
1071 bio_list_merge(&conf->pending_bio_list, &plug->pending);
1072 conf->pending_count += plug->pending_cnt;
1073 spin_unlock_irq(&conf->device_lock);
1074 wake_up(&conf->wait_barrier);
1083 wake_up(&conf->wait_barrier);
1110 static void regular_request_wait(struct mddev *mddev, struct r10conf *conf,
1113 wait_barrier(conf);
1115 bio->bi_iter.bi_sector < conf->reshape_progress &&
1116 bio->bi_iter.bi_sector + sectors > conf->reshape_progress) {
1117 raid10_log(conf->mddev, "wait reshape");
1118 allow_barrier(conf);
1119 wait_event(conf->wait_barrier,
1120 conf->reshape_progress <= bio->bi_iter.bi_sector ||
1121 conf->reshape_progress >= bio->bi_iter.bi_sector +
1123 wait_barrier(conf);
1130 struct r10conf *conf = mddev->private;
1145 * we must use the one in conf.
1158 err_rdev = rcu_dereference(conf->mirrors[disk].rdev);
1169 regular_request_wait(mddev, conf, bio, r10_bio->sectors);
1170 rdev = read_balance(conf, r10_bio, &max_sectors);
1187 gfp, &conf->bio_split);
1189 allow_barrier(conf);
1191 wait_barrier(conf);
1231 struct r10conf *conf = mddev->private;
1237 rdev = conf->mirrors[devnum].replacement;
1241 rdev = conf->mirrors[devnum].rdev;
1244 rdev = conf->mirrors[devnum].rdev;
1258 &conf->mirrors[devnum].rdev->flags)
1259 && enough(conf, devnum))
1263 if (conf->mddev->gendisk)
1265 mbio, disk_devt(conf->mddev->gendisk),
1281 spin_lock_irqsave(&conf->device_lock, flags);
1282 bio_list_add(&conf->pending_bio_list, mbio);
1283 conf->pending_count++;
1284 spin_unlock_irqrestore(&conf->device_lock, flags);
1292 struct r10conf *conf = mddev->private;
1304 prepare_to_wait(&conf->wait_barrier,
1311 finish_wait(&conf->wait_barrier, &w);
1315 regular_request_wait(mddev, conf, bio, sectors);
1318 ? (bio->bi_iter.bi_sector < conf->reshape_safe &&
1319 bio->bi_iter.bi_sector + sectors > conf->reshape_progress)
1320 : (bio->bi_iter.bi_sector + sectors > conf->reshape_safe &&
1321 bio->bi_iter.bi_sector < conf->reshape_progress))) {
1323 mddev->reshape_position = conf->reshape_progress;
1327 raid10_log(conf->mddev, "wait reshape metadata");
1331 conf->reshape_safe = mddev->reshape_position;
1334 if (conf->pending_count >= max_queued_requests) {
1337 wait_event(conf->wait_barrier,
1338 conf->pending_count < max_queued_requests);
1351 raid10_find_phys(conf, r10_bio);
1357 for (i = 0; i < conf->copies; i++) {
1361 rrdev = rcu_dereference(conf->mirrors[d].replacement);
1367 rdev = rcu_dereference(conf->mirrors[d].rdev);
1452 rdev_dec_pending(conf->mirrors[d].rdev, mddev);
1457 rdev = conf->mirrors[d].replacement;
1461 rdev = conf->mirrors[d].rdev;
1466 allow_barrier(conf);
1467 raid10_log(conf->mddev, "wait rdev %d blocked", blocked_rdev->raid_disk);
1469 wait_barrier(conf);
1478 GFP_NOIO, &conf->bio_split);
1480 allow_barrier(conf);
1482 wait_barrier(conf);
1490 for (i = 0; i < conf->copies; i++) {
1501 struct r10conf *conf = mddev->private;
1504 r10_bio = mempool_alloc(&conf->r10bio_pool, GFP_NOIO);
1513 memset(r10_bio->devs, 0, sizeof(r10_bio->devs[0]) * conf->copies);
1523 struct r10conf *conf = mddev->private;
1524 sector_t chunk_mask = (conf->geo.chunk_mask & conf->prev.chunk_mask);
1541 && (conf->geo.near_copies < conf->geo.raid_disks
1542 || conf->prev.near_copies <
1543 conf->prev.raid_disks)))
1550 wake_up(&conf->wait_barrier);
1556 struct r10conf *conf = mddev->private;
1559 if (conf->geo.near_copies < conf->geo.raid_disks)
1561 if (conf->geo.near_copies > 1)
1562 seq_printf(seq, " %d near-copies", conf->geo.near_copies);
1563 if (conf->geo.far_copies > 1) {
1564 if (conf->geo.far_offset)
1565 seq_printf(seq, " %d offset-copies", conf->geo.far_copies);
1567 seq_printf(seq, " %d far-copies", conf->geo.far_copies);
1568 if (conf->geo.far_set_size != conf->geo.raid_disks)
1569 seq_printf(seq, " %d devices per set", conf->geo.far_set_size);
1571 seq_printf(seq, " [%d/%d] [", conf->geo.raid_disks,
1572 conf->geo.raid_disks - mddev->degraded);
1574 for (i = 0; i < conf->geo.raid_disks; i++) {
1575 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
1587 static int _enough(struct r10conf *conf, int previous, int ignore)
1593 disks = conf->prev.raid_disks;
1594 ncopies = conf->prev.near_copies;
1596 disks = conf->geo.raid_disks;
1597 ncopies = conf->geo.near_copies;
1602 int n = conf->copies;
1608 (rdev = rcu_dereference(conf->mirrors[this].rdev)) &&
1623 static int enough(struct r10conf *conf, int ignore)
1630 return _enough(conf, 0, ignore) &&
1631 _enough(conf, 1, ignore);
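_enough()/enough() above decide whether every copy set still contains at least one In_sync device, optionally pretending that one device ('ignore') has failed; enough() checks both the previous and the current geometry so the answer holds across a reshape. A simplified user-space model of that walk, with an in_sync[] array standing in for the rdev flags (the kernel also takes the RCU read lock and dereferences conf->mirrors[]):

#include <stdbool.h>
#include <stdio.h>

static bool model_enough(const bool in_sync[], int raid_disks,
                         int near_copies, int copies, int ignore)
{
    int first = 0;

    do {
        int cnt = 0;

        /* Count working members of the copy set starting at 'first'. */
        for (int n = 0, d = first; n < copies; n++, d = (d + 1) % raid_disks)
            if (d != ignore && in_sync[d])
                cnt++;
        if (cnt == 0)
            return false;                 /* a whole copy set is gone */
        first = (first + near_copies) % raid_disks;
    } while (first != 0);

    return true;
}

int main(void)
{
    /* 4 disks, near=2: the copy sets are {0,1} and {2,3}. */
    bool in_sync[4] = { true, false, true, true };

    printf("array usable: %d\n", model_enough(in_sync, 4, 2, 2, -1));  /* 1 */
    printf("after losing disk 0 too: %d\n",
           model_enough(in_sync, 4, 2, 2, 0));                         /* 0 */
    return 0;
}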
1637 struct r10conf *conf = mddev->private;
1646 spin_lock_irqsave(&conf->device_lock, flags);
1648 && !enough(conf, rdev->raid_disk)) {
1652 spin_unlock_irqrestore(&conf->device_lock, flags);
1665 spin_unlock_irqrestore(&conf->device_lock, flags);
1669 mdname(mddev), conf->geo.raid_disks - mddev->degraded);
1672 static void print_conf(struct r10conf *conf)
1677 pr_debug("RAID10 conf printout:\n");
1678 if (!conf) {
1679 pr_debug("(!conf)\n");
1682 pr_debug(" --- wd:%d rd:%d\n", conf->geo.raid_disks - conf->mddev->degraded,
1683 conf->geo.raid_disks);
1687 for (i = 0; i < conf->geo.raid_disks; i++) {
1689 rdev = conf->mirrors[i].rdev;
1698 static void close_sync(struct r10conf *conf)
1700 wait_barrier(conf);
1701 allow_barrier(conf);
1703 mempool_exit(&conf->r10buf_pool);
1709 struct r10conf *conf = mddev->private;
1718 for (i = 0; i < conf->geo.raid_disks; i++) {
1719 tmp = conf->mirrors + i;
1746 spin_lock_irqsave(&conf->device_lock, flags);
1748 spin_unlock_irqrestore(&conf->device_lock, flags);
1750 print_conf(conf);
1756 struct r10conf *conf = mddev->private;
1760 int last = conf->geo.raid_disks - 1;
1767 if (rdev->saved_raid_disk < 0 && !_enough(conf, 1, -1))
1777 rdev->saved_raid_disk < conf->geo.raid_disks &&
1778 conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
1783 struct raid10_info *p = &conf->mirrors[mirror];
1797 conf->fullsync = 1;
1811 conf->fullsync = 1;
1818 print_conf(conf);
1824 struct r10conf *conf = mddev->private;
1830 print_conf(conf);
1833 p = conf->mirrors + number;
1852 number < conf->geo.raid_disks &&
1853 enough(conf, -1)) {
1882 print_conf(conf);
1888 struct r10conf *conf = r10_bio->mddev->private;
1897 &conf->mirrors[d].rdev->corrected_errors);
1902 rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev);
1915 struct r10conf *conf = r10_bio->mddev->private;
1916 int d = find_bio_disk(conf, r10_bio, bio, NULL, NULL);
1960 struct r10conf *conf = mddev->private;
1968 d = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
1970 rdev = conf->mirrors[d].replacement;
1972 rdev = conf->mirrors[d].rdev;
2013 struct r10conf *conf = mddev->private;
2022 for (i=0; i<conf->copies; i++)
2026 if (i == conf->copies)
2037 for (i=0 ; i < conf->copies ; i++) {
2051 rdev = conf->mirrors[d].rdev;
2097 atomic_inc(&conf->mirrors[d].rdev->nr_pending);
2099 md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(tbio));
2101 if (test_bit(FailFast, &conf->mirrors[d].rdev->flags))
2103 tbio->bi_iter.bi_sector += conf->mirrors[d].rdev->data_offset;
2104 bio_set_dev(tbio, conf->mirrors[d].rdev->bdev);
2111 for (i = 0; i < conf->copies; i++) {
2122 md_sync_acct(conf->mirrors[d].replacement->bdev,
2154 struct r10conf *conf = mddev->private;
2172 rdev = conf->mirrors[dr].rdev;
2180 rdev = conf->mirrors[dw].rdev;
2202 if (rdev != conf->mirrors[dw].rdev) {
2204 struct md_rdev *rdev2 = conf->mirrors[dw].rdev;
2212 conf->mirrors[dw].recovery_disabled
2229 struct r10conf *conf = mddev->private;
2256 atomic_inc(&conf->mirrors[d].rdev->nr_pending);
2257 md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(wbio));
2261 atomic_inc(&conf->mirrors[d].replacement->nr_pending);
2262 md_sync_acct(conf->mirrors[d].replacement->bdev,
2336 static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10bio *r10_bio)
2347 rdev = conf->mirrors[d].rdev;
2385 rdev = rcu_dereference(conf->mirrors[d].rdev);
2397 conf->tmppage,
2405 if (sl == conf->copies)
2416 rdev = conf->mirrors[dn].rdev;
2437 sl = conf->copies;
2440 rdev = rcu_dereference(conf->mirrors[d].rdev);
2451 s, conf->tmppage, WRITE)
2473 sl = conf->copies;
2476 rdev = rcu_dereference(conf->mirrors[d].rdev);
2487 s, conf->tmppage,
2525 struct r10conf *conf = mddev->private;
2526 struct md_rdev *rdev = conf->mirrors[r10_bio->devs[i].devnum].rdev;
2586 struct r10conf *conf = mddev->private;
2604 freeze_array(conf, 1);
2605 fix_read_error(conf, mddev, r10_bio);
2606 unfreeze_array(conf);
2611 allow_barrier(conf);
2616 static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
2629 for (m = 0; m < conf->copies; m++) {
2631 rdev = conf->mirrors[dev].rdev;
2645 md_error(conf->mddev, rdev);
2647 rdev = conf->mirrors[dev].replacement;
2662 md_error(conf->mddev, rdev);
2668 for (m = 0; m < conf->copies; m++) {
2671 rdev = conf->mirrors[dev].rdev;
2677 rdev_dec_pending(rdev, conf->mddev);
2681 md_error(conf->mddev, rdev);
2685 rdev_dec_pending(rdev, conf->mddev);
2688 rdev = conf->mirrors[dev].replacement;
2694 rdev_dec_pending(rdev, conf->mddev);
2698 spin_lock_irq(&conf->device_lock);
2699 list_add(&r10_bio->retry_list, &conf->bio_end_io_list);
2700 conf->nr_queued++;
2701 spin_unlock_irq(&conf->device_lock);
2706 wake_up(&conf->wait_barrier);
2707 md_wakeup_thread(conf->mddev->thread);
2722 struct r10conf *conf = mddev->private;
2723 struct list_head *head = &conf->retry_list;
2728 if (!list_empty_careful(&conf->bio_end_io_list) &&
2731 spin_lock_irqsave(&conf->device_lock, flags);
2733 while (!list_empty(&conf->bio_end_io_list)) {
2734 list_move(conf->bio_end_io_list.prev, &tmp);
2735 conf->nr_queued--;
2738 spin_unlock_irqrestore(&conf->device_lock, flags);
2756 flush_pending_writes(conf);
2758 spin_lock_irqsave(&conf->device_lock, flags);
2760 spin_unlock_irqrestore(&conf->device_lock, flags);
2765 conf->nr_queued--;
2766 spin_unlock_irqrestore(&conf->device_lock, flags);
2769 conf = mddev->private;
2772 handle_write_completed(conf, r10_bio);
2791 static int init_resync(struct r10conf *conf)
2796 BUG_ON(mempool_initialized(&conf->r10buf_pool));
2797 conf->have_replacement = 0;
2798 for (i = 0; i < conf->geo.raid_disks; i++)
2799 if (conf->mirrors[i].replacement)
2800 conf->have_replacement = 1;
2801 ret = mempool_init(&conf->r10buf_pool, buffs,
2802 r10buf_pool_alloc, r10buf_pool_free, conf);
2805 conf->next_resync = 0;
2809 static struct r10bio *raid10_alloc_init_r10buf(struct r10conf *conf)
2811 struct r10bio *r10bio = mempool_alloc(&conf->r10buf_pool, GFP_NOIO);
2817 if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery) ||
2818 test_bit(MD_RECOVERY_RESHAPE, &conf->mddev->recovery))
2819 nalloc = conf->copies; /* resync */
2842 static void raid10_set_cluster_sync_high(struct r10conf *conf)
2859 chunks = conf->geo.raid_disks / conf->geo.near_copies;
2860 if (conf->geo.raid_disks % conf->geo.near_copies == 0)
2864 window_size = (chunks + extra_chunk) * conf->mddev->chunk_sectors;
2872 conf->cluster_sync_high = conf->cluster_sync_low + window_size;
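raid10_set_cluster_sync_high() above sizes the clustered-resync window as one full "stripe" of chunks: raid_disks / near_copies chunks, plus one extra chunk when raid_disks is not divisible by near_copies, multiplied by the chunk size (the kernel also enforces a minimum window). A small worked example of that arithmetic:

#include <stdio.h>

static long long sync_window(int raid_disks, int near_copies, int chunk_sectors)
{
    int chunks = raid_disks / near_copies;
    int extra  = (raid_disks % near_copies == 0) ? 0 : 1;

    return (long long)(chunks + extra) * chunk_sectors;
}

int main(void)
{
    /* 5 disks, near=2, 512-sector (256K) chunks: (2 + 1) * 512 = 1536 sectors. */
    printf("window = %lld sectors\n", sync_window(5, 2, 512));
    return 0;
}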
2910 struct r10conf *conf = mddev->private;
2919 sector_t chunk_mask = conf->geo.chunk_mask;
2932 conf->fullsync == 0) {
2937 if (!mempool_initialized(&conf->r10buf_pool))
2938 if (init_resync(conf))
2947 conf->cluster_sync_low = 0;
2948 conf->cluster_sync_high = 0;
2960 end_reshape(conf);
2961 close_sync(conf);
2969 else for (i = 0; i < conf->geo.raid_disks; i++) {
2971 raid10_find_virt(conf, mddev->curr_resync, i);
2977 if ((!mddev->bitmap || conf->fullsync)
2978 && conf->have_replacement
2984 for (i = 0; i < conf->geo.raid_disks; i++) {
2986 rcu_dereference(conf->mirrors[i].replacement);
2992 conf->fullsync = 0;
2995 close_sync(conf);
3003 if (chunks_skipped >= conf->geo.raid_disks) {
3017 if (conf->geo.near_copies < conf->geo.raid_disks &&
3025 if (conf->nr_waiting)
3049 for (i = 0 ; i < conf->geo.raid_disks; i++) {
3056 struct raid10_info *mirror = &conf->mirrors[i];
3078 sect = raid10_find_virt(conf, sector_nr, i);
3096 !conf->fullsync) {
3109 r10_bio = raid10_alloc_init_r10buf(conf);
3111 raise_barrier(conf, rb2 != NULL);
3121 raid10_find_phys(conf, r10_bio);
3127 for (j = 0; j < conf->geo.raid_disks; j++) {
3129 conf->mirrors[j].rdev);
3140 for (j=0; j<conf->copies;j++) {
3145 rcu_dereference(conf->mirrors[d].rdev);
3181 for (k=0; k<conf->copies; k++)
3184 BUG_ON(k == conf->copies);
3225 if (j == conf->copies) {
3233 for (k = 0; k < conf->copies; k++)
3276 for (; j < conf->copies; j++) {
3278 if (conf->mirrors[d].rdev &&
3280 &conf->mirrors[d].rdev->flags))
3310 (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high));
3314 !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED,
3322 r10_bio = raid10_alloc_init_r10buf(conf);
3327 raise_barrier(conf, 0);
3328 conf->next_resync = sector_nr;
3333 raid10_find_phys(conf, r10_bio);
3336 for (i = 0; i < conf->copies; i++) {
3348 rdev = rcu_dereference(conf->mirrors[d].rdev);
3378 rdev = rcu_dereference(conf->mirrors[d].replacement);
3403 for (i=0; i<conf->copies; i++) {
3406 rdev_dec_pending(conf->mirrors[d].rdev,
3411 conf->mirrors[d].replacement,
3447 if (conf->cluster_sync_high < sector_nr + nr_sectors) {
3448 conf->cluster_sync_low = mddev->curr_resync_completed;
3449 raid10_set_cluster_sync_high(conf);
3452 conf->cluster_sync_low,
3453 conf->cluster_sync_high);
3460 for (i = 0; i < conf->geo.raid_disks; i++) {
3466 sect_va1 = raid10_find_virt(conf, sector_nr, i);
3468 if (conf->cluster_sync_high < sect_va1 + nr_sectors) {
3474 sect_va2 = raid10_find_virt(conf,
3477 if (conf->cluster_sync_low == 0 ||
3478 conf->cluster_sync_low > sect_va2)
3479 conf->cluster_sync_low = sect_va2;
3483 raid10_set_cluster_sync_high(conf);
3485 conf->cluster_sync_low,
3486 conf->cluster_sync_high);
3530 struct r10conf *conf = mddev->private;
3533 raid_disks = min(conf->geo.raid_disks,
3534 conf->prev.raid_disks);
3536 sectors = conf->dev_sectors;
3538 size = sectors >> conf->geo.chunk_shift;
3539 sector_div(size, conf->geo.far_copies);
3541 sector_div(size, conf->geo.near_copies);
3543 return size << conf->geo.chunk_shift;
3546 static void calc_sectors(struct r10conf *conf, sector_t size)
3549 * actually be used, and set conf->dev_sectors and
3550 * conf->stride
3553 size = size >> conf->geo.chunk_shift;
3554 sector_div(size, conf->geo.far_copies);
3555 size = size * conf->geo.raid_disks;
3556 sector_div(size, conf->geo.near_copies);
3559 size = size * conf->copies;
3564 size = DIV_ROUND_UP_SECTOR_T(size, conf->geo.raid_disks);
3566 conf->dev_sectors = size << conf->geo.chunk_shift;
3568 if (conf->geo.far_offset)
3569 conf->geo.stride = 1 << conf->geo.chunk_shift;
3571 sector_div(size, conf->geo.far_copies);
3572 conf->geo.stride = size << conf->geo.chunk_shift;
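raid10_size() and calc_sectors() above convert between per-device capacity and exported array capacity: whole chunks per device, divided by far_copies, multiplied by raid_disks, divided by near_copies. A simplified model of that arithmetic (illustration only; the kernel works on sector_t via sector_div() and also derives conf->geo.stride here):

#include <stdio.h>
#include <stdint.h>

static uint64_t array_sectors(uint64_t dev_sectors, int raid_disks,
                              int near_copies, int far_copies, int chunk_shift)
{
    uint64_t size = dev_sectors >> chunk_shift;   /* whole chunks per device */

    size /= far_copies;
    size *= raid_disks;
    size /= near_copies;
    return size << chunk_shift;                   /* back to sectors */
}

int main(void)
{
    /* 4 devices of 1,000,000 sectors, near=2, far=1, 512K chunks (2^10 sectors):
     * 976 chunks/device * 4 / 2 = 1952 chunks = 1,998,848 usable sectors. */
    printf("%llu sectors\n",
           (unsigned long long)array_sectors(1000000, 4, 2, 1, 10));
    return 0;
}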
3633 static void raid10_free_conf(struct r10conf *conf)
3635 if (!conf)
3638 mempool_exit(&conf->r10bio_pool);
3639 kfree(conf->mirrors);
3640 kfree(conf->mirrors_old);
3641 kfree(conf->mirrors_new);
3642 safe_put_page(conf->tmppage);
3643 bioset_exit(&conf->bio_split);
3644 kfree(conf);
3649 struct r10conf *conf = NULL;
3669 conf = kzalloc(sizeof(struct r10conf), GFP_KERNEL);
3670 if (!conf)
3674 conf->mirrors = kcalloc(mddev->raid_disks + max(0, -mddev->delta_disks),
3677 if (!conf->mirrors)
3680 conf->tmppage = alloc_page(GFP_KERNEL);
3681 if (!conf->tmppage)
3684 conf->geo = geo;
3685 conf->copies = copies;
3686 err = mempool_init(&conf->r10bio_pool, NR_RAID_BIOS, r10bio_pool_alloc,
3687 rbio_pool_free, conf);
3691 err = bioset_init(&conf->bio_split, BIO_POOL_SIZE, 0, 0);
3695 calc_sectors(conf, mddev->dev_sectors);
3697 conf->prev = conf->geo;
3698 conf->reshape_progress = MaxSector;
3700 if (setup_geo(&conf->prev, mddev, geo_old) != conf->copies) {
3704 conf->reshape_progress = mddev->reshape_position;
3705 if (conf->prev.far_offset)
3706 conf->prev.stride = 1 << conf->prev.chunk_shift;
3709 conf->prev.stride = conf->dev_sectors;
3711 conf->reshape_safe = conf->reshape_progress;
3712 spin_lock_init(&conf->device_lock);
3713 INIT_LIST_HEAD(&conf->retry_list);
3714 INIT_LIST_HEAD(&conf->bio_end_io_list);
3716 spin_lock_init(&conf->resync_lock);
3717 init_waitqueue_head(&conf->wait_barrier);
3718 atomic_set(&conf->nr_pending, 0);
3721 conf->thread = md_register_thread(raid10d, mddev, "raid10");
3722 if (!conf->thread)
3725 conf->mddev = mddev;
3726 return conf;
3729 raid10_free_conf(conf);
3733 static void raid10_set_io_opt(struct r10conf *conf)
3735 int raid_disks = conf->geo.raid_disks;
3737 if (!(conf->geo.raid_disks % conf->geo.near_copies))
3738 raid_disks /= conf->geo.near_copies;
3739 blk_queue_io_opt(conf->mddev->queue, (conf->mddev->chunk_sectors << 9) *
3745 struct r10conf *conf;
3758 conf = setup_conf(mddev);
3759 if (IS_ERR(conf))
3760 return PTR_ERR(conf);
3761 mddev->private = conf;
3763 conf = mddev->private;
3764 if (!conf)
3767 mddev->thread = conf->thread;
3768 conf->thread = NULL;
3770 if (mddev_is_clustered(conf->mddev)) {
3788 raid10_set_io_opt(conf);
3797 if (disk_idx >= conf->geo.raid_disks &&
3798 disk_idx >= conf->prev.raid_disks)
3800 disk = conf->mirrors + disk_idx;
3839 if (!enough(conf, -1)) {
3845 if (conf->reshape_progress != MaxSector) {
3847 if (conf->geo.far_copies != 1 &&
3848 conf->geo.far_offset == 0)
3850 if (conf->prev.far_copies != 1 &&
3851 conf->prev.far_offset == 0)
3857 i < conf->geo.raid_disks
3858 || i < conf->prev.raid_disks;
3861 disk = conf->mirrors + i;
3876 conf->fullsync = 1;
3882 conf->fullsync = 1;
3892 mdname(mddev), conf->geo.raid_disks - mddev->degraded,
3893 conf->geo.raid_disks);
3897 mddev->dev_sectors = conf->dev_sectors;
3906 if (conf->reshape_progress != MaxSector) {
3909 before_length = ((1 << conf->prev.chunk_shift) *
3910 conf->prev.far_copies);
3911 after_length = ((1 << conf->geo.chunk_shift) *
3912 conf->geo.far_copies);
3919 conf->offset_diff = min_offset_diff;
3935 raid10_free_conf(conf);
3948 struct r10conf *conf = mddev->private;
3951 raise_barrier(conf, 0);
3953 lower_barrier(conf);
3970 struct r10conf *conf = mddev->private;
3976 if (conf->geo.far_copies > 1 && !conf->geo.far_offset)
3995 calc_sectors(conf, sectors);
3996 mddev->dev_sectors = conf->dev_sectors;
4004 struct r10conf *conf;
4024 conf = setup_conf(mddev);
4025 if (!IS_ERR(conf)) {
4031 conf->barrier = 1;
4034 return conf;
4075 struct r10conf *conf = mddev->private;
4078 if (conf->geo.far_copies != 1 && !conf->geo.far_offset)
4081 if (setup_geo(&geo, mddev, geo_start) != conf->copies)
4092 if (!enough(conf, -1))
4095 kfree(conf->mirrors_new);
4096 conf->mirrors_new = NULL;
4099 conf->mirrors_new =
4103 if (!conf->mirrors_new)
4122 static int calc_degraded(struct r10conf *conf)
4130 for (i = 0; i < conf->prev.raid_disks; i++) {
4131 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
4142 if (conf->geo.raid_disks == conf->prev.raid_disks)
4146 for (i = 0; i < conf->geo.raid_disks; i++) {
4147 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
4156 if (conf->geo.raid_disks <= conf->prev.raid_disks)
4182 struct r10conf *conf = mddev->private;
4190 if (setup_geo(&new, mddev, geo_start) != conf->copies)
4193 before_length = ((1 << conf->prev.chunk_shift) *
4194 conf->prev.far_copies);
4195 after_length = ((1 << conf->geo.chunk_shift) *
4196 conf->geo.far_copies);
4221 conf->offset_diff = min_offset_diff;
4222 spin_lock_irq(&conf->device_lock);
4223 if (conf->mirrors_new) {
4224 memcpy(conf->mirrors_new, conf->mirrors,
4225 sizeof(struct raid10_info)*conf->prev.raid_disks);
4227 kfree(conf->mirrors_old);
4228 conf->mirrors_old = conf->mirrors;
4229 conf->mirrors = conf->mirrors_new;
4230 conf->mirrors_new = NULL;
4232 setup_geo(&conf->geo, mddev, geo_start);
4237 spin_unlock_irq(&conf->device_lock);
4243 conf->reshape_progress = size;
4245 conf->reshape_progress = 0;
4246 conf->reshape_safe = conf->reshape_progress;
4247 spin_unlock_irq(&conf->device_lock);
4254 newsize = raid10_size(mddev, 0, conf->geo.raid_disks);
4296 conf->prev.raid_disks)
4304 } else if (rdev->raid_disk >= conf->prev.raid_disks
4314 spin_lock_irq(&conf->device_lock);
4315 mddev->degraded = calc_degraded(conf);
4316 spin_unlock_irq(&conf->device_lock);
4317 mddev->raid_disks = conf->geo.raid_disks;
4318 mddev->reshape_position = conf->reshape_progress;
4333 conf->reshape_checkpoint = jiffies;
4340 spin_lock_irq(&conf->device_lock);
4341 conf->geo = conf->prev;
4342 mddev->raid_disks = conf->geo.raid_disks;
4346 conf->reshape_progress = MaxSector;
4347 conf->reshape_safe = MaxSector;
4349 spin_unlock_irq(&conf->device_lock);
4414 * (conf->offset_diff - always positive) allows a bit of slack,
4424 struct r10conf *conf = mddev->private;
4440 conf->reshape_progress < raid10_size(mddev, 0, 0)) {
4442 - conf->reshape_progress);
4444 conf->reshape_progress > 0)
4445 sector_nr = conf->reshape_progress;
4462 next = first_dev_address(conf->reshape_progress - 1,
4463 &conf->geo);
4468 safe = last_dev_address(conf->reshape_safe - 1,
4469 &conf->prev);
4471 if (next + conf->offset_diff < safe)
4474 last = conf->reshape_progress - 1;
4475 sector_nr = last & ~(sector_t)(conf->geo.chunk_mask
4476 & conf->prev.chunk_mask);
4483 next = last_dev_address(conf->reshape_progress, &conf->geo);
4488 safe = first_dev_address(conf->reshape_safe, &conf->prev);
4493 if (next > safe + conf->offset_diff)
4496 sector_nr = conf->reshape_progress;
4497 last = sector_nr | (conf->geo.chunk_mask
4498 & conf->prev.chunk_mask);
4505 time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) {
4507 wait_barrier(conf);
4508 mddev->reshape_position = conf->reshape_progress;
4511 - conf->reshape_progress;
4513 mddev->curr_resync_completed = conf->reshape_progress;
4514 conf->reshape_checkpoint = jiffies;
4520 allow_barrier(conf);
4523 conf->reshape_safe = mddev->reshape_position;
4524 allow_barrier(conf);
4527 raise_barrier(conf, 0);
4530 r10_bio = raid10_alloc_init_r10buf(conf);
4532 raise_barrier(conf, 1);
4538 rdev = read_balance(conf, r10_bio, &max_sectors);
4546 mempool_free(r10_bio, &conf->r10buf_pool);
4570 if (mddev_is_clustered(mddev) && conf->cluster_sync_high <= sector_nr) {
4574 conf->cluster_sync_low = sector_nr;
4575 conf->cluster_sync_high = sector_nr + CLUSTER_RESYNC_WINDOW_SECTORS;
4584 if (sb_reshape_pos < conf->cluster_sync_low)
4585 conf->cluster_sync_low = sb_reshape_pos;
4588 md_cluster_ops->resync_info_update(mddev, conf->cluster_sync_low,
4589 conf->cluster_sync_high);
4593 __raid10_find_phys(&conf->geo, r10_bio);
4599 for (s = 0; s < conf->copies*2; s++) {
4604 rdev2 = rcu_dereference(conf->mirrors[d].replacement);
4607 rdev2 = rcu_dereference(conf->mirrors[d].rdev);
4653 lower_barrier(conf);
4659 conf->reshape_progress -= sectors_done;
4661 conf->reshape_progress += sectors_done;
4676 struct r10conf *conf = mddev->private;
4690 for (s = 0; s < conf->copies*2; s++) {
4696 rdev = rcu_dereference(conf->mirrors[d].replacement);
4699 rdev = rcu_dereference(conf->mirrors[d].rdev);
4716 static void end_reshape(struct r10conf *conf)
4718 if (test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery))
4721 spin_lock_irq(&conf->device_lock);
4722 conf->prev = conf->geo;
4723 md_finish_reshape(conf->mddev);
4725 conf->reshape_progress = MaxSector;
4726 conf->reshape_safe = MaxSector;
4727 spin_unlock_irq(&conf->device_lock);
4729 if (conf->mddev->queue)
4730 raid10_set_io_opt(conf);
4731 conf->fullsync = 0;
4736 struct r10conf *conf = mddev->private;
4742 conf->reshape_progress = mddev->reshape_position;
4752 struct r10conf *conf = mddev->private;
4758 r10b = kmalloc(struct_size(r10b, devs, conf->copies), GFP_NOIO);
4768 __raid10_find_phys(&conf->prev, r10b);
4781 struct md_rdev *rdev = rcu_dereference(conf->mirrors[d].rdev);
4802 if (slot >= conf->copies)
4826 struct r10conf *conf = mddev->private;
4832 d = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
4834 rdev = conf->mirrors[d].replacement;
4837 rdev = conf->mirrors[d].rdev;
4860 struct r10conf *conf = mddev->private;
4874 for (d = conf->geo.raid_disks ;
4875 d < conf->geo.raid_disks - mddev->delta_disks;
4877 struct md_rdev *rdev = rcu_dereference(conf->mirrors[d].rdev);
4880 rdev = rcu_dereference(conf->mirrors[d].replacement);
4887 mddev->chunk_sectors = 1 << conf->geo.chunk_shift;