Lines matching refs: conf (references to struct r10conf in the md RAID10 driver, drivers/md/raid10.c)
67 static void allow_barrier(struct r10conf *conf);
68 static void lower_barrier(struct r10conf *conf);
69 static int _enough(struct r10conf *conf, int previous, int ignore);
70 static int enough(struct r10conf *conf, int ignore);
75 static void end_reshape(struct r10conf *conf);
83 #define cmd_before(conf, cmd) \
85 write_sequnlock_irq(&(conf)->resync_lock); \
88 #define cmd_after(conf) write_seqlock_irq(&(conf)->resync_lock)
90 #define wait_event_barrier_cmd(conf, cond, cmd) \
91 wait_event_cmd((conf)->wait_barrier, cond, cmd_before(conf, cmd), \
92 cmd_after(conf))
94 #define wait_event_barrier(conf, cond) \
95 wait_event_barrier_cmd(conf, cond, NULL_CMD)
108 struct r10conf *conf = data;
109 int size = offsetof(struct r10bio, devs[conf->geo.raid_disks]);
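The size computed in the r10bio_pool_alloc() excerpt above uses offsetof() over a flexible-array member: each r10bio carries one devs[] slot per raid disk, so the pool allocation covers the header plus raid_disks trailing entries. Below is a minimal user-space sketch of the same sizing idiom; struct r10bio_model and struct dev_slot are invented stand-ins rather than the kernel definitions, and a compile-time disk count is used where the kernel indexes offsetof() with the runtime conf->geo.raid_disks.

#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

struct dev_slot { void *bio; long addr; };     /* stand-in for the per-device slot */

struct r10bio_model {
    long sector;
    int  copies;
    struct dev_slot devs[];                    /* flexible array, one entry per disk */
};

enum { RAID_DISKS = 4 };                       /* the kernel uses conf->geo.raid_disks here */

int main(void)
{
    /* same idiom as the excerpt: size the header up to and including devs[RAID_DISKS] */
    size_t size = offsetof(struct r10bio_model, devs[RAID_DISKS]);
    struct r10bio_model *r10bio = malloc(size);

    if (!r10bio)
        return 1;
    printf("allocated %zu bytes for %d device slots\n", size, RAID_DISKS);
    free(r10bio);
    return 0;
}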
133 struct r10conf *conf = data;
140 r10_bio = r10bio_pool_alloc(gfp_flags, conf);
144 if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery) ||
145 test_bit(MD_RECOVERY_RESHAPE, &conf->mddev->recovery))
146 nalloc = conf->copies; /* resync */
151 if (!conf->have_replacement)
168 if (!conf->have_replacement)
191 &conf->mddev->recovery)) {
225 rbio_pool_free(r10_bio, conf);
231 struct r10conf *conf = data;
236 for (j = conf->copies; j--; ) {
256 rbio_pool_free(r10bio, conf);
259 static void put_all_bios(struct r10conf *conf, struct r10bio *r10_bio)
263 for (i = 0; i < conf->geo.raid_disks; i++) {
277 struct r10conf *conf = r10_bio->mddev->private;
279 put_all_bios(conf, r10_bio);
280 mempool_free(r10_bio, &conf->r10bio_pool);
285 struct r10conf *conf = r10_bio->mddev->private;
287 mempool_free(r10_bio, &conf->r10buf_pool);
289 lower_barrier(conf);
292 static void wake_up_barrier(struct r10conf *conf)
294 if (wq_has_sleeper(&conf->wait_barrier))
295 wake_up(&conf->wait_barrier);
302 struct r10conf *conf = mddev->private;
304 spin_lock_irqsave(&conf->device_lock, flags);
305 list_add(&r10_bio->retry_list, &conf->retry_list);
306 conf->nr_queued ++;
307 spin_unlock_irqrestore(&conf->device_lock, flags);
310 wake_up(&conf->wait_barrier);
323 struct r10conf *conf = r10_bio->mddev->private;
333 allow_barrier(conf);
343 struct r10conf *conf = r10_bio->mddev->private;
345 conf->mirrors[r10_bio->devs[slot].devnum].head_position =
352 static int find_bio_disk(struct r10conf *conf, struct r10bio *r10_bio,
358 for (slot = 0; slot < conf->geo.raid_disks; slot++) {
382 struct r10conf *conf = r10_bio->mddev->private;
408 if (!_enough(conf, test_bit(R10BIO_Previous, &r10_bio->state),
414 rdev_dec_pending(rdev, conf->mddev);
420 mdname(conf->mddev),
458 struct r10conf *conf = r10_bio->mddev->private;
466 dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
469 rdev = conf->mirrors[dev].replacement;
473 rdev = conf->mirrors[dev].rdev;
557 rdev_dec_pending(rdev, conf->mddev);
650 static void raid10_find_phys(struct r10conf *conf, struct r10bio *r10bio)
652 struct geom *geo = &conf->geo;
654 if (conf->reshape_progress != MaxSector &&
655 ((r10bio->sector >= conf->reshape_progress) !=
656 conf->mddev->reshape_backwards)) {
658 geo = &conf->prev;
665 static sector_t raid10_find_virt(struct r10conf *conf, sector_t sector, int dev)
668 /* Never use conf->prev as this is only called during resync
671 struct geom *geo = &conf->geo;
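The raid10_find_phys()/raid10_find_virt() excerpts above show how the mapping code picks a geometry: while a reshape is in flight, sectors on the not-yet-reshaped side of conf->reshape_progress are mapped with conf->prev, and raid10_find_virt() never consults conf->prev because it only runs during resync. A small user-space model of just that selection test follows; the struct names, the MAX_SECTOR stand-in, and the field layout are assumptions for illustration, not the kernel types.

#include <stdbool.h>
#include <stdint.h>

#define MAX_SECTOR UINT64_MAX                  /* stand-in for the kernel's MaxSector */

struct geom_model { int raid_disks, near_copies, far_copies; };

struct conf_model {
    struct geom_model geo, prev;               /* current and pre-reshape layouts */
    uint64_t reshape_progress;                 /* MAX_SECTOR when no reshape is active */
    bool reshape_backwards;
};

/* Mirrors the test in the raid10_find_phys() excerpt: use the old layout for the
 * region the reshape has not reached yet. */
const struct geom_model *mapping_geo(const struct conf_model *conf, uint64_t sector)
{
    if (conf->reshape_progress != MAX_SECTOR &&
        ((sector >= conf->reshape_progress) != conf->reshape_backwards))
        return &conf->prev;                    /* not yet reshaped: map with the old geometry */
    return &conf->geo;
}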
729 static struct md_rdev *read_balance(struct r10conf *conf,
743 struct geom *geo = &conf->geo;
745 raid10_find_phys(conf, r10_bio);
761 if ((conf->mddev->recovery_cp < MaxSector
762 && (this_sector + sectors >= conf->next_resync)) ||
763 (mddev_is_clustered(conf->mddev) &&
764 md_cluster_ops->area_resyncing(conf->mddev, READ, this_sector,
768 for (slot = 0; slot < conf->copies ; slot++) {
778 rdev = rcu_dereference(conf->mirrors[disk].replacement);
788 rdev = rcu_dereference(conf->mirrors[disk].rdev);
856 conf->mirrors[disk].head_position);
864 if (slot >= conf->copies) {
885 static void flush_pending_writes(struct r10conf *conf)
890 spin_lock_irq(&conf->device_lock);
892 if (conf->pending_bio_list.head) {
896 bio = bio_list_get(&conf->pending_bio_list);
897 spin_unlock_irq(&conf->device_lock);
911 raid1_prepare_flush_writes(conf->mddev->bitmap);
912 wake_up(&conf->wait_barrier);
923 spin_unlock_irq(&conf->device_lock);
948 static void raise_barrier(struct r10conf *conf, int force)
950 write_seqlock_irq(&conf->resync_lock);
952 if (WARN_ON_ONCE(force && !conf->barrier))
956 wait_event_barrier(conf, force || !conf->nr_waiting);
959 WRITE_ONCE(conf->barrier, conf->barrier + 1);
962 wait_event_barrier(conf, !atomic_read(&conf->nr_pending) &&
963 conf->barrier < RESYNC_DEPTH);
965 write_sequnlock_irq(&conf->resync_lock);
968 static void lower_barrier(struct r10conf *conf)
972 write_seqlock_irqsave(&conf->resync_lock, flags);
973 WRITE_ONCE(conf->barrier, conf->barrier - 1);
974 write_sequnlock_irqrestore(&conf->resync_lock, flags);
975 wake_up(&conf->wait_barrier);
978 static bool stop_waiting_barrier(struct r10conf *conf)
984 if (!conf->barrier)
993 if (atomic_read(&conf->nr_pending) && bio_list &&
998 thread = rcu_dereference_protected(conf->mddev->thread, true);
1005 WARN_ON_ONCE(atomic_read(&conf->nr_pending) == 0);
1012 static bool wait_barrier_nolock(struct r10conf *conf)
1014 unsigned int seq = read_seqbegin(&conf->resync_lock);
1016 if (READ_ONCE(conf->barrier))
1019 atomic_inc(&conf->nr_pending);
1020 if (!read_seqretry(&conf->resync_lock, seq))
1023 if (atomic_dec_and_test(&conf->nr_pending))
1024 wake_up_barrier(conf);
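wait_barrier_nolock() is the lock-free fast path for regular I/O: sample the resync seqlock, bail out if a barrier is raised, optimistically bump nr_pending, and keep the increment only if the seqcount has not changed in the meantime (otherwise undo it and wake any barrier waiter). A rough user-space model of that retry pattern in C11 atomics follows; the even/odd seq counter is a simplified stand-in for the kernel's seqlock_t, and the slow-path fallback and wakeup are only noted in comments.

#include <stdatomic.h>
#include <stdbool.h>

struct barrier_fast_model {
    atomic_uint seq;          /* even = stable, odd = writer active (simplified seqcount) */
    atomic_int  barrier;      /* > 0 while resync has raised the barrier */
    atomic_int  nr_pending;   /* regular I/O currently in flight */
};

/* Modelled on the wait_barrier_nolock() excerpt: succeed only if no barrier was,
 * or became, active while we registered our pending I/O. */
bool try_enter_io_fast(struct barrier_fast_model *b)
{
    unsigned int seq = atomic_load(&b->seq);

    if (seq & 1)                                   /* writer active: caller takes the slow path */
        return false;
    if (atomic_load(&b->barrier))
        return false;

    atomic_fetch_add(&b->nr_pending, 1);
    if (atomic_load(&b->seq) == seq)               /* nothing changed: the increment stands */
        return true;

    /* raced with a barrier update: undo and fall back to the locked slow path */
    if (atomic_fetch_sub(&b->nr_pending, 1) == 1) {
        /* last pending I/O gone: the kernel would wake_up_barrier(conf) here */
    }
    return false;
}

The point of this fast path is that steady-state I/O never touches the write side of conf->resync_lock; only when a resync actually raises the barrier does traffic fall back to the locked wait_barrier() slow path.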
1029 static bool wait_barrier(struct r10conf *conf, bool nowait)
1033 if (wait_barrier_nolock(conf))
1036 write_seqlock_irq(&conf->resync_lock);
1037 if (conf->barrier) {
1042 conf->nr_waiting++;
1043 raid10_log(conf->mddev, "wait barrier");
1044 wait_event_barrier(conf, stop_waiting_barrier(conf));
1045 conf->nr_waiting--;
1047 if (!conf->nr_waiting)
1048 wake_up(&conf->wait_barrier);
1052 atomic_inc(&conf->nr_pending);
1053 write_sequnlock_irq(&conf->resync_lock);
1057 static void allow_barrier(struct r10conf *conf)
1059 if ((atomic_dec_and_test(&conf->nr_pending)) ||
1060 (conf->array_freeze_pending))
1061 wake_up_barrier(conf);
1064 static void freeze_array(struct r10conf *conf, int extra)
1078 write_seqlock_irq(&conf->resync_lock);
1079 conf->array_freeze_pending++;
1080 WRITE_ONCE(conf->barrier, conf->barrier + 1);
1081 conf->nr_waiting++;
1082 wait_event_barrier_cmd(conf, atomic_read(&conf->nr_pending) ==
1083 conf->nr_queued + extra, flush_pending_writes(conf));
1084 conf->array_freeze_pending--;
1085 write_sequnlock_irq(&conf->resync_lock);
1088 static void unfreeze_array(struct r10conf *conf)
1091 write_seqlock_irq(&conf->resync_lock);
1092 WRITE_ONCE(conf->barrier, conf->barrier - 1);
1093 conf->nr_waiting--;
1094 wake_up(&conf->wait_barrier);
1095 write_sequnlock_irq(&conf->resync_lock);
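Together, raise_barrier()/lower_barrier() (the resync side) and wait_barrier()/allow_barrier() (the regular-I/O side) form a counting barrier: resync increments conf->barrier and waits for nr_pending to drain, while regular I/O waits for barrier to drop before incrementing nr_pending; freeze_array()/unfreeze_array() reuse the same counters to quiesce the array until only nr_queued + extra requests remain, flushing queued writes while they wait. Below is a condensed pthread model of the core protocol; a plain mutex stands in for the resync seqlock, and the 'force', 'nowait', and freeze refinements are omitted.

#include <pthread.h>

#define RESYNC_DEPTH 32                 /* same limit as the barrier < RESYNC_DEPTH test */

struct barrier_model {
    pthread_mutex_t lock;               /* stands in for conf->resync_lock; initialise before use */
    pthread_cond_t  wait_barrier;       /* stands in for conf->wait_barrier */
    int barrier;                        /* resync barriers currently raised */
    int nr_pending;                     /* regular I/O in flight */
    int nr_waiting;                     /* regular I/O blocked on the barrier */
};

/* resync entry: block new regular I/O, then wait for in-flight I/O to drain */
void raise_barrier_model(struct barrier_model *b)
{
    pthread_mutex_lock(&b->lock);
    while (b->nr_waiting)               /* be fair to already-queued regular I/O */
        pthread_cond_wait(&b->wait_barrier, &b->lock);
    b->barrier++;
    while (b->nr_pending || b->barrier >= RESYNC_DEPTH)
        pthread_cond_wait(&b->wait_barrier, &b->lock);
    pthread_mutex_unlock(&b->lock);
}

void lower_barrier_model(struct barrier_model *b)
{
    pthread_mutex_lock(&b->lock);
    b->barrier--;
    pthread_mutex_unlock(&b->lock);
    pthread_cond_broadcast(&b->wait_barrier);
}

/* regular I/O entry (slow path only; the lock-free fast path is modelled above) */
void wait_barrier_model(struct barrier_model *b)
{
    pthread_mutex_lock(&b->lock);
    if (b->barrier) {
        b->nr_waiting++;
        while (b->barrier)
            pthread_cond_wait(&b->wait_barrier, &b->lock);
        b->nr_waiting--;
        if (!b->nr_waiting)             /* let a waiting raise_barrier_model() proceed */
            pthread_cond_broadcast(&b->wait_barrier);
    }
    b->nr_pending++;
    pthread_mutex_unlock(&b->lock);
}

void allow_barrier_model(struct barrier_model *b)
{
    pthread_mutex_lock(&b->lock);
    if (--b->nr_pending == 0)           /* last in-flight I/O: resync may continue */
        pthread_cond_broadcast(&b->wait_barrier);
    pthread_mutex_unlock(&b->lock);
}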
1112 struct r10conf *conf = mddev->private;
1116 spin_lock_irq(&conf->device_lock);
1117 bio_list_merge(&conf->pending_bio_list, &plug->pending);
1118 spin_unlock_irq(&conf->device_lock);
1119 wake_up_barrier(conf);
1128 wake_up_barrier(conf);
1146 static bool regular_request_wait(struct mddev *mddev, struct r10conf *conf,
1150 if (!wait_barrier(conf, bio->bi_opf & REQ_NOWAIT)) {
1155 bio->bi_iter.bi_sector < conf->reshape_progress &&
1156 bio->bi_iter.bi_sector + sectors > conf->reshape_progress) {
1157 allow_barrier(conf);
1162 raid10_log(conf->mddev, "wait reshape");
1163 wait_event(conf->wait_barrier,
1164 conf->reshape_progress <= bio->bi_iter.bi_sector ||
1165 conf->reshape_progress >= bio->bi_iter.bi_sector +
1167 wait_barrier(conf, false);
1175 struct r10conf *conf = mddev->private;
1190 * we must use the one in conf.
1203 err_rdev = rcu_dereference(conf->mirrors[disk].rdev);
1214 if (!regular_request_wait(mddev, conf, bio, r10_bio->sectors))
1216 rdev = read_balance(conf, r10_bio, &max_sectors);
1233 gfp, &conf->bio_split);
1235 allow_barrier(conf);
1237 wait_barrier(conf, false);
1277 struct r10conf *conf = mddev->private;
1283 rdev = conf->mirrors[devnum].replacement;
1287 rdev = conf->mirrors[devnum].rdev;
1290 rdev = conf->mirrors[devnum].rdev;
1303 &conf->mirrors[devnum].rdev->flags)
1304 && enough(conf, devnum))
1308 if (conf->mddev->gendisk)
1309 trace_block_bio_remap(mbio, disk_devt(conf->mddev->gendisk),
1316 if (!raid1_add_bio_to_plug(mddev, mbio, raid10_unplug, conf->copies)) {
1317 spin_lock_irqsave(&conf->device_lock, flags);
1318 bio_list_add(&conf->pending_bio_list, mbio);
1319 spin_unlock_irqrestore(&conf->device_lock, flags);
1346 struct r10conf *conf = mddev->private;
1352 for (i = 0; i < conf->copies; i++) {
1355 rdev = dereference_rdev_and_rrdev(&conf->mirrors[i], &rrdev);
1398 allow_barrier(conf);
1399 raid10_log(conf->mddev, "%s wait rdev %d blocked",
1402 wait_barrier(conf, false);
1410 struct r10conf *conf = mddev->private;
1426 prepare_to_wait(&conf->wait_barrier,
1433 finish_wait(&conf->wait_barrier, &w);
1437 if (!regular_request_wait(mddev, conf, bio, sectors))
1441 ? (bio->bi_iter.bi_sector < conf->reshape_safe &&
1442 bio->bi_iter.bi_sector + sectors > conf->reshape_progress)
1443 : (bio->bi_iter.bi_sector + sectors > conf->reshape_safe &&
1444 bio->bi_iter.bi_sector < conf->reshape_progress))) {
1446 mddev->reshape_position = conf->reshape_progress;
1451 allow_barrier(conf);
1455 raid10_log(conf->mddev, "wait reshape metadata");
1459 conf->reshape_safe = mddev->reshape_position;
1473 raid10_find_phys(conf, r10_bio);
1480 for (i = 0; i < conf->copies; i++) {
1484 rdev = dereference_rdev_and_rrdev(&conf->mirrors[d], &rrdev);
1545 GFP_NOIO, &conf->bio_split);
1547 allow_barrier(conf);
1549 wait_barrier(conf, false);
1559 for (i = 0; i < conf->copies; i++) {
1570 struct r10conf *conf = mddev->private;
1573 r10_bio = mempool_alloc(&conf->r10bio_pool, GFP_NOIO);
1583 conf->geo.raid_disks);
1593 struct r10conf *conf = r10bio->mddev->private;
1598 allow_barrier(conf);
1616 struct r10conf *conf = r10_bio->mddev->private;
1627 dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
1629 rdev = conf->mirrors[dev].replacement;
1637 rdev = conf->mirrors[dev].rdev;
1641 rdev_dec_pending(rdev, conf->mddev);
1652 struct r10conf *conf = mddev->private;
1653 struct geom *geo = &conf->geo;
1678 wait_barrier(conf, false);
1715 split = bio_split(bio, split_size, GFP_NOIO, &conf->bio_split);
1717 allow_barrier(conf);
1720 wait_barrier(conf, false);
1725 split = bio_split(bio, split_size, GFP_NOIO, &conf->bio_split);
1727 allow_barrier(conf);
1731 wait_barrier(conf, false);
1761 r10_bio = mempool_alloc(&conf->r10bio_pool, GFP_NOIO);
1792 rdev = dereference_rdev_and_rrdev(&conf->mirrors[disk], &rrdev);
1852 struct md_rdev *rdev = conf->mirrors[disk].rdev;
1866 struct md_rdev *rrdev = conf->mirrors[disk].replacement;
1888 wait_barrier(conf, false);
1896 allow_barrier(conf);
1902 struct r10conf *conf = mddev->private;
1903 sector_t chunk_mask = (conf->geo.chunk_mask & conf->prev.chunk_mask);
1924 && (conf->geo.near_copies < conf->geo.raid_disks
1925 || conf->prev.near_copies <
1926 conf->prev.raid_disks)))
1933 wake_up_barrier(conf);
1939 struct r10conf *conf = mddev->private;
1942 if (conf->geo.near_copies < conf->geo.raid_disks)
1944 if (conf->geo.near_copies > 1)
1945 seq_printf(seq, " %d near-copies", conf->geo.near_copies);
1946 if (conf->geo.far_copies > 1) {
1947 if (conf->geo.far_offset)
1948 seq_printf(seq, " %d offset-copies", conf->geo.far_copies);
1950 seq_printf(seq, " %d far-copies", conf->geo.far_copies);
1951 if (conf->geo.far_set_size != conf->geo.raid_disks)
1952 seq_printf(seq, " %d devices per set", conf->geo.far_set_size);
1954 seq_printf(seq, " [%d/%d] [", conf->geo.raid_disks,
1955 conf->geo.raid_disks - mddev->degraded);
1957 for (i = 0; i < conf->geo.raid_disks; i++) {
1958 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
1970 static int _enough(struct r10conf *conf, int previous, int ignore)
1976 disks = conf->prev.raid_disks;
1977 ncopies = conf->prev.near_copies;
1979 disks = conf->geo.raid_disks;
1980 ncopies = conf->geo.near_copies;
1985 int n = conf->copies;
1991 (rdev = rcu_dereference(conf->mirrors[this].rdev)) &&
2006 static int enough(struct r10conf *conf, int ignore)
2013 return _enough(conf, 0, ignore) &&
2014 _enough(conf, 1, ignore);
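_enough() decides whether the array still holds every block: it walks the near-copy sets of either the current or the previous geometry and requires each set to contain at least one In_sync device, optionally pretending that the disk 'ignore' has already failed; enough() simply demands this for both geometries. The user-space model below approximates that walk over a plain in_sync[] array; it is a reconstruction from the excerpted lines, not the kernel function, and the RCU and rdev handling are dropped.

#include <stdbool.h>

struct layout_model {
    int raid_disks;
    int near_copies;
};

/* Roughly the loop suggested by the _enough() excerpt: every group of `copies`
 * consecutive slots, stepping the group start by near_copies, must contain a
 * usable device. */
bool enough_model(const struct layout_model *geo, int copies,
                  const bool *in_sync, int ignore)
{
    int first = 0;

    do {
        int cnt = 0;
        int this = first;

        for (int n = copies; n--; ) {
            if (this != ignore && in_sync[this])
                cnt++;
            this = (this + 1) % geo->raid_disks;
        }
        if (cnt == 0)
            return false;               /* a whole copy set is gone: data is lost */
        first = (first + geo->near_copies) % geo->raid_disks;
    } while (first != 0);

    return true;
}

This is the check behind the raid10_error() excerpt at line 2039: an In_sync device is not marked Faulty if doing so would leave some copy set with no working member.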
2034 struct r10conf *conf = mddev->private;
2037 spin_lock_irqsave(&conf->device_lock, flags);
2039 if (test_bit(In_sync, &rdev->flags) && !enough(conf, rdev->raid_disk)) {
2043 spin_unlock_irqrestore(&conf->device_lock, flags);
2055 spin_unlock_irqrestore(&conf->device_lock, flags);
2059 mdname(mddev), conf->geo.raid_disks - mddev->degraded);
2062 static void print_conf(struct r10conf *conf)
2067 pr_debug("RAID10 conf printout:\n");
2068 if (!conf) {
2069 pr_debug("(!conf)\n");
2072 pr_debug(" --- wd:%d rd:%d\n", conf->geo.raid_disks - conf->mddev->degraded,
2073 conf->geo.raid_disks);
2077 for (i = 0; i < conf->geo.raid_disks; i++) {
2078 rdev = conf->mirrors[i].rdev;
2087 static void close_sync(struct r10conf *conf)
2089 wait_barrier(conf, false);
2090 allow_barrier(conf);
2092 mempool_exit(&conf->r10buf_pool);
2098 struct r10conf *conf = mddev->private;
2107 for (i = 0; i < conf->geo.raid_disks; i++) {
2108 tmp = conf->mirrors + i;
2135 spin_lock_irqsave(&conf->device_lock, flags);
2137 spin_unlock_irqrestore(&conf->device_lock, flags);
2139 print_conf(conf);
2145 struct r10conf *conf = mddev->private;
2149 int last = conf->geo.raid_disks - 1;
2157 if (rdev->saved_raid_disk < 0 && !_enough(conf, 1, -1))
2167 rdev->saved_raid_disk < conf->geo.raid_disks &&
2168 conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
2173 p = &conf->mirrors[mirror];
2192 conf->fullsync = 1;
2198 p = &conf->mirrors[repl_slot];
2206 conf->fullsync = 1;
2210 print_conf(conf);
2216 struct r10conf *conf = mddev->private;
2222 print_conf(conf);
2225 p = conf->mirrors + number;
2244 number < conf->geo.raid_disks &&
2245 enough(conf, -1)) {
2274 print_conf(conf);
2280 struct r10conf *conf = r10_bio->mddev->private;
2289 &conf->mirrors[d].rdev->corrected_errors);
2294 rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev);
2307 struct r10conf *conf = r10_bio->mddev->private;
2308 int d = find_bio_disk(conf, r10_bio, bio, NULL, NULL);
2352 struct r10conf *conf = mddev->private;
2360 d = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
2362 rdev = conf->mirrors[d].replacement;
2364 rdev = conf->mirrors[d].rdev;
2405 struct r10conf *conf = mddev->private;
2414 for (i=0; i<conf->copies; i++)
2418 if (i == conf->copies)
2429 for (i=0 ; i < conf->copies ; i++) {
2443 rdev = conf->mirrors[d].rdev;
2477 bio_reset(tbio, conf->mirrors[d].rdev->bdev, REQ_OP_WRITE);
2488 atomic_inc(&conf->mirrors[d].rdev->nr_pending);
2490 md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(tbio));
2492 if (test_bit(FailFast, &conf->mirrors[d].rdev->flags))
2494 tbio->bi_iter.bi_sector += conf->mirrors[d].rdev->data_offset;
2501 for (i = 0; i < conf->copies; i++) {
2512 md_sync_acct(conf->mirrors[d].replacement->bdev,
2544 struct r10conf *conf = mddev->private;
2562 rdev = conf->mirrors[dr].rdev;
2570 rdev = conf->mirrors[dw].rdev;
2592 if (rdev != conf->mirrors[dw].rdev) {
2594 struct md_rdev *rdev2 = conf->mirrors[dw].rdev;
2602 conf->mirrors[dw].recovery_disabled
2619 struct r10conf *conf = mddev->private;
2646 atomic_inc(&conf->mirrors[d].rdev->nr_pending);
2647 md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(wbio));
2651 atomic_inc(&conf->mirrors[d].replacement->nr_pending);
2652 md_sync_acct(conf->mirrors[d].replacement->bdev,
2726 static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10bio *r10_bio)
2737 rdev = conf->mirrors[d].rdev;
2772 rdev = rcu_dereference(conf->mirrors[d].rdev);
2784 conf->tmppage,
2792 if (sl == conf->copies)
2803 rdev = conf->mirrors[dn].rdev;
2822 sl = conf->copies;
2825 rdev = rcu_dereference(conf->mirrors[d].rdev);
2836 s, conf->tmppage, REQ_OP_WRITE)
2856 sl = conf->copies;
2859 rdev = rcu_dereference(conf->mirrors[d].rdev);
2870 s, conf->tmppage, REQ_OP_READ)) {
2907 struct r10conf *conf = mddev->private;
2908 struct md_rdev *rdev = conf->mirrors[r10_bio->devs[i].devnum].rdev;
2968 struct r10conf *conf = mddev->private;
2986 freeze_array(conf, 1);
2987 fix_read_error(conf, mddev, r10_bio);
2988 unfreeze_array(conf);
2999 allow_barrier(conf);
3002 static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
3015 for (m = 0; m < conf->copies; m++) {
3017 rdev = conf->mirrors[dev].rdev;
3031 md_error(conf->mddev, rdev);
3033 rdev = conf->mirrors[dev].replacement;
3048 md_error(conf->mddev, rdev);
3054 for (m = 0; m < conf->copies; m++) {
3057 rdev = conf->mirrors[dev].rdev;
3063 rdev_dec_pending(rdev, conf->mddev);
3067 md_error(conf->mddev, rdev);
3071 rdev_dec_pending(rdev, conf->mddev);
3074 rdev = conf->mirrors[dev].replacement;
3080 rdev_dec_pending(rdev, conf->mddev);
3084 spin_lock_irq(&conf->device_lock);
3085 list_add(&r10_bio->retry_list, &conf->bio_end_io_list);
3086 conf->nr_queued++;
3087 spin_unlock_irq(&conf->device_lock);
3092 wake_up(&conf->wait_barrier);
3093 md_wakeup_thread(conf->mddev->thread);
3108 struct r10conf *conf = mddev->private;
3109 struct list_head *head = &conf->retry_list;
3114 if (!list_empty_careful(&conf->bio_end_io_list) &&
3117 spin_lock_irqsave(&conf->device_lock, flags);
3119 while (!list_empty(&conf->bio_end_io_list)) {
3120 list_move(conf->bio_end_io_list.prev, &tmp);
3121 conf->nr_queued--;
3124 spin_unlock_irqrestore(&conf->device_lock, flags);
3142 flush_pending_writes(conf);
3144 spin_lock_irqsave(&conf->device_lock, flags);
3146 spin_unlock_irqrestore(&conf->device_lock, flags);
3151 conf->nr_queued--;
3152 spin_unlock_irqrestore(&conf->device_lock, flags);
3155 conf = mddev->private;
3158 handle_write_completed(conf, r10_bio);
3177 static int init_resync(struct r10conf *conf)
3182 BUG_ON(mempool_initialized(&conf->r10buf_pool));
3183 conf->have_replacement = 0;
3184 for (i = 0; i < conf->geo.raid_disks; i++)
3185 if (conf->mirrors[i].replacement)
3186 conf->have_replacement = 1;
3187 ret = mempool_init(&conf->r10buf_pool, buffs,
3188 r10buf_pool_alloc, r10buf_pool_free, conf);
3191 conf->next_resync = 0;
3195 static struct r10bio *raid10_alloc_init_r10buf(struct r10conf *conf)
3197 struct r10bio *r10bio = mempool_alloc(&conf->r10buf_pool, GFP_NOIO);
3203 if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery) ||
3204 test_bit(MD_RECOVERY_RESHAPE, &conf->mddev->recovery))
3205 nalloc = conf->copies; /* resync */
3228 static void raid10_set_cluster_sync_high(struct r10conf *conf)
3245 chunks = conf->geo.raid_disks / conf->geo.near_copies;
3246 if (conf->geo.raid_disks % conf->geo.near_copies == 0)
3250 window_size = (chunks + extra_chunk) * conf->mddev->chunk_sectors;
3258 conf->cluster_sync_high = conf->cluster_sync_low + window_size;
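raid10_set_cluster_sync_high() sizes the clustered-resync window with simple chunk arithmetic: one chunk per near-copy group, rounded up when raid_disks is not a multiple of near_copies, multiplied by the chunk size and added on top of cluster_sync_low. The sketch below restates only the steps visible in the excerpt; the source lines elided between the window_size computation and the final assignment may adjust the window further, so treat this as an approximation.

#include <stdint.h>

/* Window arithmetic visible in the raid10_set_cluster_sync_high() excerpt;
 * all sizes are in sectors. */
uint64_t cluster_sync_high_model(uint64_t cluster_sync_low, int raid_disks,
                                 int near_copies, uint32_t chunk_sectors)
{
    int chunks = raid_disks / near_copies;
    int extra_chunk = (raid_disks % near_copies == 0) ? 0 : 1;
    uint64_t window_size = (uint64_t)(chunks + extra_chunk) * chunk_sectors;

    return cluster_sync_low + window_size;
}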
3296 struct r10conf *conf = mddev->private;
3305 sector_t chunk_mask = conf->geo.chunk_mask;
3319 conf->fullsync == 0) {
3324 if (!mempool_initialized(&conf->r10buf_pool))
3325 if (init_resync(conf))
3334 conf->cluster_sync_low = 0;
3335 conf->cluster_sync_high = 0;
3347 end_reshape(conf);
3348 close_sync(conf);
3356 else for (i = 0; i < conf->geo.raid_disks; i++) {
3358 raid10_find_virt(conf, mddev->curr_resync, i);
3364 if ((!mddev->bitmap || conf->fullsync)
3365 && conf->have_replacement
3371 for (i = 0; i < conf->geo.raid_disks; i++) {
3373 rcu_dereference(conf->mirrors[i].replacement);
3379 conf->fullsync = 0;
3382 close_sync(conf);
3390 if (chunks_skipped >= conf->geo.raid_disks) {
3399 conf->mirrors[error_disk].recovery_disabled =
3417 if (conf->geo.near_copies < conf->geo.raid_disks &&
3425 if (conf->nr_waiting)
3449 for (i = 0 ; i < conf->geo.raid_disks; i++) {
3455 struct raid10_info *mirror = &conf->mirrors[i];
3476 sect = raid10_find_virt(conf, sector_nr, i);
3494 !conf->fullsync) {
3508 r10_bio = raid10_alloc_init_r10buf(conf);
3510 raise_barrier(conf, rb2 != NULL);
3520 raid10_find_phys(conf, r10_bio);
3526 for (j = 0; j < conf->geo.raid_disks; j++) {
3528 conf->mirrors[j].rdev);
3539 for (j=0; j<conf->copies;j++) {
3544 rcu_dereference(conf->mirrors[d].rdev);
3580 for (k=0; k<conf->copies; k++)
3583 BUG_ON(k == conf->copies);
3624 if (j == conf->copies) {
3632 for (k = 0; k < conf->copies; k++)
3679 for (; j < conf->copies; j++) {
3681 if (conf->mirrors[d].rdev &&
3683 &conf->mirrors[d].rdev->flags))
3713 (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high));
3717 !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED,
3725 r10_bio = raid10_alloc_init_r10buf(conf);
3730 raise_barrier(conf, 0);
3731 conf->next_resync = sector_nr;
3736 raid10_find_phys(conf, r10_bio);
3739 for (i = 0; i < conf->copies; i++) {
3751 rdev = rcu_dereference(conf->mirrors[d].rdev);
3781 rdev = rcu_dereference(conf->mirrors[d].replacement);
3806 for (i=0; i<conf->copies; i++) {
3809 rdev_dec_pending(conf->mirrors[d].rdev,
3814 conf->mirrors[d].replacement,
3850 if (conf->cluster_sync_high < sector_nr + nr_sectors) {
3851 conf->cluster_sync_low = mddev->curr_resync_completed;
3852 raid10_set_cluster_sync_high(conf);
3855 conf->cluster_sync_low,
3856 conf->cluster_sync_high);
3863 for (i = 0; i < conf->geo.raid_disks; i++) {
3869 sect_va1 = raid10_find_virt(conf, sector_nr, i);
3871 if (conf->cluster_sync_high < sect_va1 + nr_sectors) {
3877 sect_va2 = raid10_find_virt(conf,
3880 if (conf->cluster_sync_low == 0 ||
3881 conf->cluster_sync_low > sect_va2)
3882 conf->cluster_sync_low = sect_va2;
3886 raid10_set_cluster_sync_high(conf);
3888 conf->cluster_sync_low,
3889 conf->cluster_sync_high);
3933 struct r10conf *conf = mddev->private;
3936 raid_disks = min(conf->geo.raid_disks,
3937 conf->prev.raid_disks);
3939 sectors = conf->dev_sectors;
3941 size = sectors >> conf->geo.chunk_shift;
3942 sector_div(size, conf->geo.far_copies);
3944 sector_div(size, conf->geo.near_copies);
3946 return size << conf->geo.chunk_shift;
3949 static void calc_sectors(struct r10conf *conf, sector_t size)
3952 * actually be used, and set conf->dev_sectors and
3953 * conf->stride
3956 size = size >> conf->geo.chunk_shift;
3957 sector_div(size, conf->geo.far_copies);
3958 size = size * conf->geo.raid_disks;
3959 sector_div(size, conf->geo.near_copies);
3962 size = size * conf->copies;
3967 size = DIV_ROUND_UP_SECTOR_T(size, conf->geo.raid_disks);
3969 conf->dev_sectors = size << conf->geo.chunk_shift;
3971 if (conf->geo.far_offset)
3972 conf->geo.stride = 1 << conf->geo.chunk_shift;
3974 sector_div(size, conf->geo.far_copies);
3975 conf->geo.stride = size << conf->geo.chunk_shift;
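raid10_size() and calc_sectors() are inverse pieces of the same chunk arithmetic: the exported array size is the per-device size in chunks, divided by far_copies, multiplied by raid_disks and divided by near_copies, scaled back to sectors; calc_sectors() runs the conversion the other way to derive conf->dev_sectors and the far-copy stride. Below is a user-space restatement of the visible steps, with plain 64-bit division standing in for sector_div() and DIV_ROUND_UP_SECTOR_T(); struct geo_model is an invented container for the fields the excerpt reads.

#include <stdint.h>

struct geo_model {
    int raid_disks, near_copies, far_copies;
    int copies;                   /* the kernel keeps this in conf->copies */
    int chunk_shift;              /* chunk size = 1 << chunk_shift sectors */
    int far_offset;
};

/* Usable array size (sectors) for a given per-device size, per the raid10_size() excerpt. */
uint64_t array_size_model(const struct geo_model *g, uint64_t dev_sectors)
{
    uint64_t size = dev_sectors >> g->chunk_shift;      /* chunks per device */
    size /= g->far_copies;
    size *= g->raid_disks;
    size /= g->near_copies;
    return size << g->chunk_shift;
}

/* Per-device sectors and far stride for a requested array size, per the calc_sectors() excerpt. */
void calc_sectors_model(const struct geo_model *g, uint64_t array_sectors,
                        uint64_t *dev_sectors, uint64_t *stride)
{
    uint64_t size = array_sectors >> g->chunk_shift;
    size /= g->far_copies;
    size *= g->raid_disks;
    size /= g->near_copies;                              /* chunks in the array */
    size *= g->copies;                                   /* chunk copies to place */
    size = (size + g->raid_disks - 1) / g->raid_disks;   /* round up: chunks per device */

    *dev_sectors = size << g->chunk_shift;
    if (g->far_offset)
        *stride = (uint64_t)1 << g->chunk_shift;         /* far copies interleave by one chunk */
    else
        *stride = (size / g->far_copies) << g->chunk_shift;
}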
4036 static void raid10_free_conf(struct r10conf *conf)
4038 if (!conf)
4041 mempool_exit(&conf->r10bio_pool);
4042 kfree(conf->mirrors);
4043 kfree(conf->mirrors_old);
4044 kfree(conf->mirrors_new);
4045 safe_put_page(conf->tmppage);
4046 bioset_exit(&conf->bio_split);
4047 kfree(conf);
4052 struct r10conf *conf = NULL;
4072 conf = kzalloc(sizeof(struct r10conf), GFP_KERNEL);
4073 if (!conf)
4077 conf->mirrors = kcalloc(mddev->raid_disks + max(0, -mddev->delta_disks),
4080 if (!conf->mirrors)
4083 conf->tmppage = alloc_page(GFP_KERNEL);
4084 if (!conf->tmppage)
4087 conf->geo = geo;
4088 conf->copies = copies;
4089 err = mempool_init(&conf->r10bio_pool, NR_RAID_BIOS, r10bio_pool_alloc,
4090 rbio_pool_free, conf);
4094 err = bioset_init(&conf->bio_split, BIO_POOL_SIZE, 0, 0);
4098 calc_sectors(conf, mddev->dev_sectors);
4100 conf->prev = conf->geo;
4101 conf->reshape_progress = MaxSector;
4103 if (setup_geo(&conf->prev, mddev, geo_old) != conf->copies) {
4107 conf->reshape_progress = mddev->reshape_position;
4108 if (conf->prev.far_offset)
4109 conf->prev.stride = 1 << conf->prev.chunk_shift;
4112 conf->prev.stride = conf->dev_sectors;
4114 conf->reshape_safe = conf->reshape_progress;
4115 spin_lock_init(&conf->device_lock);
4116 INIT_LIST_HEAD(&conf->retry_list);
4117 INIT_LIST_HEAD(&conf->bio_end_io_list);
4119 seqlock_init(&conf->resync_lock);
4120 init_waitqueue_head(&conf->wait_barrier);
4121 atomic_set(&conf->nr_pending, 0);
4124 rcu_assign_pointer(conf->thread,
4126 if (!conf->thread)
4129 conf->mddev = mddev;
4130 return conf;
4133 raid10_free_conf(conf);
4137 static void raid10_set_io_opt(struct r10conf *conf)
4139 int raid_disks = conf->geo.raid_disks;
4141 if (!(conf->geo.raid_disks % conf->geo.near_copies))
4142 raid_disks /= conf->geo.near_copies;
4143 blk_queue_io_opt(conf->mddev->queue, (conf->mddev->chunk_sectors << 9) *
4149 struct r10conf *conf;
4161 conf = setup_conf(mddev);
4162 if (IS_ERR(conf))
4163 return PTR_ERR(conf);
4164 mddev->private = conf;
4166 conf = mddev->private;
4167 if (!conf)
4170 rcu_assign_pointer(mddev->thread, conf->thread);
4171 rcu_assign_pointer(conf->thread, NULL);
4173 if (mddev_is_clustered(conf->mddev)) {
4188 raid10_set_io_opt(conf);
4197 if (disk_idx >= conf->geo.raid_disks &&
4198 disk_idx >= conf->prev.raid_disks)
4200 disk = conf->mirrors + disk_idx;
4228 if (!enough(conf, -1)) {
4234 if (conf->reshape_progress != MaxSector) {
4236 if (conf->geo.far_copies != 1 &&
4237 conf->geo.far_offset == 0)
4239 if (conf->prev.far_copies != 1 &&
4240 conf->prev.far_offset == 0)
4246 i < conf->geo.raid_disks
4247 || i < conf->prev.raid_disks;
4250 disk = conf->mirrors + i;
4265 conf->fullsync = 1;
4271 conf->fullsync = 1;
4281 mdname(mddev), conf->geo.raid_disks - mddev->degraded,
4282 conf->geo.raid_disks);
4286 mddev->dev_sectors = conf->dev_sectors;
4295 if (conf->reshape_progress != MaxSector) {
4298 before_length = ((1 << conf->prev.chunk_shift) *
4299 conf->prev.far_copies);
4300 after_length = ((1 << conf->geo.chunk_shift) *
4301 conf->geo.far_copies);
4308 conf->offset_diff = min_offset_diff;
4324 raid10_free_conf(conf);
4337 struct r10conf *conf = mddev->private;
4340 raise_barrier(conf, 0);
4342 lower_barrier(conf);
4359 struct r10conf *conf = mddev->private;
4365 if (conf->geo.far_copies > 1 && !conf->geo.far_offset)
4384 calc_sectors(conf, sectors);
4385 mddev->dev_sectors = conf->dev_sectors;
4393 struct r10conf *conf;
4413 conf = setup_conf(mddev);
4414 if (!IS_ERR(conf)) {
4422 return conf;
4463 struct r10conf *conf = mddev->private;
4466 if (conf->geo.far_copies != 1 && !conf->geo.far_offset)
4469 if (setup_geo(&geo, mddev, geo_start) != conf->copies)
4480 if (!enough(conf, -1))
4483 kfree(conf->mirrors_new);
4484 conf->mirrors_new = NULL;
4487 conf->mirrors_new =
4491 if (!conf->mirrors_new)
4510 static int calc_degraded(struct r10conf *conf)
4518 for (i = 0; i < conf->prev.raid_disks; i++) {
4519 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
4530 if (conf->geo.raid_disks == conf->prev.raid_disks)
4534 for (i = 0; i < conf->geo.raid_disks; i++) {
4535 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
4544 if (conf->geo.raid_disks <= conf->prev.raid_disks)
4570 struct r10conf *conf = mddev->private;
4578 if (setup_geo(&new, mddev, geo_start) != conf->copies)
4581 before_length = ((1 << conf->prev.chunk_shift) *
4582 conf->prev.far_copies);
4583 after_length = ((1 << conf->geo.chunk_shift) *
4584 conf->geo.far_copies);
4609 conf->offset_diff = min_offset_diff;
4610 spin_lock_irq(&conf->device_lock);
4611 if (conf->mirrors_new) {
4612 memcpy(conf->mirrors_new, conf->mirrors,
4613 sizeof(struct raid10_info)*conf->prev.raid_disks);
4615 kfree(conf->mirrors_old);
4616 conf->mirrors_old = conf->mirrors;
4617 conf->mirrors = conf->mirrors_new;
4618 conf->mirrors_new = NULL;
4620 setup_geo(&conf->geo, mddev, geo_start);
4625 spin_unlock_irq(&conf->device_lock);
4631 conf->reshape_progress = size;
4633 conf->reshape_progress = 0;
4634 conf->reshape_safe = conf->reshape_progress;
4635 spin_unlock_irq(&conf->device_lock);
4642 newsize = raid10_size(mddev, 0, conf->geo.raid_disks);
4684 conf->prev.raid_disks)
4692 } else if (rdev->raid_disk >= conf->prev.raid_disks
4702 spin_lock_irq(&conf->device_lock);
4703 mddev->degraded = calc_degraded(conf);
4704 spin_unlock_irq(&conf->device_lock);
4705 mddev->raid_disks = conf->geo.raid_disks;
4706 mddev->reshape_position = conf->reshape_progress;
4721 conf->reshape_checkpoint = jiffies;
4728 spin_lock_irq(&conf->device_lock);
4729 conf->geo = conf->prev;
4730 mddev->raid_disks = conf->geo.raid_disks;
4734 conf->reshape_progress = MaxSector;
4735 conf->reshape_safe = MaxSector;
4737 spin_unlock_irq(&conf->device_lock);
4802 * (conf->offset_diff - always positive) allows a bit of slack,
4812 struct r10conf *conf = mddev->private;
4828 conf->reshape_progress < raid10_size(mddev, 0, 0)) {
4830 - conf->reshape_progress);
4832 conf->reshape_progress > 0)
4833 sector_nr = conf->reshape_progress;
4850 next = first_dev_address(conf->reshape_progress - 1,
4851 &conf->geo);
4856 safe = last_dev_address(conf->reshape_safe - 1,
4857 &conf->prev);
4859 if (next + conf->offset_diff < safe)
4862 last = conf->reshape_progress - 1;
4863 sector_nr = last & ~(sector_t)(conf->geo.chunk_mask
4864 & conf->prev.chunk_mask);
4871 next = last_dev_address(conf->reshape_progress, &conf->geo);
4876 safe = first_dev_address(conf->reshape_safe, &conf->prev);
4881 if (next > safe + conf->offset_diff)
4884 sector_nr = conf->reshape_progress;
4885 last = sector_nr | (conf->geo.chunk_mask
4886 & conf->prev.chunk_mask);
4893 time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) {
4895 wait_barrier(conf, false);
4896 mddev->reshape_position = conf->reshape_progress;
4899 - conf->reshape_progress;
4901 mddev->curr_resync_completed = conf->reshape_progress;
4902 conf->reshape_checkpoint = jiffies;
4908 allow_barrier(conf);
4911 conf->reshape_safe = mddev->reshape_position;
4912 allow_barrier(conf);
4915 raise_barrier(conf, 0);
4918 r10_bio = raid10_alloc_init_r10buf(conf);
4920 raise_barrier(conf, 1);
4926 rdev = read_balance(conf, r10_bio, &max_sectors);
4934 mempool_free(r10_bio, &conf->r10buf_pool);
4952 if (mddev_is_clustered(mddev) && conf->cluster_sync_high <= sector_nr) {
4956 conf->cluster_sync_low = sector_nr;
4957 conf->cluster_sync_high = sector_nr + CLUSTER_RESYNC_WINDOW_SECTORS;
4966 if (sb_reshape_pos < conf->cluster_sync_low)
4967 conf->cluster_sync_low = sb_reshape_pos;
4970 md_cluster_ops->resync_info_update(mddev, conf->cluster_sync_low,
4971 conf->cluster_sync_high);
4975 __raid10_find_phys(&conf->geo, r10_bio);
4981 for (s = 0; s < conf->copies*2; s++) {
4986 rdev2 = rcu_dereference(conf->mirrors[d].replacement);
4989 rdev2 = rcu_dereference(conf->mirrors[d].rdev);
5035 lower_barrier(conf);
5041 conf->reshape_progress -= sectors_done;
5043 conf->reshape_progress += sectors_done;
5058 struct r10conf *conf = mddev->private;
5072 for (s = 0; s < conf->copies*2; s++) {
5078 rdev = rcu_dereference(conf->mirrors[d].replacement);
5081 rdev = rcu_dereference(conf->mirrors[d].rdev);
5098 static void end_reshape(struct r10conf *conf)
5100 if (test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery))
5103 spin_lock_irq(&conf->device_lock);
5104 conf->prev = conf->geo;
5105 md_finish_reshape(conf->mddev);
5107 conf->reshape_progress = MaxSector;
5108 conf->reshape_safe = MaxSector;
5109 spin_unlock_irq(&conf->device_lock);
5111 if (conf->mddev->queue)
5112 raid10_set_io_opt(conf);
5113 conf->fullsync = 0;
5118 struct r10conf *conf = mddev->private;
5124 conf->reshape_progress = mddev->reshape_position;
5134 struct r10conf *conf = mddev->private;
5140 r10b = kmalloc(struct_size(r10b, devs, conf->copies), GFP_NOIO);
5150 __raid10_find_phys(&conf->prev, r10b);
5163 struct md_rdev *rdev = rcu_dereference(conf->mirrors[d].rdev);
5184 if (slot >= conf->copies)
5208 struct r10conf *conf = mddev->private;
5214 d = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
5216 rdev = conf->mirrors[d].replacement;
5219 rdev = conf->mirrors[d].rdev;
5242 struct r10conf *conf = mddev->private;
5256 for (d = conf->geo.raid_disks ;
5257 d < conf->geo.raid_disks - mddev->delta_disks;
5259 struct md_rdev *rdev = rcu_dereference(conf->mirrors[d].rdev);
5262 rdev = rcu_dereference(conf->mirrors[d].replacement);
5269 mddev->chunk_sectors = 1 << conf->geo.chunk_shift;