Lines matching refs:conf in drivers/md/raid1.c
46 static void allow_barrier(struct r1conf *conf, sector_t sector_nr);
47 static void lower_barrier(struct r1conf *conf, sector_t sector_nr);
241 static void put_all_bios(struct r1conf *conf, struct r1bio *r1_bio)
245 for (i = 0; i < conf->raid_disks * 2; i++) {
255 struct r1conf *conf = r1_bio->mddev->private;
257 put_all_bios(conf, r1_bio);
258 mempool_free(r1_bio, &conf->r1bio_pool);
263 struct r1conf *conf = r1_bio->mddev->private;
267 for (i = 0; i < conf->raid_disks * 2; i++) {
270 rdev_dec_pending(conf->mirrors[i].rdev, r1_bio->mddev);
273 mempool_free(r1_bio, &conf->r1buf_pool);
275 lower_barrier(conf, sect);
282 struct r1conf *conf = mddev->private;
286 spin_lock_irqsave(&conf->device_lock, flags);
287 list_add(&r1_bio->retry_list, &conf->retry_list);
288 atomic_inc(&conf->nr_queued[idx]);
289 spin_unlock_irqrestore(&conf->device_lock, flags);
291 wake_up(&conf->wait_barrier);
313 struct r1conf *conf = r1_bio->mddev->private;
331 allow_barrier(conf, sector);
339 struct r1conf *conf = r1_bio->mddev->private;
341 conf->mirrors[disk].head_position =
351 struct r1conf *conf = r1_bio->mddev->private;
352 int raid_disks = conf->raid_disks;
368 struct r1conf *conf = r1_bio->mddev->private;
369 struct md_rdev *rdev = conf->mirrors[r1_bio->read_disk].rdev;
389 spin_lock_irqsave(&conf->device_lock, flags);
390 if (r1_bio->mddev->degraded == conf->raid_disks ||
391 (r1_bio->mddev->degraded == conf->raid_disks-1 &&
394 spin_unlock_irqrestore(&conf->device_lock, flags);
399 rdev_dec_pending(rdev, conf->mddev);
405 mdname(conf->mddev),
450 struct r1conf *conf = r1_bio->mddev->private;
453 struct md_rdev *rdev = conf->mirrors[mirror].rdev;
467 conf->mddev->recovery);
553 rdev_dec_pending(rdev, conf->mddev);
598 static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sectors)
630 if ((conf->mddev->recovery_cp < this_sector + sectors) ||
631 (mddev_is_clustered(conf->mddev) &&
632 md_cluster_ops->area_resyncing(conf->mddev, READ, this_sector,
638 for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) {
645 rdev = rcu_dereference(conf->mirrors[disk].rdev);
712 dist = abs(this_sector - conf->mirrors[disk].head_position);
718 if (conf->mirrors[disk].next_seq_sect == this_sector
721 struct raid1_info *mirror = &conf->mirrors[disk];
776 rdev = rcu_dereference(conf->mirrors[best_disk].rdev);
782 if (conf->mirrors[best_disk].next_seq_sect != this_sector)
783 conf->mirrors[best_disk].seq_start = this_sector;
785 conf->mirrors[best_disk].next_seq_sect = this_sector + sectors;
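The read_balance() lines above carry the mirror-selection heuristic: a mirror that is continuing a sequential stream (next_seq_sect == this_sector) is preferred outright, otherwise the in-sync mirror whose head_position is closest to the requested sector wins, and the winner's next_seq_sect is advanced. A minimal userspace sketch of that heuristic, not the kernel code itself (struct mirror_state and pick_mirror() are invented names for the illustration):

#include <stdint.h>

typedef uint64_t sector_t;

struct mirror_state {			/* stand-in for the raid1_info fields used above */
	sector_t head_position;		/* where the disk head was left */
	sector_t next_seq_sect;		/* next sector of a sequential stream */
	int	 in_sync;
};

static int pick_mirror(const struct mirror_state *m, int nmirrors,
		       sector_t this_sector)
{
	sector_t best_dist = ~(sector_t)0;
	int best = -1;

	for (int disk = 0; disk < nmirrors; disk++) {
		sector_t dist;

		if (!m[disk].in_sync)
			continue;
		/* A mirror already streaming at this offset wins outright. */
		if (m[disk].next_seq_sect == this_sector)
			return disk;
		dist = this_sector > m[disk].head_position ?
			this_sector - m[disk].head_position :
			m[disk].head_position - this_sector;
		if (dist < best_dist) {
			best_dist = dist;
			best = disk;
		}
	}
	return best;
}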
793 static void wake_up_barrier(struct r1conf *conf)
795 if (wq_has_sleeper(&conf->wait_barrier))
796 wake_up(&conf->wait_barrier);
799 static void flush_bio_list(struct r1conf *conf, struct bio *bio)
802 raid1_prepare_flush_writes(conf->mddev->bitmap);
803 wake_up_barrier(conf);
814 static void flush_pending_writes(struct r1conf *conf)
819 spin_lock_irq(&conf->device_lock);
821 if (conf->pending_bio_list.head) {
825 bio = bio_list_get(&conf->pending_bio_list);
826 spin_unlock_irq(&conf->device_lock);
839 flush_bio_list(conf, bio);
842 spin_unlock_irq(&conf->device_lock);
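flush_pending_writes() above shows a detach-then-submit pattern: the whole pending list is taken with bio_list_get() while device_lock is held, the lock is dropped, and only then are the bios issued. A simplified userspace model of that pattern (struct pending, submit_one() and flush_pending() are invented for the sketch):

#include <pthread.h>
#include <stddef.h>

struct bio { struct bio *bi_next; };	/* minimal stand-in for struct bio */

struct pending {
	pthread_mutex_t lock;		/* plays the role of conf->device_lock */
	struct bio *head;
};

static void submit_one(struct bio *bio)
{
	(void)bio;			/* issue the I/O; never called with the lock held */
}

static void flush_pending(struct pending *p)
{
	struct bio *bio;

	pthread_mutex_lock(&p->lock);
	bio = p->head;			/* detach the whole list at once */
	p->head = NULL;
	pthread_mutex_unlock(&p->lock);

	while (bio) {
		struct bio *next = bio->bi_next;

		bio->bi_next = NULL;
		submit_one(bio);	/* submission happens outside the lock */
		bio = next;
	}
}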
869 static int raise_barrier(struct r1conf *conf, sector_t sector_nr)
873 spin_lock_irq(&conf->resync_lock);
876 wait_event_lock_irq(conf->wait_barrier,
877 !atomic_read(&conf->nr_waiting[idx]),
878 conf->resync_lock);
881 atomic_inc(&conf->barrier[idx]);
883 * In raise_barrier() we firstly increase conf->barrier[idx] then
884 * check conf->nr_pending[idx]. In _wait_barrier() we firstly
885 * increase conf->nr_pending[idx] then check conf->barrier[idx].
886 * A memory barrier here to make sure conf->nr_pending[idx] won't
887 * be fetched before conf->barrier[idx] is increased. Otherwise
894 * B: while conf->nr_pending[idx] is not 0, meaning regular I/O
896 * C: while conf->barrier[idx] >= RESYNC_DEPTH, meaning reaches
899 wait_event_lock_irq(conf->wait_barrier,
900 (!conf->array_frozen &&
901 !atomic_read(&conf->nr_pending[idx]) &&
902 atomic_read(&conf->barrier[idx]) < RESYNC_DEPTH) ||
903 test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery),
904 conf->resync_lock);
906 if (test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {
907 atomic_dec(&conf->barrier[idx]);
908 spin_unlock_irq(&conf->resync_lock);
909 wake_up(&conf->wait_barrier);
913 atomic_inc(&conf->nr_sync_pending);
914 spin_unlock_irq(&conf->resync_lock);
919 static void lower_barrier(struct r1conf *conf, sector_t sector_nr)
923 BUG_ON(atomic_read(&conf->barrier[idx]) <= 0);
925 atomic_dec(&conf->barrier[idx]);
926 atomic_dec(&conf->nr_sync_pending);
927 wake_up(&conf->wait_barrier);
930 static bool _wait_barrier(struct r1conf *conf, int idx, bool nowait)
935 * We need to increase conf->nr_pending[idx] very early here,
937 * conf->nr_pending[idx] to be 0. Then we can avoid holding
938 * conf->resync_lock when there is no barrier raised in same
942 atomic_inc(&conf->nr_pending[idx]);
944 * In _wait_barrier() we firstly increase conf->nr_pending[idx], then
945 * check conf->barrier[idx]. In raise_barrier() we firstly increase
946 * conf->barrier[idx], then check conf->nr_pending[idx]. A memory
947 * barrier is necessary here to make sure conf->barrier[idx] won't be
948 * fetched before conf->nr_pending[idx] is increased. Otherwise there
955 * here. If, while we check conf->barrier[idx], the array is
956 * frozen (conf->array_frozen is 1), and conf->barrier[idx] is
962 if (!READ_ONCE(conf->array_frozen) &&
963 !atomic_read(&conf->barrier[idx]))
967 * After holding conf->resync_lock, conf->nr_pending[idx]
970 * raise_barrier() might be waiting for conf->nr_pending[idx]
973 spin_lock_irq(&conf->resync_lock);
974 atomic_inc(&conf->nr_waiting[idx]);
975 atomic_dec(&conf->nr_pending[idx]);
980 wake_up_barrier(conf);
987 wait_event_lock_irq(conf->wait_barrier,
988 !conf->array_frozen &&
989 !atomic_read(&conf->barrier[idx]),
990 conf->resync_lock);
991 atomic_inc(&conf->nr_pending[idx]);
994 atomic_dec(&conf->nr_waiting[idx]);
995 spin_unlock_irq(&conf->resync_lock);
999 static bool wait_read_barrier(struct r1conf *conf, sector_t sector_nr, bool nowait)
1009 * conf->barrier[idx] here, memory barrier is unnecessary as well.
1011 atomic_inc(&conf->nr_pending[idx]);
1013 if (!READ_ONCE(conf->array_frozen))
1016 spin_lock_irq(&conf->resync_lock);
1017 atomic_inc(&conf->nr_waiting[idx]);
1018 atomic_dec(&conf->nr_pending[idx]);
1023 wake_up_barrier(conf);
1031 wait_event_lock_irq(conf->wait_barrier,
1032 !conf->array_frozen,
1033 conf->resync_lock);
1034 atomic_inc(&conf->nr_pending[idx]);
1037 atomic_dec(&conf->nr_waiting[idx]);
1038 spin_unlock_irq(&conf->resync_lock);
1042 static bool wait_barrier(struct r1conf *conf, sector_t sector_nr, bool nowait)
1046 return _wait_barrier(conf, idx, nowait);
1049 static void _allow_barrier(struct r1conf *conf, int idx)
1051 atomic_dec(&conf->nr_pending[idx]);
1052 wake_up_barrier(conf);
1055 static void allow_barrier(struct r1conf *conf, sector_t sector_nr)
1059 _allow_barrier(conf, idx);
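The comments quoted from raise_barrier() and _wait_barrier() above describe a store-buffering style handshake per barrier bucket: each side increments its own counter, issues a full memory barrier, then reads the other side's counter, so the two paths can never both miss each other. A userspace C11 sketch of just that ordering argument (the retry-on-failure shape is simplified; in the kernel raise_barrier() keeps the barrier counter raised and sleeps until nr_pending drains, and _wait_barrier() falls back to a slow path under resync_lock):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int nr_pending;	/* regular I/O in flight in one bucket */
static atomic_int barrier;	/* resync requests raised for that bucket */

/* Models the _wait_barrier() fast path: publish the pending I/O first,
 * then look for a raised barrier. */
static bool io_may_proceed(void)
{
	atomic_fetch_add(&nr_pending, 1);
	atomic_thread_fence(memory_order_seq_cst);	/* pairs with the fence below */
	if (atomic_load(&barrier) == 0)
		return true;		/* no resync raised: go ahead lock-free */
	atomic_fetch_sub(&nr_pending, 1);
	return false;			/* fall back to the locked slow path */
}

/* Models the raise_barrier() check: publish the barrier first, then look
 * for in-flight regular I/O. */
static bool resync_may_proceed(void)
{
	atomic_fetch_add(&barrier, 1);
	atomic_thread_fence(memory_order_seq_cst);
	if (atomic_load(&nr_pending) == 0)
		return true;		/* no regular I/O: resync may run */
	atomic_fetch_sub(&barrier, 1);
	return false;			/* must wait for regular I/O to drain */
}

/* With seq_cst ordering the classic store-buffering outcome is forbidden:
 * io_may_proceed() and resync_may_proceed() cannot both return true for
 * concurrent calls on the same bucket. */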
1062 /* conf->resync_lock should be held */
1063 static int get_unqueued_pending(struct r1conf *conf)
1067 ret = atomic_read(&conf->nr_sync_pending);
1069 ret += atomic_read(&conf->nr_pending[idx]) -
1070 atomic_read(&conf->nr_queued[idx]);
1075 static void freeze_array(struct r1conf *conf, int extra)
1088 * Every flying I/O contributes to a conf->nr_pending[idx], idx is the
1090 * normal I/O are queued, sum of all conf->nr_pending[] will match sum
1091 * of all conf->nr_queued[]. But normal I/O failure is an exception,
1097 * get_unqueued_pending(conf) gets equal to extra. For
1100 spin_lock_irq(&conf->resync_lock);
1101 conf->array_frozen = 1;
1102 raid1_log(conf->mddev, "wait freeze");
1104 conf->wait_barrier,
1105 get_unqueued_pending(conf) == extra,
1106 conf->resync_lock,
1107 flush_pending_writes(conf));
1108 spin_unlock_irq(&conf->resync_lock);
1110 static void unfreeze_array(struct r1conf *conf)
1113 spin_lock_irq(&conf->resync_lock);
1114 conf->array_frozen = 0;
1115 spin_unlock_irq(&conf->resync_lock);
1116 wake_up(&conf->wait_barrier);
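The freeze_array() comments above define quiescence in terms of the per-bucket counters: every in-flight I/O holds a reference in nr_pending[idx], and failed I/O parked for retry is also counted in nr_queued[idx], so the array counts as frozen once nr_sync_pending plus the sum of (nr_pending[idx] - nr_queued[idx]) drops to the extra requests held by the freezing caller itself, which is what get_unqueued_pending() computes. A small model of that bookkeeping (BARRIER_BUCKETS_NR is an assumed value here; struct bucket_counts and array_quiesced() are invented):

#include <stdbool.h>

#define BARRIER_BUCKETS_NR 1024		/* assumed bucket count, for the model only */

struct bucket_counts {
	int nr_pending;		/* I/O submitted to this bucket and not yet retired */
	int nr_queued;		/* failed I/O parked on a retry list, still "pending" */
};

static bool array_quiesced(const struct bucket_counts *b,
			   int nr_sync_pending, int extra)
{
	int unqueued = nr_sync_pending;

	for (int i = 0; i < BARRIER_BUCKETS_NR; i++)
		unqueued += b[i].nr_pending - b[i].nr_queued;

	/* e.g. one bucket with nr_pending = 3, nr_queued = 2 and the caller
	 * itself holding 1 request (extra = 1) counts as frozen. */
	return unqueued == extra;
}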
1174 struct r1conf *conf = mddev->private;
1178 spin_lock_irq(&conf->device_lock);
1179 bio_list_merge(&conf->pending_bio_list, &plug->pending);
1180 spin_unlock_irq(&conf->device_lock);
1181 wake_up_barrier(conf);
1189 flush_bio_list(conf, bio);
1205 struct r1conf *conf = mddev->private;
1208 r1_bio = mempool_alloc(&conf->r1bio_pool, GFP_NOIO);
1210 memset(r1_bio->bios, 0, conf->raid_disks * sizeof(r1_bio->bios[0]));
1218 struct r1conf *conf = mddev->private;
1240 rdev = rcu_dereference(conf->mirrors[r1_bio->read_disk].rdev);
1252 if (!wait_read_barrier(conf, bio->bi_iter.bi_sector,
1268 rdisk = read_balance(conf, r1_bio, &max_sectors);
1281 mirror = conf->mirrors + rdisk;
1302 gfp, &conf->bio_split);
1339 struct r1conf *conf = mddev->private;
1359 prepare_to_wait(&conf->wait_barrier,
1367 finish_wait(&conf->wait_barrier, &w);
1375 if (!wait_barrier(conf, bio->bi_iter.bi_sector,
1396 disks = conf->raid_disks * 2;
1401 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
1418 if (i < conf->raid_disks)
1475 rdev_dec_pending(conf->mirrors[j].rdev, mddev);
1477 allow_barrier(conf, bio->bi_iter.bi_sector);
1485 wait_barrier(conf, bio->bi_iter.bi_sector, false);
1500 GFP_NOIO, &conf->bio_split);
1517 struct md_rdev *rdev = conf->mirrors[i].rdev;
1561 conf->raid_disks - mddev->degraded > 1)
1573 spin_lock_irqsave(&conf->device_lock, flags);
1574 bio_list_add(&conf->pending_bio_list, mbio);
1575 spin_unlock_irqrestore(&conf->device_lock, flags);
1583 wake_up_barrier(conf);
1616 struct r1conf *conf = mddev->private;
1619 seq_printf(seq, " [%d/%d] [", conf->raid_disks,
1620 conf->raid_disks - mddev->degraded);
1622 for (i = 0; i < conf->raid_disks; i++) {
1623 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
1649 struct r1conf *conf = mddev->private;
1652 spin_lock_irqsave(&conf->device_lock, flags);
1655 (conf->raid_disks - mddev->degraded) == 1) {
1659 conf->recovery_disabled = mddev->recovery_disabled;
1660 spin_unlock_irqrestore(&conf->device_lock, flags);
1668 spin_unlock_irqrestore(&conf->device_lock, flags);
1678 mdname(mddev), conf->raid_disks - mddev->degraded);
1681 static void print_conf(struct r1conf *conf)
1685 pr_debug("RAID1 conf printout:\n");
1686 if (!conf) {
1687 pr_debug("(!conf)\n");
1690 pr_debug(" --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
1691 conf->raid_disks);
1694 for (i = 0; i < conf->raid_disks; i++) {
1695 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
1705 static void close_sync(struct r1conf *conf)
1710 _wait_barrier(conf, idx, false);
1711 _allow_barrier(conf, idx);
1714 mempool_exit(&conf->r1buf_pool);
1720 struct r1conf *conf = mddev->private;
1731 spin_lock_irqsave(&conf->device_lock, flags);
1732 for (i = 0; i < conf->raid_disks; i++) {
1733 struct md_rdev *rdev = conf->mirrors[i].rdev;
1734 struct md_rdev *repl = conf->mirrors[conf->raid_disks + i].rdev;
1763 spin_unlock_irqrestore(&conf->device_lock, flags);
1765 print_conf(conf);
1771 struct r1conf *conf = mddev->private;
1776 int last = conf->raid_disks - 1;
1778 if (mddev->recovery_disabled == conf->recovery_disabled)
1793 rdev->saved_raid_disk < conf->raid_disks &&
1794 conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
1798 p = conf->mirrors + mirror;
1811 conf->fullsync = 1;
1816 p[conf->raid_disks].rdev == NULL && repl_slot < 0)
1822 p = conf->mirrors + repl_slot;
1827 conf->fullsync = 1;
1828 rcu_assign_pointer(p[conf->raid_disks].rdev, rdev);
1831 print_conf(conf);
1837 struct r1conf *conf = mddev->private;
1840 struct raid1_info *p = conf->mirrors + number;
1842 if (unlikely(number >= conf->raid_disks))
1846 p = conf->mirrors + conf->raid_disks + number;
1848 print_conf(conf);
1859 mddev->recovery_disabled != conf->recovery_disabled &&
1860 mddev->degraded < conf->raid_disks) {
1874 if (conf->mirrors[conf->raid_disks + number].rdev) {
1880 conf->mirrors[conf->raid_disks + number].rdev;
1881 freeze_array(conf, 0);
1890 unfreeze_array(conf);
1895 conf->mirrors[conf->raid_disks + number].rdev = NULL;
1896 unfreeze_array(conf);
1904 print_conf(conf);
1961 struct r1conf *conf = mddev->private;
1964 struct md_rdev *rdev = conf->mirrors[find_bio_disk(r1_bio, bio)].rdev;
1975 !is_badblock(conf->mirrors[r1_bio->read_disk].rdev,
2018 struct r1conf *conf = mddev->private;
2026 rdev = conf->mirrors[r1_bio->read_disk].rdev;
2052 rdev = conf->mirrors[d].rdev;
2061 if (d == conf->raid_disks * 2)
2075 for (d = 0; d < conf->raid_disks * 2; d++) {
2076 rdev = conf->mirrors[d].rdev;
2083 conf->recovery_disabled =
2101 d = conf->raid_disks * 2;
2105 rdev = conf->mirrors[d].rdev;
2116 d = conf->raid_disks * 2;
2120 rdev = conf->mirrors[d].rdev;
2145 struct r1conf *conf = mddev->private;
2152 for (i = 0; i < conf->raid_disks * 2; i++) {
2160 bio_reset(b, conf->mirrors[i].rdev->bdev, REQ_OP_READ);
2163 conf->mirrors[i].rdev->data_offset;
2171 for (primary = 0; primary < conf->raid_disks * 2; primary++)
2175 rdev_dec_pending(conf->mirrors[primary].rdev, mddev);
2179 for (i = 0; i < conf->raid_disks * 2; i++) {
2213 rdev_dec_pending(conf->mirrors[i].rdev, mddev);
2223 struct r1conf *conf = mddev->private;
2225 int disks = conf->raid_disks * 2;
2247 if (test_bit(Faulty, &conf->mirrors[i].rdev->flags)) {
2253 if (test_bit(FailFast, &conf->mirrors[i].rdev->flags))
2258 md_sync_acct(conf->mirrors[i].rdev->bdev, bio_sectors(wbio));
2274 static void fix_read_error(struct r1conf *conf, int read_disk,
2277 struct mddev *mddev = conf->mddev;
2293 rdev = rcu_dereference(conf->mirrors[d].rdev);
2303 conf->tmppage, REQ_OP_READ, false))
2311 if (d == conf->raid_disks * 2)
2317 struct md_rdev *rdev = conf->mirrors[read_disk].rdev;
2326 d = conf->raid_disks * 2;
2329 rdev = rcu_dereference(conf->mirrors[d].rdev);
2335 conf->tmppage, REQ_OP_WRITE);
2343 d = conf->raid_disks * 2;
2346 rdev = rcu_dereference(conf->mirrors[d].rdev);
2352 conf->tmppage, REQ_OP_READ)) {
2372 struct r1conf *conf = mddev->private;
2373 struct md_rdev *rdev = conf->mirrors[i].rdev;
2438 static void handle_sync_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
2442 for (m = 0; m < conf->raid_disks * 2 ; m++) {
2443 struct md_rdev *rdev = conf->mirrors[m].rdev;
2454 md_error(conf->mddev, rdev);
2458 md_done_sync(conf->mddev, s, 1);
2461 static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
2466 for (m = 0; m < conf->raid_disks * 2 ; m++)
2468 struct md_rdev *rdev = conf->mirrors[m].rdev;
2472 rdev_dec_pending(rdev, conf->mddev);
2480 md_error(conf->mddev,
2481 conf->mirrors[m].rdev);
2485 rdev_dec_pending(conf->mirrors[m].rdev,
2486 conf->mddev);
2489 spin_lock_irq(&conf->device_lock);
2490 list_add(&r1_bio->retry_list, &conf->bio_end_io_list);
2492 atomic_inc(&conf->nr_queued[idx]);
2493 spin_unlock_irq(&conf->device_lock);
2498 wake_up(&conf->wait_barrier);
2499 md_wakeup_thread(conf->mddev->thread);
2507 static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
2509 struct mddev *mddev = conf->mddev;
2528 rdev = conf->mirrors[r1_bio->read_disk].rdev;
2531 freeze_array(conf, 1);
2532 fix_read_error(conf, r1_bio->read_disk,
2534 unfreeze_array(conf);
2541 rdev_dec_pending(rdev, conf->mddev);
2548 allow_barrier(conf, sector);
2556 struct r1conf *conf = mddev->private;
2557 struct list_head *head = &conf->retry_list;
2563 if (!list_empty_careful(&conf->bio_end_io_list) &&
2566 spin_lock_irqsave(&conf->device_lock, flags);
2568 list_splice_init(&conf->bio_end_io_list, &tmp);
2569 spin_unlock_irqrestore(&conf->device_lock, flags);
2575 atomic_dec(&conf->nr_queued[idx]);
2587 flush_pending_writes(conf);
2589 spin_lock_irqsave(&conf->device_lock, flags);
2591 spin_unlock_irqrestore(&conf->device_lock, flags);
2597 atomic_dec(&conf->nr_queued[idx]);
2598 spin_unlock_irqrestore(&conf->device_lock, flags);
2601 conf = mddev->private;
2605 handle_sync_write_finished(conf, r1_bio);
2610 handle_write_finished(conf, r1_bio);
2612 handle_read_error(conf, r1_bio);
2623 static int init_resync(struct r1conf *conf)
2628 BUG_ON(mempool_initialized(&conf->r1buf_pool));
2630 return mempool_init(&conf->r1buf_pool, buffs, r1buf_pool_alloc,
2631 r1buf_pool_free, conf->poolinfo);
2634 static struct r1bio *raid1_alloc_init_r1buf(struct r1conf *conf)
2636 struct r1bio *r1bio = mempool_alloc(&conf->r1buf_pool, GFP_NOIO);
2641 for (i = conf->poolinfo->raid_disks; i--; ) {
2664 struct r1conf *conf = mddev->private;
2679 if (!mempool_initialized(&conf->r1buf_pool))
2680 if (init_resync(conf))
2694 conf->fullsync = 0;
2697 close_sync(conf);
2700 conf->cluster_sync_low = 0;
2701 conf->cluster_sync_high = 0;
2709 conf->fullsync == 0) {
2717 !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
2727 if (atomic_read(&conf->nr_waiting[idx]))
2735 mddev_is_clustered(mddev) && (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high));
2738 if (raise_barrier(conf, sector_nr))
2741 r1_bio = raid1_alloc_init_r1buf(conf);
2760 for (i = 0; i < conf->raid_disks * 2; i++) {
2764 rdev = rcu_dereference(conf->mirrors[i].rdev);
2767 if (i < conf->raid_disks)
2832 for (i = 0 ; i < conf->raid_disks * 2 ; i++)
2834 struct md_rdev *rdev = conf->mirrors[i].rdev;
2849 conf->recovery_disabled = mddev->recovery_disabled;
2895 !conf->fullsync &&
2902 for (i = 0 ; i < conf->raid_disks * 2; i++) {
2925 conf->cluster_sync_high < sector_nr + nr_sectors) {
2926 conf->cluster_sync_low = mddev->curr_resync_completed;
2927 conf->cluster_sync_high = conf->cluster_sync_low + CLUSTER_RESYNC_WINDOW_SECTORS;
2930 conf->cluster_sync_low,
2931 conf->cluster_sync_high);
2939 for (i = 0; i < conf->raid_disks * 2 && read_targets; i++) {
2970 struct r1conf *conf;
2976 conf = kzalloc(sizeof(struct r1conf), GFP_KERNEL);
2977 if (!conf)
2980 conf->nr_pending = kcalloc(BARRIER_BUCKETS_NR,
2982 if (!conf->nr_pending)
2985 conf->nr_waiting = kcalloc(BARRIER_BUCKETS_NR,
2987 if (!conf->nr_waiting)
2990 conf->nr_queued = kcalloc(BARRIER_BUCKETS_NR,
2992 if (!conf->nr_queued)
2995 conf->barrier = kcalloc(BARRIER_BUCKETS_NR,
2997 if (!conf->barrier)
3000 conf->mirrors = kzalloc(array3_size(sizeof(struct raid1_info),
3003 if (!conf->mirrors)
3006 conf->tmppage = alloc_page(GFP_KERNEL);
3007 if (!conf->tmppage)
3010 conf->poolinfo = kzalloc(sizeof(*conf->poolinfo), GFP_KERNEL);
3011 if (!conf->poolinfo)
3013 conf->poolinfo->raid_disks = mddev->raid_disks * 2;
3014 err = mempool_init(&conf->r1bio_pool, NR_RAID_BIOS, r1bio_pool_alloc,
3015 rbio_pool_free, conf->poolinfo);
3019 err = bioset_init(&conf->bio_split, BIO_POOL_SIZE, 0, 0);
3023 conf->poolinfo->mddev = mddev;
3026 spin_lock_init(&conf->device_lock);
3033 disk = conf->mirrors + mddev->raid_disks + disk_idx;
3035 disk = conf->mirrors + disk_idx;
3043 conf->raid_disks = mddev->raid_disks;
3044 conf->mddev = mddev;
3045 INIT_LIST_HEAD(&conf->retry_list);
3046 INIT_LIST_HEAD(&conf->bio_end_io_list);
3048 spin_lock_init(&conf->resync_lock);
3049 init_waitqueue_head(&conf->wait_barrier);
3051 bio_list_init(&conf->pending_bio_list);
3052 conf->recovery_disabled = mddev->recovery_disabled - 1;
3055 for (i = 0; i < conf->raid_disks * 2; i++) {
3057 disk = conf->mirrors + i;
3059 if (i < conf->raid_disks &&
3060 disk[conf->raid_disks].rdev) {
3067 disk[conf->raid_disks].rdev;
3068 disk[conf->raid_disks].rdev = NULL;
3079 conf->fullsync = 1;
3084 rcu_assign_pointer(conf->thread,
3086 if (!conf->thread)
3089 return conf;
3092 if (conf) {
3093 mempool_exit(&conf->r1bio_pool);
3094 kfree(conf->mirrors);
3095 safe_put_page(conf->tmppage);
3096 kfree(conf->poolinfo);
3097 kfree(conf->nr_pending);
3098 kfree(conf->nr_waiting);
3099 kfree(conf->nr_queued);
3100 kfree(conf->barrier);
3101 bioset_exit(&conf->bio_split);
3102 kfree(conf);
3110 struct r1conf *conf;
3133 conf = setup_conf(mddev);
3135 conf = mddev->private;
3137 if (IS_ERR(conf))
3138 return PTR_ERR(conf);
3151 for (i = 0; i < conf->raid_disks; i++)
3152 if (conf->mirrors[i].rdev == NULL ||
3153 !test_bit(In_sync, &conf->mirrors[i].rdev->flags) ||
3154 test_bit(Faulty, &conf->mirrors[i].rdev->flags))
3159 if (conf->raid_disks - mddev->degraded < 1) {
3160 md_unregister_thread(mddev, &conf->thread);
3165 if (conf->raid_disks - mddev->degraded == 1)
3178 rcu_assign_pointer(mddev->thread, conf->thread);
3179 rcu_assign_pointer(conf->thread, NULL);
3180 mddev->private = conf;
3193 raid1_free(mddev, conf);
3199 struct r1conf *conf = priv;
3201 mempool_exit(&conf->r1bio_pool);
3202 kfree(conf->mirrors);
3203 safe_put_page(conf->tmppage);
3204 kfree(conf->poolinfo);
3205 kfree(conf->nr_pending);
3206 kfree(conf->nr_waiting);
3207 kfree(conf->nr_queued);
3208 kfree(conf->barrier);
3209 bioset_exit(&conf->bio_split);
3210 kfree(conf);
3246 * 2/ resize conf->mirrors
3250 * Then resize conf->mirrors and swap in the new r1bio pool.
3258 struct r1conf *conf = mddev->private;
3282 if (raid_disks < conf->raid_disks) {
3284 for (d = 0; d < conf->raid_disks; d++)
3285 if (conf->mirrors[d].rdev)
3312 freeze_array(conf, 0);
3315 oldpool = conf->r1bio_pool;
3316 conf->r1bio_pool = newpool;
3318 for (d = d2 = 0; d < conf->raid_disks; d++) {
3319 struct md_rdev *rdev = conf->mirrors[d].rdev;
3331 kfree(conf->mirrors);
3332 conf->mirrors = newmirrors;
3333 kfree(conf->poolinfo);
3334 conf->poolinfo = newpoolinfo;
3336 spin_lock_irqsave(&conf->device_lock, flags);
3337 mddev->degraded += (raid_disks - conf->raid_disks);
3338 spin_unlock_irqrestore(&conf->device_lock, flags);
3339 conf->raid_disks = mddev->raid_disks = raid_disks;
3342 unfreeze_array(conf);
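The raid1_reshape() lines above follow the sequence its comment sketches: allocate the new pool and mirrors up front, freeze_array() to exclude I/O, swap in the new r1bio pool and mirrors, compact the surviving rdevs across, adjust degraded/raid_disks, then unfreeze. A schematic userspace model of that swap-under-freeze idea, with freeze(), unfreeze(), struct conf_model and reshape() invented for the illustration:

#include <stdlib.h>

struct mirror_slot { void *rdev; };

struct conf_model {
	struct mirror_slot *mirrors;	/* 2 * raid_disks slots, as in the listing */
	int raid_disks;
};

static void freeze(struct conf_model *c)   { (void)c; /* wait for unqueued pending I/O */ }
static void unfreeze(struct conf_model *c) { (void)c; /* wake barrier waiters */ }

static int reshape(struct conf_model *c, int new_disks)
{
	/* Allocate before quiescing, so failure leaves the array untouched. */
	struct mirror_slot *newm = calloc(2 * (size_t)new_disks, sizeof(*newm));
	int d, d2;

	if (!newm)
		return -1;

	freeze(c);
	/* Compact the surviving slots into the new array, as the d/d2 loop above does. */
	for (d = d2 = 0; d < c->raid_disks; d++)
		if (c->mirrors[d].rdev)
			newm[d2++].rdev = c->mirrors[d].rdev;
	free(c->mirrors);
	c->mirrors = newm;
	c->raid_disks = new_disks;
	unfreeze(c);
	return 0;
}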
3354 struct r1conf *conf = mddev->private;
3357 freeze_array(conf, 0);
3359 unfreeze_array(conf);
3368 struct r1conf *conf;
3372 conf = setup_conf(mddev);
3373 if (!IS_ERR(conf)) {
3375 conf->array_frozen = 1;
3379 return conf;