Lines matching refs: conf (struct r1conf references in drivers/md/raid1.c)
46 static void allow_barrier(struct r1conf *conf, sector_t sector_nr);
47 static void lower_barrier(struct r1conf *conf, sector_t sector_nr);
237 static void put_all_bios(struct r1conf *conf, struct r1bio *r1_bio)
241 for (i = 0; i < conf->raid_disks * 2; i++) {
251 struct r1conf *conf = r1_bio->mddev->private;
253 put_all_bios(conf, r1_bio);
254 mempool_free(r1_bio, &conf->r1bio_pool);
259 struct r1conf *conf = r1_bio->mddev->private;
263 for (i = 0; i < conf->raid_disks * 2; i++) {
266 rdev_dec_pending(conf->mirrors[i].rdev, r1_bio->mddev);
269 mempool_free(r1_bio, &conf->r1buf_pool);
271 lower_barrier(conf, sect);
278 struct r1conf *conf = mddev->private;
282 spin_lock_irqsave(&conf->device_lock, flags);
283 list_add(&r1_bio->retry_list, &conf->retry_list);
284 atomic_inc(&conf->nr_queued[idx]);
285 spin_unlock_irqrestore(&conf->device_lock, flags);
287 wake_up(&conf->wait_barrier);
309 struct r1conf *conf = r1_bio->mddev->private;
324 allow_barrier(conf, r1_bio->sector);
334 struct r1conf *conf = r1_bio->mddev->private;
336 conf->mirrors[disk].head_position =
346 struct r1conf *conf = r1_bio->mddev->private;
347 int raid_disks = conf->raid_disks;
363 struct r1conf *conf = r1_bio->mddev->private;
364 struct md_rdev *rdev = conf->mirrors[r1_bio->read_disk].rdev;
384 spin_lock_irqsave(&conf->device_lock, flags);
385 if (r1_bio->mddev->degraded == conf->raid_disks ||
386 (r1_bio->mddev->degraded == conf->raid_disks-1 &&
389 spin_unlock_irqrestore(&conf->device_lock, flags);
394 rdev_dec_pending(rdev, conf->mddev);
401 mdname(conf->mddev),
446 struct r1conf *conf = r1_bio->mddev->private;
449 struct md_rdev *rdev = conf->mirrors[mirror].rdev;
463 conf->mddev->recovery);
549 rdev_dec_pending(rdev, conf->mddev);
594 static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sectors)
626 if ((conf->mddev->recovery_cp < this_sector + sectors) ||
627 (mddev_is_clustered(conf->mddev) &&
628 md_cluster_ops->area_resyncing(conf->mddev, READ, this_sector,
634 for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) {
641 rdev = rcu_dereference(conf->mirrors[disk].rdev);
708 dist = abs(this_sector - conf->mirrors[disk].head_position);
714 if (conf->mirrors[disk].next_seq_sect == this_sector
717 struct raid1_info *mirror = &conf->mirrors[disk];
772 rdev = rcu_dereference(conf->mirrors[best_disk].rdev);
778 if (conf->mirrors[best_disk].next_seq_sect != this_sector)
779 conf->mirrors[best_disk].seq_start = this_sector;
781 conf->mirrors[best_disk].next_seq_sect = this_sector + sectors;
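The read_balance() matches above (source lines 594-781) expose its two selection heuristics: stay on the mirror whose next_seq_sect equals the requested sector (sequential-read affinity), otherwise prefer the smallest head_position distance. Below is a minimal, self-contained sketch of just those two rules; the struct and function names (mirror_sketch, pick_read_disk) are illustrative only, and the real function additionally handles bad blocks, write-mostly devices, replacement slots and RCU.

typedef unsigned long long sector_t;

struct mirror_sketch {
	int		in_sync;	/* stand-in for the rdev state checks     */
	sector_t	head_position;	/* where this disk's head was last left    */
	sector_t	next_seq_sect;	/* sector a sequential stream reaches next */
};

/* Pick a read disk: sequential affinity first, shortest seek second. */
static int pick_read_disk(const struct mirror_sketch *m, int raid_disks,
			  sector_t this_sector)
{
	sector_t best_dist = ~(sector_t)0;
	int best_disk = -1;

	for (int disk = 0; disk < raid_disks; disk++) {
		sector_t dist;

		if (!m[disk].in_sync)
			continue;
		if (m[disk].next_seq_sect == this_sector)
			return disk;	/* keep a sequential stream on one mirror */
		dist = this_sector > m[disk].head_position ?
		       this_sector - m[disk].head_position :
		       m[disk].head_position - this_sector;
		if (dist < best_dist) {
			best_dist = dist;
			best_disk = disk;
		}
	}
	return best_disk;	/* -1 if no usable mirror was found */
}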
789 static void flush_bio_list(struct r1conf *conf, struct bio *bio)
792 md_bitmap_unplug(conf->mddev->bitmap);
793 wake_up(&conf->wait_barrier);
813 static void flush_pending_writes(struct r1conf *conf)
818 spin_lock_irq(&conf->device_lock);
820 if (conf->pending_bio_list.head) {
824 bio = bio_list_get(&conf->pending_bio_list);
825 conf->pending_count = 0;
826 spin_unlock_irq(&conf->device_lock);
839 flush_bio_list(conf, bio);
842 spin_unlock_irq(&conf->device_lock);
869 static int raise_barrier(struct r1conf *conf, sector_t sector_nr)
873 spin_lock_irq(&conf->resync_lock);
876 wait_event_lock_irq(conf->wait_barrier,
877 !atomic_read(&conf->nr_waiting[idx]),
878 conf->resync_lock);
881 atomic_inc(&conf->barrier[idx]);
883 * In raise_barrier() we first increase conf->barrier[idx], then
884 * check conf->nr_pending[idx]. In _wait_barrier() we first
885 * increase conf->nr_pending[idx], then check conf->barrier[idx].
886 * A memory barrier here to make sure conf->nr_pending[idx] won't
887 * be fetched before conf->barrier[idx] is increased. Otherwise
894 * B: while conf->nr_pending[idx] is not 0, meaning regular I/O
896 * C: while conf->barrier[idx] >= RESYNC_DEPTH, meaning reaches
899 wait_event_lock_irq(conf->wait_barrier,
900 (!conf->array_frozen &&
901 !atomic_read(&conf->nr_pending[idx]) &&
902 atomic_read(&conf->barrier[idx]) < RESYNC_DEPTH) ||
903 test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery),
904 conf->resync_lock);
906 if (test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {
907 atomic_dec(&conf->barrier[idx]);
908 spin_unlock_irq(&conf->resync_lock);
909 wake_up(&conf->wait_barrier);
913 atomic_inc(&conf->nr_sync_pending);
914 spin_unlock_irq(&conf->resync_lock);
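The comments at source lines 883-904 above explain why raise_barrier() and _wait_barrier() each increment their own per-bucket counter before checking the other side's. The fragment below is a compact user-space model of that increment-then-check handshake, using C11 seq_cst atomics in place of the kernel's atomic_inc()/smp_mb__after_atomic() (the names nr_pending, barrier, resync_fast_path and io_fast_path are illustrative): with full ordering on both sides, at least one party is guaranteed to observe the other, so a regular write and a resync cannot both slip through the same barrier bucket.

#include <stdatomic.h>
#include <stdbool.h>

static _Atomic int nr_pending;	/* regular I/O in flight in this bucket  */
static _Atomic int barrier;	/* resync requests raised in this bucket */

/* Resync side (cf. raise_barrier): announce first, then look for I/O. */
static bool resync_fast_path(void)
{
	atomic_fetch_add(&barrier, 1);
	return atomic_load(&nr_pending) == 0;	/* false => must wait */
}

/* I/O side (cf. _wait_barrier fast path): announce first, then look. */
static bool io_fast_path(void)
{
	atomic_fetch_add(&nr_pending, 1);
	return atomic_load(&barrier) == 0;	/* false => take the slow path */
}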
919 static void lower_barrier(struct r1conf *conf, sector_t sector_nr)
923 BUG_ON(atomic_read(&conf->barrier[idx]) <= 0);
925 atomic_dec(&conf->barrier[idx]);
926 atomic_dec(&conf->nr_sync_pending);
927 wake_up(&conf->wait_barrier);
930 static void _wait_barrier(struct r1conf *conf, int idx)
933 * We need to increase conf->nr_pending[idx] very early here,
935 * conf->nr_pending[idx] to be 0. Then we can avoid holding
936 * conf->resync_lock when there is no barrier raised in same
940 atomic_inc(&conf->nr_pending[idx]);
942 * In _wait_barrier() we first increase conf->nr_pending[idx], then
943 * check conf->barrier[idx]. In raise_barrier() we first increase
944 * conf->barrier[idx], then check conf->nr_pending[idx]. A memory
945 * barrier is necessary here to make sure conf->barrier[idx] won't be
946 * fetched before conf->nr_pending[idx] is increased. Otherwise there
953 * here. If, while we check conf->barrier[idx], the array is
954 * frozen (conf->array_frozen is 1), and conf->barrier[idx] is
960 if (!READ_ONCE(conf->array_frozen) &&
961 !atomic_read(&conf->barrier[idx]))
965 * After holding conf->resync_lock, conf->nr_pending[idx]
968 * raise_barrier() might be waiting for conf->nr_pending[idx]
971 spin_lock_irq(&conf->resync_lock);
972 atomic_inc(&conf->nr_waiting[idx]);
973 atomic_dec(&conf->nr_pending[idx]);
978 wake_up(&conf->wait_barrier);
980 wait_event_lock_irq(conf->wait_barrier,
981 !conf->array_frozen &&
982 !atomic_read(&conf->barrier[idx]),
983 conf->resync_lock);
984 atomic_inc(&conf->nr_pending[idx]);
985 atomic_dec(&conf->nr_waiting[idx]);
986 spin_unlock_irq(&conf->resync_lock);
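When the lockless check at source lines 960-961 fails, lines 971-986 move the request from nr_pending to nr_waiting before sleeping, so that raise_barrier() and freeze_array() never end up waiting on a sleeper. A hypothetical pthread rendition of that back-off is sketched below (plain counters under one mutex, where the kernel mixes atomics with conf->resync_lock; the names are illustrative, not the kernel API):

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t resync_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  wait_barrier = PTHREAD_COND_INITIALIZER;
static int nr_pending, nr_waiting, barrier_cnt;
static bool array_frozen;

/* Called after the lockless check saw a raised barrier or a frozen array;
 * the caller has already counted itself in nr_pending. */
static void wait_barrier_slow(void)
{
	pthread_mutex_lock(&resync_lock);
	nr_waiting++;		/* count ourselves as a waiter instead ...      */
	nr_pending--;		/* ... so resync/freeze do not wait on us       */
	pthread_cond_broadcast(&wait_barrier);	/* let them re-evaluate         */
	while (array_frozen || barrier_cnt > 0)
		pthread_cond_wait(&wait_barrier, &resync_lock);
	nr_pending++;		/* barrier is gone: become pending I/O again    */
	nr_waiting--;
	pthread_mutex_unlock(&resync_lock);
}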
989 static void wait_read_barrier(struct r1conf *conf, sector_t sector_nr)
998 * conf->barrier[idx] here, memory barrier is unnecessary as well.
1000 atomic_inc(&conf->nr_pending[idx]);
1002 if (!READ_ONCE(conf->array_frozen))
1005 spin_lock_irq(&conf->resync_lock);
1006 atomic_inc(&conf->nr_waiting[idx]);
1007 atomic_dec(&conf->nr_pending[idx]);
1012 wake_up(&conf->wait_barrier);
1014 wait_event_lock_irq(conf->wait_barrier,
1015 !conf->array_frozen,
1016 conf->resync_lock);
1017 atomic_inc(&conf->nr_pending[idx]);
1018 atomic_dec(&conf->nr_waiting[idx]);
1019 spin_unlock_irq(&conf->resync_lock);
1022 static void wait_barrier(struct r1conf *conf, sector_t sector_nr)
1026 _wait_barrier(conf, idx);
1029 static void _allow_barrier(struct r1conf *conf, int idx)
1031 atomic_dec(&conf->nr_pending[idx]);
1032 wake_up(&conf->wait_barrier);
1035 static void allow_barrier(struct r1conf *conf, sector_t sector_nr)
1039 _allow_barrier(conf, idx);
1042 /* conf->resync_lock should be held */
1043 static int get_unqueued_pending(struct r1conf *conf)
1047 ret = atomic_read(&conf->nr_sync_pending);
1049 ret += atomic_read(&conf->nr_pending[idx]) -
1050 atomic_read(&conf->nr_queued[idx]);
1055 static void freeze_array(struct r1conf *conf, int extra)
1068 * Every in-flight I/O contributes to conf->nr_pending[idx], where idx is the
1070 * normal I/O are queued, sum of all conf->nr_pending[] will match sum
1071 * of all conf->nr_queued[]. But normal I/O failure is an exception,
1077 * get_unqueued_pending(conf) becomes equal to extra. For
1080 spin_lock_irq(&conf->resync_lock);
1081 conf->array_frozen = 1;
1082 raid1_log(conf->mddev, "wait freeze");
1084 conf->wait_barrier,
1085 get_unqueued_pending(conf) == extra,
1086 conf->resync_lock,
1087 flush_pending_writes(conf));
1088 spin_unlock_irq(&conf->resync_lock);
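The freeze_array() comment at source lines 1068-1080 rests on the bookkeeping that get_unqueued_pending() (lines 1043-1050 above) sums up: every in-flight I/O is counted in some nr_pending[idx], and failed I/O parked for raid1d is additionally counted in nr_queued[idx], so the array is quiet once the difference, plus resync I/O, drops to the 'extra' requests the freezing caller itself still holds. A stand-alone restatement of that sum, with BUCKETS merely standing in for BARRIER_BUCKETS_NR:

#define BUCKETS 64	/* stand-in value, not the kernel's BARRIER_BUCKETS_NR */

struct freeze_counts {
	int nr_sync_pending;		/* resync/recovery requests in flight */
	int nr_pending[BUCKETS];	/* all normal I/O, per barrier bucket */
	int nr_queued[BUCKETS];		/* failed I/O queued for the daemon   */
};

/* freeze_array(conf, extra) effectively waits, with array_frozen set and
 * pending writes flushed, until this value drops to 'extra'. */
static int unqueued_pending(const struct freeze_counts *c)
{
	int ret = c->nr_sync_pending;

	for (int idx = 0; idx < BUCKETS; idx++)
		ret += c->nr_pending[idx] - c->nr_queued[idx];
	return ret;
}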
1090 static void unfreeze_array(struct r1conf *conf)
1093 spin_lock_irq(&conf->resync_lock);
1094 conf->array_frozen = 0;
1095 spin_unlock_irq(&conf->resync_lock);
1096 wake_up(&conf->wait_barrier);
1158 struct r1conf *conf = mddev->private;
1162 spin_lock_irq(&conf->device_lock);
1163 bio_list_merge(&conf->pending_bio_list, &plug->pending);
1164 conf->pending_count += plug->pending_cnt;
1165 spin_unlock_irq(&conf->device_lock);
1166 wake_up(&conf->wait_barrier);
1174 flush_bio_list(conf, bio);
1190 struct r1conf *conf = mddev->private;
1193 r1_bio = mempool_alloc(&conf->r1bio_pool, GFP_NOIO);
1195 memset(r1_bio->bios, 0, conf->raid_disks * sizeof(r1_bio->bios[0]));
1203 struct r1conf *conf = mddev->private;
1225 rdev = rcu_dereference(conf->mirrors[r1_bio->read_disk].rdev);
1237 wait_read_barrier(conf, bio->bi_iter.bi_sector);
1249 rdisk = read_balance(conf, r1_bio, &max_sectors);
1262 mirror = conf->mirrors + rdisk;
1283 gfp, &conf->bio_split);
1317 struct r1conf *conf = mddev->private;
1334 prepare_to_wait(&conf->wait_barrier,
1342 finish_wait(&conf->wait_barrier, &w);
1350 wait_barrier(conf, bio->bi_iter.bi_sector);
1355 if (conf->pending_count >= max_queued_requests) {
1358 wait_event(conf->wait_barrier,
1359 conf->pending_count < max_queued_requests);
1372 disks = conf->raid_disks * 2;
1378 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
1386 if (i < conf->raid_disks)
1443 rdev_dec_pending(conf->mirrors[j].rdev, mddev);
1445 allow_barrier(conf, bio->bi_iter.bi_sector);
1448 wait_barrier(conf, bio->bi_iter.bi_sector);
1454 GFP_NOIO, &conf->bio_split);
1469 struct md_rdev *rdev = conf->mirrors[i].rdev;
1507 conf->mirrors[i].rdev->data_offset);
1508 bio_set_dev(mbio, conf->mirrors[i].rdev->bdev);
1511 if (test_bit(FailFast, &conf->mirrors[i].rdev->flags) &&
1512 !test_bit(WriteMostly, &conf->mirrors[i].rdev->flags) &&
1513 conf->raid_disks - mddev->degraded > 1)
1524 mbio->bi_disk = (void *)conf->mirrors[i].rdev;
1535 spin_lock_irqsave(&conf->device_lock, flags);
1536 bio_list_add(&conf->pending_bio_list, mbio);
1537 conf->pending_count++;
1538 spin_unlock_irqrestore(&conf->device_lock, flags);
1546 wake_up(&conf->wait_barrier);
1579 struct r1conf *conf = mddev->private;
1582 seq_printf(seq, " [%d/%d] [", conf->raid_disks,
1583 conf->raid_disks - mddev->degraded);
1585 for (i = 0; i < conf->raid_disks; i++) {
1586 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
1597 struct r1conf *conf = mddev->private;
1606 spin_lock_irqsave(&conf->device_lock, flags);
1608 && (conf->raid_disks - mddev->degraded) == 1) {
1615 conf->recovery_disabled = mddev->recovery_disabled;
1616 spin_unlock_irqrestore(&conf->device_lock, flags);
1623 spin_unlock_irqrestore(&conf->device_lock, flags);
1633 mdname(mddev), conf->raid_disks - mddev->degraded);
1636 static void print_conf(struct r1conf *conf)
1640 pr_debug("RAID1 conf printout:\n");
1641 if (!conf) {
1642 pr_debug("(!conf)\n");
1645 pr_debug(" --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
1646 conf->raid_disks);
1649 for (i = 0; i < conf->raid_disks; i++) {
1651 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
1661 static void close_sync(struct r1conf *conf)
1666 _wait_barrier(conf, idx);
1667 _allow_barrier(conf, idx);
1670 mempool_exit(&conf->r1buf_pool);
1676 struct r1conf *conf = mddev->private;
1687 spin_lock_irqsave(&conf->device_lock, flags);
1688 for (i = 0; i < conf->raid_disks; i++) {
1689 struct md_rdev *rdev = conf->mirrors[i].rdev;
1690 struct md_rdev *repl = conf->mirrors[conf->raid_disks + i].rdev;
1719 spin_unlock_irqrestore(&conf->device_lock, flags);
1721 print_conf(conf);
1727 struct r1conf *conf = mddev->private;
1732 int last = conf->raid_disks - 1;
1734 if (mddev->recovery_disabled == conf->recovery_disabled)
1749 rdev->saved_raid_disk < conf->raid_disks &&
1750 conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
1754 p = conf->mirrors + mirror;
1767 conf->fullsync = 1;
1772 p[conf->raid_disks].rdev == NULL) {
1778 conf->fullsync = 1;
1779 rcu_assign_pointer(p[conf->raid_disks].rdev, rdev);
1785 print_conf(conf);
1791 struct r1conf *conf = mddev->private;
1794 struct raid1_info *p = conf->mirrors + number;
1796 if (unlikely(number >= conf->raid_disks))
1800 p = conf->mirrors + conf->raid_disks + number;
1802 print_conf(conf);
1813 mddev->recovery_disabled != conf->recovery_disabled &&
1814 mddev->degraded < conf->raid_disks) {
1828 if (conf->mirrors[conf->raid_disks + number].rdev) {
1834 conf->mirrors[conf->raid_disks + number].rdev;
1835 freeze_array(conf, 0);
1844 unfreeze_array(conf);
1849 conf->mirrors[conf->raid_disks + number].rdev = NULL;
1850 unfreeze_array(conf);
1858 print_conf(conf);
1915 struct r1conf *conf = mddev->private;
1918 struct md_rdev *rdev = conf->mirrors[find_bio_disk(r1_bio, bio)].rdev;
1929 !is_badblock(conf->mirrors[r1_bio->read_disk].rdev,
1972 struct r1conf *conf = mddev->private;
1980 rdev = conf->mirrors[r1_bio->read_disk].rdev;
2006 rdev = conf->mirrors[d].rdev;
2015 if (d == conf->raid_disks * 2)
2030 for (d = 0; d < conf->raid_disks * 2; d++) {
2031 rdev = conf->mirrors[d].rdev;
2038 conf->recovery_disabled =
2056 d = conf->raid_disks * 2;
2060 rdev = conf->mirrors[d].rdev;
2071 d = conf->raid_disks * 2;
2075 rdev = conf->mirrors[d].rdev;
2100 struct r1conf *conf = mddev->private;
2107 for (i = 0; i < conf->raid_disks * 2; i++) {
2118 conf->mirrors[i].rdev->data_offset;
2119 bio_set_dev(b, conf->mirrors[i].rdev->bdev);
2127 for (primary = 0; primary < conf->raid_disks * 2; primary++)
2131 rdev_dec_pending(conf->mirrors[primary].rdev, mddev);
2135 for (i = 0; i < conf->raid_disks * 2; i++) {
2169 rdev_dec_pending(conf->mirrors[i].rdev, mddev);
2179 struct r1conf *conf = mddev->private;
2181 int disks = conf->raid_disks * 2;
2203 if (test_bit(Faulty, &conf->mirrors[i].rdev->flags)) {
2209 if (test_bit(FailFast, &conf->mirrors[i].rdev->flags))
2214 md_sync_acct(conf->mirrors[i].rdev->bdev, bio_sectors(wbio));
2230 static void fix_read_error(struct r1conf *conf, int read_disk,
2233 struct mddev *mddev = conf->mddev;
2249 rdev = rcu_dereference(conf->mirrors[d].rdev);
2259 conf->tmppage, REQ_OP_READ, 0, false))
2267 if (d == conf->raid_disks * 2)
2273 struct md_rdev *rdev = conf->mirrors[read_disk].rdev;
2282 d = conf->raid_disks * 2;
2285 rdev = rcu_dereference(conf->mirrors[d].rdev);
2291 conf->tmppage, WRITE);
2300 d = conf->raid_disks * 2;
2303 rdev = rcu_dereference(conf->mirrors[d].rdev);
2309 conf->tmppage, READ)) {
2329 struct r1conf *conf = mddev->private;
2330 struct md_rdev *rdev = conf->mirrors[i].rdev;
2396 static void handle_sync_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
2400 for (m = 0; m < conf->raid_disks * 2 ; m++) {
2401 struct md_rdev *rdev = conf->mirrors[m].rdev;
2412 md_error(conf->mddev, rdev);
2416 md_done_sync(conf->mddev, s, 1);
2419 static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
2424 for (m = 0; m < conf->raid_disks * 2 ; m++)
2426 struct md_rdev *rdev = conf->mirrors[m].rdev;
2430 rdev_dec_pending(rdev, conf->mddev);
2438 md_error(conf->mddev,
2439 conf->mirrors[m].rdev);
2443 rdev_dec_pending(conf->mirrors[m].rdev,
2444 conf->mddev);
2447 spin_lock_irq(&conf->device_lock);
2448 list_add(&r1_bio->retry_list, &conf->bio_end_io_list);
2450 atomic_inc(&conf->nr_queued[idx]);
2451 spin_unlock_irq(&conf->device_lock);
2456 wake_up(&conf->wait_barrier);
2457 md_wakeup_thread(conf->mddev->thread);
2465 static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
2467 struct mddev *mddev = conf->mddev;
2485 rdev = conf->mirrors[r1_bio->read_disk].rdev;
2488 freeze_array(conf, 1);
2489 fix_read_error(conf, r1_bio->read_disk,
2491 unfreeze_array(conf);
2498 rdev_dec_pending(rdev, conf->mddev);
2499 allow_barrier(conf, r1_bio->sector);
2512 struct r1conf *conf = mddev->private;
2513 struct list_head *head = &conf->retry_list;
2519 if (!list_empty_careful(&conf->bio_end_io_list) &&
2522 spin_lock_irqsave(&conf->device_lock, flags);
2524 list_splice_init(&conf->bio_end_io_list, &tmp);
2525 spin_unlock_irqrestore(&conf->device_lock, flags);
2531 atomic_dec(&conf->nr_queued[idx]);
2543 flush_pending_writes(conf);
2545 spin_lock_irqsave(&conf->device_lock, flags);
2547 spin_unlock_irqrestore(&conf->device_lock, flags);
2553 atomic_dec(&conf->nr_queued[idx]);
2554 spin_unlock_irqrestore(&conf->device_lock, flags);
2557 conf = mddev->private;
2561 handle_sync_write_finished(conf, r1_bio);
2566 handle_write_finished(conf, r1_bio);
2568 handle_read_error(conf, r1_bio);
2579 static int init_resync(struct r1conf *conf)
2584 BUG_ON(mempool_initialized(&conf->r1buf_pool));
2586 return mempool_init(&conf->r1buf_pool, buffs, r1buf_pool_alloc,
2587 r1buf_pool_free, conf->poolinfo);
2590 static struct r1bio *raid1_alloc_init_r1buf(struct r1conf *conf)
2592 struct r1bio *r1bio = mempool_alloc(&conf->r1buf_pool, GFP_NOIO);
2597 for (i = conf->poolinfo->raid_disks; i--; ) {
2620 struct r1conf *conf = mddev->private;
2635 if (!mempool_initialized(&conf->r1buf_pool))
2636 if (init_resync(conf))
2650 conf->fullsync = 0;
2653 close_sync(conf);
2656 conf->cluster_sync_low = 0;
2657 conf->cluster_sync_high = 0;
2665 conf->fullsync == 0) {
2673 !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
2683 if (atomic_read(&conf->nr_waiting[idx]))
2691 mddev_is_clustered(mddev) && (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high));
2694 if (raise_barrier(conf, sector_nr))
2697 r1_bio = raid1_alloc_init_r1buf(conf);
2716 for (i = 0; i < conf->raid_disks * 2; i++) {
2720 rdev = rcu_dereference(conf->mirrors[i].rdev);
2723 if (i < conf->raid_disks)
2788 for (i = 0 ; i < conf->raid_disks * 2 ; i++)
2790 struct md_rdev *rdev = conf->mirrors[i].rdev;
2805 conf->recovery_disabled = mddev->recovery_disabled;
2851 !conf->fullsync &&
2858 for (i = 0 ; i < conf->raid_disks * 2; i++) {
2881 conf->cluster_sync_high < sector_nr + nr_sectors) {
2882 conf->cluster_sync_low = mddev->curr_resync_completed;
2883 conf->cluster_sync_high = conf->cluster_sync_low + CLUSTER_RESYNC_WINDOW_SECTORS;
2886 conf->cluster_sync_low,
2887 conf->cluster_sync_high);
2895 for (i = 0; i < conf->raid_disks * 2 && read_targets; i++) {
2926 struct r1conf *conf;
2932 conf = kzalloc(sizeof(struct r1conf), GFP_KERNEL);
2933 if (!conf)
2936 conf->nr_pending = kcalloc(BARRIER_BUCKETS_NR,
2938 if (!conf->nr_pending)
2941 conf->nr_waiting = kcalloc(BARRIER_BUCKETS_NR,
2943 if (!conf->nr_waiting)
2946 conf->nr_queued = kcalloc(BARRIER_BUCKETS_NR,
2948 if (!conf->nr_queued)
2951 conf->barrier = kcalloc(BARRIER_BUCKETS_NR,
2953 if (!conf->barrier)
2956 conf->mirrors = kzalloc(array3_size(sizeof(struct raid1_info),
2959 if (!conf->mirrors)
2962 conf->tmppage = alloc_page(GFP_KERNEL);
2963 if (!conf->tmppage)
2966 conf->poolinfo = kzalloc(sizeof(*conf->poolinfo), GFP_KERNEL);
2967 if (!conf->poolinfo)
2969 conf->poolinfo->raid_disks = mddev->raid_disks * 2;
2970 err = mempool_init(&conf->r1bio_pool, NR_RAID_BIOS, r1bio_pool_alloc,
2971 rbio_pool_free, conf->poolinfo);
2975 err = bioset_init(&conf->bio_split, BIO_POOL_SIZE, 0, 0);
2979 conf->poolinfo->mddev = mddev;
2982 spin_lock_init(&conf->device_lock);
2989 disk = conf->mirrors + mddev->raid_disks + disk_idx;
2991 disk = conf->mirrors + disk_idx;
2999 conf->raid_disks = mddev->raid_disks;
3000 conf->mddev = mddev;
3001 INIT_LIST_HEAD(&conf->retry_list);
3002 INIT_LIST_HEAD(&conf->bio_end_io_list);
3004 spin_lock_init(&conf->resync_lock);
3005 init_waitqueue_head(&conf->wait_barrier);
3007 bio_list_init(&conf->pending_bio_list);
3008 conf->pending_count = 0;
3009 conf->recovery_disabled = mddev->recovery_disabled - 1;
3012 for (i = 0; i < conf->raid_disks * 2; i++) {
3014 disk = conf->mirrors + i;
3016 if (i < conf->raid_disks &&
3017 disk[conf->raid_disks].rdev) {
3024 disk[conf->raid_disks].rdev;
3025 disk[conf->raid_disks].rdev = NULL;
3036 conf->fullsync = 1;
3041 conf->thread = md_register_thread(raid1d, mddev, "raid1");
3042 if (!conf->thread)
3045 return conf;
3048 if (conf) {
3049 mempool_exit(&conf->r1bio_pool);
3050 kfree(conf->mirrors);
3051 safe_put_page(conf->tmppage);
3052 kfree(conf->poolinfo);
3053 kfree(conf->nr_pending);
3054 kfree(conf->nr_waiting);
3055 kfree(conf->nr_queued);
3056 kfree(conf->barrier);
3057 bioset_exit(&conf->bio_split);
3058 kfree(conf);
3066 struct r1conf *conf;
3090 conf = setup_conf(mddev);
3092 conf = mddev->private;
3094 if (IS_ERR(conf))
3095 return PTR_ERR(conf);
3112 for (i = 0; i < conf->raid_disks; i++)
3113 if (conf->mirrors[i].rdev == NULL ||
3114 !test_bit(In_sync, &conf->mirrors[i].rdev->flags) ||
3115 test_bit(Faulty, &conf->mirrors[i].rdev->flags))
3120 if (conf->raid_disks - mddev->degraded < 1) {
3121 md_unregister_thread(&conf->thread);
3126 if (conf->raid_disks - mddev->degraded == 1)
3139 mddev->thread = conf->thread;
3140 conf->thread = NULL;
3141 mddev->private = conf;
3163 raid1_free(mddev, conf);
3169 struct r1conf *conf = priv;
3171 mempool_exit(&conf->r1bio_pool);
3172 kfree(conf->mirrors);
3173 safe_put_page(conf->tmppage);
3174 kfree(conf->poolinfo);
3175 kfree(conf->nr_pending);
3176 kfree(conf->nr_waiting);
3177 kfree(conf->nr_queued);
3178 kfree(conf->barrier);
3179 bioset_exit(&conf->bio_split);
3180 kfree(conf);
3216 * 2/ resize conf->mirrors
3220 * Then resize conf->mirrors and swap in the new r1bio pool.
3228 struct r1conf *conf = mddev->private;
3252 if (raid_disks < conf->raid_disks) {
3254 for (d = 0; d < conf->raid_disks; d++)
3255 if (conf->mirrors[d].rdev)
3282 freeze_array(conf, 0);
3285 oldpool = conf->r1bio_pool;
3286 conf->r1bio_pool = newpool;
3288 for (d = d2 = 0; d < conf->raid_disks; d++) {
3289 struct md_rdev *rdev = conf->mirrors[d].rdev;
3301 kfree(conf->mirrors);
3302 conf->mirrors = newmirrors;
3303 kfree(conf->poolinfo);
3304 conf->poolinfo = newpoolinfo;
3306 spin_lock_irqsave(&conf->device_lock, flags);
3307 mddev->degraded += (raid_disks - conf->raid_disks);
3308 spin_unlock_irqrestore(&conf->device_lock, flags);
3309 conf->raid_disks = mddev->raid_disks = raid_disks;
3312 unfreeze_array(conf);
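The raid1_reshape() comment fragments at source lines 3216-3220 summarize the sequence the matches above carry out: quiesce the array, swap in the new r1bio pool, migrate the rdevs into a resized mirrors array, update raid_disks, then unfreeze. Below is a deliberately simplified sketch of that commit step; freeze_io()/unfreeze_io(), struct mirror and struct r1_state are hypothetical stand-ins for freeze_array()/unfreeze_array(), struct raid1_info and struct r1conf, and error paths plus mempool draining are omitted.

#include <stdlib.h>

struct mirror { void *rdev; };		/* stand-in for struct raid1_info   */

struct r1_state {
	struct mirror	*mirrors;	/* conf->mirrors analogue           */
	void		*r1bio_pool;	/* mempool analogue                 */
	int		raid_disks;
	int		frozen;
};

static void freeze_io(struct r1_state *s)   { s->frozen = 1; }	/* stub */
static void unfreeze_io(struct r1_state *s) { s->frozen = 0; }	/* stub */

/* Commit a reshape: nothing is in flight while the pointers are swapped. */
static void reshape_commit(struct r1_state *s, struct mirror *newmirrors,
			   void *newpool, int new_raid_disks)
{
	freeze_io(s);			/* cf. freeze_array(conf, 0)        */
	free(s->mirrors);
	s->mirrors = newmirrors;	/* already populated by the caller  */
	s->r1bio_pool = newpool;	/* old pool torn down by the caller */
	s->raid_disks = new_raid_disks;
	unfreeze_io(s);			/* cf. unfreeze_array(conf)         */
}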
3324 struct r1conf *conf = mddev->private;
3327 freeze_array(conf, 0);
3329 unfreeze_array(conf);
3338 struct r1conf *conf;
3342 conf = setup_conf(mddev);
3343 if (!IS_ERR(conf)) {
3345 conf->array_frozen = 1;
3349 return conf;