Lines Matching defs:mddev
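Judging by the raid1_* entry points below, these matches come from the RAID1 personality (drivers/md/raid1.c); nearly every hit is a function recovering its per-array state from the shared struct mddev. A minimal sketch of that recurring access pattern follows; the names struct my_conf and my_status are hypothetical, used purely for illustration, while the real driver keeps this state in struct r1conf, as the matches themselves show. A second sketch after the listing shows how such entry points are registered with md core.

/* Sketch only, assuming drivers/md/md.h and <linux/seq_file.h> are in scope.
 * Mirrors the pattern visible throughout the listing, e.g.
 *     struct r1conf *conf = mddev->private;
 */
struct my_conf {
	struct mddev	*mddev;		/* back-pointer, as in r1conf */
	int		raid_disks;
};

static void my_status(struct seq_file *seq, struct mddev *mddev)
{
	struct my_conf *conf = mddev->private;	/* personality-private state */

	/* e.g. "[2/1]" for a two-disk mirror with one failed leg,
	 * matching what raid1_status (line 1577 below) prints */
	seq_printf(seq, " [%d/%d]", conf->raid_disks,
		   conf->raid_disks - mddev->degraded);
}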
84 struct mddev *mddev = rdev->mddev;
89 if (WARN_ON(!mddev->serial_info_pool))
91 si = mempool_alloc(mddev->serial_info_pool, GFP_NOIO);
101 struct mddev *mddev = rdev->mddev;
110 mempool_free(si, mddev->serial_info_pool);
179 if (test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery))
251 struct r1conf *conf = r1_bio->mddev->private;
259 struct r1conf *conf = r1_bio->mddev->private;
266 rdev_dec_pending(conf->mirrors[i].rdev, r1_bio->mddev);
277 struct mddev *mddev = r1_bio->mddev;
278 struct r1conf *conf = mddev->private;
288 md_wakeup_thread(mddev->thread);
309 struct r1conf *conf = r1_bio->mddev->private;
334 struct r1conf *conf = r1_bio->mddev->private;
346 struct r1conf *conf = r1_bio->mddev->private;
363 struct r1conf *conf = r1_bio->mddev->private;
385 if (r1_bio->mddev->degraded == conf->raid_disks ||
386 (r1_bio->mddev->degraded == conf->raid_disks-1 &&
394 rdev_dec_pending(rdev, conf->mddev);
401 mdname(conf->mddev),
419 md_bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector,
423 md_write_end(r1_bio->mddev);
446 struct r1conf *conf = r1_bio->mddev->private;
463 conf->mddev->recovery);
469 md_error(r1_bio->mddev, rdev);
546 } else if (rdev->mddev->serialize_policy)
549 rdev_dec_pending(rdev, conf->mddev);
626 if ((conf->mddev->recovery_cp < this_sector + sectors) ||
627 (mddev_is_clustered(conf->mddev) &&
628 md_cluster_ops->area_resyncing(conf->mddev, READ, this_sector,
792 md_bitmap_unplug(conf->mddev->bitmap);
903 test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery),
906 if (test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {
1082 raid1_log(conf->mddev, "wait freeze");
1107 behind_bio = bio_alloc_mddev(GFP_NOIO, vcnt, r1_bio->mddev);
1157 struct mddev *mddev = plug->cb.data;
1158 struct r1conf *conf = mddev->private;
1167 md_wakeup_thread(mddev->thread);
1178 static void init_r1bio(struct r1bio *r1_bio, struct mddev *mddev, struct bio *bio)
1183 r1_bio->mddev = mddev;
1188 alloc_r1bio(struct mddev *mddev, struct bio *bio)
1190 struct r1conf *conf = mddev->private;
1196 init_r1bio(r1_bio, mddev, bio);
1200 static void raid1_read_request(struct mddev *mddev, struct bio *bio,
1203 struct r1conf *conf = mddev->private;
1206 struct bitmap *bitmap = mddev->bitmap;
1240 r1_bio = alloc_r1bio(mddev, bio);
1242 init_r1bio(r1_bio, mddev, bio);
1255 mdname(mddev),
1266 mdname(mddev),
1276 raid1_log(mddev, "wait behind writes");
1293 read_bio = bio_clone_fast(bio, gfp, &mddev->bio_set);
1307 if (mddev->gendisk)
1309 disk_devt(mddev->gendisk), r1_bio->sector);
1314 static void raid1_write_request(struct mddev *mddev, struct bio *bio,
1317 struct r1conf *conf = mddev->private;
1320 struct bitmap *bitmap = mddev->bitmap;
1328 if (mddev_is_clustered(mddev) &&
1329 md_cluster_ops->area_resyncing(mddev, WRITE,
1336 if (!md_cluster_ops->area_resyncing(mddev, WRITE,
1352 r1_bio = alloc_r1bio(mddev, bio);
1356 md_wakeup_thread(mddev->thread);
1357 raid1_log(mddev, "wait queued");
1414 rdev_dec_pending(rdev, mddev);
1443 rdev_dec_pending(conf->mirrors[j].rdev, mddev);
1446 raid1_log(mddev, "wait rdev %d blocked", blocked_rdev->raid_disk);
1447 md_wait_for_blocked_rdev(blocked_rdev, mddev);
1480 < mddev->bitmap_info.max_write_behind) &&
1492 GFP_NOIO, &mddev->bio_set);
1494 mbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set);
1501 } else if (mddev->serialize_policy)
1513 conf->raid_disks - mddev->degraded > 1)
1519 if (mddev->gendisk)
1521 mbio, disk_devt(mddev->gendisk),
1526 cb = blk_check_plugged(raid1_unplug, mddev, sizeof(*plug));
1539 md_wakeup_thread(mddev->thread);
1549 static bool raid1_make_request(struct mddev *mddev, struct bio *bio)
1554 && md_flush_request(mddev, bio))
1568 raid1_read_request(mddev, bio, sectors, NULL);
1570 if (!md_write_start(mddev, bio))
1572 raid1_write_request(mddev, bio, sectors);
1577 static void raid1_status(struct seq_file *seq, struct mddev *mddev)
1579 struct r1conf *conf = mddev->private;
1583 conf->raid_disks - mddev->degraded);
1594 static void raid1_error(struct mddev *mddev, struct md_rdev *rdev)
1597 struct r1conf *conf = mddev->private;
1607 if (test_bit(In_sync, &rdev->flags) && !mddev->fail_last_dev
1608 && (conf->raid_disks - mddev->degraded) == 1) {
1615 conf->recovery_disabled = mddev->recovery_disabled;
1621 mddev->degraded++;
1627 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
1628 set_mask_bits(&mddev->sb_flags, 0,
1632 mdname(mddev), bdevname(rdev->bdev, b),
1633 mdname(mddev), conf->raid_disks - mddev->degraded);
1645 pr_debug(" --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
1673 static int raid1_spare_active(struct mddev *mddev)
1676 struct r1conf *conf = mddev->private;
1683 * Called under mddev lock, so rcu protection not needed.
1718 mddev->degraded -= count;
1725 static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
1727 struct r1conf *conf = mddev->private;
1734 if (mddev->recovery_disabled == conf->recovery_disabled)
1737 if (md_integrity_add_rdev(rdev, mddev))
1756 if (mddev->gendisk)
1757 disk_stack_limits(mddev->gendisk, rdev->bdev,
1783 if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
1784 blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue);
1789 static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
1791 struct r1conf *conf = mddev->private;
1813 mddev->recovery_disabled != conf->recovery_disabled &&
1814 mddev->degraded < conf->raid_disks) {
1854 err = md_integrity_register(mddev);
1880 static void abort_sync_write(struct mddev *mddev, struct r1bio *r1_bio)
1888 md_bitmap_end_sync(mddev->bitmap, s, &sync_blocks, 1);
1897 struct mddev *mddev = r1_bio->mddev;
1905 md_done_sync(mddev, s, uptodate);
1914 struct mddev *mddev = r1_bio->mddev;
1915 struct r1conf *conf = mddev->private;
1921 abort_sync_write(mddev, r1_bio);
1925 mddev->recovery);
1950 rdev->mddev->recovery);
1954 md_error(rdev->mddev, rdev);
1971 struct mddev *mddev = r1_bio->mddev;
1972 struct r1conf *conf = mddev->private;
1984 md_error(mddev, rdev);
2028 mdname(mddev), bio_devname(bio, b),
2039 mddev->recovery_disabled;
2040 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
2041 md_done_sync(mddev, r1_bio->sectors, 0);
2065 rdev_dec_pending(rdev, mddev);
2099 struct mddev *mddev = r1_bio->mddev;
2100 struct r1conf *conf = mddev->private;
2131 rdev_dec_pending(conf->mirrors[primary].rdev, mddev);
2164 atomic64_add(r1_bio->sectors, &mddev->resync_mismatches);
2165 if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)
2169 rdev_dec_pending(conf->mirrors[i].rdev, mddev);
2177 static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
2179 struct r1conf *conf = mddev->private;
2189 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
2201 !test_bit(MD_RECOVERY_SYNC, &mddev->recovery))))
2204 abort_sync_write(mddev, r1_bio);
2233 struct mddev *mddev = conf->mddev;
2261 rdev_dec_pending(rdev, mddev);
2275 md_error(mddev, rdev);
2292 rdev_dec_pending(rdev, mddev);
2312 mdname(mddev), s,
2317 rdev_dec_pending(rdev, mddev);
2328 struct mddev *mddev = r1_bio->mddev;
2329 struct r1conf *conf = mddev->private;
2368 &mddev->bio_set);
2371 &mddev->bio_set);
2412 md_error(conf->mddev, rdev);
2416 md_done_sync(conf->mddev, s, 1);
2430 rdev_dec_pending(rdev, conf->mddev);
2438 md_error(conf->mddev,
2444 conf->mddev);
2457 md_wakeup_thread(conf->mddev->thread);
2467 struct mddev *mddev = conf->mddev;
2486 if (mddev->ro == 0
2492 } else if (mddev->ro == 0 && test_bit(FailFast, &rdev->flags)) {
2493 md_error(mddev, rdev);
2498 rdev_dec_pending(rdev, conf->mddev);
2504 raid1_read_request(mddev, bio, r1_bio->sectors, r1_bio);
2509 struct mddev *mddev = thread->mddev;
2512 struct r1conf *conf = mddev->private;
2517 md_check_recovery(mddev);
2520 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
2523 if (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
2532 if (mddev->degraded)
2556 mddev = r1_bio->mddev;
2557 conf = mddev->private;
2563 sync_request_write(mddev, r1_bio);
2573 if (mddev->sb_flags & ~(1<<MD_SB_CHANGE_PENDING))
2574 md_check_recovery(mddev);
2617 static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
2620 struct r1conf *conf = mddev->private;
2639 max_sector = mddev->dev_sectors;
2644 * We can find the current address in mddev->curr_resync
2646 if (mddev->curr_resync < max_sector) /* aborted */
2647 md_bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
2652 md_bitmap_close_sync(mddev->bitmap);
2655 if (mddev_is_clustered(mddev)) {
2662 if (mddev->bitmap == NULL &&
2663 mddev->recovery_cp == MaxSector &&
2664 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
2672 if (!md_bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
2673 !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
2690 md_bitmap_cond_end_sync(mddev->bitmap, sector_nr,
2691 mddev_is_clustered(mddev) && (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high));
2709 r1_bio->mddev = mddev;
2757 test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
2758 !test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
2795 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
2805 conf->recovery_disabled = mddev->recovery_disabled;
2806 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
2818 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && read_targets > 0)
2835 if (max_sector > mddev->resync_max)
2836 max_sector = mddev->resync_max; /* Don't do IO beyond here */
2849 if (!md_bitmap_start_sync(mddev->bitmap, sector_nr,
2852 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
2880 if (mddev_is_clustered(mddev) &&
2882 conf->cluster_sync_low = mddev->curr_resync_completed;
2885 md_cluster_ops->resync_info_update(mddev,
2893 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
2916 static sector_t raid1_size(struct mddev *mddev, sector_t sectors, int raid_disks)
2921 return mddev->dev_sectors;
2924 static struct r1conf *setup_conf(struct mddev *mddev)
2957 mddev->raid_disks, 2),
2969 conf->poolinfo->raid_disks = mddev->raid_disks * 2;
2979 conf->poolinfo->mddev = mddev;
2983 rdev_for_each(rdev, mddev) {
2985 if (disk_idx >= mddev->raid_disks
2989 disk = conf->mirrors + mddev->raid_disks + disk_idx;
2999 conf->raid_disks = mddev->raid_disks;
3000 conf->mddev = mddev;
3009 conf->recovery_disabled = mddev->recovery_disabled - 1;
3041 conf->thread = md_register_thread(raid1d, mddev, "raid1");
3063 static void raid1_free(struct mddev *mddev, void *priv);
3064 static int raid1_run(struct mddev *mddev)
3072 if (mddev->level != 1) {
3074 mdname(mddev), mddev->level);
3077 if (mddev->reshape_position != MaxSector) {
3079 mdname(mddev));
3082 if (mddev_init_writes_pending(mddev) < 0)
3089 if (mddev->private == NULL)
3090 conf = setup_conf(mddev);
3092 conf = mddev->private;
3097 if (mddev->queue) {
3098 blk_queue_max_write_same_sectors(mddev->queue, 0);
3099 blk_queue_max_write_zeroes_sectors(mddev->queue, 0);
3102 rdev_for_each(rdev, mddev) {
3103 if (!mddev->gendisk)
3105 disk_stack_limits(mddev->gendisk, rdev->bdev,
3111 mddev->degraded = 0;
3116 mddev->degraded++;
3120 if (conf->raid_disks - mddev->degraded < 1) {
3126 if (conf->raid_disks - mddev->degraded == 1)
3127 mddev->recovery_cp = MaxSector;
3129 if (mddev->recovery_cp != MaxSector)
3131 mdname(mddev));
3133 mdname(mddev), mddev->raid_disks - mddev->degraded,
3134 mddev->raid_disks);
3139 mddev->thread = conf->thread;
3141 mddev->private = conf;
3142 set_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);
3144 md_set_array_sectors(mddev, raid1_size(mddev, 0, 0));
3146 if (mddev->queue) {
3149 mddev->queue);
3152 mddev->queue);
3155 ret = md_integrity_register(mddev);
3157 md_unregister_thread(&mddev->thread);
3163 raid1_free(mddev, conf);
3167 static void raid1_free(struct mddev *mddev, void *priv)
3183 static int raid1_resize(struct mddev *mddev, sector_t sectors)
3192 sector_t newsize = raid1_size(mddev, sectors, 0);
3193 if (mddev->external_size &&
3194 mddev->array_sectors > newsize)
3196 if (mddev->bitmap) {
3197 int ret = md_bitmap_resize(mddev->bitmap, newsize, 0, 0);
3201 md_set_array_sectors(mddev, newsize);
3202 if (sectors > mddev->dev_sectors &&
3203 mddev->recovery_cp > mddev->dev_sectors) {
3204 mddev->recovery_cp = mddev->dev_sectors;
3205 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3207 mddev->dev_sectors = sectors;
3208 mddev->resync_max_sectors = sectors;
3212 static int raid1_reshape(struct mddev *mddev)
3228 struct r1conf *conf = mddev->private;
3238 if (mddev->chunk_sectors != mddev->new_chunk_sectors ||
3239 mddev->layout != mddev->new_layout ||
3240 mddev->level != mddev->new_level) {
3241 mddev->new_chunk_sectors = mddev->chunk_sectors;
3242 mddev->new_layout = mddev->layout;
3243 mddev->new_level = mddev->level;
3247 if (!mddev_is_clustered(mddev))
3248 md_allow_write(mddev);
3250 raid_disks = mddev->raid_disks + mddev->delta_disks;
3264 newpoolinfo->mddev = mddev;
3291 sysfs_unlink_rdev(mddev, rdev);
3293 sysfs_unlink_rdev(mddev, rdev);
3294 if (sysfs_link_rdev(mddev, rdev))
3296 mdname(mddev), rdev->raid_disk);
3307 mddev->degraded += (raid_disks - conf->raid_disks);
3309 conf->raid_disks = mddev->raid_disks = raid_disks;
3310 mddev->delta_disks = 0;
3314 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
3315 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3316 md_wakeup_thread(mddev->thread);
3322 static void raid1_quiesce(struct mddev *mddev, int quiesce)
3324 struct r1conf *conf = mddev->private;
3332 static void *raid1_takeover(struct mddev *mddev)
3337 if (mddev->level == 5 && mddev->raid_disks == 2) {
3339 mddev->new_level = 1;
3340 mddev->new_layout = 0;
3341 mddev->new_chunk_sectors = 0;
3342 conf = setup_conf(mddev);
3346 mddev_clear_unsupported_flags(mddev,
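Taken together, the matched raid1_* functions (raid1_run at line 3064, raid1_make_request at 1549, raid1_status at 1577, raid1_error at 1594, raid1_sync_request at 2617, raid1_resize at 3183, raid1_reshape at 3212, raid1_quiesce at 3322, raid1_takeover at 3332, and so on) are the hooks md core invokes with a struct mddev argument. A hedged sketch of how a personality of this vintage wires them up; the md_personality field names are as I recall them for ~5.x kernels, so treat the exact table as an assumption rather than a verbatim copy of raid1.c:

/* Sketch, not verbatim raid1.c: the entry points above plugged into
 * md core's ops table (struct md_personality, drivers/md/md.h). */
static struct md_personality raid1_personality = {
	.name		 = "raid1",
	.level		 = 1,
	.owner		 = THIS_MODULE,
	.make_request	 = raid1_make_request,	/* per-bio entry point */
	.run		 = raid1_run,		/* array start-up */
	.free		 = raid1_free,		/* teardown */
	.status		 = raid1_status,	/* /proc/mdstat line */
	.error_handler	 = raid1_error,		/* mark an rdev Faulty */
	.hot_add_disk	 = raid1_add_disk,
	.hot_remove_disk = raid1_remove_disk,
	.spare_active	 = raid1_spare_active,
	.sync_request	 = raid1_sync_request,	/* resync/recovery window */
	.resize		 = raid1_resize,
	.size		 = raid1_size,
	.check_reshape	 = raid1_reshape,
	.quiesce	 = raid1_quiesce,
	.takeover	 = raid1_takeover,	/* e.g. 2-disk raid5 -> raid1 */
};

The table is registered once at module init via register_md_personality(&raid1_personality); after that, md core resolves level "raid1" to these callbacks whenever such an array is assembled, passing the array's struct mddev into each one, which is why mddev appears on virtually every line above.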