Lines Matching defs:mddev in drivers/md/raid1.c
84 struct mddev *mddev = rdev->mddev;
89 if (WARN_ON(!mddev->serial_info_pool))
91 si = mempool_alloc(mddev->serial_info_pool, GFP_NOIO);
101 struct mddev *mddev = rdev->mddev;
110 mempool_free(si, mddev->serial_info_pool);
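The five matches above (lines 84-110) fall in the write-serialization helpers. As a reading aid, here is a condensed sketch of the allocation side, wait_for_serialization(); the helpers sector_to_idx() and check_and_add_serial() and the serial_io_wait field are taken from memory of the surrounding raid1.c code rather than from the matches themselves, so treat them as assumptions. The counterpart remove_serial() (line 110) returns the serial_info to the same per-mddev pool and wakes waiters.

static void wait_for_serialization(struct md_rdev *rdev, struct r1bio *r1_bio)
{
	int idx = sector_to_idx(r1_bio->sector);
	struct serial_in_rdev *serial = &rdev->serial[idx];
	struct mddev *mddev = rdev->mddev;
	struct serial_info *si;

	if (WARN_ON(!mddev->serial_info_pool))
		return;
	/* Each in-flight write borrows a serial_info from the per-mddev pool
	 * (line 91) and waits until its range can be inserted without overlap. */
	si = mempool_alloc(mddev->serial_info_pool, GFP_NOIO);
	wait_event(serial->serial_io_wait,
		   check_and_add_serial(rdev, r1_bio, si, idx) == 0);
}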
180 if (test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery))
255 struct r1conf *conf = r1_bio->mddev->private;
263 struct r1conf *conf = r1_bio->mddev->private;
270 rdev_dec_pending(conf->mirrors[i].rdev, r1_bio->mddev);
281 struct mddev *mddev = r1_bio->mddev;
282 struct r1conf *conf = mddev->private;
292 md_wakeup_thread(mddev->thread);
313 struct r1conf *conf = r1_bio->mddev->private;
339 struct r1conf *conf = r1_bio->mddev->private;
351 struct r1conf *conf = r1_bio->mddev->private;
368 struct r1conf *conf = r1_bio->mddev->private;
390 if (r1_bio->mddev->degraded == conf->raid_disks ||
391 (r1_bio->mddev->degraded == conf->raid_disks-1 &&
399 rdev_dec_pending(rdev, conf->mddev);
405 mdname(conf->mddev),
423 md_bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector,
427 md_write_end(r1_bio->mddev);
450 struct r1conf *conf = r1_bio->mddev->private;
467 conf->mddev->recovery);
473 md_error(r1_bio->mddev, rdev);
550 } else if (rdev->mddev->serialize_policy)
553 rdev_dec_pending(rdev, conf->mddev);
630 if ((conf->mddev->recovery_cp < this_sector + sectors) ||
631 (mddev_is_clustered(conf->mddev) &&
632 md_cluster_ops->area_resyncing(conf->mddev, READ, this_sector,
802 raid1_prepare_flush_writes(conf->mddev->bitmap);
903 test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery),
906 if (test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {
1102 raid1_log(conf->mddev, "wait freeze");
1128 &r1_bio->mddev->bio_set);
1173 struct mddev *mddev = plug->cb.data;
1174 struct r1conf *conf = mddev->private;
1182 md_wakeup_thread(mddev->thread);
1193 static void init_r1bio(struct r1bio *r1_bio, struct mddev *mddev, struct bio *bio)
1198 r1_bio->mddev = mddev;
1203 alloc_r1bio(struct mddev *mddev, struct bio *bio)
1205 struct r1conf *conf = mddev->private;
1211 init_r1bio(r1_bio, mddev, bio);
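The init_r1bio()/alloc_r1bio() matches above show where each r1bio is stamped with its owning mddev; a condensed sketch, assuming the r1bio mempool lives at conf->r1bio_pool and that the per-mirror bio array holds two slots per raid disk (not verbatim kernel code):

static void init_r1bio(struct r1bio *r1_bio, struct mddev *mddev, struct bio *bio)
{
	r1_bio->master_bio = bio;
	r1_bio->sectors = bio_sectors(bio);
	r1_bio->state = 0;
	/* Every later path (end_io handlers, raid1d, resync) reaches the
	 * r1conf via r1_bio->mddev->private, hence the many matches above. */
	r1_bio->mddev = mddev;
	r1_bio->sector = bio->bi_iter.bi_sector;
}

static struct r1bio *alloc_r1bio(struct mddev *mddev, struct bio *bio)
{
	struct r1conf *conf = mddev->private;
	struct r1bio *r1_bio;

	r1_bio = mempool_alloc(&conf->r1bio_pool, GFP_NOIO);
	/* Clear stale per-mirror bio pointers left over from a previous request. */
	memset(r1_bio->bios, 0, conf->raid_disks * 2 * sizeof(r1_bio->bios[0]));
	init_r1bio(r1_bio, mddev, bio);
	return r1_bio;
}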
1215 static void raid1_read_request(struct mddev *mddev, struct bio *bio,
1218 struct r1conf *conf = mddev->private;
1221 struct bitmap *bitmap = mddev->bitmap;
1259 r1_bio = alloc_r1bio(mddev, bio);
1261 init_r1bio(r1_bio, mddev, bio);
1274 mdname(mddev),
1285 mdname(mddev),
1295 raid1_log(mddev, "wait behind writes");
1312 md_account_bio(mddev, &bio);
1316 &mddev->bio_set);
1329 if (mddev->gendisk)
1330 trace_block_bio_remap(read_bio, disk_devt(mddev->gendisk),
1336 static void raid1_write_request(struct mddev *mddev, struct bio *bio,
1339 struct r1conf *conf = mddev->private;
1342 struct bitmap *bitmap = mddev->bitmap;
1349 if (mddev_is_clustered(mddev) &&
1350 md_cluster_ops->area_resyncing(mddev, WRITE,
1361 if (!md_cluster_ops->area_resyncing(mddev, WRITE,
1382 r1_bio = alloc_r1bio(mddev, bio);
1446 rdev_dec_pending(rdev, mddev);
1475 rdev_dec_pending(conf->mirrors[j].rdev, mddev);
1483 raid1_log(mddev, "wait rdev %d blocked", blocked_rdev->raid_disk);
1484 md_wait_for_blocked_rdev(blocked_rdev, mddev);
1508 md_account_bio(mddev, &bio);
1528 < mddev->bitmap_info.max_write_behind) &&
1541 GFP_NOIO, &mddev->bio_set);
1548 &mddev->bio_set);
1550 if (mddev->serialize_policy)
1561 conf->raid_disks - mddev->degraded > 1)
1567 if (mddev->gendisk)
1568 trace_block_bio_remap(mbio, disk_devt(mddev->gendisk),
1572 if (!raid1_add_bio_to_plug(mddev, mbio, raid1_unplug, disks)) {
1576 md_wakeup_thread(mddev->thread);
1586 static bool raid1_make_request(struct mddev *mddev, struct bio *bio)
1591 && md_flush_request(mddev, bio))
1605 raid1_read_request(mddev, bio, sectors, NULL);
1607 if (!md_write_start(mddev, bio))
1609 raid1_write_request(mddev, bio, sectors);
1614 static void raid1_status(struct seq_file *seq, struct mddev *mddev)
1616 struct r1conf *conf = mddev->private;
1620 conf->raid_disks - mddev->degraded);
1633 * @mddev: affected md device.
1636 * The routine acknowledges &rdev failure and determines new @mddev state.
1638 * - &MD_BROKEN flag is set in &mddev->flags.
1642 * - &mddev->degraded is bumped.
1645 * &mddev->fail_last_dev is off.
1647 static void raid1_error(struct mddev *mddev, struct md_rdev *rdev)
1649 struct r1conf *conf = mddev->private;
1655 (conf->raid_disks - mddev->degraded) == 1) {
1656 set_bit(MD_BROKEN, &mddev->flags);
1658 if (!mddev->fail_last_dev) {
1659 conf->recovery_disabled = mddev->recovery_disabled;
1666 mddev->degraded++;
1672 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
1673 set_mask_bits(&mddev->sb_flags, 0,
1677 mdname(mddev), rdev->bdev,
1678 mdname(mddev), conf->raid_disks - mddev->degraded);
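Read together, the raid1_error() matches (lines 1647-1678) and its kernel-doc fragments (lines 1633-1645) spell out the failure policy: failing the last In_sync member marks the array MD_BROKEN, and the device is only actually failed when fail_last_dev allows it. A condensed sketch, with the device_lock handling and the pr_crit() message elided; flag and field names are assumed to match the listing:

static void raid1_error(struct mddev *mddev, struct md_rdev *rdev)
{
	struct r1conf *conf = mddev->private;

	if (test_bit(In_sync, &rdev->flags) &&
	    (conf->raid_disks - mddev->degraded) == 1) {
		/* Losing the last working member breaks the array. */
		set_bit(MD_BROKEN, &mddev->flags);

		if (!mddev->fail_last_dev) {
			/* Refuse to fail it; remember recovery is disabled. */
			conf->recovery_disabled = mddev->recovery_disabled;
			return;
		}
	}
	if (test_and_clear_bit(In_sync, &rdev->flags))
		mddev->degraded++;
	set_bit(Faulty, &rdev->flags);
	/* Abort any running recovery and push a superblock update. */
	set_bit(MD_RECOVERY_INTR, &mddev->recovery);
	set_mask_bits(&mddev->sb_flags, 0,
		      BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
}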
1690 pr_debug(" --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
1717 static int raid1_spare_active(struct mddev *mddev)
1720 struct r1conf *conf = mddev->private;
1727 * Called under mddev lock, so rcu protection not needed.
1762 mddev->degraded -= count;
1769 static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
1771 struct r1conf *conf = mddev->private;
1778 if (mddev->recovery_disabled == conf->recovery_disabled)
1781 if (md_integrity_add_rdev(rdev, mddev))
1800 if (mddev->gendisk)
1801 disk_stack_limits(mddev->gendisk, rdev->bdev,
1835 static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
1837 struct r1conf *conf = mddev->private;
1859 mddev->recovery_disabled != conf->recovery_disabled &&
1860 mddev->degraded < conf->raid_disks) {
1900 err = md_integrity_register(mddev);
1926 static void abort_sync_write(struct mddev *mddev, struct r1bio *r1_bio)
1934 md_bitmap_end_sync(mddev->bitmap, s, &sync_blocks, 1);
1943 struct mddev *mddev = r1_bio->mddev;
1951 md_done_sync(mddev, s, uptodate);
1960 struct mddev *mddev = r1_bio->mddev;
1961 struct r1conf *conf = mddev->private;
1967 abort_sync_write(mddev, r1_bio);
1971 mddev->recovery);
1996 rdev->mddev->recovery);
2000 md_error(rdev->mddev, rdev);
2017 struct mddev *mddev = r1_bio->mddev;
2018 struct r1conf *conf = mddev->private;
2030 md_error(mddev, rdev);
2073 mdname(mddev), bio->bi_bdev,
2084 mddev->recovery_disabled;
2085 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
2086 md_done_sync(mddev, r1_bio->sectors, 0);
2110 rdev_dec_pending(rdev, mddev);
2144 struct mddev *mddev = r1_bio->mddev;
2145 struct r1conf *conf = mddev->private;
2175 rdev_dec_pending(conf->mirrors[primary].rdev, mddev);
2208 atomic64_add(r1_bio->sectors, &mddev->resync_mismatches);
2209 if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)
2213 rdev_dec_pending(conf->mirrors[i].rdev, mddev);
2221 static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
2223 struct r1conf *conf = mddev->private;
2233 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
2245 !test_bit(MD_RECOVERY_SYNC, &mddev->recovery))))
2248 abort_sync_write(mddev, r1_bio);
2277 struct mddev *mddev = conf->mddev;
2305 rdev_dec_pending(rdev, mddev);
2319 md_error(mddev, rdev);
2336 rdev_dec_pending(rdev, mddev);
2355 mdname(mddev), s,
2360 rdev_dec_pending(rdev, mddev);
2371 struct mddev *mddev = r1_bio->mddev;
2372 struct r1conf *conf = mddev->private;
2411 GFP_NOIO, &mddev->bio_set);
2414 GFP_NOIO, &mddev->bio_set);
2454 md_error(conf->mddev, rdev);
2458 md_done_sync(conf->mddev, s, 1);
2472 rdev_dec_pending(rdev, conf->mddev);
2480 md_error(conf->mddev,
2486 conf->mddev);
2499 md_wakeup_thread(conf->mddev->thread);
2509 struct mddev *mddev = conf->mddev;
2529 if (mddev->ro == 0
2535 } else if (mddev->ro == 0 && test_bit(FailFast, &rdev->flags)) {
2536 md_error(mddev, rdev);
2541 rdev_dec_pending(rdev, conf->mddev);
2547 raid1_read_request(mddev, bio, r1_bio->sectors, r1_bio);
2553 struct mddev *mddev = thread->mddev;
2556 struct r1conf *conf = mddev->private;
2561 md_check_recovery(mddev);
2564 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
2567 if (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
2576 if (mddev->degraded)
2600 mddev = r1_bio->mddev;
2601 conf = mddev->private;
2607 sync_request_write(mddev, r1_bio);
2617 if (mddev->sb_flags & ~(1<<MD_SB_CHANGE_PENDING))
2618 md_check_recovery(mddev);
2661 static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
2664 struct r1conf *conf = mddev->private;
2683 max_sector = mddev->dev_sectors;
2688 * We can find the current address in mddev->curr_resync
2690 if (mddev->curr_resync < max_sector) /* aborted */
2691 md_bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
2696 md_bitmap_close_sync(mddev->bitmap);
2699 if (mddev_is_clustered(mddev)) {
2706 if (mddev->bitmap == NULL &&
2707 mddev->recovery_cp == MaxSector &&
2708 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
2716 if (!md_bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
2717 !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
2734 md_bitmap_cond_end_sync(mddev->bitmap, sector_nr,
2735 mddev_is_clustered(mddev) && (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high));
2753 r1_bio->mddev = mddev;
2801 test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
2802 !test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
2839 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
2849 conf->recovery_disabled = mddev->recovery_disabled;
2850 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
2862 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && read_targets > 0)
2879 if (max_sector > mddev->resync_max)
2880 max_sector = mddev->resync_max; /* Don't do IO beyond here */
2893 if (!md_bitmap_start_sync(mddev->bitmap, sector_nr,
2896 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
2924 if (mddev_is_clustered(mddev) &&
2926 conf->cluster_sync_low = mddev->curr_resync_completed;
2929 md_cluster_ops->resync_info_update(mddev,
2937 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
2960 static sector_t raid1_size(struct mddev *mddev, sector_t sectors, int raid_disks)
2965 return mddev->dev_sectors;
2968 static struct r1conf *setup_conf(struct mddev *mddev)
3001 mddev->raid_disks, 2),
3013 conf->poolinfo->raid_disks = mddev->raid_disks * 2;
3023 conf->poolinfo->mddev = mddev;
3027 rdev_for_each(rdev, mddev) {
3029 if (disk_idx >= mddev->raid_disks
3033 disk = conf->mirrors + mddev->raid_disks + disk_idx;
3043 conf->raid_disks = mddev->raid_disks;
3044 conf->mddev = mddev;
3052 conf->recovery_disabled = mddev->recovery_disabled - 1;
3085 md_register_thread(raid1d, mddev, "raid1"));
3107 static void raid1_free(struct mddev *mddev, void *priv);
3108 static int raid1_run(struct mddev *mddev)
3115 if (mddev->level != 1) {
3117 mdname(mddev), mddev->level);
3120 if (mddev->reshape_position != MaxSector) {
3122 mdname(mddev));
3125 if (mddev_init_writes_pending(mddev) < 0)
3132 if (mddev->private == NULL)
3133 conf = setup_conf(mddev);
3135 conf = mddev->private;
3140 if (mddev->queue)
3141 blk_queue_max_write_zeroes_sectors(mddev->queue, 0);
3143 rdev_for_each(rdev, mddev) {
3144 if (!mddev->gendisk)
3146 disk_stack_limits(mddev->gendisk, rdev->bdev,
3150 mddev->degraded = 0;
3155 mddev->degraded++;
3159 if (conf->raid_disks - mddev->degraded < 1) {
3160 md_unregister_thread(mddev, &conf->thread);
3165 if (conf->raid_disks - mddev->degraded == 1)
3166 mddev->recovery_cp = MaxSector;
3168 if (mddev->recovery_cp != MaxSector)
3170 mdname(mddev));
3172 mdname(mddev), mddev->raid_disks - mddev->degraded,
3173 mddev->raid_disks);
3178 rcu_assign_pointer(mddev->thread, conf->thread);
3180 mddev->private = conf;
3181 set_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);
3183 md_set_array_sectors(mddev, raid1_size(mddev, 0, 0));
3185 ret = md_integrity_register(mddev);
3187 md_unregister_thread(mddev, &mddev->thread);
3193 raid1_free(mddev, conf);
3197 static void raid1_free(struct mddev *mddev, void *priv)
3213 static int raid1_resize(struct mddev *mddev, sector_t sectors)
3222 sector_t newsize = raid1_size(mddev, sectors, 0);
3223 if (mddev->external_size &&
3224 mddev->array_sectors > newsize)
3226 if (mddev->bitmap) {
3227 int ret = md_bitmap_resize(mddev->bitmap, newsize, 0, 0);
3231 md_set_array_sectors(mddev, newsize);
3232 if (sectors > mddev->dev_sectors &&
3233 mddev->recovery_cp > mddev->dev_sectors) {
3234 mddev->recovery_cp = mddev->dev_sectors;
3235 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3237 mddev->dev_sectors = sectors;
3238 mddev->resync_max_sectors = sectors;
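The raid1_resize() matches (lines 3213-3238) cover nearly every statement of the routine; a condensed reconstruction from those lines follows (comment wording mine, return values assumed unchanged):

static int raid1_resize(struct mddev *mddev, sector_t sectors)
{
	/* No resync is running and every member has room, so resizing is
	 * bookkeeping; newly exposed space must still be covered by resync. */
	sector_t newsize = raid1_size(mddev, sectors, 0);

	if (mddev->external_size &&
	    mddev->array_sectors > newsize)
		return -EINVAL;
	if (mddev->bitmap) {
		int ret = md_bitmap_resize(mddev->bitmap, newsize, 0, 0);
		if (ret)
			return ret;
	}
	md_set_array_sectors(mddev, newsize);
	if (sectors > mddev->dev_sectors &&
	    mddev->recovery_cp > mddev->dev_sectors) {
		/* Growing: make sure the added region gets resynced. */
		mddev->recovery_cp = mddev->dev_sectors;
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	}
	mddev->dev_sectors = sectors;
	mddev->resync_max_sectors = sectors;
	return 0;
}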
3242 static int raid1_reshape(struct mddev *mddev)
3258 struct r1conf *conf = mddev->private;
3268 if (mddev->chunk_sectors != mddev->new_chunk_sectors ||
3269 mddev->layout != mddev->new_layout ||
3270 mddev->level != mddev->new_level) {
3271 mddev->new_chunk_sectors = mddev->chunk_sectors;
3272 mddev->new_layout = mddev->layout;
3273 mddev->new_level = mddev->level;
3277 if (!mddev_is_clustered(mddev))
3278 md_allow_write(mddev);
3280 raid_disks = mddev->raid_disks + mddev->delta_disks;
3294 newpoolinfo->mddev = mddev;
3321 sysfs_unlink_rdev(mddev, rdev);
3323 sysfs_unlink_rdev(mddev, rdev);
3324 if (sysfs_link_rdev(mddev, rdev))
3326 mdname(mddev), rdev->raid_disk);
3337 mddev->degraded += (raid_disks - conf->raid_disks);
3339 conf->raid_disks = mddev->raid_disks = raid_disks;
3340 mddev->delta_disks = 0;
3344 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
3345 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3346 md_wakeup_thread(mddev->thread);
3352 static void raid1_quiesce(struct mddev *mddev, int quiesce)
3354 struct r1conf *conf = mddev->private;
3362 static void *raid1_takeover(struct mddev *mddev)
3367 if (mddev->level == 5 && mddev->raid_disks == 2) {
3369 mddev->new_level = 1;
3370 mddev->new_layout = 0;
3371 mddev->new_chunk_sectors = 0;
3372 conf = setup_conf(mddev);
3376 mddev_clear_unsupported_flags(mddev,
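Finally, the raid1_takeover() matches (lines 3362-3376) trace the only supported takeover, a two-disk RAID5: the "new_*" geometry is rewritten and a fresh r1conf is built with setup_conf(). A condensed sketch; the IS_ERR()/ERR_PTR() error convention and the conf->array_frozen step are assumptions, not part of the matched lines:

static void *raid1_takeover(struct mddev *mddev)
{
	/* raid1 can only take over a raid5 with exactly two members. */
	if (mddev->level == 5 && mddev->raid_disks == 2) {
		struct r1conf *conf;

		mddev->new_level = 1;
		mddev->new_layout = 0;
		mddev->new_chunk_sectors = 0;	/* raid1 has no chunking */
		conf = setup_conf(mddev);
		if (!IS_ERR(conf)) {
			/* Array must appear quiesced until it is run. */
			conf->array_frozen = 1;
			mddev_clear_unsupported_flags(mddev,
						      UNSUPPORTED_MDDEV_FLAGS);
		}
		return conf;
	}
	return ERR_PTR(-EINVAL);
}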