Lines Matching defs:rdev in drivers/md/raid5.c
641 struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev);
642 if (rdev && test_bit(Faulty, &rdev->flags))
643 rdev = rcu_dereference(conf->disks[i].replacement);
644 if (!rdev || test_bit(Faulty, &rdev->flags))
646 else if (test_bit(In_sync, &rdev->flags))
667 struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev);
668 if (rdev && test_bit(Faulty, &rdev->flags))
669 rdev = rcu_dereference(conf->disks[i].replacement);
670 if (!rdev || test_bit(Faulty, &rdev->flags))
672 else if (test_bit(In_sync, &rdev->flags))
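The two near-identical hunks above (641-646 and 667-672) are the per-slot check used when counting degraded devices, run once for the old and once for the new geometry during a reshape. A minimal sketch of the pattern follows; slot_is_degraded() is an invented name, kernel context (md.h/raid5.h definitions) is assumed throughout these sketches, and the caller is assumed to hold rcu_read_lock().

static int slot_is_degraded(struct r5conf *conf, int i)
{
	struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev);

	/* Fall back to the replacement if the primary has failed. */
	if (rdev && test_bit(Faulty, &rdev->flags))
		rdev = rcu_dereference(conf->disks[i].replacement);
	if (!rdev || test_bit(Faulty, &rdev->flags))
		return 1;		/* no usable device in this slot */
	if (test_bit(In_sync, &rdev->flags))
		return 0;		/* fully in sync */
	/* Present but still recovering; the recovery_offset checks that
	 * settle this case in the real code are omitted from the sketch. */
	return 1;
}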
1076 struct md_rdev *rdev, *rrdev = NULL;
1102 smp_mb(); /* Ensure that if rrdev is NULL, rdev won't be */
1103 rdev = rcu_dereference(conf->disks[i].rdev);
1104 if (!rdev) {
1105 rdev = rrdev;
1110 rdev = NULL;
1111 if (rdev == rrdev)
1116 rdev = rrdev;
1120 if (rdev && test_bit(Faulty, &rdev->flags))
1121 rdev = NULL;
1122 if (rdev)
1123 atomic_inc(&rdev->nr_pending);
1134 while (op_is_write(op) && rdev &&
1135 test_bit(WriteErrorSeen, &rdev->flags)) {
1138 int bad = is_badblock(rdev, sh->sector, RAID5_STRIPE_SECTORS(conf),
1144 set_bit(BlockedBadBlocks, &rdev->flags);
1158 atomic_inc(&rdev->nr_pending);
1159 md_wait_for_blocked_rdev(rdev, conf->mddev);
1162 rdev_dec_pending(rdev, conf->mddev);
1163 rdev = NULL;
1167 if (rdev) {
1170 md_sync_acct(rdev->bdev, RAID5_STRIPE_SECTORS(conf));
1174 bio_set_dev(bi, rdev->bdev);
1189 + rdev->new_data_offset);
1192 + rdev->data_offset);
1284 if (!rdev && !rrdev) {
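The 1076-1284 hunk is the I/O submission path: for each stripe member it selects a primary device (rdev) and possibly a replacement (rrdev), and the smp_mb() at 1102 (per its own comment) ensures that if rrdev reads as NULL the following rdev read will not also miss the device. A reduced sketch of the pinning idiom, with grab_rdev_for_io() as an illustrative name; the real code also handles rrdev, WriteErrorSeen waits, and bad-block checks.

static struct md_rdev *grab_rdev_for_io(struct r5conf *conf, int i)
{
	struct md_rdev *rdev;

	rcu_read_lock();
	rdev = rcu_dereference(conf->disks[i].rdev);
	if (rdev && test_bit(Faulty, &rdev->flags))
		rdev = NULL;
	if (rdev)
		/* Pin the device so it cannot go away while the bio is in
		 * flight; the completion handler drops this reference with
		 * rdev_dec_pending(). */
		atomic_inc(&rdev->nr_pending);
	rcu_read_unlock();

	return rdev;
}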
2669 struct md_rdev *rdev = NULL;
2687 * In that case it moved down to 'rdev'.
2688 * rdev is not removed until all requests are finished.
2690 rdev = conf->disks[i].replacement;
2691 if (!rdev)
2692 rdev = conf->disks[i].rdev;
2695 s = sh->sector + rdev->new_data_offset;
2697 s = sh->sector + rdev->data_offset;
2709 bdevname(rdev->bdev, b));
2710 atomic_add(RAID5_STRIPE_SECTORS(conf), &rdev->corrected_errors);
2723 if (atomic_read(&rdev->read_errors))
2724 atomic_set(&rdev->read_errors, 0);
2726 const char *bdn = bdevname(rdev->bdev, b);
2732 atomic_inc(&rdev->read_errors);
2754 } else if (atomic_read(&rdev->read_errors)
2756 if (!test_bit(Faulty, &rdev->flags)) {
2759 atomic_read(&rdev->read_errors),
2766 if (set_bad && test_bit(In_sync, &rdev->flags)
2781 && test_bit(In_sync, &rdev->flags)
2783 rdev, sh->sector, RAID5_STRIPE_SECTORS(conf), 0)))
2784 md_error(conf->mddev, rdev);
2787 rdev_dec_pending(rdev, conf->mddev);
2799 struct md_rdev *rdev;
2806 rdev = conf->disks[i].rdev;
2810 rdev = conf->disks[i].replacement;
2811 if (rdev)
2814 /* rdev was removed and 'replacement'
2815 * replaced it. rdev is not removed
2818 rdev = conf->disks[i].rdev;
2833 md_error(conf->mddev, rdev);
2834 else if (is_badblock(rdev, sh->sector,
2841 set_bit(WriteErrorSeen, &rdev->flags);
2843 if (!test_and_set_bit(WantReplacement, &rdev->flags))
2845 &rdev->mddev->recovery);
2846 } else if (is_badblock(rdev, sh->sector,
2858 rdev_dec_pending(rdev, conf->mddev);
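The hunks at 2669-2787 and 2799-2858 belong to the read and write completion handlers; both end by dropping the nr_pending reference taken at submission. On a failed write, a replacement is failed outright (2833), while the primary device is kept: the failure is noted, a spare is requested, and the bad block itself is recorded later from stripe handling. A condensed sketch of that primary-device path; note_write_error() is an invented name.

static void note_write_error(struct r5conf *conf, struct md_rdev *rdev)
{
	/* Remember that writes to this device have failed and ask for a
	 * replacement; the bad range is recorded later, and the device is
	 * only kicked out if that record cannot be stored. */
	set_bit(WriteErrorSeen, &rdev->flags);
	if (!test_and_set_bit(WantReplacement, &rdev->flags))
		set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
	rdev_dec_pending(rdev, conf->mddev);
}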
2873 static void raid5_error(struct mddev *mddev, struct md_rdev *rdev)
2881 mdname(mddev), bdevname(rdev->bdev, b));
2884 set_bit(Faulty, &rdev->flags);
2885 clear_bit(In_sync, &rdev->flags);
2902 set_bit(Blocked, &rdev->flags);
2905 r5c_update_on_rdev_error(mddev, rdev);
3558 struct md_rdev *rdev;
3560 rdev = rcu_dereference(conf->disks[i].rdev);
3561 if (rdev && test_bit(In_sync, &rdev->flags) &&
3562 !test_bit(Faulty, &rdev->flags))
3563 atomic_inc(&rdev->nr_pending);
3565 rdev = NULL;
3567 if (rdev) {
3569 rdev,
3572 md_error(conf->mddev, rdev);
3573 rdev_dec_pending(rdev, conf->mddev);
3686 struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev);
3687 if (rdev
3688 && !test_bit(Faulty, &rdev->flags)
3689 && !test_bit(In_sync, &rdev->flags)
3690 && !rdev_set_badblocks(rdev, sh->sector,
3693 rdev = rcu_dereference(conf->disks[i].replacement);
3694 if (rdev
3695 && !test_bit(Faulty, &rdev->flags)
3696 && !test_bit(In_sync, &rdev->flags)
3697 && !rdev_set_badblocks(rdev, sh->sector,
3711 struct md_rdev *rdev;
3715 rdev = rcu_dereference(sh->raid_conf->disks[disk_idx].replacement);
3716 if (rdev
3717 && !test_bit(Faulty, &rdev->flags)
3718 && !test_bit(In_sync, &rdev->flags)
3719 && (rdev->recovery_offset <= sh->sector
3720 || rdev->mddev->recovery_cp <= sh->sector))
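The 3711-3720 hunk is the test for whether the replacement device of a slot still needs the block being handled. A minimal sketch of that test as a standalone helper; replacement_wants_block() is an invented name and the caller is assumed to be in an RCU read-side section.

static bool replacement_wants_block(struct stripe_head *sh, int disk_idx)
{
	struct md_rdev *rdev =
		rcu_dereference(sh->raid_conf->disks[disk_idx].replacement);

	/* A live replacement that is not yet in sync, and whose recovery
	 * (or the array-wide resync checkpoint) has not passed this
	 * stripe, still needs this block. */
	return rdev &&
	       !test_bit(Faulty, &rdev->flags) &&
	       !test_bit(In_sync, &rdev->flags) &&
	       (rdev->recovery_offset <= sh->sector ||
		rdev->mddev->recovery_cp <= sh->sector);
}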
4600 struct md_rdev *rdev;
4643 rdev = rcu_dereference(conf->disks[i].replacement);
4644 if (rdev && !test_bit(Faulty, &rdev->flags) &&
4645 rdev->recovery_offset >= sh->sector + RAID5_STRIPE_SECTORS(conf) &&
4646 !is_badblock(rdev, sh->sector, RAID5_STRIPE_SECTORS(conf),
4650 if (rdev && !test_bit(Faulty, &rdev->flags))
4654 rdev = rcu_dereference(conf->disks[i].rdev);
4657 if (rdev && test_bit(Faulty, &rdev->flags))
4658 rdev = NULL;
4659 if (rdev) {
4660 is_bad = is_badblock(rdev, sh->sector, RAID5_STRIPE_SECTORS(conf),
4663 && (test_bit(Blocked, &rdev->flags)
4667 &rdev->flags);
4668 s->blocked_rdev = rdev;
4669 atomic_inc(&rdev->nr_pending);
4673 if (!rdev)
4677 if (!test_bit(WriteErrorSeen, &rdev->flags) &&
4685 } else if (test_bit(In_sync, &rdev->flags))
4687 else if (sh->sector + RAID5_STRIPE_SECTORS(conf) <= rdev->recovery_offset)
4700 * only to .rdev, so make sure to check that*/
4702 conf->disks[i].rdev);
4703 if (rdev2 == rdev)
4713 * only to .rdev, so make sure to check that*/
4715 conf->disks[i].rdev);
4742 if (rdev && !test_bit(Faulty, &rdev->flags))
4744 else if (!rdev) {
4745 rdev = rcu_dereference(
4747 if (rdev && !test_bit(Faulty, &rdev->flags))
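The 4600-4747 hunk is the per-device classification made while analysing a stripe: the replacement is checked first as a possible read source, faulty devices are dropped, blocked devices (or those with an unacknowledged bad block) are pinned and remembered in s->blocked_rdev, and a recovering primary counts as in-sync only once recovery has passed this stripe. A condensed sketch of that last decision; slot_rdev_insync() is an invented helper and rcu_read_lock() is assumed held.

static bool slot_rdev_insync(struct r5conf *conf, struct stripe_head *sh, int i)
{
	struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev);

	if (!rdev || test_bit(Faulty, &rdev->flags))
		return false;
	if (test_bit(In_sync, &rdev->flags))
		return true;
	/* Still recovering: usable only if recovery already covers the
	 * whole range of this stripe. */
	return sh->sector + RAID5_STRIPE_SECTORS(conf) <= rdev->recovery_offset;
}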
5224 struct md_rdev *rdev;
5227 /* We own a safe reference to the rdev */
5228 rdev = conf->disks[i].rdev;
5229 if (!rdev_set_badblocks(rdev, sh->sector,
5231 md_error(conf->mddev, rdev);
5232 rdev_dec_pending(rdev, conf->mddev);
5235 rdev = conf->disks[i].rdev;
5236 rdev_clear_badblocks(rdev, sh->sector,
5238 rdev_dec_pending(rdev, conf->mddev);
5241 rdev = conf->disks[i].replacement;
5242 if (!rdev)
5243 /* rdev have been moved down */
5244 rdev = conf->disks[i].rdev;
5245 rdev_clear_badblocks(rdev, sh->sector,
5247 rdev_dec_pending(rdev, conf->mddev);
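The 5224-5247 hunk is the end-of-stripe bad-block bookkeeping. The analysis pass took a nr_pending reference on each device it touched (see the comment at 5227), so plain ->rdev accesses are safe here; a failed write becomes a recorded bad block, and only if that record cannot be stored is the device failed. A condensed sketch of the write-error case, with comments added:

	struct md_rdev *rdev = conf->disks[i].rdev;	/* pinned during analysis */

	if (!rdev_set_badblocks(rdev, sh->sector,
				RAID5_STRIPE_SECTORS(conf), 0))
		/* Could not record the bad range: fail the whole device. */
		md_error(conf->mddev, rdev);
	rdev_dec_pending(rdev, conf->mddev);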
5367 struct md_rdev *rdev;
5372 rdev = (void*)raid_bi->bi_next;
5374 mddev = rdev->mddev;
5377 rdev_dec_pending(rdev, conf->mddev);
5396 struct md_rdev *rdev;
5424 rdev = rcu_dereference(conf->disks[dd_idx].replacement);
5425 if (!rdev || test_bit(Faulty, &rdev->flags) ||
5426 rdev->recovery_offset < end_sector) {
5427 rdev = rcu_dereference(conf->disks[dd_idx].rdev);
5428 if (rdev &&
5429 (test_bit(Faulty, &rdev->flags) ||
5430 !(test_bit(In_sync, &rdev->flags) ||
5431 rdev->recovery_offset >= end_sector)))
5432 rdev = NULL;
5441 if (rdev) {
5445 atomic_inc(&rdev->nr_pending);
5447 raid_bio->bi_next = (void*)rdev;
5448 bio_set_dev(align_bi, rdev->bdev);
5450 if (is_badblock(rdev, align_bi->bi_iter.bi_sector,
5454 rdev_dec_pending(rdev, mddev);
5458 /* No reshape active, so we can trust rdev->data_offset */
5459 align_bi->bi_iter.bi_sector += rdev->data_offset;
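The 5367-5459 hunk is the aligned-read bypass: a read that fits within one chunk is sent directly to the member disk, and the chosen rdev is pinned and stashed on the parent bio's bi_next so the completion handler (5367-5377) can retrieve it and drop the reference. A reduced sketch of that hand-off; align_read_to_rdev() is an illustrative name and the bad-block and reshape checks are omitted.

static void align_read_to_rdev(struct bio *raid_bio, struct bio *align_bi,
			       struct md_rdev *rdev)
{
	atomic_inc(&rdev->nr_pending);		/* pinned until the endio runs */
	raid_bio->bi_next = (void *)rdev;	/* retrieved by the endio handler */
	bio_set_dev(align_bi, rdev->bdev);
	/* No reshape active in this sketch, so data_offset can be trusted. */
	align_bi->bi_iter.bi_sector += rdev->data_offset;
}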
5952 struct md_rdev *rdev;
6077 rdev_for_each(rdev, mddev)
6078 if (rdev->raid_disk >= 0 &&
6079 !test_bit(Journal, &rdev->flags) &&
6080 !test_bit(In_sync, &rdev->flags) &&
6081 rdev->recovery_offset < sector_nr)
6082 rdev->recovery_offset = sector_nr;
6184 rdev_for_each(rdev, mddev)
6185 if (rdev->raid_disk >= 0 &&
6186 !test_bit(Journal, &rdev->flags) &&
6187 !test_bit(In_sync, &rdev->flags) &&
6188 rdev->recovery_offset < sector_nr)
6189 rdev->recovery_offset = sector_nr;
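The hunks at 6077-6082 and 6184-6189 repeat the same reshape bookkeeping: once everything up to sector_nr has been safely rewritten, every active, non-journal member that is not yet fully in sync has its recovery_offset advanced to that point. Consolidated below with comments added:

	rdev_for_each(rdev, mddev)
		if (rdev->raid_disk >= 0 &&		/* active member */
		    !test_bit(Journal, &rdev->flags) &&	/* skip the journal */
		    !test_bit(In_sync, &rdev->flags) &&	/* still recovering */
		    rdev->recovery_offset < sector_nr)
			rdev->recovery_offset = sector_nr;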
6285 struct md_rdev *rdev = READ_ONCE(conf->disks[i].rdev);
6287 if (rdev == NULL || test_bit(Faulty, &rdev->flags))
7156 struct md_rdev *rdev;
7235 rdev_for_each(rdev, mddev) {
7236 if (test_bit(Journal, &rdev->flags))
7238 if (blk_queue_nonrot(bdev_get_queue(rdev->bdev))) {
7303 rdev_for_each(rdev, mddev) {
7304 raid_disk = rdev->raid_disk;
7306 || raid_disk < 0 || test_bit(Journal, &rdev->flags))
7310 if (test_bit(Replacement, &rdev->flags)) {
7313 disk->replacement = rdev;
7315 if (disk->rdev)
7317 disk->rdev = rdev;
7320 if (test_bit(In_sync, &rdev->flags)) {
7323 mdname(mddev), bdevname(rdev->bdev, b), raid_disk);
7324 } else if (rdev->saved_raid_disk != raid_disk)
7440 struct md_rdev *rdev;
7454 rdev_for_each(rdev, mddev) {
7457 if (test_bit(Journal, &rdev->flags)) {
7458 journal_dev = rdev;
7461 if (rdev->raid_disk < 0)
7463 diff = (rdev->new_data_offset - rdev->data_offset);
7602 rdev = conf->disks[i].rdev;
7603 if (!rdev && conf->disks[i].replacement) {
7605 rdev = conf->disks[i].replacement;
7607 clear_bit(Replacement, &rdev->flags);
7608 conf->disks[i].rdev = rdev;
7610 if (!rdev)
7618 if (test_bit(In_sync, &rdev->flags)) {
7633 rdev->recovery_offset = reshape_offset;
7635 if (rdev->recovery_offset < reshape_offset) {
7637 if (!only_parity(rdev->raid_disk,
7643 if (!only_parity(rdev->raid_disk,
7739 rdev_for_each(rdev, mddev) {
7740 disk_stack_limits(mddev->gendisk, rdev->bdev,
7741 rdev->data_offset << 9);
7742 disk_stack_limits(mddev->gendisk, rdev->bdev,
7743 rdev->new_data_offset << 9);
7804 struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev);
7805 seq_printf (seq, "%s", rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
7828 if (tmp->rdev)
7830 i, !test_bit(Faulty, &tmp->rdev->flags),
7831 bdevname(tmp->rdev->bdev, b));
7850 if (!tmp->rdev
7851 || !test_and_clear_bit(In_sync, &tmp->rdev->flags))
7853 if (tmp->rdev) {
7858 set_bit(Faulty, &tmp->rdev->flags);
7860 tmp->rdev->sysfs_state);
7863 } else if (tmp->rdev
7864 && tmp->rdev->recovery_offset == MaxSector
7865 && !test_bit(Faulty, &tmp->rdev->flags)
7866 && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
7868 sysfs_notify_dirent_safe(tmp->rdev->sysfs_state);
7878 static int raid5_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
7882 int number = rdev->raid_disk;
7887 if (test_bit(Journal, &rdev->flags) && conf->log) {
7902 if (rdev == p->rdev)
7903 rdevp = &p->rdev;
7904 else if (rdev == p->replacement)
7911 clear_bit(In_sync, &rdev->flags);
7913 if (test_bit(In_sync, &rdev->flags) ||
7914 atomic_read(&rdev->nr_pending)) {
7921 if (!test_bit(Faulty, &rdev->flags) &&
7924 (!p->replacement || p->replacement == rdev) &&
7930 if (!test_bit(RemoveSynchronized, &rdev->flags)) {
7932 if (atomic_read(&rdev->nr_pending)) {
7935 *rdevp = rdev;
7939 err = log_modify(conf, rdev, false);
7944 /* We must have just cleared 'rdev' */
7945 p->rdev = p->replacement;
7953 err = log_modify(conf, p->rdev, true);
7956 clear_bit(WantReplacement, &rdev->flags);
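The 7878-7956 hunk is raid5_remove_disk(): a device may be unplugged only once it is no longer in sync and has no in-flight I/O, and if a replacement has taken over, the slot pointer is moved across rather than cleared. A minimal sketch of the acceptance check; can_remove_rdev() is an invented helper.

static int can_remove_rdev(struct md_rdev *rdev)
{
	/* Refuse the hot-remove while the device is still part of the
	 * array or still has I/O pending against it. */
	if (test_bit(In_sync, &rdev->flags) ||
	    atomic_read(&rdev->nr_pending))
		return -EBUSY;
	return 0;
}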
7963 static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev)
7972 if (test_bit(Journal, &rdev->flags)) {
7976 rdev->raid_disk = 0;
7981 ret = log_init(conf, rdev, false);
7994 if (rdev->saved_raid_disk < 0 && has_failed(conf))
7998 if (rdev->raid_disk >= 0)
7999 first = last = rdev->raid_disk;
8002 * find the disk ... but prefer rdev->saved_raid_disk
8005 if (rdev->saved_raid_disk >= 0 &&
8006 rdev->saved_raid_disk >= first &&
8007 rdev->saved_raid_disk <= last &&
8008 conf->disks[rdev->saved_raid_disk].rdev == NULL)
8009 first = rdev->saved_raid_disk;
8013 if (p->rdev == NULL) {
8014 clear_bit(In_sync, &rdev->flags);
8015 rdev->raid_disk = disk;
8016 if (rdev->saved_raid_disk != disk)
8018 rcu_assign_pointer(p->rdev, rdev);
8020 err = log_modify(conf, rdev, true);
8027 if (test_bit(WantReplacement, &p->rdev->flags) &&
8029 clear_bit(In_sync, &rdev->flags);
8030 set_bit(Replacement, &rdev->flags);
8031 rdev->raid_disk = disk;
8034 rcu_assign_pointer(p->replacement, rdev);
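The 7963-8034 hunk is raid5_add_disk(): a spare is placed into an empty slot (preferring rdev->saved_raid_disk) or installed as a replacement when the current member has WantReplacement set, and in both cases the pointer is published with rcu_assign_pointer() so the lockless readers above, which use rcu_dereference(), see a fully initialised rdev. A minimal sketch of the empty-slot case; install_spare() is an invented name and struct disk_info follows raid5.h.

static void install_spare(struct disk_info *p, struct md_rdev *rdev, int disk)
{
	clear_bit(In_sync, &rdev->flags);	/* spare starts out of sync */
	rdev->raid_disk = disk;
	/* Publish the pointer only after the rdev is fully set up. */
	rcu_assign_pointer(p->rdev, rdev);
}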
8149 struct md_rdev *rdev;
8162 rdev_for_each(rdev, mddev) {
8163 if (!test_bit(In_sync, &rdev->flags)
8164 && !test_bit(Faulty, &rdev->flags))
8222 rdev_for_each(rdev, mddev)
8223 if (rdev->raid_disk < 0 &&
8224 !test_bit(Faulty, &rdev->flags)) {
8225 if (raid5_add_disk(mddev, rdev) == 0) {
8226 if (rdev->raid_disk
8228 set_bit(In_sync, &rdev->flags);
8230 rdev->recovery_offset = 0;
8233 sysfs_link_rdev(mddev, rdev);
8235 } else if (rdev->raid_disk >= conf->previous_raid_disks
8236 && !test_bit(Faulty, &rdev->flags)) {
8238 set_bit(In_sync, &rdev->flags);
8268 rdev_for_each(rdev, mddev)
8269 rdev->new_data_offset = rdev->data_offset;
8291 struct md_rdev *rdev;
8299 rdev_for_each(rdev, conf->mddev)
8300 if (rdev->raid_disk >= 0 &&
8301 !test_bit(Journal, &rdev->flags) &&
8302 !test_bit(In_sync, &rdev->flags))
8303 rdev->recovery_offset = MaxSector;
8329 struct md_rdev *rdev = conf->disks[d].rdev;
8330 if (rdev)
8331 clear_bit(In_sync, &rdev->flags);
8332 rdev = conf->disks[d].replacement;
8333 if (rdev)
8334 clear_bit(In_sync, &rdev->flags);
8650 struct md_rdev *rdev;
8652 rdev_for_each(rdev, mddev)
8653 if (test_bit(Journal, &rdev->flags)) {