Lines matching defs:rdev in drivers/md/raid10.c
381 struct md_rdev *rdev;
385 rdev = r10_bio->devs[slot].rdev;
409 rdev->raid_disk))
414 rdev_dec_pending(rdev, conf->mddev);
417 * oops, read error - keep the refcount on the rdev
421 rdev->bdev,
460 struct md_rdev *rdev = NULL;
469 rdev = conf->mirrors[dev].replacement;
470 if (!rdev) {
473 rdev = conf->mirrors[dev].rdev;
483 md_error(rdev->mddev, rdev);
485 set_bit(WriteErrorSeen, &rdev->flags);
486 if (!test_and_set_bit(WantReplacement, &rdev->flags))
488 &rdev->mddev->recovery);
491 if (test_bit(FailFast, &rdev->flags) &&
493 md_error(rdev->mddev, rdev);
500 if (!test_bit(Faulty, &rdev->flags))
528 * before rdev->recovery_offset, but for simplicity we don't
531 if (test_bit(In_sync, &rdev->flags) &&
532 !test_bit(Faulty, &rdev->flags))
536 if (is_badblock(rdev,
557 rdev_dec_pending(rdev, conf->mddev);
722 * The rdev for the device selected will have nr_pending incremented.
738 struct md_rdev *best_dist_rdev, *best_pending_rdev, *rdev = NULL;
778 rdev = rcu_dereference(conf->mirrors[disk].replacement);
779 if (rdev == NULL || test_bit(Faulty, &rdev->flags) ||
781 rdev->recovery_offset) {
783 * Read replacement first to prevent reading both rdev
785 * rdev.
788 rdev = rcu_dereference(conf->mirrors[disk].rdev);
790 if (rdev == NULL ||
791 test_bit(Faulty, &rdev->flags))
793 if (!test_bit(In_sync, &rdev->flags) &&
794 r10_bio->devs[slot].addr + sectors > rdev->recovery_offset)
798 if (is_badblock(rdev, dev_sector, sectors,
819 best_dist_rdev = rdev;
832 nonrot = bdev_nonrot(rdev->bdev);
834 pending = atomic_read(&rdev->nr_pending);
838 best_pending_rdev = rdev;
861 best_dist_rdev = rdev;
867 rdev = best_pending_rdev;
870 rdev = best_dist_rdev;
875 atomic_inc(&rdev->nr_pending);
878 rdev = NULL;
882 return rdev;
1099 struct md_rdev *rdev)
1101 if (!test_bit(MD_RECOVERY_RESHAPE, &rdev->mddev->recovery) ||
1103 return rdev->data_offset;
1105 return rdev->new_data_offset;
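The matches at 1099-1105 are the whole of the data-offset helper. A minimal sketch of the logic they imply; the second half of the condition at 1101 is elided in the listing and is stood in for by the hypothetical r10bio_previous():

static sector_t choose_data_offset(struct r10bio *r10_bio,
				   struct md_rdev *rdev)
{
	/* Outside a reshape, every bio uses the original data offset. */
	if (!test_bit(MD_RECOVERY_RESHAPE, &rdev->mddev->recovery) ||
	    r10bio_previous(r10_bio))	/* assumed: bio targets the old layout */
		return rdev->data_offset;
	/* During a reshape, bios aimed at the new layout use the new offset. */
	return rdev->new_data_offset;
}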
1180 struct md_rdev *rdev;
1186 if (slot >= 0 && r10_bio->devs[slot].rdev) {
1189 * safely dereference the rdev in the r10_bio,
1203 err_rdev = rcu_dereference(conf->mirrors[disk].rdev);
1209 err_rdev = r10_bio->devs[slot].rdev;
1216 rdev = read_balance(conf, r10_bio, &max_sectors);
1217 if (!rdev) {
1229 rdev->bdev,
1248 read_bio = bio_alloc_clone(rdev->bdev, bio, gfp, &mddev->bio_set);
1251 r10_bio->devs[slot].rdev = rdev;
1254 choose_data_offset(r10_bio, rdev);
1257 if (test_bit(FailFast, &rdev->flags) &&
1278 struct md_rdev *rdev;
1283 rdev = conf->mirrors[devnum].replacement;
1284 if (rdev == NULL) {
1285 /* Replacement just got moved to main 'rdev' */
1287 rdev = conf->mirrors[devnum].rdev;
1290 rdev = conf->mirrors[devnum].rdev;
1292 mbio = bio_alloc_clone(rdev->bdev, bio, GFP_NOIO, &mddev->bio_set);
1299 choose_data_offset(r10_bio, rdev));
1303 &conf->mirrors[devnum].rdev->flags)
1311 /* flush_pending_writes() needs access to the rdev so...*/
1312 mbio->bi_bdev = (void *)rdev;
1327 struct md_rdev *rdev, *rrdev;
1331 * Read replacement first to prevent reading both rdev and
1332 * replacement as NULL during replacement replace rdev.
1335 rdev = rcu_dereference(mirror->rdev);
1336 if (rdev == rrdev)
1340 return rdev;
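The matches at 1327-1340 come from the helper that fetches a mirror's rdev/replacement pair. A sketch of the ordering they describe, assuming the shown reads are all the helper does; whether a barrier is needed between the two reads is not visible in the listing and is omitted here:

static struct md_rdev *dereference_rdev_and_rrdev(struct raid10_info *mirror,
						  struct md_rdev **prrdev)
{
	struct md_rdev *rdev, *rrdev;

	/*
	 * Read replacement first to prevent seeing both rdev and
	 * replacement as NULL while a replacement is being promoted.
	 */
	rrdev = rcu_dereference(mirror->replacement);
	rdev = rcu_dereference(mirror->rdev);
	if (rdev == rrdev)	/* replacement already moved to the main slot */
		rrdev = NULL;

	*prrdev = rrdev;
	return rdev;
}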
1353 struct md_rdev *rdev, *rrdev;
1355 rdev = dereference_rdev_and_rrdev(&conf->mirrors[i], &rrdev);
1356 if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
1357 atomic_inc(&rdev->nr_pending);
1358 blocked_rdev = rdev;
1367 if (rdev && test_bit(WriteErrorSeen, &rdev->flags)) {
1380 is_bad = is_badblock(rdev, dev_sector, r10_bio->sectors,
1387 atomic_inc(&rdev->nr_pending);
1388 set_bit(BlockedBadBlocks, &rdev->flags);
1389 blocked_rdev = rdev;
1399 raid10_log(conf->mddev, "%s wait rdev %d blocked",
1463 * inc refcount on their rdev. Record them by setting
1482 struct md_rdev *rdev, *rrdev;
1484 rdev = dereference_rdev_and_rrdev(&conf->mirrors[d], &rrdev);
1485 if (rdev && (test_bit(Faulty, &rdev->flags)))
1486 rdev = NULL;
1493 if (!rdev && !rrdev) {
1497 if (rdev && test_bit(WriteErrorSeen, &rdev->flags)) {
1503 is_bad = is_badblock(rdev, dev_sector, max_sectors,
1529 if (rdev) {
1531 atomic_inc(&rdev->nr_pending);
1617 struct md_rdev *rdev = NULL;
1629 rdev = conf->mirrors[dev].replacement;
1630 if (!rdev) {
1632 * raid10_remove_disk uses smp_mb to make sure rdev is set to
1634 * rdev first without barrier protect even replacement is NULL
1637 rdev = conf->mirrors[dev].rdev;
1641 rdev_dec_pending(rdev, conf->mddev);
1785 * inc refcount on their rdev. Record them by setting
1790 struct md_rdev *rdev, *rrdev;
1792 rdev = dereference_rdev_and_rrdev(&conf->mirrors[disk], &rrdev);
1796 if (rdev && (test_bit(Faulty, &rdev->flags)))
1797 rdev = NULL;
1800 if (!rdev && !rrdev)
1803 if (rdev) {
1805 atomic_inc(&rdev->nr_pending);
1848 * It doesn't need to use rcu lock to get rdev here. We already
1849 * add rdev->nr_pending in the first loop.
1852 struct md_rdev *rdev = conf->mirrors[disk].rdev;
1860 md_submit_discard_bio(mddev, rdev, mbio,
1861 dev_start + choose_data_offset(r10_bio, rdev),
1958 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
1959 seq_printf(seq, "%s", rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
1989 struct md_rdev *rdev;
1991 (rdev = rcu_dereference(conf->mirrors[this].rdev)) &&
1992 test_bit(In_sync, &rdev->flags))
2020 * @rdev: member device to fail.
2022 * The routine acknowledges &rdev failure and determines new @mddev state.
2029 * @rdev is marked as &Faulty excluding case when array is failed and
2032 static void raid10_error(struct mddev *mddev, struct md_rdev *rdev)
2039 if (test_bit(In_sync, &rdev->flags) && !enough(conf, rdev->raid_disk)) {
2047 if (test_and_clear_bit(In_sync, &rdev->flags))
2051 set_bit(Blocked, &rdev->flags);
2052 set_bit(Faulty, &rdev->flags);
2058 mdname(mddev), rdev->bdev,
2065 struct md_rdev *rdev;
2076 * rcu protection of rdev is not needed */
2078 rdev = conf->mirrors[i].rdev;
2079 if (rdev)
2081 i, !test_bit(In_sync, &rdev->flags),
2082 !test_bit(Faulty, &rdev->flags),
2083 rdev->bdev);
2114 if (!tmp->rdev
2115 || !test_and_clear_bit(In_sync, &tmp->rdev->flags))
2117 if (tmp->rdev) {
2122 set_bit(Faulty, &tmp->rdev->flags);
2124 tmp->rdev->sysfs_state);
2127 } else if (tmp->rdev
2128 && tmp->rdev->recovery_offset == MaxSector
2129 && !test_bit(Faulty, &tmp->rdev->flags)
2130 && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
2132 sysfs_notify_dirent_safe(tmp->rdev->sysfs_state);
2143 static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
2157 if (rdev->saved_raid_disk < 0 && !_enough(conf, 1, -1))
2160 if (md_integrity_add_rdev(rdev, mddev))
2163 if (rdev->raid_disk >= 0)
2164 first = last = rdev->raid_disk;
2166 if (rdev->saved_raid_disk >= first &&
2167 rdev->saved_raid_disk < conf->geo.raid_disks &&
2168 conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
2169 mirror = rdev->saved_raid_disk;
2176 if (p->rdev) {
2177 if (test_bit(WantReplacement, &p->rdev->flags) &&
2184 disk_stack_limits(mddev->gendisk, rdev->bdev,
2185 rdev->data_offset << 9);
2189 rdev->raid_disk = mirror;
2191 if (rdev->saved_raid_disk != mirror)
2193 rcu_assign_pointer(p->rdev, rdev);
2199 clear_bit(In_sync, &rdev->flags);
2200 set_bit(Replacement, &rdev->flags);
2201 rdev->raid_disk = repl_slot;
2204 disk_stack_limits(mddev->gendisk, rdev->bdev,
2205 rdev->data_offset << 9);
2207 rcu_assign_pointer(p->replacement, rdev);
2214 static int raid10_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
2218 int number = rdev->raid_disk;
2226 if (rdev == p->rdev)
2227 rdevp = &p->rdev;
2228 else if (rdev == p->replacement)
2233 if (test_bit(In_sync, &rdev->flags) ||
2234 atomic_read(&rdev->nr_pending)) {
2241 if (!test_bit(Faulty, &rdev->flags) &&
2243 (!p->replacement || p->replacement == rdev) &&
2250 if (!test_bit(RemoveSynchronized, &rdev->flags)) {
2252 if (atomic_read(&rdev->nr_pending)) {
2255 *rdevp = rdev;
2260 /* We must have just cleared 'rdev' */
2261 p->rdev = p->replacement;
2269 clear_bit(WantReplacement, &rdev->flags);
2289 &conf->mirrors[d].rdev->corrected_errors);
2294 rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev);
2358 struct md_rdev *rdev = NULL;
2362 rdev = conf->mirrors[d].replacement;
2364 rdev = conf->mirrors[d].rdev;
2368 md_error(mddev, rdev);
2370 set_bit(WriteErrorSeen, &rdev->flags);
2371 if (!test_and_set_bit(WantReplacement, &rdev->flags))
2373 &rdev->mddev->recovery);
2376 } else if (is_badblock(rdev,
2382 rdev_dec_pending(rdev, mddev);
2431 struct md_rdev *rdev;
2443 rdev = conf->mirrors[d].rdev;
2466 } else if (test_bit(FailFast, &rdev->flags)) {
2468 md_error(rdev->mddev, rdev);
2477 bio_reset(tbio, conf->mirrors[d].rdev->bdev, REQ_OP_WRITE);
2488 atomic_inc(&conf->mirrors[d].rdev->nr_pending);
2490 md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(tbio));
2492 if (test_bit(FailFast, &conf->mirrors[d].rdev->flags))
2494 tbio->bi_iter.bi_sector += conf->mirrors[d].rdev->data_offset;
2555 struct md_rdev *rdev;
2562 rdev = conf->mirrors[dr].rdev;
2564 ok = sync_page_io(rdev,
2570 rdev = conf->mirrors[dw].rdev;
2572 ok = sync_page_io(rdev,
2578 set_bit(WriteErrorSeen, &rdev->flags);
2580 &rdev->flags))
2582 &rdev->mddev->recovery);
2590 rdev_set_badblocks(rdev, addr, s, 0);
2592 if (rdev != conf->mirrors[dw].rdev) {
2594 struct md_rdev *rdev2 = conf->mirrors[dw].rdev;
2646 atomic_inc(&conf->mirrors[d].rdev->nr_pending);
2647 md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(wbio));
2659 * Used by fix_read_error() to decay the per rdev read_errors.
2664 static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
2668 unsigned int read_errors = atomic_read(&rdev->read_errors);
2672 if (rdev->last_read_error == 0) {
2674 rdev->last_read_error = cur_time_mon;
2679 rdev->last_read_error) / 3600;
2681 rdev->last_read_error = cur_time_mon;
2689 atomic_set(&rdev->read_errors, 0);
2691 atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
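The matches at 2664-2691 are the read-error decay helper used by fix_read_error(). A sketch of the behaviour they imply, assuming ktime_get_seconds() as the time source and the shift-overflow guard; per line 2691, the stored count is halved once for every hour since the last error:

static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
{
	unsigned int read_errors = atomic_read(&rdev->read_errors);
	time64_t cur_time_mon = ktime_get_seconds();	/* assumed time source */
	long hours_since_last;

	if (rdev->last_read_error == 0) {
		/* first read error seen on this device: just start the clock */
		rdev->last_read_error = cur_time_mon;
		return;
	}

	hours_since_last = (long)(cur_time_mon - rdev->last_read_error) / 3600;
	rdev->last_read_error = cur_time_mon;

	if (hours_since_last >= 8 * sizeof(read_errors))
		atomic_set(&rdev->read_errors, 0);	/* shift would clear it anyway */
	else
		atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
}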
2694 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
2700 if (is_badblock(rdev, sector, sectors, &first_bad, &bad_sectors)
2701 && (op == REQ_OP_READ || test_bit(WriteErrorSeen, &rdev->flags)))
2703 if (sync_page_io(rdev, sector, sectors << 9, page, op, false))
2707 set_bit(WriteErrorSeen, &rdev->flags);
2708 if (!test_and_set_bit(WantReplacement, &rdev->flags))
2710 &rdev->mddev->recovery);
2713 if (!rdev_set_badblocks(rdev, sector, sectors, 0))
2714 md_error(rdev->mddev, rdev);
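The matches at 2694-2714 are the synchronous page-I/O wrapper used while repairing read errors. A sketch under the assumption that the elided parameter list at 2695 is (int sectors, struct page *page, enum req_op op) and that the return values are -1 / 1 / 0 for skipped / succeeded / failed:

static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
			    int sectors, struct page *page, enum req_op op)
{
	sector_t first_bad;
	int bad_sectors;

	/* Skip reads that hit a known bad block, and writes too once
	 * WriteErrorSeen is set for the device. */
	if (is_badblock(rdev, sector, sectors, &first_bad, &bad_sectors) &&
	    (op == REQ_OP_READ || test_bit(WriteErrorSeen, &rdev->flags)))
		return -1;
	if (sync_page_io(rdev, sector, sectors << 9, page, op, false))
		return 1;	/* I/O succeeded */
	if (op == REQ_OP_WRITE) {
		/* Failed write: remember it and ask for a replacement device. */
		set_bit(WriteErrorSeen, &rdev->flags);
		if (!test_and_set_bit(WantReplacement, &rdev->flags))
			set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
	}
	/* Record the bad block; if even that fails, fail the whole device. */
	if (!rdev_set_badblocks(rdev, sector, sectors, 0))
		md_error(rdev->mddev, rdev);
	return 0;
}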
2730 struct md_rdev *rdev;
2734 /* still own a reference to this rdev, so it cannot
2737 rdev = conf->mirrors[d].rdev;
2739 if (test_bit(Faulty, &rdev->flags))
2744 check_decay_read_errors(mddev, rdev);
2745 atomic_inc(&rdev->read_errors);
2746 if (atomic_read(&rdev->read_errors) > max_read_errors) {
2748 mdname(mddev), rdev->bdev,
2749 atomic_read(&rdev->read_errors), max_read_errors);
2751 mdname(mddev), rdev->bdev);
2752 md_error(mddev, rdev);
2772 rdev = rcu_dereference(conf->mirrors[d].rdev);
2773 if (rdev &&
2774 test_bit(In_sync, &rdev->flags) &&
2775 !test_bit(Faulty, &rdev->flags) &&
2776 is_badblock(rdev, r10_bio->devs[sl].addr + sect, s,
2778 atomic_inc(&rdev->nr_pending);
2780 success = sync_page_io(rdev,
2786 rdev_dec_pending(rdev, mddev);
2803 rdev = conf->mirrors[dn].rdev;
2806 rdev,
2810 md_error(mddev, rdev);
2825 rdev = rcu_dereference(conf->mirrors[d].rdev);
2826 if (!rdev ||
2827 test_bit(Faulty, &rdev->flags) ||
2828 !test_bit(In_sync, &rdev->flags))
2831 atomic_inc(&rdev->nr_pending);
2833 if (r10_sync_page_io(rdev,
2844 rdev)),
2845 rdev->bdev);
2848 rdev->bdev);
2850 rdev_dec_pending(rdev, mddev);
2859 rdev = rcu_dereference(conf->mirrors[d].rdev);
2860 if (!rdev ||
2861 test_bit(Faulty, &rdev->flags) ||
2862 !test_bit(In_sync, &rdev->flags))
2865 atomic_inc(&rdev->nr_pending);
2867 switch (r10_sync_page_io(rdev,
2877 choose_data_offset(r10_bio, rdev)),
2878 rdev->bdev);
2881 rdev->bdev);
2888 choose_data_offset(r10_bio, rdev)),
2889 rdev->bdev);
2890 atomic_add(s, &rdev->corrected_errors);
2893 rdev_dec_pending(rdev, mddev);
2908 struct md_rdev *rdev = conf->mirrors[r10_bio->devs[i].devnum].rdev;
2917 * We currently own a reference to the rdev.
2926 if (rdev->badblocks.shift < 0)
2929 block_sectors = roundup(1 << rdev->badblocks.shift,
2930 bdev_logical_block_size(rdev->bdev) >> 9);
2942 wbio = bio_alloc_clone(rdev->bdev, bio, GFP_NOIO,
2947 choose_data_offset(r10_bio, rdev);
2952 ok = rdev_set_badblocks(rdev, wsector,
2969 struct md_rdev *rdev = r10_bio->devs[slot].rdev;
2985 else if (!test_bit(FailFast, &rdev->flags)) {
2990 md_error(mddev, rdev);
2992 rdev_dec_pending(rdev, mddev);
3011 struct md_rdev *rdev;
3017 rdev = conf->mirrors[dev].rdev;
3023 rdev,
3028 rdev,
3031 md_error(conf->mddev, rdev);
3033 rdev = conf->mirrors[dev].replacement;
3040 rdev,
3045 rdev,
3048 md_error(conf->mddev, rdev);
3057 rdev = conf->mirrors[dev].rdev;
3060 rdev,
3063 rdev_dec_pending(rdev, conf->mddev);
3067 md_error(conf->mddev, rdev);
3071 rdev_dec_pending(rdev, conf->mddev);
3074 rdev = conf->mirrors[dev].replacement;
3075 if (rdev && bio == IO_MADE_GOOD) {
3077 rdev,
3080 rdev_dec_pending(rdev, conf->mddev);
3372 struct md_rdev *rdev =
3374 if (rdev)
3375 rdev->recovery_offset = MaxSector;
3459 mrdev = rcu_dereference(mirror->rdev);
3527 struct md_rdev *rdev = rcu_dereference(
3528 conf->mirrors[j].rdev);
3529 if (rdev == NULL || test_bit(Faulty, &rdev->flags)) {
3543 struct md_rdev *rdev =
3544 rcu_dereference(conf->mirrors[d].rdev);
3547 if (!rdev ||
3548 !test_bit(In_sync, &rdev->flags))
3554 if (is_badblock(rdev, sector, max_sync,
3571 if (test_bit(FailFast, &rdev->flags))
3575 rdev->data_offset;
3576 bio_set_dev(bio, rdev->bdev);
3577 atomic_inc(&rdev->nr_pending);
3681 if (conf->mirrors[d].rdev &&
3683 &conf->mirrors[d].rdev->flags))
3743 struct md_rdev *rdev;
3751 rdev = rcu_dereference(conf->mirrors[d].rdev);
3752 if (rdev == NULL || test_bit(Faulty, &rdev->flags)) {
3757 if (is_badblock(rdev, sector, max_sync,
3769 atomic_inc(&rdev->nr_pending);
3775 if (test_bit(FailFast, &rdev->flags))
3777 bio->bi_iter.bi_sector = sector + rdev->data_offset;
3778 bio_set_dev(bio, rdev->bdev);
3781 rdev = rcu_dereference(conf->mirrors[d].replacement);
3782 if (rdev == NULL || test_bit(Faulty, &rdev->flags)) {
3786 atomic_inc(&rdev->nr_pending);
3797 if (test_bit(FailFast, &rdev->flags))
3799 bio->bi_iter.bi_sector = sector + rdev->data_offset;
3800 bio_set_dev(bio, rdev->bdev);
3809 rdev_dec_pending(conf->mirrors[d].rdev,
4152 struct md_rdev *rdev;
4191 rdev_for_each(rdev, mddev) {
4194 disk_idx = rdev->raid_disk;
4202 if (test_bit(Replacement, &rdev->flags)) {
4205 disk->replacement = rdev;
4207 if (disk->rdev)
4209 disk->rdev = rdev;
4211 diff = (rdev->new_data_offset - rdev->data_offset);
4220 disk_stack_limits(mddev->gendisk, rdev->bdev,
4221 rdev->data_offset << 9);
4252 if (!disk->rdev && disk->replacement) {
4254 disk->rdev = disk->replacement;
4256 clear_bit(Replacement, &disk->rdev->flags);
4259 if (!disk->rdev ||
4260 !test_bit(In_sync, &disk->rdev->flags)) {
4263 if (disk->rdev &&
4264 disk->rdev->saved_raid_disk < 0)
4392 struct md_rdev *rdev;
4415 rdev_for_each(rdev, mddev)
4416 if (rdev->raid_disk >= 0) {
4417 rdev->new_raid_disk = rdev->raid_disk * 2;
4418 rdev->sectors = size;
4519 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
4520 if (!rdev || test_bit(Faulty, &rdev->flags))
4522 else if (!test_bit(In_sync, &rdev->flags))
4535 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
4536 if (!rdev || test_bit(Faulty, &rdev->flags))
4538 else if (!test_bit(In_sync, &rdev->flags)) {
4571 struct md_rdev *rdev;
4586 rdev_for_each(rdev, mddev) {
4587 if (!test_bit(In_sync, &rdev->flags)
4588 && !test_bit(Faulty, &rdev->flags))
4590 if (rdev->raid_disk >= 0) {
4591 long long diff = (rdev->new_data_offset
4592 - rdev->data_offset);
4652 rdev_for_each(rdev, mddev) {
4653 if (rdev->raid_disk > -1 &&
4654 !test_bit(Faulty, &rdev->flags))
4655 sb = page_address(rdev->sb_page);
4679 rdev_for_each(rdev, mddev)
4680 if (rdev->raid_disk < 0 &&
4681 !test_bit(Faulty, &rdev->flags)) {
4682 if (raid10_add_disk(mddev, rdev) == 0) {
4683 if (rdev->raid_disk >=
4685 set_bit(In_sync, &rdev->flags);
4687 rdev->recovery_offset = 0;
4690 sysfs_link_rdev(mddev, rdev);
4692 } else if (rdev->raid_disk >= conf->prev.raid_disks
4693 && !test_bit(Faulty, &rdev->flags)) {
4695 set_bit(In_sync, &rdev->flags);
4731 rdev_for_each(rdev, mddev)
4732 rdev->new_data_offset = rdev->data_offset;
4818 struct md_rdev *rdev;
4926 rdev = read_balance(conf, r10_bio, &max_sectors);
4929 if (!rdev) {
4939 read_bio = bio_alloc_bioset(rdev->bdev, RESYNC_PAGES, REQ_OP_READ,
4942 + rdev->data_offset);
4958 sb = page_address(rdev->sb_page);
4989 rdev2 = rcu_dereference(conf->mirrors[d].rdev);
5075 struct md_rdev *rdev;
5078 rdev = rcu_dereference(conf->mirrors[d].replacement);
5081 rdev = rcu_dereference(conf->mirrors[d].rdev);
5084 if (!rdev || test_bit(Faulty, &rdev->flags)) {
5088 atomic_inc(&rdev->nr_pending);
5163 struct md_rdev *rdev = rcu_dereference(conf->mirrors[d].rdev);
5165 if (rdev == NULL ||
5166 test_bit(Faulty, &rdev->flags) ||
5167 !test_bit(In_sync, &rdev->flags))
5171 atomic_inc(&rdev->nr_pending);
5173 success = sync_page_io(rdev,
5178 rdev_dec_pending(rdev, mddev);
5212 struct md_rdev *rdev = NULL;
5216 rdev = conf->mirrors[d].replacement;
5217 if (!rdev) {
5219 rdev = conf->mirrors[d].rdev;
5224 md_error(mddev, rdev);
5227 rdev_dec_pending(rdev, mddev);
5259 struct md_rdev *rdev = rcu_dereference(conf->mirrors[d].rdev);
5260 if (rdev)
5261 clear_bit(In_sync, &rdev->flags);
5262 rdev = rcu_dereference(conf->mirrors[d].replacement);
5263 if (rdev)
5264 clear_bit(In_sync, &rdev->flags);
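Taken together, most of the matches above follow one reference-counting pattern: look the rdev up under RCU, bump nr_pending before issuing I/O, and have the completion path drop it with rdev_dec_pending(). A minimal sketch of that pattern, using a hypothetical submit_to_rdev() caller that does not exist in the file:

static void submit_to_rdev(struct r10conf *conf, int disk, struct bio *bio)
{
	struct md_rdev *rdev;

	rcu_read_lock();
	rdev = rcu_dereference(conf->mirrors[disk].rdev);
	if (rdev && !test_bit(Faulty, &rdev->flags))
		atomic_inc(&rdev->nr_pending);	/* pin the device for this I/O */
	else
		rdev = NULL;
	rcu_read_unlock();

	if (!rdev) {
		bio_io_error(bio);
		return;
	}

	bio_set_dev(bio, rdev->bdev);
	submit_bio_noacct(bio);
	/* the bi_end_io handler drops the pin via rdev_dec_pending(rdev, conf->mddev) */
}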