Lines Matching defs:rdev
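(All matches below appear to be from drivers/md/raid10.c, the md RAID10 personality; the leading number on each line is that match's line number in the source file. Judging from the bio->bi_disk and mddev->fail_last_dev usage, the tree looks like a v5.x kernel, so details may differ in other versions.)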

354 struct md_rdev *rdev;
358 rdev = r10_bio->devs[slot].rdev;
382 rdev->raid_disk))
387 rdev_dec_pending(rdev, conf->mddev);
390 * oops, read error - keep the refcount on the rdev
395 bdevname(rdev->bdev, b),
434 struct md_rdev *rdev = NULL;
443 rdev = conf->mirrors[dev].replacement;
444 if (!rdev) {
447 rdev = conf->mirrors[dev].rdev;
457 md_error(rdev->mddev, rdev);
459 set_bit(WriteErrorSeen, &rdev->flags);
460 if (!test_and_set_bit(WantReplacement, &rdev->flags))
462 &rdev->mddev->recovery);
465 if (test_bit(FailFast, &rdev->flags) &&
467 md_error(rdev->mddev, rdev);
474 if (!test_bit(Faulty, &rdev->flags))
502 * before rdev->recovery_offset, but for simplicity we don't
505 if (test_bit(In_sync, &rdev->flags) &&
506 !test_bit(Faulty, &rdev->flags))
510 if (is_badblock(rdev,
531 rdev_dec_pending(rdev, conf->mddev);
696 * The rdev for the device selected will have nr_pending incremented.
712 struct md_rdev *best_dist_rdev, *best_pending_rdev, *rdev = NULL;
752 rdev = rcu_dereference(conf->mirrors[disk].replacement);
753 if (rdev == NULL || test_bit(Faulty, &rdev->flags) ||
755 rdev->recovery_offset) {
757 * Read replacement first to prevent reading both rdev
759 * rdev.
762 rdev = rcu_dereference(conf->mirrors[disk].rdev);
764 if (rdev == NULL ||
765 test_bit(Faulty, &rdev->flags))
767 if (!test_bit(In_sync, &rdev->flags) &&
768 r10_bio->devs[slot].addr + sectors > rdev->recovery_offset)
772 if (is_badblock(rdev, dev_sector, sectors,
793 best_dist_rdev = rdev;
806 nonrot = blk_queue_nonrot(bdev_get_queue(rdev->bdev));
808 pending = atomic_read(&rdev->nr_pending);
812 best_pending_rdev = rdev;
835 best_dist_rdev = rdev;
841 rdev = best_pending_rdev;
844 rdev = best_dist_rdev;
849 atomic_inc(&rdev->nr_pending);
852 rdev = NULL;
856 return rdev;
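
The read_balance() matches above (kernel lines 696-856) show this file's central device-selection discipline: under RCU, check the replacement before the main rdev so a device in mid-promotion is never seen as NULL in both slots, skip Faulty and not-yet-recovered devices, and pin the winner by bumping nr_pending before the RCU read section ends. Below is a minimal userspace model of that pin/unpin handshake; the names (rdev_model, pick_and_pin, unpin) are hypothetical stand-ins, and C11 atomics stand in for the kernel's RCU and bitops.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct rdev_model {
        atomic_int nr_pending;           /* models rdev->nr_pending */
        atomic_bool faulty;              /* models test_bit(Faulty, &rdev->flags) */
    };

    struct mirror_model {
        _Atomic(struct rdev_model *) rdev;          /* main device */
        _Atomic(struct rdev_model *) replacement;   /* in-flight replacement */
    };

    /* Choose a readable device and pin it so it cannot go away under us.
     * Like read_balance(): replacement first, then the main rdev; the
     * winner has nr_pending raised before we return it. */
    static struct rdev_model *pick_and_pin(struct mirror_model *m)
    {
        struct rdev_model *r = atomic_load(&m->replacement);

        if (r == NULL || atomic_load(&r->faulty))
            r = atomic_load(&m->rdev);
        if (r == NULL || atomic_load(&r->faulty))
            return NULL;
        atomic_fetch_add(&r->nr_pending, 1);  /* atomic_inc(&rdev->nr_pending) */
        return r;
    }

    /* Drop the pin once the I/O completes; models rdev_dec_pending(). */
    static void unpin(struct rdev_model *r)
    {
        atomic_fetch_sub(&r->nr_pending, 1);
    }
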
893 struct md_rdev *rdev = (void*)bio->bi_disk;
895 bio_set_dev(bio, rdev->bdev);
896 if (test_bit(Faulty, &rdev->flags)) {
1046 struct md_rdev *rdev)
1048 if (!test_bit(MD_RECOVERY_RESHAPE, &rdev->mddev->recovery) ||
1050 return rdev->data_offset;
1052 return rdev->new_data_offset;
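
Lines 1046-1052 above give choose_data_offset() almost in full: while a reshape is migrating the array to a new layout, I/O that still belongs to the old layout uses rdev->data_offset and I/O in the already-reshaped region uses rdev->new_data_offset. A compact userspace model follows; the elided half of the kernel's condition (line 1049, an r10_bio state test in mainline) is represented here by the hypothetical previous_layout flag.

    #include <stdbool.h>
    #include <stdint.h>

    typedef uint64_t sector_t;    /* stand-in for the kernel's sector_t */

    struct rdev_offsets {
        bool reshaping;           /* models test_bit(MD_RECOVERY_RESHAPE, ...) */
        sector_t data_offset;     /* where the old layout lives on the device */
        sector_t new_data_offset; /* where the post-reshape layout lives */
    };

    static sector_t choose_data_offset_model(const struct rdev_offsets *rdev,
                                             bool previous_layout)
    {
        if (!rdev->reshaping || previous_layout)
            return rdev->data_offset;
        return rdev->new_data_offset;
    }
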
1087 struct md_rdev *rdev = (void*)bio->bi_disk;
1089 bio_set_dev(bio, rdev->bdev);
1090 if (test_bit(Faulty, &rdev->flags)) {
1135 struct md_rdev *rdev;
1141 if (slot >= 0 && r10_bio->devs[slot].rdev) {
1144 * safely dereference the rdev in the r10_bio,
1158 err_rdev = rcu_dereference(conf->mirrors[disk].rdev);
1164 err_rdev = r10_bio->devs[slot].rdev;
1170 rdev = read_balance(conf, r10_bio, &max_sectors);
1171 if (!rdev) {
1183 bdevname(rdev->bdev, b),
1201 r10_bio->devs[slot].rdev = rdev;
1204 choose_data_offset(r10_bio, rdev);
1205 bio_set_dev(read_bio, rdev->bdev);
1208 if (test_bit(FailFast, &rdev->flags) &&
1232 struct md_rdev *rdev;
1237 rdev = conf->mirrors[devnum].replacement;
1238 if (rdev == NULL) {
1239 /* Replacement just got moved to main 'rdev' */
1241 rdev = conf->mirrors[devnum].rdev;
1244 rdev = conf->mirrors[devnum].rdev;
1253 choose_data_offset(r10_bio, rdev));
1254 bio_set_dev(mbio, rdev->bdev);
1258 &conf->mirrors[devnum].rdev->flags)
1267 /* flush_pending_writes() needs access to the rdev so...*/
1268 mbio->bi_disk = (void *)rdev;
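
Kernel lines 893-896, 1087-1090, and 1267-1268 are three sides of one trick: the write path cannot resolve the target block device yet, because flush_pending_writes() and the unplug path still need the rdev itself, so the rdev pointer is smuggled through the bio's bi_disk field and only swapped for the real bdev at submission time, with a late Faulty check so writes to a device that died while queued are failed early. A small userspace model, with hypothetical bio_m/rdev_m types:

    #include <stdbool.h>
    #include <stddef.h>

    struct rdev_m {
        bool faulty;   /* models test_bit(Faulty, &rdev->flags) */
        void *bdev;    /* models rdev->bdev */
    };

    struct bio_m {
        void *disk;    /* overloaded: the rdev while queued,
                        * the real bdev once submitted */
    };

    /* Queue side: stash the rdev, defer the bdev lookup.
     * Models mbio->bi_disk = (void *)rdev (kernel line 1268). */
    static void queue_write(struct bio_m *bio, struct rdev_m *rdev)
    {
        bio->disk = rdev;
    }

    /* Submission side: recover the rdev, point the bio at the real
     * device, and say whether the write may proceed (lines 893-896). */
    static bool submit_queued(struct bio_m *bio)
    {
        struct rdev_m *rdev = bio->disk;  /* (void *)bio->bi_disk */

        bio->disk = rdev->bdev;           /* bio_set_dev(bio, rdev->bdev) */
        return !rdev->faulty;             /* Faulty => caller errors the bio */
    }
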
1341 * inc refcount on their rdev. Record them by setting
1359 struct md_rdev *rdev, *rrdev;
1363 * Read replacement first to prevent reading both rdev and
1364 * replacement as NULL during replacement replace rdev.
1367 rdev = rcu_dereference(conf->mirrors[d].rdev);
1368 if (rdev == rrdev)
1370 if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
1371 atomic_inc(&rdev->nr_pending);
1372 blocked_rdev = rdev;
1380 if (rdev && (test_bit(Faulty, &rdev->flags)))
1381 rdev = NULL;
1388 if (!rdev && !rrdev) {
1392 if (rdev && test_bit(WriteErrorSeen, &rdev->flags)) {
1398 is_bad = is_badblock(rdev, dev_sector, max_sectors,
1404 atomic_inc(&rdev->nr_pending);
1405 set_bit(BlockedBadBlocks, &rdev->flags);
1406 blocked_rdev = rdev;
1433 if (rdev) {
1435 atomic_inc(&rdev->nr_pending);
1452 rdev_dec_pending(conf->mirrors[d].rdev, mddev);
1455 struct md_rdev *rdev;
1457 rdev = conf->mirrors[d].replacement;
1458 if (!rdev) {
1461 rdev = conf->mirrors[d].rdev;
1463 rdev_dec_pending(rdev, mddev);
1467 raid10_log(conf->mddev, "wait rdev %d blocked", blocked_rdev->raid_disk);
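
The raid10_write_request() matches (lines 1341-1467) show md's standard Blocked-device dance: scan every write target under RCU; if one is Blocked, or has an unacknowledged bad block (which sets BlockedBadBlocks), keep a reference to that one device, undo the nr_pending pins already taken on the others, drop the lock, sleep until it unblocks, and redo the whole scan. A condensed model of that retry loop (busy-waiting here; the kernel sleeps via md_wait_for_blocked_rdev()):

    #include <stdatomic.h>
    #include <stdbool.h>

    struct rdev_w {
        atomic_int nr_pending;
        atomic_bool blocked;   /* models test_bit(Blocked, &rdev->flags) */
    };

    static void wait_until_unblocked(struct rdev_w *r)
    {
        while (atomic_load(&r->blocked))
            ;  /* the kernel sleeps here instead of spinning */
    }

    /* Pin every target, or back out and wait if one is blocked. */
    static void pin_all_or_wait(struct rdev_w **devs, int n)
    {
    retry:
        for (int i = 0; i < n; i++) {
            struct rdev_w *r = devs[i];

            if (atomic_load(&r->blocked)) {
                atomic_fetch_add(&r->nr_pending, 1);  /* keep it to wait on */
                for (int j = 0; j < i; j++)           /* undo earlier pins */
                    atomic_fetch_sub(&devs[j]->nr_pending, 1);
                wait_until_unblocked(r);
                atomic_fetch_sub(&r->nr_pending, 1);
                goto retry;
            }
            atomic_fetch_add(&r->nr_pending, 1);
        }
        /* all targets pinned; the caller can now issue the writes */
    }
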
1575 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
1576 seq_printf(seq, "%s", rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
1606 struct md_rdev *rdev;
1608 (rdev = rcu_dereference(conf->mirrors[this].rdev)) &&
1609 test_bit(In_sync, &rdev->flags))
1634 static void raid10_error(struct mddev *mddev, struct md_rdev *rdev)
1647 if (test_bit(In_sync, &rdev->flags) && !mddev->fail_last_dev
1648 && !enough(conf, rdev->raid_disk)) {
1655 if (test_and_clear_bit(In_sync, &rdev->flags))
1661 set_bit(Blocked, &rdev->flags);
1662 set_bit(Faulty, &rdev->flags);
1668 mdname(mddev), bdevname(rdev->bdev, b),
1675 struct md_rdev *rdev;
1686 * rcu protection of rdev is not needed */
1689 rdev = conf->mirrors[i].rdev;
1690 if (rdev)
1692 i, !test_bit(In_sync, &rdev->flags),
1693 !test_bit(Faulty, &rdev->flags),
1694 bdevname(rdev->bdev,b));
1725 if (!tmp->rdev
1726 || !test_and_clear_bit(In_sync, &tmp->rdev->flags))
1728 if (tmp->rdev) {
1733 set_bit(Faulty, &tmp->rdev->flags);
1735 tmp->rdev->sysfs_state);
1738 } else if (tmp->rdev
1739 && tmp->rdev->recovery_offset == MaxSector
1740 && !test_bit(Faulty, &tmp->rdev->flags)
1741 && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
1743 sysfs_notify_dirent_safe(tmp->rdev->sysfs_state);
1754 static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
1767 if (rdev->saved_raid_disk < 0 && !_enough(conf, 1, -1))
1770 if (md_integrity_add_rdev(rdev, mddev))
1773 if (rdev->raid_disk >= 0)
1774 first = last = rdev->raid_disk;
1776 if (rdev->saved_raid_disk >= first &&
1777 rdev->saved_raid_disk < conf->geo.raid_disks &&
1778 conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
1779 mirror = rdev->saved_raid_disk;
1786 if (p->rdev) {
1787 if (!test_bit(WantReplacement, &p->rdev->flags) ||
1790 clear_bit(In_sync, &rdev->flags);
1791 set_bit(Replacement, &rdev->flags);
1792 rdev->raid_disk = mirror;
1795 disk_stack_limits(mddev->gendisk, rdev->bdev,
1796 rdev->data_offset << 9);
1798 rcu_assign_pointer(p->replacement, rdev);
1803 disk_stack_limits(mddev->gendisk, rdev->bdev,
1804 rdev->data_offset << 9);
1808 rdev->raid_disk = mirror;
1810 if (rdev->saved_raid_disk != mirror)
1812 rcu_assign_pointer(p->rdev, rdev);
1815 if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
1822 static int raid10_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
1826 int number = rdev->raid_disk;
1834 if (rdev == p->rdev)
1835 rdevp = &p->rdev;
1836 else if (rdev == p->replacement)
1841 if (test_bit(In_sync, &rdev->flags) ||
1842 atomic_read(&rdev->nr_pending)) {
1849 if (!test_bit(Faulty, &rdev->flags) &&
1851 (!p->replacement || p->replacement == rdev) &&
1858 if (!test_bit(RemoveSynchronized, &rdev->flags)) {
1860 if (atomic_read(&rdev->nr_pending)) {
1863 *rdevp = rdev;
1868 /* We must have just cleared 'rdev' */
1869 p->rdev = p->replacement;
1877 clear_bit(WantReplacement, &rdev->flags);
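
raid10_remove_disk() (lines 1822-1877) encodes the hot-unplug ordering: refuse while the device is In_sync or has I/O pending, unpublish the slot pointer, wait out an RCU grace period, then re-check nr_pending in case a reader pinned the device during the race window (restoring the pointer and returning busy if so), and only then promote a waiting replacement into the main slot. A sketch of that ordering, with synchronize_rcu() reduced to a placeholder:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct rdev_r {
        atomic_int nr_pending;
        bool in_sync;
    };

    struct slot_r {
        struct rdev_r *rdev;          /* main device */
        struct rdev_r *replacement;   /* queued spare, may be NULL */
    };

    static void wait_grace_period(void)
    {
        /* stands in for synchronize_rcu(): all readers that saw the old
         * pointer have finished by the time this returns */
    }

    /* Returns false (-EBUSY in the kernel) if the device cannot go yet. */
    static bool remove_main_rdev(struct slot_r *p)
    {
        struct rdev_r *rdev = p->rdev;

        if (rdev->in_sync || atomic_load(&rdev->nr_pending))
            return false;                      /* still needed or still busy */

        p->rdev = NULL;                        /* unpublish the pointer */
        wait_grace_period();
        if (atomic_load(&rdev->nr_pending)) {  /* lost a race; retry later */
            p->rdev = rdev;
            return false;
        }
        if (p->replacement) {
            p->rdev = p->replacement;          /* "We must have just cleared 'rdev'" */
            p->replacement = NULL;
        }
        return true;
    }
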
1897 &conf->mirrors[d].rdev->corrected_errors);
1902 rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev);
1966 struct md_rdev *rdev = NULL;
1970 rdev = conf->mirrors[d].replacement;
1972 rdev = conf->mirrors[d].rdev;
1976 md_error(mddev, rdev);
1978 set_bit(WriteErrorSeen, &rdev->flags);
1979 if (!test_and_set_bit(WantReplacement, &rdev->flags))
1981 &rdev->mddev->recovery);
1984 } else if (is_badblock(rdev,
1990 rdev_dec_pending(rdev, mddev);
2039 struct md_rdev *rdev;
2051 rdev = conf->mirrors[d].rdev;
2074 } else if (test_bit(FailFast, &rdev->flags)) {
2076 md_error(rdev->mddev, rdev);
2097 atomic_inc(&conf->mirrors[d].rdev->nr_pending);
2099 md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(tbio));
2101 if (test_bit(FailFast, &conf->mirrors[d].rdev->flags))
2103 tbio->bi_iter.bi_sector += conf->mirrors[d].rdev->data_offset;
2104 bio_set_dev(tbio, conf->mirrors[d].rdev->bdev);
2165 struct md_rdev *rdev;
2172 rdev = conf->mirrors[dr].rdev;
2174 ok = sync_page_io(rdev,
2180 rdev = conf->mirrors[dw].rdev;
2182 ok = sync_page_io(rdev,
2188 set_bit(WriteErrorSeen, &rdev->flags);
2190 &rdev->flags))
2192 &rdev->mddev->recovery);
2200 rdev_set_badblocks(rdev, addr, s, 0);
2202 if (rdev != conf->mirrors[dw].rdev) {
2204 struct md_rdev *rdev2 = conf->mirrors[dw].rdev;
2256 atomic_inc(&conf->mirrors[d].rdev->nr_pending);
2257 md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(wbio));
2269 * Used by fix_read_error() to decay the per rdev read_errors.
2274 static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
2278 unsigned int read_errors = atomic_read(&rdev->read_errors);
2282 if (rdev->last_read_error == 0) {
2284 rdev->last_read_error = cur_time_mon;
2289 rdev->last_read_error) / 3600;
2291 rdev->last_read_error = cur_time_mon;
2299 atomic_set(&rdev->read_errors, 0);
2301 atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
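
check_decay_read_errors() (lines 2274-2301) ages the per-device soft read-error count: it is halved once for every full hour since the previous error, so an old burst of errors does not push an otherwise healthy device over the max_read_errors kick-out threshold used by fix_read_error(). The arithmetic as a standalone function:

    #include <time.h>

    /* *last_error holds the time of the previous read error (0 = none
     * seen yet); returns the aged error count. */
    static unsigned int decay_read_errors(unsigned int read_errors,
                                          time_t *last_error, time_t now)
    {
        long hours;

        if (*last_error == 0) {        /* first error: just start the clock */
            *last_error = now;
            return read_errors;
        }
        hours = (long)((now - *last_error) / 3600);
        *last_error = now;
        if (hours >= (long)(sizeof(read_errors) * 8))
            return 0;                  /* a full-width shift would be undefined */
        return read_errors >> hours;   /* halve once per elapsed hour */
    }
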
2304 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
2310 if (is_badblock(rdev, sector, sectors, &first_bad, &bad_sectors)
2311 && (rw == READ || test_bit(WriteErrorSeen, &rdev->flags)))
2313 if (sync_page_io(rdev, sector, sectors << 9, page, rw, 0, false))
2317 set_bit(WriteErrorSeen, &rdev->flags);
2318 if (!test_and_set_bit(WantReplacement, &rdev->flags))
2320 &rdev->mddev->recovery);
2323 if (!rdev_set_badblocks(rdev, sector, sectors, 0))
2324 md_error(rdev->mddev, rdev);
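
r10_sync_page_io() (lines 2304-2324) wraps a synchronous sector transfer in bad-block discipline: refuse to touch a known-bad range (on reads always, on writes once the device has seen write errors), and on a failed write record WriteErrorSeen/WantReplacement, then try to mark the range bad, escalating to md_error() only if even that bookkeeping fails. A self-contained model with the kernel helpers stubbed out as hypothetical stand-ins:

    #include <stdbool.h>
    #include <stdint.h>

    typedef uint64_t sector_t;

    enum io_dir { IO_READ, IO_WRITE };

    struct dev_s {
        bool write_error_seen;    /* models WriteErrorSeen */
        bool faulty;              /* set by the md_error() stand-in */
        sector_t bad_lo, bad_hi;  /* one bad range is enough for the model */
    };

    /* Stubs for is_badblock(), sync_page_io(), rdev_set_badblocks()
     * and md_error(). */
    static bool range_is_bad(struct dev_s *d, sector_t s, int n)
    { return s < d->bad_hi && s + (sector_t)n > d->bad_lo; }
    static bool raw_sync_io(struct dev_s *d, sector_t s, int n, enum io_dir rw)
    { (void)d; (void)s; (void)n; (void)rw; return true; }
    static bool record_bad(struct dev_s *d, sector_t s, int n)
    { d->bad_lo = s; d->bad_hi = s + (sector_t)n; return true; }
    static void fail_device(struct dev_s *d) { d->faulty = true; }

    /* 1 = success, -1 = refused (known-bad range), 0 = failed
     * (bad blocks recorded, or the whole device failed). */
    static int sync_page_io_model(struct dev_s *d, sector_t sector,
                                  int sectors, enum io_dir rw)
    {
        if (range_is_bad(d, sector, sectors) &&
            (rw == IO_READ || d->write_error_seen))
            return -1;                   /* never touch a known-bad range */
        if (raw_sync_io(d, sector, sectors, rw))
            return 1;
        if (rw == IO_WRITE)
            d->write_error_seen = true;  /* also raises WantReplacement */
        if (!record_bad(d, sector, sectors))
            fail_device(d);              /* last resort: fail the rdev */
        return 0;
    }
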
2340 struct md_rdev *rdev;
2344 /* still own a reference to this rdev, so it cannot
2347 rdev = conf->mirrors[d].rdev;
2349 if (test_bit(Faulty, &rdev->flags))
2354 check_decay_read_errors(mddev, rdev);
2355 atomic_inc(&rdev->read_errors);
2356 if (atomic_read(&rdev->read_errors) > max_read_errors) {
2358 bdevname(rdev->bdev, b);
2362 atomic_read(&rdev->read_errors), max_read_errors);
2365 md_error(mddev, rdev);
2385 rdev = rcu_dereference(conf->mirrors[d].rdev);
2386 if (rdev &&
2387 test_bit(In_sync, &rdev->flags) &&
2388 !test_bit(Faulty, &rdev->flags) &&
2389 is_badblock(rdev, r10_bio->devs[sl].addr + sect, s,
2391 atomic_inc(&rdev->nr_pending);
2393 success = sync_page_io(rdev,
2399 rdev_dec_pending(rdev, mddev);
2416 rdev = conf->mirrors[dn].rdev;
2419 rdev,
2423 md_error(mddev, rdev);
2440 rdev = rcu_dereference(conf->mirrors[d].rdev);
2441 if (!rdev ||
2442 test_bit(Faulty, &rdev->flags) ||
2443 !test_bit(In_sync, &rdev->flags))
2446 atomic_inc(&rdev->nr_pending);
2448 if (r10_sync_page_io(rdev,
2459 rdev)),
2460 bdevname(rdev->bdev, b));
2463 bdevname(rdev->bdev, b));
2465 rdev_dec_pending(rdev, mddev);
2476 rdev = rcu_dereference(conf->mirrors[d].rdev);
2477 if (!rdev ||
2478 test_bit(Faulty, &rdev->flags) ||
2479 !test_bit(In_sync, &rdev->flags))
2482 atomic_inc(&rdev->nr_pending);
2484 switch (r10_sync_page_io(rdev,
2495 choose_data_offset(r10_bio, rdev)),
2496 bdevname(rdev->bdev, b));
2499 bdevname(rdev->bdev, b));
2506 choose_data_offset(r10_bio, rdev)),
2507 bdevname(rdev->bdev, b));
2508 atomic_add(s, &rdev->corrected_errors);
2511 rdev_dec_pending(rdev, mddev);
2526 struct md_rdev *rdev = conf->mirrors[r10_bio->devs[i].devnum].rdev;
2535 * We currently own a reference to the rdev.
2544 if (rdev->badblocks.shift < 0)
2547 block_sectors = roundup(1 << rdev->badblocks.shift,
2548 bdev_logical_block_size(rdev->bdev) >> 9);
2564 choose_data_offset(r10_bio, rdev);
2565 bio_set_dev(wbio, rdev->bdev);
2570 ok = rdev_set_badblocks(rdev, wsector,
2587 struct md_rdev *rdev = r10_bio->devs[slot].rdev;
2603 else if (!test_bit(FailFast, &rdev->flags)) {
2608 md_error(mddev, rdev);
2610 rdev_dec_pending(rdev, mddev);
2625 struct md_rdev *rdev;
2631 rdev = conf->mirrors[dev].rdev;
2637 rdev,
2642 rdev,
2645 md_error(conf->mddev, rdev);
2647 rdev = conf->mirrors[dev].replacement;
2654 rdev,
2659 rdev,
2662 md_error(conf->mddev, rdev);
2671 rdev = conf->mirrors[dev].rdev;
2674 rdev,
2677 rdev_dec_pending(rdev, conf->mddev);
2681 md_error(conf->mddev, rdev);
2685 rdev_dec_pending(rdev, conf->mddev);
2688 rdev = conf->mirrors[dev].replacement;
2689 if (rdev && bio == IO_MADE_GOOD) {
2691 rdev,
2694 rdev_dec_pending(rdev, conf->mddev);
2985 struct md_rdev *rdev =
2987 if (rdev)
2988 rdev->recovery_offset = MaxSector;
3060 mrdev = rcu_dereference(mirror->rdev);
3128 struct md_rdev *rdev = rcu_dereference(
3129 conf->mirrors[j].rdev);
3130 if (rdev == NULL || test_bit(Faulty, &rdev->flags)) {
3144 struct md_rdev *rdev =
3145 rcu_dereference(conf->mirrors[d].rdev);
3148 if (!rdev ||
3149 !test_bit(In_sync, &rdev->flags))
3155 if (is_badblock(rdev, sector, max_sync,
3172 if (test_bit(FailFast, &rdev->flags))
3176 rdev->data_offset;
3177 bio_set_dev(bio, rdev->bdev);
3178 atomic_inc(&rdev->nr_pending);
3278 if (conf->mirrors[d].rdev &&
3280 &conf->mirrors[d].rdev->flags))
3340 struct md_rdev *rdev;
3348 rdev = rcu_dereference(conf->mirrors[d].rdev);
3349 if (rdev == NULL || test_bit(Faulty, &rdev->flags)) {
3354 if (is_badblock(rdev, sector, max_sync,
3366 atomic_inc(&rdev->nr_pending);
3372 if (test_bit(FailFast, &rdev->flags))
3374 bio->bi_iter.bi_sector = sector + rdev->data_offset;
3375 bio_set_dev(bio, rdev->bdev);
3378 rdev = rcu_dereference(conf->mirrors[d].replacement);
3379 if (rdev == NULL || test_bit(Faulty, &rdev->flags)) {
3383 atomic_inc(&rdev->nr_pending);
3394 if (test_bit(FailFast, &rdev->flags))
3396 bio->bi_iter.bi_sector = sector + rdev->data_offset;
3397 bio_set_dev(bio, rdev->bdev);
3406 rdev_dec_pending(conf->mirrors[d].rdev,
3748 struct md_rdev *rdev;
3791 rdev_for_each(rdev, mddev) {
3794 disk_idx = rdev->raid_disk;
3802 if (test_bit(Replacement, &rdev->flags)) {
3805 disk->replacement = rdev;
3807 if (disk->rdev)
3809 disk->rdev = rdev;
3811 diff = (rdev->new_data_offset - rdev->data_offset);
3820 disk_stack_limits(mddev->gendisk, rdev->bdev,
3821 rdev->data_offset << 9);
3825 if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
3863 if (!disk->rdev && disk->replacement) {
3865 disk->rdev = disk->replacement;
3867 clear_bit(Replacement, &disk->rdev->flags);
3870 if (!disk->rdev ||
3871 !test_bit(In_sync, &disk->rdev->flags)) {
3874 if (disk->rdev &&
3875 disk->rdev->saved_raid_disk < 0)
4003 struct md_rdev *rdev;
4026 rdev_for_each(rdev, mddev)
4027 if (rdev->raid_disk >= 0) {
4028 rdev->new_raid_disk = rdev->raid_disk * 2;
4029 rdev->sectors = size;
4131 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
4132 if (!rdev || test_bit(Faulty, &rdev->flags))
4134 else if (!test_bit(In_sync, &rdev->flags))
4147 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
4148 if (!rdev || test_bit(Faulty, &rdev->flags))
4150 else if (!test_bit(In_sync, &rdev->flags)) {
4183 struct md_rdev *rdev;
4198 rdev_for_each(rdev, mddev) {
4199 if (!test_bit(In_sync, &rdev->flags)
4200 && !test_bit(Faulty, &rdev->flags))
4202 if (rdev->raid_disk >= 0) {
4203 long long diff = (rdev->new_data_offset
4204 - rdev->data_offset);
4264 rdev_for_each(rdev, mddev) {
4265 if (rdev->raid_disk > -1 &&
4266 !test_bit(Faulty, &rdev->flags))
4267 sb = page_address(rdev->sb_page);
4291 rdev_for_each(rdev, mddev)
4292 if (rdev->raid_disk < 0 &&
4293 !test_bit(Faulty, &rdev->flags)) {
4294 if (raid10_add_disk(mddev, rdev) == 0) {
4295 if (rdev->raid_disk >=
4297 set_bit(In_sync, &rdev->flags);
4299 rdev->recovery_offset = 0;
4302 sysfs_link_rdev(mddev, rdev);
4304 } else if (rdev->raid_disk >= conf->prev.raid_disks
4305 && !test_bit(Faulty, &rdev->flags)) {
4307 set_bit(In_sync, &rdev->flags);
4343 rdev_for_each(rdev, mddev)
4344 rdev->new_data_offset = rdev->data_offset;
4430 struct md_rdev *rdev;
4538 rdev = read_balance(conf, r10_bio, &max_sectors);
4541 if (!rdev) {
4553 bio_set_dev(read_bio, rdev->bdev);
4555 + rdev->data_offset);
4576 sb = page_address(rdev->sb_page);
4607 rdev2 = rcu_dereference(conf->mirrors[d].rdev);
4693 struct md_rdev *rdev;
4696 rdev = rcu_dereference(conf->mirrors[d].replacement);
4699 rdev = rcu_dereference(conf->mirrors[d].rdev);
4702 if (!rdev || test_bit(Faulty, &rdev->flags)) {
4706 atomic_inc(&rdev->nr_pending);
4781 struct md_rdev *rdev = rcu_dereference(conf->mirrors[d].rdev);
4783 if (rdev == NULL ||
4784 test_bit(Faulty, &rdev->flags) ||
4785 !test_bit(In_sync, &rdev->flags))
4789 atomic_inc(&rdev->nr_pending);
4791 success = sync_page_io(rdev,
4796 rdev_dec_pending(rdev, mddev);
4830 struct md_rdev *rdev = NULL;
4834 rdev = conf->mirrors[d].replacement;
4835 if (!rdev) {
4837 rdev = conf->mirrors[d].rdev;
4842 md_error(mddev, rdev);
4845 rdev_dec_pending(rdev, mddev);
4877 struct md_rdev *rdev = rcu_dereference(conf->mirrors[d].rdev);
4878 if (rdev)
4879 clear_bit(In_sync, &rdev->flags);
4880 rdev = rcu_dereference(conf->mirrors[d].replacement);
4881 if (rdev)
4882 clear_bit(In_sync, &rdev->flags);