Lines matching defs:rdev — occurrences of rdev in drivers/md/raid1.c (the md RAID1 driver). The number at the start of each entry is the line number within that file.

59 static int check_and_add_serial(struct md_rdev *rdev, struct r1bio *r1_bio,
66 struct serial_in_rdev *serial = &rdev->serial[idx];
82 static void wait_for_serialization(struct md_rdev *rdev, struct r1bio *r1_bio)
84 struct mddev *mddev = rdev->mddev;
87 struct serial_in_rdev *serial = &rdev->serial[idx];
93 check_and_add_serial(rdev, r1_bio, si, idx) == 0);
96 static void remove_serial(struct md_rdev *rdev, sector_t lo, sector_t hi)
101 struct mddev *mddev = rdev->mddev;
103 struct serial_in_rdev *serial = &rdev->serial[idx];
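The three helpers above (check_and_add_serial, wait_for_serialization, remove_serial) implement per-rdev write serialization: a serialized write must wait until no in-flight write overlaps its sector range, which is why line 93 loops in wait_event() until check_and_add_serial() returns 0. Below is a minimal user-space model of the overlap test, assuming a plain linked list where the kernel actually keeps an interval tree under serial->serial_lock; all names with a _model suffix are illustrative.

#include <stdlib.h>

/* One record per in-flight serialized write (hypothetical model). */
struct serial_info {
    unsigned long long lo, hi;          /* inclusive sector range */
    struct serial_info *next;
};

/* Returns 0 and records [lo, hi] if nothing in flight overlaps it;
 * returns -1 so the caller can sleep and retry, which is what the
 * kernel's wait_for_serialization() does via wait_event(). */
static int check_and_add_serial_model(struct serial_info **head,
                                      unsigned long long lo,
                                      unsigned long long hi)
{
    struct serial_info *si;

    for (si = *head; si; si = si->next)
        if (lo <= si->hi && hi >= si->lo)
            return -1;                  /* collision: must serialize */

    si = malloc(sizeof(*si));
    if (!si)
        return -1;
    si->lo = lo;
    si->hi = hi;
    si->next = *head;
    *head = si;                         /* remove_serial() would unlink this */
    return 0;
}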
270 rdev_dec_pending(conf->mirrors[i].rdev, r1_bio->mddev);
369 struct md_rdev *rdev = conf->mirrors[r1_bio->read_disk].rdev;
378 else if (test_bit(FailFast, &rdev->flags) &&
392 test_bit(In_sync, &rdev->flags)))
399 rdev_dec_pending(rdev, conf->mddev);
406 rdev->bdev,
453 struct md_rdev *rdev = conf->mirrors[mirror].rdev;
464 set_bit(WriteErrorSeen, &rdev->flags);
465 if (!test_and_set_bit(WantReplacement, &rdev->flags))
469 if (test_bit(FailFast, &rdev->flags) &&
472 !test_bit(WriteMostly, &rdev->flags)) {
473 md_error(r1_bio->mddev, rdev);
480 if (!test_bit(Faulty, &rdev->flags))
510 * before rdev->recovery_offset, but for simplicity we don't
513 if (test_bit(In_sync, &rdev->flags) &&
514 !test_bit(Faulty, &rdev->flags))
518 if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors,
526 if (test_bit(CollisionCheck, &rdev->flags))
527 remove_serial(rdev, lo, hi);
528 if (test_bit(WriteMostly, &rdev->flags))
550 } else if (rdev->mddev->serialize_policy)
551 remove_serial(rdev, lo, hi);
553 rdev_dec_pending(rdev, conf->mddev);
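In the write-completion path above, a failed write sets WriteErrorSeen and uses test_and_set_bit(WantReplacement) so that only the first observer of the failure schedules recovery. A self-contained model of that returns-old-value idiom, with an illustrative bit number:

#include <stdatomic.h>
#include <stdbool.h>

/* Model of test_and_set_bit(): atomically set the bit and report its
 * previous value, so exactly one caller sees "was clear". */
static bool test_and_set_bit_model(int nr, _Atomic unsigned long *addr)
{
    unsigned long mask = 1UL << nr;

    return atomic_fetch_or(addr, mask) & mask;  /* old value of the bit */
}

enum { WANT_REPLACEMENT = 5 };          /* illustrative bit number */
static _Atomic unsigned long flags;

static void on_write_error(void)
{
    if (!test_and_set_bit_model(WANT_REPLACEMENT, &flags)) {
        /* first observer only: set MD_RECOVERY_NEEDED, wake md thread */
    }
}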
596 * The rdev for the device selected will have nr_pending incremented.
608 struct md_rdev *rdev;
645 rdev = rcu_dereference(conf->mirrors[disk].rdev);
647 || rdev == NULL
648 || test_bit(Faulty, &rdev->flags))
650 if (!test_bit(In_sync, &rdev->flags) &&
651 rdev->recovery_offset < this_sector + sectors)
653 if (test_bit(WriteMostly, &rdev->flags)) {
657 if (is_badblock(rdev, this_sector, sectors,
673 if (is_badblock(rdev, this_sector, sectors,
709 nonrot = bdev_nonrot(rdev->bdev);
711 pending = atomic_read(&rdev->nr_pending);
720 int opt_iosize = bdev_io_opt(rdev->bdev) >> 9;
776 rdev = rcu_dereference(conf->mirrors[best_disk].rdev);
777 if (!rdev)
779 atomic_inc(&rdev->nr_pending);
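The read_balance() lines above show the selection criteria: skip absent or Faulty mirrors, skip recovering mirrors whose recovery_offset does not cover the request, deprioritize WriteMostly, and otherwise weigh nr_pending and bdev_nonrot(). A condensed user-space model of that choice follows; the real function additionally factors in bad blocks, sequential-read affinity, and the optimal I/O size, and falls back to a WriteMostly mirror when nothing else is usable.

#include <limits.h>
#include <stdbool.h>

struct disk {
    bool present, faulty, in_sync, write_mostly, nonrot;
    unsigned long long recovery_offset;
    int nr_pending;
};

/* Pick the least-loaded usable mirror, preferring SSDs on a tie.
 * Returns -1 if no usable non-write-mostly mirror exists. */
static int read_balance_model(const struct disk *d, int ndisks,
                              unsigned long long sector, int sectors)
{
    int best = -1, best_pending = INT_MAX;

    for (int i = 0; i < ndisks; i++) {
        if (!d[i].present || d[i].faulty)
            continue;
        /* a recovering mirror is usable only below recovery_offset */
        if (!d[i].in_sync && d[i].recovery_offset < sector + sectors)
            continue;
        if (d[i].write_mostly)
            continue;               /* the kernel keeps it as a fallback */
        if (d[i].nr_pending < best_pending ||
            (d[i].nr_pending == best_pending && d[i].nonrot)) {
            best = i;
            best_pending = d[i].nr_pending;
        }
    }
    return best;
}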
1238 struct md_rdev *rdev;
1240 rdev = rcu_dereference(conf->mirrors[r1_bio->read_disk].rdev);
1241 if (rdev)
1242 snprintf(b, sizeof(b), "%pg", rdev->bdev);
1287 mirror->rdev->bdev);
1289 if (test_bit(WriteMostly, &mirror->rdev->flags) &&
1315 read_bio = bio_alloc_clone(mirror->rdev->bdev, bio, gfp,
1321 mirror->rdev->data_offset;
1324 if (test_bit(FailFast, &mirror->rdev->flags) &&
1386 * inc refcount on their rdev. Record them by setting
1401 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
1408 if (rdev && test_bit(WriteMostly, &rdev->flags))
1411 if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
1412 atomic_inc(&rdev->nr_pending);
1413 blocked_rdev = rdev;
1417 if (!rdev || test_bit(Faulty, &rdev->flags)) {
1423 atomic_inc(&rdev->nr_pending);
1424 if (test_bit(WriteErrorSeen, &rdev->flags)) {
1429 is_bad = is_badblock(rdev, r1_bio->sector, max_sectors,
1434 set_bit(BlockedBadBlocks, &rdev->flags);
1435 blocked_rdev = rdev;
1446 rdev_dec_pending(rdev, mddev);
1475 rdev_dec_pending(conf->mirrors[j].rdev, mddev);
1483 raid1_log(mddev, "wait rdev %d blocked", blocked_rdev->raid_disk);
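Lines 1401-1483 are the all-or-nothing reference pass of the write path: take nr_pending on every usable mirror, and if any turns out to be Blocked (or has an unacknowledged bad block), drop the references and wait before retrying. A sketch of that protocol, with the caveat that the kernel keeps its reference on the blocked device and lets md_wait_for_blocked_rdev() drop it, and sleeps rather than spins:

#include <stdatomic.h>
#include <stdbool.h>

struct member {
    _Atomic int nr_pending;
    _Atomic bool blocked;
    bool usable;
};

static void prepare_write_refs(struct member *m, int n)
{
    for (;;) {
        int blocker = -1;

        for (int i = 0; i < n; i++) {
            if (!m[i].usable)
                continue;
            atomic_fetch_add(&m[i].nr_pending, 1);
            if (atomic_load(&m[i].blocked)) {
                blocker = i;
                break;
            }
        }
        if (blocker < 0)
            return;                 /* all refs taken, none blocked */

        /* drop every ref taken so far, then wait for the blocker
         * (a busy-wait stands in for the kernel's sleeping wait) */
        for (int i = 0; i <= blocker; i++)
            if (m[i].usable)
                atomic_fetch_sub(&m[i].nr_pending, 1);
        while (atomic_load(&m[blocker].blocked))
            ;
    }
}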
1517 struct md_rdev *rdev = conf->mirrors[i].rdev;
1539 mbio = bio_alloc_clone(rdev->bdev,
1542 if (test_bit(CollisionCheck, &rdev->flags))
1543 wait_for_serialization(rdev, r1_bio);
1544 if (test_bit(WriteMostly, &rdev->flags))
1547 mbio = bio_alloc_clone(rdev->bdev, bio, GFP_NOIO,
1551 wait_for_serialization(rdev, r1_bio);
1556 mbio->bi_iter.bi_sector = (r1_bio->sector + rdev->data_offset);
1559 if (test_bit(FailFast, &rdev->flags) &&
1560 !test_bit(WriteMostly, &rdev->flags) &&
1570 /* flush_pending_writes() needs access to the rdev so...*/
1571 mbio->bi_bdev = (void *)rdev;
1623 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
1625 rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
1634 * @rdev: member device to fail.
1636 * The routine acknowledges &rdev failure and determines new @mddev state.
1644 * @rdev is marked as &Faulty excluding case when array is failed and
1647 static void raid1_error(struct mddev *mddev, struct md_rdev *rdev)
1654 if (test_bit(In_sync, &rdev->flags) &&
1664 set_bit(Blocked, &rdev->flags);
1665 if (test_and_clear_bit(In_sync, &rdev->flags))
1667 set_bit(Faulty, &rdev->flags);
1677 mdname(mddev), rdev->bdev,
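raid1_error() above will not fail the last In_sync mirror outright, since doing so would lose the array; current kernels mark the array broken instead and act on the error only when the fail_last_dev policy allows it. A one-function model of that guard, under that assumption about recent behavior:

#include <stdbool.h>

/* Failing the only remaining In_sync member is refused unless the
 * fail_last_dev policy is set; the array is marked broken instead. */
static bool may_fail_member(bool member_in_sync, int in_sync_members,
                            bool fail_last_dev)
{
    if (member_in_sync && in_sync_members == 1 && !fail_last_dev)
        return false;
    return true;
}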
1695 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
1696 if (rdev)
1698 i, !test_bit(In_sync, &rdev->flags),
1699 !test_bit(Faulty, &rdev->flags),
1700 rdev->bdev);
1733 struct md_rdev *rdev = conf->mirrors[i].rdev;
1734 struct md_rdev *repl = conf->mirrors[conf->raid_disks + i].rdev;
1741 if (!rdev ||
1742 !test_and_clear_bit(In_sync, &rdev->flags))
1744 if (rdev) {
1749 set_bit(Faulty, &rdev->flags);
1751 rdev->sysfs_state);
1754 if (rdev
1755 && rdev->recovery_offset == MaxSector
1756 && !test_bit(Faulty, &rdev->flags)
1757 && !test_and_set_bit(In_sync, &rdev->flags)) {
1759 sysfs_notify_dirent_safe(rdev->sysfs_state);
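The raid1_spare_active() lines above promote members that have finished recovering (recovery_offset == MaxSector, not Faulty) to In_sync, and handle a completed replacement by failing the original. A model of the promotion count, whose return value the caller subtracts from mddev->degraded:

#include <stdbool.h>

struct member_state {
    bool faulty, in_sync, fully_recovered;
};

static int spare_active_model(struct member_state *m, int n)
{
    int promoted = 0;

    for (int i = 0; i < n; i++)
        if (!m[i].faulty && m[i].fully_recovered && !m[i].in_sync) {
            m[i].in_sync = true;    /* lines 1756-1759 */
            promoted++;
        }
    return promoted;                /* caller: mddev->degraded -= promoted */
}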
1769 static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
1781 if (md_integrity_add_rdev(rdev, mddev))
1784 if (rdev->raid_disk >= 0)
1785 first = last = rdev->raid_disk;
1788 * find the disk ... but prefer rdev->saved_raid_disk
1791 if (rdev->saved_raid_disk >= 0 &&
1792 rdev->saved_raid_disk >= first &&
1793 rdev->saved_raid_disk < conf->raid_disks &&
1794 conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
1795 first = last = rdev->saved_raid_disk;
1799 if (!p->rdev) {
1801 disk_stack_limits(mddev->gendisk, rdev->bdev,
1802 rdev->data_offset << 9);
1805 rdev->raid_disk = mirror;
1810 if (rdev->saved_raid_disk < 0)
1812 rcu_assign_pointer(p->rdev, rdev);
1815 if (test_bit(WantReplacement, &p->rdev->flags) &&
1816 p[conf->raid_disks].rdev == NULL && repl_slot < 0)
1823 clear_bit(In_sync, &rdev->flags);
1824 set_bit(Replacement, &rdev->flags);
1825 rdev->raid_disk = repl_slot;
1828 rcu_assign_pointer(p[conf->raid_disks].rdev, rdev);
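raid1_add_disk() above prefers to put a returning disk back into its former slot: lines 1791-1795 take rdev->saved_raid_disk when that slot is still free, so bitmap-based recovery can resume; a disk with no remembered slot forces a full resync instead. A small model of the slot choice (pick_slot is a hypothetical name):

#include <stddef.h>

/* Prefer the disk's remembered slot if it is still free; otherwise
 * take the first free slot.  Returns -1 when the array is full. */
static int pick_slot(void *const *slots, int nslots, int saved)
{
    if (saved >= 0 && saved < nslots && slots[saved] == NULL)
        return saved;
    for (int i = 0; i < nslots; i++)
        if (slots[i] == NULL)
            return i;
    return -1;
}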
1835 static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
1839 int number = rdev->raid_disk;
1845 if (rdev != p->rdev)
1849 if (rdev == p->rdev) {
1850 if (test_bit(In_sync, &rdev->flags) ||
1851 atomic_read(&rdev->nr_pending)) {
1858 if (!test_bit(Faulty, &rdev->flags) &&
1864 p->rdev = NULL;
1865 if (!test_bit(RemoveSynchronized, &rdev->flags)) {
1867 if (atomic_read(&rdev->nr_pending)) {
1870 p->rdev = rdev;
1874 if (conf->mirrors[conf->raid_disks + number].rdev) {
1880 conf->mirrors[conf->raid_disks + number].rdev;
1885 * as NULL, avoiding rdev NULL pointer
1894 p->rdev = repl;
1895 conf->mirrors[conf->raid_disks + number].rdev = NULL;
1899 clear_bit(WantReplacement, &rdev->flags);
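The remove path above (lines 1864-1870) closes a race with lockless readers: the slot is set to NULL, readers are waited out, and nr_pending is re-checked; if an I/O grabbed a reference in the window, the pointer is restored and the removal reports busy. A user-space model of the same sequence, with synchronize_readers() standing in for synchronize_rcu():

#include <stdatomic.h>
#include <stddef.h>

struct rdev_model {
    _Atomic int nr_pending;
};

/* Stand-in for synchronize_rcu(): really waits until every lockless
 * reader that could still see the old pointer has finished. */
static void synchronize_readers(void)
{
}

static int remove_disk_model(struct rdev_model *_Atomic *slot)
{
    struct rdev_model *rdev = atomic_load(slot);

    if (!rdev || atomic_load(&rdev->nr_pending))
        return -1;                  /* busy: I/O in flight */
    atomic_store(slot, NULL);       /* unpublish */
    synchronize_readers();
    if (atomic_load(&rdev->nr_pending)) {
        atomic_store(slot, rdev);   /* lost the race: restore */
        return -1;
    }
    return 0;                       /* rdev may now be freed */
}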
1964 struct md_rdev *rdev = conf->mirrors[find_bio_disk(r1_bio, bio)].rdev;
1968 set_bit(WriteErrorSeen, &rdev->flags);
1969 if (!test_and_set_bit(WantReplacement, &rdev->flags))
1973 } else if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors,
1975 !is_badblock(conf->mirrors[r1_bio->read_disk].rdev,
1985 static int r1_sync_page_io(struct md_rdev *rdev, sector_t sector,
1988 if (sync_page_io(rdev, sector, sectors << 9, page, rw, false))
1992 set_bit(WriteErrorSeen, &rdev->flags);
1994 &rdev->flags))
1996 rdev->mddev->recovery);
1999 if (!rdev_set_badblocks(rdev, sector, sectors, 0))
2000 md_error(rdev->mddev, rdev);
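r1_sync_page_io() above escalates in two steps: a failed write first becomes a recorded bad block, and the member is failed wholesale only when even the badblock table cannot absorb the error (lines 1999-2000). A condensed model; the three extern helpers are stand-ins for sync_page_io(), rdev_set_badblocks(), and md_error():

#include <stdbool.h>

extern bool do_sync_io(unsigned long long sector, int sectors, bool write);
extern bool record_badblock(unsigned long long sector, int sectors);
extern void fail_device(void);

static bool sync_page_io_model(unsigned long long sector, int sectors,
                               bool write)
{
    if (do_sync_io(sector, sectors, write))
        return true;                /* I/O succeeded */
    if (!record_badblock(sector, sectors))
        fail_device();              /* cannot even remember the error */
    return false;
}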
2024 struct md_rdev *rdev;
2026 rdev = conf->mirrors[r1_bio->read_disk].rdev;
2027 if (test_bit(FailFast, &rdev->flags)) {
2030 md_error(mddev, rdev);
2031 if (test_bit(Faulty, &rdev->flags))
2052 rdev = conf->mirrors[d].rdev;
2053 if (sync_page_io(rdev, sect, s<<9,
2076 rdev = conf->mirrors[d].rdev;
2077 if (!rdev || test_bit(Faulty, &rdev->flags))
2079 if (!rdev_set_badblocks(rdev, sect, s, 0))
2105 rdev = conf->mirrors[d].rdev;
2106 if (r1_sync_page_io(rdev, sect, s,
2110 rdev_dec_pending(rdev, mddev);
2120 rdev = conf->mirrors[d].rdev;
2121 if (r1_sync_page_io(rdev, sect, s,
2124 atomic_add(s, &rdev->corrected_errors);
2160 bio_reset(b, conf->mirrors[i].rdev->bdev, REQ_OP_READ);
2163 conf->mirrors[i].rdev->data_offset;
2175 rdev_dec_pending(conf->mirrors[primary].rdev, mddev);
2213 rdev_dec_pending(conf->mirrors[i].rdev, mddev);
2247 if (test_bit(Faulty, &conf->mirrors[i].rdev->flags)) {
2253 if (test_bit(FailFast, &conf->mirrors[i].rdev->flags))
2258 md_sync_acct(conf->mirrors[i].rdev->bdev, bio_sectors(wbio));
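The process_checks()/sync_request_write() lines above belong to the "check"/"repair" path: the same range is read from every In_sync mirror, each copy is compared against the primary, and in repair mode the mismatching mirrors are rewritten from it. A sketch of the comparison step, modeling pages as flat buffers (the kernel compares bio pages with memcmp):

#include <stdbool.h>
#include <string.h>

/* Returns how many mirrors disagree with the primary copy; in repair
 * mode each of those would be rewritten from the primary. */
static int count_mismatches(const unsigned char *primary,
                            const unsigned char *const *others,
                            int n, size_t len)
{
    int bad = 0;

    for (int i = 0; i < n; i++)
        if (memcmp(primary, others[i], len) != 0)
            bad++;
    return bad;
}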
2283 struct md_rdev *rdev;
2293 rdev = rcu_dereference(conf->mirrors[d].rdev);
2294 if (rdev &&
2295 (test_bit(In_sync, &rdev->flags) ||
2296 (!test_bit(Faulty, &rdev->flags) &&
2297 rdev->recovery_offset >= sect + s)) &&
2298 is_badblock(rdev, sect, s,
2300 atomic_inc(&rdev->nr_pending);
2302 if (sync_page_io(rdev, sect, s<<9,
2305 rdev_dec_pending(rdev, mddev);
2317 struct md_rdev *rdev = conf->mirrors[read_disk].rdev;
2318 if (!rdev_set_badblocks(rdev, sect, s, 0))
2319 md_error(mddev, rdev);
2329 rdev = rcu_dereference(conf->mirrors[d].rdev);
2330 if (rdev &&
2331 !test_bit(Faulty, &rdev->flags)) {
2332 atomic_inc(&rdev->nr_pending);
2334 r1_sync_page_io(rdev, sect, s,
2336 rdev_dec_pending(rdev, mddev);
2346 rdev = rcu_dereference(conf->mirrors[d].rdev);
2347 if (rdev &&
2348 !test_bit(Faulty, &rdev->flags)) {
2349 atomic_inc(&rdev->nr_pending);
2351 if (r1_sync_page_io(rdev, sect, s,
2353 atomic_add(s, &rdev->corrected_errors);
2357 rdev->data_offset),
2358 rdev->bdev);
2360 rdev_dec_pending(rdev, mddev);
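fix_read_error() above works over the mirrors in three passes per bad range: find any mirror that still reads the range cleanly, rewrite the range on the others, then re-read to verify, crediting corrected_errors on success. A simplified model; the kernel walks the mirrors in a rotated order starting at read_disk and marks the range bad when no good copy exists (lines 2317-2319):

#include <stdbool.h>

extern bool read_copy(int disk, unsigned long long sect, int s);
extern bool write_copy(int disk, unsigned long long sect, int s);

static bool fix_range_model(int ndisks, int bad_disk,
                            unsigned long long sect, int s)
{
    int source = -1;

    for (int d = 0; d < ndisks && source < 0; d++)  /* pass 1: find */
        if (d != bad_disk && read_copy(d, sect, s))
            source = d;
    if (source < 0)
        return false;       /* no good copy: mark the range bad */
    for (int d = 0; d < ndisks; d++)                /* pass 2: rewrite */
        if (d != source)
            write_copy(d, sect, s);
    for (int d = 0; d < ndisks; d++)                /* pass 3: verify */
        if (d != source && !read_copy(d, sect, s))
            return false;
    return true;
}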
2373 struct md_rdev *rdev = conf->mirrors[i].rdev;
2383 * We currently own a reference on the rdev.
2392 if (rdev->badblocks.shift < 0)
2395 block_sectors = roundup(1 << rdev->badblocks.shift,
2396 bdev_logical_block_size(rdev->bdev) >> 9);
2409 wbio = bio_alloc_clone(rdev->bdev,
2413 wbio = bio_alloc_clone(rdev->bdev, r1_bio->master_bio,
2422 wbio->bi_iter.bi_sector += rdev->data_offset;
2426 ok = rdev_set_badblocks(rdev, sector,
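narrow_write_error() retries a failed write in badblock-sized pieces; lines 2395-2396 compute that unit as 1 << badblocks.shift sectors, rounded up to the device's logical block size in sectors. A worked example of the arithmetic:

#include <stdio.h>

#define roundup(x, y) ((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
    int shift = 3;                          /* badblocks unit: 8 sectors */
    int logical_block_sectors = 4096 >> 9;  /* 4 KiB blocks = 8 sectors */
    int block_sectors = roundup(1 << shift, logical_block_sectors);

    printf("retry unit: %d sectors\n", block_sectors);  /* prints 8 */
    return 0;
}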
2443 struct md_rdev *rdev = conf->mirrors[m].rdev;
2449 rdev_clear_badblocks(rdev, r1_bio->sector, s, 0);
2453 if (!rdev_set_badblocks(rdev, r1_bio->sector, s, 0))
2454 md_error(conf->mddev, rdev);
2468 struct md_rdev *rdev = conf->mirrors[m].rdev;
2469 rdev_clear_badblocks(rdev,
2472 rdev_dec_pending(rdev, conf->mddev);
2481 conf->mirrors[m].rdev);
2485 rdev_dec_pending(conf->mirrors[m].rdev,
2511 struct md_rdev *rdev;
2528 rdev = conf->mirrors[r1_bio->read_disk].rdev;
2530 && !test_bit(FailFast, &rdev->flags)) {
2535 } else if (mddev->ro == 0 && test_bit(FailFast, &rdev->flags)) {
2536 md_error(mddev, rdev);
2541 rdev_dec_pending(rdev, conf->mddev);
2761 struct md_rdev *rdev;
2764 rdev = rcu_dereference(conf->mirrors[i].rdev);
2765 if (rdev == NULL ||
2766 test_bit(Faulty, &rdev->flags)) {
2769 } else if (!test_bit(In_sync, &rdev->flags)) {
2778 if (is_badblock(rdev, sector_nr, good_sectors,
2790 if (test_bit(WriteMostly, &rdev->flags)) {
2800 } else if (!test_bit(WriteErrorSeen, &rdev->flags) &&
2814 if (rdev && bio->bi_end_io) {
2815 atomic_inc(&rdev->nr_pending);
2816 bio->bi_iter.bi_sector = sector_nr + rdev->data_offset;
2817 bio_set_dev(bio, rdev->bdev);
2818 if (test_bit(FailFast, &rdev->flags))
2834 struct md_rdev *rdev = conf->mirrors[i].rdev;
2835 ok = rdev_set_badblocks(rdev, sector_nr,
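During resync, raid1_sync_request() classifies each mirror (lines 2761-2818): a missing or Faulty member leaves the range degraded, an out-of-sync member becomes a write target, and an In_sync member serves as the read source (with FailFast reads where supported). A condensed model of the resync-case classification; check/repair additionally reads every In_sync mirror:

#include <stdbool.h>

enum role { ROLE_NONE, ROLE_READ, ROLE_WRITE };

struct sync_disk {
    bool present, faulty, in_sync;
    enum role role;
};

/* Returns true if the range stays degraded (a member is missing). */
static bool classify_for_sync(struct sync_disk *d, int n)
{
    bool degraded = false;
    int read_src = -1;

    for (int i = 0; i < n; i++) {
        d[i].role = ROLE_NONE;
        if (!d[i].present || d[i].faulty) {
            degraded = true;
        } else if (!d[i].in_sync) {
            d[i].role = ROLE_WRITE;
        } else if (read_src < 0) {
            d[i].role = ROLE_READ;
            read_src = i;
        }
    }
    return degraded;
}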
2973 struct md_rdev *rdev;
3027 rdev_for_each(rdev, mddev) {
3028 int disk_idx = rdev->raid_disk;
3032 if (test_bit(Replacement, &rdev->flags))
3037 if (disk->rdev)
3039 disk->rdev = rdev;
3060 disk[conf->raid_disks].rdev) {
3062 if (!disk->rdev) {
3066 disk->rdev =
3067 disk[conf->raid_disks].rdev;
3068 disk[conf->raid_disks].rdev = NULL;
3069 } else if (!test_bit(In_sync, &disk->rdev->flags))
3074 if (!disk->rdev ||
3075 !test_bit(In_sync, &disk->rdev->flags)) {
3077 if (disk->rdev &&
3078 (disk->rdev->saved_raid_disk < 0))
3112 struct md_rdev *rdev;
3143 rdev_for_each(rdev, mddev) {
3146 disk_stack_limits(mddev->gendisk, rdev->bdev,
3147 rdev->data_offset << 9);
3152 if (conf->mirrors[i].rdev == NULL ||
3153 !test_bit(In_sync, &conf->mirrors[i].rdev->flags) ||
3154 test_bit(Faulty, &conf->mirrors[i].rdev->flags))
3285 if (conf->mirrors[d].rdev)
3319 struct md_rdev *rdev = conf->mirrors[d].rdev;
3320 if (rdev && rdev->raid_disk != d2) {
3321 sysfs_unlink_rdev(mddev, rdev);
3322 rdev->raid_disk = d2;
3323 sysfs_unlink_rdev(mddev, rdev);
3324 if (sysfs_link_rdev(mddev, rdev))
3326 mdname(mddev), rdev->raid_disk);
3328 if (rdev)
3329 newmirrors[d2++].rdev = rdev;
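raid1_reshape() finishes by compacting the surviving members into the new mirror array, renumbering each rdev->raid_disk and relinking its sysfs entry (lines 3319-3329). Stripped of the sysfs handling, the copy loop reduces to:

struct slot {
    void *rdev;
};

/* Pack surviving members to the front of the new array; the kernel
 * also renumbers rdev->raid_disk to the new index d2 as it goes. */
static int compact_mirrors(struct slot *old, int n, struct slot *dst)
{
    int d2 = 0;

    for (int d = 0; d < n; d++)
        if (old[d].rdev)
            dst[d2++].rdev = old[d].rdev;
    return d2;      /* members carried into the new geometry */
}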