Lines Matching defs:rdev
95 * Default number of read corrections we'll attempt on an rdev
129 static void rdev_uninit_serial(struct md_rdev *rdev)
131 if (!test_and_clear_bit(CollisionCheck, &rdev->flags))
134 kvfree(rdev->serial);
135 rdev->serial = NULL;
140 struct md_rdev *rdev;
142 rdev_for_each(rdev, mddev)
143 rdev_uninit_serial(rdev);
146 static int rdev_init_serial(struct md_rdev *rdev)
152 if (test_bit(CollisionCheck, &rdev->flags))
168 rdev->serial = serial;
169 set_bit(CollisionCheck, &rdev->flags);
176 struct md_rdev *rdev;
179 rdev_for_each(rdev, mddev) {
180 ret = rdev_init_serial(rdev);
193 * rdev needs to enable the serialization feature if it meets these conditions:
197 static int rdev_need_serial(struct md_rdev *rdev)
199 return (rdev && rdev->mddev->bitmap_info.max_write_behind > 0 &&
200 rdev->bdev->bd_disk->queue->nr_hw_queues != 1 &&
201 test_bit(WriteMostly, &rdev->flags));
205 * Init resources for rdev(s), then create serial_info_pool if:
206 * 1. rdev is the first device that returns true from rdev_need_serial.
207 * 2. rdev is NULL, meaning we want to enable serialization for all rdevs.
209 void mddev_create_serial_pool(struct mddev *mddev, struct md_rdev *rdev,
214 if (rdev && !rdev_need_serial(rdev) &&
215 !test_bit(CollisionCheck, &rdev->flags))
221 if (!rdev)
224 ret = rdev_init_serial(rdev);
248 * Free resources from rdev(s), and destroy serial_info_pool under these conditions:
249 * 1. rdev is the last device flagged with CollisionCheck.
251 * 3. For the disable policy, the pool is destroyed only when no rdev needs it.
253 void mddev_destroy_serial_pool(struct mddev *mddev, struct md_rdev *rdev,
256 if (rdev && !test_bit(CollisionCheck, &rdev->flags))
266 if (!rdev) {
272 } else if (temp != rdev &&
277 if (rdev)
278 rdev_uninit_serial(rdev);
543 struct md_rdev *rdev = bio->bi_private;
544 struct mddev *mddev = rdev->mddev;
548 rdev_dec_pending(rdev, mddev);
561 struct md_rdev *rdev;
567 rdev_for_each_rcu(rdev, mddev)
568 if (rdev->raid_disk >= 0 &&
569 !test_bit(Faulty, &rdev->flags)) {
575 atomic_inc(&rdev->nr_pending);
576 atomic_inc(&rdev->nr_pending);
580 bi->bi_private = rdev;
581 bio_set_dev(bi, rdev->bdev);
586 rdev_dec_pending(rdev, mddev);
864 struct md_rdev *rdev;
866 rdev_for_each_rcu(rdev, mddev)
867 if (rdev->desc_nr == nr)
868 return rdev;
876 struct md_rdev *rdev;
878 rdev_for_each(rdev, mddev)
879 if (rdev->bdev->bd_dev == dev)
880 return rdev;
887 struct md_rdev *rdev;
889 rdev_for_each_rcu(rdev, mddev)
890 if (rdev->bdev->bd_dev == dev)
891 return rdev;
910 static inline sector_t calc_dev_sboffset(struct md_rdev *rdev)
912 sector_t num_sectors = i_size_read(rdev->bdev->bd_inode) / 512;
916 static int alloc_disk_sb(struct md_rdev *rdev)
918 rdev->sb_page = alloc_page(GFP_KERNEL);
919 if (!rdev->sb_page)
924 void md_rdev_clear(struct md_rdev *rdev)
926 if (rdev->sb_page) {
927 put_page(rdev->sb_page);
928 rdev->sb_loaded = 0;
929 rdev->sb_page = NULL;
930 rdev->sb_start = 0;
931 rdev->sectors = 0;
933 if (rdev->bb_page) {
934 put_page(rdev->bb_page);
935 rdev->bb_page = NULL;
937 badblocks_exit(&rdev->badblocks);
943 struct md_rdev *rdev = bio->bi_private;
944 struct mddev *mddev = rdev->mddev;
949 md_error(mddev, rdev);
950 if (!test_bit(Faulty, &rdev->flags)
953 set_bit(LastDev, &rdev->flags);
956 clear_bit(LastDev, &rdev->flags);
960 rdev_dec_pending(rdev, mddev);
966 void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
969 /* write first size bytes of page to sector of rdev
981 if (test_bit(Faulty, &rdev->flags))
986 atomic_inc(&rdev->nr_pending);
988 bio_set_dev(bio, rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev);
991 bio->bi_private = rdev;
995 test_bit(FailFast, &rdev->flags) &&
996 !test_bit(LastDev, &rdev->flags))
1013 int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
1016 struct bio *bio = md_bio_alloc_sync(rdev->mddev);
1019 if (metadata_op && rdev->meta_bdev)
1020 bio_set_dev(bio, rdev->meta_bdev);
1022 bio_set_dev(bio, rdev->bdev);
1025 bio->bi_iter.bi_sector = sector + rdev->sb_start;
1026 else if (rdev->mddev->reshape_position != MaxSector &&
1027 (rdev->mddev->reshape_backwards ==
1028 (sector >= rdev->mddev->reshape_position)))
1029 bio->bi_iter.bi_sector = sector + rdev->new_data_offset;
1031 bio->bi_iter.bi_sector = sector + rdev->data_offset;
1042 static int read_disk_sb(struct md_rdev *rdev, int size)
1046 if (rdev->sb_loaded)
1049 if (!sync_page_io(rdev, 0, size, rdev->sb_page, REQ_OP_READ, 0, true))
1051 rdev->sb_loaded = 1;
1056 bdevname(rdev->bdev,b));
1158 * Update the superblock for rdev with data in mddev
1166 int (*load_super)(struct md_rdev *rdev,
1171 struct md_rdev *rdev);
1173 struct md_rdev *rdev);
1174 unsigned long long (*rdev_size_change)(struct md_rdev *rdev,
1176 int (*allow_new_offset)(struct md_rdev *rdev,
1201 static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
1214 rdev->sb_start = calc_dev_sboffset(rdev);
1216 ret = read_disk_sb(rdev, MD_SB_BYTES);
1222 bdevname(rdev->bdev, b);
1223 sb = page_address(rdev->sb_page);
1246 rdev->preferred_minor = sb->md_minor;
1247 rdev->data_offset = 0;
1248 rdev->new_data_offset = 0;
1249 rdev->sb_size = MD_SB_BYTES;
1250 rdev->badblocks.shift = -1;
1253 rdev->desc_nr = -1;
1255 rdev->desc_nr = sb->this_disk.number;
1259 (rdev->desc_nr >= 0 &&
1260 rdev->desc_nr < MD_SB_DISKS &&
1261 sb->disks[rdev->desc_nr].state &
1291 rdev->sectors = rdev->sb_start;
1296 if ((u64)rdev->sectors >= (2ULL << 32) && sb->level >= 1)
1297 rdev->sectors = (sector_t)(2ULL << 32) - 2;
1299 if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1)
1311 static int super_90_validate(struct mddev *mddev, struct md_rdev *freshest, struct md_rdev *rdev)
1314 mdp_super_t *sb = page_address(rdev->sb_page);
1317 rdev->raid_disk = -1;
1318 clear_bit(Faulty, &rdev->flags);
1319 clear_bit(In_sync, &rdev->flags);
1320 clear_bit(Bitmap_sync, &rdev->flags);
1321 clear_bit(WriteMostly, &rdev->flags);
1391 if (sb->disks[rdev->desc_nr].state & (
1402 set_bit(Bitmap_sync, &rdev->flags);
1410 desc = sb->disks + rdev->desc_nr;
1413 set_bit(Faulty, &rdev->flags);
1416 set_bit(In_sync, &rdev->flags);
1417 rdev->raid_disk = desc->raid_disk;
1418 rdev->saved_raid_disk = desc->raid_disk;
1424 rdev->recovery_offset = 0;
1425 rdev->raid_disk = desc->raid_disk;
1429 set_bit(WriteMostly, &rdev->flags);
1431 set_bit(FailFast, &rdev->flags);
1433 set_bit(In_sync, &rdev->flags);
1440 static void super_90_sync(struct mddev *mddev, struct md_rdev *rdev)
1446 /* make rdev->sb match mddev data..
1459 rdev->sb_size = MD_SB_BYTES;
1461 sb = page_address(rdev->sb_page);
1577 sb->this_disk = sb->disks[rdev->desc_nr];
1585 super_90_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
1587 if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
1589 if (rdev->mddev->bitmap_info.offset)
1591 rdev->sb_start = calc_dev_sboffset(rdev);
1592 if (!num_sectors || num_sectors > rdev->sb_start)
1593 num_sectors = rdev->sb_start;
1597 if ((u64)num_sectors >= (2ULL << 32) && rdev->mddev->level >= 1)
1600 md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
1601 rdev->sb_page);
1602 } while (md_super_wait(rdev->mddev) < 0);
1607 super_90_allow_new_offset(struct md_rdev *rdev, unsigned long long new_offset)
1639 static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
1659 sb_start = i_size_read(rdev->bdev->bd_inode) >> 9;
1672 rdev->sb_start = sb_start;
1677 ret = read_disk_sb(rdev, 4096);
1680 sb = page_address(rdev->sb_page);
1685 le64_to_cpu(sb->super_offset) != rdev->sb_start ||
1691 bdevname(rdev->bdev,b));
1696 bdevname(rdev->bdev,b));
1705 rdev->preferred_minor = 0xffff;
1706 rdev->data_offset = le64_to_cpu(sb->data_offset);
1707 rdev->new_data_offset = rdev->data_offset;
1710 rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
1711 atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
1713 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
1714 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
1715 if (rdev->sb_size & bmask)
1716 rdev->sb_size = (rdev->sb_size | bmask) + 1;
1719 && rdev->data_offset < sb_start + (rdev->sb_size/512))
1722 && rdev->new_data_offset < sb_start + (rdev->sb_size/512))
1726 rdev->desc_nr = -1;
1728 rdev->desc_nr = le32_to_cpu(sb->dev_number);
1730 if (!rdev->bb_page) {
1731 rdev->bb_page = alloc_page(GFP_KERNEL);
1732 if (!rdev->bb_page)
1736 rdev->badblocks.count == 0) {
1751 if (!sync_page_io(rdev, bb_sector, sectors << 9,
1752 rdev->bb_page, REQ_OP_READ, 0, true))
1754 bbp = (__le64 *)page_address(rdev->bb_page);
1755 rdev->badblocks.shift = sb->bblog_shift;
1764 if (badblocks_set(&rdev->badblocks, sector, count, 1))
1768 rdev->badblocks.shift = 0;
1772 rdev->ppl.offset = (__s16)le16_to_cpu(sb->ppl.offset);
1773 rdev->ppl.size = le16_to_cpu(sb->ppl.size);
1774 rdev->ppl.sector = rdev->sb_start + rdev->ppl.offset;
1783 (rdev->desc_nr >= 0 &&
1784 rdev->desc_nr < le32_to_cpu(sb->max_dev) &&
1785 (le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < MD_DISK_ROLE_MAX ||
1786 le16_to_cpu(sb->dev_roles[rdev->desc_nr]) == MD_DISK_ROLE_JOURNAL)))
1803 bdevname(rdev->bdev,b),
1816 sectors = (i_size_read(rdev->bdev->bd_inode) >> 9);
1817 sectors -= rdev->data_offset;
1819 sectors = rdev->sb_start;
1822 rdev->sectors = le64_to_cpu(sb->data_size);
1826 static int super_1_validate(struct mddev *mddev, struct md_rdev *freshest, struct md_rdev *rdev)
1828 struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
1831 rdev->raid_disk = -1;
1832 clear_bit(Faulty, &rdev->flags);
1833 clear_bit(In_sync, &rdev->flags);
1834 clear_bit(Bitmap_sync, &rdev->flags);
1835 clear_bit(WriteMostly, &rdev->flags);
1926 if (rdev->desc_nr >= 0 &&
1927 rdev->desc_nr < le32_to_cpu(sb->max_dev) &&
1928 (le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < MD_DISK_ROLE_MAX ||
1929 le16_to_cpu(sb->dev_roles[rdev->desc_nr]) == MD_DISK_ROLE_JOURNAL))
1939 set_bit(Bitmap_sync, &rdev->flags);
1947 if (rdev->desc_nr < 0 ||
1948 rdev->desc_nr >= le32_to_cpu(sb->max_dev)) {
1950 rdev->desc_nr = -1;
1955 * It could happen that our rdev was marked as Faulty, and all other
1960 * If we allow current rdev without consulting the freshest superblock,
1963 * highest, otherwise, this rdev would not be allowed into array;
1969 if (rdev->desc_nr >= freshest_max_dev) {
1971 pr_warn("md: %s: rdev[%pg]: desc_nr(%d) >= freshest(%pg)->sb->max_dev(%u)\n",
1972 mdname(mddev), rdev->bdev, rdev->desc_nr,
1977 role = le16_to_cpu(freshest_sb->dev_roles[rdev->desc_nr]);
1978 pr_debug("md: %s: rdev[%pg]: role=%d(0x%x) according to freshest %pg\n",
1979 mdname(mddev), rdev->bdev, role, role, freshest->bdev);
1981 role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
1987 set_bit(Faulty, &rdev->flags);
1995 set_bit(Journal, &rdev->flags);
1996 rdev->journal_tail = le64_to_cpu(sb->journal_tail);
1997 rdev->raid_disk = 0;
2000 rdev->saved_raid_disk = role;
2003 rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
2006 rdev->saved_raid_disk = -1;
2014 set_bit(In_sync, &rdev->flags);
2016 rdev->raid_disk = role;
2020 set_bit(WriteMostly, &rdev->flags);
2022 set_bit(FailFast, &rdev->flags);
2024 set_bit(Replacement, &rdev->flags);
2026 set_bit(In_sync, &rdev->flags);
2031 static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
2036 /* make rdev->sb match mddev and rdev data. */
2038 sb = page_address(rdev->sb_page);
2054 sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
2061 if (test_bit(FailFast, &rdev->flags))
2066 if (test_bit(WriteMostly, &rdev->flags))
2070 sb->data_offset = cpu_to_le64(rdev->data_offset);
2071 sb->data_size = cpu_to_le64(rdev->sectors);
2078 if (rdev->raid_disk >= 0 && !test_bit(Journal, &rdev->flags) &&
2079 !test_bit(In_sync, &rdev->flags)) {
2083 cpu_to_le64(rdev->recovery_offset);
2084 if (rdev->saved_raid_disk >= 0 && mddev->bitmap)
2089 if (test_bit(Journal, &rdev->flags))
2090 sb->journal_tail = cpu_to_le64(rdev->journal_tail);
2091 if (test_bit(Replacement, &rdev->flags))
2106 if (rdev->new_data_offset != rdev->data_offset) {
2109 sb->new_offset = cpu_to_le32((__u32)(rdev->new_data_offset
2110 - rdev->data_offset));
2117 if (rdev->badblocks.count == 0)
2121 md_error(mddev, rdev);
2123 struct badblocks *bb = &rdev->badblocks;
2124 __le64 *bbp = (__le64 *)page_address(rdev->bb_page);
2145 bb->sector = (rdev->sb_start +
2159 rdev->sb_size = max_dev * 2 + 256;
2160 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
2161 if (rdev->sb_size & bmask)
2162 rdev->sb_size = (rdev->sb_size | bmask) + 1;
2178 sb->ppl.offset = cpu_to_le16(rdev->ppl.offset);
2179 sb->ppl.size = cpu_to_le16(rdev->ppl.size);
2218 super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
2222 if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
2224 if (rdev->data_offset != rdev->new_data_offset)
2226 if (rdev->sb_start < rdev->data_offset) {
2228 max_sectors = i_size_read(rdev->bdev->bd_inode) >> 9;
2229 max_sectors -= rdev->data_offset;
2232 } else if (rdev->mddev->bitmap_info.offset) {
2238 sector_t dev_size = i_size_read(rdev->bdev->bd_inode) >> 9;
2253 rdev->sb_start = sb_start;
2255 sb = page_address(rdev->sb_page);
2257 sb->super_offset = cpu_to_le64(rdev->sb_start);
2260 md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
2261 rdev->sb_page);
2262 } while (md_super_wait(rdev->mddev) < 0);
2268 super_1_allow_new_offset(struct md_rdev *rdev,
2273 if (new_offset >= rdev->data_offset)
2278 if (rdev->mddev->minor_version == 0)
2287 if (rdev->sb_start + (32+4)*2 > new_offset)
2289 bitmap = rdev->mddev->bitmap;
2290 if (bitmap && !rdev->mddev->bitmap_info.file &&
2291 rdev->sb_start + rdev->mddev->bitmap_info.offset +
2294 if (rdev->badblocks.sector + rdev->badblocks.size > new_offset)
2321 static void sync_super(struct mddev *mddev, struct md_rdev *rdev)
2324 mddev->sync_super(mddev, rdev);
2330 super_types[mddev->major_version].sync_super(mddev, rdev);
2335 struct md_rdev *rdev, *rdev2;
2338 rdev_for_each_rcu(rdev, mddev1) {
2339 if (test_bit(Faulty, &rdev->flags) ||
2340 test_bit(Journal, &rdev->flags) ||
2341 rdev->raid_disk == -1)
2348 if (rdev->bdev->bd_disk == rdev2->bdev->bd_disk) {
2369 struct md_rdev *rdev, *reference = NULL;
2375 rdev_for_each(rdev, mddev) {
2377 if (test_bit(Faulty, &rdev->flags))
2379 if (rdev->raid_disk < 0)
2382 /* Use the first rdev as the reference */
2383 reference = rdev;
2386 /* does this rdev's profile match the reference profile? */
2388 rdev->bdev->bd_disk) < 0)
2411 * Attempt to add an rdev, but only if it is consistent with the current
2414 int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev)
2427 if (blk_integrity_compare(mddev->gendisk, rdev->bdev->bd_disk) != 0) {
2429 mdname(mddev), bdevname(rdev->bdev, name));
2437 static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev)
2444 if (find_rdev(mddev, rdev->bdev->bd_dev))
2447 if ((bdev_read_only(rdev->bdev) || bdev_read_only(rdev->meta_bdev)) &&
2451 /* make sure rdev->sectors exceeds mddev->dev_sectors */
2452 if (!test_bit(Journal, &rdev->flags) &&
2453 rdev->sectors &&
2454 (mddev->dev_sectors == 0 || rdev->sectors < mddev->dev_sectors)) {
2463 mddev->dev_sectors = rdev->sectors;
2466 /* Verify rdev->desc_nr is unique.
2471 if (rdev->desc_nr < 0) {
2477 rdev->desc_nr = choice;
2479 if (md_find_rdev_nr_rcu(mddev, rdev->desc_nr)) {
2485 if (!test_bit(Journal, &rdev->flags) &&
2486 mddev->max_disks && rdev->desc_nr >= mddev->max_disks) {
2491 bdevname(rdev->bdev,b);
2494 rdev->mddev = mddev;
2498 mddev_create_serial_pool(mddev, rdev, false);
2500 if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b)))
2503 ko = &part_to_dev(rdev->bdev->bd_part)->kobj;
2505 err = sysfs_create_link(&rdev->kobj, ko, "block");
2506 rdev->sysfs_state = sysfs_get_dirent_safe(rdev->kobj.sd, "state");
2507 rdev->sysfs_unack_badblocks =
2508 sysfs_get_dirent_safe(rdev->kobj.sd, "unacknowledged_bad_blocks");
2509 rdev->sysfs_badblocks =
2510 sysfs_get_dirent_safe(rdev->kobj.sd, "bad_blocks");
2512 list_add_rcu(&rdev->same_set, &mddev->disks);
2513 bd_link_disk_holder(rdev->bdev, mddev->gendisk);
2523 mddev_destroy_serial_pool(mddev, rdev, false);
2529 struct md_rdev *rdev = container_of(ws, struct md_rdev, del_work);
2530 kobject_del(&rdev->kobj);
2531 kobject_put(&rdev->kobj);
2534 static void unbind_rdev_from_array(struct md_rdev *rdev)
2538 bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk);
2539 list_del_rcu(&rdev->same_set);
2540 pr_debug("md: unbind<%s>\n", bdevname(rdev->bdev,b));
2541 mddev_destroy_serial_pool(rdev->mddev, rdev, false);
2542 rdev->mddev = NULL;
2543 sysfs_remove_link(&rdev->kobj, "block");
2544 sysfs_put(rdev->sysfs_state);
2545 sysfs_put(rdev->sysfs_unack_badblocks);
2546 sysfs_put(rdev->sysfs_badblocks);
2547 rdev->sysfs_state = NULL;
2548 rdev->sysfs_unack_badblocks = NULL;
2549 rdev->sysfs_badblocks = NULL;
2550 rdev->badblocks.count = 0;
2556 INIT_WORK(&rdev->del_work, rdev_delayed_delete);
2557 kobject_get(&rdev->kobj);
2558 queue_work(md_rdev_misc_wq, &rdev->del_work);
2566 static int lock_rdev(struct md_rdev *rdev, dev_t dev, int shared)
2572 shared ? (struct md_rdev *)lock_rdev : rdev);
2578 rdev->bdev = bdev;
2582 static void unlock_rdev(struct md_rdev *rdev)
2584 struct block_device *bdev = rdev->bdev;
2585 rdev->bdev = NULL;
2591 static void export_rdev(struct md_rdev *rdev)
2595 pr_debug("md: export_rdev(%s)\n", bdevname(rdev->bdev,b));
2596 md_rdev_clear(rdev);
2598 if (test_bit(AutoDetected, &rdev->flags))
2599 md_autodetect_dev(rdev->bdev->bd_dev);
2601 unlock_rdev(rdev);
2602 kobject_put(&rdev->kobj);
2605 void md_kick_rdev_from_array(struct md_rdev *rdev)
2607 unbind_rdev_from_array(rdev);
2608 export_rdev(rdev);
2614 struct md_rdev *rdev;
2617 rdev = list_first_entry(&mddev->disks, struct md_rdev,
2619 md_kick_rdev_from_array(rdev);
2660 struct md_rdev *rdev;
2661 rdev_for_each(rdev, mddev) {
2662 if (rdev->sb_events == mddev->events ||
2664 rdev->raid_disk < 0 &&
2665 rdev->sb_events+1 == mddev->events)) {
2667 rdev->sb_loaded = 2;
2669 sync_super(mddev, rdev);
2670 rdev->sb_loaded = 1;
2677 struct md_rdev *rdev = NULL, *iter;
2681 /* Find a good rdev */
2684 rdev = iter;
2689 if (!rdev)
2692 sb = page_address(rdev->sb_page);
2694 rdev_for_each(rdev, mddev) {
2695 role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
2697 if (role == 0xffff && rdev->raid_disk >=0 &&
2698 !test_bit(Faulty, &rdev->flags))
2701 if (test_bit(Faulty, &rdev->flags) && (role < 0xfffd))
2718 struct md_rdev *rdev;
2754 rdev_for_each(rdev, mddev) {
2755 if (rdev->raid_disk >= 0 &&
2760 !test_bit(Journal, &rdev->flags) &&
2761 !test_bit(In_sync, &rdev->flags) &&
2762 mddev->curr_resync_completed > rdev->recovery_offset)
2763 rdev->recovery_offset = mddev->curr_resync_completed;
2771 rdev_for_each(rdev, mddev) {
2772 if (rdev->badblocks.changed) {
2773 rdev->badblocks.changed = 0;
2774 ack_all_badblocks(&rdev->badblocks);
2775 md_error(mddev, rdev);
2777 clear_bit(Blocked, &rdev->flags);
2778 clear_bit(BlockedBadBlocks, &rdev->flags);
2779 wake_up(&rdev->blocked_wait);
2835 rdev_for_each(rdev, mddev) {
2836 if (rdev->badblocks.changed)
2838 if (test_bit(Faulty, &rdev->flags))
2839 set_bit(FaultRecorded, &rdev->flags);
2852 rdev_for_each(rdev, mddev) {
2855 if (rdev->sb_loaded != 1)
2858 if (!test_bit(Faulty, &rdev->flags)) {
2859 md_super_write(mddev,rdev,
2860 rdev->sb_start, rdev->sb_size,
2861 rdev->sb_page);
2863 bdevname(rdev->bdev, b),
2864 (unsigned long long)rdev->sb_start);
2865 rdev->sb_events = mddev->events;
2866 if (rdev->badblocks.size) {
2867 md_super_write(mddev, rdev,
2868 rdev->badblocks.sector,
2869 rdev->badblocks.size << 9,
2870 rdev->bb_page);
2871 rdev->badblocks.size = 0;
2876 bdevname(rdev->bdev, b));
2898 rdev_for_each(rdev, mddev) {
2899 if (test_and_clear_bit(FaultRecorded, &rdev->flags))
2900 clear_bit(Blocked, &rdev->flags);
2903 ack_all_badblocks(&rdev->badblocks);
2904 clear_bit(BlockedBadBlocks, &rdev->flags);
2905 wake_up(&rdev->blocked_wait);
2910 static int add_bound_rdev(struct md_rdev *rdev)
2912 struct mddev *mddev = rdev->mddev;
2914 bool add_journal = test_bit(Journal, &rdev->flags);
2922 validate_super(mddev, NULL/*freshest*/, rdev);
2925 err = mddev->pers->hot_add_disk(mddev, rdev);
2929 md_kick_rdev_from_array(rdev);
2933 sysfs_notify_dirent_safe(rdev->sysfs_state);
2971 state_show(struct md_rdev *rdev, char *page)
2975 unsigned long flags = READ_ONCE(rdev->flags);
2979 rdev->badblocks.unacked_exist))
2988 (rdev->badblocks.unacked_exist
3013 state_store(struct md_rdev *rdev, const char *buf, size_t len)
3030 struct mddev *mddev = rdev->mddev;
3034 if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
3035 md_error(rdev->mddev, rdev);
3036 if (test_bit(Faulty, &rdev->flags))
3041 if (rdev->mddev->pers) {
3042 clear_bit(Blocked, &rdev->flags);
3043 remove_and_add_spares(rdev->mddev, rdev);
3045 if (rdev->raid_disk >= 0)
3050 err = md_cluster_ops->remove_disk(mddev, rdev);
3053 md_kick_rdev_from_array(rdev);
3062 set_bit(WriteMostly, &rdev->flags);
3063 mddev_create_serial_pool(rdev->mddev, rdev, false);
3067 mddev_destroy_serial_pool(rdev->mddev, rdev, false);
3068 clear_bit(WriteMostly, &rdev->flags);
3072 set_bit(Blocked, &rdev->flags);
3075 if (!test_bit(Faulty, &rdev->flags) &&
3076 !test_bit(ExternalBbl, &rdev->flags) &&
3077 rdev->badblocks.unacked_exist) {
3081 md_error(rdev->mddev, rdev);
3083 clear_bit(Blocked, &rdev->flags);
3084 clear_bit(BlockedBadBlocks, &rdev->flags);
3085 wake_up(&rdev->blocked_wait);
3086 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
3087 md_wakeup_thread(rdev->mddev->thread);
3090 } else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) {
3091 set_bit(In_sync, &rdev->flags);
3094 set_bit(FailFast, &rdev->flags);
3098 clear_bit(FailFast, &rdev->flags);
3101 } else if (cmd_match(buf, "-insync") && rdev->raid_disk >= 0 &&
3102 !test_bit(Journal, &rdev->flags)) {
3103 if (rdev->mddev->pers == NULL) {
3104 clear_bit(In_sync, &rdev->flags);
3105 rdev->saved_raid_disk = rdev->raid_disk;
3106 rdev->raid_disk = -1;
3110 set_bit(WriteErrorSeen, &rdev->flags);
3113 clear_bit(WriteErrorSeen, &rdev->flags);
3120 if (rdev->raid_disk >= 0 &&
3121 !test_bit(Journal, &rdev->flags) &&
3122 !test_bit(Replacement, &rdev->flags))
3123 set_bit(WantReplacement, &rdev->flags);
3124 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
3125 md_wakeup_thread(rdev->mddev->thread);
3132 clear_bit(WantReplacement, &rdev->flags);
3138 if (rdev->mddev->pers)
3141 set_bit(Replacement, &rdev->flags);
3146 if (rdev->mddev->pers)
3149 clear_bit(Replacement, &rdev->flags);
3153 if (!rdev->mddev->pers)
3155 else if (test_bit(Faulty, &rdev->flags) && (rdev->raid_disk == -1) &&
3156 rdev->saved_raid_disk >= 0) {
3163 if (!mddev_is_clustered(rdev->mddev) ||
3164 (err = md_cluster_ops->gather_bitmaps(rdev)) == 0) {
3165 clear_bit(Faulty, &rdev->flags);
3166 err = add_bound_rdev(rdev);
3170 } else if (cmd_match(buf, "external_bbl") && (rdev->mddev->external)) {
3171 set_bit(ExternalBbl, &rdev->flags);
3172 rdev->badblocks.shift = 0;
3174 } else if (cmd_match(buf, "-external_bbl") && (rdev->mddev->external)) {
3175 clear_bit(ExternalBbl, &rdev->flags);
3181 sysfs_notify_dirent_safe(rdev->sysfs_state);
3188 errors_show(struct md_rdev *rdev, char *page)
3190 return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
3194 errors_store(struct md_rdev *rdev, const char *buf, size_t len)
3202 atomic_set(&rdev->corrected_errors, n);
3209 slot_show(struct md_rdev *rdev, char *page)
3211 if (test_bit(Journal, &rdev->flags))
3213 else if (rdev->raid_disk < 0)
3216 return sprintf(page, "%d\n", rdev->raid_disk);
3220 slot_store(struct md_rdev *rdev, const char *buf, size_t len)
3225 if (test_bit(Journal, &rdev->flags))
3237 if (rdev->mddev->pers && slot == -1) {
3245 if (rdev->raid_disk == -1)
3248 if (rdev->mddev->pers->hot_remove_disk == NULL)
3250 clear_bit(Blocked, &rdev->flags);
3251 remove_and_add_spares(rdev->mddev, rdev);
3252 if (rdev->raid_disk >= 0)
3254 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
3255 md_wakeup_thread(rdev->mddev->thread);
3256 } else if (rdev->mddev->pers) {
3262 if (rdev->raid_disk != -1)
3265 if (test_bit(MD_RECOVERY_RUNNING, &rdev->mddev->recovery))
3268 if (rdev->mddev->pers->hot_add_disk == NULL)
3271 if (slot >= rdev->mddev->raid_disks &&
3272 slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
3275 rdev->raid_disk = slot;
3276 if (test_bit(In_sync, &rdev->flags))
3277 rdev->saved_raid_disk = slot;
3279 rdev->saved_raid_disk = -1;
3280 clear_bit(In_sync, &rdev->flags);
3281 clear_bit(Bitmap_sync, &rdev->flags);
3282 err = rdev->mddev->pers->hot_add_disk(rdev->mddev, rdev);
3284 rdev->raid_disk = -1;
3287 sysfs_notify_dirent_safe(rdev->sysfs_state);
3289 sysfs_link_rdev(rdev->mddev, rdev);
3292 if (slot >= rdev->mddev->raid_disks &&
3293 slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
3295 rdev->raid_disk = slot;
3297 clear_bit(Faulty, &rdev->flags);
3298 clear_bit(WriteMostly, &rdev->flags);
3299 set_bit(In_sync, &rdev->flags);
3300 sysfs_notify_dirent_safe(rdev->sysfs_state);
3309 offset_show(struct md_rdev *rdev, char *page)
3311 return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset);
3315 offset_store(struct md_rdev *rdev, const char *buf, size_t len)
3320 if (rdev->mddev->pers && rdev->raid_disk >= 0)
3322 if (rdev->sectors && rdev->mddev->external)
3326 rdev->data_offset = offset;
3327 rdev->new_data_offset = offset;
3334 static ssize_t new_offset_show(struct md_rdev *rdev, char *page)
3337 (unsigned long long)rdev->new_data_offset);
3340 static ssize_t new_offset_store(struct md_rdev *rdev,
3344 struct mddev *mddev = rdev->mddev;
3352 if (new_offset == rdev->data_offset)
3355 else if (new_offset > rdev->data_offset) {
3357 if (new_offset - rdev->data_offset
3358 + mddev->dev_sectors > rdev->sectors)
3366 if (new_offset < rdev->data_offset &&
3373 if (new_offset > rdev->data_offset &&
3379 .allow_new_offset(rdev, new_offset))
3381 rdev->new_data_offset = new_offset;
3382 if (new_offset > rdev->data_offset)
3384 else if (new_offset < rdev->data_offset)
3393 rdev_size_show(struct md_rdev *rdev, char *page)
3395 return sprintf(page, "%llu\n", (unsigned long long)rdev->sectors / 2);
3428 rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len)
3430 struct mddev *my_mddev = rdev->mddev;
3431 sector_t oldsectors = rdev->sectors;
3434 if (test_bit(Journal, &rdev->flags))
3438 if (rdev->data_offset != rdev->new_data_offset)
3440 if (my_mddev->pers && rdev->raid_disk >= 0) {
3443 rdev_size_change(rdev, sectors);
3447 sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) -
3448 rdev->data_offset;
3456 rdev->sectors = sectors;
3460 * the rdev lists safely.
3473 if (rdev->bdev == rdev2->bdev &&
3474 rdev != rdev2 &&
3475 overlaps(rdev->data_offset, rdev->sectors,
3494 rdev->sectors = oldsectors;
3504 static ssize_t recovery_start_show(struct md_rdev *rdev, char *page)
3506 unsigned long long recovery_start = rdev->recovery_offset;
3508 if (test_bit(In_sync, &rdev->flags) ||
3515 static ssize_t recovery_start_store(struct md_rdev *rdev, const char *buf, size_t len)
3524 if (rdev->mddev->pers &&
3525 rdev->raid_disk >= 0)
3528 rdev->recovery_offset = recovery_start;
3530 set_bit(In_sync, &rdev->flags);
3532 clear_bit(In_sync, &rdev->flags);
3550 static ssize_t bb_show(struct md_rdev *rdev, char *page)
3552 return badblocks_show(&rdev->badblocks, page, 0);
3554 static ssize_t bb_store(struct md_rdev *rdev, const char *page, size_t len)
3556 int rv = badblocks_store(&rdev->badblocks, page, len, 0);
3558 if (test_and_clear_bit(BlockedBadBlocks, &rdev->flags))
3559 wake_up(&rdev->blocked_wait);
3565 static ssize_t ubb_show(struct md_rdev *rdev, char *page)
3567 return badblocks_show(&rdev->badblocks, page, 1);
3569 static ssize_t ubb_store(struct md_rdev *rdev, const char *page, size_t len)
3571 return badblocks_store(&rdev->badblocks, page, len, 1);
3577 ppl_sector_show(struct md_rdev *rdev, char *page)
3579 return sprintf(page, "%llu\n", (unsigned long long)rdev->ppl.sector);
3583 ppl_sector_store(struct md_rdev *rdev, const char *buf, size_t len)
3592 if (rdev->mddev->pers && test_bit(MD_HAS_PPL, &rdev->mddev->flags) &&
3593 rdev->raid_disk >= 0)
3596 if (rdev->mddev->persistent) {
3597 if (rdev->mddev->major_version == 0)
3599 if ((sector > rdev->sb_start &&
3600 sector - rdev->sb_start > S16_MAX) ||
3601 (sector < rdev->sb_start &&
3602 rdev->sb_start - sector > -S16_MIN))
3604 rdev->ppl.offset = sector - rdev->sb_start;
3605 } else if (!rdev->mddev->external) {
3608 rdev->ppl.sector = sector;
3616 ppl_size_show(struct md_rdev *rdev, char *page)
3618 return sprintf(page, "%u\n", rdev->ppl.size);
3622 ppl_size_store(struct md_rdev *rdev, const char *buf, size_t len)
3629 if (rdev->mddev->pers && test_bit(MD_HAS_PPL, &rdev->mddev->flags) &&
3630 rdev->raid_disk >= 0)
3633 if (rdev->mddev->persistent) {
3634 if (rdev->mddev->major_version == 0)
3638 } else if (!rdev->mddev->external) {
3641 rdev->ppl.size = size;
3666 struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj);
3670 if (!rdev->mddev)
3672 return entry->show(rdev, page);
3680 struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj);
3682 struct mddev *mddev = rdev->mddev;
3690 if (rdev->mddev == NULL)
3693 rv = entry->store(rdev, page, length);
3701 struct md_rdev *rdev = container_of(ko, struct md_rdev, kobj);
3702 kfree(rdev);
3714 int md_rdev_init(struct md_rdev *rdev)
3716 rdev->desc_nr = -1;
3717 rdev->saved_raid_disk = -1;
3718 rdev->raid_disk = -1;
3719 rdev->flags = 0;
3720 rdev->data_offset = 0;
3721 rdev->new_data_offset = 0;
3722 rdev->sb_events = 0;
3723 rdev->last_read_error = 0;
3724 rdev->sb_loaded = 0;
3725 rdev->bb_page = NULL;
3726 atomic_set(&rdev->nr_pending, 0);
3727 atomic_set(&rdev->read_errors, 0);
3728 atomic_set(&rdev->corrected_errors, 0);
3730 INIT_LIST_HEAD(&rdev->same_set);
3731 init_waitqueue_head(&rdev->blocked_wait);
3737 return badblocks_init(&rdev->badblocks, 0);
3748 * a faulty rdev _never_ has rdev->sb set.
3754 struct md_rdev *rdev;
3757 rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
3758 if (!rdev)
3761 err = md_rdev_init(rdev);
3764 err = alloc_disk_sb(rdev);
3768 err = lock_rdev(rdev, newdev, super_format == -2);
3772 kobject_init(&rdev->kobj, &rdev_ktype);
3774 size = i_size_read(rdev->bdev->bd_inode) >> BLOCK_SIZE_BITS;
3777 bdevname(rdev->bdev,b));
3784 load_super(rdev, NULL, super_minor);
3787 bdevname(rdev->bdev,b),
3793 bdevname(rdev->bdev,b));
3798 return rdev;
3801 if (rdev->bdev)
3802 unlock_rdev(rdev);
3803 md_rdev_clear(rdev);
3804 kfree(rdev);
3815 struct md_rdev *rdev, *freshest, *tmp;
3819 rdev_for_each_safe(rdev, tmp, mddev)
3821 load_super(rdev, freshest, mddev->minor_version)) {
3823 freshest = rdev;
3829 bdevname(rdev->bdev,b));
3830 md_kick_rdev_from_array(rdev);
3843 rdev_for_each_safe(rdev, tmp, mddev) {
3845 (rdev->desc_nr >= mddev->max_disks ||
3848 mdname(mddev), bdevname(rdev->bdev, b),
3850 md_kick_rdev_from_array(rdev);
3853 if (rdev != freshest) {
3855 validate_super(mddev, freshest, rdev)) {
3857 bdevname(rdev->bdev,b));
3858 md_kick_rdev_from_array(rdev);
3863 rdev->desc_nr = i++;
3864 rdev->raid_disk = rdev->desc_nr;
3865 set_bit(In_sync, &rdev->flags);
3866 } else if (rdev->raid_disk >=
3868 !test_bit(Journal, &rdev->flags)) {
3869 rdev->raid_disk = -1;
3870 clear_bit(In_sync, &rdev->flags);
3977 struct md_rdev *rdev;
4053 rdev_for_each(rdev, mddev)
4054 rdev->new_raid_disk = rdev->raid_disk;
4127 rdev_for_each(rdev, mddev) {
4128 if (rdev->raid_disk < 0)
4130 if (rdev->new_raid_disk >= mddev->raid_disks)
4131 rdev->new_raid_disk = -1;
4132 if (rdev->new_raid_disk == rdev->raid_disk)
4134 sysfs_unlink_rdev(mddev, rdev);
4136 rdev_for_each(rdev, mddev) {
4137 if (rdev->raid_disk < 0)
4139 if (rdev->new_raid_disk == rdev->raid_disk)
4141 rdev->raid_disk = rdev->new_raid_disk;
4142 if (rdev->raid_disk < 0)
4143 clear_bit(In_sync, &rdev->flags);
4145 if (sysfs_link_rdev(mddev, rdev))
4147 rdev->raid_disk, mdname(mddev));
4251 struct md_rdev *rdev;
4255 rdev_for_each(rdev, mddev) {
4257 rdev->data_offset < rdev->new_data_offset)
4260 rdev->data_offset > rdev->new_data_offset)
4619 struct md_rdev *rdev;
4622 rdev_for_each_rcu(rdev, mddev)
4623 if (work_pending(&rdev->del_work)) {
4644 struct md_rdev *rdev;
4662 rdev = md_import_device(dev, mddev->major_version,
4664 if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) {
4669 .load_super(rdev, rdev0, mddev->minor_version);
4674 rdev = md_import_device(dev, -2, -1);
4676 rdev = md_import_device(dev, -1, -1);
4678 if (IS_ERR(rdev)) {
4680 return PTR_ERR(rdev);
4682 err = bind_rdev_to_array(rdev, mddev);
4685 export_rdev(rdev);
5291 struct md_rdev *rdev;
5312 rdev_for_each(rdev, mddev)
5313 rdev->new_data_offset = rdev->data_offset;
5861 struct md_rdev *rdev;
5896 rdev_for_each(rdev, mddev) {
5897 if (test_bit(Faulty, &rdev->flags))
5899 sync_blockdev(rdev->bdev);
5900 invalidate_bdev(rdev->bdev);
5902 (bdev_read_only(rdev->bdev) ||
5903 bdev_read_only(rdev->meta_bdev))) {
5909 if (rdev->sb_page)
5916 if (rdev->meta_bdev) {
5918 } else if (rdev->data_offset < rdev->sb_start) {
5920 rdev->data_offset + mddev->dev_sectors
5921 > rdev->sb_start) {
5927 if (rdev->sb_start + rdev->sb_size/512
5928 > rdev->data_offset) {
5934 sysfs_notify_dirent_safe(rdev->sysfs_state);
5984 rdev_for_each(rdev, mddev)
5986 if (rdev < rdev2 &&
5987 rdev->bdev->bd_disk ==
5991 bdevname(rdev->bdev,b),
6041 rdev_for_each(rdev, mddev) {
6042 if (test_bit(WriteMostly, &rdev->flags) &&
6043 rdev_init_serial(rdev))
6060 rdev_for_each(rdev, mddev) {
6061 if (rdev->raid_disk >= 0 &&
6062 !blk_queue_nonrot(bdev_get_queue(rdev->bdev))) {
6097 rdev_for_each(rdev, mddev)
6098 if (rdev->raid_disk >= 0)
6099 sysfs_link_rdev(mddev, rdev); /* failure here is OK */
6182 struct md_rdev *rdev;
6195 rdev_for_each_rcu(rdev, mddev) {
6196 if (test_bit(Journal, &rdev->flags) &&
6197 !test_bit(Faulty, &rdev->flags))
6199 if (bdev_read_only(rdev->bdev))
6421 struct md_rdev *rdev;
6466 rdev_for_each(rdev, mddev)
6467 if (rdev->raid_disk >= 0)
6468 sysfs_unlink_rdev(mddev, rdev);
6508 struct md_rdev *rdev;
6516 rdev_for_each(rdev, mddev) {
6518 pr_cont("<%s>", bdevname(rdev->bdev,b));
6543 struct md_rdev *rdev0, *rdev, *tmp;
6557 rdev_for_each_list(rdev, tmp, &pending_raid_disks)
6558 if (super_90_load(rdev, rdev0, 0) >= 0) {
6560 bdevname(rdev->bdev,b));
6561 list_move(&rdev->same_set, &candidates);
6597 rdev_for_each_list(rdev, tmp, &candidates) {
6598 list_del_init(&rdev->same_set);
6599 if (bind_rdev_to_array(rdev, mddev))
6600 export_rdev(rdev);
6608 rdev_for_each_list(rdev, tmp, &candidates) {
6609 list_del_init(&rdev->same_set);
6610 export_rdev(rdev);
6636 struct md_rdev *rdev;
6640 rdev_for_each_rcu(rdev, mddev) {
6642 if (test_bit(Faulty, &rdev->flags))
6646 if (test_bit(In_sync, &rdev->flags))
6648 else if (test_bit(Journal, &rdev->flags))
6727 struct md_rdev *rdev;
6733 rdev = md_find_rdev_nr_rcu(mddev, info.number);
6734 if (rdev) {
6735 info.major = MAJOR(rdev->bdev->bd_dev);
6736 info.minor = MINOR(rdev->bdev->bd_dev);
6737 info.raid_disk = rdev->raid_disk;
6739 if (test_bit(Faulty, &rdev->flags))
6741 else if (test_bit(In_sync, &rdev->flags)) {
6745 if (test_bit(Journal, &rdev->flags))
6747 if (test_bit(WriteMostly, &rdev->flags))
6749 if (test_bit(FailFast, &rdev->flags))
6767 struct md_rdev *rdev;
6783 rdev = md_import_device(dev, mddev->major_version, mddev->minor_version);
6784 if (IS_ERR(rdev)) {
6786 PTR_ERR(rdev));
6787 return PTR_ERR(rdev);
6794 .load_super(rdev, rdev0, mddev->minor_version);
6797 bdevname(rdev->bdev,b),
6799 export_rdev(rdev);
6803 err = bind_rdev_to_array(rdev, mddev);
6805 export_rdev(rdev);
6822 rdev = md_import_device(dev, mddev->major_version,
6825 rdev = md_import_device(dev, -1, -1);
6826 if (IS_ERR(rdev)) {
6828 PTR_ERR(rdev));
6829 return PTR_ERR(rdev);
6835 rdev->raid_disk = info->raid_disk;
6836 set_bit(In_sync, &rdev->flags);
6837 clear_bit(Bitmap_sync, &rdev->flags);
6839 rdev->raid_disk = -1;
6840 rdev->saved_raid_disk = rdev->raid_disk;
6843 validate_super(mddev, NULL/*freshest*/, rdev);
6845 rdev->raid_disk != info->raid_disk) {
6849 export_rdev(rdev);
6853 clear_bit(In_sync, &rdev->flags); /* just to be sure */
6855 set_bit(WriteMostly, &rdev->flags);
6857 clear_bit(WriteMostly, &rdev->flags);
6859 set_bit(FailFast, &rdev->flags);
6861 clear_bit(FailFast, &rdev->flags);
6875 export_rdev(rdev);
6878 set_bit(Journal, &rdev->flags);
6885 set_bit(Candidate, &rdev->flags);
6888 err = md_cluster_ops->add_new_disk(mddev, rdev);
6890 export_rdev(rdev);
6896 rdev->raid_disk = -1;
6897 err = bind_rdev_to_array(rdev, mddev);
6900 export_rdev(rdev);
6908 md_kick_rdev_from_array(rdev);
6914 err = add_bound_rdev(rdev);
6918 err = add_bound_rdev(rdev);
6933 rdev = md_import_device(dev, -1, 0);
6934 if (IS_ERR(rdev)) {
6936 PTR_ERR(rdev));
6937 return PTR_ERR(rdev);
6939 rdev->desc_nr = info->number;
6941 rdev->raid_disk = info->raid_disk;
6943 rdev->raid_disk = -1;
6945 if (rdev->raid_disk < mddev->raid_disks)
6947 set_bit(In_sync, &rdev->flags);
6950 set_bit(WriteMostly, &rdev->flags);
6952 set_bit(FailFast, &rdev->flags);
6956 rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
6958 rdev->sb_start = calc_dev_sboffset(rdev);
6959 rdev->sectors = rdev->sb_start;
6961 err = bind_rdev_to_array(rdev, mddev);
6963 export_rdev(rdev);
6974 struct md_rdev *rdev;
6979 rdev = find_rdev(mddev, dev);
6980 if (!rdev)
6983 if (rdev->raid_disk < 0)
6986 clear_bit(Blocked, &rdev->flags);
6987 remove_and_add_spares(mddev, rdev);
6989 if (rdev->raid_disk >= 0)
6994 if (md_cluster_ops->remove_disk(mddev, rdev))
6998 md_kick_rdev_from_array(rdev);
7009 bdevname(rdev->bdev,b), mdname(mddev));
7017 struct md_rdev *rdev;
7033 rdev = md_import_device(dev, -1, 0);
7034 if (IS_ERR(rdev)) {
7036 PTR_ERR(rdev));
7041 rdev->sb_start = calc_dev_sboffset(rdev);
7043 rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
7045 rdev->sectors = rdev->sb_start;
7047 if (test_bit(Faulty, &rdev->flags)) {
7049 bdevname(rdev->bdev,b), mdname(mddev));
7054 clear_bit(In_sync, &rdev->flags);
7055 rdev->desc_nr = -1;
7056 rdev->saved_raid_disk = -1;
7057 err = bind_rdev_to_array(rdev, mddev);
7066 rdev->raid_disk = -1;
7081 export_rdev(rdev);
7269 struct md_rdev *rdev;
7291 rdev_for_each(rdev, mddev) {
7292 sector_t avail = rdev->sectors;
7314 struct md_rdev *rdev;
7329 rdev_for_each(rdev, mddev) {
7331 rdev->data_offset < rdev->new_data_offset)
7334 rdev->data_offset > rdev->new_data_offset)
7489 struct md_rdev *rdev;
7496 rdev = md_find_rdev_rcu(mddev, dev);
7497 if (!rdev)
7500 md_error(mddev, rdev);
7501 if (!test_bit(Faulty, &rdev->flags))
8040 void md_error(struct mddev *mddev, struct md_rdev *rdev)
8042 if (!rdev || test_bit(Faulty, &rdev->flags))
8047 mddev->pers->error_handler(mddev,rdev);
8050 sysfs_notify_dirent_safe(rdev->sysfs_state);
8065 struct md_rdev *rdev;
8069 list_for_each_entry(rdev, &pending_raid_disks, same_set) {
8073 bdevname(rdev->bdev,b));
8107 struct md_rdev *rdev;
8109 rdev_for_each(rdev, mddev)
8110 if (rdev->raid_disk >= 0 &&
8111 !test_bit(Faulty, &rdev->flags) &&
8112 rdev->recovery_offset != MaxSector &&
8113 rdev->recovery_offset) {
8279 struct md_rdev *rdev;
8312 rdev_for_each_rcu(rdev, mddev) {
8315 bdevname(rdev->bdev,b), rdev->desc_nr);
8316 if (test_bit(WriteMostly, &rdev->flags))
8318 if (test_bit(Journal, &rdev->flags))
8320 if (test_bit(Faulty, &rdev->flags)) {
8324 if (rdev->raid_disk < 0)
8326 if (test_bit(Replacement, &rdev->flags))
8328 sectors += rdev->sectors;
8496 struct md_rdev *rdev;
8502 rdev_for_each_rcu(rdev, mddev) {
8503 struct gendisk *disk = rdev->bdev->bd_disk;
8528 if (init || curr_events - rdev->last_events > 64) {
8529 rdev->last_events = curr_events;
8691 struct md_rdev *rdev;
8828 rdev_for_each_rcu(rdev, mddev)
8829 if (rdev->raid_disk >= 0 &&
8830 !test_bit(Journal, &rdev->flags) &&
8831 !test_bit(Faulty, &rdev->flags) &&
8832 !test_bit(In_sync, &rdev->flags) &&
8833 rdev->recovery_offset < j)
8834 j = rdev->recovery_offset;
9046 rdev_for_each_rcu(rdev, mddev)
9047 if (rdev->raid_disk >= 0 &&
9049 !test_bit(Journal, &rdev->flags) &&
9050 !test_bit(Faulty, &rdev->flags) &&
9051 !test_bit(In_sync, &rdev->flags) &&
9052 rdev->recovery_offset < mddev->curr_resync)
9053 rdev->recovery_offset = mddev->curr_resync;
9101 struct md_rdev *rdev;
9110 rdev_for_each(rdev, mddev) {
9111 if ((this == NULL || rdev == this) &&
9112 rdev->raid_disk >= 0 &&
9113 !test_bit(Blocked, &rdev->flags) &&
9114 test_bit(Faulty, &rdev->flags) &&
9115 atomic_read(&rdev->nr_pending)==0) {
9122 set_bit(RemoveSynchronized, &rdev->flags);
9128 rdev_for_each(rdev, mddev) {
9129 if ((this == NULL || rdev == this) &&
9130 rdev->raid_disk >= 0 &&
9131 !test_bit(Blocked, &rdev->flags) &&
9132 ((test_bit(RemoveSynchronized, &rdev->flags) ||
9133 (!test_bit(In_sync, &rdev->flags) &&
9134 !test_bit(Journal, &rdev->flags))) &&
9135 atomic_read(&rdev->nr_pending)==0)) {
9137 mddev, rdev) == 0) {
9138 sysfs_unlink_rdev(mddev, rdev);
9139 rdev->saved_raid_disk = rdev->raid_disk;
9140 rdev->raid_disk = -1;
9144 if (remove_some && test_bit(RemoveSynchronized, &rdev->flags))
9145 clear_bit(RemoveSynchronized, &rdev->flags);
9154 rdev_for_each(rdev, mddev) {
9155 if (this && this != rdev)
9157 if (test_bit(Candidate, &rdev->flags))
9159 if (rdev->raid_disk >= 0 &&
9160 !test_bit(In_sync, &rdev->flags) &&
9161 !test_bit(Journal, &rdev->flags) &&
9162 !test_bit(Faulty, &rdev->flags))
9164 if (rdev->raid_disk >= 0)
9166 if (test_bit(Faulty, &rdev->flags))
9168 if (!test_bit(Journal, &rdev->flags)) {
9170 ! (rdev->saved_raid_disk >= 0 &&
9171 !test_bit(Bitmap_sync, &rdev->flags)))
9174 rdev->recovery_offset = 0;
9176 if (mddev->pers->hot_add_disk(mddev, rdev) == 0) {
9178 sysfs_link_rdev(mddev, rdev);
9179 if (!test_bit(Journal, &rdev->flags))
9289 struct md_rdev *rdev;
9296 rdev_for_each(rdev, mddev)
9297 clear_bit(Blocked, &rdev->flags);
9318 struct md_rdev *rdev, *tmp;
9322 rdev_for_each_safe(rdev, tmp, mddev) {
9323 if (test_and_clear_bit(ClusterRemove, &rdev->flags) &&
9324 rdev->raid_disk < 0)
9325 md_kick_rdev_from_array(rdev);
9420 struct md_rdev *rdev;
9447 rdev_for_each(rdev, mddev)
9448 rdev->saved_raid_disk = -1;
9481 void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev)
9483 sysfs_notify_dirent_safe(rdev->sysfs_state);
9484 wait_event_timeout(rdev->blocked_wait,
9485 !test_bit(Blocked, &rdev->flags) &&
9486 !test_bit(BlockedBadBlocks, &rdev->flags),
9488 rdev_dec_pending(rdev, mddev);
9495 struct md_rdev *rdev;
9497 rdev_for_each(rdev, mddev) {
9498 if (rdev->data_offset > rdev->new_data_offset)
9499 rdev->sectors += rdev->data_offset - rdev->new_data_offset;
9501 rdev->sectors -= rdev->new_data_offset - rdev->data_offset;
9502 rdev->data_offset = rdev->new_data_offset;
9510 int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
9513 struct mddev *mddev = rdev->mddev;
9516 s += rdev->new_data_offset;
9518 s += rdev->data_offset;
9519 rv = badblocks_set(&rdev->badblocks, s, sectors, 0);
9522 if (test_bit(ExternalBbl, &rdev->flags))
9523 sysfs_notify_dirent_safe(rdev->sysfs_unack_badblocks);
9524 sysfs_notify_dirent_safe(rdev->sysfs_state);
9527 md_wakeup_thread(rdev->mddev->thread);
9534 int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
9539 s += rdev->new_data_offset;
9541 s += rdev->data_offset;
9542 rv = badblocks_clear(&rdev->badblocks, s, sectors);
9543 if ((rv == 0) && test_bit(ExternalBbl, &rdev->flags))
9544 sysfs_notify_dirent_safe(rdev->sysfs_badblocks);
9637 static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
9639 struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
9736 static int read_rdev(struct mddev *mddev, struct md_rdev *rdev)
9739 struct page *swapout = rdev->sb_page;
9742 /* Store the sb page of the rdev in the swapout temporary
9745 rdev->sb_page = NULL;
9746 err = alloc_disk_sb(rdev);
9748 ClearPageUptodate(rdev->sb_page);
9749 rdev->sb_loaded = 0;
9751 load_super(rdev, NULL, mddev->minor_version);
9754 pr_warn("%s: %d Could not reload rdev(%d) err: %d. Restoring old values\n",
9755 __func__, __LINE__, rdev->desc_nr, err);
9756 if (rdev->sb_page)
9757 put_page(rdev->sb_page);
9758 rdev->sb_page = swapout;
9759 rdev->sb_loaded = 1;
9763 sb = page_address(rdev->sb_page);
9769 rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
9774 if (rdev->recovery_offset == MaxSector &&
9775 !test_bit(In_sync, &rdev->flags) &&
9785 struct md_rdev *rdev = NULL, *iter;
9788 /* Find the rdev */
9791 rdev = iter;
9796 if (!rdev) {
9797 pr_warn("%s: %d Could not find rdev with nr %d\n", __func__, __LINE__, nr);
9801 err = read_rdev(mddev, rdev);
9805 check_sb_changes(mddev, rdev);
9807 /* Read all rdevs to update recovery_offset */
9808 rdev_for_each_rcu(rdev, mddev) {
9809 if (!test_bit(Faulty, &rdev->flags))
9810 read_rdev(mddev, rdev);
9844 struct md_rdev *rdev;
9863 rdev = md_import_device(dev,0, 90);
9865 if (IS_ERR(rdev))
9868 if (test_bit(Faulty, &rdev->flags))
9871 set_bit(AutoDetected, &rdev->flags);
9872 list_add(&rdev->same_set, &pending_raid_disks);