Lines Matching defs:rdev

91 static void export_rdev(struct md_rdev *rdev, struct mddev *mddev);
95 * Default number of read corrections we'll attempt on an rdev
129 static void rdev_uninit_serial(struct md_rdev *rdev)
131 if (!test_and_clear_bit(CollisionCheck, &rdev->flags))
134 kvfree(rdev->serial);
135 rdev->serial = NULL;
140 struct md_rdev *rdev;
142 rdev_for_each(rdev, mddev)
143 rdev_uninit_serial(rdev);
146 static int rdev_init_serial(struct md_rdev *rdev)
152 if (test_bit(CollisionCheck, &rdev->flags))
168 rdev->serial = serial;
169 set_bit(CollisionCheck, &rdev->flags);
176 struct md_rdev *rdev;
179 rdev_for_each(rdev, mddev) {
180 ret = rdev_init_serial(rdev);
193 * rdev needs to enable serial handling if it meets the conditions:
197 static int rdev_need_serial(struct md_rdev *rdev)
199 return (rdev && rdev->mddev->bitmap_info.max_write_behind > 0 &&
200 rdev->bdev->bd_disk->queue->nr_hw_queues != 1 &&
201 test_bit(WriteMostly, &rdev->flags));
205 * Init resources for rdev(s), then create serial_info_pool if:
206 * 1. rdev is the first device that returns true from rdev_need_serial.
207 * 2. rdev is NULL, meaning we want to enable serialization for all rdevs.
209 void mddev_create_serial_pool(struct mddev *mddev, struct md_rdev *rdev,
214 if (rdev && !rdev_need_serial(rdev) &&
215 !test_bit(CollisionCheck, &rdev->flags))
221 if (!rdev)
224 ret = rdev_init_serial(rdev);
248 * Free resource from rdev(s), and destroy serial_info_pool under conditions:
249 * 1. rdev is the last device flagged with CollisionCheck.
251 * 3. for disable policy, the pool is destroyed only when no rdev needs it.
253 void mddev_destroy_serial_pool(struct mddev *mddev, struct md_rdev *rdev,
256 if (rdev && !test_bit(CollisionCheck, &rdev->flags))
266 if (!rdev) {
272 } else if (temp != rdev &&
277 if (rdev)
278 rdev_uninit_serial(rdev);
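The serial-pool entries above (rdev_need_serial through mddev_destroy_serial_pool) hinge on a three-part predicate. Below is a minimal stand-alone restatement of the conditions shown at lines 199-201, with plain parameters standing in for the mddev/queue fields those lines read; it is an illustration, not the kernel function.

#include <stdbool.h>
#include <stdio.h>

/* Restates the rdev_need_serial() test from lines 199-201: write-behind
 * enabled, more than one hardware queue, and the device is WriteMostly. */
static bool need_serial(unsigned int max_write_behind, /* bitmap_info.max_write_behind */
			unsigned int nr_hw_queues,     /* bd_disk->queue->nr_hw_queues */
			bool write_mostly)             /* WriteMostly flag */
{
	return max_write_behind > 0 && nr_hw_queues != 1 && write_mostly;
}

int main(void)
{
	printf("%d\n", need_serial(32, 4, true)); /* 1: multi-queue, write-mostly */
	printf("%d\n", need_serial(32, 1, true)); /* 0: single hardware queue */
	printf("%d\n", need_serial(0, 4, true));  /* 0: write-behind disabled */
	return 0;
}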
489 struct md_rdev *rdev = bio->bi_private;
490 struct mddev *mddev = rdev->mddev;
494 rdev_dec_pending(rdev, mddev);
510 struct md_rdev *rdev;
516 rdev_for_each_rcu(rdev, mddev)
517 if (rdev->raid_disk >= 0 &&
518 !test_bit(Faulty, &rdev->flags)) {
521 atomic_inc(&rdev->nr_pending);
523 bi = bio_alloc_bioset(rdev->bdev, 0,
527 bi->bi_private = rdev;
769 struct md_rdev *rdev;
818 list_for_each_entry_safe(rdev, tmp, &delete, same_set) {
819 list_del_init(&rdev->same_set);
820 kobject_del(&rdev->kobj);
821 export_rdev(rdev, mddev);
828 struct md_rdev *rdev;
830 rdev_for_each_rcu(rdev, mddev)
831 if (rdev->desc_nr == nr)
832 return rdev;
840 struct md_rdev *rdev;
842 rdev_for_each(rdev, mddev)
843 if (rdev->bdev->bd_dev == dev)
844 return rdev;
851 struct md_rdev *rdev;
853 rdev_for_each_rcu(rdev, mddev)
854 if (rdev->bdev->bd_dev == dev)
855 return rdev;
874 static inline sector_t calc_dev_sboffset(struct md_rdev *rdev)
876 return MD_NEW_SIZE_SECTORS(bdev_nr_sectors(rdev->bdev));
879 static int alloc_disk_sb(struct md_rdev *rdev)
881 rdev->sb_page = alloc_page(GFP_KERNEL);
882 if (!rdev->sb_page)
887 void md_rdev_clear(struct md_rdev *rdev)
889 if (rdev->sb_page) {
890 put_page(rdev->sb_page);
891 rdev->sb_loaded = 0;
892 rdev->sb_page = NULL;
893 rdev->sb_start = 0;
894 rdev->sectors = 0;
896 if (rdev->bb_page) {
897 put_page(rdev->bb_page);
898 rdev->bb_page = NULL;
900 badblocks_exit(&rdev->badblocks);
906 struct md_rdev *rdev = bio->bi_private;
907 struct mddev *mddev = rdev->mddev;
912 md_error(mddev, rdev);
913 if (!test_bit(Faulty, &rdev->flags)
916 set_bit(LastDev, &rdev->flags);
919 clear_bit(LastDev, &rdev->flags);
923 rdev_dec_pending(rdev, mddev);
929 void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
932 /* write first size bytes of page to sector of rdev
943 if (test_bit(Faulty, &rdev->flags))
946 bio = bio_alloc_bioset(rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev,
952 atomic_inc(&rdev->nr_pending);
956 bio->bi_private = rdev;
960 test_bit(FailFast, &rdev->flags) &&
961 !test_bit(LastDev, &rdev->flags))
977 int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
983 if (metadata_op && rdev->meta_bdev)
984 bio_init(&bio, rdev->meta_bdev, &bvec, 1, opf);
986 bio_init(&bio, rdev->bdev, &bvec, 1, opf);
989 bio.bi_iter.bi_sector = sector + rdev->sb_start;
990 else if (rdev->mddev->reshape_position != MaxSector &&
991 (rdev->mddev->reshape_backwards ==
992 (sector >= rdev->mddev->reshape_position)))
993 bio.bi_iter.bi_sector = sector + rdev->new_data_offset;
995 bio.bi_iter.bi_sector = sector + rdev->data_offset;
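The sync_page_io() lines 989-995 above pick one of three base offsets for the target sector. The user-space sketch below reproduces just that selection; the struct is a simplified stand-in (not the kernel's md_rdev) holding only the fields those lines reference.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;
#define MaxSector (~(sector_t)0)

/* Only the fields the listed sync_page_io() lines read. */
struct rdev_offsets {
	sector_t sb_start;         /* superblock location */
	sector_t data_offset;      /* current data start */
	sector_t new_data_offset;  /* data start after reshape */
	sector_t reshape_position; /* MaxSector when no reshape is active */
	bool reshape_backwards;
};

/* Mirrors lines 989-995: metadata I/O is relative to sb_start; during a
 * reshape, sectors where reshape_backwards == (sector >= reshape_position)
 * (the already-reshaped side) use new_data_offset; otherwise data_offset. */
static sector_t resolve_sector(const struct rdev_offsets *r, sector_t sector,
			       bool metadata_op)
{
	if (metadata_op)
		return sector + r->sb_start;
	if (r->reshape_position != MaxSector &&
	    r->reshape_backwards == (sector >= r->reshape_position))
		return sector + r->new_data_offset;
	return sector + r->data_offset;
}

int main(void)
{
	struct rdev_offsets r = {
		.sb_start = 8, .data_offset = 2048, .new_data_offset = 4096,
		.reshape_position = 1000000, .reshape_backwards = false,
	};

	printf("%llu\n", (unsigned long long)resolve_sector(&r, 0, true));        /* 8 */
	printf("%llu\n", (unsigned long long)resolve_sector(&r, 500, false));     /* 4596 */
	printf("%llu\n", (unsigned long long)resolve_sector(&r, 2000000, false)); /* 2002048 */
	return 0;
}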
1004 static int read_disk_sb(struct md_rdev *rdev, int size)
1006 if (rdev->sb_loaded)
1009 if (!sync_page_io(rdev, 0, size, rdev->sb_page, REQ_OP_READ, true))
1011 rdev->sb_loaded = 1;
1016 rdev->bdev);
1118 * Update the superblock for rdev with data in mddev
1126 int (*load_super)(struct md_rdev *rdev,
1131 struct md_rdev *rdev);
1133 struct md_rdev *rdev);
1134 unsigned long long (*rdev_size_change)(struct md_rdev *rdev,
1136 int (*allow_new_offset)(struct md_rdev *rdev,
1161 static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
1173 rdev->sb_start = calc_dev_sboffset(rdev);
1175 ret = read_disk_sb(rdev, MD_SB_BYTES);
1181 sb = page_address(rdev->sb_page);
1185 rdev->bdev);
1193 sb->major_version, sb->minor_version, rdev->bdev);
1201 pr_warn("md: invalid superblock checksum on %pg\n", rdev->bdev);
1205 rdev->preferred_minor = sb->md_minor;
1206 rdev->data_offset = 0;
1207 rdev->new_data_offset = 0;
1208 rdev->sb_size = MD_SB_BYTES;
1209 rdev->badblocks.shift = -1;
1212 rdev->desc_nr = -1;
1214 rdev->desc_nr = sb->this_disk.number;
1218 (rdev->desc_nr >= 0 &&
1219 rdev->desc_nr < MD_SB_DISKS &&
1220 sb->disks[rdev->desc_nr].state &
1234 rdev->bdev, refdev->bdev);
1239 rdev->bdev, refdev->bdev);
1250 rdev->sectors = rdev->sb_start;
1255 if ((u64)rdev->sectors >= (2ULL << 32) && sb->level >= 1)
1256 rdev->sectors = (sector_t)(2ULL << 32) - 2;
1258 if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1)
1270 static int super_90_validate(struct mddev *mddev, struct md_rdev *freshest, struct md_rdev *rdev)
1273 mdp_super_t *sb = page_address(rdev->sb_page);
1276 rdev->raid_disk = -1;
1277 clear_bit(Faulty, &rdev->flags);
1278 clear_bit(In_sync, &rdev->flags);
1279 clear_bit(Bitmap_sync, &rdev->flags);
1280 clear_bit(WriteMostly, &rdev->flags);
1350 if (sb->disks[rdev->desc_nr].state & (
1361 set_bit(Bitmap_sync, &rdev->flags);
1369 desc = sb->disks + rdev->desc_nr;
1372 set_bit(Faulty, &rdev->flags);
1375 set_bit(In_sync, &rdev->flags);
1376 rdev->raid_disk = desc->raid_disk;
1377 rdev->saved_raid_disk = desc->raid_disk;
1383 rdev->recovery_offset = 0;
1384 rdev->raid_disk = desc->raid_disk;
1388 set_bit(WriteMostly, &rdev->flags);
1390 set_bit(FailFast, &rdev->flags);
1392 set_bit(In_sync, &rdev->flags);
1399 static void super_90_sync(struct mddev *mddev, struct md_rdev *rdev)
1405 /* make rdev->sb match mddev data..
1418 rdev->sb_size = MD_SB_BYTES;
1420 sb = page_address(rdev->sb_page);
1536 sb->this_disk = sb->disks[rdev->desc_nr];
1544 super_90_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
1546 if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
1548 if (rdev->mddev->bitmap_info.offset)
1550 rdev->sb_start = calc_dev_sboffset(rdev);
1551 if (!num_sectors || num_sectors > rdev->sb_start)
1552 num_sectors = rdev->sb_start;
1556 if ((u64)num_sectors >= (2ULL << 32) && rdev->mddev->level >= 1)
1559 md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
1560 rdev->sb_page);
1561 } while (md_super_wait(rdev->mddev) < 0);
1566 super_90_allow_new_offset(struct md_rdev *rdev, unsigned long long new_offset)
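The v0.90 handlers above clamp large components at lines 1255-1256 and 1556. The 0.90 superblock stores sizes in 32-bit 1 KiB units, so a component of 2^33 or more 512-byte sectors (4 TiB) cannot be recorded; the quick arithmetic check below shows why the clamp value is (2ULL << 32) - 2 sectors.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* The clamp at listing lines 1255-1256 and 1556. */
	uint64_t limit   = 2ULL << 32; /* 2^33 sectors = 4 TiB */
	uint64_t clamped = limit - 2;  /* largest size the clamp allows */

	printf("limit:   %llu sectors = %llu KiB (does not fit in a 32-bit KiB field)\n",
	       (unsigned long long)limit, (unsigned long long)(limit / 2));
	printf("clamped: %llu sectors = %llu KiB (exactly UINT32_MAX KiB)\n",
	       (unsigned long long)clamped, (unsigned long long)(clamped / 2));
	return 0;
}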
1598 static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
1617 sb_start = bdev_nr_sectors(rdev->bdev) - 8 * 2;
1629 rdev->sb_start = sb_start;
1634 ret = read_disk_sb(rdev, 4096);
1637 sb = page_address(rdev->sb_page);
1642 le64_to_cpu(sb->super_offset) != rdev->sb_start ||
1648 rdev->bdev);
1653 rdev->bdev);
1662 rdev->preferred_minor = 0xffff;
1663 rdev->data_offset = le64_to_cpu(sb->data_offset);
1664 rdev->new_data_offset = rdev->data_offset;
1667 rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
1668 atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
1670 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
1671 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
1672 if (rdev->sb_size & bmask)
1673 rdev->sb_size = (rdev->sb_size | bmask) + 1;
1676 && rdev->data_offset < sb_start + (rdev->sb_size/512))
1679 && rdev->new_data_offset < sb_start + (rdev->sb_size/512))
1683 rdev->desc_nr = -1;
1685 rdev->desc_nr = le32_to_cpu(sb->dev_number);
1687 if (!rdev->bb_page) {
1688 rdev->bb_page = alloc_page(GFP_KERNEL);
1689 if (!rdev->bb_page)
1693 rdev->badblocks.count == 0) {
1708 if (!sync_page_io(rdev, bb_sector, sectors << 9,
1709 rdev->bb_page, REQ_OP_READ, true))
1711 bbp = (__le64 *)page_address(rdev->bb_page);
1712 rdev->badblocks.shift = sb->bblog_shift;
1721 if (badblocks_set(&rdev->badblocks, sector, count, 1))
1725 rdev->badblocks.shift = 0;
1729 rdev->ppl.offset = (__s16)le16_to_cpu(sb->ppl.offset);
1730 rdev->ppl.size = le16_to_cpu(sb->ppl.size);
1731 rdev->ppl.sector = rdev->sb_start + rdev->ppl.offset;
1740 (rdev->desc_nr >= 0 &&
1741 rdev->desc_nr < le32_to_cpu(sb->max_dev) &&
1742 (le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < MD_DISK_ROLE_MAX ||
1743 le16_to_cpu(sb->dev_roles[rdev->desc_nr]) == MD_DISK_ROLE_JOURNAL)))
1760 rdev->bdev,
1773 sectors = bdev_nr_sectors(rdev->bdev) - rdev->data_offset;
1775 sectors = rdev->sb_start;
1778 rdev->sectors = le64_to_cpu(sb->data_size);
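Within super_1_load() above, lines 1670-1673 size the superblock as max_dev * 2 + 256 bytes and round it up to the device's logical block size. A small stand-alone version of that round-up follows, assuming (as logical block sizes are) a power-of-two block size.

#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as listing lines 1670-1673: two bytes of role table per
 * possible device plus a 256-byte header, rounded up to a block boundary. */
static uint32_t round_up_sb_size(uint32_t max_dev, uint32_t logical_block_size)
{
	uint32_t sb_size = max_dev * 2 + 256;
	uint32_t bmask = logical_block_size - 1;

	if (sb_size & bmask)
		sb_size = (sb_size | bmask) + 1;
	return sb_size;
}

int main(void)
{
	printf("%u\n", round_up_sb_size(64, 512));  /* 384 -> 512  */
	printf("%u\n", round_up_sb_size(64, 4096)); /* 384 -> 4096 */
	return 0;
}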
1782 static int super_1_validate(struct mddev *mddev, struct md_rdev *freshest, struct md_rdev *rdev)
1784 struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
1787 rdev->raid_disk = -1;
1788 clear_bit(Faulty, &rdev->flags);
1789 clear_bit(In_sync, &rdev->flags);
1790 clear_bit(Bitmap_sync, &rdev->flags);
1791 clear_bit(WriteMostly, &rdev->flags);
1882 if (rdev->desc_nr >= 0 &&
1883 rdev->desc_nr < le32_to_cpu(sb->max_dev) &&
1884 (le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < MD_DISK_ROLE_MAX ||
1885 le16_to_cpu(sb->dev_roles[rdev->desc_nr]) == MD_DISK_ROLE_JOURNAL))
1895 set_bit(Bitmap_sync, &rdev->flags);
1903 if (rdev->desc_nr < 0 ||
1904 rdev->desc_nr >= le32_to_cpu(sb->max_dev)) {
1906 rdev->desc_nr = -1;
1911 * It could happen that our rdev was marked as Faulty, and all other
1916 * If we allow current rdev without consulting the freshest superblock,
1919 * highest, otherwise, this rdev would not be allowed into array;
1925 if (rdev->desc_nr >= freshest_max_dev) {
1927 pr_warn("md: %s: rdev[%pg]: desc_nr(%d) >= freshest(%pg)->sb->max_dev(%u)\n",
1928 mdname(mddev), rdev->bdev, rdev->desc_nr,
1933 role = le16_to_cpu(freshest_sb->dev_roles[rdev->desc_nr]);
1934 pr_debug("md: %s: rdev[%pg]: role=%d(0x%x) according to freshest %pg\n",
1935 mdname(mddev), rdev->bdev, role, role, freshest->bdev);
1937 role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
1943 set_bit(Faulty, &rdev->flags);
1951 set_bit(Journal, &rdev->flags);
1952 rdev->journal_tail = le64_to_cpu(sb->journal_tail);
1953 rdev->raid_disk = 0;
1956 rdev->saved_raid_disk = role;
1959 rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
1962 rdev->saved_raid_disk = -1;
1970 set_bit(In_sync, &rdev->flags);
1972 rdev->raid_disk = role;
1976 set_bit(WriteMostly, &rdev->flags);
1978 set_bit(FailFast, &rdev->flags);
1980 set_bit(Replacement, &rdev->flags);
1982 set_bit(In_sync, &rdev->flags);
1987 static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
1992 /* make rdev->sb match mddev and rdev data. */
1994 sb = page_address(rdev->sb_page);
2010 sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
2017 if (test_bit(FailFast, &rdev->flags))
2022 if (test_bit(WriteMostly, &rdev->flags))
2026 sb->data_offset = cpu_to_le64(rdev->data_offset);
2027 sb->data_size = cpu_to_le64(rdev->sectors);
2034 if (rdev->raid_disk >= 0 && !test_bit(Journal, &rdev->flags) &&
2035 !test_bit(In_sync, &rdev->flags)) {
2039 cpu_to_le64(rdev->recovery_offset);
2040 if (rdev->saved_raid_disk >= 0 && mddev->bitmap)
2045 if (test_bit(Journal, &rdev->flags))
2046 sb->journal_tail = cpu_to_le64(rdev->journal_tail);
2047 if (test_bit(Replacement, &rdev->flags))
2062 if (rdev->new_data_offset != rdev->data_offset) {
2065 sb->new_offset = cpu_to_le32((__u32)(rdev->new_data_offset
2066 - rdev->data_offset));
2073 if (rdev->badblocks.count == 0)
2077 md_error(mddev, rdev);
2079 struct badblocks *bb = &rdev->badblocks;
2080 __le64 *bbp = (__le64 *)page_address(rdev->bb_page);
2101 bb->sector = (rdev->sb_start +
2115 rdev->sb_size = max_dev * 2 + 256;
2116 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
2117 if (rdev->sb_size & bmask)
2118 rdev->sb_size = (rdev->sb_size | bmask) + 1;
2134 sb->ppl.offset = cpu_to_le16(rdev->ppl.offset);
2135 sb->ppl.size = cpu_to_le16(rdev->ppl.size);
2174 super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
2178 if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
2180 if (rdev->data_offset != rdev->new_data_offset)
2182 if (rdev->sb_start < rdev->data_offset) {
2184 max_sectors = bdev_nr_sectors(rdev->bdev) - rdev->data_offset;
2187 } else if (rdev->mddev->bitmap_info.offset) {
2193 sector_t dev_size = bdev_nr_sectors(rdev->bdev);
2208 rdev->sb_start = sb_start;
2210 sb = page_address(rdev->sb_page);
2212 sb->super_offset = cpu_to_le64(rdev->sb_start);
2215 md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
2216 rdev->sb_page);
2217 } while (md_super_wait(rdev->mddev) < 0);
2223 super_1_allow_new_offset(struct md_rdev *rdev,
2228 if (new_offset >= rdev->data_offset)
2233 if (rdev->mddev->minor_version == 0)
2242 if (rdev->sb_start + (32+4)*2 > new_offset)
2244 bitmap = rdev->mddev->bitmap;
2245 if (bitmap && !rdev->mddev->bitmap_info.file &&
2246 rdev->sb_start + rdev->mddev->bitmap_info.offset +
2249 if (rdev->badblocks.sector + rdev->badblocks.size > new_offset)
2276 static void sync_super(struct mddev *mddev, struct md_rdev *rdev)
2279 mddev->sync_super(mddev, rdev);
2285 super_types[mddev->major_version].sync_super(mddev, rdev);
2290 struct md_rdev *rdev, *rdev2;
2293 rdev_for_each_rcu(rdev, mddev1) {
2294 if (test_bit(Faulty, &rdev->flags) ||
2295 test_bit(Journal, &rdev->flags) ||
2296 rdev->raid_disk == -1)
2303 if (rdev->bdev->bd_disk == rdev2->bdev->bd_disk) {
2324 struct md_rdev *rdev, *reference = NULL;
2330 rdev_for_each(rdev, mddev) {
2332 if (test_bit(Faulty, &rdev->flags))
2334 if (rdev->raid_disk < 0)
2337 /* Use the first rdev as the reference */
2338 reference = rdev;
2341 /* does this rdev's profile match the reference profile? */
2343 rdev->bdev->bd_disk) < 0)
2374 * Attempt to add an rdev, but only if it is consistent with the current
2377 int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev)
2389 if (blk_integrity_compare(mddev->gendisk, rdev->bdev->bd_disk) != 0) {
2391 mdname(mddev), rdev->bdev);
2399 static bool rdev_read_only(struct md_rdev *rdev)
2401 return bdev_read_only(rdev->bdev) ||
2402 (rdev->meta_bdev && bdev_read_only(rdev->meta_bdev));
2405 static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev)
2411 if (find_rdev(mddev, rdev->bdev->bd_dev))
2414 if (rdev_read_only(rdev) && mddev->pers)
2417 /* make sure rdev->sectors exceeds mddev->dev_sectors */
2418 if (!test_bit(Journal, &rdev->flags) &&
2419 rdev->sectors &&
2420 (mddev->dev_sectors == 0 || rdev->sectors < mddev->dev_sectors)) {
2429 mddev->dev_sectors = rdev->sectors;
2432 /* Verify rdev->desc_nr is unique.
2437 if (rdev->desc_nr < 0) {
2443 rdev->desc_nr = choice;
2445 if (md_find_rdev_nr_rcu(mddev, rdev->desc_nr)) {
2451 if (!test_bit(Journal, &rdev->flags) &&
2452 mddev->max_disks && rdev->desc_nr >= mddev->max_disks) {
2457 snprintf(b, sizeof(b), "%pg", rdev->bdev);
2460 rdev->mddev = mddev;
2464 mddev_create_serial_pool(mddev, rdev, false);
2466 if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b)))
2470 err = sysfs_create_link(&rdev->kobj, bdev_kobj(rdev->bdev), "block");
2471 rdev->sysfs_state = sysfs_get_dirent_safe(rdev->kobj.sd, "state");
2472 rdev->sysfs_unack_badblocks =
2473 sysfs_get_dirent_safe(rdev->kobj.sd, "unacknowledged_bad_blocks");
2474 rdev->sysfs_badblocks =
2475 sysfs_get_dirent_safe(rdev->kobj.sd, "bad_blocks");
2477 list_add_rcu(&rdev->same_set, &mddev->disks);
2478 bd_link_disk_holder(rdev->bdev, mddev->gendisk);
2496 static void export_rdev(struct md_rdev *rdev, struct mddev *mddev)
2498 pr_debug("md: export_rdev(%pg)\n", rdev->bdev);
2499 md_rdev_clear(rdev);
2501 if (test_bit(AutoDetected, &rdev->flags))
2502 md_autodetect_dev(rdev->bdev->bd_dev);
2504 blkdev_put(rdev->bdev,
2505 test_bit(Holder, &rdev->flags) ? rdev : &claim_rdev);
2506 rdev->bdev = NULL;
2507 kobject_put(&rdev->kobj);
2510 static void md_kick_rdev_from_array(struct md_rdev *rdev)
2512 struct mddev *mddev = rdev->mddev;
2514 bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk);
2515 list_del_rcu(&rdev->same_set);
2516 pr_debug("md: unbind<%pg>\n", rdev->bdev);
2517 mddev_destroy_serial_pool(rdev->mddev, rdev, false);
2518 rdev->mddev = NULL;
2519 sysfs_remove_link(&rdev->kobj, "block");
2520 sysfs_put(rdev->sysfs_state);
2521 sysfs_put(rdev->sysfs_unack_badblocks);
2522 sysfs_put(rdev->sysfs_badblocks);
2523 rdev->sysfs_state = NULL;
2524 rdev->sysfs_unack_badblocks = NULL;
2525 rdev->sysfs_badblocks = NULL;
2526 rdev->badblocks.count = 0;
2535 list_add(&rdev->same_set, &mddev->deleting);
2540 struct md_rdev *rdev;
2543 rdev = list_first_entry(&mddev->disks, struct md_rdev,
2545 md_kick_rdev_from_array(rdev);
2586 struct md_rdev *rdev;
2587 rdev_for_each(rdev, mddev) {
2588 if (rdev->sb_events == mddev->events ||
2590 rdev->raid_disk < 0 &&
2591 rdev->sb_events+1 == mddev->events)) {
2593 rdev->sb_loaded = 2;
2595 sync_super(mddev, rdev);
2596 rdev->sb_loaded = 1;
2603 struct md_rdev *rdev = NULL, *iter;
2607 /* Find a good rdev */
2610 rdev = iter;
2615 if (!rdev)
2618 sb = page_address(rdev->sb_page);
2620 rdev_for_each(rdev, mddev) {
2621 role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
2623 if (role == MD_DISK_ROLE_SPARE && rdev->raid_disk >= 0 &&
2624 !test_bit(Faulty, &rdev->flags))
2627 if (test_bit(Faulty, &rdev->flags) && (role < MD_DISK_ROLE_MAX))
2644 struct md_rdev *rdev;
2680 rdev_for_each(rdev, mddev) {
2681 if (rdev->raid_disk >= 0 &&
2686 !test_bit(Journal, &rdev->flags) &&
2687 !test_bit(In_sync, &rdev->flags) &&
2688 mddev->curr_resync_completed > rdev->recovery_offset)
2689 rdev->recovery_offset = mddev->curr_resync_completed;
2697 rdev_for_each(rdev, mddev) {
2698 if (rdev->badblocks.changed) {
2699 rdev->badblocks.changed = 0;
2700 ack_all_badblocks(&rdev->badblocks);
2701 md_error(mddev, rdev);
2703 clear_bit(Blocked, &rdev->flags);
2704 clear_bit(BlockedBadBlocks, &rdev->flags);
2705 wake_up(&rdev->blocked_wait);
2761 rdev_for_each(rdev, mddev) {
2762 if (rdev->badblocks.changed)
2764 if (test_bit(Faulty, &rdev->flags))
2765 set_bit(FaultRecorded, &rdev->flags);
2778 rdev_for_each(rdev, mddev) {
2779 if (rdev->sb_loaded != 1)
2782 if (!test_bit(Faulty, &rdev->flags)) {
2783 md_super_write(mddev,rdev,
2784 rdev->sb_start, rdev->sb_size,
2785 rdev->sb_page);
2787 rdev->bdev,
2788 (unsigned long long)rdev->sb_start);
2789 rdev->sb_events = mddev->events;
2790 if (rdev->badblocks.size) {
2791 md_super_write(mddev, rdev,
2792 rdev->badblocks.sector,
2793 rdev->badblocks.size << 9,
2794 rdev->bb_page);
2795 rdev->badblocks.size = 0;
2800 rdev->bdev);
2822 rdev_for_each(rdev, mddev) {
2823 if (test_and_clear_bit(FaultRecorded, &rdev->flags))
2824 clear_bit(Blocked, &rdev->flags);
2827 ack_all_badblocks(&rdev->badblocks);
2828 clear_bit(BlockedBadBlocks, &rdev->flags);
2829 wake_up(&rdev->blocked_wait);
2834 static int add_bound_rdev(struct md_rdev *rdev)
2836 struct mddev *mddev = rdev->mddev;
2838 bool add_journal = test_bit(Journal, &rdev->flags);
2846 validate_super(mddev, NULL/*freshest*/, rdev);
2849 err = mddev->pers->hot_add_disk(mddev, rdev);
2853 md_kick_rdev_from_array(rdev);
2857 sysfs_notify_dirent_safe(rdev->sysfs_state);
2895 state_show(struct md_rdev *rdev, char *page)
2899 unsigned long flags = READ_ONCE(rdev->flags);
2903 rdev->badblocks.unacked_exist))
2912 (rdev->badblocks.unacked_exist
2937 state_store(struct md_rdev *rdev, const char *buf, size_t len)
2954 struct mddev *mddev = rdev->mddev;
2958 if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
2959 md_error(rdev->mddev, rdev);
2961 if (test_bit(MD_BROKEN, &rdev->mddev->flags))
2966 if (rdev->mddev->pers) {
2967 clear_bit(Blocked, &rdev->flags);
2968 remove_and_add_spares(rdev->mddev, rdev);
2970 if (rdev->raid_disk >= 0)
2975 err = md_cluster_ops->remove_disk(mddev, rdev);
2978 md_kick_rdev_from_array(rdev);
2987 set_bit(WriteMostly, &rdev->flags);
2988 mddev_create_serial_pool(rdev->mddev, rdev, false);
2992 mddev_destroy_serial_pool(rdev->mddev, rdev, false);
2993 clear_bit(WriteMostly, &rdev->flags);
2997 set_bit(Blocked, &rdev->flags);
3000 if (!test_bit(Faulty, &rdev->flags) &&
3001 !test_bit(ExternalBbl, &rdev->flags) &&
3002 rdev->badblocks.unacked_exist) {
3006 md_error(rdev->mddev, rdev);
3008 clear_bit(Blocked, &rdev->flags);
3009 clear_bit(BlockedBadBlocks, &rdev->flags);
3010 wake_up(&rdev->blocked_wait);
3011 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
3012 md_wakeup_thread(rdev->mddev->thread);
3015 } else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) {
3016 set_bit(In_sync, &rdev->flags);
3019 set_bit(FailFast, &rdev->flags);
3023 clear_bit(FailFast, &rdev->flags);
3026 } else if (cmd_match(buf, "-insync") && rdev->raid_disk >= 0 &&
3027 !test_bit(Journal, &rdev->flags)) {
3028 if (rdev->mddev->pers == NULL) {
3029 clear_bit(In_sync, &rdev->flags);
3030 rdev->saved_raid_disk = rdev->raid_disk;
3031 rdev->raid_disk = -1;
3035 set_bit(WriteErrorSeen, &rdev->flags);
3038 clear_bit(WriteErrorSeen, &rdev->flags);
3045 if (rdev->raid_disk >= 0 &&
3046 !test_bit(Journal, &rdev->flags) &&
3047 !test_bit(Replacement, &rdev->flags))
3048 set_bit(WantReplacement, &rdev->flags);
3049 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
3050 md_wakeup_thread(rdev->mddev->thread);
3057 clear_bit(WantReplacement, &rdev->flags);
3063 if (rdev->mddev->pers)
3066 set_bit(Replacement, &rdev->flags);
3071 if (rdev->mddev->pers)
3074 clear_bit(Replacement, &rdev->flags);
3078 if (!rdev->mddev->pers)
3080 else if (test_bit(Faulty, &rdev->flags) && (rdev->raid_disk == -1) &&
3081 rdev->saved_raid_disk >= 0) {
3088 if (!mddev_is_clustered(rdev->mddev) ||
3089 (err = md_cluster_ops->gather_bitmaps(rdev)) == 0) {
3090 clear_bit(Faulty, &rdev->flags);
3091 err = add_bound_rdev(rdev);
3095 } else if (cmd_match(buf, "external_bbl") && (rdev->mddev->external)) {
3096 set_bit(ExternalBbl, &rdev->flags);
3097 rdev->badblocks.shift = 0;
3099 } else if (cmd_match(buf, "-external_bbl") && (rdev->mddev->external)) {
3100 clear_bit(ExternalBbl, &rdev->flags);
3106 sysfs_notify_dirent_safe(rdev->sysfs_state);
3113 errors_show(struct md_rdev *rdev, char *page)
3115 return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
3119 errors_store(struct md_rdev *rdev, const char *buf, size_t len)
3127 atomic_set(&rdev->corrected_errors, n);
3134 slot_show(struct md_rdev *rdev, char *page)
3136 if (test_bit(Journal, &rdev->flags))
3138 else if (rdev->raid_disk < 0)
3141 return sprintf(page, "%d\n", rdev->raid_disk);
3145 slot_store(struct md_rdev *rdev, const char *buf, size_t len)
3150 if (test_bit(Journal, &rdev->flags))
3162 if (rdev->mddev->pers && slot == -1) {
3170 if (rdev->raid_disk == -1)
3173 if (rdev->mddev->pers->hot_remove_disk == NULL)
3175 clear_bit(Blocked, &rdev->flags);
3176 remove_and_add_spares(rdev->mddev, rdev);
3177 if (rdev->raid_disk >= 0)
3179 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
3180 md_wakeup_thread(rdev->mddev->thread);
3181 } else if (rdev->mddev->pers) {
3187 if (rdev->raid_disk != -1)
3190 if (test_bit(MD_RECOVERY_RUNNING, &rdev->mddev->recovery))
3193 if (rdev->mddev->pers->hot_add_disk == NULL)
3196 if (slot >= rdev->mddev->raid_disks &&
3197 slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
3200 rdev->raid_disk = slot;
3201 if (test_bit(In_sync, &rdev->flags))
3202 rdev->saved_raid_disk = slot;
3204 rdev->saved_raid_disk = -1;
3205 clear_bit(In_sync, &rdev->flags);
3206 clear_bit(Bitmap_sync, &rdev->flags);
3207 err = rdev->mddev->pers->hot_add_disk(rdev->mddev, rdev);
3209 rdev->raid_disk = -1;
3212 sysfs_notify_dirent_safe(rdev->sysfs_state);
3214 sysfs_link_rdev(rdev->mddev, rdev);
3217 if (slot >= rdev->mddev->raid_disks &&
3218 slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
3220 rdev->raid_disk = slot;
3222 clear_bit(Faulty, &rdev->flags);
3223 clear_bit(WriteMostly, &rdev->flags);
3224 set_bit(In_sync, &rdev->flags);
3225 sysfs_notify_dirent_safe(rdev->sysfs_state);
3234 offset_show(struct md_rdev *rdev, char *page)
3236 return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset);
3240 offset_store(struct md_rdev *rdev, const char *buf, size_t len)
3245 if (rdev->mddev->pers && rdev->raid_disk >= 0)
3247 if (rdev->sectors && rdev->mddev->external)
3251 rdev->data_offset = offset;
3252 rdev->new_data_offset = offset;
3259 static ssize_t new_offset_show(struct md_rdev *rdev, char *page)
3262 (unsigned long long)rdev->new_data_offset);
3265 static ssize_t new_offset_store(struct md_rdev *rdev,
3269 struct mddev *mddev = rdev->mddev;
3277 if (new_offset == rdev->data_offset)
3280 else if (new_offset > rdev->data_offset) {
3282 if (new_offset - rdev->data_offset
3283 + mddev->dev_sectors > rdev->sectors)
3291 if (new_offset < rdev->data_offset &&
3298 if (new_offset > rdev->data_offset &&
3304 .allow_new_offset(rdev, new_offset))
3306 rdev->new_data_offset = new_offset;
3307 if (new_offset > rdev->data_offset)
3309 else if (new_offset < rdev->data_offset)
3318 rdev_size_show(struct md_rdev *rdev, char *page)
3320 return sprintf(page, "%llu\n", (unsigned long long)rdev->sectors / 2);
3333 static bool md_rdev_overlaps(struct md_rdev *rdev)
3343 if (rdev != rdev2 && rdev->bdev == rdev2->bdev &&
3344 md_rdevs_overlap(rdev, rdev2)) {
3374 rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len)
3376 struct mddev *my_mddev = rdev->mddev;
3377 sector_t oldsectors = rdev->sectors;
3380 if (test_bit(Journal, &rdev->flags))
3384 if (rdev->data_offset != rdev->new_data_offset)
3386 if (my_mddev->pers && rdev->raid_disk >= 0) {
3389 rdev_size_change(rdev, sectors);
3393 sectors = bdev_nr_sectors(rdev->bdev) -
3394 rdev->data_offset;
3402 rdev->sectors = sectors;
3410 md_rdev_overlaps(rdev)) {
3416 rdev->sectors = oldsectors;
3425 static ssize_t recovery_start_show(struct md_rdev *rdev, char *page)
3427 unsigned long long recovery_start = rdev->recovery_offset;
3429 if (test_bit(In_sync, &rdev->flags) ||
3436 static ssize_t recovery_start_store(struct md_rdev *rdev, const char *buf, size_t len)
3445 if (rdev->mddev->pers &&
3446 rdev->raid_disk >= 0)
3449 rdev->recovery_offset = recovery_start;
3451 set_bit(In_sync, &rdev->flags);
3453 clear_bit(In_sync, &rdev->flags);
3471 static ssize_t bb_show(struct md_rdev *rdev, char *page)
3473 return badblocks_show(&rdev->badblocks, page, 0);
3475 static ssize_t bb_store(struct md_rdev *rdev, const char *page, size_t len)
3477 int rv = badblocks_store(&rdev->badblocks, page, len, 0);
3479 if (test_and_clear_bit(BlockedBadBlocks, &rdev->flags))
3480 wake_up(&rdev->blocked_wait);
3486 static ssize_t ubb_show(struct md_rdev *rdev, char *page)
3488 return badblocks_show(&rdev->badblocks, page, 1);
3490 static ssize_t ubb_store(struct md_rdev *rdev, const char *page, size_t len)
3492 return badblocks_store(&rdev->badblocks, page, len, 1);
3498 ppl_sector_show(struct md_rdev *rdev, char *page)
3500 return sprintf(page, "%llu\n", (unsigned long long)rdev->ppl.sector);
3504 ppl_sector_store(struct md_rdev *rdev, const char *buf, size_t len)
3513 if (rdev->mddev->pers && test_bit(MD_HAS_PPL, &rdev->mddev->flags) &&
3514 rdev->raid_disk >= 0)
3517 if (rdev->mddev->persistent) {
3518 if (rdev->mddev->major_version == 0)
3520 if ((sector > rdev->sb_start &&
3521 sector - rdev->sb_start > S16_MAX) ||
3522 (sector < rdev->sb_start &&
3523 rdev->sb_start - sector > -S16_MIN))
3525 rdev->ppl.offset = sector - rdev->sb_start;
3526 } else if (!rdev->mddev->external) {
3529 rdev->ppl.sector = sector;
3537 ppl_size_show(struct md_rdev *rdev, char *page)
3539 return sprintf(page, "%u\n", rdev->ppl.size);
3543 ppl_size_store(struct md_rdev *rdev, const char *buf, size_t len)
3550 if (rdev->mddev->pers && test_bit(MD_HAS_PPL, &rdev->mddev->flags) &&
3551 rdev->raid_disk >= 0)
3554 if (rdev->mddev->persistent) {
3555 if (rdev->mddev->major_version == 0)
3559 } else if (!rdev->mddev->external) {
3562 rdev->ppl.size = size;
3588 struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj);
3592 if (!rdev->mddev)
3594 return entry->show(rdev, page);
3602 struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj);
3605 struct mddev *mddev = rdev->mddev;
3617 if (rdev->mddev == NULL)
3620 rv = entry->store(rdev, page, length);
3632 struct md_rdev *rdev = container_of(ko, struct md_rdev, kobj);
3633 kfree(rdev);
3645 int md_rdev_init(struct md_rdev *rdev)
3647 rdev->desc_nr = -1;
3648 rdev->saved_raid_disk = -1;
3649 rdev->raid_disk = -1;
3650 rdev->flags = 0;
3651 rdev->data_offset = 0;
3652 rdev->new_data_offset = 0;
3653 rdev->sb_events = 0;
3654 rdev->last_read_error = 0;
3655 rdev->sb_loaded = 0;
3656 rdev->bb_page = NULL;
3657 atomic_set(&rdev->nr_pending, 0);
3658 atomic_set(&rdev->read_errors, 0);
3659 atomic_set(&rdev->corrected_errors, 0);
3661 INIT_LIST_HEAD(&rdev->same_set);
3662 init_waitqueue_head(&rdev->blocked_wait);
3668 return badblocks_init(&rdev->badblocks, 0);
3680 * a faulty rdev _never_ has rdev->sb set.
3684 struct md_rdev *rdev;
3689 rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
3690 if (!rdev)
3693 err = md_rdev_init(rdev);
3696 err = alloc_disk_sb(rdev);
3703 holder = rdev;
3704 set_bit(Holder, &rdev->flags);
3707 rdev->bdev = blkdev_get_by_dev(newdev, BLK_OPEN_READ | BLK_OPEN_WRITE,
3709 if (IS_ERR(rdev->bdev)) {
3712 err = PTR_ERR(rdev->bdev);
3716 kobject_init(&rdev->kobj, &rdev_ktype);
3718 size = bdev_nr_bytes(rdev->bdev) >> BLOCK_SIZE_BITS;
3721 rdev->bdev);
3728 load_super(rdev, NULL, super_minor);
3731 rdev->bdev,
3737 rdev->bdev);
3742 return rdev;
3745 blkdev_put(rdev->bdev, holder);
3747 md_rdev_clear(rdev);
3749 kfree(rdev);
3760 struct md_rdev *rdev, *freshest, *tmp;
3763 rdev_for_each_safe(rdev, tmp, mddev)
3765 load_super(rdev, freshest, mddev->minor_version)) {
3767 freshest = rdev;
3773 rdev->bdev);
3774 md_kick_rdev_from_array(rdev);
3787 rdev_for_each_safe(rdev, tmp, mddev) {
3789 (rdev->desc_nr >= mddev->max_disks ||
3792 mdname(mddev), rdev->bdev,
3794 md_kick_rdev_from_array(rdev);
3797 if (rdev != freshest) {
3799 validate_super(mddev, freshest, rdev)) {
3801 rdev->bdev);
3802 md_kick_rdev_from_array(rdev);
3807 rdev->desc_nr = i++;
3808 rdev->raid_disk = rdev->desc_nr;
3809 set_bit(In_sync, &rdev->flags);
3810 } else if (rdev->raid_disk >=
3812 !test_bit(Journal, &rdev->flags)) {
3813 rdev->raid_disk = -1;
3814 clear_bit(In_sync, &rdev->flags);
3921 struct md_rdev *rdev;
3997 rdev_for_each(rdev, mddev)
3998 rdev->new_raid_disk = rdev->raid_disk;
4071 rdev_for_each(rdev, mddev) {
4072 if (rdev->raid_disk < 0)
4074 if (rdev->new_raid_disk >= mddev->raid_disks)
4075 rdev->new_raid_disk = -1;
4076 if (rdev->new_raid_disk == rdev->raid_disk)
4078 sysfs_unlink_rdev(mddev, rdev);
4080 rdev_for_each(rdev, mddev) {
4081 if (rdev->raid_disk < 0)
4083 if (rdev->new_raid_disk == rdev->raid_disk)
4085 rdev->raid_disk = rdev->new_raid_disk;
4086 if (rdev->raid_disk < 0)
4087 clear_bit(In_sync, &rdev->flags);
4089 if (sysfs_link_rdev(mddev, rdev))
4091 rdev->raid_disk, mdname(mddev));
4195 struct md_rdev *rdev;
4199 rdev_for_each(rdev, mddev) {
4201 rdev->data_offset < rdev->new_data_offset)
4204 rdev->data_offset > rdev->new_data_offset)
4574 struct md_rdev *rdev;
4591 rdev = md_import_device(dev, mddev->major_version,
4593 if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) {
4598 .load_super(rdev, rdev0, mddev->minor_version);
4603 rdev = md_import_device(dev, -2, -1);
4605 rdev = md_import_device(dev, -1, -1);
4607 if (IS_ERR(rdev)) {
4609 return PTR_ERR(rdev);
4611 err = bind_rdev_to_array(rdev, mddev);
4614 export_rdev(rdev, mddev);
5264 struct md_rdev *rdev;
5285 rdev_for_each(rdev, mddev)
5286 rdev->new_data_offset = rdev->data_offset;
5845 struct md_rdev *rdev;
5881 rdev_for_each(rdev, mddev) {
5882 if (test_bit(Faulty, &rdev->flags))
5884 sync_blockdev(rdev->bdev);
5885 invalidate_bdev(rdev->bdev);
5886 if (mddev->ro != MD_RDONLY && rdev_read_only(rdev)) {
5892 if (rdev->sb_page)
5899 if (rdev->meta_bdev) {
5901 } else if (rdev->data_offset < rdev->sb_start) {
5903 rdev->data_offset + mddev->dev_sectors
5904 > rdev->sb_start) {
5910 if (rdev->sb_start + rdev->sb_size/512
5911 > rdev->data_offset) {
5917 sysfs_notify_dirent_safe(rdev->sysfs_state);
5918 nowait = nowait && bdev_nowait(rdev->bdev);
5979 rdev_for_each(rdev, mddev)
5981 if (rdev < rdev2 &&
5982 rdev->bdev->bd_disk ==
5986 rdev->bdev,
6036 rdev_for_each(rdev, mddev) {
6037 if (test_bit(WriteMostly, &rdev->flags) &&
6038 rdev_init_serial(rdev))
6055 rdev_for_each(rdev, mddev) {
6056 if (rdev->raid_disk >= 0 && !bdev_nonrot(rdev->bdev)) {
6096 rdev_for_each(rdev, mddev)
6097 if (rdev->raid_disk >= 0)
6098 sysfs_link_rdev(mddev, rdev); /* failure here is OK */
6185 struct md_rdev *rdev;
6198 rdev_for_each_rcu(rdev, mddev) {
6199 if (test_bit(Journal, &rdev->flags) &&
6200 !test_bit(Faulty, &rdev->flags))
6202 if (rdev_read_only(rdev))
6441 struct md_rdev *rdev;
6488 rdev_for_each(rdev, mddev)
6489 if (rdev->raid_disk >= 0)
6490 sysfs_unlink_rdev(mddev, rdev);
6529 struct md_rdev *rdev;
6537 rdev_for_each(rdev, mddev) {
6538 pr_cont("<%pg>", rdev->bdev);
6563 struct md_rdev *rdev0, *rdev, *tmp;
6576 rdev_for_each_list(rdev, tmp, &pending_raid_disks)
6577 if (super_90_load(rdev, rdev0, 0) >= 0) {
6579 rdev->bdev);
6580 list_move(&rdev->same_set, &candidates);
6615 rdev_for_each_list(rdev, tmp, &candidates) {
6616 list_del_init(&rdev->same_set);
6617 if (bind_rdev_to_array(rdev, mddev))
6618 export_rdev(rdev, mddev);
6626 rdev_for_each_list(rdev, tmp, &candidates) {
6627 list_del_init(&rdev->same_set);
6628 export_rdev(rdev, mddev);
6654 struct md_rdev *rdev;
6658 rdev_for_each_rcu(rdev, mddev) {
6660 if (test_bit(Faulty, &rdev->flags))
6664 if (test_bit(In_sync, &rdev->flags))
6666 else if (test_bit(Journal, &rdev->flags))
6745 struct md_rdev *rdev;
6751 rdev = md_find_rdev_nr_rcu(mddev, info.number);
6752 if (rdev) {
6753 info.major = MAJOR(rdev->bdev->bd_dev);
6754 info.minor = MINOR(rdev->bdev->bd_dev);
6755 info.raid_disk = rdev->raid_disk;
6757 if (test_bit(Faulty, &rdev->flags))
6759 else if (test_bit(In_sync, &rdev->flags)) {
6763 if (test_bit(Journal, &rdev->flags))
6765 if (test_bit(WriteMostly, &rdev->flags))
6767 if (test_bit(FailFast, &rdev->flags))
6784 struct md_rdev *rdev;
6800 rdev = md_import_device(dev, mddev->major_version, mddev->minor_version);
6801 if (IS_ERR(rdev)) {
6803 PTR_ERR(rdev));
6804 return PTR_ERR(rdev);
6811 .load_super(rdev, rdev0, mddev->minor_version);
6814 rdev->bdev,
6816 export_rdev(rdev, mddev);
6820 err = bind_rdev_to_array(rdev, mddev);
6822 export_rdev(rdev, mddev);
6839 rdev = md_import_device(dev, mddev->major_version,
6842 rdev = md_import_device(dev, -1, -1);
6843 if (IS_ERR(rdev)) {
6845 PTR_ERR(rdev));
6846 return PTR_ERR(rdev);
6852 rdev->raid_disk = info->raid_disk;
6853 clear_bit(Bitmap_sync, &rdev->flags);
6855 rdev->raid_disk = -1;
6856 rdev->saved_raid_disk = rdev->raid_disk;
6859 validate_super(mddev, NULL/*freshest*/, rdev);
6861 rdev->raid_disk != info->raid_disk) {
6865 export_rdev(rdev, mddev);
6869 clear_bit(In_sync, &rdev->flags); /* just to be sure */
6871 set_bit(WriteMostly, &rdev->flags);
6873 clear_bit(WriteMostly, &rdev->flags);
6875 set_bit(FailFast, &rdev->flags);
6877 clear_bit(FailFast, &rdev->flags);
6891 export_rdev(rdev, mddev);
6894 set_bit(Journal, &rdev->flags);
6901 set_bit(Candidate, &rdev->flags);
6904 err = md_cluster_ops->add_new_disk(mddev, rdev);
6906 export_rdev(rdev, mddev);
6912 rdev->raid_disk = -1;
6913 err = bind_rdev_to_array(rdev, mddev);
6916 export_rdev(rdev, mddev);
6924 md_kick_rdev_from_array(rdev);
6930 err = add_bound_rdev(rdev);
6934 err = add_bound_rdev(rdev);
6949 rdev = md_import_device(dev, -1, 0);
6950 if (IS_ERR(rdev)) {
6952 PTR_ERR(rdev));
6953 return PTR_ERR(rdev);
6955 rdev->desc_nr = info->number;
6957 rdev->raid_disk = info->raid_disk;
6959 rdev->raid_disk = -1;
6961 if (rdev->raid_disk < mddev->raid_disks)
6963 set_bit(In_sync, &rdev->flags);
6966 set_bit(WriteMostly, &rdev->flags);
6968 set_bit(FailFast, &rdev->flags);
6972 rdev->sb_start = bdev_nr_sectors(rdev->bdev);
6974 rdev->sb_start = calc_dev_sboffset(rdev);
6975 rdev->sectors = rdev->sb_start;
6977 err = bind_rdev_to_array(rdev, mddev);
6979 export_rdev(rdev, mddev);
6989 struct md_rdev *rdev;
6994 rdev = find_rdev(mddev, dev);
6995 if (!rdev)
6998 if (rdev->raid_disk < 0)
7001 clear_bit(Blocked, &rdev->flags);
7002 remove_and_add_spares(mddev, rdev);
7004 if (rdev->raid_disk >= 0)
7009 if (md_cluster_ops->remove_disk(mddev, rdev))
7013 md_kick_rdev_from_array(rdev);
7024 rdev->bdev, mdname(mddev));
7031 struct md_rdev *rdev;
7047 rdev = md_import_device(dev, -1, 0);
7048 if (IS_ERR(rdev)) {
7050 PTR_ERR(rdev));
7055 rdev->sb_start = calc_dev_sboffset(rdev);
7057 rdev->sb_start = bdev_nr_sectors(rdev->bdev);
7059 rdev->sectors = rdev->sb_start;
7061 if (test_bit(Faulty, &rdev->flags)) {
7063 rdev->bdev, mdname(mddev));
7068 clear_bit(In_sync, &rdev->flags);
7069 rdev->desc_nr = -1;
7070 rdev->saved_raid_disk = -1;
7071 err = bind_rdev_to_array(rdev, mddev);
7080 rdev->raid_disk = -1;
7089 if (!bdev_nowait(rdev->bdev)) {
7091 mdname(mddev), rdev->bdev);
7104 export_rdev(rdev, mddev);
7301 struct md_rdev *rdev;
7323 rdev_for_each(rdev, mddev) {
7324 sector_t avail = rdev->sectors;
7346 struct md_rdev *rdev;
7361 rdev_for_each(rdev, mddev) {
7363 rdev->data_offset < rdev->new_data_offset)
7366 rdev->data_offset > rdev->new_data_offset)
7521 struct md_rdev *rdev;
7528 rdev = md_find_rdev_rcu(mddev, dev);
7529 if (!rdev)
7532 md_error(mddev, rdev);
8078 void md_error(struct mddev *mddev, struct md_rdev *rdev)
8080 if (!rdev || test_bit(Faulty, &rdev->flags))
8085 mddev->pers->error_handler(mddev, rdev);
8092 sysfs_notify_dirent_safe(rdev->sysfs_state);
8109 struct md_rdev *rdev;
8113 list_for_each_entry(rdev, &pending_raid_disks, same_set) {
8115 seq_printf(seq, "%pg ", rdev->bdev);
8159 struct md_rdev *rdev;
8161 rdev_for_each(rdev, mddev)
8162 if (rdev->raid_disk >= 0 &&
8163 !test_bit(Faulty, &rdev->flags) &&
8164 rdev->recovery_offset != MaxSector &&
8165 rdev->recovery_offset) {
8342 struct md_rdev *rdev;
8375 rdev_for_each_rcu(rdev, mddev) {
8376 seq_printf(seq, " %pg[%d]", rdev->bdev, rdev->desc_nr);
8378 if (test_bit(WriteMostly, &rdev->flags))
8380 if (test_bit(Journal, &rdev->flags))
8382 if (test_bit(Faulty, &rdev->flags)) {
8386 if (rdev->raid_disk < 0)
8388 if (test_bit(Replacement, &rdev->flags))
8390 sectors += rdev->sectors;
8558 struct md_rdev *rdev;
8564 rdev_for_each_rcu(rdev, mddev) {
8565 struct gendisk *disk = rdev->bdev->bd_disk;
8590 if (init || curr_events - rdev->last_events > 64) {
8591 rdev->last_events = curr_events;
8704 void md_submit_discard_bio(struct mddev *mddev, struct md_rdev *rdev,
8709 if (__blkdev_issue_discard(rdev->bdev, start, size, GFP_NOIO,
8814 struct md_rdev *rdev;
8952 rdev_for_each_rcu(rdev, mddev)
8953 if (rdev->raid_disk >= 0 &&
8954 !test_bit(Journal, &rdev->flags) &&
8955 !test_bit(Faulty, &rdev->flags) &&
8956 !test_bit(In_sync, &rdev->flags) &&
8957 rdev->recovery_offset < j)
8958 j = rdev->recovery_offset;
9170 rdev_for_each_rcu(rdev, mddev)
9171 if (rdev->raid_disk >= 0 &&
9173 !test_bit(Journal, &rdev->flags) &&
9174 !test_bit(Faulty, &rdev->flags) &&
9175 !test_bit(In_sync, &rdev->flags) &&
9176 rdev->recovery_offset < mddev->curr_resync)
9177 rdev->recovery_offset = mddev->curr_resync;
9225 struct md_rdev *rdev;
9234 rdev_for_each(rdev, mddev) {
9235 if ((this == NULL || rdev == this) &&
9236 rdev->raid_disk >= 0 &&
9237 !test_bit(Blocked, &rdev->flags) &&
9238 test_bit(Faulty, &rdev->flags) &&
9239 atomic_read(&rdev->nr_pending)==0) {
9246 set_bit(RemoveSynchronized, &rdev->flags);
9252 rdev_for_each(rdev, mddev) {
9253 if ((this == NULL || rdev == this) &&
9254 rdev->raid_disk >= 0 &&
9255 !test_bit(Blocked, &rdev->flags) &&
9256 ((test_bit(RemoveSynchronized, &rdev->flags) ||
9257 (!test_bit(In_sync, &rdev->flags) &&
9258 !test_bit(Journal, &rdev->flags))) &&
9259 atomic_read(&rdev->nr_pending)==0)) {
9261 mddev, rdev) == 0) {
9262 sysfs_unlink_rdev(mddev, rdev);
9263 rdev->saved_raid_disk = rdev->raid_disk;
9264 rdev->raid_disk = -1;
9268 if (remove_some && test_bit(RemoveSynchronized, &rdev->flags))
9269 clear_bit(RemoveSynchronized, &rdev->flags);
9278 rdev_for_each(rdev, mddev) {
9279 if (this && this != rdev)
9281 if (test_bit(Candidate, &rdev->flags))
9283 if (rdev->raid_disk >= 0 &&
9284 !test_bit(In_sync, &rdev->flags) &&
9285 !test_bit(Journal, &rdev->flags) &&
9286 !test_bit(Faulty, &rdev->flags))
9288 if (rdev->raid_disk >= 0)
9290 if (test_bit(Faulty, &rdev->flags))
9292 if (!test_bit(Journal, &rdev->flags)) {
9294 !(rdev->saved_raid_disk >= 0 &&
9295 !test_bit(Bitmap_sync, &rdev->flags)))
9298 rdev->recovery_offset = 0;
9300 if (mddev->pers->hot_add_disk(mddev, rdev) == 0) {
9302 sysfs_link_rdev(mddev, rdev);
9303 if (!test_bit(Journal, &rdev->flags))
9413 struct md_rdev *rdev;
9420 rdev_for_each(rdev, mddev)
9421 clear_bit(Blocked, &rdev->flags);
9442 struct md_rdev *rdev, *tmp;
9446 rdev_for_each_safe(rdev, tmp, mddev) {
9447 if (test_and_clear_bit(ClusterRemove, &rdev->flags) &&
9448 rdev->raid_disk < 0)
9449 md_kick_rdev_from_array(rdev);
9552 struct md_rdev *rdev;
9581 rdev_for_each(rdev, mddev)
9582 rdev->saved_raid_disk = -1;
9615 void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev)
9617 sysfs_notify_dirent_safe(rdev->sysfs_state);
9618 wait_event_timeout(rdev->blocked_wait,
9619 !test_bit(Blocked, &rdev->flags) &&
9620 !test_bit(BlockedBadBlocks, &rdev->flags),
9622 rdev_dec_pending(rdev, mddev);
9629 struct md_rdev *rdev;
9631 rdev_for_each(rdev, mddev) {
9632 if (rdev->data_offset > rdev->new_data_offset)
9633 rdev->sectors += rdev->data_offset - rdev->new_data_offset;
9635 rdev->sectors -= rdev->new_data_offset - rdev->data_offset;
9636 rdev->data_offset = rdev->new_data_offset;
9644 int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
9647 struct mddev *mddev = rdev->mddev;
9650 s += rdev->new_data_offset;
9652 s += rdev->data_offset;
9653 rv = badblocks_set(&rdev->badblocks, s, sectors, 0);
9656 if (test_bit(ExternalBbl, &rdev->flags))
9657 sysfs_notify_dirent_safe(rdev->sysfs_unack_badblocks);
9658 sysfs_notify_dirent_safe(rdev->sysfs_state);
9661 md_wakeup_thread(rdev->mddev->thread);
9668 int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
9673 s += rdev->new_data_offset;
9675 s += rdev->data_offset;
9676 rv = badblocks_clear(&rdev->badblocks, s, sectors);
9677 if ((rv == 0) && test_bit(ExternalBbl, &rdev->flags))
9678 sysfs_notify_dirent_safe(rdev->sysfs_badblocks);
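rdev_set_badblocks() and rdev_clear_badblocks() above (lines 9644-9678) translate the caller's array-relative sector into a device-relative one before touching the bad-block list, adding either new_data_offset or data_offset depending on a flag whose name the listing truncates. The sketch below shows only that translation; 'use_new_offset' is an illustrative name, not the kernel parameter.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;

/* Device-relative sector for a bad-block entry, per the listed lines.
 * 'use_new_offset' stands in for the truncated fourth parameter. */
static sector_t badblock_sector(sector_t s, bool use_new_offset,
				sector_t data_offset, sector_t new_data_offset)
{
	return s + (use_new_offset ? new_data_offset : data_offset);
}

int main(void)
{
	printf("%llu\n", (unsigned long long)badblock_sector(100, false, 2048, 4096)); /* 2148 */
	printf("%llu\n", (unsigned long long)badblock_sector(100, true,  2048, 4096)); /* 4196 */
	return 0;
}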
9776 static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
9778 struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
9876 static int read_rdev(struct mddev *mddev, struct md_rdev *rdev)
9879 struct page *swapout = rdev->sb_page;
9882 /* Store the sb page of the rdev in the swapout temporary
9885 rdev->sb_page = NULL;
9886 err = alloc_disk_sb(rdev);
9888 ClearPageUptodate(rdev->sb_page);
9889 rdev->sb_loaded = 0;
9891 load_super(rdev, NULL, mddev->minor_version);
9894 pr_warn("%s: %d Could not reload rdev(%d) err: %d. Restoring old values\n",
9895 __func__, __LINE__, rdev->desc_nr, err);
9896 if (rdev->sb_page)
9897 put_page(rdev->sb_page);
9898 rdev->sb_page = swapout;
9899 rdev->sb_loaded = 1;
9903 sb = page_address(rdev->sb_page);
9909 rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
9914 if (rdev->recovery_offset == MaxSector &&
9915 !test_bit(In_sync, &rdev->flags) &&
9925 struct md_rdev *rdev = NULL, *iter;
9928 /* Find the rdev */
9931 rdev = iter;
9936 if (!rdev) {
9937 pr_warn("%s: %d Could not find rdev with nr %d\n", __func__, __LINE__, nr);
9941 err = read_rdev(mddev, rdev);
9945 check_sb_changes(mddev, rdev);
9947 /* Read all rdevs to update recovery_offset */
9948 rdev_for_each_rcu(rdev, mddev) {
9949 if (!test_bit(Faulty, &rdev->flags))
9950 read_rdev(mddev, rdev);
9984 struct md_rdev *rdev;
10003 rdev = md_import_device(dev,0, 90);
10005 if (IS_ERR(rdev))
10008 if (test_bit(Faulty, &rdev->flags))
10011 set_bit(AutoDetected, &rdev->flags);
10012 list_add(&rdev->same_set, &pending_raid_disks);