Lines matching defs:rdev in drivers/md/dm-raid.c

38 #define FirstUse 10		/* rdev flag */
55 struct md_rdev rdev;
252 struct md_rdev rdev;
681 /* Adjust rdev sectors */
685 struct md_rdev *rdev;
688 * raid10 sets rdev->sectors to the device size, which
691 rdev_for_each(rdev, mddev)
692 if (!test_bit(Journal, &rdev->flags))
693 rdev->sectors = mddev->dev_sectors;
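
The "Adjust rdev sectors" loop above resets every member's usable size to the array-wide per-device size, skipping journal devices so they keep their own size. A user-space sketch of the same walk; the struct and flag bit below are simplified stand-ins for struct md_rdev and the kernel's Journal flag, not the real definitions:

    #include <stdint.h>
    #include <stdio.h>

    #define FAKE_JOURNAL (1u << 0)      /* stand-in for the Journal rdev flag */

    struct fake_rdev {
        unsigned int flags;
        uint64_t sectors;               /* usable size of this member, in sectors */
    };

    /* Every data member gets the array's per-device size; a journal member
     * keeps whatever size it already has (mirrors the rdev_for_each() loop). */
    static void set_member_sectors(struct fake_rdev *devs, int n, uint64_t dev_sectors)
    {
        for (int i = 0; i < n; i++)
            if (!(devs[i].flags & FAKE_JOURNAL))
                devs[i].sectors = dev_sectors;
    }

    int main(void)
    {
        struct fake_rdev devs[] = { { 0, 0 }, { 0, 0 }, { FAKE_JOURNAL, 4096 } };

        set_member_sectors(devs, 3, 1048576);
        for (int i = 0; i < 3; i++)
            printf("member %d: %llu sectors\n", i,
                   (unsigned long long)devs[i].sectors);
        return 0;
    }
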
769 md_rdev_init(&rs->dev[i].rdev);
789 md_rdev_clear(&rs->journal_dev.rdev);
796 md_rdev_clear(&rs->dev[i].rdev);
834 rs->dev[i].rdev.raid_disk = i;
843 rs->dev[i].rdev.data_offset = 0;
844 rs->dev[i].rdev.new_data_offset = 0;
845 rs->dev[i].rdev.mddev = &rs->md;
859 rs->dev[i].rdev.sb_page = alloc_page(GFP_KERNEL);
860 if (!rs->dev[i].rdev.sb_page) {
871 if (!test_bit(In_sync, &rs->dev[i].rdev.flags) &&
872 (!rs->dev[i].rdev.recovery_offset)) {
894 rs->dev[i].rdev.meta_bdev = rs->dev[i].meta_dev->bdev;
896 rs->dev[i].rdev.bdev = rs->dev[i].data_dev->bdev;
897 list_add_tail(&rs->dev[i].rdev.same_set, &rs->md.disks);
898 if (!test_bit(In_sync, &rs->dev[i].rdev.flags))
903 list_add_tail(&rs->journal_dev.rdev.same_set, &rs->md.disks);
1009 if (!test_bit(FirstUse, &rs->dev[i].rdev.flags) &&
1010 ((!test_bit(In_sync, &rs->dev[i].rdev.flags) ||
1011 !rs->dev[i].rdev.sb_page)))
1056 if ((!rs->dev[i].rdev.sb_page ||
1057 !test_bit(In_sync, &rs->dev[i].rdev.flags)) &&
1082 if ((!rs->dev[i].rdev.sb_page ||
1083 !test_bit(In_sync, &rs->dev[i].rdev.flags)) &&
1180 set_bit(In_sync, &rs->dev[i].rdev.flags);
1181 rs->dev[i].rdev.recovery_offset = MaxSector;
1263 jdev = &rs->journal_dev.rdev;
1323 clear_bit(In_sync, &rd->rdev.flags);
1324 clear_bit(Faulty, &rd->rdev.flags);
1325 rd->rdev.recovery_offset = 0;
1339 set_bit(WriteMostly, &rs->dev[value].rdev.flags);
1587 * Retrieve rdev->sectors from any valid raid device of @rs
1595 struct md_rdev *rdev = &rs->dev[i].rdev;
1597 if (!test_bit(Journal, &rdev->flags) &&
1598 rdev->bdev && rdev->sectors)
1599 return rdev->sectors;
1609 struct md_rdev *rdev;
1611 rdev_for_each(rdev, &rs->md)
1612 if (!test_bit(Journal, &rdev->flags) && rdev->bdev) {
1613 ds = min(ds, to_sector(i_size_read(rdev->bdev->bd_inode)));
2032 static int read_disk_sb(struct md_rdev *rdev, int size, bool force_reload)
2034 BUG_ON(!rdev->sb_page);
2036 if (rdev->sb_loaded && !force_reload)
2039 rdev->sb_loaded = 0;
2041 if (!sync_page_io(rdev, 0, size, rdev->sb_page, REQ_OP_READ, 0, true)) {
2043 rdev->raid_disk);
2044 md_error(rdev->mddev, rdev);
2045 set_bit(Faulty, &rdev->flags);
2049 rdev->sb_loaded = 1;
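
read_disk_sb() above short-circuits when a cached copy is already loaded (rdev->sb_loaded) and, on a failed read, reports the error and marks the device Faulty. A rough user-space equivalent of that pattern; pread() stands in for sync_page_io(), and the struct below is invented for the sketch:

    #include <errno.h>
    #include <fcntl.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    struct sb_cache {
        int fd;                 /* device or image file holding the metadata */
        bool loaded;            /* mirrors rdev->sb_loaded */
        bool faulty;            /* mirrors the Faulty flag */
        uint8_t buf[4096];
    };

    /* Read `size` bytes of superblock from offset 0 unless a cached copy is
     * already present (or a reload is forced); on I/O error, mark the device
     * faulty, as the kernel path does via md_error()/set_bit(Faulty, ...). */
    static int read_sb(struct sb_cache *c, size_t size, bool force_reload)
    {
        if (c->loaded && !force_reload)
            return 0;

        c->loaded = false;
        if (pread(c->fd, c->buf, size, 0) != (ssize_t)size) {
            fprintf(stderr, "superblock read failed\n");
            c->faulty = true;
            return -EIO;
        }
        c->loaded = true;
        return 0;
    }

    int main(void)
    {
        /* /dev/zero just gives the sketch something readable to run against. */
        struct sb_cache c = { .fd = open("/dev/zero", O_RDONLY) };

        return read_sb(&c, 512, false);
    }
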
2081 static void super_sync(struct mddev *mddev, struct md_rdev *rdev)
2090 if (!rdev->meta_bdev)
2093 BUG_ON(!rdev->sb_page);
2095 sb = page_address(rdev->sb_page);
2100 if (!rs->dev[i].data_dev || test_bit(Faulty, &rs->dev[i].rdev.flags)) {
2112 sb->array_position = cpu_to_le32(rdev->raid_disk);
2116 sb->disk_recovery_offset = cpu_to_le64(rdev->recovery_offset);
2148 sb->data_offset = cpu_to_le64(rdev->data_offset);
2149 sb->new_data_offset = cpu_to_le64(rdev->new_data_offset);
2150 sb->sectors = cpu_to_le64(rdev->sectors);
2154 memset(sb + 1, 0, rdev->sb_size - sizeof(*sb));
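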
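
super_sync() writes the on-disk dm_raid_superblock fields in little-endian byte order and then zeroes everything between the end of the structure and the end of the metadata block (the memset(sb + 1, ...) line), so stale bytes never reach the disk. A compact user-space illustration of that convention; the struct below is a cut-down stand-in, not the real dm_raid_superblock layout:

    #include <endian.h>
    #include <stdint.h>
    #include <string.h>

    /* Cut-down stand-in for the on-disk superblock: every field little-endian. */
    struct mini_sb {
        uint64_t events;
        uint64_t data_offset;
        uint64_t sectors;
    };

    /* Serialize host-order values into `area` (one logical block of sb_size
     * bytes) and zero the tail, mirroring
     * memset(sb + 1, 0, rdev->sb_size - sizeof(*sb)). */
    static void sync_sb(void *area, size_t sb_size,
                        uint64_t events, uint64_t data_offset, uint64_t sectors)
    {
        struct mini_sb *sb = area;

        sb->events = htole64(events);
        sb->data_offset = htole64(data_offset);
        sb->sectors = htole64(sectors);
        memset(sb + 1, 0, sb_size - sizeof(*sb));
    }

    int main(void)
    {
        uint64_t block[64];     /* 512 bytes, naturally aligned */

        sync_sb(block, sizeof(block), 42, 0, 1048576);
        return 0;
    }
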
2163 * Return: 1 to use rdev, 0 to use refdev, -Exxx otherwise
2165 static int super_load(struct md_rdev *rdev, struct md_rdev *refdev)
2172 r = read_disk_sb(rdev, rdev->sb_size, false);
2176 sb = page_address(rdev->sb_page);
2184 (!test_bit(In_sync, &rdev->flags) && !rdev->recovery_offset)) {
2185 super_sync(rdev->mddev, rdev);
2187 set_bit(FirstUse, &rdev->flags);
2191 set_bit(MD_SB_CHANGE_DEVS, &rdev->mddev->sb_flags);
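
super_load() decides between the freshly read superblock and the current reference device by comparing their event counters: the higher count is the more recent metadata, which is how the caller ends up tracking a "freshest" device. A minimal sketch of that comparison, assuming only an events field (the real superblock carries far more, and super_load() also handles the brand-new-device / FirstUse case shown above):

    #include <endian.h>
    #include <stdint.h>

    struct mini_sb {
        uint64_t events;        /* little-endian on disk, bumped on metadata updates */
    };

    /* Return 1 if sb is newer than the reference refsb (use rdev),
     * 0 otherwise (keep refdev) -- the same convention as super_load(). */
    static int pick_freshest(const struct mini_sb *sb, const struct mini_sb *refsb)
    {
        if (!refsb)
            return 1;           /* first device examined becomes the reference */
        return le64toh(sb->events) > le64toh(refsb->events) ? 1 : 0;
    }

    int main(void)
    {
        struct mini_sb a = { htole64(7) }, b = { htole64(9) };

        return pick_freshest(&a, &b);   /* 0: b holds the newer metadata */
    }
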
2208 static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev)
2220 sb = page_address(rdev->sb_page);
2328 if (test_bit(Journal, &rdev->flags))
2384 if (test_bit(Journal, &rdev->flags) ||
2432 static int super_validate(struct raid_set *rs, struct md_rdev *rdev)
2437 if (rs_is_raid0(rs) || !rdev->sb_page || rdev->raid_disk < 0)
2440 sb = page_address(rdev->sb_page);
2446 if (!mddev->events && super_init_validation(rs, rdev))
2464 if (!test_and_clear_bit(FirstUse, &rdev->flags)) {
2466 * Retrieve rdev size stored in superblock to be prepared for shrink.
2471 rdev->sectors = le64_to_cpu(sb->sectors);
2473 rdev->recovery_offset = le64_to_cpu(sb->disk_recovery_offset);
2474 if (rdev->recovery_offset == MaxSector)
2475 set_bit(In_sync, &rdev->flags);
2481 clear_bit(In_sync, &rdev->flags); /* Mandatory for recovery */
2487 if (test_and_clear_bit(Faulty, &rdev->flags)) {
2488 rdev->recovery_offset = 0;
2489 clear_bit(In_sync, &rdev->flags);
2490 rdev->saved_raid_disk = rdev->raid_disk;
2494 rdev->data_offset = le64_to_cpu(sb->data_offset);
2495 rdev->new_data_offset = le64_to_cpu(sb->new_data_offset);
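
The super_validate() lines above recover per-device state from the superblock: a member whose stored recovery offset has reached MaxSector is fully in sync, otherwise it still needs recovery, and a member recorded as Faulty gets its recovery restarted from sector 0 while remembering its previous slot in saved_raid_disk. A simplified sketch of just that state transition; it ignores the FirstUse and reshape special cases handled in the real code:

    #include <stdbool.h>
    #include <stdint.h>

    #define MAX_SECTOR UINT64_MAX          /* stand-in for the kernel's MaxSector */

    struct member_meta {
        uint64_t recovery_offset;          /* from sb->disk_recovery_offset */
        bool in_sync;
        bool faulty;
        int raid_disk;
        int saved_raid_disk;
    };

    /* A member is in sync once its recorded recovery offset hit MaxSector;
     * a returning faulty member restarts recovery from 0 and keeps its old
     * slot so it can be re-added in place. */
    static void validate_member(struct member_meta *m)
    {
        m->in_sync = (m->recovery_offset == MAX_SECTOR);
        if (m->faulty) {
            m->faulty = false;
            m->in_sync = false;
            m->recovery_offset = 0;
            m->saved_raid_disk = m->raid_disk;
        }
    }

    int main(void)
    {
        struct member_meta m = { .recovery_offset = MAX_SECTOR, .raid_disk = 2 };

        validate_member(&m);
        return m.in_sync ? 0 : 1;
    }
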
2506 struct md_rdev *rdev, *freshest;
2510 rdev_for_each(rdev, mddev) {
2511 if (test_bit(Journal, &rdev->flags))
2514 if (!rdev->meta_bdev)
2518 rdev->sb_start = 0;
2519 rdev->sb_size = bdev_logical_block_size(rdev->meta_bdev);
2520 if (rdev->sb_size < sizeof(struct dm_raid_superblock) || rdev->sb_size > PAGE_SIZE) {
2537 r = super_load(rdev, freshest);
2541 freshest = rdev;
2559 * The rdev has to stay on the same_set list to allow for
2562 rdev->raid_disk = rdev->saved_raid_disk = -1;
2583 rdev_for_each(rdev, mddev)
2584 if (!test_bit(Journal, &rdev->flags) &&
2585 rdev != freshest &&
2586 super_validate(rs, rdev))
2602 struct md_rdev *rdev;
2613 rdev = &rs->dev[0].rdev;
2658 data_offset = rs->data_offset ? rdev->data_offset : 0;
2667 to_sector(i_size_read(rdev->bdev->bd_inode)) - rs->md.dev_sectors < MIN_FREE_RESHAPE_SPACE) {
2678 rs->md.recovery_cp += rs->dev[0].rdev.data_offset;
2681 rdev_for_each(rdev, &rs->md) {
2682 if (!test_bit(Journal, &rdev->flags)) {
2683 rdev->data_offset = data_offset;
2684 rdev->new_data_offset = new_data_offset;
2695 struct md_rdev *rdev;
2697 rdev_for_each(rdev, &rs->md) {
2698 if (!test_bit(Journal, &rdev->flags)) {
2699 rdev->raid_disk = i++;
2700 rdev->saved_raid_disk = rdev->new_raid_disk = -1;
2711 struct md_rdev *rdev;
2713 sector_t new_data_offset = rs->dev[0].rdev.data_offset ? 0 : rs->data_offset;
2736 rdev = &rs->dev[d].rdev;
2739 clear_bit(In_sync, &rdev->flags);
2740 clear_bit(Faulty, &rdev->flags);
2741 mddev->recovery_cp = rdev->recovery_offset = 0;
2746 rdev->new_data_offset = new_data_offset;
2814 struct md_rdev *rdev;
2817 rdev_for_each(rdev, &rs->md)
2818 if (!test_bit(Journal, &rdev->flags)) {
2819 reshape_sectors = (rdev->data_offset > rdev->new_data_offset) ?
2820 rdev->data_offset - rdev->new_data_offset :
2821 rdev->new_data_offset - rdev->data_offset;
2841 struct md_rdev *rdev;
2879 rdev = &rs->dev[d].rdev;
2880 clear_bit(In_sync, &rdev->flags);
2886 rdev->saved_raid_disk = -1;
2887 rdev->raid_disk = d;
2889 rdev->sectors = mddev->dev_sectors;
2890 rdev->recovery_offset = rs_is_raid1(rs) ? 0 : MaxSector;
2923 mddev->reshape_backwards = rs->dev[0].rdev.data_offset ? 0 : 1;
2931 rdev_for_each(rdev, &rs->md)
2932 if (!test_bit(Journal, &rdev->flags))
2933 rdev->sectors += reshape_sectors;
2970 if (!rs->dev[i].rdev.bdev)
2973 q = bdev_get_queue(rs->dev[i].rdev.bdev);
3089 ti->error = "Invalid rdev size";
3397 * Return status string for @rdev
3406 static const char *__raid_dev_status(struct raid_set *rs, struct md_rdev *rdev)
3408 if (!rdev->bdev)
3410 else if (test_bit(Faulty, &rdev->flags))
3412 else if (test_bit(Journal, &rdev->flags))
3416 !test_bit(In_sync, &rdev->flags)))
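
__raid_dev_status() above boils each member down to the single status character dm-raid reports: 'D' for a failed device, 'A' for one that is alive and in sync, 'a' for one that is alive but not yet in sync, with missing devices and journal devices getting their own treatment. A simplified mapping that skips the journal special case; the struct is a stand-in for the flag bits tested above:

    #include <stdbool.h>

    /* Stand-ins for the md_rdev state bits tested above. */
    struct member_state {
        bool present;   /* has a backing block device (rdev->bdev != NULL) */
        bool faulty;
        bool in_sync;
    };

    /* One status character per member, dm-raid style: 'D' failed,
     * 'A' healthy and in sync, 'a' healthy but still syncing,
     * '-' when no device is present at all. */
    static const char *member_status(const struct member_state *m)
    {
        if (!m->present)
            return "-";
        if (m->faulty)
            return "D";
        return m->in_sync ? "A" : "a";
    }

    int main(void)
    {
        struct member_state rebuilding = { .present = true, .in_sync = false };

        return member_status(&rebuilding)[0] == 'a' ? 0 : 1;
    }
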
3489 struct md_rdev *rdev;
3492 rdev_for_each(rdev, mddev)
3493 if (!test_bit(Journal, &rdev->flags) &&
3494 !test_bit(In_sync, &rdev->flags)) {
3547 DMEMIT(__raid_dev_status(rs, &rs->dev[i].rdev));
3592 DMEMIT(" %llu", (unsigned long long) rs->dev[0].rdev.data_offset);
3598 __raid_dev_status(rs, &rs->journal_dev.rdev) : "-");
3610 (test_bit(WriteMostly, &rs->dev[i].rdev.flags) ? 2 : 0);
3640 if (test_bit(WriteMostly, &rs->dev[i].rdev.flags))
3642 rs->dev[i].rdev.raid_disk);
3796 r = &rs->dev[i].rdev;