Lines Matching defs:rdev

39 #define FirstUse 10		/* rdev flag */
56 struct md_rdev rdev;
253 struct md_rdev rdev;
682 /* Adjust rdev sectors */
686 struct md_rdev *rdev;
689 * raid10 sets rdev->sector to the device size, which
692 rdev_for_each(rdev, mddev)
693 if (!test_bit(Journal, &rdev->flags))
694 rdev->sectors = mddev->dev_sectors;
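
The block ending at line 694 shows the pattern this listing (apparently from the dm-raid target) leans on throughout: walk every member device, skip any journal device, and apply a per-device update, here resetting each data device to the array-wide per-device size. Below is a minimal userspace sketch of that pattern; every name (toy_rdev, TOY_JOURNAL, ...) is an invented stand-in, not the kernel's md_rdev/rdev_for_each API.

    #include <stdio.h>

    enum { TOY_IN_SYNC = 1 << 0, TOY_JOURNAL = 1 << 1 };

    struct toy_rdev {
            unsigned long flags;
            unsigned long long sectors;     /* usable size of this member */
    };

    /* Reset every data device to the array-wide per-device size,
     * leaving journal devices alone. */
    static void toy_set_rdev_sectors(struct toy_rdev *devs, int n,
                                     unsigned long long dev_sectors)
    {
            for (int i = 0; i < n; i++)
                    if (!(devs[i].flags & TOY_JOURNAL))
                            devs[i].sectors = dev_sectors;
    }

    int main(void)
    {
            struct toy_rdev devs[3] = {
                    { TOY_IN_SYNC, 100 },
                    { TOY_JOURNAL, 50 },    /* journal keeps its own size */
                    { 0, 100 },
            };

            toy_set_rdev_sectors(devs, 3, 2048);
            for (int i = 0; i < 3; i++)
                    printf("dev%d: %llu sectors\n", i, devs[i].sectors);
            return 0;
    }
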
769 md_rdev_init(&rs->dev[i].rdev);
789 md_rdev_clear(&rs->journal_dev.rdev);
796 md_rdev_clear(&rs->dev[i].rdev);
834 rs->dev[i].rdev.raid_disk = i;
843 rs->dev[i].rdev.data_offset = 0;
844 rs->dev[i].rdev.new_data_offset = 0;
845 rs->dev[i].rdev.mddev = &rs->md;
859 rs->dev[i].rdev.sb_page = alloc_page(GFP_KERNEL);
860 if (!rs->dev[i].rdev.sb_page) {
871 if (!test_bit(In_sync, &rs->dev[i].rdev.flags) &&
872 (!rs->dev[i].rdev.recovery_offset)) {
894 rs->dev[i].rdev.meta_bdev = rs->dev[i].meta_dev->bdev;
896 rs->dev[i].rdev.bdev = rs->dev[i].data_dev->bdev;
897 list_add_tail(&rs->dev[i].rdev.same_set, &rs->md.disks);
898 if (!test_bit(In_sync, &rs->dev[i].rdev.flags))
903 list_add_tail(&rs->journal_dev.rdev.same_set, &rs->md.disks);
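
Lines 769-903 cover per-device setup: each slot's embedded rdev is cleared, given its raid_disk index and zeroed data offsets, pointed back at the owning array, and linked onto the array's device list; a slot that is not in-sync is counted as needing rebuild. A hedged sketch of that shape, with invented types in place of md_rdev/mddev and a plain pointer in place of the same_set list:

    #include <stdio.h>
    #include <string.h>

    enum { TOY_IN_SYNC = 1 << 0 };

    struct toy_array;

    struct toy_rdev {
            int raid_disk;                          /* slot in the array */
            unsigned long long data_offset, new_data_offset;
            unsigned long flags;
            struct toy_array *array;                /* like rdev->mddev */
            struct toy_rdev *next;                  /* stand-in for same_set */
    };

    struct toy_array {
            struct toy_rdev *disks;                 /* member-device list */
            int rebuild_cnt;
    };

    static void toy_attach_dev(struct toy_array *a, struct toy_rdev *r,
                               int idx, int in_sync)
    {
            memset(r, 0, sizeof(*r));               /* offsets start at 0 */
            r->raid_disk = idx;
            r->array = a;
            if (in_sync)
                    r->flags |= TOY_IN_SYNC;
            else
                    a->rebuild_cnt++;               /* will need a rebuild */
            r->next = a->disks;                     /* link onto the list */
            a->disks = r;
    }

    int main(void)
    {
            struct toy_array a = { 0 };
            struct toy_rdev d0, d1;

            toy_attach_dev(&a, &d0, 0, 1);
            toy_attach_dev(&a, &d1, 1, 0);
            printf("devices needing rebuild: %d\n", a.rebuild_cnt);
            return 0;
    }
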
1009 if (!test_bit(FirstUse, &rs->dev[i].rdev.flags) &&
1010 ((!test_bit(In_sync, &rs->dev[i].rdev.flags) ||
1011 !rs->dev[i].rdev.sb_page)))
1056 if ((!rs->dev[i].rdev.sb_page ||
1057 !test_bit(In_sync, &rs->dev[i].rdev.flags)) &&
1082 if ((!rs->dev[i].rdev.sb_page ||
1083 !test_bit(In_sync, &rs->dev[i].rdev.flags)) &&
1180 set_bit(In_sync, &rs->dev[i].rdev.flags);
1181 rs->dev[i].rdev.recovery_offset = MaxSector;
1263 jdev = &rs->journal_dev.rdev;
1323 clear_bit(In_sync, &rd->rdev.flags);
1324 clear_bit(Faulty, &rd->rdev.flags);
1325 rd->rdev.recovery_offset = 0;
1339 set_bit(WriteMostly, &rs->dev[value].rdev.flags);
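
Lines 1263-1341 come from table-argument handling: a device named by a rebuild argument has In_sync and Faulty cleared and its recovery offset reset to 0 so resync starts from the beginning, and a write_mostly argument sets a per-device flag. A small stand-in sketch of those flag operations (plain bit masks, not the kernel's set_bit/clear_bit API):

    #include <stdio.h>

    enum {
            TOY_IN_SYNC      = 1 << 0,
            TOY_FAULTY       = 1 << 1,
            TOY_WRITE_MOSTLY = 1 << 2,
    };

    struct toy_rdev {
            unsigned long flags;
            unsigned long long recovery_offset;
    };

    /* Force a full rebuild of one member device. */
    static void toy_mark_for_rebuild(struct toy_rdev *r)
    {
            r->flags &= ~(TOY_IN_SYNC | TOY_FAULTY);
            r->recovery_offset = 0;                 /* resync from sector 0 */
    }

    int main(void)
    {
            struct toy_rdev r = { TOY_IN_SYNC | TOY_FAULTY, 12345 };

            toy_mark_for_rebuild(&r);
            r.flags |= TOY_WRITE_MOSTLY;            /* steer reads elsewhere */
            printf("flags=%#lx recovery_offset=%llu\n",
                   r.flags, r.recovery_offset);
            return 0;
    }
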
1587 * Retrieve rdev->sectors from any valid raid device of @rs
1595 struct md_rdev *rdev = &rs->dev[i].rdev;
1597 if (!test_bit(Journal, &rdev->flags) &&
1598 rdev->bdev && rdev->sectors)
1599 return rdev->sectors;
1609 struct md_rdev *rdev;
1611 rdev_for_each(rdev, &rs->md)
1612 if (!test_bit(Journal, &rdev->flags) && rdev->bdev) {
1613 ds = min(ds, bdev_nr_sectors(rdev->bdev));
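
Lines 1587-1613 derive the usable per-device size: take rdev->sectors from any valid data device, and bound it by the smallest backing device, skipping journal devices in both cases. A sketch of the minimum computation with stand-in fields for rdev->bdev and bdev_nr_sectors():

    #include <stdio.h>

    enum { TOY_JOURNAL = 1 << 0 };

    struct toy_rdev {
            unsigned long flags;
            int present;                    /* stands in for rdev->bdev */
            unsigned long long nr_sectors;  /* like bdev_nr_sectors() */
    };

    /* The smallest data device bounds the usable per-device size. */
    static unsigned long long toy_smallest_dev_sectors(const struct toy_rdev *devs,
                                                       int n)
    {
            unsigned long long ds = ~0ULL;

            for (int i = 0; i < n; i++)
                    if (!(devs[i].flags & TOY_JOURNAL) && devs[i].present &&
                        devs[i].nr_sectors < ds)
                            ds = devs[i].nr_sectors;
            return ds;
    }

    int main(void)
    {
            struct toy_rdev devs[] = {
                    { 0, 1, 4096 },
                    { TOY_JOURNAL, 1, 128 },        /* journal is ignored */
                    { 0, 1, 2048 },
            };

            printf("usable per-device sectors: %llu\n",
                   toy_smallest_dev_sectors(devs, 3));
            return 0;
    }
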
2033 static int read_disk_sb(struct md_rdev *rdev, int size, bool force_reload)
2035 BUG_ON(!rdev->sb_page);
2037 if (rdev->sb_loaded && !force_reload)
2040 rdev->sb_loaded = 0;
2042 if (!sync_page_io(rdev, 0, size, rdev->sb_page, REQ_OP_READ, true)) {
2044 rdev->raid_disk);
2045 md_error(rdev->mddev, rdev);
2046 set_bit(Faulty, &rdev->flags);
2050 rdev->sb_loaded = 1;
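
read_disk_sb() at lines 2033-2050 is a cached read: if the superblock page is already loaded and no reload is forced it returns immediately; otherwise it reads the device and, on failure, marks the member Faulty. A userspace approximation using pread() on an ordinary file descriptor; toy_rdev and its fields are invented:

    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    struct toy_rdev {
            int fd;                 /* open block device or image file */
            int sb_loaded;          /* cache flag, like rdev->sb_loaded */
            int faulty;
            unsigned char sb[4096];
    };

    static int toy_read_sb(struct toy_rdev *r, size_t size, int force_reload)
    {
            if (r->sb_loaded && !force_reload)
                    return 0;                       /* serve from the cache */

            r->sb_loaded = 0;
            if (pread(r->fd, r->sb, size, 0) != (ssize_t)size) {
                    r->faulty = 1;                  /* treat as a dead member */
                    return -EIO;
            }
            r->sb_loaded = 1;
            return 0;
    }

    int main(void)
    {
            struct toy_rdev r = { .fd = open("/dev/zero", O_RDONLY) };

            printf("first read:  %d\n", toy_read_sb(&r, 512, 0));
            printf("cached read: %d (sb_loaded=%d)\n",
                   toy_read_sb(&r, 512, 0), r.sb_loaded);
            close(r.fd);
            return 0;
    }
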
2082 static void super_sync(struct mddev *mddev, struct md_rdev *rdev)
2091 if (!rdev->meta_bdev)
2094 BUG_ON(!rdev->sb_page);
2096 sb = page_address(rdev->sb_page);
2101 if (!rs->dev[i].data_dev || test_bit(Faulty, &rs->dev[i].rdev.flags)) {
2113 sb->array_position = cpu_to_le32(rdev->raid_disk);
2117 sb->disk_recovery_offset = cpu_to_le64(rdev->recovery_offset);
2149 sb->data_offset = cpu_to_le64(rdev->data_offset);
2150 sb->new_data_offset = cpu_to_le64(rdev->new_data_offset);
2151 sb->sectors = cpu_to_le64(rdev->sectors);
2155 memset(sb + 1, 0, rdev->sb_size - sizeof(*sb));
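
super_sync() at lines 2082-2155 serializes the in-core state into the on-disk superblock in little-endian form (cpu_to_le32/cpu_to_le64) and zero-fills the remainder of the metadata block. A userspace sketch of the same idea using htole64() from <endian.h>; the toy_sb layout is invented, not dm-raid's struct dm_raid_superblock:

    #define _DEFAULT_SOURCE
    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct toy_sb {                         /* hypothetical on-disk layout */
            uint64_t data_offset;
            uint64_t sectors;
            uint64_t disk_recovery_offset;
    };

    static void toy_super_sync(unsigned char *block, size_t block_size,
                               uint64_t data_offset, uint64_t sectors,
                               uint64_t recovery_offset)
    {
            struct toy_sb *sb = (struct toy_sb *)block;

            sb->data_offset = htole64(data_offset);
            sb->sectors = htole64(sectors);
            sb->disk_recovery_offset = htole64(recovery_offset);
            /* zero everything past the struct, like memset(sb + 1, 0, ...) */
            memset(sb + 1, 0, block_size - sizeof(*sb));
    }

    int main(void)
    {
            union {
                    unsigned char raw[512];
                    struct toy_sb sb;
            } block;

            toy_super_sync(block.raw, sizeof(block.raw), 2048, 1 << 20, 0);
            printf("sectors round-trip: %llu\n",
                   (unsigned long long)le64toh(block.sb.sectors));
            return 0;
    }
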
2164 * Return: 1 if use rdev, 0 if use refdev, -Exxx otherwise
2166 static int super_load(struct md_rdev *rdev, struct md_rdev *refdev)
2173 r = read_disk_sb(rdev, rdev->sb_size, false);
2177 sb = page_address(rdev->sb_page);
2185 (!test_bit(In_sync, &rdev->flags) && !rdev->recovery_offset)) {
2186 super_sync(rdev->mddev, rdev);
2188 set_bit(FirstUse, &rdev->flags);
2192 set_bit(MD_SB_CHANGE_DEVS, &rdev->mddev->sb_flags);
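
super_load() at lines 2164-2192 decides whether a member's own superblock or the current reference superblock should be trusted: a brand-new device gets a fresh superblock written and a FirstUse mark, otherwise per-device event counters decide, matching the Return: comment at line 2164. Below is a sketch of just the event-counter comparison; the names and everything around them are invented:

    #include <stdio.h>

    struct toy_sb {
            unsigned long long events;      /* bumped on metadata updates */
    };

    /* 1: trust this member's superblock, 0: keep the reference one. */
    static int toy_pick_freshest(const struct toy_sb *sb, const struct toy_sb *ref)
    {
            if (!ref)
                    return 1;               /* first candidate wins by default */
            return sb->events > ref->events;
    }

    int main(void)
    {
            struct toy_sb a = { 41 }, b = { 42 };

            printf("use b over a: %d\n", toy_pick_freshest(&b, &a));
            return 0;
    }
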
2209 static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev)
2220 sb = page_address(rdev->sb_page);
2327 if (test_bit(Journal, &rdev->flags))
2381 if (test_bit(Journal, &rdev->flags) ||
2429 static int super_validate(struct raid_set *rs, struct md_rdev *rdev)
2434 if (rs_is_raid0(rs) || !rdev->sb_page || rdev->raid_disk < 0)
2437 sb = page_address(rdev->sb_page);
2443 if (!mddev->events && super_init_validation(rs, rdev))
2461 if (!test_and_clear_bit(FirstUse, &rdev->flags)) {
2463 * Retrieve rdev size stored in superblock to be prepared for shrink.
2468 rdev->sectors = le64_to_cpu(sb->sectors);
2470 rdev->recovery_offset = le64_to_cpu(sb->disk_recovery_offset);
2471 if (rdev->recovery_offset == MaxSector)
2472 set_bit(In_sync, &rdev->flags);
2478 clear_bit(In_sync, &rdev->flags); /* Mandatory for recovery */
2484 if (test_and_clear_bit(Faulty, &rdev->flags)) {
2485 rdev->recovery_offset = 0;
2486 clear_bit(In_sync, &rdev->flags);
2487 rdev->saved_raid_disk = rdev->raid_disk;
2491 rdev->data_offset = le64_to_cpu(sb->data_offset);
2492 rdev->new_data_offset = le64_to_cpu(sb->new_data_offset);
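
super_validate() at lines 2429-2492 rebuilds per-device state from the saved metadata: a recovery offset of MaxSector means the device finished recovery and is in sync, while a device that was marked Faulty is cleared and scheduled to recover from sector 0, remembering its previous slot. A stand-in sketch of those transitions (TOY_MAX_SECTOR plays the role of MaxSector):

    #include <stdio.h>

    #define TOY_MAX_SECTOR (~0ULL)          /* stand-in for md's MaxSector */

    enum { TOY_IN_SYNC = 1 << 0, TOY_FAULTY = 1 << 1 };

    struct toy_rdev {
            unsigned long flags;
            unsigned long long recovery_offset;
            int raid_disk, saved_raid_disk;
    };

    static void toy_validate_dev(struct toy_rdev *r,
                                 unsigned long long saved_recovery_offset)
    {
            r->recovery_offset = saved_recovery_offset;
            if (r->recovery_offset == TOY_MAX_SECTOR)
                    r->flags |= TOY_IN_SYNC;        /* finished recovering */

            if (r->flags & TOY_FAULTY) {
                    /* previously failed: rejoin, but recover from scratch */
                    r->flags &= ~(TOY_FAULTY | TOY_IN_SYNC);
                    r->recovery_offset = 0;
                    r->saved_raid_disk = r->raid_disk;
            }
    }

    int main(void)
    {
            struct toy_rdev r = {
                    .flags = TOY_FAULTY, .raid_disk = 2, .saved_raid_disk = -1,
            };

            toy_validate_dev(&r, TOY_MAX_SECTOR);
            printf("flags=%#lx recovery_offset=%llu saved=%d\n",
                   r.flags, r.recovery_offset, r.saved_raid_disk);
            return 0;
    }
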
2503 struct md_rdev *rdev, *freshest;
2507 rdev_for_each(rdev, mddev) {
2508 if (test_bit(Journal, &rdev->flags))
2511 if (!rdev->meta_bdev)
2515 rdev->sb_start = 0;
2516 rdev->sb_size = bdev_logical_block_size(rdev->meta_bdev);
2517 if (rdev->sb_size < sizeof(struct dm_raid_superblock) || rdev->sb_size > PAGE_SIZE) {
2534 r = super_load(rdev, freshest);
2538 freshest = rdev;
2556 * The rdev has to stay on the same_set list to allow for
2559 rdev->raid_disk = rdev->saved_raid_disk = -1;
2580 rdev_for_each(rdev, mddev)
2581 if (!test_bit(Journal, &rdev->flags) &&
2582 rdev != freshest &&
2583 super_validate(rs, rdev))
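
The superblock-analysis loop at lines 2503-2583 runs in two passes: load every member's superblock to pick the freshest one, then validate every other member against it. The sketch below keeps only that two-pass shape; the validation criterion here is a placeholder, not dm-raid's:

    #include <stdio.h>

    struct toy_rdev {
            unsigned long long events;
            int valid;
    };

    static int toy_analyse(struct toy_rdev *devs, int n)
    {
            struct toy_rdev *freshest = NULL;

            for (int i = 0; i < n; i++)     /* pass 1: pick the freshest */
                    if (!freshest || devs[i].events > freshest->events)
                            freshest = &devs[i];
            if (!freshest)
                    return -1;
            freshest->valid = 1;            /* the freshest member is trusted */

            for (int i = 0; i < n; i++)     /* pass 2: validate the others */
                    if (&devs[i] != freshest)
                            devs[i].valid = (devs[i].events == freshest->events);
            return 0;
    }

    int main(void)
    {
            struct toy_rdev devs[3] = { { 10, 0 }, { 12, 0 }, { 12, 0 } };

            toy_analyse(devs, 3);
            for (int i = 0; i < 3; i++)
                    printf("dev%d valid=%d\n", i, devs[i].valid);
            return 0;
    }
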
2599 struct md_rdev *rdev;
2610 rdev = &rs->dev[0].rdev;
2655 data_offset = rs->data_offset ? rdev->data_offset : 0;
2664 bdev_nr_sectors(rdev->bdev) - rs->md.dev_sectors < MIN_FREE_RESHAPE_SPACE) {
2675 rs->md.recovery_cp += rs->dev[0].rdev.data_offset;
2678 rdev_for_each(rdev, &rs->md) {
2679 if (!test_bit(Journal, &rdev->flags)) {
2680 rdev->data_offset = data_offset;
2681 rdev->new_data_offset = new_data_offset;
2692 struct md_rdev *rdev;
2694 rdev_for_each(rdev, &rs->md) {
2695 if (!test_bit(Journal, &rdev->flags)) {
2696 rdev->raid_disk = i++;
2697 rdev->saved_raid_disk = rdev->new_raid_disk = -1;
2708 struct md_rdev *rdev;
2710 sector_t new_data_offset = rs->dev[0].rdev.data_offset ? 0 : rs->data_offset;
2733 rdev = &rs->dev[d].rdev;
2736 clear_bit(In_sync, &rdev->flags);
2737 clear_bit(Faulty, &rdev->flags);
2738 mddev->recovery_cp = rdev->recovery_offset = 0;
2743 rdev->new_data_offset = new_data_offset;
2811 struct md_rdev *rdev;
2814 rdev_for_each(rdev, &rs->md)
2815 if (!test_bit(Journal, &rdev->flags)) {
2816 reshape_sectors = (rdev->data_offset > rdev->new_data_offset) ?
2817 rdev->data_offset - rdev->new_data_offset :
2818 rdev->new_data_offset - rdev->data_offset;
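
Lines 2811-2818 compute the room available to an out-of-place reshape as the absolute difference between the current and the new data offset (line 2920 later shows that a non-zero data offset selects the non-backwards direction). A trivial sketch of that difference:

    #include <stdio.h>

    static unsigned long long toy_reshape_sectors(unsigned long long data_offset,
                                                  unsigned long long new_data_offset)
    {
            return data_offset > new_data_offset ?
                   data_offset - new_data_offset :
                   new_data_offset - data_offset;
    }

    int main(void)
    {
            /* either direction of out-of-place reshape leaves 4096 sectors */
            printf("%llu\n", toy_reshape_sectors(4096, 0));
            printf("%llu\n", toy_reshape_sectors(0, 4096));
            return 0;
    }
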
2838 struct md_rdev *rdev;
2876 rdev = &rs->dev[d].rdev;
2877 clear_bit(In_sync, &rdev->flags);
2883 rdev->saved_raid_disk = -1;
2884 rdev->raid_disk = d;
2886 rdev->sectors = mddev->dev_sectors;
2887 rdev->recovery_offset = rs_is_raid1(rs) ? 0 : MaxSector;
2920 mddev->reshape_backwards = rs->dev[0].rdev.data_offset ? 0 : 1;
2928 rdev_for_each(rdev, &rs->md)
2929 if (!test_bit(Journal, &rdev->flags))
2930 rdev->sectors += reshape_sectors;
2965 if (!rs->dev[i].rdev.bdev ||
2966 !bdev_max_discard_sectors(rs->dev[i].rdev.bdev))
3081 ti->error = "Invalid rdev size";
3390 * Return status string for @rdev
3399 static const char *__raid_dev_status(struct raid_set *rs, struct md_rdev *rdev)
3401 if (!rdev->bdev)
3403 else if (test_bit(Faulty, &rdev->flags))
3405 else if (test_bit(Journal, &rdev->flags))
3409 !test_bit(In_sync, &rdev->flags)))
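
__raid_dev_status() at lines 3390-3409 maps a member device to a status string: "-" for a missing device, "D" for a faulty one, and an in-sync check for the rest. The sketch below mirrors only the flag checks visible above ('a' for a device still catching up, 'A' for a live in-sync one); the real function also special-cases journal devices and running recovery:

    #include <stdio.h>

    enum { TOY_IN_SYNC = 1 << 0, TOY_FAULTY = 1 << 1 };

    struct toy_rdev {
            int present;            /* stands in for rdev->bdev != NULL */
            unsigned long flags;
    };

    static char toy_dev_status(const struct toy_rdev *r)
    {
            if (!r->present)
                    return '-';             /* device missing from the set */
            if (r->flags & TOY_FAULTY)
                    return 'D';             /* dead/failed member */
            if (!(r->flags & TOY_IN_SYNC))
                    return 'a';             /* alive, still catching up */
            return 'A';                     /* alive and in sync */
    }

    int main(void)
    {
            struct toy_rdev devs[] = {
                    { 1, TOY_IN_SYNC }, { 1, TOY_FAULTY }, { 0, 0 }, { 1, 0 },
            };

            for (unsigned i = 0; i < sizeof(devs) / sizeof(devs[0]); i++)
                    putchar(toy_dev_status(&devs[i]));
            putchar('\n');
            return 0;
    }
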
3482 struct md_rdev *rdev;
3485 rdev_for_each(rdev, mddev)
3486 if (!test_bit(Journal, &rdev->flags) &&
3487 !test_bit(In_sync, &rdev->flags)) {
3540 DMEMIT(__raid_dev_status(rs, &rs->dev[i].rdev));
3585 DMEMIT(" %llu", (unsigned long long) rs->dev[0].rdev.data_offset);
3591 __raid_dev_status(rs, &rs->journal_dev.rdev) : "-");
3603 (test_bit(WriteMostly, &rs->dev[i].rdev.flags) ? 2 : 0);
3633 if (test_bit(WriteMostly, &rs->dev[i].rdev.flags))
3635 rs->dev[i].rdev.raid_disk);
3685 DMEMIT(__raid_dev_status(rs, &rs->dev[i].rdev));
3819 r = &rs->dev[i].rdev;