Lines Matching defs:mddev (two illustrative sketches of the recurring patterns follow the listing)
243 struct mddev md;
261 struct mddev *mddev = &rs->md;
263 l->new_level = mddev->new_level;
264 l->new_layout = mddev->new_layout;
265 l->new_chunk_sectors = mddev->new_chunk_sectors;
270 struct mddev *mddev = &rs->md;
272 mddev->new_level = l->new_level;
273 mddev->new_layout = l->new_layout;
274 mddev->new_chunk_sectors = l->new_chunk_sectors;
684 struct mddev *mddev = &rs->md;
691 rdev_for_each(rdev, mddev)
693 rdev->sectors = mddev->dev_sectors;
708 * Set the mddev properties in @rs to the current
713 struct mddev *mddev = &rs->md;
715 mddev->new_level = mddev->level;
716 mddev->new_layout = mddev->layout;
717 mddev->new_chunk_sectors = mddev->chunk_sectors;
721 * Set the mddev properties in @rs to the new
726 struct mddev *mddev = &rs->md;
728 mddev->level = mddev->new_level;
729 mddev->layout = mddev->new_layout;
730 mddev->chunk_sectors = mddev->new_chunk_sectors;
731 mddev->raid_disks = rs->raid_disks;
732 mddev->delta_disks = 0;
845 rs->dev[i].rdev.mddev = &rs->md;
1265 jdev->mddev = &rs->md;
1539 struct mddev *mddev = &rs->md;
1540 uint32_t min_stripes = max(mddev->chunk_sectors, mddev->new_chunk_sectors) / 2;
1554 conf = mddev->private;
1562 r = raid5_set_cache_size(mddev, nr_stripes);
1574 /* Return # of data stripes as kept in mddev as of @rs (i.e. as of superblock) */
1629 struct mddev *mddev = &rs->md;
1632 delta_disks = mddev->delta_disks;
1664 mddev->array_sectors = array_sectors;
1665 mddev->dev_sectors = dev_sectors;
1700 smp_rmb(); /* Make sure we access most actual mddev properties */
1717 struct mddev *mddev = &rs->md;
1730 switch (mddev->level) {
1733 if ((mddev->new_level == 1 || mddev->new_level == 5) &&
1734 mddev->raid_disks == 1)
1738 if (mddev->new_level == 10 &&
1739 !(rs->raid_disks % mddev->raid_disks))
1743 if (__within_range(mddev->new_level, 4, 6) &&
1744 mddev->new_layout == ALGORITHM_PARITY_N &&
1745 mddev->raid_disks > 1)
1752 if (__is_raid10_offset(mddev->layout))
1755 near_copies = __raid10_near_copies(mddev->layout);
1758 if (mddev->new_level == 0) {
1761 !(mddev->raid_disks % near_copies)) {
1762 mddev->raid_disks /= near_copies;
1763 mddev->delta_disks = mddev->raid_disks;
1769 __raid10_far_copies(mddev->layout) > 1)
1776 if (mddev->new_level == 1 &&
1777 max(near_copies, __raid10_far_copies(mddev->layout)) == mddev->raid_disks)
1781 if (__within_range(mddev->new_level, 4, 5) &&
1782 mddev->raid_disks == 2)
1788 if (__within_range(mddev->new_level, 4, 5) &&
1789 mddev->raid_disks == 2) {
1790 mddev->degraded = 1;
1795 if (mddev->new_level == 0 &&
1796 mddev->raid_disks == 1)
1800 if (mddev->new_level == 10)
1806 if (mddev->new_level == 0)
1810 if ((mddev->new_level == 1 || mddev->new_level == 5) &&
1811 mddev->raid_disks == 2)
1815 if (__within_range(mddev->new_level, 5, 6) &&
1816 mddev->layout == ALGORITHM_PARITY_N)
1822 if (mddev->new_level == 0 &&
1823 mddev->layout == ALGORITHM_PARITY_N)
1827 if (mddev->new_level == 4 &&
1828 mddev->layout == ALGORITHM_PARITY_N)
1832 if ((mddev->new_level == 1 || mddev->new_level == 4 || mddev->new_level == 10) &&
1833 mddev->raid_disks == 2)
1837 if (mddev->new_level == 6 &&
1838 ((mddev->layout == ALGORITHM_PARITY_N && mddev->new_layout == ALGORITHM_PARITY_N) ||
1839 __within_range(mddev->new_layout, ALGORITHM_LEFT_ASYMMETRIC_6, ALGORITHM_RIGHT_SYMMETRIC_6)))
1845 if (mddev->new_level == 0 &&
1846 mddev->layout == ALGORITHM_PARITY_N)
1850 if (mddev->new_level == 4 &&
1851 mddev->layout == ALGORITHM_PARITY_N)
1855 if (mddev->new_level == 5 &&
1856 ((mddev->layout == ALGORITHM_PARITY_N && mddev->new_layout == ALGORITHM_PARITY_N) ||
1857 __within_range(mddev->new_layout, ALGORITHM_LEFT_ASYMMETRIC, ALGORITHM_RIGHT_SYMMETRIC)))
1886 struct mddev *mddev = &rs->md;
1902 mddev->raid_disks != rs->raid_disks;
1907 !__is_raid10_far(mddev->new_layout) &&
2014 struct mddev *mddev = &rs->md;
2016 if (!mddev->pers || !mddev->pers->check_reshape)
2018 else if (mddev->degraded)
2044 md_error(rdev->mddev, rdev);
2081 static void super_sync(struct mddev *mddev, struct md_rdev *rdev)
2087 struct raid_set *rs = container_of(mddev, struct raid_set, md);
2111 sb->num_devices = cpu_to_le32(mddev->raid_disks);
2114 sb->events = cpu_to_le64(mddev->events);
2117 sb->array_resync_offset = cpu_to_le64(mddev->recovery_cp);
2119 sb->level = cpu_to_le32(mddev->level);
2120 sb->layout = cpu_to_le32(mddev->layout);
2121 sb->stripe_sectors = cpu_to_le32(mddev->chunk_sectors);
2128 sb->new_level = cpu_to_le32(mddev->new_level);
2129 sb->new_layout = cpu_to_le32(mddev->new_layout);
2130 sb->new_stripe_sectors = cpu_to_le32(mddev->new_chunk_sectors);
2132 sb->delta_disks = cpu_to_le32(mddev->delta_disks);
2135 sb->reshape_position = cpu_to_le64(mddev->reshape_position);
2140 if (mddev->delta_disks < 0 || mddev->reshape_backwards)
2147 sb->array_sectors = cpu_to_le64(mddev->array_sectors);
2185 super_sync(rdev->mddev, rdev);
2191 set_bit(MD_SB_CHANGE_DEVS, &rdev->mddev->sb_flags);
2212 struct mddev *mddev = &rs->md;
2226 mddev->events = events_sb ? : 1;
2228 mddev->reshape_position = MaxSector;
2230 mddev->raid_disks = le32_to_cpu(sb->num_devices);
2231 mddev->level = le32_to_cpu(sb->level);
2232 mddev->layout = le32_to_cpu(sb->layout);
2233 mddev->chunk_sectors = le32_to_cpu(sb->stripe_sectors);
2241 mddev->new_level = le32_to_cpu(sb->new_level);
2242 mddev->new_layout = le32_to_cpu(sb->new_layout);
2243 mddev->new_chunk_sectors = le32_to_cpu(sb->new_stripe_sectors);
2244 mddev->delta_disks = le32_to_cpu(sb->delta_disks);
2245 mddev->array_sectors = le64_to_cpu(sb->array_sectors);
2254 if (mddev->delta_disks < 0 ||
2255 (!mddev->delta_disks && (le32_to_cpu(sb->flags) & SB_FLAG_RESHAPE_BACKWARDS)))
2256 mddev->reshape_backwards = 1;
2258 mddev->reshape_backwards = 0;
2260 mddev->reshape_position = le64_to_cpu(sb->reshape_position);
2261 rs->raid_type = get_raid_type_by_ll(mddev->level, mddev->layout);
2268 struct raid_type *rt_cur = get_raid_type_by_ll(mddev->level, mddev->layout);
2269 struct raid_type *rt_new = get_raid_type_by_ll(mddev->new_level, mddev->new_layout);
2280 if (mddev->layout != mddev->new_layout) {
2286 le32_to_cpu(sb->layout), mddev->new_layout);
2288 if (mddev->chunk_sectors != mddev->new_chunk_sectors)
2290 mddev->chunk_sectors, mddev->new_chunk_sectors);
2293 mddev->raid_disks, mddev->raid_disks + rs->delta_disks);
2296 raid10_md_layout_to_format(mddev->layout),
2297 raid10_md_layout_to_copies(mddev->layout));
2299 raid10_md_layout_to_format(mddev->new_layout),
2300 raid10_md_layout_to_copies(mddev->new_layout));
2309 mddev->recovery_cp = le64_to_cpu(sb->array_resync_offset);
2327 rdev_for_each(r, mddev) {
2350 set_bit(MD_ARRAY_FIRST_USE, &mddev->flags);
2369 (unsigned long long) mddev->recovery_cp);
2373 (unsigned long long) mddev->reshape_position);
2383 rdev_for_each(r, mddev) {
2400 if (rs_is_raid10(rs) && __is_raid10_near(mddev->layout)) {
2401 if (mddev->raid_disks % __raid10_near_copies(mddev->layout) ||
2434 struct mddev *mddev = &rs->md;
2443 * If mddev->events is not set, we know we have not yet initialized
2446 if (!mddev->events && super_init_validation(rs, rdev))
2461 mddev->bitmap_info.offset = (rt_is_raid0(rs->raid_type) || rs->journal_dev.dev) ? 0 : to_sector(4096);
2462 mddev->bitmap_info.default_offset = mddev->bitmap_info.offset;
2507 struct mddev *mddev = &rs->md;
2510 rdev_for_each(rdev, mddev) {
2583 rdev_for_each(rdev, mddev)
2710 struct mddev *mddev = &rs->md;
2712 unsigned int d = mddev->raid_disks = rs->raid_disks;
2721 mddev->layout = raid10_format_to_md_layout(rs, ALGORITHM_RAID10_FAR,
2725 mddev->layout = raid10_format_to_md_layout(rs, ALGORITHM_RAID10_NEAR,
2732 clear_bit(MD_ARRAY_FIRST_USE, &mddev->flags);
2733 mddev->recovery_cp = MaxSector;
2741 mddev->recovery_cp = rdev->recovery_offset = 0;
2743 set_bit(MD_ARRAY_FIRST_USE, &mddev->flags);
2756 struct mddev *mddev = &rs->md;
2759 if (rs->raid_disks != mddev->raid_disks &&
2760 __is_raid10_near(mddev->layout) &&
2762 rs->raid10_copies != __raid10_near_copies(mddev->layout)) {
2776 mddev->layout = raid10_format_to_md_layout(rs, ALGORITHM_RAID10_NEAR,
2778 mddev->new_layout = mddev->layout;
2789 mddev->degraded = rs->delta_disks < 0 ? -rs->delta_disks : rs->delta_disks;
2793 mddev->raid_disks = rs->raid_disks;
2804 } else if (mddev->raid_disks < rs->raid_disks)
2840 struct mddev *mddev = &rs->md;
2843 mddev->delta_disks = rs->delta_disks;
2844 cur_raid_devs = mddev->raid_disks;
2847 if (mddev->delta_disks &&
2848 mddev->layout != mddev->new_layout) {
2850 mddev->new_layout = mddev->layout;
2889 rdev->sectors = mddev->dev_sectors;
2893 mddev->reshape_backwards = 0; /* adding disk(s) -> forward reshape */
2898 mddev->reshape_backwards = 1; /* removing disk(s) -> backward reshape */
2923 mddev->reshape_backwards = rs->dev[0].rdev.data_offset ? 0 : 1;
2930 if (!mddev->reshape_backwards)
3329 struct mddev *mddev = &rs->md;
3333 * mddev->array_sectors will differ during the process
3334 * (ti->len > mddev->array_sectors), so we have to requeue
3335 * bios with addresses > mddev->array_sectors here or
3339 if (unlikely(bio_end_sector(bio) > mddev->array_sectors))
3342 md_handle_request(mddev, bio);
3365 /* Return enum sync_state for @mddev derived from @recovery flags */
3366 static enum sync_state decipher_sync_action(struct mddev *mddev, unsigned long recovery)
3374 (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &recovery)))) {
3389 if (mddev->reshape_position != MaxSector)
3427 struct mddev *mddev = &rs->md;
3438 r = mddev->recovery_cp;
3440 r = mddev->curr_resync_completed;
3492 rdev_for_each(rdev, mddev)
3514 struct mddev *mddev = &rs->md;
3515 struct r5conf *conf = rs_is_raid456(rs) ? mddev->private : NULL;
3528 rt = get_raid_type_by_ll(mddev->new_level, mddev->new_layout);
3532 DMEMIT("%s %d ", rt->name, mddev->raid_disks);
3534 /* Access most recent mddev properties for status output */
3538 mddev->resync_max_sectors : mddev->dev_sectors;
3540 state = decipher_sync_action(mddev, recovery);
3542 resync_mismatches = (mddev->last_sync_action && !strcasecmp(mddev->last_sync_action, "check")) ?
3543 atomic64_read(&mddev->resync_mismatches) : 0;
3620 DMEMIT("%s %u %u", rs->raid_type->name, raid_param_cnt, mddev->new_chunk_sectors);
3631 mddev->bitmap_info.daemon_sleep);
3634 mddev->sync_speed_min);
3637 mddev->sync_speed_max);
3645 mddev->bitmap_info.max_write_behind);
3651 (unsigned long long) to_sector(mddev->bitmap_info.chunksize));
3654 raid10_md_layout_to_copies(mddev->layout));
3657 raid10_md_layout_to_format(mddev->layout));
3660 max(rs->delta_disks, mddev->delta_disks));
3681 struct mddev *mddev = &rs->md;
3683 if (!mddev->pers || !mddev->pers->sync_request)
3687 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
3689 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
3692 if (mddev->sync_thread) {
3693 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
3694 md_reap_sync_thread(mddev);
3696 } else if (decipher_sync_action(mddev, mddev->recovery) != st_idle)
3701 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
3704 set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
3705 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
3706 set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
3708 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
3709 set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
3713 if (mddev->ro == 2) {
3717 mddev->ro = 0;
3718 if (!mddev->suspended && mddev->sync_thread)
3719 md_wakeup_thread(mddev->sync_thread);
3721 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3722 if (!mddev->suspended && mddev->thread)
3723 md_wakeup_thread(mddev->thread);
3786 struct mddev *mddev = &rs->md;
3790 if (!mddev->pers || !mddev->pers->hot_add_disk || !mddev->pers->hot_remove_disk)
3819 if (mddev->pers->hot_remove_disk(mddev, r)) {
3830 if (mddev->pers->hot_add_disk(mddev, r)) {
3880 struct mddev *mddev = &rs->md;
3881 int ro = mddev->ro;
3883 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
3884 mddev->ro = 0;
3885 md_update_sb(mddev, 1);
3886 mddev->ro = ro;
3899 struct mddev *mddev = &rs->md;
3900 struct md_personality *pers = mddev->pers;
3903 set_bit(MD_RECOVERY_WAIT, &mddev->recovery);
3914 r = pers->check_reshape(mddev);
3925 r = pers->start_reshape(mddev);
3946 struct mddev *mddev = &rs->md;
3966 /* We are extending the raid set size, adjust mddev/md_rdev sizes and set capacity. */
3968 mddev->array_sectors = rs->array_sectors;
3969 mddev->dev_sectors = rs->dev_sectors;
3975 if (test_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags) && mddev->bitmap &&
3978 mddev->bitmap_info.chunksize != to_bytes(rs->requested_bitmap_chunk_sectors)))) {
3979 int chunksize = to_bytes(rs->requested_bitmap_chunk_sectors) ?: mddev->bitmap_info.chunksize;
3981 r = md_bitmap_resize(mddev->bitmap, mddev->dev_sectors, chunksize, 0);
3988 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
3989 if (mddev->recovery_cp && mddev->recovery_cp < MaxSector) {
3990 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
3991 mddev->resync_min = mddev->recovery_cp;
3993 mddev->resync_max_sectors = mddev->dev_sectors;
4000 mddev_lock_nointr(mddev);
4002 mddev_unlock(mddev);
4014 struct mddev *mddev = &rs->md;
4027 if (mddev->delta_disks < 0)
4030 mddev_lock_nointr(mddev);
4031 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
4032 mddev->ro = 0;
4033 mddev->in_sync = 0;
4034 mddev_resume(mddev);
4035 mddev_unlock(mddev);
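
The listing above repeatedly shows the central pattern of dm-raid.c: a struct mddev embedded by value as the md member of the per-target struct raid_set (listing line 243), local "struct mddev *mddev = &rs->md" pointers taken wherever md state is touched, and container_of() used to get back from an mddev handed in by the md core to the enclosing raid_set (listing line 2087). Below is a minimal userspace sketch of that embedding pattern, under the assumption of simplified stand-in struct layouts and a hypothetical to_raid_set() helper; it is not the kernel code.

/*
 * Minimal sketch of the embedding pattern shown in the listing.
 * The struct layouts and field sets are simplified stand-ins; the real
 * definitions live in drivers/md/md.h and drivers/md/dm-raid.c.
 */
#include <stddef.h>
#include <stdio.h>

struct mddev {
	int level;
	int new_level;
	/* the real struct mddev carries many more fields */
};

struct raid_set {
	unsigned long runtime_flags;	/* placeholder for other raid_set state */
	struct mddev md;		/* embedded by value, not pointed to (listing line 243) */
};

/* Userspace stand-in for the kernel's container_of() */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* super_sync()-style callback: the md core hands back only the mddev (listing line 2087). */
static struct raid_set *to_raid_set(struct mddev *mddev)
{
	return container_of(mddev, struct raid_set, md);
}

int main(void)
{
	struct raid_set rs = { .md = { .level = 5, .new_level = 6 } };
	struct mddev *mddev = &rs.md;	/* the "&rs->md" idiom seen throughout the listing */

	mddev->level = mddev->new_level;	/* cf. rs_set_new()/rs_set_cur() around lines 715-730 */
	printf("container_of round-trip ok: %d\n", to_raid_set(mddev) == &rs);
	return 0;
}

Because the mddev is embedded rather than allocated separately, taking &rs->md is free and container_of() recovers the raid_set without any lookup table, which is why so many listing lines open with the same local-pointer idiom.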
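
Listing lines 2111-2147 and 2230-2245 show the other recurring pattern: super_sync() stores mddev geometry into the on-disk superblock with cpu_to_le32()/cpu_to_le64(), and super_init_validation() reads it back with le32_to_cpu()/le64_to_cpu(). The sketch below reproduces that little-endian round trip in userspace; the cut-down structs and the sb_store()/sb_load() helpers are hypothetical stand-ins, with htole32()/le32toh() from <endian.h> playing the role of the kernel conversion macros.

/*
 * Cut-down sketch of the superblock round trip visible at listing lines
 * 2111-2147 (super_sync) and 2230-2245 (super_init_validation).
 * Struct and helper names are simplified stand-ins; field names mirror
 * the listing, but this is not the kernel layout.
 */
#include <stdint.h>
#include <stdio.h>
#include <endian.h>

struct mddev_geom {			/* hypothetical subset of struct mddev */
	uint32_t raid_disks, level, layout, chunk_sectors;
	uint64_t array_sectors;
};

struct dm_raid_sb_min {			/* hypothetical subset of the on-disk superblock */
	uint32_t num_devices, level, layout, stripe_sectors;
	uint64_t array_sectors;
};

/* Mirrors the cpu_to_le*() stores in super_sync() */
static void sb_store(const struct mddev_geom *mddev, struct dm_raid_sb_min *sb)
{
	sb->num_devices    = htole32(mddev->raid_disks);
	sb->level          = htole32(mddev->level);
	sb->layout         = htole32(mddev->layout);
	sb->stripe_sectors = htole32(mddev->chunk_sectors);
	sb->array_sectors  = htole64(mddev->array_sectors);
}

/* Mirrors the le*_to_cpu() loads in super_init_validation() */
static void sb_load(const struct dm_raid_sb_min *sb, struct mddev_geom *mddev)
{
	mddev->raid_disks    = le32toh(sb->num_devices);
	mddev->level         = le32toh(sb->level);
	mddev->layout        = le32toh(sb->layout);
	mddev->chunk_sectors = le32toh(sb->stripe_sectors);
	mddev->array_sectors = le64toh(sb->array_sectors);
}

int main(void)
{
	struct mddev_geom in = { .raid_disks = 4, .level = 6, .layout = 2,
				 .chunk_sectors = 128, .array_sectors = 1 << 20 };
	struct mddev_geom out = { 0 };
	struct dm_raid_sb_min sb = { 0 };

	sb_store(&in, &sb);
	sb_load(&sb, &out);
	printf("level=%u layout=%u chunk=%u disks=%u sectors=%llu\n",
	       out.level, out.layout, out.chunk_sectors, out.raid_disks,
	       (unsigned long long)out.array_sectors);
	return 0;
}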