Lines matching defs:mddev in drivers/md/raid0.c
35 static void dump_zones(struct mddev *mddev)
40 struct r0conf *conf = mddev->private;
43 mdname(mddev),
63 static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
76 rdev_for_each(rdev1, mddev) {
78 mdname(mddev),
84 sector_div(sectors, mddev->chunk_sectors);
85 rdev1->sectors = sectors * mddev->chunk_sectors;
90 rdev_for_each(rdev2, mddev) {
93 mdname(mddev),
100 mdname(mddev));
109 mdname(mddev));
114 mdname(mddev));
118 mdname(mddev));
121 mdname(mddev), conf->nr_strip_zones);
125 mdname(mddev), conf->nr_strip_zones);
131 if ((mddev->chunk_sectors << 9) % blksize) {
133 mdname(mddev),
134 mddev->chunk_sectors << 9, blksize);
147 mddev->raid_disks),
160 rdev_for_each(rdev1, mddev) {
163 if (mddev->level == 10) {
169 if (mddev->level == 1) {
179 mdname(mddev));
182 if (j >= mddev->raid_disks) {
184 mdname(mddev), j);
189 mdname(mddev), j);
198 if (cnt != mddev->raid_disks) {
200 mdname(mddev), cnt, mddev->raid_disks);
214 dev = conf->devlist + i * mddev->raid_disks;
216 pr_debug("md/raid0:%s: zone %d\n", mdname(mddev), i);
225 mdname(mddev),
231 mdname(mddev),
238 mdname(mddev),
246 mdname(mddev),
253 mdname(mddev),
259 } else if (mddev->layout == RAID0_ORIG_LAYOUT ||
260 mddev->layout == RAID0_ALT_MULTIZONE_LAYOUT) {
261 conf->layout = mddev->layout;
267 mdname(mddev));
277 sector_div(first_sector, mddev->chunk_sectors);
285 pr_debug("md/raid0:%s: done.\n", mdname(mddev));
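Lines 84-85 above trim each member device to a whole number of chunks before the zones are sized. A minimal user-space sketch of that arithmetic, with hypothetical sizes (sector_div() is the kernel's in-place 64-bit divide; plain integer division stands in for it here):

    #include <stdio.h>

    int main(void)
    {
        unsigned long long dev_sectors = 1000005; /* hypothetical member size */
        unsigned int chunk_sectors = 128;         /* 64 KiB in 512-byte sectors */

        /* keep only whole chunks, as lines 84-85 do */
        unsigned long long sectors = dev_sectors / chunk_sectors;
        sectors *= chunk_sectors;

        printf("usable: %llu sectors (%llu dropped)\n",
               sectors, dev_sectors - sectors);
        return 0;
    }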
320 static struct md_rdev *map_sector(struct mddev *mddev, struct strip_zone *zone,
325 struct r0conf *conf = mddev->private;
327 unsigned int chunk_sects = mddev->chunk_sectors;
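map_sector() converts a zone-relative sector into a member device plus an offset on that device. A stand-alone model of the striping arithmetic it implements (the kernel adds a shift/mask fast path when chunk_sects is a power of two; the variable names here are illustrative, not the kernel's):

    #include <stdio.h>

    int main(void)
    {
        unsigned long long sector = 5000; /* offset within the zone */
        unsigned int chunk_sects = 128;   /* mddev->chunk_sectors */
        unsigned int nb_dev = 4;          /* devices in this zone */

        unsigned long long chunk = sector / chunk_sects; /* chunk index in zone */
        unsigned int in_chunk = sector % chunk_sects;    /* offset inside chunk */
        unsigned int dev = chunk % nb_dev;               /* round-robin device */
        unsigned long long dev_sector =
                (chunk / nb_dev) * chunk_sects + in_chunk;

        printf("device %u, sector %llu\n", dev, dev_sector);
        return 0;
    }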
353 static sector_t raid0_size(struct mddev *mddev, sector_t sectors, int raid_disks)
361 rdev_for_each(rdev, mddev)
363 ~(sector_t)(mddev->chunk_sectors-1));
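raid0_size() sums each member's capacity rounded down to a chunk multiple; the mask on line 363 assumes a power-of-two chunk, and the members were already chunk-aligned by create_strip_zones(). An equivalent sketch with made-up sizes:

    #include <stdio.h>

    int main(void)
    {
        unsigned long long members[] = { 1000064, 2000128 }; /* hypothetical */
        unsigned int chunk_sectors = 128;                    /* power of two */
        unsigned long long array_sectors = 0;

        for (int i = 0; i < 2; i++)
            array_sectors += members[i] &
                             ~(unsigned long long)(chunk_sectors - 1);

        printf("array: %llu sectors\n", array_sectors);
        return 0;
    }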
368 static void free_conf(struct mddev *mddev, struct r0conf *conf)
375 static void raid0_free(struct mddev *mddev, void *priv)
379 free_conf(mddev, conf);
382 static int raid0_run(struct mddev *mddev)
387 if (mddev->chunk_sectors == 0) {
388 pr_warn("md/raid0:%s: chunk size must be set.\n", mdname(mddev));
391 if (md_check_no_bitmap(mddev))
395 if (mddev->private == NULL) {
396 ret = create_strip_zones(mddev, &conf);
399 mddev->private = conf;
401 conf = mddev->private;
402 if (mddev->queue) {
405 blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
406 blk_queue_max_write_zeroes_sectors(mddev->queue, mddev->chunk_sectors);
408 blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
409 blk_queue_io_opt(mddev->queue,
410 (mddev->chunk_sectors << 9) * mddev->raid_disks);
412 rdev_for_each(rdev, mddev) {
413 disk_stack_limits(mddev->gendisk, rdev->bdev,
419 md_set_array_sectors(mddev, raid0_size(mddev, 0, 0));
422 mdname(mddev),
423 (unsigned long long)mddev->array_sectors);
425 dump_zones(mddev);
427 ret = md_integrity_register(mddev);
429 free_conf(mddev, conf);
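The queue limits set at lines 405-410 are plain unit conversions: << 9 turns 512-byte sectors into bytes, io_min advertises one chunk, io_opt one full stripe. With hypothetical values:

    #include <stdio.h>

    int main(void)
    {
        unsigned int chunk_sectors = 128; /* 64 KiB chunk */
        unsigned int raid_disks = 4;

        unsigned int io_min = chunk_sectors << 9;  /* 65536 B: one chunk */
        unsigned int io_opt = io_min * raid_disks; /* 262144 B: full stripe */

        printf("io_min=%u io_opt=%u\n", io_min, io_opt);
        return 0;
    }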
448 static void raid0_handle_discard(struct mddev *mddev, struct bio *bio)
450 struct r0conf *conf = mddev->private;
469 &mddev->bio_set);
482 stripe_size = zone->nb_dev * mddev->chunk_sectors;
491 sector_div(orig_start, mddev->chunk_sectors);
496 sector_div(orig_end, mddev->chunk_sectors);
502 mddev->chunk_sectors;
504 mddev->chunk_sectors;
507 mddev->chunk_sectors) +
508 first_stripe_index * mddev->chunk_sectors;
510 mddev->chunk_sectors) +
511 last_stripe_index * mddev->chunk_sectors;
523 mddev->chunk_sectors;
525 dev_start = first_stripe_index * mddev->chunk_sectors;
530 dev_end = (last_stripe_index + 1) * mddev->chunk_sectors;
532 dev_end = last_stripe_index * mddev->chunk_sectors;
541 md_submit_discard_bio(mddev, rdev, bio,
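The arithmetic at lines 482-532 decomposes a zone-relative discard range into per-disk sub-ranges one stripe row at a time. A simplified stand-alone model of the boundary bookkeeping (the real code also trims the first and last row per disk; names are illustrative):

    #include <stdio.h>

    int main(void)
    {
        unsigned int chunk_sects = 128, nb_dev = 4;
        unsigned long long start = 1000, end = 9000; /* zone-relative range */

        unsigned long long stripe_size =
                (unsigned long long)nb_dev * chunk_sects; /* line 482 */
        unsigned long long first_stripe = start / stripe_size;
        unsigned long long last_stripe = end / stripe_size;

        printf("stripe rows %llu..%llu, %llu sectors per row\n",
               first_stripe, last_stripe, stripe_size);
        return 0;
    }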
548 static void raid0_map_submit_bio(struct mddev *mddev, struct bio *bio)
550 struct r0conf *conf = mddev->private;
556 md_account_bio(mddev, &bio);
558 zone = find_zone(mddev->private, &sector);
561 tmp_dev = map_sector(mddev, zone, bio_sector, &sector);
564 tmp_dev = map_sector(mddev, zone, sector, &sector);
567 WARN(1, "md/raid0:%s: Invalid layout\n", mdname(mddev));
574 md_error(mddev, tmp_dev);
582 if (mddev->gendisk)
583 trace_block_bio_remap(bio, disk_devt(mddev->gendisk),
585 mddev_check_write_zeroes(mddev, bio);
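Lines 558-567 are the layout dispatch: both multi-zone layouts share one chunk mapper, but RAID0_ORIG_LAYOUT feeds it the absolute array sector (bio_sector) while RAID0_ALT_MULTIZONE_LAYOUT feeds it the zone-relative sector that find_zone() left behind. A simplified model of why the two place chunks differently in a later zone (the real mapper also rescales the device offset; values are hypothetical):

    #include <stdio.h>

    int main(void)
    {
        unsigned int chunk = 128, nb_dev = 2; /* two disks left in zone 1 */
        unsigned long long zone_start = 384;  /* zone begins at chunk #3 */
        unsigned long long abs_sector = 400;
        unsigned long long rel_sector = abs_sector - zone_start;

        unsigned int dev_orig = (abs_sector / chunk) % nb_dev; /* orig layout */
        unsigned int dev_alt = (rel_sector / chunk) % nb_dev;  /* alt layout */

        /* they differ whenever zone_start/chunk is not a multiple of nb_dev */
        printf("orig -> dev %u, alt -> dev %u\n", dev_orig, dev_alt);
        return 0;
    }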
589 static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
596 && md_flush_request(mddev, bio))
600 raid0_handle_discard(mddev, bio);
605 chunk_sects = mddev->chunk_sectors;
614 &mddev->bio_set);
616 raid0_map_submit_bio(mddev, bio);
620 raid0_map_submit_bio(mddev, bio);
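Lines 605-620 ensure no bio crosses a chunk boundary: a request longer than the space left in its chunk is split, the head mapped now and the tail resubmitted (where it may split again). The boundary arithmetic in user space:

    #include <stdio.h>

    int main(void)
    {
        unsigned int chunk_sects = 128;
        unsigned long long bio_sector = 1000; /* start of the request */
        unsigned int bio_sectors = 300;       /* its length */

        unsigned int room = chunk_sects - (bio_sector % chunk_sects);

        if (bio_sectors > room)
            printf("split: %u sectors now, %u resubmitted\n",
                   room, bio_sectors - room);
        else
            printf("fits within the chunk\n");
        return 0;
    }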
624 static void raid0_status(struct seq_file *seq, struct mddev *mddev)
626 seq_printf(seq, " %dk chunks", mddev->chunk_sectors / 2);
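The / 2 on line 626 converts 512-byte sectors to KiB (two sectors per KiB), so a 128-sector chunk is reported as " 64k chunks":

    #include <stdio.h>

    int main(void)
    {
        unsigned int chunk_sectors = 128;
        printf(" %dk chunks\n", chunk_sectors / 2); /* -> " 64k chunks" */
        return 0;
    }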
630 static void raid0_error(struct mddev *mddev, struct md_rdev *rdev)
632 if (!test_and_set_bit(MD_BROKEN, &mddev->flags)) {
633 char *md_name = mdname(mddev);
640 static void *raid0_takeover_raid45(struct mddev *mddev)
645 if (mddev->degraded != 1) {
647 mdname(mddev),
648 mddev->degraded);
652 rdev_for_each(rdev, mddev) {
654 if (rdev->raid_disk == mddev->raid_disks-1) {
656 mdname(mddev));
659 rdev->sectors = mddev->dev_sectors;
663 mddev->new_level = 0;
664 mddev->new_layout = 0;
665 mddev->new_chunk_sectors = mddev->chunk_sectors;
666 mddev->raid_disks--;
667 mddev->delta_disks = -1;
669 mddev->recovery_cp = MaxSector;
670 mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);
672 create_strip_zones(mddev, &priv_conf);
677 static void *raid0_takeover_raid10(struct mddev *mddev)
687 if (mddev->layout != ((1 << 8) + 2)) {
689 mdname(mddev),
690 mddev->layout);
693 if (mddev->raid_disks & 1) {
695 mdname(mddev));
698 if (mddev->degraded != (mddev->raid_disks>>1)) {
700 mdname(mddev));
705 mddev->new_level = 0;
706 mddev->new_layout = 0;
707 mddev->new_chunk_sectors = mddev->chunk_sectors;
708 mddev->delta_disks = - mddev->raid_disks / 2;
709 mddev->raid_disks += mddev->delta_disks;
710 mddev->degraded = 0;
712 mddev->recovery_cp = MaxSector;
713 mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);
715 create_strip_zones(mddev, &priv_conf);
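The magic value on line 687 is the raid10 layout word: the low byte holds the near-copy count and the next byte the far-copy count, so (1 << 8) + 2 means near = 2, far = 1, a plain two-copy near layout, the only geometry that collapses cleanly into a stripe set once half the disks are dropped (lines 708-709). Decoding it:

    #include <stdio.h>

    int main(void)
    {
        unsigned int layout = (1 << 8) + 2;
        unsigned int near_copies = layout & 0xff;       /* 2 */
        unsigned int far_copies = (layout >> 8) & 0xff; /* 1 */

        printf("near=%u far=%u\n", near_copies, far_copies);
        return 0;
    }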
719 static void *raid0_takeover_raid1(struct mddev *mddev)
727 if ((mddev->raid_disks - 1) != mddev->degraded) {
729 mdname(mddev));
740 while (chunksect && (mddev->array_sectors & (chunksect - 1)))
748 mddev->new_level = 0;
749 mddev->new_layout = 0;
750 mddev->new_chunk_sectors = chunksect;
751 mddev->chunk_sectors = chunksect;
752 mddev->delta_disks = 1 - mddev->raid_disks;
753 mddev->raid_disks = 1;
755 mddev->recovery_cp = MaxSector;
756 mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);
758 create_strip_zones(mddev, &priv_conf);
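Line 740 is the chunk-size search for a raid1 takeover: start at 128 sectors (64 KiB) and halve until the chunk exactly divides the array size, a test that only works while chunksect stays a power of two; the kernel then rejects results smaller than a page. Stand-alone, with a hypothetical array size:

    #include <stdio.h>

    int main(void)
    {
        unsigned long long array_sectors = 1000048; /* hypothetical raid1 size */
        unsigned int chunksect = 64 * 2;            /* 64 KiB in sectors */

        while (chunksect && (array_sectors & (chunksect - 1)))
            chunksect >>= 1; /* halve until it divides the array exactly */

        printf("chunk: %u sectors (%u KiB)\n", chunksect, chunksect / 2);
        return 0;
    }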
762 static void *raid0_takeover(struct mddev *mddev)
771 if (mddev->bitmap) {
773 mdname(mddev));
776 if (mddev->level == 4)
777 return raid0_takeover_raid45(mddev);
779 if (mddev->level == 5) {
780 if (mddev->layout == ALGORITHM_PARITY_N)
781 return raid0_takeover_raid45(mddev);
784 mdname(mddev), ALGORITHM_PARITY_N);
787 if (mddev->level == 10)
788 return raid0_takeover_raid10(mddev);
790 if (mddev->level == 1)
791 return raid0_takeover_raid1(mddev);
794 mddev->level);
799 static void raid0_quiesce(struct mddev *mddev, int quiesce)