Lines matching refs:geo (references to the RAID10 geometry, struct geom / conf->geo / conf->prev, in drivers/md/raid10.c; the numbers on the left are source line numbers)
561 static void __raid10_find_phys(struct geom *geo, struct r10bio *r10bio)
571 last_far_set_start = (geo->raid_disks / geo->far_set_size) - 1;
572 last_far_set_start *= geo->far_set_size;
574 last_far_set_size = geo->far_set_size;
575 last_far_set_size += (geo->raid_disks % geo->far_set_size);
578 chunk = r10bio->sector >> geo->chunk_shift;
579 sector = r10bio->sector & geo->chunk_mask;
581 chunk *= geo->near_copies;
583 dev = sector_div(stripe, geo->raid_disks);
584 if (geo->far_offset)
585 stripe *= geo->far_copies;
587 sector += stripe << geo->chunk_shift;
590 for (n = 0; n < geo->near_copies; n++) {
598 for (f = 1; f < geo->far_copies; f++) {
599 set = d / geo->far_set_size;
600 d += geo->near_copies;
602 if ((geo->raid_disks % geo->far_set_size) &&
608 d %= geo->far_set_size;
609 d += geo->far_set_size * set;
611 s += geo->stride;
617 if (dev >= geo->raid_disks) {
619 sector += (geo->chunk_mask + 1);
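
__raid10_find_phys() is the forward mapping from an array sector to the (device, device-sector) pair of every copy: the sector is split into a chunk index and an in-chunk offset via chunk_shift/chunk_mask, near copies are rotated across adjacent devices, and each further far copy is pushed geo->stride sectors down the members (line 611). Below is a minimal user-space sketch of the near-copy case only, leaving far copies and far sets out entirely; struct geo_sketch, find_phys_near and the sample geometry are hypothetical, with fields mirroring struct geom:

	#include <stdint.h>
	#include <stdio.h>

	struct geo_sketch {
		int raid_disks;
		int near_copies;
		int chunk_shift;	/* chunk size is 1 << chunk_shift sectors */
		uint64_t chunk_mask;	/* (1 << chunk_shift) - 1 */
	};

	static void find_phys_near(const struct geo_sketch *g, uint64_t asector)
	{
		uint64_t chunk = asector >> g->chunk_shift;	/* array chunk index */
		uint64_t offset = asector & g->chunk_mask;	/* offset inside chunk */
		uint64_t stripe;
		int dev, n;

		chunk *= g->near_copies;	/* each chunk is stored near_copies times */
		stripe = chunk / g->raid_disks;	/* chunk row on every member */
		dev = chunk % g->raid_disks;	/* member holding the first copy */

		for (n = 0; n < g->near_copies; n++) {
			printf("copy %d -> dev %d, sector %llu\n", n, dev,
			       (unsigned long long)((stripe << g->chunk_shift) + offset));
			if (++dev >= g->raid_disks) {
				dev = 0;	/* wrapping advances one chunk row, */
				stripe++;	/* like sector += chunk_mask + 1    */
			}
		}
	}

	int main(void)
	{
		struct geo_sketch g = { 4, 2, 10, 1023 };	/* 4 disks, n2, 512K chunks */

		find_phys_near(&g, 5000);	/* chunk 4, offset 904: lands on dev 0 and dev 1 */
		return 0;
	}
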
626 struct geom *geo = &conf->geo;
632 geo = &conf->prev;
636 __raid10_find_phys(geo, r10bio);
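
raid10_find_phys() merely decides which geometry applies before delegating to the helper above: while a reshape is in flight, sectors still on the unconverted side of reshape_progress are mapped with the previous geometry, conf->prev (line 632), instead of conf->geo.
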
645 struct geom *geo = &conf->geo;
646 int far_set_start = (dev / geo->far_set_size) * geo->far_set_size;
647 int far_set_size = geo->far_set_size;
650 if (geo->raid_disks % geo->far_set_size) {
651 last_far_set_start = (geo->raid_disks / geo->far_set_size) - 1;
652 last_far_set_start *= geo->far_set_size;
655 far_set_size = geo->far_set_size;
656 far_set_size += (geo->raid_disks % geo->far_set_size);
661 offset = sector & geo->chunk_mask;
662 if (geo->far_offset) {
664 chunk = sector >> geo->chunk_shift;
665 fc = sector_div(chunk, geo->far_copies);
666 dev -= fc * geo->near_copies;
670 while (sector >= geo->stride) {
671 sector -= geo->stride;
672 if (dev < (geo->near_copies + far_set_start))
673 dev += far_set_size - geo->near_copies;
675 dev -= geo->near_copies;
677 chunk = sector >> geo->chunk_shift;
679 vchunk = chunk * geo->raid_disks + dev;
680 sector_div(vchunk, geo->near_copies);
681 return (vchunk << geo->chunk_shift) + offset;
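
raid10_find_virt() is the inverse: it peels off whole far-copy strides (lines 670-675), undoes the far-offset interleaving when needed (662-666), and rebuilds the array chunk as chunk * raid_disks + dev divided by near_copies (679-681). For the near-only case the inverse of the sketch above is one division plus the offset; find_virt_near is again a hypothetical name reusing struct geo_sketch:

	static uint64_t find_virt_near(const struct geo_sketch *g,
				       uint64_t dsector, int dev)
	{
		uint64_t offset = dsector & g->chunk_mask;
		uint64_t chunk = dsector >> g->chunk_shift;	/* chunk row */
		/* invert "chunk *= near_copies; dev = chunk % raid_disks" */
		uint64_t vchunk = (chunk * g->raid_disks + dev) / g->near_copies;

		return (vchunk << g->chunk_shift) + offset;
	}

Feeding any (dev, sector) pair printed by find_phys_near() back through find_virt_near() returns the original array sector, which is the round-trip the full pair of kernel functions maintains across all copies and layouts.
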
717 struct geom *geo = &conf->geo;
822 if (geo->near_copies > 1 && !pending)
826 else if (geo->far_copies > 1)
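
In read_balance() (conf->geo is bound at line 717) these two tests pick the seek-distance metric used to choose a read mirror: with several near copies an otherwise idle disk wins outright (distance 0), while with far copies the slot with the lowest device address is always preferred; the source annotates that test with "for far > 1 always use the lowest address", which on rotating disks is typically the faster outer zone.
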
1524 sector_t chunk_mask = (conf->geo.chunk_mask & conf->prev.chunk_mask);
1541 && (conf->geo.near_copies < conf->geo.raid_disks
1559 if (conf->geo.near_copies < conf->geo.raid_disks)
1561 if (conf->geo.near_copies > 1)
1562 seq_printf(seq, " %d near-copies", conf->geo.near_copies);
1563 if (conf->geo.far_copies > 1) {
1564 if (conf->geo.far_offset)
1565 seq_printf(seq, " %d offset-copies", conf->geo.far_copies);
1567 seq_printf(seq, " %d far-copies", conf->geo.far_copies);
1568 if (conf->geo.far_set_size != conf->geo.raid_disks)
1569 seq_printf(seq, " %d devices per set", conf->geo.far_set_size);
1571 seq_printf(seq, " [%d/%d] [", conf->geo.raid_disks,
1572 conf->geo.raid_disks - mddev->degraded);
1574 for (i = 0; i < conf->geo.raid_disks; i++) {
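
raid10_status() produces the geometry summary appended to /proc/mdstat. For a 4-disk near-2 array with 512K chunks the fragments above yield something like the following (device names and block count illustrative):

	md0 : active raid10 sdd[3] sdc[2] sdb[1] sda[0]
	      2097152 blocks 512K chunks 2 near-copies [4/4] [UUUU]

Offset and far layouts print "offset-copies" or "far-copies" instead, plus "N devices per set" when far_set_size differs from raid_disks (line 1568).
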
1596 disks = conf->geo.raid_disks;
1597 ncopies = conf->geo.near_copies;
1625 /* when calling 'enough', both 'prev' and 'geo' must
1669 mdname(mddev), conf->geo.raid_disks - mddev->degraded);
1682 pr_debug(" --- wd:%d rd:%d\n", conf->geo.raid_disks - conf->mddev->degraded,
1683 conf->geo.raid_disks);
1687 for (i = 0; i < conf->geo.raid_disks; i++) {
1718 for (i = 0; i < conf->geo.raid_disks; i++) {
1760 int last = conf->geo.raid_disks - 1;
1777 rdev->saved_raid_disk < conf->geo.raid_disks &&
1852 number < conf->geo.raid_disks &&
2798 for (i = 0; i < conf->geo.raid_disks; i++)
2859 chunks = conf->geo.raid_disks / conf->geo.near_copies;
2860 if (conf->geo.raid_disks % conf->geo.near_copies == 0)
2919 sector_t chunk_mask = conf->geo.chunk_mask;
2969 else for (i = 0; i < conf->geo.raid_disks; i++) {
2984 for (i = 0; i < conf->geo.raid_disks; i++) {
3003 if (chunks_skipped >= conf->geo.raid_disks) {
3017 if (conf->geo.near_copies < conf->geo.raid_disks &&
3049 for (i = 0 ; i < conf->geo.raid_disks; i++) {
3127 for (j = 0; j < conf->geo.raid_disks; j++) {
3460 for (i = 0; i < conf->geo.raid_disks; i++) {
3533 raid_disks = min(conf->geo.raid_disks,
3538 size = sectors >> conf->geo.chunk_shift;
3539 sector_div(size, conf->geo.far_copies);
3541 sector_div(size, conf->geo.near_copies);
3543 return size << conf->geo.chunk_shift;
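
raid10_size() turns a per-device size into the array capacity, working in whole chunks: shift down to chunks, divide out the far copies, multiply by the member count (the multiply uses the local raid_disks argument on an unlisted line, which is why it does not match geo), divide out the near copies, shift back. A worked example, assuming 4 members, 2 near copies, 1 far copy and 512K chunks (chunk_shift = 10):

	sectors = 2097152          (1 GiB per device)
	2097152 >> 10              = 2048 chunks per device
	2048 / far_copies (1)      = 2048
	2048 * raid_disks (4)      = 8192
	8192 / near_copies (2)     = 4096 distinct data chunks
	4096 << 10                 = 4194304 sectors, a 2 GiB array
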
3553 size = size >> conf->geo.chunk_shift;
3554 sector_div(size, conf->geo.far_copies);
3555 size = size * conf->geo.raid_disks;
3556 sector_div(size, conf->geo.near_copies);
3564 size = DIV_ROUND_UP_SECTOR_T(size, conf->geo.raid_disks);
3566 conf->dev_sectors = size << conf->geo.chunk_shift;
3568 if (conf->geo.far_offset)
3569 conf->geo.stride = 1 << conf->geo.chunk_shift;
3571 sector_div(size, conf->geo.far_copies);
3572 conf->geo.stride = size << conf->geo.chunk_shift;
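
calc_sectors() runs the same chunk arithmetic to size the array (lines 3553-3556), rounds to whole used chunks per device (3564) and records the result in conf->dev_sectors (3566). It then derives geo->stride, the device-address gap between consecutive far copies: a single chunk for offset layouts (3569), otherwise each device's share of one far copy (3571-3572), so that on a plain far-2 layout the second copy starts halfway down the used area of every member.
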
3577 static int setup_geo(struct geom *geo, struct mddev *mddev, enum geo_type new)
3608 geo->raid_disks = disks;
3609 geo->near_copies = nc;
3610 geo->far_copies = fc;
3611 geo->far_offset = fo;
3614 geo->far_set_size = disks;
3618 geo->far_set_size = disks/fc;
3619 WARN(geo->far_set_size < fc,
3623 geo->far_set_size = fc * nc;
3628 geo->chunk_mask = chunk - 1;
3629 geo->chunk_shift = ffz(~chunk);
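
setup_geo() unpacks the md RAID10 layout word: the low byte carries the near-copy count, the next byte the far-copy count, and bit 16 flags offset layouts; bits 17-18 then select how far_set_size is derived (the whole array, the buggy disks/fc variant that triggers the WARN, or fc * nc, lines 3614-3623). It returns nc * fc, the total copy count, which raid10_check_reshape later compares against conf->copies (line 4081). A hedged stand-alone decoder matching those bit positions; decode_layout and the example values are for illustration only:

	#include <stdio.h>

	static void decode_layout(int layout)
	{
		int nc = layout & 255;		/* near copies */
		int fc = (layout >> 8) & 255;	/* far copies */
		int fo = layout & (1 << 16);	/* far copies at a small offset */

		printf("near=%d far=%d offset=%d total copies=%d\n",
		       nc, fc, !!fo, nc * fc);
	}

	int main(void)
	{
		decode_layout(0x102);	/* mdadm layout "n2" */
		decode_layout(0x201);	/* "f2" */
		decode_layout(0x10201);	/* "o2" */
		return 0;
	}

The chunk fields follow at lines 3628-3629: chunk_mask is chunk - 1, and because the chunk size must be a power of two, ffz(~chunk) is simply log2 of the chunk size.
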
3651 struct geom geo;
3654 copies = setup_geo(&geo, mddev, geo_new);
3684 conf->geo = geo;
3697 conf->prev = conf->geo;
3735 int raid_disks = conf->geo.raid_disks;
3737 if (!(conf->geo.raid_disks % conf->geo.near_copies))
3738 raid_disks /= conf->geo.near_copies;
3797 if (disk_idx >= conf->geo.raid_disks &&
3847 if (conf->geo.far_copies != 1 &&
3848 conf->geo.far_offset == 0)
3857 i < conf->geo.raid_disks
3892 mdname(mddev), conf->geo.raid_disks - mddev->degraded,
3893 conf->geo.raid_disks);
3911 after_length = ((1 << conf->geo.chunk_shift) *
3912 conf->geo.far_copies);
3976 if (conf->geo.far_copies > 1 && !conf->geo.far_offset)
4076 struct geom geo;
4078 if (conf->geo.far_copies != 1 && !conf->geo.far_offset)
4081 if (setup_geo(&geo, mddev, geo_start) != conf->copies)
4084 if (geo.far_copies > 1 && !geo.far_offset)
4088 if (mddev->array_sectors & geo.chunk_mask)
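
Taken together, lines 4078-4088 are raid10_check_reshape()'s gatekeeping: an existing far layout without far_offset cannot be reshaped at all, the new geometry must keep the same total copy count, it may not introduce non-offset far copies, and the array size must be a multiple of the new chunk size.
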
4142 if (conf->geo.raid_disks == conf->prev.raid_disks)
4146 for (i = 0; i < conf->geo.raid_disks; i++) {
4156 if (conf->geo.raid_disks <= conf->prev.raid_disks)
4195 after_length = ((1 << conf->geo.chunk_shift) *
4196 conf->geo.far_copies);
4232 setup_geo(&conf->geo, mddev, geo_start);
4254 newsize = raid10_size(mddev, 0, conf->geo.raid_disks);
4317 mddev->raid_disks = conf->geo.raid_disks;
4341 conf->geo = conf->prev;
4342 mddev->raid_disks = conf->geo.raid_disks;
4359 static sector_t last_dev_address(sector_t s, struct geom *geo)
4361 s = (s | geo->chunk_mask) + 1;
4362 s >>= geo->chunk_shift;
4363 s *= geo->near_copies;
4364 s = DIV_ROUND_UP_SECTOR_T(s, geo->raid_disks);
4365 s *= geo->far_copies;
4366 s <<= geo->chunk_shift;
4374 static sector_t first_dev_address(sector_t s, struct geom *geo)
4376 s >>= geo->chunk_shift;
4377 s *= geo->near_copies;
4378 sector_div(s, geo->raid_disks);
4379 s *= geo->far_copies;
4380 s <<= geo->chunk_shift;
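
These two helpers bound, on every member device, the addresses that any copy of the chunk containing array address s can occupy: last_dev_address() rounds s up to a chunk boundary and scales chunks by near_copies / raid_disks (rounding up) and by far_copies, reporting the first device address past the chunk, while first_dev_address() performs the same scaling rounded down. reshape_request() uses them (lines 4463, 4483) to keep the old-layout and new-layout windows from overlapping. A worked example, assuming raid_disks = 4, near_copies = 2, far_copies = 1 and 512K chunks (chunk_shift = 10, chunk_mask = 1023), s = 5000:

	last_dev_address(5000):   (5000 | 1023) + 1 = 5120
	                          5120 >> 10        = 5 chunks
	                          5 * near_copies   = 10
	                          ceil(10 / 4)      = 3
	                          3 * far_copies    = 3
	                          3 << 10           = 3072

	first_dev_address(5000):  5000 >> 10        = 4 chunks
	                          4 * near_copies   = 8
	                          8 / 4             = 2
	                          2 * far_copies    = 2
	                          2 << 10           = 2048

So every copy of the chunk spanning array sectors 4096-5119 lives between device sectors 2048 and 3071, and 3072 is the next safe boundary.
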
4463 &conf->geo);
4475 sector_nr = last & ~(sector_t)(conf->geo.chunk_mask
4483 next = last_dev_address(conf->reshape_progress, &conf->geo);
4497 last = sector_nr | (conf->geo.chunk_mask
4593 __raid10_find_phys(&conf->geo, r10_bio);
4722 conf->prev = conf->geo;
4874 for (d = conf->geo.raid_disks ;
4875 d < conf->geo.raid_disks - mddev->delta_disks;
4887 mddev->chunk_sectors = 1 << conf->geo.chunk_shift;