Lines matching refs:devs in drivers/md/raid10.c; a short sketch of the devs[] pattern follows the listing.

109 int size = offsetof(struct r10bio, devs[conf->geo.raid_disks]);
167 r10_bio->devs[j].bio = bio;
174 r10_bio->devs[j].repl_bio = bio;
181 struct bio *rbio = r10_bio->devs[j].repl_bio;
188 bio = r10_bio->devs[j].bio;
216 if (r10_bio->devs[j].bio)
217 bio_uninit(r10_bio->devs[j].bio);
218 kfree(r10_bio->devs[j].bio);
219 if (r10_bio->devs[j].repl_bio)
220 bio_uninit(r10_bio->devs[j].repl_bio);
221 kfree(r10_bio->devs[j].repl_bio);
237 struct bio *bio = r10bio->devs[j].bio;
246 bio = r10bio->devs[j].repl_bio;
264 struct bio **bio = & r10_bio->devs[i].bio;
268 bio = &r10_bio->devs[i].repl_bio;
345 conf->mirrors[r10_bio->devs[slot].devnum].head_position =
346 r10_bio->devs[slot].addr + (r10_bio->sectors);
359 if (r10_bio->devs[slot].bio == bio)
361 if (r10_bio->devs[slot].repl_bio == bio) {
373 return r10_bio->devs[slot].devnum;
385 rdev = r10_bio->devs[slot].rdev;
505 r10_bio->devs[slot].bio = NULL;
537 r10_bio->devs[slot].addr,
542 r10_bio->devs[slot].repl_bio = IO_MADE_GOOD;
544 r10_bio->devs[slot].bio = IO_MADE_GOOD;
620 r10bio->devs[slot].devnum = d;
621 r10bio->devs[slot].addr = s;
638 r10bio->devs[slot].devnum = d;
639 r10bio->devs[slot].addr = s;
775 if (r10_bio->devs[slot].bio == IO_BLOCKED)
777 disk = r10_bio->devs[slot].devnum;
780 r10_bio->devs[slot].addr + sectors >
794 r10_bio->devs[slot].addr + sectors > rdev->recovery_offset)
797 dev_sector = r10_bio->devs[slot].addr;
853 new_distance = r10_bio->devs[slot].addr;
855 new_distance = abs(r10_bio->devs[slot].addr -
1186 if (slot >= 0 && r10_bio->devs[slot].rdev) {
1202 disk = r10_bio->devs[slot].devnum;
1209 err_rdev = r10_bio->devs[slot].rdev;
1250 r10_bio->devs[slot].bio = read_bio;
1251 r10_bio->devs[slot].rdev = rdev;
1253 read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr +
1279 int devnum = r10_bio->devs[n_copy].devnum;
1294 r10_bio->devs[n_copy].repl_bio = mbio;
1296 r10_bio->devs[n_copy].bio = mbio;
1298 mbio->bi_iter.bi_sector = (r10_bio->devs[n_copy].addr +
1369 sector_t dev_sector = r10_bio->devs[i].addr;
1481 int d = r10_bio->devs[i].devnum;
1490 r10_bio->devs[i].bio = NULL;
1491 r10_bio->devs[i].repl_bio = NULL;
1499 sector_t dev_sector = r10_bio->devs[i].addr;
1530 r10_bio->devs[i].bio = bio;
1534 r10_bio->devs[i].repl_bio = bio;
1560 if (r10_bio->devs[i].bio)
1562 if (r10_bio->devs[i].repl_bio)
1582 memset(r10_bio->devs, 0, sizeof(r10_bio->devs[0]) *
1765 memset(r10_bio->devs, 0, sizeof(r10_bio->devs[0]) * geo->raid_disks);
1793 r10_bio->devs[disk].bio = NULL;
1794 r10_bio->devs[disk].repl_bio = NULL;
1804 r10_bio->devs[disk].bio = bio;
1808 r10_bio->devs[disk].repl_bio = bio;
1851 if (r10_bio->devs[disk].bio) {
1857 r10_bio->devs[disk].bio = mbio;
1858 r10_bio->devs[disk].devnum = disk;
1865 if (r10_bio->devs[disk].repl_bio) {
1871 r10_bio->devs[disk].repl_bio = rbio;
1872 r10_bio->devs[disk].devnum = disk;
2377 r10_bio->devs[slot].addr,
2415 if (!r10_bio->devs[i].bio->bi_status)
2422 fbio = r10_bio->devs[i].bio;
2434 tbio = r10_bio->devs[i].bio;
2442 d = r10_bio->devs[i].devnum;
2444 if (!r10_bio->devs[i].bio->bi_status) {
2483 tbio->bi_iter.bi_sector = r10_bio->devs[i].addr;
2504 tbio = r10_bio->devs[i].repl_bio;
2507 if (r10_bio->devs[i].bio->bi_end_io != end_sync_write
2508 && r10_bio->devs[i].bio != fbio)
2510 d = r10_bio->devs[i].devnum;
2545 struct bio *bio = r10_bio->devs[0].bio;
2549 int dr = r10_bio->devs[0].devnum;
2550 int dw = r10_bio->devs[1].devnum;
2563 addr = r10_bio->devs[0].addr + sect,
2571 addr = r10_bio->devs[1].addr + sect;
2595 addr = r10_bio->devs[1].addr + sect;
2621 struct bio *wbio = r10_bio->devs[1].bio;
2622 struct bio *wbio2 = r10_bio->devs[1].repl_bio;
2644 d = r10_bio->devs[1].devnum;
2732 int d = r10_bio->devs[slot].devnum;
2753 r10_bio->devs[slot].bio = IO_BLOCKED;
2771 d = r10_bio->devs[sl].devnum;
2776 is_badblock(rdev, r10_bio->devs[sl].addr + sect, s,
2781 r10_bio->devs[sl].addr +
2802 int dn = r10_bio->devs[slot].devnum;
2807 r10_bio->devs[slot].addr
2811 r10_bio->devs[slot].bio
2824 d = r10_bio->devs[sl].devnum;
2834 r10_bio->devs[sl].addr +
2858 d = r10_bio->devs[sl].devnum;
2868 r10_bio->devs[sl].addr +
2908 struct md_rdev *rdev = conf->mirrors[r10_bio->devs[i].devnum].rdev;
2945 wsector = r10_bio->devs[i].addr + (sector - r10_bio->sector);
2969 struct md_rdev *rdev = r10_bio->devs[slot].rdev;
2979 bio = r10_bio->devs[slot].bio;
2981 r10_bio->devs[slot].bio = NULL;
2984 r10_bio->devs[slot].bio = IO_BLOCKED;
3016 int dev = r10_bio->devs[m].devnum;
3018 if (r10_bio->devs[m].bio == NULL ||
3019 r10_bio->devs[m].bio->bi_end_io == NULL)
3021 if (!r10_bio->devs[m].bio->bi_status) {
3024 r10_bio->devs[m].addr,
3029 r10_bio->devs[m].addr,
3034 if (r10_bio->devs[m].repl_bio == NULL ||
3035 r10_bio->devs[m].repl_bio->bi_end_io == NULL)
3038 if (!r10_bio->devs[m].repl_bio->bi_status) {
3041 r10_bio->devs[m].addr,
3046 r10_bio->devs[m].addr,
3055 int dev = r10_bio->devs[m].devnum;
3056 struct bio *bio = r10_bio->devs[m].bio;
3061 r10_bio->devs[m].addr,
3073 bio = r10_bio->devs[m].repl_bio;
3078 r10_bio->devs[m].addr,
3210 bio = r10bio->devs[i].bio;
3214 bio = r10bio->devs[i].repl_bio;
3541 int d = r10_bio->devs[j].devnum;
3552 sector = r10_bio->devs[j].addr;
3566 bio = r10_bio->devs[0].bio;
3573 from_addr = r10_bio->devs[j].addr;
3581 if (r10_bio->devs[k].devnum == i)
3584 to_addr = r10_bio->devs[k].addr;
3585 r10_bio->devs[0].devnum = d;
3586 r10_bio->devs[0].addr = from_addr;
3587 r10_bio->devs[1].devnum = i;
3588 r10_bio->devs[1].addr = to_addr;
3591 bio = r10_bio->devs[1].bio;
3601 r10_bio->devs[1].bio->bi_end_io = NULL;
3604 bio = r10_bio->devs[1].repl_bio;
3633 if (r10_bio->devs[k].devnum == i)
3639 r10_bio->devs[k].addr,
3645 r10_bio->devs[k].addr,
3673 if (r10_bio->devs[0].bio->bi_opf & MD_FAILFAST) {
3680 int d = r10_bio->devs[j].devnum;
3687 r10_bio->devs[0].bio->bi_opf
3740 int d = r10_bio->devs[i].devnum;
3745 if (r10_bio->devs[i].repl_bio)
3746 r10_bio->devs[i].repl_bio->bi_end_io = NULL;
3748 bio = r10_bio->devs[i].bio;
3756 sector = r10_bio->devs[i].addr;
3789 bio = r10_bio->devs[i].repl_bio;
3792 sector = r10_bio->devs[i].addr;
3807 int d = r10_bio->devs[i].devnum;
3808 if (r10_bio->devs[i].bio->bi_end_io)
3811 if (r10_bio->devs[i].repl_bio &&
3812 r10_bio->devs[i].repl_bio->bi_end_io)
4390 static void *raid10_takeover_raid0(struct mddev *mddev, sector_t size, int devs)
4400 sector_div(size, devs);
4810 * ->devs[x].bio and ->devs[x].repl_bio.
4941 read_bio->bi_iter.bi_sector = (r10_bio->devs[r10_bio->read_slot].addr
4946 r10_bio->read_slot = r10_bio->devs[r10_bio->read_slot].devnum;
4983 int d = r10_bio->devs[s/2].devnum;
4987 b = r10_bio->devs[s/2].repl_bio;
4990 b = r10_bio->devs[s/2].bio;
4996 b->bi_iter.bi_sector = r10_bio->devs[s/2].addr +
5007 pages = get_resync_pages(r10_bio->devs[0].bio)->pages;
5074 int d = r10_bio->devs[s/2].devnum;
5079 b = r10_bio->devs[s/2].repl_bio;
5082 b = r10_bio->devs[s/2].bio;
5140 r10b = kmalloc(struct_size(r10b, devs, conf->copies), GFP_NOIO);
5146 /* reshape IOs share pages from .devs[0].bio */
5147 pages = get_resync_pages(r10_bio->devs[0].bio)->pages;
5162 int d = r10b->devs[slot].devnum;
5170 addr = r10b->devs[slot].addr + idx * PAGE_SIZE;
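
Nearly every match above goes through the same layout: struct r10bio ends in a
flexible array devs[], one slot per copy, and each slot pairs a device number
and an on-device sector with a primary bio and an optional repl_bio (see the
comment at line 4810). The following is a minimal userspace sketch of that
layout and of the sizing/reset idioms at lines 109, 1582, 1765 and 5140; the
field set is heavily simplified, the slot member names mirror the accesses in
the listing, and calloc() stands in for the kernel's kzalloc(), so treat it as
an illustration rather than the real definitions from drivers/md/raid10.h.

    #include <stddef.h>
    #include <stdlib.h>
    #include <string.h>

    struct bio;                          /* opaque here; real type in <linux/bio.h> */

    struct r10dev {                      /* one copy of the I/O */
            struct bio    *bio;          /* primary request (lines 167, 1530, ...) */
            struct bio    *repl_bio;     /* write to a replacement device (174, 1534, ...) */
            int            devnum;       /* index into conf->mirrors[] (620, 777, ...) */
            unsigned long  addr;         /* start sector on that device; sector_t in-kernel */
    };

    struct r10bio {
            int           sectors;
            /* ... many fields omitted ... */
            struct r10dev devs[];        /* flexible array, one slot per copy */
    };

    /*
     * Line 109 sizes the allocation by taking offsetof() of the
     * one-past-the-last array element, a C99/GNU idiom the kernel relies on;
     * line 5140 does the same with the overflow-checked struct_size() helper.
     */
    static struct r10bio *alloc_r10bio(int raid_disks)
    {
            return calloc(1, offsetof(struct r10bio, devs[raid_disks]));
    }

    /* Reusing an r10bio re-zeroes only the slots, as at lines 1582 and 1765. */
    static void reset_slots(struct r10bio *r10_bio, int raid_disks)
    {
            memset(r10_bio->devs, 0, sizeof(r10_bio->devs[0]) * raid_disks);
    }

In raid10.c itself the allocation comes from a mempool whose alloc callback
does the kzalloc(), and the read, write, resync and reshape paths in the
listing all walk devs[] by slot index to pair each copy's devnum and addr
with the bio actually issued to that device.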