Lines matching refs:sectors in drivers/md/raid1.c
65 sector_t hi = lo + r1_bio->sectors;
318 pr_debug("raid1: sync end %s on sectors %llu-%llu\n",
342 r1_bio->sector + (r1_bio->sectors);
424 r1_bio->sectors,
456 sector_t hi = r1_bio->sector + r1_bio->sectors;
518 if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors,
543 pr_debug("raid1: behind end write sectors"
566 sector_t sectors)
570 WARN_ON(sectors == 0);
572 * len is the number of sectors from start_sector to end of the
578 if (len > sectors)
579 len = sectors;
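
Lines 566-579 are the body of align_to_barrier_unit_end(), which clamps a request so it never crosses a barrier-unit boundary. Below is a minimal self-contained C model of that clamp; sector_t, round_up_pow2() and the 1 << 17 value of BARRIER_UNIT_SECTOR_SIZE are stand-ins for the kernel definitions in raid1.h.

    #include <assert.h>
    #include <stdint.h>

    typedef uint64_t sector_t;

    #define BARRIER_UNIT_SECTOR_SIZE (1ULL << 17)    /* assumed, from raid1.h */

    /* round_up() for a power-of-two alignment, as the kernel macro does. */
    static sector_t round_up_pow2(sector_t x, sector_t align)
    {
            return (x + align - 1) & ~(align - 1);
    }

    /*
     * len is the number of sectors from start_sector to the end of the
     * barrier unit which start_sector belongs to; the request is clamped
     * to that length so no I/O ever spans two barrier units.
     */
    static sector_t align_to_barrier_unit_end(sector_t start_sector,
                                              sector_t sectors)
    {
            sector_t len;

            assert(sectors != 0);    /* WARN_ON(sectors == 0) in the kernel */
            len = round_up_pow2(start_sector + 1, BARRIER_UNIT_SECTOR_SIZE)
                  - start_sector;
            if (len > sectors)
                    len = sectors;
            return len;
    }

The + 1 matters: a request starting on the last sector of a unit clamps to one sector, never to zero.
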
601 int sectors;
619 sectors = r1_bio->sectors;
630 if ((conf->mddev->recovery_cp < this_sector + sectors) ||
633 this_sector + sectors)))
651 rdev->recovery_offset < this_sector + sectors)
657 if (is_badblock(rdev, this_sector, sectors,
664 best_good_sectors = sectors;
673 if (is_badblock(rdev, this_sector, sectors,
684 if (choose_first && sectors > bad_sectors)
685 sectors = bad_sectors;
686 if (best_good_sectors > sectors)
687 best_good_sectors = sectors;
700 if ((sectors > best_good_sectors) && (best_disk >= 0))
702 best_good_sectors = sectors;
780 sectors = best_good_sectors;
785 conf->mirrors[best_disk].next_seq_sect = this_sector + sectors;
788 *max_sectors = sectors;
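
Lines 601-788 all fall inside read_balance(), which picks the mirror to read from and, when a candidate disk has bad blocks, shrinks the request, returning the usable length through *max_sectors (line 788). A condensed model of the per-disk clamp follows; is_badblock()'s first_bad output is modeled as a plain parameter, and the real function also weighs head distance, pending I/O and write-mostly flags.

    #include <stdint.h>

    typedef uint64_t sector_t;

    /*
     * Given a request [this_sector, this_sector + sectors) and a bad
     * range starting at first_bad on one mirror, return how many leading
     * sectors of the request that mirror can serve.
     */
    static sector_t good_leading_sectors(sector_t this_sector,
                                         sector_t sectors,
                                         sector_t first_bad)
    {
            if (first_bad <= this_sector)
                    /* Request starts inside the bad range: this mirror
                     * cannot serve even the first sector, so the request
                     * is shrunk for another mirror to cover instead. */
                    return 0;

            /* Bad range begins later: only the prefix before it is good. */
            return first_bad - this_sector < sectors ?
                   first_bad - this_sector : sectors;
    }

best_good_sectors keeps the largest such prefix across mirrors; if no mirror covers the whole request, the caller services best_good_sectors now and resubmits the tail.
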
1196 r1_bio->sectors = bio_sectors(bio);
1262 r1_bio->sectors = max_read_sectors;
1307 r1_bio->sectors = max_sectors;
1383 r1_bio->sectors = max_write_sectors;
1399 max_sectors = r1_bio->sectors;
1505 r1_bio->sectors = max_sectors;
1533 md_bitmap_startwrite(bitmap, r1_bio->sector, r1_bio->sectors,
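
Lines 1196-1533 show raid1_read_request() and raid1_write_request() setting r1_bio->sectors: first to the caller-supplied maximum (lines 1262 and 1383), then down to max_sectors once read_balance() or the bad-block checks have had their say (lines 1307 and 1505), with md_bitmap_startwrite() covering exactly the clamped range (line 1533). When the clamp shrinks the bio, the tail is split off and resubmitted. A compilable sketch of that step, with bio_model standing in for struct bio and the kernel's bio_split()/bio_chain() pair:

    #include <stdint.h>

    typedef uint64_t sector_t;

    struct bio_model {
            sector_t sector;      /* first sector of the request */
            sector_t nsectors;    /* length in sectors */
    };

    /*
     * Clamp *bio to max_sectors and return the split-off tail (length 0
     * when no split was needed); the caller resubmits the tail.
     */
    static struct bio_model clamp_and_split(struct bio_model *bio,
                                            sector_t max_sectors)
    {
            struct bio_model tail = { 0, 0 };

            if (bio->nsectors > max_sectors) {
                    tail.sector = bio->sector + max_sectors;
                    tail.nsectors = bio->nsectors - max_sectors;
                    bio->nsectors = max_sectors;  /* r1_bio->sectors = max_sectors */
            }
            return tail;
    }
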
1588 sector_t sectors;
1598 * we pass the maximum number of sectors down
1601 sectors = align_to_barrier_unit_end(
1605 raid1_read_request(mddev, bio, sectors, NULL);
1609 raid1_write_request(mddev, bio, sectors);
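
Lines 1588-1609 are raid1_make_request(): the bio is clamped once to its barrier unit and the resulting sector count handed to the read or write path, which may still split for bad blocks; passing the maximum down avoids splitting more than once (the comment at line 1598). A self-contained sketch of the dispatch, with stub handlers and the assumed 1 << 17 barrier-unit size:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t sector_t;

    #define BARRIER_UNIT_SECTOR_SIZE (1ULL << 17)    /* assumed, from raid1.h */

    struct bio_model { sector_t sector; sector_t nsectors; bool is_write; };

    static void read_request(struct bio_model *bio, sector_t sectors)
    {
            printf("read  %llu +%llu\n", (unsigned long long)bio->sector,
                   (unsigned long long)sectors);
    }

    static void write_request(struct bio_model *bio, sector_t sectors)
    {
            printf("write %llu +%llu\n", (unsigned long long)bio->sector,
                   (unsigned long long)sectors);
    }

    /* Clamp to the end of the current barrier unit, then dispatch. */
    static void make_request(struct bio_model *bio)
    {
            sector_t unit_end = (bio->sector / BARRIER_UNIT_SECTOR_SIZE + 1)
                                * BARRIER_UNIT_SECTOR_SIZE;
            sector_t sectors = unit_end - bio->sector;

            if (sectors > bio->nsectors)
                    sectors = bio->nsectors;
            if (bio->is_write)
                    write_request(bio, sectors);
            else
                    read_request(bio, sectors);
    }
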
1930 long sectors_to_go = r1_bio->sectors;
1944 int s = r1_bio->sectors;
1973 } else if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors,
1977 r1_bio->sectors,
1986 int sectors, struct page *page, blk_opf_t rw)
1988 if (sync_page_io(rdev, sector, sectors << 9, page, rw, false))
1999 if (!rdev_set_badblocks(rdev, sector, sectors, 0))
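
Lines 1986-1999 are r1_sync_page_io(): one synchronous transfer, with the sector count shifted left by 9 to get bytes (512-byte sectors). On failure the range goes into the bad-block list, and only if that fails is the whole device errored. A simplified model (the stand-in helpers do nothing; the real code also sets WriteErrorSeen on write failures):

    #include <stdbool.h>
    #include <stdint.h>

    typedef uint64_t sector_t;

    /* Stand-ins for sync_page_io(), rdev_set_badblocks(), md_error(). */
    static bool do_sync_io(sector_t sector, int bytes, void *page, bool write)
    { (void)sector; (void)bytes; (void)page; (void)write; return false; }
    static bool record_badblocks(sector_t sector, int sectors)
    { (void)sector; (void)sectors; return true; }
    static void fail_device(void) { }

    static int r1_sync_page_io_model(sector_t sector, int sectors,
                                     void *page, bool write)
    {
            if (do_sync_io(sector, sectors << 9 /* sectors to bytes */,
                           page, write))
                    return 1;    /* success */

            /* need to record an error - for the block or the device */
            if (!record_badblocks(sector, sectors))
                    fail_device();
            return 0;
    }
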
2022 int sectors = r1_bio->sectors;
2038 while (sectors) {
2039 int s = sectors;
2086 md_done_sync(mddev, r1_bio->sectors, 0);
2091 sectors -= s;
2126 sectors -= s;
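
Lines 2022-2091 come from fix_sync_read_error(), which walks r1_bio->sectors one page at a time: s is capped to a page's worth of sectors, the chunk is retried against the other mirrors, and the window advances (the second sectors -= s at line 2126 is the matching decrement in the write-back pass). The chunking shape, assuming 4 KiB pages and 512-byte sectors:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t sector_t;

    #define PAGE_SECTORS (4096 / 512)    /* assumes PAGE_SIZE == 4096 */

    /* Walk [sect, sect + sectors) in at-most-one-page chunks. */
    static void for_each_page_chunk(sector_t sect, int sectors)
    {
            int idx = 0;

            while (sectors) {
                    int s = sectors;

                    if (s > PAGE_SECTORS)
                            s = PAGE_SECTORS;
                    printf("chunk %d: sector %llu, %d sectors\n",
                           idx, (unsigned long long)sect, s);
                    sect += s;
                    sectors -= s;
                    idx++;
            }
    }
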
2151 vcnt = (r1_bio->sectors + PAGE_SIZE / 512 - 1) >> (PAGE_SHIFT - 9);
2169 md_bio_reset_resync_pages(b, rp, r1_bio->sectors << 9);
2208 atomic64_add(r1_bio->sectors, &mddev->resync_mismatches);
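
Line 2151, in process_checks(), sizes the page vector used to compare resync buffers: a ceiling division of the sector count by sectors-per-page. Line 2208 then charges every mismatching r1_bio's sectors to resync_mismatches. The computation, assuming 4 KiB pages:

    #define PAGE_SIZE  4096
    #define PAGE_SHIFT 12

    /* vcnt = ceil(sectors / (PAGE_SIZE / 512)) pages for the bio. */
    static int resync_vcnt(int sectors)
    {
            return (sectors + PAGE_SIZE / 512 - 1) >> (PAGE_SHIFT - 9);
    }
    /* resync_vcnt(8) == 1; resync_vcnt(9) == 2 */
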
2275 sector_t sect, int sectors)
2278 while (sectors) {
2279 int s = sectors;
2354 pr_info("md/raid1:%s: read error corrected (%d sectors at %llu on %pg)\n",
2364 sectors -= s;
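
Lines 2275-2364 are fix_read_error(), which repairs a failed read in place using the same page-sized chunks: find a mirror that can supply the chunk, rewrite the affected range, and re-read to verify before logging the message at line 2354. A heavily condensed model; read_chunk()/write_chunk() are stand-ins for the sync_page_io()-based helpers, and the real code rewrites and verifies on every other in-sync mirror, not just the failing one:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t sector_t;

    static bool read_chunk(int disk, sector_t sect, int s)
    { (void)disk; (void)sect; (void)s; return true; }    /* stand-in */
    static bool write_chunk(int disk, sector_t sect, int s)
    { (void)disk; (void)sect; (void)s; return true; }    /* stand-in */

    /* Repair one chunk on bad_disk using any mirror that reads cleanly. */
    static void fix_chunk(int ndisks, int bad_disk, sector_t sect, int s)
    {
            for (int d = 0; d < ndisks; d++) {
                    if (d == bad_disk || !read_chunk(d, sect, s))
                            continue;    /* mirror cannot supply the data */

                    /* Overwrite the failing range, verify with a re-read. */
                    if (write_chunk(bad_disk, sect, s) &&
                        read_chunk(bad_disk, sect, s))
                            printf("read error corrected (%d sectors at %llu)\n",
                                   s, (unsigned long long)sect);
                    return;
            }
            /* No mirror could read the chunk: the real code fails the device. */
    }
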
2388 int sectors;
2389 int sect_to_write = r1_bio->sectors;
2398 sectors = ((sector + block_sectors)
2404 if (sectors > sect_to_write)
2405 sectors = sect_to_write;
2406 /* Write at 'sector' for 'sectors' */
2419 wbio->bi_iter.bi_size = r1_bio->sectors << 9;
2421 bio_trim(wbio, sector - r1_bio->sector, sectors);
2427 sectors, 0)
2431 sect_to_write -= sectors;
2432 sector += sectors;
2433 sectors = block_sectors;
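
Lines 2388-2433 are the heart of narrow_write_error(): a failed write is retried in bad-block-granularity pieces so each piece can be recorded bad independently. The mask arithmetic at line 2398 yields the length of the first, possibly unaligned piece; afterwards every piece is exactly block_sectors long (line 2433). A model of the arithmetic, where block_sectors is a power of two as the roundup of the badblocks shift guarantees:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t sector_t;

    /* Length of the first sub-write: from 'sector' up to the next
     * block_sectors boundary. */
    static sector_t first_subwrite(sector_t sector, sector_t block_sectors)
    {
            return ((sector + block_sectors) & ~(block_sectors - 1)) - sector;
    }

    static void narrow_write(sector_t sector, sector_t sect_to_write,
                             sector_t block_sectors)
    {
            sector_t sectors = first_subwrite(sector, block_sectors);

            while (sect_to_write) {
                    if (sectors > sect_to_write)
                            sectors = sect_to_write;
                    /* Write at 'sector' for 'sectors'; on failure, only
                     * this aligned range joins the bad-block list. */
                    printf("sub-write %llu +%llu\n",
                           (unsigned long long)sector,
                           (unsigned long long)sectors);
                    sect_to_write -= sectors;
                    sector += sectors;
                    sectors = block_sectors;
            }
    }
    /* narrow_write(10, 20, 8) emits 10 +6, 16 +8, 24 +6 */
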
2441 int s = r1_bio->sectors;
2471 r1_bio->sectors, 0);
2533 r1_bio->sector, r1_bio->sectors);
2547 raid1_read_request(mddev, bio, r1_bio->sectors, r1_bio);
2675 int min_bad = 0; /* number of sectors that are bad in all devices */
2828 /* These sectors are bad on all InSync devices, so we
2922 r1_bio->sectors = nr_sectors;
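
Lines 2675-2828 belong to raid1_sync_request(): min_bad tracks the shortest bad run found at the current position, and when every in-sync device is bad there (the truncated comment at line 2828), that many sectors are skipped rather than copied. A model of the accumulation; each bad_range is assumed to overlap sector_nr or start after it, as is_badblock() guarantees for ranges it reports:

    #include <stdint.h>

    typedef uint64_t sector_t;

    struct bad_range { sector_t first_bad; sector_t bad_sectors; };

    /*
     * Across devices whose bad range covers sector_nr, track the
     * shortest run starting there: that many sectors are bad on every
     * such device and can be skipped as one unit.
     */
    static sector_t min_bad_at(sector_t sector_nr,
                               const struct bad_range *bad, int ndevs)
    {
            sector_t min_bad = 0;

            for (int i = 0; i < ndevs; i++) {
                    if (bad[i].first_bad > sector_nr)
                            continue;    /* range starts later; good here */
                    sector_t run = bad[i].bad_sectors
                                   - (sector_nr - bad[i].first_bad);
                    if (min_bad == 0 || min_bad > run)
                            min_bad = run;
            }
            return min_bad;
    }
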
2960 static sector_t raid1_size(struct mddev *mddev, sector_t sectors, int raid_disks)
2962 if (sectors)
2963 return sectors;
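
Lines 2960-2963 are raid1_size(): RAID1 does not stripe, so array capacity equals the per-device size, and a nonzero sectors argument simply overrides the stored value (which raid1_resize() relies on at line 3222). A one-line model:

    #include <stdint.h>

    typedef uint64_t sector_t;

    static sector_t raid1_size_model(sector_t dev_sectors, sector_t sectors,
                                     int raid_disks)
    {
            (void)raid_disks;    /* capacity is independent of mirror count */
            return sectors ? sectors : dev_sectors;
    }
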
3213 static int raid1_resize(struct mddev *mddev, sector_t sectors)
3222 sector_t newsize = raid1_size(mddev, sectors, 0);
3232 if (sectors > mddev->dev_sectors &&
3237 mddev->dev_sectors = sectors;
3238 mddev->resync_max_sectors = sectors;
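
Lines 3213-3238 are raid1_resize(). The tail matters most: when the array grows past the old dev_sectors, the newly exposed sectors have never been mirrored, so the resync checkpoint is pulled back to the old size and a recovery pass requested before the new sizes are committed. A model of that tail; mddev_model and recovery_needed stand in for struct mddev and set_bit(MD_RECOVERY_NEEDED, &mddev->recovery):

    #include <stdbool.h>
    #include <stdint.h>

    typedef uint64_t sector_t;

    struct mddev_model {                 /* stand-in for struct mddev */
            sector_t dev_sectors;
            sector_t resync_max_sectors;
            sector_t recovery_cp;        /* resync checkpoint */
            bool recovery_needed;
    };

    static void raid1_resize_tail(struct mddev_model *mddev, sector_t sectors)
    {
            if (sectors > mddev->dev_sectors &&
                mddev->recovery_cp > mddev->dev_sectors) {
                    mddev->recovery_cp = mddev->dev_sectors;
                    mddev->recovery_needed = true;
            }
            mddev->dev_sectors = sectors;
            mddev->resync_max_sectors = sectors;
    }
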