Lines Matching refs:sectors (drivers/md/raid1.c)
65 sector_t hi = lo + r1_bio->sectors;
313 pr_debug("raid1: sync end %s on sectors %llu-%llu\n",
337 r1_bio->sector + (r1_bio->sectors);
420 r1_bio->sectors,
452 sector_t hi = r1_bio->sector + r1_bio->sectors;
514 if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors,
539 pr_debug("raid1: behind end write sectors"
562 sector_t sectors)
566 WARN_ON(sectors == 0);
568 * len is the number of sectors from start_sector to end of the
574 if (len > sectors)
575 len = sectors;
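
The matches at 562-575 are align_to_barrier_unit_end(): per the comment at 568, it returns the number of sectors from start_sector to the end of the barrier unit that start_sector belongs to, clamped to the requested length, so a single r1_bio never crosses a barrier-unit boundary. Below is a minimal userspace sketch of that clamping; sector_t is modeled as uint64_t and BARRIER_UNIT_SECTOR_SIZE is assumed to be 1 << 17 sectors (64 MiB), so treat this as a model of the arithmetic rather than the driver code.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;

/* Assumed value; the driver defines its own barrier-unit size. */
#define BARRIER_UNIT_SECTOR_SIZE (1ULL << 17)

/* How many of 'sectors' fit before the end of the barrier unit that
 * start_sector falls in (never 0, never more than sectors). */
static sector_t align_to_barrier_unit_end(sector_t start_sector,
					  sector_t sectors)
{
	sector_t len;

	assert(sectors != 0);		/* the kernel uses WARN_ON() here */
	/* sectors remaining in this barrier unit, counting start_sector */
	len = BARRIER_UNIT_SECTOR_SIZE -
	      (start_sector & (BARRIER_UNIT_SECTOR_SIZE - 1));
	if (len > sectors)
		len = sectors;
	return len;
}

int main(void)
{
	/* A 1024-sector request starting 16 sectors before a boundary is
	 * clamped to 16; the remainder is handled as a separate r1_bio. */
	printf("%llu\n", (unsigned long long)
	       align_to_barrier_unit_end(BARRIER_UNIT_SECTOR_SIZE - 16, 1024));
	return 0;
}
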
597 int sectors;
615 sectors = r1_bio->sectors;
626 if ((conf->mddev->recovery_cp < this_sector + sectors) ||
629 this_sector + sectors)))
647 rdev->recovery_offset < this_sector + sectors)
653 if (is_badblock(rdev, this_sector, sectors,
660 best_good_sectors = sectors;
669 if (is_badblock(rdev, this_sector, sectors,
680 if (choose_first && sectors > bad_sectors)
681 sectors = bad_sectors;
682 if (best_good_sectors > sectors)
683 best_good_sectors = sectors;
696 if ((sectors > best_good_sectors) && (best_disk >= 0))
698 best_good_sectors = sectors;
776 sectors = best_good_sectors;
781 conf->mirrors[best_disk].next_seq_sect = this_sector + sectors;
784 *max_sectors = sectors;
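
The block at 597-784 is read_balance(): while picking the disk to read from, any candidate whose bad-block list intersects the request shortens the usable length (best_good_sectors), and the final clamped count is handed back through *max_sectors (line 784) so the caller can split the bio at that point. A small userspace sketch of just the clamping step; lookup_badblock() and good_prefix() are made-up stand-ins for the kernel's is_badblock() handling, kept only to show how a bad range starting inside the request limits the readable prefix.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;

/* Hypothetical stand-in for the bad-block lookup: report one bad range
 * [first_bad, first_bad + bad_len) overlapping the request, or return 0
 * if the whole request is clean. */
static int lookup_badblock(sector_t start, sector_t len,
			   sector_t *first_bad, sector_t *bad_len)
{
	sector_t bb_start = 1000, bb_len = 8;	/* pretend 1000-1007 are bad */

	if (start + len <= bb_start || start >= bb_start + bb_len)
		return 0;
	*first_bad = bb_start;
	*bad_len = bb_len;
	return 1;
}

/* How many leading sectors of [this_sector, this_sector + sectors) can
 * be read from this device before hitting a bad block. */
static sector_t good_prefix(sector_t this_sector, sector_t sectors)
{
	sector_t first_bad, bad_len;

	if (!lookup_badblock(this_sector, sectors, &first_bad, &bad_len))
		return sectors;			/* whole request is good */
	if (first_bad <= this_sector)
		return 0;			/* cannot read here at all */
	return first_bad - this_sector;		/* read up to the bad range */
}

int main(void)
{
	/* A 128-sector read at 960 is clamped to the 40 good sectors
	 * before the bad range at 1000. */
	printf("%llu\n", (unsigned long long)good_prefix(960, 128));
	return 0;
}
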
1181 r1_bio->sectors = bio_sectors(bio);
1243 r1_bio->sectors = max_read_sectors;
1288 r1_bio->sectors = max_sectors;
1353 r1_bio->sectors = max_write_sectors;
1376 max_sectors = r1_bio->sectors;
1459 r1_bio->sectors = max_sectors;
1485 md_bitmap_startwrite(bitmap, r1_bio->sector, r1_bio->sectors,
1551 sector_t sectors;
1561 * we pass the maximum number of sectors down
1564 sectors = align_to_barrier_unit_end(
1568 raid1_read_request(mddev, bio, sectors, NULL);
1572 raid1_write_request(mddev, bio, sectors);
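
Lines 1551-1572 are raid1_make_request(): the comment at 1561 notes that the top level passes the maximum number of sectors down, clamping only at the barrier-unit boundary via align_to_barrier_unit_end() and letting raid1_read_request()/raid1_write_request() split the bio further if bad blocks force a smaller r1_bio (lines 1288 and 1459 above). The userspace model below only shows where the barrier-unit split points fall for a long request; in the driver the remainder is split off and resubmitted rather than handled in a loop like this, and the unit size is an assumption carried over from the earlier sketch.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;
#define BARRIER_UNIT_SECTOR_SIZE (1ULL << 17)	/* assumed, as above */

int main(void)
{
	sector_t sector = BARRIER_UNIT_SECTOR_SIZE - 64;   /* starts near a boundary */
	sector_t remaining = 3 * BARRIER_UNIT_SECTOR_SIZE; /* long request */

	while (remaining) {
		/* sectors left in the barrier unit holding 'sector' */
		sector_t in_unit = BARRIER_UNIT_SECTOR_SIZE -
				   (sector & (BARRIER_UNIT_SECTOR_SIZE - 1));
		sector_t chunk = remaining < in_unit ? remaining : in_unit;

		printf("r1_bio: sector=%llu sectors=%llu\n",
		       (unsigned long long)sector, (unsigned long long)chunk);
		sector += chunk;
		remaining -= chunk;
	}
	return 0;
}
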
1884 long sectors_to_go = r1_bio->sectors;
1898 int s = r1_bio->sectors;
1927 } else if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors,
1931 r1_bio->sectors,
1940 int sectors, struct page *page, int rw)
1942 if (sync_page_io(rdev, sector, sectors << 9, page, rw, 0, false))
1953 if (!rdev_set_badblocks(rdev, sector, sectors, 0))
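
Lines 1940-1953 are the synchronous repair helper r1_sync_page_io(): sync_page_io() takes its size in bytes, so the sector count is converted with sectors << 9 (512-byte sectors), and a failed write gets the range recorded as bad via rdev_set_badblocks(). A trivial sketch of just the unit conversion; the helper names here are invented and only the shifts mirror the kernel expression.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;

/* 512-byte sectors: the same "sectors << 9" passed to sync_page_io(). */
static inline uint64_t sectors_to_bytes(sector_t sectors)
{
	return (uint64_t)sectors << 9;
}

static inline sector_t bytes_to_sectors(uint64_t bytes)
{
	return bytes >> 9;
}

int main(void)
{
	printf("%llu bytes\n", (unsigned long long)sectors_to_bytes(8));	/* 4096 */
	printf("%llu sectors\n", (unsigned long long)bytes_to_sectors(4096));	/* 8 */
	return 0;
}
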
1976 int sectors = r1_bio->sectors;
1992 while(sectors) {
1993 int s = sectors;
2041 md_done_sync(mddev, r1_bio->sectors, 0);
2046 sectors -= s;
2081 sectors -= s;
2106 vcnt = (r1_bio->sectors + PAGE_SIZE / 512 - 1) >> (PAGE_SHIFT - 9);
2125 md_bio_reset_resync_pages(b, rp, r1_bio->sectors << 9);
2164 atomic64_add(r1_bio->sectors, &mddev->resync_mismatches);
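
Lines 1976-2164 are from the resync and check paths (fix_sync_read_error() and process_checks()): line 2106 works out vcnt, the number of pages needed to hold r1_bio->sectors, by rounding the sector count up to a whole page and shifting by PAGE_SHIFT - 9; line 2125 converts the same count to bytes, and line 2164 charges the whole r1_bio to resync_mismatches when copies differ. A worked userspace version of the vcnt arithmetic, assuming 4 KiB pages (PAGE_SHIFT = 12, i.e. 8 sectors per page).

#include <stdio.h>

#define PAGE_SHIFT	12			/* assumed: 4 KiB pages */
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

int main(void)
{
	unsigned long sectors = 123;		/* e.g. r1_bio->sectors */
	int vcnt;

	/* same expression as line 2106: round up to a page, count pages */
	vcnt = (sectors + PAGE_SIZE / 512 - 1) >> (PAGE_SHIFT - 9);
	printf("%lu sectors -> %d pages\n", sectors, vcnt);	/* 123 -> 16 */
	return 0;
}
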
2231 sector_t sect, int sectors)
2234 while(sectors) {
2235 int s = sectors;
2311 pr_info("md/raid1:%s: read error corrected (%d sectors at %llu on %s)\n",
2321 sectors -= s;
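
Lines 2231-2321 are the fix_read_error() loop: a failed read is repaired at most one page worth of sectors at a time, and each successfully repaired chunk is logged with the "read error corrected" message at 2311 before the loop advances (2321). The userspace model below reproduces only the chunking; the repair I/O itself (read the data from a healthy mirror, write it back over the failing region, re-read to verify) is replaced by a printf, and PAGE_SIZE is assumed to be 4096.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;
#define PAGE_SIZE 4096UL		/* assumed 4 KiB pages */

int main(void)
{
	sector_t sect = 5000;		/* start of the bad region */
	int sectors = 20;		/* r1_bio->sectors to repair */

	while (sectors) {
		int s = sectors;

		if (s > (int)(PAGE_SIZE >> 9))
			s = (int)(PAGE_SIZE >> 9);	/* one page at a time */
		/* real code: read s sectors from a good mirror, write them
		 * back to the failing device, then re-read to verify */
		printf("repair %d sectors at %llu\n",
		       s, (unsigned long long)sect);
		sectors -= s;
		sect += s;
	}
	return 0;
}
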
2345 int sectors;
2346 int sect_to_write = r1_bio->sectors;
2355 sectors = ((sector + block_sectors)
2361 if (sectors > sect_to_write)
2362 sectors = sect_to_write;
2363 /* Write at 'sector' for 'sectors'*/
2376 wbio->bi_iter.bi_size = r1_bio->sectors << 9;
2378 bio_trim(wbio, sector - r1_bio->sector, sectors);
2385 sectors, 0)
2389 sect_to_write -= sectors;
2390 sector += sectors;
2391 sectors = block_sectors;
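
Lines 2345-2391 are narrow_write_error(): a failed write is retried in chunks that never cross a block_sectors-aligned boundary (the bad-block granularity), and any chunk that still fails is recorded as a bad block (line 2385). Line 2355 computes the first, possibly short chunk as the distance from sector to the next boundary; after that each chunk is a full block_sectors (line 2391). The userspace model below reproduces only that chunking, assuming block_sectors is a power of two, which the masking in the kernel expression also relies on.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;

int main(void)
{
	sector_t block_sectors = 8;		/* assumed power of two */
	sector_t sector = 1003;			/* r1_bio->sector */
	sector_t sect_to_write = 20;		/* r1_bio->sectors */
	/* first chunk: up to the next block_sectors boundary */
	sector_t sectors = ((sector + block_sectors) &
			    ~(sector_t)(block_sectors - 1)) - sector;

	while (sect_to_write) {
		if (sectors > sect_to_write)
			sectors = sect_to_write;
		printf("write %llu sectors at %llu\n",
		       (unsigned long long)sectors,
		       (unsigned long long)sector);
		sect_to_write -= sectors;
		sector += sectors;
		sectors = block_sectors;	/* later chunks are full blocks */
	}
	return 0;
}
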
2399 int s = r1_bio->sectors;
2429 r1_bio->sectors, 0);
2490 r1_bio->sector, r1_bio->sectors);
2504 raid1_read_request(mddev, bio, r1_bio->sectors, r1_bio);
2631 int min_bad = 0; /* number of sectors that are bad in all devices */
2784 /* These sectors are bad on all InSync devices, so we
2878 r1_bio->sectors = nr_sectors;
2916 static sector_t raid1_size(struct mddev *mddev, sector_t sectors, int raid_disks)
2918 if (sectors)
2919 return sectors;
3183 static int raid1_resize(struct mddev *mddev, sector_t sectors)
3192 sector_t newsize = raid1_size(mddev, sectors, 0);
3202 if (sectors > mddev->dev_sectors &&
3207 mddev->dev_sectors = sectors;
3208 mddev->resync_max_sectors = sectors;
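
The final matches, 2916-3208, are raid1_size() and raid1_resize(). Because RAID1 mirrors rather than stripes, the array size is simply the per-device size: raid1_size() echoes an explicitly requested size back and otherwise returns mddev->dev_sectors, and raid1_resize() uses that to update dev_sectors and resync_max_sectors together (lines 3207-3208). A small sketch of that convention with the struct reduced to the two fields involved; this models only the size bookkeeping, not the bitmap resize or capacity checks the real function also performs.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;

struct mddev_model {			/* tiny stand-in for struct mddev */
	sector_t dev_sectors;
	sector_t resync_max_sectors;
};

/* raid1_size() convention: 0 means "use the per-device size". */
static sector_t raid1_size_model(struct mddev_model *mddev, sector_t sectors)
{
	return sectors ? sectors : mddev->dev_sectors;
}

static void raid1_resize_model(struct mddev_model *mddev, sector_t sectors)
{
	/* the real function also resizes the bitmap and checks capacity */
	mddev->dev_sectors = sectors;
	mddev->resync_max_sectors = sectors;
}

int main(void)
{
	struct mddev_model md = { .dev_sectors = 1 << 20 };

	printf("size=%llu\n", (unsigned long long)raid1_size_model(&md, 0));
	raid1_resize_model(&md, 2 << 20);
	printf("size=%llu\n", (unsigned long long)raid1_size_model(&md, 0));
	return 0;
}
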