Lines matching refs: sector_nr

71 static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
2907 static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
2934 return mddev->dev_sectors - sector_nr;
2946 if (sector_nr >= max_sector) {
3001 return reshape_request(mddev, sector_nr, skipped);
3008 return (max_sector - sector_nr) + sectors_skipped;
3018 max_sector > (sector_nr | chunk_mask))
3019 max_sector = (sector_nr | chunk_mask) + 1;
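
Lines 3018-3019 cap a sync pass at the end of the current chunk. Assuming chunk_mask is the chunk size in sectors minus one and the chunk size is a power of two, sector_nr | chunk_mask is the last sector of the chunk that contains sector_nr; the r10_bio->sectors computation at line 3334 below relies on the same identity. A minimal standalone sketch of the arithmetic with illustrative values (not kernel code):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        /* Illustrative chunk size: 1024 sectors (512 KiB); chunk_mask = size - 1. */
        uint64_t chunk_sectors = 1024;
        uint64_t chunk_mask   = chunk_sectors - 1;
        uint64_t sector_nr    = 3000;

        uint64_t chunk_end  = sector_nr | chunk_mask;     /* last sector of this chunk: 3071 */
        uint64_t max_sector = chunk_end + 1;              /* first sector of the next chunk: 3072 */
        uint64_t nr_sectors = chunk_end - sector_nr + 1;  /* sectors left in this chunk: 72 */

        printf("chunk_end=%llu max_sector=%llu nr_sectors=%llu\n",
               (unsigned long long)chunk_end,
               (unsigned long long)max_sector,
               (unsigned long long)nr_sectors);
        return 0;
    }
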
3078 sect = raid10_find_virt(conf, sector_nr, i);
3304 * Let's check against "sector_nr + 2 * RESYNC_SECTORS" for
3308 md_bitmap_cond_end_sync(mddev->bitmap, sector_nr,
3310 (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high));
3312 if (!md_bitmap_start_sync(mddev->bitmap, sector_nr,
3328 conf->next_resync = sector_nr;
3331 r10_bio->sector = sector_nr;
3334 r10_bio->sectors = (sector_nr | chunk_mask) - sector_nr + 1;
3421 if (sector_nr + max_sync < max_sector)
3422 max_sector = sector_nr + max_sync;
3426 if (sector_nr + (len>>9) > max_sector)
3427 len = (max_sector - sector_nr) << 9;
3440 sector_nr += len>>9;
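
Lines 3426-3440 are part of the loop that fills the resync bio with pages: len is a byte count, while sector_nr and max_sector are 512-byte sector addresses, so len>>9 converts bytes to sectors and <<9 converts back. A small sketch of the clamp, with hypothetical values:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t sector_nr  = 8000;   /* current position, in 512-byte sectors */
        uint64_t max_sector = 8004;   /* end of this sync window */
        uint64_t len        = 4096;   /* one page, in bytes */

        /* Clamp the page so it does not run past max_sector. */
        if (sector_nr + (len >> 9) > max_sector)
            len = (max_sector - sector_nr) << 9;   /* 4 sectors -> 2048 bytes */

        sector_nr += len >> 9;                     /* advance by whole sectors */
        printf("len=%llu sector_nr=%llu\n",
               (unsigned long long)len, (unsigned long long)sector_nr);
        return 0;
    }
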
3447 if (conf->cluster_sync_high < sector_nr + nr_sectors) {
3462 * sector_nr is a device address for recovery, so we
3466 sect_va1 = raid10_find_virt(conf, sector_nr, i);
3472 * sector_nr, so make the translation too.
3517 if (sector_nr + max_sync < max_sector)
3518 max_sector = sector_nr + max_sync;
3520 sectors_skipped += (max_sector - sector_nr);
3522 sector_nr = max_sector;
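
Lines 3008 and 3517-3522 show the progress accounting on the skip paths: a window that needs no I/O is still counted, so its length is added to sectors_skipped, sector_nr jumps to max_sector, and the total distance moved past is what gets returned. A standalone sketch of that accumulation, under the assumption (from the md sync_request convention) that the caller advances its resync position by whatever the function returns; names here are illustrative:

    #include <stdio.h>
    #include <stdint.h>

    /* Hypothetical stand-in for one sync window that turns out to need no I/O. */
    static uint64_t sync_one_window(uint64_t sector_nr, uint64_t max_sector)
    {
        uint64_t sectors_skipped = 0;

        /* Nothing to do in [sector_nr, max_sector): count it as skipped. */
        sectors_skipped += max_sector - sector_nr;
        sector_nr = max_sector;

        /* Report everything we moved past; the first term is now zero,
         * so the return value is just the skipped count. */
        return (max_sector - sector_nr) + sectors_skipped;
    }

    int main(void)
    {
        uint64_t pos = 0;
        uint64_t ret = sync_one_window(pos, 2048);  /* caller advances by the return value */

        pos += ret;
        printf("advanced by %llu, now at %llu\n",
               (unsigned long long)ret, (unsigned long long)pos);
        return 0;
    }
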
4384 static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
4403 * We interpret 'sector_nr' as an address that we want to write to.
4437 if (sector_nr == 0) {
4441 sector_nr = (raid10_size(mddev, 0, 0)
4445 sector_nr = conf->reshape_progress;
4446 if (sector_nr) {
4447 mddev->curr_resync_completed = sector_nr;
4450 return sector_nr;
4454 /* We don't use sector_nr to track where we are up to
4475 sector_nr = last & ~(sector_t)(conf->geo.chunk_mask
4477 if (sector_nr + RESYNC_SECTORS < last)
4478 sector_nr = last + 1 - RESYNC_SECTORS;
4496 sector_nr = conf->reshape_progress;
4497 last = sector_nr | (conf->geo.chunk_mask
4500 if (sector_nr + RESYNC_SECTORS <= last)
4501 last = sector_nr + RESYNC_SECTORS - 1;
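
Lines 4475-4478 and 4496-4501 build the reshape window [sector_nr, last]: it is aligned to a chunk boundary and then clamped to at most RESYNC_SECTORS. For a backwards reshape the position counts down, so sector_nr is rounded down (last & ~chunk_mask) and, if the window is too long, pulled up toward last; for a forwards reshape, last is rounded up (sector_nr | chunk_mask) and pulled back toward sector_nr. A sketch of both clamps, with stand-in values for the chunk size and RESYNC_SECTORS:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        /* Illustrative values; RESYNC_SECTORS and the chunk size are stand-ins. */
        const uint64_t chunk_mask     = 1024 - 1;   /* chunk size 1024 sectors */
        const uint64_t RESYNC_SECTORS = 256;

        /* Forwards reshape: grow the window upwards from sector_nr. */
        uint64_t sector_nr = 10240;
        uint64_t last = sector_nr | chunk_mask;          /* end of the chunk: 11263 */
        if (sector_nr + RESYNC_SECTORS <= last)
            last = sector_nr + RESYNC_SECTORS - 1;       /* clamp to 256 sectors: 10495 */
        printf("forwards:  [%llu, %llu]\n",
               (unsigned long long)sector_nr, (unsigned long long)last);

        /* Backwards reshape: grow the window downwards from last. */
        uint64_t back_last = 20479;
        uint64_t back_sector_nr = back_last & ~chunk_mask;       /* start of the chunk: 19456 */
        if (back_sector_nr + RESYNC_SECTORS < back_last)
            back_sector_nr = back_last + 1 - RESYNC_SECTORS;     /* clamp to 256 sectors: 20224 */
        printf("backwards: [%llu, %llu]\n",
               (unsigned long long)back_sector_nr, (unsigned long long)back_last);
        return 0;
    }
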
4529 /* Now schedule reads for blocks from sector_nr to last */
4535 r10_bio->sector = sector_nr;
4537 r10_bio->sectors = last - sector_nr + 1;
4570 if (mddev_is_clustered(mddev) && conf->cluster_sync_high <= sector_nr) {
4574 conf->cluster_sync_low = sector_nr;
4575 conf->cluster_sync_high = sector_nr + CLUSTER_RESYNC_WINDOW_SECTORS;
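
Lines 4570-4575 (and the matching check at 3447 above) handle the clustered case: progress is shared between nodes in windows, and once sector_nr catches up with cluster_sync_high the window slides forward to [sector_nr, sector_nr + CLUSTER_RESYNC_WINDOW_SECTORS]. A small sketch of that sliding-window bookkeeping; the window size here is a stand-in, not the kernel's constant:

    #include <stdio.h>
    #include <stdint.h>

    /* Stand-in for CLUSTER_RESYNC_WINDOW_SECTORS; the real constant lives in the md code. */
    #define WINDOW_SECTORS 65536ULL

    struct sync_window {
        uint64_t low;
        uint64_t high;
    };

    /* Slide the window forward once progress reaches its upper edge. */
    static void maybe_advance(struct sync_window *w, uint64_t sector_nr)
    {
        if (w->high <= sector_nr) {
            w->low  = sector_nr;
            w->high = sector_nr + WINDOW_SECTORS;
            /* In the kernel, this is roughly where the updated window gets
             * communicated to the other cluster nodes. */
        }
    }

    int main(void)
    {
        struct sync_window w = { 0, 0 };

        for (uint64_t pos = 0; pos < 4 * WINDOW_SECTORS; pos += 8192)
            maybe_advance(&w, pos);

        printf("low=%llu high=%llu\n",
               (unsigned long long)w.low, (unsigned long long)w.high);
        return 0;
    }
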
4638 sector_nr += len >> 9;
4650 if (sector_nr <= last)