
Searched refs:sector_nr (Results 1 - 10 of 10), sorted by relevance

/kernel/linux/linux-6.6/fs/btrfs/
scrub.c
583 static struct page *scrub_stripe_get_page(struct scrub_stripe *stripe, int sector_nr) in scrub_stripe_get_page() argument
586 int page_index = (sector_nr << fs_info->sectorsize_bits) >> PAGE_SHIFT; in scrub_stripe_get_page()
592 int sector_nr) in scrub_stripe_get_page_offset()
596 return offset_in_page(sector_nr << fs_info->sectorsize_bits); in scrub_stripe_get_page_offset()
599 static void scrub_verify_one_metadata(struct scrub_stripe *stripe, int sector_nr) in scrub_verify_one_metadata() argument
603 const u64 logical = stripe->logical + (sector_nr << fs_info->sectorsize_bits); in scrub_verify_one_metadata()
604 const struct page *first_page = scrub_stripe_get_page(stripe, sector_nr); in scrub_verify_one_metadata()
605 const unsigned int first_off = scrub_stripe_get_page_offset(stripe, sector_nr); in scrub_verify_one_metadata()
620 bitmap_set(&stripe->csum_error_bitmap, sector_nr, sectors_per_tree); in scrub_verify_one_metadata()
621 bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree); in scrub_verify_one_metadata()
591 scrub_stripe_get_page_offset(struct scrub_stripe *stripe, int sector_nr) scrub_stripe_get_page_offset() argument
690 scrub_verify_one_sector(struct scrub_stripe *stripe, int sector_nr) scrub_verify_one_sector() argument
756 int sector_nr; scrub_verify_one_stripe() local
789 int sector_nr = calc_sector_number(stripe, bio_first_bvec_all(&bbio->bio)); scrub_repair_read_endio() local
879 int sector_nr; scrub_stripe_report_errors() local
1103 int sector_nr = calc_sector_number(stripe, bio_first_bvec_all(&bbio->bio)); scrub_read_endio() local
1132 int sector_nr = calc_sector_number(stripe, bio_first_bvec_all(&bbio->bio)); scrub_write_endio() local
1200 int sector_nr; scrub_write_sectors() local
1603 int sector_nr; scrub_find_fill_first_stripe() local
[all...]
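
The scrub.c hits above (lines 583-596) show how a stripe-relative sector_nr is turned into a backing page plus an offset inside that page: the sector number is shifted left by fs_info->sectorsize_bits to get a byte offset into the stripe, the page index is that offset shifted right by PAGE_SHIFT, and the in-page offset is offset_in_page() of the same value. Below is a minimal userspace sketch of the same arithmetic, assuming 4 KiB sectors and 4 KiB pages (the kernel takes both values from fs_info and the architecture); it is an illustration, not the btrfs code itself.

#include <stdio.h>

/* Assumed values for illustration; the kernel reads these from
 * fs_info->sectorsize_bits and the architecture's PAGE_SHIFT. */
#define SECTORSIZE_BITS 12   /* 4 KiB btrfs sector */
#define PAGE_SHIFT      12   /* 4 KiB page */
#define PAGE_SIZE       (1UL << PAGE_SHIFT)

/* Mirrors the idea of scrub_stripe_get_page(): which page backs this sector. */
static int sector_to_page_index(int sector_nr)
{
	return ((unsigned long)sector_nr << SECTORSIZE_BITS) >> PAGE_SHIFT;
}

/* Mirrors scrub_stripe_get_page_offset(): byte offset inside that page. */
static unsigned int sector_to_page_offset(int sector_nr)
{
	return ((unsigned long)sector_nr << SECTORSIZE_BITS) & (PAGE_SIZE - 1);
}

int main(void)
{
	for (int sector_nr = 0; sector_nr < 4; sector_nr++)
		printf("sector %d -> page %d, offset %u\n", sector_nr,
		       sector_to_page_index(sector_nr),
		       sector_to_page_offset(sector_nr));
	return 0;
}

With the sector size equal to the page size every sector maps to its own page at offset 0; on subpage setups (for example 4 KiB sectors on 64 KiB pages) several sectors share a page, which is why both helpers exist.
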
raid56.c
276 const int sector_nr = (page_nr << PAGE_SHIFT) >> in is_data_stripe_page() local
286 return (sector_nr < rbio->nr_data * rbio->stripe_nsectors); in is_data_stripe_page()
595 unsigned int sector_nr) in rbio_stripe_sector_index()
598 ASSERT(sector_nr < rbio->stripe_nsectors); in rbio_stripe_sector_index()
600 return stripe_nr * rbio->stripe_nsectors + sector_nr; in rbio_stripe_sector_index()
606 unsigned int sector_nr) in rbio_stripe_sector()
609 sector_nr)]; in rbio_stripe_sector()
614 unsigned int sector_nr) in rbio_pstripe_sector()
616 return rbio_stripe_sector(rbio, rbio->nr_data, sector_nr); in rbio_pstripe_sector()
621 unsigned int sector_nr) in rbio_qstripe_sector()
593 rbio_stripe_sector_index(const struct btrfs_raid_bio *rbio, unsigned int stripe_nr, unsigned int sector_nr) rbio_stripe_sector_index() argument
604 rbio_stripe_sector(const struct btrfs_raid_bio *rbio, unsigned int stripe_nr, unsigned int sector_nr) rbio_stripe_sector() argument
613 rbio_pstripe_sector(const struct btrfs_raid_bio *rbio, unsigned int sector_nr) rbio_pstripe_sector() argument
620 rbio_qstripe_sector(const struct btrfs_raid_bio *rbio, unsigned int sector_nr) rbio_qstripe_sector() argument
871 sector_in_rbio(struct btrfs_raid_bio *rbio, int stripe_nr, int sector_nr, bool bio_list_only) sector_in_rbio() argument
996 get_rbio_veritical_errors(struct btrfs_raid_bio *rbio, int sector_nr, int *faila, int *failb) get_rbio_veritical_errors() argument
1035 rbio_add_io_sector(struct btrfs_raid_bio *rbio, struct bio_list *bio_list, struct sector_ptr *sector, unsigned int stripe_nr, unsigned int sector_nr, enum req_op op) rbio_add_io_sector() argument
1678 verify_one_sector(struct btrfs_raid_bio *rbio, int stripe_nr, int sector_nr) verify_one_sector() argument
1718 recover_vertical(struct btrfs_raid_bio *rbio, int sector_nr, void **pointers, void **unmap_array) recover_vertical() argument
1994 int sector_nr; set_rbio_raid6_extra_error() local
2563 int sector_nr; recover_scrub_rbio() local
2691 int sector_nr; scrub_rbio() local
[all...]
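
The raid56.c hits at lines 593-621 flatten a (stripe_nr, sector_nr) pair into a single index into the rbio's sector array: index = stripe_nr * stripe_nsectors + sector_nr, with the parity stripe addressed as stripe nr_data and the RAID6 Q stripe as nr_data + 1. A small sketch of that layout follows, with the geometry values invented for illustration (the kernel derives them from the chunk being processed).

#include <assert.h>
#include <stdio.h>

/* Illustrative geometry: 2 data stripes + P + Q, 16 sectors per stripe. */
struct fake_rbio {
	unsigned int nr_data;          /* data stripes only */
	unsigned int real_stripes;     /* data + parity stripes */
	unsigned int stripe_nsectors;  /* sectors per stripe */
};

/* Same arithmetic as rbio_stripe_sector_index(): row-major (stripe, sector). */
static unsigned int stripe_sector_index(const struct fake_rbio *rbio,
					unsigned int stripe_nr,
					unsigned int sector_nr)
{
	assert(sector_nr < rbio->stripe_nsectors);
	assert(stripe_nr < rbio->real_stripes);
	return stripe_nr * rbio->stripe_nsectors + sector_nr;
}

/* P sits right after the data stripes, Q (RAID6) right after P. */
static unsigned int pstripe_index(const struct fake_rbio *rbio, unsigned int sector_nr)
{
	return stripe_sector_index(rbio, rbio->nr_data, sector_nr);
}

static unsigned int qstripe_index(const struct fake_rbio *rbio, unsigned int sector_nr)
{
	return stripe_sector_index(rbio, rbio->nr_data + 1, sector_nr);
}

int main(void)
{
	struct fake_rbio rbio = { .nr_data = 2, .real_stripes = 4, .stripe_nsectors = 16 };

	printf("data0[3] -> %u\n", stripe_sector_index(&rbio, 0, 3));
	printf("P[3]     -> %u\n", pstripe_index(&rbio, 3));
	printf("Q[3]     -> %u\n", qstripe_index(&rbio, 3));
	return 0;
}

The row-major layout keeps each stripe's sectors contiguous in the array, which is what the per-stripe helpers above rely on.
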
/kernel/linux/linux-5.10/drivers/md/
raid1.c
46 static void allow_barrier(struct r1conf *conf, sector_t sector_nr);
47 static void lower_barrier(struct r1conf *conf, sector_t sector_nr);
869 static int raise_barrier(struct r1conf *conf, sector_t sector_nr) in raise_barrier() argument
871 int idx = sector_to_idx(sector_nr); in raise_barrier()
919 static void lower_barrier(struct r1conf *conf, sector_t sector_nr) in lower_barrier() argument
921 int idx = sector_to_idx(sector_nr); in lower_barrier()
989 static void wait_read_barrier(struct r1conf *conf, sector_t sector_nr) in wait_read_barrier() argument
991 int idx = sector_to_idx(sector_nr); in wait_read_barrier()
1022 static void wait_barrier(struct r1conf *conf, sector_t sector_nr) in wait_barrier() argument
1024 int idx = sector_to_idx(sector_nr); in wait_barrier()
1035 allow_barrier(struct r1conf *conf, sector_t sector_nr) allow_barrier() argument
2617 raid1_sync_request(struct mddev *mddev, sector_t sector_nr, int *skipped) raid1_sync_request() argument
[all...]
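
In both raid1.c versions, nearly every hit routes sector_nr through sector_to_idx() so that raise_barrier(), lower_barrier(), wait_barrier() and allow_barrier() act on the barrier bucket covering that sector rather than on one array-wide barrier. Here is a hedged sketch of the bucketing idea; the real code hashes the barrier-unit number with hash_long(), and the unit size and bucket count below are assumptions chosen only for illustration.

#include <stdio.h>

/* Assumed illustration values: 64 MiB barrier units (2^17 512-byte sectors)
 * and 1024 buckets. The kernel derives its constants differently and hashes
 * the unit number instead of taking a simple modulo. */
#define BARRIER_UNIT_SECTOR_BITS 17
#define BARRIER_BUCKETS_NR       1024

typedef unsigned long long sector_t;

static int sector_to_idx_sketch(sector_t sector_nr)
{
	/* Sectors in the same barrier unit share a bucket; a real hash
	 * spreads the units more evenly than this modulo does. */
	return (int)((sector_nr >> BARRIER_UNIT_SECTOR_BITS) % BARRIER_BUCKETS_NR);
}

int main(void)
{
	sector_t samples[] = { 0, 1, 1 << 17, (sector_t)5 << 20 };

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("sector %llu -> bucket %d\n", samples[i],
		       sector_to_idx_sketch(samples[i]));
	return 0;
}
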
raid10.c
71 static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
2907 static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, in raid10_sync_request() argument
2934 return mddev->dev_sectors - sector_nr; in raid10_sync_request()
2946 if (sector_nr >= max_sector) { in raid10_sync_request()
3001 return reshape_request(mddev, sector_nr, skipped); in raid10_sync_request()
3008 return (max_sector - sector_nr) + sectors_skipped; in raid10_sync_request()
3018 max_sector > (sector_nr | chunk_mask)) in raid10_sync_request()
3019 max_sector = (sector_nr | chunk_mask) + 1; in raid10_sync_request()
3078 sect = raid10_find_virt(conf, sector_nr, i); in raid10_sync_request()
3304 * Let's check against "sector_nr in raid10_sync_request()
4384 reshape_request(struct mddev *mddev, sector_t sector_nr, int *skipped) reshape_request() argument
[all...]
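
The raid10.c hits at lines 3018-3019 clamp the resync window so that one call never crosses a chunk boundary: (sector_nr | chunk_mask) is the last sector of the chunk containing sector_nr, so max_sector becomes the first sector of the next chunk. The same arithmetic in isolation, with a chunk size of 512 KiB (1024 sectors) assumed for the example:

#include <stdio.h>

typedef unsigned long long sector_t;

int main(void)
{
	/* Assumed geometry: 512 KiB chunks = 1024 sectors, so the mask is 1023. */
	const sector_t chunk_mask = 1024 - 1;
	sector_t sector_nr = 1500;    /* somewhere inside the second chunk */
	sector_t max_sector = 100000; /* end of the range to sync, say */

	/* Same clamp as in raid10_sync_request(): stop at the end of this chunk. */
	if (max_sector > (sector_nr | chunk_mask))
		max_sector = (sector_nr | chunk_mask) + 1;

	printf("sync window: [%llu, %llu) -> %llu sectors\n",
	       sector_nr, max_sector, max_sector - sector_nr); /* 1500..2048 */
	return 0;
}
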
raid5.c
5939 static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *skipped) in reshape_request() argument
5965 if (sector_nr == 0) { in reshape_request()
5969 sector_nr = raid5_size(mddev, 0, 0) in reshape_request()
5974 sector_nr = MaxSector; in reshape_request()
5977 sector_nr = conf->reshape_progress; in reshape_request()
5978 sector_div(sector_nr, new_data_disks); in reshape_request()
5979 if (sector_nr) { in reshape_request()
5980 mddev->curr_resync_completed = sector_nr; in reshape_request()
5983 retn = sector_nr; in reshape_request()
6031 != sector_nr); in reshape_request()
6208 raid5_sync_request(struct mddev *mddev, sector_t sector_nr, int *skipped) raid5_sync_request() argument
[all...]
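
In the raid5.c reshape_request() hits (lines 5977-5983), the saved reshape_progress is divided in place by the number of data disks in the new geometry before being reported as curr_resync_completed, converting array-wide progress into per-device sectors. sector_div() is the kernel helper that divides a 64-bit sector_t in place and returns the remainder; below is a userspace stand-in for that step, with the numbers invented.

#include <stdio.h>

typedef unsigned long long sector_t;

/* Stand-in for the kernel's sector_div(): divide in place, return remainder. */
static unsigned int sector_div_sketch(sector_t *s, unsigned int divisor)
{
	unsigned int rem = (unsigned int)(*s % divisor);

	*s /= divisor;
	return rem;
}

int main(void)
{
	sector_t sector_nr = 7340039;     /* assumed reshape_progress, in array sectors */
	unsigned int new_data_disks = 4;  /* assumed new layout: 4 data disks */

	unsigned int rem = sector_div_sketch(&sector_nr, new_data_disks);

	/* sector_nr now holds per-device progress, as reported via
	 * mddev->curr_resync_completed in the snippet above. */
	printf("per-device progress: %llu sectors (remainder %u)\n", sector_nr, rem);
	return 0;
}
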
md.h
584 sector_t (*sync_request)(struct mddev *mddev, sector_t sector_nr, int *skipped);
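
The md.h hit shows that sync_request is a per-personality hook: the md resync thread calls it repeatedly with the next sector_nr, the return value says how many sectors that call covered, and *skipped is set when a range could be skipped (for example because the write-intent bitmap shows it clean). The following toy userspace sketch shows that calling convention only; the personality, window size and stop condition are invented and are not md's real logic.

#include <stdio.h>

typedef unsigned long long sector_t;

struct fake_mddev {
	sector_t dev_sectors;   /* size of each member device, in sectors */
};

/* Same shape as the md.h hook: (*sync_request)(mddev, sector_nr, skipped). */
struct fake_personality {
	sector_t (*sync_request)(struct fake_mddev *mddev, sector_t sector_nr,
				 int *skipped);
};

/* Toy implementation: "resync" fixed-size windows until the device ends;
 * returning 0 to mean "done" is this sketch's convention, not md's. */
static sector_t toy_sync_request(struct fake_mddev *mddev, sector_t sector_nr,
				 int *skipped)
{
	const sector_t window = 1024;  /* assumed resync window, in sectors */

	if (sector_nr >= mddev->dev_sectors) {
		*skipped = 1;
		return 0;
	}
	if (sector_nr + window > mddev->dev_sectors)
		return mddev->dev_sectors - sector_nr;
	return window;
}

int main(void)
{
	struct fake_mddev mddev = { .dev_sectors = 4096 + 100 };
	struct fake_personality pers = { .sync_request = toy_sync_request };
	int skipped = 0;

	/* Roughly how the resync loop drives the callback: advance sector_nr
	 * by however many sectors the personality says it covered. */
	for (sector_t sector_nr = 0; ; ) {
		sector_t done = pers.sync_request(&mddev, sector_nr, &skipped);

		if (done == 0)
			break;
		sector_nr += done;
		printf("resynced up to sector %llu\n", sector_nr);
	}
	return 0;
}
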
/kernel/linux/linux-6.6/drivers/md/
raid1.c
46 static void allow_barrier(struct r1conf *conf, sector_t sector_nr);
47 static void lower_barrier(struct r1conf *conf, sector_t sector_nr);
869 static int raise_barrier(struct r1conf *conf, sector_t sector_nr) in raise_barrier() argument
871 int idx = sector_to_idx(sector_nr); in raise_barrier()
919 static void lower_barrier(struct r1conf *conf, sector_t sector_nr) in lower_barrier() argument
921 int idx = sector_to_idx(sector_nr); in lower_barrier()
999 static bool wait_read_barrier(struct r1conf *conf, sector_t sector_nr, bool nowait) in wait_read_barrier() argument
1001 int idx = sector_to_idx(sector_nr); in wait_read_barrier()
1042 static bool wait_barrier(struct r1conf *conf, sector_t sector_nr, bool nowait) in wait_barrier() argument
1044 int idx = sector_to_idx(sector_nr); in wait_barrier()
1055 allow_barrier(struct r1conf *conf, sector_t sector_nr) allow_barrier() argument
2661 raid1_sync_request(struct mddev *mddev, sector_t sector_nr, int *skipped) raid1_sync_request() argument
[all...]
raid10.c
71 static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
3293 static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, in raid10_sync_request() argument
3321 return mddev->dev_sectors - sector_nr; in raid10_sync_request()
3333 if (sector_nr >= max_sector) { in raid10_sync_request()
3388 return reshape_request(mddev, sector_nr, skipped); in raid10_sync_request()
3408 return (max_sector - sector_nr) + sectors_skipped; in raid10_sync_request()
3418 max_sector > (sector_nr | chunk_mask)) in raid10_sync_request()
3419 max_sector = (sector_nr | chunk_mask) + 1; in raid10_sync_request()
3476 sect = raid10_find_virt(conf, sector_nr, i); in raid10_sync_request()
3707 * Let's check against "sector_nr in raid10_sync_request()
4772 reshape_request(struct mddev *mddev, sector_t sector_nr, int *skipped) reshape_request() argument
[all...]
raid5.c
6261 static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *skipped) in reshape_request() argument
6287 if (sector_nr == 0) { in reshape_request()
6291 sector_nr = raid5_size(mddev, 0, 0) in reshape_request()
6296 sector_nr = MaxSector; in reshape_request()
6299 sector_nr = conf->reshape_progress; in reshape_request()
6300 sector_div(sector_nr, new_data_disks); in reshape_request()
6301 if (sector_nr) { in reshape_request()
6302 mddev->curr_resync_completed = sector_nr; in reshape_request()
6305 retn = sector_nr; in reshape_request()
6353 != sector_nr); in reshape_request()
6532 raid5_sync_request(struct mddev *mddev, sector_t sector_nr, int *skipped) raid5_sync_request() argument
[all...]
md.h
639 sector_t (*sync_request)(struct mddev *mddev, sector_t sector_nr, int *skipped);
