
Searched refs:sectors (Results 1 - 25 of 359) sorted by relevance


/kernel/linux/linux-5.10/drivers/target/
target_core_sbc.c
216 static inline u32 sbc_get_size(struct se_cmd *cmd, u32 sectors) in sbc_get_size() argument
218 return cmd->se_dev->dev_attrib.block_size * sectors; in sbc_get_size()
286 unsigned int sectors = sbc_get_write_same_sectors(cmd); in sbc_setup_write_same() local
295 if (sectors > cmd->se_dev->dev_attrib.max_write_same_len) { in sbc_setup_write_same()
296 pr_warn("WRITE_SAME sectors: %u exceeds max_write_same_len: %u\n", in sbc_setup_write_same()
297 sectors, cmd->se_dev->dev_attrib.max_write_same_len); in sbc_setup_write_same()
303 if (((cmd->t_task_lba + sectors) < cmd->t_task_lba) || in sbc_setup_write_same()
304 ((cmd->t_task_lba + sectors) > end_lba)) { in sbc_setup_write_same()
305 pr_err("WRITE_SAME exceeds last lba %llu (lba %llu, sectors %u)\n", in sbc_setup_write_same()
306 (unsigned long long)end_lba, cmd->t_task_lba, sectors); in sbc_setup_write_same()
689 sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char protect, u32 sectors, bool is_write) sbc_check_prot() argument
795 u32 sectors = 0; sbc_parse_cdb() local
1329 sbc_dif_copy_prot(struct se_cmd *cmd, unsigned int sectors, bool read, struct scatterlist *sg, int sg_off) sbc_dif_copy_prot() argument
1375 sbc_dif_verify(struct se_cmd *cmd, sector_t start, unsigned int sectors, unsigned int ei_lba, struct scatterlist *psg, int psg_off) sbc_dif_verify() argument
[all...]
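
The sbc_setup_write_same() hits above (lines 303-306 of the 5.10 file) show the usual two-part range check: reject a request whose LBA range either wraps the unsigned sector arithmetic or runs past the device's last LBA. A minimal standalone sketch of that check follows; write_same_range_ok() and its parameters are invented names for illustration, not the kernel function:

    /* Toy version of the WRITE_SAME range check seen above: reject a request
     * whose LBA range wraps around or runs past the device's last LBA. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static bool write_same_range_ok(uint64_t lba, uint32_t sectors, uint64_t end_lba)
    {
        /* lba + sectors < lba only happens on unsigned wrap-around */
        if (lba + sectors < lba || lba + sectors > end_lba)
            return false;
        return true;
    }

    int main(void)
    {
        printf("%d\n", write_same_range_ok(100, 8, 1000));            /* 1: fits */
        printf("%d\n", write_same_range_ok(999, 8, 1000));            /* 0: past end */
        printf("%d\n", write_same_range_ok(UINT64_MAX - 2, 8, 1000)); /* 0: wraps */
        return 0;
    }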
/kernel/linux/linux-6.6/drivers/target/
target_core_sbc.c
216 static inline u32 sbc_get_size(struct se_cmd *cmd, u32 sectors) in sbc_get_size() argument
218 return cmd->se_dev->dev_attrib.block_size * sectors; in sbc_get_size()
279 unsigned int sectors = sbc_get_write_same_sectors(cmd); in sbc_setup_write_same() local
288 if (sectors > cmd->se_dev->dev_attrib.max_write_same_len) { in sbc_setup_write_same()
289 pr_warn("WRITE_SAME sectors: %u exceeds max_write_same_len: %u\n", in sbc_setup_write_same()
290 sectors, cmd->se_dev->dev_attrib.max_write_same_len); in sbc_setup_write_same()
296 if (((cmd->t_task_lba + sectors) < cmd->t_task_lba) || in sbc_setup_write_same()
297 ((cmd->t_task_lba + sectors) > end_lba)) { in sbc_setup_write_same()
298 pr_err("WRITE_SAME exceeds last lba %llu (lba %llu, sectors %u)\n", in sbc_setup_write_same()
299 (unsigned long long)end_lba, cmd->t_task_lba, sectors); in sbc_setup_write_same()
667 sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char protect, u32 sectors, bool is_write) sbc_check_prot() argument
773 u32 sectors = 0; sbc_parse_cdb() local
1270 sbc_dif_copy_prot(struct se_cmd *cmd, unsigned int sectors, bool read, struct scatterlist *sg, int sg_off) sbc_dif_copy_prot() argument
1316 sbc_dif_verify(struct se_cmd *cmd, sector_t start, unsigned int sectors, unsigned int ei_lba, struct scatterlist *psg, int psg_off) sbc_dif_verify() argument
[all...]
/kernel/linux/linux-5.10/block/
badblocks.c
20 * badblocks_check() - check a given range for bad sectors
23 * @sectors: number of sectors to check for badblocks
30 * Length of bad-range, in sectors: 0-511 for lengths 1-512
53 int badblocks_check(struct badblocks *bb, sector_t s, int sectors, in badblocks_check() argument
60 sector_t target = s + sectors; in badblocks_check()
68 sectors = target - s; in badblocks_check()
152 * @sectors: number of sectors to mark as bad
153 * @acknowledged: weather to mark the bad sectors a
163 badblocks_set(struct badblocks *bb, sector_t s, int sectors, int acknowledged) badblocks_set() argument
331 badblocks_clear(struct badblocks *bb, sector_t s, int sectors) badblocks_clear() argument
[all...]
/kernel/linux/linux-6.6/block/
badblocks.c
20 * badblocks_check() - check a given range for bad sectors
23 * @sectors: number of sectors to check for badblocks
30 * Length of bad-range, in sectors: 0-511 for lengths 1-512
53 int badblocks_check(struct badblocks *bb, sector_t s, int sectors, in badblocks_check() argument
60 sector_t target = s + sectors; in badblocks_check()
151 * @sectors: number of sectors to mark as bad
152 * @acknowledged: weather to mark the bad sectors as acknowledged
162 int badblocks_set(struct badblocks *bb, sector_t s, int sectors, in badblocks_set() argument
330 badblocks_clear(struct badblocks *bb, sector_t s, int sectors) badblocks_clear() argument
[all...]
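
Both badblocks.c entries document that a bad-range length is stored as 0-511 for real lengths of 1-512, i.e. packed into a 9-bit field alongside the start sector and an "acknowledged" flag. The toy encoder/decoder below assumes such a layout purely for illustration; bb_make(), bb_decode() and the exact bit positions are not taken from the kernel header:

    /* Toy packing of a bad-block entry: start sector in the high bits,
     * (length - 1) in the low 9 bits, ack flag in bit 63. Layout assumed. */
    #include <stdint.h>
    #include <stdio.h>

    #define BB_LEN_BITS 9   /* 9 bits store (length - 1), i.e. lengths 1..512 */

    static uint64_t bb_make(uint64_t start, unsigned int len, int ack)
    {
        return (start << BB_LEN_BITS) | (uint64_t)(len - 1) |
               ((uint64_t)(!!ack) << 63);
    }

    static void bb_decode(uint64_t e)
    {
        uint64_t start = (e << 1) >> (BB_LEN_BITS + 1);  /* drop ack bit, then length */
        unsigned int len = (unsigned int)(e & ((1u << BB_LEN_BITS) - 1)) + 1;
        int ack = (int)(e >> 63);

        printf("start=%llu len=%u ack=%d\n", (unsigned long long)start, len, ack);
    }

    int main(void)
    {
        bb_decode(bb_make(4096, 512, 1));  /* start=4096 len=512 ack=1 */
        return 0;
    }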
/kernel/linux/linux-5.10/drivers/md/
raid0.c
68 sector_t curr_zone_end, sectors; in create_strip_zones() local
87 sectors = rdev1->sectors; in create_strip_zones()
88 sector_div(sectors, mddev->chunk_sectors); in create_strip_zones()
89 rdev1->sectors = sectors * mddev->chunk_sectors; in create_strip_zones()
99 (unsigned long long)rdev1->sectors, in create_strip_zones()
101 (unsigned long long)rdev2->sectors); in create_strip_zones()
107 if (rdev2->sectors == rdev1->sectors) { in create_strip_zones()
357 raid0_size(struct mddev *mddev, sector_t sectors, int raid_disks) raid0_size() argument
575 unsigned sectors; raid0_make_request() local
[all...]
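
The create_strip_zones() hits (lines 87-89 in 5.10, 83-85 in 6.6) round each member device's size down to a whole number of chunks before the zones are laid out. A user-space sketch of that rounding, with plain division standing in for the kernel's sector_div() helper and round_to_chunks() as an invented name:

    /* Round a device size (in sectors) down to a multiple of the chunk size,
     * mirroring the sector_div()-based rounding in create_strip_zones(). */
    #include <stdint.h>
    #include <stdio.h>

    static uint64_t round_to_chunks(uint64_t dev_sectors, uint32_t chunk_sectors)
    {
        uint64_t chunks = dev_sectors / chunk_sectors;   /* sector_div() equivalent */
        return chunks * chunk_sectors;                   /* usable, chunk-aligned size */
    }

    int main(void)
    {
        /* 1 TiB device, 512 KiB chunks (1024 sectors): already aligned */
        printf("%llu\n", (unsigned long long)round_to_chunks(2147483648ULL, 1024));
        /* odd-sized device: the trailing partial chunk is dropped */
        printf("%llu\n", (unsigned long long)round_to_chunks(1000000, 1024)); /* 999424 */
        return 0;
    }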
md-linear.c
49 static sector_t linear_size(struct mddev *mddev, sector_t sectors, int raid_disks) in linear_size() argument
55 WARN_ONCE(sectors || raid_disks, in linear_size()
79 sector_t sectors; in linear_conf() local
89 sectors = rdev->sectors; in linear_conf()
90 sector_div(sectors, mddev->chunk_sectors); in linear_conf()
91 rdev->sectors = sectors * mddev->chunk_sectors; in linear_conf()
97 conf->array_sectors += rdev->sectors; in linear_conf()
117 conf->disks[0].end_sector = conf->disks[0].rdev->sectors; in linear_conf()
[all...]
raid1.c
65 sector_t hi = lo + r1_bio->sectors; in check_and_add_serial()
313 pr_debug("raid1: sync end %s on sectors %llu-%llu\n", in raid_end_bio_io()
337 r1_bio->sector + (r1_bio->sectors); in update_head_pos()
420 r1_bio->sectors, in close_write()
452 sector_t hi = r1_bio->sector + r1_bio->sectors; in raid1_end_write_request()
514 if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors, in raid1_end_write_request()
539 pr_debug("raid1: behind end write sectors" in raid1_end_write_request()
562 sector_t sectors) in align_to_barrier_unit_end()
566 WARN_ON(sectors == 0); in align_to_barrier_unit_end()
568 * len is the number of sectors fro in align_to_barrier_unit_end()
561 align_to_barrier_unit_end(sector_t start_sector, sector_t sectors) align_to_barrier_unit_end() argument
597 int sectors; read_balance() local
1551 sector_t sectors; raid1_make_request() local
1939 r1_sync_page_io(struct md_rdev *rdev, sector_t sector, int sectors, struct page *page, int rw) r1_sync_page_io() argument
1976 int sectors = r1_bio->sectors; fix_sync_read_error() local
2230 fix_read_error(struct r1conf *conf, int read_disk, sector_t sect, int sectors) fix_read_error() argument
2345 int sectors; narrow_write_error() local
2916 raid1_size(struct mddev *mddev, sector_t sectors, int raid_disks) raid1_size() argument
3183 raid1_resize(struct mddev *mddev, sector_t sectors) raid1_resize() argument
[all...]
raid10.c
318 r10_bio->devs[slot].addr + (r10_bio->sectors); in update_head_pos()
406 r10_bio->sectors, in close_write()
512 r10_bio->sectors, in raid10_end_write_request()
709 int sectors = r10_bio->sectors; in read_balance() local
736 && (this_sector + sectors >= conf->next_resync)) || in read_balance()
739 this_sector + sectors))) in read_balance()
754 r10_bio->devs[slot].addr + sectors > in read_balance()
768 r10_bio->devs[slot].addr + sectors > rdev->recovery_offset) in read_balance()
772 if (is_badblock(rdev, dev_sector, sectors, in read_balance()
1110 regular_request_wait(struct mddev *mddev, struct r10conf *conf, struct bio *bio, sector_t sectors) regular_request_wait() argument
1295 sector_t sectors; raid10_write_request() local
1499 __make_request(struct mddev *mddev, struct bio *bio, int sectors) __make_request() argument
1526 int sectors = bio_sectors(bio); raid10_make_request() local
2057 int sectors = r10_bio->sectors; sync_request_write() local
2157 int sectors = r10_bio->sectors; fix_recovery_read_error() local
2304 r10_sync_page_io(struct md_rdev *rdev, sector_t sector, int sectors, struct page *page, int rw) r10_sync_page_io() argument
2339 int sectors = r10_bio->sectors; fix_read_error() local
2540 int sectors; narrow_write_error() local
3527 raid10_size(struct mddev *mddev, sector_t sectors, int raid_disks) raid10_size() argument
3956 raid10_resize(struct mddev *mddev, sector_t sectors) raid10_resize() argument
4751 int sectors = r10_bio->sectors; handle_reshape_read_error() local
[all...]
/kernel/linux/linux-6.6/drivers/md/
raid0.c
66 sector_t curr_zone_end, sectors; in create_strip_zones() local
83 sectors = rdev1->sectors; in create_strip_zones()
84 sector_div(sectors, mddev->chunk_sectors); in create_strip_zones()
85 rdev1->sectors = sectors * mddev->chunk_sectors; in create_strip_zones()
95 (unsigned long long)rdev1->sectors, in create_strip_zones()
97 (unsigned long long)rdev2->sectors); in create_strip_zones()
103 if (rdev2->sectors == rdev1->sectors) { in create_strip_zones()
353 raid0_size(struct mddev *mddev, sector_t sectors, int raid_disks) raid0_size() argument
593 unsigned sectors; raid0_make_request() local
[all...]
md-linear.c
49 static sector_t linear_size(struct mddev *mddev, sector_t sectors, int raid_disks) in linear_size() argument
55 WARN_ONCE(sectors || raid_disks, in linear_size()
78 sector_t sectors; in linear_conf() local
88 sectors = rdev->sectors; in linear_conf()
89 sector_div(sectors, mddev->chunk_sectors); in linear_conf()
90 rdev->sectors = sectors * mddev->chunk_sectors; in linear_conf()
96 conf->array_sectors += rdev->sectors; in linear_conf()
108 conf->disks[0].end_sector = conf->disks[0].rdev->sectors; in linear_conf()
[all...]
raid1.c
65 sector_t hi = lo + r1_bio->sectors; in check_and_add_serial()
318 pr_debug("raid1: sync end %s on sectors %llu-%llu\n", in raid_end_bio_io()
342 r1_bio->sector + (r1_bio->sectors); in update_head_pos()
424 r1_bio->sectors, in close_write()
456 sector_t hi = r1_bio->sector + r1_bio->sectors; in raid1_end_write_request()
518 if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors, in raid1_end_write_request()
543 pr_debug("raid1: behind end write sectors" in raid1_end_write_request()
566 sector_t sectors) in align_to_barrier_unit_end()
570 WARN_ON(sectors == 0); in align_to_barrier_unit_end()
572 * len is the number of sectors fro in align_to_barrier_unit_end()
565 align_to_barrier_unit_end(sector_t start_sector, sector_t sectors) align_to_barrier_unit_end() argument
601 int sectors; read_balance() local
1588 sector_t sectors; raid1_make_request() local
1985 r1_sync_page_io(struct md_rdev *rdev, sector_t sector, int sectors, struct page *page, blk_opf_t rw) r1_sync_page_io() argument
2022 int sectors = r1_bio->sectors; fix_sync_read_error() local
2274 fix_read_error(struct r1conf *conf, int read_disk, sector_t sect, int sectors) fix_read_error() argument
2388 int sectors; narrow_write_error() local
2960 raid1_size(struct mddev *mddev, sector_t sectors, int raid_disks) raid1_size() argument
3213 raid1_resize(struct mddev *mddev, sector_t sectors) raid1_resize() argument
[all...]
raid10.c
346 r10_bio->devs[slot].addr + (r10_bio->sectors); in update_head_pos()
432 r10_bio->sectors, in close_write()
538 r10_bio->sectors, in raid10_end_write_request()
735 int sectors = r10_bio->sectors; in read_balance() local
762 && (this_sector + sectors >= conf->next_resync)) || in read_balance()
765 this_sector + sectors))) in read_balance()
780 r10_bio->devs[slot].addr + sectors > in read_balance()
794 r10_bio->devs[slot].addr + sectors > rdev->recovery_offset) in read_balance()
798 if (is_badblock(rdev, dev_sector, sectors, in read_balance()
1146 regular_request_wait(struct mddev *mddev, struct r10conf *conf, struct bio *bio, sector_t sectors) regular_request_wait() argument
1412 sector_t sectors; raid10_write_request() local
1568 __make_request(struct mddev *mddev, struct bio *bio, int sectors) __make_request() argument
1905 int sectors = bio_sectors(bio); raid10_make_request() local
2449 int sectors = r10_bio->sectors; sync_request_write() local
2547 int sectors = r10_bio->sectors; fix_recovery_read_error() local
2694 r10_sync_page_io(struct md_rdev *rdev, sector_t sector, int sectors, struct page *page, enum req_op op) r10_sync_page_io() argument
2729 int sectors = r10_bio->sectors, slot = r10_bio->read_slot; fix_read_error() local
2922 int sectors; narrow_write_error() local
3930 raid10_size(struct mddev *mddev, sector_t sectors, int raid_disks) raid10_size() argument
4345 raid10_resize(struct mddev *mddev, sector_t sectors) raid10_resize() argument
5133 int sectors = r10_bio->sectors; handle_reshape_read_error() local
[all...]
/kernel/linux/linux-5.10/drivers/scsi/
scsicam.c
51 * scsi_partsize - Parse cylinders/heads/sectors from PC partition table
53 * @capacity: size of the disk in sectors
54 * @geom: output in form of [hds, cylinders, sectors]
171 * minimizes the number of sectors that will be unused at the end
180 unsigned long heads, sectors, cylinders, temp; in setsize() local
183 sectors = 62L; /* Maximize sectors per track */ in setsize()
185 temp = cylinders * sectors; /* Compute divisor for heads */ in setsize()
189 temp = cylinders * heads; /* Compute divisor for sectors */ in setsize()
190 sectors in setsize()
[all...]
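
The setsize() fragments above describe a heuristic geometry: fix heads and sectors per track, then derive cylinders from the capacity so that as few sectors as possible are left unused at the end. The truncated lines do not show the full algorithm, so the sketch below only demonstrates the quantity being minimized, not the kernel's actual heuristic; waste() and the example geometries are made up:

    /* Unused-sector count for a candidate CHS geometry: cylinders is the
     * integer quotient, and whatever does not fit a whole cylinder is wasted. */
    #include <stdio.h>

    static unsigned long waste(unsigned long capacity, unsigned long heads,
                               unsigned long sectors)
    {
        unsigned long cylinders = capacity / (heads * sectors);
        return capacity - cylinders * heads * sectors;
    }

    int main(void)
    {
        unsigned long capacity = 4194304; /* 2 GiB in 512-byte sectors */
        printf("255/63 geometry wastes %lu sectors\n", waste(capacity, 255, 63));
        printf("64/62  geometry wastes %lu sectors\n", waste(capacity, 64, 62));
        return 0;
    }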
/kernel/linux/linux-6.6/drivers/scsi/
scsicam.c
50 * scsi_partsize - Parse cylinders/heads/sectors from PC partition table
52 * @capacity: size of the disk in sectors
53 * @geom: output in form of [hds, cylinders, sectors]
170 * minimizes the number of sectors that will be unused at the end
179 unsigned long heads, sectors, cylinders, temp; in setsize() local
182 sectors = 62L; /* Maximize sectors per track */ in setsize()
184 temp = cylinders * sectors; /* Compute divisor for heads */ in setsize()
188 temp = cylinders * heads; /* Compute divisor for sectors */ in setsize()
189 sectors in setsize()
[all...]
/kernel/linux/linux-6.6/include/linux/
blk-integrity.h
83 * @sectors: Size of the bio in 512-byte sectors
86 * sectors but integrity metadata is done in terms of the data integrity
87 * interval size of the storage device. Convert the block layer sectors
91 unsigned int sectors) in bio_integrity_intervals()
93 return sectors >> (bi->interval_exp - 9); in bio_integrity_intervals()
97 unsigned int sectors) in bio_integrity_bytes()
99 return bio_integrity_intervals(bi, sectors) * bi->tuple_size; in bio_integrity_bytes()
164 unsigned int sectors) in bio_integrity_intervals()
170 unsigned int sectors) in bio_integrity_bytes()
90 bio_integrity_intervals(struct blk_integrity *bi, unsigned int sectors) bio_integrity_intervals() argument
96 bio_integrity_bytes(struct blk_integrity *bi, unsigned int sectors) bio_integrity_bytes() argument
163 bio_integrity_intervals(struct blk_integrity *bi, unsigned int sectors) bio_integrity_intervals() argument
169 bio_integrity_bytes(struct blk_integrity *bi, unsigned int sectors) bio_integrity_bytes() argument
[all...]
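
The blk-integrity.h hit shows both conversions in full: a bio of N 512-byte sectors spans N >> (interval_exp - 9) data-integrity intervals, and each interval carries tuple_size bytes of protection information. A small worked example with assumed values (4096-byte interval, 8-byte tuple, 256-sector bio):

    /* Worked example of bio_integrity_intervals()/bio_integrity_bytes():
     * convert 512-byte block-layer sectors into integrity intervals, then
     * into bytes of protection information. Values are examples only. */
    #include <stdio.h>

    int main(void)
    {
        unsigned int sectors      = 256;  /* 128 KiB bio */
        unsigned int interval_exp = 12;   /* 4096-byte integrity interval */
        unsigned int tuple_size   = 8;    /* e.g. an 8-byte DIF tuple */

        unsigned int intervals = sectors >> (interval_exp - 9); /* 256 >> 3 = 32 */
        unsigned int bytes     = intervals * tuple_size;        /* 32 * 8 = 256 */

        printf("%u intervals, %u bytes of PI\n", intervals, bytes);
        return 0;
    }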
badblocks.h
34 int shift; /* shift from sectors to block size
41 sector_t size; /* in sectors */
44 int badblocks_check(struct badblocks *bb, sector_t s, int sectors,
46 int badblocks_set(struct badblocks *bb, sector_t s, int sectors,
48 int badblocks_clear(struct badblocks *bb, sector_t s, int sectors);
/kernel/linux/linux-5.10/drivers/mtd/
ssfdc.c
23 unsigned char sectors; member
318 ssfdc->sectors = 32; in ssfdcr_add_mtd()
319 get_chs(mtd->size, NULL, &ssfdc->heads, &ssfdc->sectors); in ssfdcr_add_mtd()
321 ((long)ssfdc->sectors * (long)ssfdc->heads)); in ssfdcr_add_mtd()
324 ssfdc->cylinders, ssfdc->heads , ssfdc->sectors, in ssfdcr_add_mtd()
326 (long)ssfdc->sectors); in ssfdcr_add_mtd()
329 (long)ssfdc->sectors; in ssfdcr_add_mtd()
411 ssfdc->cylinders, ssfdc->heads, ssfdc->sectors); in ssfdcr_getgeo()
414 geo->sectors = ssfdc->sectors; in ssfdcr_getgeo()
[all...]
/kernel/linux/linux-6.6/drivers/mtd/
ssfdc.c
23 unsigned char sectors; member
318 ssfdc->sectors = 32; in ssfdcr_add_mtd()
319 get_chs(mtd->size, NULL, &ssfdc->heads, &ssfdc->sectors); in ssfdcr_add_mtd()
321 ((long)ssfdc->sectors * (long)ssfdc->heads)); in ssfdcr_add_mtd()
324 ssfdc->cylinders, ssfdc->heads , ssfdc->sectors, in ssfdcr_add_mtd()
326 (long)ssfdc->sectors); in ssfdcr_add_mtd()
329 (long)ssfdc->sectors; in ssfdcr_add_mtd()
411 ssfdc->cylinders, ssfdc->heads, ssfdc->sectors); in ssfdcr_getgeo()
414 geo->sectors = ssfdc->sectors; in ssfdcr_getgeo()
[all...]
/kernel/linux/linux-5.10/include/linux/
badblocks.h
34 int shift; /* shift from sectors to block size
41 sector_t size; /* in sectors */
44 int badblocks_check(struct badblocks *bb, sector_t s, int sectors,
46 int badblocks_set(struct badblocks *bb, sector_t s, int sectors,
48 int badblocks_clear(struct badblocks *bb, sector_t s, int sectors);
/kernel/linux/linux-5.10/block/partitions/
ibm.c
41 return cyl * geo->heads * geo->sectors + in cchh2blk()
42 head * geo->sectors; in cchh2blk()
59 return cyl * geo->heads * geo->sectors + in cchhb2blk()
60 head * geo->sectors + in cchhb2blk()
177 offset + geo->sectors; in find_vol1_partitions()
222 * geo->sectors * secperblk; in find_lnx1_partitions()
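
The ibm.c hits show the cylinder/head-to-block arithmetic used for DASD partition parsing: a linear block number is cylinder * heads * sectors_per_track plus head * sectors_per_track (cchhb2blk adds a record offset that is truncated in the listing). A stand-alone version of the visible part, with made-up geometry values:

    /* CHS-to-linear-block arithmetic in the style of cchh2blk() above.
     * The geometry values are invented for the example. */
    #include <stdio.h>

    struct geo { unsigned int heads, sectors; };

    static unsigned long cchh2blk_demo(const struct geo *g,
                                       unsigned int cyl, unsigned int head)
    {
        return (unsigned long)cyl * g->heads * g->sectors +
               (unsigned long)head * g->sectors;
    }

    int main(void)
    {
        struct geo g = { .heads = 15, .sectors = 12 }; /* DASD-like geometry */
        printf("%lu\n", cchh2blk_demo(&g, 100, 3));    /* 100*15*12 + 3*12 = 18036 */
        return 0;
    }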
/kernel/linux/linux-6.6/block/partitions/
ibm.c
41 return cyl * geo->heads * geo->sectors + in cchh2blk()
42 head * geo->sectors; in cchh2blk()
59 return cyl * geo->heads * geo->sectors + in cchhb2blk()
60 head * geo->sectors + in cchhb2blk()
177 offset + geo->sectors; in find_vol1_partitions()
222 * geo->sectors * secperblk; in find_lnx1_partitions()
/kernel/linux/linux-5.10/drivers/md/bcache/
alloc.c
86 void bch_rescale_priorities(struct cache_set *c, int sectors) in bch_rescale_priorities() argument
93 atomic_sub(sectors, &c->rescale); in bch_rescale_priorities()
162 * first: we also take into account the number of sectors of live data in that
552 * For example, dirty sectors of flash only volume is not reclaimable, if their
553 * dirty sectors mixed with dirty sectors of cached device, such buckets will
602 * May allocate fewer sectors than @sectors, KEY_SIZE(k) indicates how many
603 * sectors were actually allocated.
609 unsigned int sectors, in bch_alloc_sectors()
607 bch_alloc_sectors(struct cache_set *c, struct bkey *k, unsigned int sectors, unsigned int write_point, unsigned int write_prio, bool wait) bch_alloc_sectors() argument
[all...]
/kernel/linux/linux-5.10/drivers/usb/storage/
datafab.c
60 unsigned long sectors; /* total sector count */ member
138 u32 sectors) in datafab_read_data()
153 if (sectors > 0x0FFFFFFF) in datafab_read_data()
162 totallen = sectors * info->ssize; in datafab_read_data()
221 u32 sectors) in datafab_write_data()
237 if (sectors > 0x0FFFFFFF) in datafab_write_data()
246 totallen = sectors * info->ssize; in datafab_write_data()
421 info->sectors = ((u32)(reply[117]) << 24) | in datafab_id_device()
577 info->ssize = 0x200; // hard coded 512 byte sectors as per ATA spec in datafab_transport()
582 usb_stor_dbg(us, "READ_CAPACITY: %ld sectors, in datafab_transport()
135 datafab_read_data(struct us_data *us, struct datafab_info *info, u32 sector, u32 sectors) datafab_read_data() argument
218 datafab_write_data(struct us_data *us, struct datafab_info *info, u32 sector, u32 sectors) datafab_write_data() argument
[all...]
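
The datafab hits show the transfer-size arithmetic: the driver caps the sector count at 0x0FFFFFFF and converts it to a byte length using a hard-coded 512-byte (0x200) sector size. A small sketch of that conversion; transfer_len() is an invented helper, and a 64-bit product is used here to sidestep overflow rather than mirroring the driver's chunked I/O:

    /* Convert a sector count into a byte length with the same sanity cap and
     * fixed 512-byte sector size seen in datafab_read_data()/write_data(). */
    #include <stdint.h>
    #include <stdio.h>

    static int transfer_len(uint32_t sectors, uint64_t *out_bytes)
    {
        const uint32_t ssize = 0x200;      /* hard-coded 512-byte sectors, per ATA */

        if (sectors > 0x0FFFFFFF)          /* same sanity cap as in the snippet */
            return -1;
        *out_bytes = (uint64_t)sectors * ssize;
        return 0;
    }

    int main(void)
    {
        uint64_t bytes;

        if (transfer_len(8, &bytes) == 0)
            printf("8 sectors -> %llu bytes\n", (unsigned long long)bytes); /* 4096 */
        return 0;
    }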
/kernel/linux/linux-6.6/drivers/usb/storage/
datafab.c
60 unsigned long sectors; /* total sector count */ member
138 u32 sectors) in datafab_read_data()
153 if (sectors > 0x0FFFFFFF) in datafab_read_data()
162 totallen = sectors * info->ssize; in datafab_read_data()
221 u32 sectors) in datafab_write_data()
237 if (sectors > 0x0FFFFFFF) in datafab_write_data()
246 totallen = sectors * info->ssize; in datafab_write_data()
420 info->sectors = ((u32)(reply[117]) << 24) | in datafab_id_device()
576 info->ssize = 0x200; // hard coded 512 byte sectors as per ATA spec in datafab_transport()
581 usb_stor_dbg(us, "READ_CAPACITY: %ld sectors, in datafab_transport()
135 datafab_read_data(struct us_data *us, struct datafab_info *info, u32 sector, u32 sectors) datafab_read_data() argument
218 datafab_write_data(struct us_data *us, struct datafab_info *info, u32 sector, u32 sectors) datafab_write_data() argument
[all...]
/kernel/linux/linux-6.6/drivers/md/bcache/
alloc.c
86 void bch_rescale_priorities(struct cache_set *c, int sectors) in bch_rescale_priorities() argument
93 atomic_sub(sectors, &c->rescale); in bch_rescale_priorities()
162 * first: we also take into account the number of sectors of live data in that
551 * For example, dirty sectors of flash only volume is not reclaimable, if their
552 * dirty sectors mixed with dirty sectors of cached device, such buckets will
601 * May allocate fewer sectors than @sectors, KEY_SIZE(k) indicates how many
602 * sectors were actually allocated.
608 unsigned int sectors, in bch_alloc_sectors()
606 bch_alloc_sectors(struct cache_set *c, struct bkey *k, unsigned int sectors, unsigned int write_point, unsigned int write_prio, bool wait) bch_alloc_sectors() argument
[all...]

Completed in 22 milliseconds
