
Searched refs:nr_blocks (Results 1 - 25 of 99) sorted by relevance

/kernel/linux/linux-6.6/drivers/md/
dm-zoned-target.c
118 unsigned int nr_blocks) in dmz_submit_bio()
135 clone->bi_iter.bi_size = dmz_blk2sect(nr_blocks) << SECTOR_SHIFT; in dmz_submit_bio()
145 zone->wp_block += nr_blocks; in dmz_submit_bio()
154 sector_t chunk_block, unsigned int nr_blocks) in dmz_handle_read_zero()
156 unsigned int size = nr_blocks << DMZ_BLOCK_SHIFT; in dmz_handle_read_zero()
158 /* Clear nr_blocks */ in dmz_handle_read_zero()
174 unsigned int nr_blocks = dmz_bio_blocks(bio); in dmz_handle_read() local
175 sector_t end_block = chunk_block + nr_blocks; in dmz_handle_read()
191 (unsigned long long)chunk_block, nr_blocks); in dmz_handle_read()
196 nr_blocks in dmz_handle_read()
116 dmz_submit_bio(struct dmz_target *dmz, struct dm_zone *zone, struct bio *bio, sector_t chunk_block, unsigned int nr_blocks) dmz_submit_bio() argument
153 dmz_handle_read_zero(struct dmz_target *dmz, struct bio *bio, sector_t chunk_block, unsigned int nr_blocks) dmz_handle_read_zero() argument
249 dmz_handle_direct_write(struct dmz_target *dmz, struct dm_zone *zone, struct bio *bio, sector_t chunk_block, unsigned int nr_blocks) dmz_handle_direct_write() argument
282 dmz_handle_buffered_write(struct dmz_target *dmz, struct dm_zone *zone, struct bio *bio, sector_t chunk_block, unsigned int nr_blocks) dmz_handle_buffered_write() argument
323 unsigned int nr_blocks = dmz_bio_blocks(bio); dmz_handle_write() local
362 unsigned int nr_blocks = dmz_bio_blocks(bio); dmz_handle_discard() local
[all...]
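
Note: the dm-zoned-target.c hits above mostly convert a block count into sectors and bytes (dmz_blk2sect(nr_blocks) << SECTOR_SHIFT, nr_blocks << DMZ_BLOCK_SHIFT). A minimal userspace sketch of that arithmetic follows; the 4 KiB block size and 512-byte sector size are assumptions for illustration, not values taken from this listing.

#include <stdint.h>
#include <stdio.h>

#define SECTOR_SHIFT            9       /* 512-byte sectors */
#define DMZ_BLOCK_SHIFT         12      /* assumed 4 KiB dm-zoned blocks */
#define DMZ_BLOCK_SECTORS_SHIFT (DMZ_BLOCK_SHIFT - SECTOR_SHIFT)

static uint64_t dmz_blk2sect(uint64_t nr_blocks)
{
        return nr_blocks << DMZ_BLOCK_SECTORS_SHIFT;
}

int main(void)
{
        uint64_t nr_blocks = 8;

        /* bio size in bytes, as in dmz_submit_bio() */
        uint64_t bi_size = dmz_blk2sect(nr_blocks) << SECTOR_SHIFT;
        /* zero-fill size in bytes, as in dmz_handle_read_zero() */
        uint64_t zero_size = nr_blocks << DMZ_BLOCK_SHIFT;

        printf("%llu blocks = %llu sectors = %llu bytes (zero fill %llu bytes)\n",
               (unsigned long long)nr_blocks,
               (unsigned long long)dmz_blk2sect(nr_blocks),
               (unsigned long long)bi_size,
               (unsigned long long)zero_size);
        return 0;
}
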
dm-era-target.c
75 static int writeset_alloc(struct writeset *ws, dm_block_t nr_blocks) in writeset_alloc() argument
77 ws->bits = vzalloc(bitset_size(nr_blocks)); in writeset_alloc()
90 dm_block_t nr_blocks) in writeset_init()
94 memset(ws->bits, 0, bitset_size(nr_blocks)); in writeset_init()
96 ws->md.nr_bits = nr_blocks; in writeset_init()
180 __le32 nr_blocks; member
278 uint32_t nr_blocks; member
518 disk->nr_blocks = cpu_to_le32(md->nr_blocks); in prepare_superblock()
603 md->nr_blocks in open_metadata()
89 writeset_init(struct dm_disk_bitset *info, struct writeset *ws, dm_block_t nr_blocks) writeset_init() argument
1178 dm_block_t nr_blocks; global() member
[all...]
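
Note: the dm-era-target.c hits size and clear a bitmap from nr_blocks (vzalloc(bitset_size(nr_blocks)), memset, nr_bits = nr_blocks). The following userspace analogue, with calloc in place of vzalloc and an assumed round-up-to-whole-longs bitset_size(), sketches that pattern.

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct writeset_sketch {
        unsigned long *bits;
        uint64_t nr_bits;
};

/* assumed: bytes needed to hold nr_blocks bits, rounded up to whole longs */
static size_t bitset_size(uint64_t nr_blocks)
{
        uint64_t bits_per_long = 8 * sizeof(unsigned long);
        return ((nr_blocks + bits_per_long - 1) / bits_per_long) *
               sizeof(unsigned long);
}

/* mirrors writeset_alloc(): allocate a bitmap sized by nr_blocks */
static int writeset_alloc_sketch(struct writeset_sketch *ws, uint64_t nr_blocks)
{
        ws->bits = calloc(1, bitset_size(nr_blocks));
        return ws->bits ? 0 : -1;
}

/* mirrors writeset_init(): clear the bitmap and record its width in bits */
static void writeset_init_sketch(struct writeset_sketch *ws, uint64_t nr_blocks)
{
        memset(ws->bits, 0, bitset_size(nr_blocks));
        ws->nr_bits = nr_blocks;
}

int main(void)
{
        struct writeset_sketch ws;

        if (writeset_alloc_sketch(&ws, 4096) == 0) {
                writeset_init_sketch(&ws, 4096);
                free(ws.bits);
        }
        return 0;
}
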
dm-zoned-reclaim.c
65 unsigned int nr_blocks; in dmz_reclaim_align_wp() local
78 nr_blocks = block - wp_block; in dmz_reclaim_align_wp()
81 dmz_blk2sect(nr_blocks), GFP_NOIO, 0); in dmz_reclaim_align_wp()
86 (unsigned long long)block, nr_blocks, ret); in dmz_reclaim_align_wp()
123 sector_t nr_blocks; in dmz_reclaim_copy() local
152 nr_blocks = ret; in dmz_reclaim_copy()
167 src.count = dmz_blk2sect(nr_blocks); in dmz_reclaim_copy()
184 block += nr_blocks; in dmz_reclaim_copy()
dm-zoned-metadata.c
2452 unsigned int nr_blocks; in dmz_merge_valid_blocks() local
2462 nr_blocks = ret; in dmz_merge_valid_blocks()
2463 ret = dmz_validate_blocks(zmd, to_zone, chunk_block, nr_blocks); in dmz_merge_valid_blocks()
2467 chunk_block += nr_blocks; in dmz_merge_valid_blocks()
2474 * Validate all the blocks in the range [block..block+nr_blocks-1].
2477 sector_t chunk_block, unsigned int nr_blocks) in dmz_validate_blocks()
2486 nr_blocks); in dmz_validate_blocks()
2488 WARN_ON(chunk_block + nr_blocks > zone_nr_blocks); in dmz_validate_blocks()
2490 while (nr_blocks) { in dmz_validate_blocks()
2498 nr_bits = min(nr_blocks, zm in dmz_validate_blocks()
2476 dmz_validate_blocks(struct dmz_metadata *zmd, struct dm_zone *zone, sector_t chunk_block, unsigned int nr_blocks) dmz_validate_blocks() argument
2557 dmz_invalidate_blocks(struct dmz_metadata *zmd, struct dm_zone *zone, sector_t chunk_block, unsigned int nr_blocks) dmz_invalidate_blocks() argument
2631 dmz_to_next_set_block(struct dmz_metadata *zmd, struct dm_zone *zone, sector_t chunk_block, unsigned int nr_blocks, int set) dmz_to_next_set_block() argument
2748 unsigned int nr_blocks = zmd->zone_nr_blocks; dmz_get_zone_weight() local
[all...]
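
Note: dmz_validate_blocks() above walks the range a bitmap block at a time (while (nr_blocks) { nr_bits = min(nr_blocks, ...); ... }); the second min() argument is truncated in the listing and is treated here as "bits left in the current bitmap block". A hedged sketch of that chunked loop:

#include <stdint.h>

static uint64_t min_u64(uint64_t a, uint64_t b)
{
        return a < b ? a : b;
}

static void validate_blocks_sketch(uint64_t chunk_block, uint64_t nr_blocks,
                                   uint64_t bits_per_mblk)
{
        while (nr_blocks) {
                uint64_t bit = chunk_block % bits_per_mblk;
                uint64_t nr_bits = min_u64(nr_blocks, bits_per_mblk - bit);

                /* ...set nr_bits bits in the current bitmap block here... */

                chunk_block += nr_bits;
                nr_blocks -= nr_bits;
        }
}

int main(void)
{
        /* e.g. 4 KiB bitmap blocks hold 4096 * 8 bits each */
        validate_blocks_sketch(5, 100000, 4096 * 8);
        return 0;
}
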
/kernel/linux/linux-5.10/drivers/md/
dm-zoned-target.c
118 unsigned int nr_blocks) in dmz_submit_bio()
136 clone->bi_iter.bi_size = dmz_blk2sect(nr_blocks) << SECTOR_SHIFT; in dmz_submit_bio()
146 zone->wp_block += nr_blocks; in dmz_submit_bio()
155 sector_t chunk_block, unsigned int nr_blocks) in dmz_handle_read_zero()
157 unsigned int size = nr_blocks << DMZ_BLOCK_SHIFT; in dmz_handle_read_zero()
159 /* Clear nr_blocks */ in dmz_handle_read_zero()
175 unsigned int nr_blocks = dmz_bio_blocks(bio); in dmz_handle_read() local
176 sector_t end_block = chunk_block + nr_blocks; in dmz_handle_read()
192 (unsigned long long)chunk_block, nr_blocks); in dmz_handle_read()
197 nr_blocks in dmz_handle_read()
116 dmz_submit_bio(struct dmz_target *dmz, struct dm_zone *zone, struct bio *bio, sector_t chunk_block, unsigned int nr_blocks) dmz_submit_bio() argument
154 dmz_handle_read_zero(struct dmz_target *dmz, struct bio *bio, sector_t chunk_block, unsigned int nr_blocks) dmz_handle_read_zero() argument
250 dmz_handle_direct_write(struct dmz_target *dmz, struct dm_zone *zone, struct bio *bio, sector_t chunk_block, unsigned int nr_blocks) dmz_handle_direct_write() argument
283 dmz_handle_buffered_write(struct dmz_target *dmz, struct dm_zone *zone, struct bio *bio, sector_t chunk_block, unsigned int nr_blocks) dmz_handle_buffered_write() argument
324 unsigned int nr_blocks = dmz_bio_blocks(bio); dmz_handle_write() local
363 unsigned int nr_blocks = dmz_bio_blocks(bio); dmz_handle_discard() local
[all...]
dm-era-target.c
73 static int writeset_alloc(struct writeset *ws, dm_block_t nr_blocks) in writeset_alloc() argument
75 ws->bits = vzalloc(bitset_size(nr_blocks)); in writeset_alloc()
88 dm_block_t nr_blocks) in writeset_init()
92 memset(ws->bits, 0, bitset_size(nr_blocks)); in writeset_init()
94 ws->md.nr_bits = nr_blocks; in writeset_init()
175 __le32 nr_blocks; member
268 uint32_t nr_blocks; member
503 disk->nr_blocks = cpu_to_le32(md->nr_blocks); in prepare_superblock()
588 md->nr_blocks in open_metadata()
87 writeset_init(struct dm_disk_bitset *info, struct writeset *ws, dm_block_t nr_blocks) writeset_init() argument
1159 dm_block_t nr_blocks; global() member
[all...]
dm-zoned-reclaim.c
65 unsigned int nr_blocks; in dmz_reclaim_align_wp() local
78 nr_blocks = block - wp_block; in dmz_reclaim_align_wp()
81 dmz_blk2sect(nr_blocks), GFP_NOIO, 0); in dmz_reclaim_align_wp()
86 (unsigned long long)block, nr_blocks, ret); in dmz_reclaim_align_wp()
123 sector_t nr_blocks; in dmz_reclaim_copy() local
152 nr_blocks = ret; in dmz_reclaim_copy()
167 src.count = dmz_blk2sect(nr_blocks); in dmz_reclaim_copy()
184 block += nr_blocks; in dmz_reclaim_copy()
dm-zoned-metadata.c
2469 unsigned int nr_blocks; in dmz_merge_valid_blocks() local
2479 nr_blocks = ret; in dmz_merge_valid_blocks()
2480 ret = dmz_validate_blocks(zmd, to_zone, chunk_block, nr_blocks); in dmz_merge_valid_blocks()
2484 chunk_block += nr_blocks; in dmz_merge_valid_blocks()
2491 * Validate all the blocks in the range [block..block+nr_blocks-1].
2494 sector_t chunk_block, unsigned int nr_blocks) in dmz_validate_blocks()
2503 nr_blocks); in dmz_validate_blocks()
2505 WARN_ON(chunk_block + nr_blocks > zone_nr_blocks); in dmz_validate_blocks()
2507 while (nr_blocks) { in dmz_validate_blocks()
2515 nr_bits = min(nr_blocks, zm in dmz_validate_blocks()
2493 dmz_validate_blocks(struct dmz_metadata *zmd, struct dm_zone *zone, sector_t chunk_block, unsigned int nr_blocks) dmz_validate_blocks() argument
2574 dmz_invalidate_blocks(struct dmz_metadata *zmd, struct dm_zone *zone, sector_t chunk_block, unsigned int nr_blocks) dmz_invalidate_blocks() argument
2648 dmz_to_next_set_block(struct dmz_metadata *zmd, struct dm_zone *zone, sector_t chunk_block, unsigned int nr_blocks, int set) dmz_to_next_set_block() argument
2765 unsigned int nr_blocks = zmd->zone_nr_blocks; dmz_get_zone_weight() local
[all...]
/kernel/linux/linux-5.10/drivers/md/persistent-data/
dm-space-map-common.c
226 ll->nr_blocks = 0; in sm_ll_init()
237 dm_block_t i, nr_blocks, nr_indexes; in sm_ll_extend() local
240 nr_blocks = ll->nr_blocks + extra_blocks; in sm_ll_extend()
241 old_blocks = dm_sector_div_up(ll->nr_blocks, ll->entries_per_block); in sm_ll_extend()
242 blocks = dm_sector_div_up(nr_blocks, ll->entries_per_block); in sm_ll_extend()
244 nr_indexes = dm_sector_div_up(nr_blocks, ll->entries_per_block); in sm_ll_extend()
253 ll->nr_blocks = nr_blocks; in sm_ll_extend()
284 if (b >= ll->nr_blocks) { in sm_ll_lookup_bitmap()
[all...]
dm-space-map-metadata.c
285 *count = smm->ll.nr_blocks; in sm_metadata_get_nr_blocks()
294 *count = smm->old_ll.nr_blocks - smm->old_ll.nr_allocated - in sm_metadata_get_nr_free()
454 r = sm_ll_find_common_free_block(&smm->old_ll, &smm->ll, smm->begin, smm->ll.nr_blocks, b); in sm_metadata_new_block_()
543 root_le.nr_blocks = cpu_to_le64(smm->ll.nr_blocks); in sm_metadata_copy_root()
596 *count = smm->ll.nr_blocks; in sm_bootstrap_get_nr_blocks()
605 *count = smm->ll.nr_blocks - smm->begin; in sm_bootstrap_get_nr_free()
643 if (smm->begin == smm->ll.nr_blocks) in sm_bootstrap_new_block()
708 dm_block_t old_len = smm->ll.nr_blocks; in sm_metadata_extend()
773 dm_block_t nr_blocks, in dm_sm_metadata_create()
771 dm_sm_metadata_create(struct dm_space_map *sm, struct dm_transaction_manager *tm, dm_block_t nr_blocks, dm_block_t superblock) dm_sm_metadata_create() argument
[all...]
dm-space-map-disk.c
51 *count = smd->old_ll.nr_blocks; in sm_disk_get_nr_blocks()
59 *count = (smd->old_ll.nr_blocks - smd->old_ll.nr_allocated) - smd->nr_allocated_this_transaction; in sm_disk_get_nr_free()
173 r = sm_ll_find_common_free_block(&smd->old_ll, &smd->ll, smd->begin, smd->ll.nr_blocks, b); in sm_disk_new_block()
231 root_le.nr_blocks = cpu_to_le64(smd->ll.nr_blocks); in sm_disk_copy_root()
264 dm_block_t nr_blocks) in dm_sm_disk_create()
281 r = sm_ll_extend(&smd->ll, nr_blocks); in dm_sm_disk_create()
263 dm_sm_disk_create(struct dm_transaction_manager *tm, dm_block_t nr_blocks) dm_sm_disk_create() argument
/kernel/linux/linux-5.10/drivers/edac/
edac_device.c
56 char *edac_block_name, unsigned nr_blocks, in edac_device_alloc_ctl_info()
71 edac_dbg(4, "instances=%d blocks=%d\n", nr_instances, nr_blocks); in edac_device_alloc_ctl_info()
91 count = nr_instances * nr_blocks; in edac_device_alloc_ctl_info()
151 inst->nr_blocks = nr_blocks; in edac_device_alloc_ctl_info()
152 blk_p = &dev_blk[instance * nr_blocks]; in edac_device_alloc_ctl_info()
160 for (block = 0; block < nr_blocks; block++) { in edac_device_alloc_ctl_info()
576 if ((block_nr >= instance->nr_blocks) || (block_nr < 0)) { in edac_device_handle_ce_count()
581 instance->nr_blocks); in edac_device_handle_ce_count()
585 if (instance->nr_blocks > in edac_device_handle_ce_count()
53 edac_device_alloc_ctl_info( unsigned sz_private, char *edac_device_name, unsigned nr_instances, char *edac_block_name, unsigned nr_blocks, unsigned offset_value, struct edac_dev_sysfs_block_attribute *attrib_spec, unsigned nr_attrib, int device_index) edac_device_alloc_ctl_info() argument
[all...]
/kernel/linux/linux-6.6/drivers/edac/
edac_device.c
58 char *blk_name, unsigned nr_blocks, unsigned off_val, in edac_device_alloc_ctl_info()
70 edac_dbg(4, "instances=%d blocks=%d\n", nr_instances, nr_blocks); in edac_device_alloc_ctl_info()
82 dev_blk = kcalloc(nr_instances * nr_blocks, sizeof(struct edac_device_block), GFP_KERNEL); in edac_device_alloc_ctl_info()
119 inst->nr_blocks = nr_blocks; in edac_device_alloc_ctl_info()
120 blk_p = &dev_blk[instance * nr_blocks]; in edac_device_alloc_ctl_info()
127 for (block = 0; block < nr_blocks; block++) { in edac_device_alloc_ctl_info()
546 if ((block_nr >= instance->nr_blocks) || (block_nr < 0)) { in edac_device_handle_ce_count()
551 instance->nr_blocks); in edac_device_handle_ce_count()
555 if (instance->nr_blocks > in edac_device_handle_ce_count()
57 edac_device_alloc_ctl_info(unsigned pvt_sz, char *dev_name, unsigned nr_instances, char *blk_name, unsigned nr_blocks, unsigned off_val, struct edac_dev_sysfs_block_attribute *attrib_spec, unsigned nr_attrib, int device_index) edac_device_alloc_ctl_info() argument
[all...]
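
Note: edac_device_alloc_ctl_info() above lays its blocks out as one flattened array of nr_instances * nr_blocks entries and indexes it as instance * nr_blocks + block. A small standalone sketch of that layout (the struct fields are placeholders, not the real edac_device_block):

#include <stdlib.h>

struct blk_sketch { int ce_count; };

int main(void)
{
        unsigned nr_instances = 4, nr_blocks = 8;
        struct blk_sketch *dev_blk =
                calloc((size_t)nr_instances * nr_blocks, sizeof(*dev_blk));
        if (!dev_blk)
                return 1;

        for (unsigned instance = 0; instance < nr_instances; instance++) {
                /* first block of this instance, as in blk_p = &dev_blk[...] */
                struct blk_sketch *blk_p = &dev_blk[instance * nr_blocks];

                for (unsigned block = 0; block < nr_blocks; block++)
                        blk_p[block].ce_count = 0;
        }
        free(dev_blk);
        return 0;
}
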
/kernel/linux/linux-6.6/drivers/md/persistent-data/
dm-space-map-metadata.c
287 *count = smm->ll.nr_blocks; in sm_metadata_get_nr_blocks()
296 *count = smm->old_ll.nr_blocks - smm->old_ll.nr_allocated - in sm_metadata_get_nr_free()
458 r = sm_ll_find_common_free_block(&smm->old_ll, &smm->ll, smm->begin, smm->ll.nr_blocks, b); in sm_metadata_new_block_()
548 root_le.nr_blocks = cpu_to_le64(smm->ll.nr_blocks); in sm_metadata_copy_root()
601 *count = smm->ll.nr_blocks; in sm_bootstrap_get_nr_blocks()
610 *count = smm->ll.nr_blocks - smm->begin; in sm_bootstrap_get_nr_free()
648 if (smm->begin == smm->ll.nr_blocks) in sm_bootstrap_new_block()
723 dm_block_t old_len = smm->ll.nr_blocks; in sm_metadata_extend()
786 dm_block_t nr_blocks, in dm_sm_metadata_create()
784 dm_sm_metadata_create(struct dm_space_map *sm, struct dm_transaction_manager *tm, dm_block_t nr_blocks, dm_block_t superblock) dm_sm_metadata_create() argument
[all...]
dm-space-map-disk.c
53 *count = smd->old_ll.nr_blocks; in sm_disk_get_nr_blocks()
62 *count = (smd->old_ll.nr_blocks - smd->old_ll.nr_allocated) - smd->nr_allocated_this_transaction; in sm_disk_get_nr_free()
139 r = sm_ll_find_common_free_block(&smd->old_ll, &smd->ll, smd->begin, smd->ll.nr_blocks, b); in sm_disk_new_block()
185 root_le.nr_blocks = cpu_to_le64(smd->ll.nr_blocks); in sm_disk_copy_root()
218 dm_block_t nr_blocks) in dm_sm_disk_create()
235 r = sm_ll_extend(&smd->ll, nr_blocks); in dm_sm_disk_create()
217 dm_sm_disk_create(struct dm_transaction_manager *tm, dm_block_t nr_blocks) dm_sm_disk_create() argument
dm-space-map-common.c
229 ll->nr_blocks = 0; in sm_ll_init()
240 dm_block_t i, nr_blocks, nr_indexes; in sm_ll_extend() local
243 nr_blocks = ll->nr_blocks + extra_blocks; in sm_ll_extend()
244 old_blocks = dm_sector_div_up(ll->nr_blocks, ll->entries_per_block); in sm_ll_extend()
245 blocks = dm_sector_div_up(nr_blocks, ll->entries_per_block); in sm_ll_extend()
247 nr_indexes = dm_sector_div_up(nr_blocks, ll->entries_per_block); in sm_ll_extend()
256 ll->nr_blocks = nr_blocks; in sm_ll_extend()
287 if (b >= ll->nr_blocks) { in sm_ll_lookup_bitmap()
[all...]
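
Note: sm_ll_extend() above grows the space map by extra_blocks and recomputes how many bitmap/index blocks the new total needs via dm_sector_div_up(nr_blocks, entries_per_block). A sketch of just that ceiling-division arithmetic, with dm_sector_div_up modelled as a plain div_up and an assumed entries_per_block:

#include <stdint.h>
#include <stdio.h>

static uint64_t div_up(uint64_t n, uint64_t d)
{
        return (n + d - 1) / d;
}

int main(void)
{
        uint64_t nr_blocks_old = 1000, extra_blocks = 500;
        uint64_t entries_per_block = 126;       /* assumed, for illustration */

        uint64_t nr_blocks = nr_blocks_old + extra_blocks;
        uint64_t old_blocks = div_up(nr_blocks_old, entries_per_block);
        uint64_t blocks = div_up(nr_blocks, entries_per_block);

        printf("need %llu new bitmap blocks (index grows from %llu to %llu)\n",
               (unsigned long long)(blocks - old_blocks),
               (unsigned long long)old_blocks,
               (unsigned long long)blocks);
        return 0;
}
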
/kernel/linux/linux-5.10/fs/afs/
dir_edit.c
194 unsigned int need_slots, nr_blocks, b; in afs_edit_dir_add() local
225 nr_blocks = i_size / AFS_DIR_BLOCK_SIZE; in afs_edit_dir_add()
230 for (b = 0; b < nr_blocks + 1; b++) { in afs_edit_dir_add()
239 if (nr_blocks >= AFS_DIR_MAX_BLOCKS) in afs_edit_dir_add()
264 if (b == nr_blocks) { in afs_edit_dir_add()
304 nr_blocks = 1; in afs_edit_dir_add()
372 unsigned int need_slots, nr_blocks, b; in afs_edit_dir_remove() local
386 nr_blocks = i_size / AFS_DIR_BLOCK_SIZE; in afs_edit_dir_remove()
405 for (b = 0; b < nr_blocks; b++) { in afs_edit_dir_remove()
/kernel/linux/linux-6.6/fs/afs/
dir_edit.c
213 unsigned int need_slots, nr_blocks, b; in afs_edit_dir_add() local
239 nr_blocks = i_size / AFS_DIR_BLOCK_SIZE; in afs_edit_dir_add()
244 for (b = 0; b < nr_blocks + 1; b++) { in afs_edit_dir_add()
249 if (nr_blocks >= AFS_DIR_MAX_BLOCKS) in afs_edit_dir_add()
272 if (b == nr_blocks) { in afs_edit_dir_add()
312 nr_blocks = 1; in afs_edit_dir_add()
380 unsigned int need_slots, nr_blocks, b; in afs_edit_dir_remove() local
394 nr_blocks = i_size / AFS_DIR_BLOCK_SIZE; in afs_edit_dir_remove()
410 for (b = 0; b < nr_blocks; b++) { in afs_edit_dir_remove()
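
Note: both afs_edit_dir_add() and afs_edit_dir_remove() derive nr_blocks from i_size / AFS_DIR_BLOCK_SIZE, and the add path scans one block past the end (b < nr_blocks + 1) so it can extend the directory, subject to AFS_DIR_MAX_BLOCKS. The sketch below uses assumed values for both constants and simplifies the grow check; it is illustrative only.

#include <stdint.h>
#include <stdio.h>

#define AFS_DIR_BLOCK_SIZE 2048        /* assumed value, illustration only */
#define AFS_DIR_MAX_BLOCKS 1023        /* assumed cap, illustration only */

int main(void)
{
        uint64_t i_size = 10 * AFS_DIR_BLOCK_SIZE;
        unsigned int nr_blocks = i_size / AFS_DIR_BLOCK_SIZE;
        unsigned int b;

        /* the add path scans nr_blocks + 1 blocks so it can grow the dir */
        for (b = 0; b < nr_blocks + 1; b++) {
                if (b == nr_blocks && nr_blocks >= AFS_DIR_MAX_BLOCKS)
                        break;          /* directory cannot grow any further */
                /* ...look for need_slots free slots in block b... */
        }
        printf("directory spans %u blocks\n", nr_blocks);
        return 0;
}
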
/kernel/linux/linux-6.6/fs/crypto/
H A Dinline_crypt.c441 * @nr_blocks: the number of blocks we want to submit starting at @lblk
446 * This is normally just @nr_blocks, as normally the DUNs just increment along
451 * will occur. If this happens, a value less than @nr_blocks will be returned
457 u64 fscrypt_limit_io_blocks(const struct inode *inode, u64 lblk, u64 nr_blocks) in fscrypt_limit_io_blocks() argument
463 return nr_blocks; in fscrypt_limit_io_blocks()
465 if (nr_blocks <= 1) in fscrypt_limit_io_blocks()
466 return nr_blocks; in fscrypt_limit_io_blocks()
471 return nr_blocks; in fscrypt_limit_io_blocks()
477 return min_t(u64, nr_blocks, (u64)U32_MAX + 1 - dun); in fscrypt_limit_io_blocks()
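
Note: the doc comment and the final return in fscrypt_limit_io_blocks() above describe clamping a block run so the 32-bit part of the DUN cannot wrap inside one bio (min_t(u64, nr_blocks, (u64)U32_MAX + 1 - dun)). A sketch of that clamp, leaving out how the DUN is derived from lblk:

#include <stdint.h>
#include <stdio.h>

/* dun is the data unit number of the first block; only its low 32 bits may
 * advance within one bio, so the run is cut at that boundary. */
static uint64_t limit_io_blocks_sketch(uint64_t dun, uint64_t nr_blocks)
{
        uint64_t until_wrap;

        if (nr_blocks <= 1)
                return nr_blocks;

        until_wrap = (uint64_t)UINT32_MAX + 1 - (dun & UINT32_MAX);
        return nr_blocks < until_wrap ? nr_blocks : until_wrap;
}

int main(void)
{
        /* a 16-block run starting 4 blocks before the wrap point is cut to 4 */
        printf("%llu\n", (unsigned long long)
               limit_io_blocks_sketch((uint64_t)UINT32_MAX - 3, 16));
        return 0;
}
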
/kernel/linux/linux-5.10/fs/xfs/libxfs/
xfs_btree_staging.c
289 * In step four, the caller must allocate xfs_btree_bload.nr_blocks blocks and
330 * and returned via btree_height and nr_blocks.
649 uint64_t nr_blocks = 0; in xfs_btree_bload_compute_geometry() local
684 nr_blocks++; in xfs_btree_bload_compute_geometry()
716 nr_blocks++; in xfs_btree_bload_compute_geometry()
724 nr_blocks += level_blocks; in xfs_btree_bload_compute_geometry()
733 bbl->nr_blocks = nr_blocks - 1; in xfs_btree_bload_compute_geometry()
735 bbl->nr_blocks = nr_blocks; in xfs_btree_bload_compute_geometry()
[all...]
/kernel/linux/linux-6.6/fs/xfs/libxfs/
xfs_btree_staging.c
289 * In step four, the caller must allocate xfs_btree_bload.nr_blocks blocks and
330 * and returned via btree_height and nr_blocks.
648 uint64_t nr_blocks = 0; in xfs_btree_bload_compute_geometry() local
683 nr_blocks++; in xfs_btree_bload_compute_geometry()
716 nr_blocks++; in xfs_btree_bload_compute_geometry()
725 nr_blocks += level_blocks; in xfs_btree_bload_compute_geometry()
734 bbl->nr_blocks = nr_blocks - 1; in xfs_btree_bload_compute_geometry()
736 bbl->nr_blocks = nr_blocks; in xfs_btree_bload_compute_geometry()
[all...]
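
Note: the xfs_btree_bload_compute_geometry() hits accumulate per-level block counts into nr_blocks while computing the bulk-load height. The toy calculator below shows only the generic accumulation; it is not the XFS implementation, and the conditional nr_blocks - 1 adjustment visible in the listing is ignored because its condition is not shown.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t nr_records = 1000000;
        uint64_t recs_per_block = 100;  /* assumed fan-out */

        uint64_t nr_blocks = 0, nr_this_level = nr_records, height = 0;

        do {
                uint64_t level_blocks =
                        (nr_this_level + recs_per_block - 1) / recs_per_block;

                nr_blocks += level_blocks;      /* blocks needed at this level */
                nr_this_level = level_blocks;   /* keys feeding the next level */
                height++;
        } while (nr_this_level > 1);

        printf("height=%llu nr_blocks=%llu\n",
               (unsigned long long)height, (unsigned long long)nr_blocks);
        return 0;
}
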
/kernel/linux/linux-5.10/fs/jffs2/
build.c
326 size += c->nr_blocks * 100; /* And 100 bytes per eraseblock */ in jffs2_calc_trigger_levels()
356 c->flash_size / 1024, c->sector_size / 1024, c->nr_blocks); in jffs2_calc_trigger_levels()
380 c->nr_blocks = c->flash_size / c->sector_size; in jffs2_do_mount_fs()
381 size = sizeof(struct jffs2_eraseblock) * c->nr_blocks; in jffs2_do_mount_fs()
391 for (i=0; i<c->nr_blocks; i++) { in jffs2_do_mount_fs()
/kernel/linux/linux-6.6/fs/jffs2/
build.c
326 size += c->nr_blocks * 100; /* And 100 bytes per eraseblock */ in jffs2_calc_trigger_levels()
356 c->flash_size / 1024, c->sector_size / 1024, c->nr_blocks); in jffs2_calc_trigger_levels()
380 c->nr_blocks = c->flash_size / c->sector_size; in jffs2_do_mount_fs()
381 size = sizeof(struct jffs2_eraseblock) * c->nr_blocks; in jffs2_do_mount_fs()
391 for (i=0; i<c->nr_blocks; i++) { in jffs2_do_mount_fs()
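
Note: jffs2_do_mount_fs() above computes nr_blocks = flash_size / sector_size and then uses that count to size the eraseblock array and the "100 bytes per eraseblock" term in the trigger levels. A standalone sketch of that arithmetic with example sizes (the struct is a placeholder):

#include <stdint.h>
#include <stdio.h>

struct eraseblock_sketch { uint32_t offset; uint32_t free_size; };

int main(void)
{
        uint64_t flash_size = 64ULL * 1024 * 1024;  /* example: 64 MiB flash */
        uint32_t sector_size = 128 * 1024;          /* example: 128 KiB blocks */

        uint32_t nr_blocks = flash_size / sector_size;
        size_t array_size = sizeof(struct eraseblock_sketch) * nr_blocks;
        uint64_t trigger_extra = (uint64_t)nr_blocks * 100;

        printf("nr_blocks=%u array=%zu bytes trigger extra=%llu bytes\n",
               nr_blocks, array_size, (unsigned long long)trigger_extra);
        return 0;
}
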
/kernel/linux/linux-5.10/drivers/scsi/
sd.c
879 u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq)); in sd_setup_unmap_cmnd() local
899 put_unaligned_be32(nr_blocks, &buf[16]); in sd_setup_unmap_cmnd()
915 u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq)); in sd_setup_write_same16_cmnd() local
931 put_unaligned_be32(nr_blocks, &cmd->cmnd[10]); in sd_setup_write_same16_cmnd()
947 u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq)); in sd_setup_write_same10_cmnd() local
963 put_unaligned_be16(nr_blocks, &cmd->cmnd[7]); in sd_setup_write_same10_cmnd()
978 u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq)); in sd_setup_write_zeroes_cmnd() local
994 if (sdkp->ws16 || lba > 0xffffffff || nr_blocks > 0xffff) in sd_setup_write_zeroes_cmnd()
1076 u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq)); in sd_setup_write_same_cmnd() local
1087 if (sdkp->ws16 || lba > 0xffffffff || nr_blocks > in sd_setup_write_same_cmnd()
1136 sd_setup_rw32_cmnd(struct scsi_cmnd *cmd, bool write, sector_t lba, unsigned int nr_blocks, unsigned char flags) sd_setup_rw32_cmnd() argument
1158 sd_setup_rw16_cmnd(struct scsi_cmnd *cmd, bool write, sector_t lba, unsigned int nr_blocks, unsigned char flags) sd_setup_rw16_cmnd() argument
1173 sd_setup_rw10_cmnd(struct scsi_cmnd *cmd, bool write, sector_t lba, unsigned int nr_blocks, unsigned char flags) sd_setup_rw10_cmnd() argument
1188 sd_setup_rw6_cmnd(struct scsi_cmnd *cmd, bool write, sector_t lba, unsigned int nr_blocks, unsigned char flags) sd_setup_rw6_cmnd() argument
1224 unsigned int nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq)); sd_setup_read_write_cmnd() local
[all...]
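
Note: the sd.c hits repeat two steps: convert the request's 512-byte sector count to device logical blocks (sectors_to_logical()), then store lba/nr_blocks as big-endian CDB fields (put_unaligned_be32(nr_blocks, &cmd->cmnd[10]) for WRITE SAME(16)). The sketch below mirrors both steps with local big-endian helpers standing in for put_unaligned_be32/64; the 4 KiB logical block size is just an example.

#include <stdint.h>
#include <stdio.h>

/* stand-ins for put_unaligned_be32/put_unaligned_be64 */
static void put_be32(uint32_t v, uint8_t *p)
{
        p[0] = v >> 24; p[1] = v >> 16; p[2] = v >> 8; p[3] = v;
}

static void put_be64(uint64_t v, uint8_t *p)
{
        put_be32(v >> 32, p);
        put_be32((uint32_t)v, p + 4);
}

int main(void)
{
        uint32_t logical_block_size = 4096;             /* example device */
        uint64_t rq_sectors = 64, rq_sector = 1024;     /* 512-byte units */

        /* sectors_to_logical(): 512-byte sectors -> device logical blocks */
        uint32_t nr_blocks = rq_sectors / (logical_block_size / 512);
        uint64_t lba = rq_sector / (logical_block_size / 512);

        uint8_t cmnd[16] = { 0 };
        cmnd[0] = 0x93;                 /* WRITE SAME(16) opcode */
        put_be64(lba, &cmnd[2]);        /* LBA in bytes 2..9 */
        put_be32(nr_blocks, &cmnd[10]); /* block count in bytes 10..13 */

        printf("lba=%llu nr_blocks=%u first CDB byte=0x%02x\n",
               (unsigned long long)lba, nr_blocks, cmnd[0]);
        return 0;
}
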
/kernel/linux/linux-5.10/init/
do_mounts_rd.c
177 static unsigned long nr_blocks(struct file *file) in nr_blocks() function
221 rd_blocks = nr_blocks(out_file); in rd_load_image()
234 devblocks = nr_blocks(in_file); in rd_load_image()
