Searched refs:nr_sectors (Results 1 - 25 of 71) sorted by relevance

/kernel/linux/linux-6.6/block/
blk-ia-ranges.c
25 return sprintf(buf, "%llu\n", iar->nr_sectors); in blk_ia_range_nr_sectors_show()
39 .attr = { .name = "nr_sectors", .mode = 0444 },
186 sector < iar->sector + iar->nr_sectors) in disk_find_ia_range()
219 swap(iar->nr_sectors, tmp->nr_sectors); in disk_check_ia_ranges()
222 sector += iar->nr_sectors; in disk_check_ia_ranges()
247 new->ia_range[i].nr_sectors != old->ia_range[i].nr_sectors) in disk_ia_ranges_changed()
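
The disk_find_ia_range() hit above shows the containment test the block layer uses: a sector belongs to an independent access range when it falls in [iar->sector, iar->sector + iar->nr_sectors). A minimal userspace model of that test (struct and function names here are illustrative, not the kernel's):

    #include <stdint.h>
    #include <stdio.h>

    struct ia_range { uint64_t sector; uint64_t nr_sectors; };

    /* Return the range containing `sector`, or NULL if none does. */
    static const struct ia_range *find_range(const struct ia_range *r,
                                             int nr, uint64_t sector)
    {
        for (int i = 0; i < nr; i++)
            if (sector >= r[i].sector &&
                sector < r[i].sector + r[i].nr_sectors)
                return &r[i];
        return NULL;
    }

    int main(void)
    {
        struct ia_range ranges[] = { { 0, 1024 }, { 1024, 1024 } };
        const struct ia_range *hit = find_range(ranges, 2, 1500);
        if (hit)
            printf("sector 1500 is in the range starting at %llu\n",
                   (unsigned long long)hit->sector);
        return 0;
    }
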
blk-zoned.c
239 * @nr_sectors: Number of sectors, should be at least the length of one zone and
245 * @sector..@sector+@nr_sectors. Specifying the entire disk sector range
251 sector_t sector, sector_t nr_sectors, gfp_t gfp_mask) in blkdev_zone_mgmt()
256 sector_t end_sector = sector + nr_sectors; in blkdev_zone_mgmt()
277 if (!bdev_is_zone_start(bdev, nr_sectors) && end_sector != capacity) in blkdev_zone_mgmt()
286 if (op == REQ_OP_ZONE_RESET && sector == 0 && nr_sectors == capacity) { in blkdev_zone_mgmt()
364 if (zrange->sector + zrange->nr_sectors <= zrange->sector || in blkdev_truncate_zone_range()
365 zrange->sector + zrange->nr_sectors > get_capacity(bdev->bd_disk)) in blkdev_truncate_zone_range()
370 end = ((zrange->sector + zrange->nr_sectors) << SECTOR_SHIFT) - 1; in blkdev_truncate_zone_range()
422 ret = blkdev_zone_mgmt(bdev, op, zrange.sector, zrange.nr_sectors, in blkdev_zone_mgmt_ioctl()
250 blkdev_zone_mgmt(struct block_device *bdev, enum req_op op, sector_t sector, sector_t nr_sectors, gfp_t gfp_mask) blkdev_zone_mgmt() argument
[all...]
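
Per the kerneldoc hits above, blkdev_zone_mgmt() executes a zone management operation on the sector range @sector..@sector+@nr_sectors, and nr_sectors should cover at least one full zone. A hedged kernel-side sketch of resetting the single zone containing a sector, using the 6.6 signature shown in the result and assuming a caller that already holds the struct block_device (the block layer enforces power-of-2 zone sizes, so the mask below is safe):

    /* Sketch only: reset the zone containing `sector`. */
    static int reset_one_zone(struct block_device *bdev, sector_t sector)
    {
        sector_t zone_sectors = bdev_zone_sectors(bdev);
        sector_t zone_start = sector & ~(zone_sectors - 1);

        return blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET, zone_start,
                                zone_sectors, GFP_KERNEL);
    }

As the hit at blk-zoned.c:286 notes, passing sector 0 with nr_sectors equal to the disk capacity lets the kernel collapse the reset into a single REQ_OP_ZONE_RESET_ALL.
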
blk-core.c
534 unsigned int nr_sectors = bio_sectors(bio); in bio_check_eod() local
536 if (nr_sectors && in bio_check_eod()
537 (nr_sectors > maxsector || in bio_check_eod()
538 bio->bi_iter.bi_sector > maxsector - nr_sectors)) { in bio_check_eod()
540 "%pg: rw=%d, sector=%llu, nr_sectors = %u limit=%llu\n", in bio_check_eod()
542 bio->bi_iter.bi_sector, nr_sectors, maxsector); in bio_check_eod()
573 int nr_sectors = bio_sectors(bio); in blk_check_zone_append() local
589 if (nr_sectors > q->limits.chunk_sectors) in blk_check_zone_append()
593 if (nr_sectors > q->limits.max_zone_append_sectors) in blk_check_zone_append()
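
The bio_check_eod() hit is worth reading closely: it compares bi_sector against maxsector - nr_sectors rather than computing sector + nr_sectors, so the test cannot wrap around on a huge sector value. A standalone model:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Overflow-safe "does [sector, sector+nr) run past the device?" */
    static bool beyond_eod(uint64_t sector, uint32_t nr_sectors,
                           uint64_t maxsector)
    {
        return nr_sectors &&
               (nr_sectors > maxsector || sector > maxsector - nr_sectors);
    }

    int main(void)
    {
        uint64_t max = 2097152;                      /* 1 GiB in 512 B sectors */
        printf("%d\n", beyond_eod(2097151, 8, max)); /* 1: crosses the end */
        printf("%d\n", beyond_eod(2097144, 8, max)); /* 0: fits exactly */
        return 0;
    }
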
/kernel/linux/linux-5.10/drivers/block/null_blk/
zoned.c
215 unsigned int nr_sectors = len >> SECTOR_SHIFT; in null_zone_valid_read_len() local
219 sector + nr_sectors <= zone->wp) in null_zone_valid_read_len()
334 unsigned int nr_sectors, bool append) in null_zone_write()
346 return null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors); in null_zone_write()
388 if (zone->wp + nr_sectors > zone->start + zone->capacity) { in null_zone_write()
409 ret = null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors); in null_zone_write()
416 zone->wp += nr_sectors; in null_zone_write()
590 sector_t sector, sector_t nr_sectors) in null_process_zoned_cmd()
598 sts = null_zone_write(cmd, sector, nr_sectors, false); in null_process_zoned_cmd()
601 sts = null_zone_write(cmd, sector, nr_sectors, true); in null_process_zoned_cmd()
333 null_zone_write(struct nullb_cmd *cmd, sector_t sector, unsigned int nr_sectors, bool append) null_zone_write() argument
589 null_process_zoned_cmd(struct nullb_cmd *cmd, enum req_opf op, sector_t sector, sector_t nr_sectors) null_process_zoned_cmd() argument
[all...]
null_blk.h
99 unsigned int nr_sectors);
109 sector_t nr_sectors);
125 enum req_opf op, sector_t sector, sector_t nr_sectors) in null_process_zoned_cmd()
124 null_process_zoned_cmd(struct nullb_cmd *cmd, enum req_opf op, sector_t sector, sector_t nr_sectors) null_process_zoned_cmd() argument
/kernel/linux/linux-5.10/block/
blk-zoned.c
174 sector_t nr_sectors) in blkdev_allow_reset_all_zones()
183 return !sector && nr_sectors == get_capacity(bdev->bd_disk); in blkdev_allow_reset_all_zones()
191 * @nr_sectors: Number of sectors, should be at least the length of one zone and
197 * @sector..@sector+@nr_sectors. Specifying the entire disk sector range
203 sector_t sector, sector_t nr_sectors, in blkdev_zone_mgmt()
209 sector_t end_sector = sector + nr_sectors; in blkdev_zone_mgmt()
230 if ((nr_sectors & (zone_sectors - 1)) && end_sector != capacity) in blkdev_zone_mgmt()
242 blkdev_allow_reset_all_zones(bdev, sector, nr_sectors)) { in blkdev_zone_mgmt()
323 if (zrange->sector + zrange->nr_sectors <= zrange->sector || in blkdev_truncate_zone_range()
324 zrange->sector + zrange->nr_sectors > get_capacity(bdev->bd_disk)) in blkdev_truncate_zone_range()
172 blkdev_allow_reset_all_zones(struct block_device *bdev, sector_t sector, sector_t nr_sectors) blkdev_allow_reset_all_zones() argument
202 blkdev_zone_mgmt(struct block_device *bdev, enum req_opf op, sector_t sector, sector_t nr_sectors, gfp_t gfp_mask) blkdev_zone_mgmt() argument
[all...]
/kernel/linux/linux-5.10/drivers/md/bcache/
writeback.h
76 unsigned int nr_sectors) in bcache_dev_stripe_dirty()
87 if (nr_sectors <= dc->disk.stripe_size) in bcache_dev_stripe_dirty()
90 nr_sectors -= dc->disk.stripe_size; in bcache_dev_stripe_dirty()
145 uint64_t offset, int nr_sectors);
74 bcache_dev_stripe_dirty(struct cached_dev *dc, uint64_t offset, unsigned int nr_sectors) bcache_dev_stripe_dirty() argument
writeback.c
552 uint64_t offset, int nr_sectors) in bcache_dev_sectors_dirty_add()
566 atomic_long_add(nr_sectors, &c->flash_dev_dirty_sectors); in bcache_dev_sectors_dirty_add()
570 while (nr_sectors) { in bcache_dev_sectors_dirty_add()
571 int s = min_t(unsigned int, abs(nr_sectors), in bcache_dev_sectors_dirty_add()
574 if (nr_sectors < 0) in bcache_dev_sectors_dirty_add()
587 nr_sectors -= s; in bcache_dev_sectors_dirty_add()
551 bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned int inode, uint64_t offset, int nr_sectors) bcache_dev_sectors_dirty_add() argument
/kernel/linux/linux-6.6/drivers/md/bcache/
writeback.h
80 unsigned int nr_sectors) in bcache_dev_stripe_dirty()
91 if (nr_sectors <= dc->disk.stripe_size) in bcache_dev_stripe_dirty()
94 nr_sectors -= dc->disk.stripe_size; in bcache_dev_stripe_dirty()
149 uint64_t offset, int nr_sectors);
78 bcache_dev_stripe_dirty(struct cached_dev *dc, uint64_t offset, unsigned int nr_sectors) bcache_dev_stripe_dirty() argument
writeback.c
597 uint64_t offset, int nr_sectors) in bcache_dev_sectors_dirty_add()
611 atomic_long_add(nr_sectors, &c->flash_dev_dirty_sectors); in bcache_dev_sectors_dirty_add()
615 while (nr_sectors) { in bcache_dev_sectors_dirty_add()
616 int s = min_t(unsigned int, abs(nr_sectors), in bcache_dev_sectors_dirty_add()
619 if (nr_sectors < 0) in bcache_dev_sectors_dirty_add()
635 nr_sectors -= s; in bcache_dev_sectors_dirty_add()
596 bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned int inode, uint64_t offset, int nr_sectors) bcache_dev_sectors_dirty_add() argument
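
Both bcache trees show the same pattern: bcache_dev_sectors_dirty_add() walks the stripes covered by [offset, offset + nr_sectors), clamping each step to the remainder of the current stripe, and nr_sectors may be negative when sectors go clean. A userspace model with a flat counter array standing in for bcache's per-stripe atomics (names illustrative):

    #include <stdio.h>
    #include <stdlib.h>

    #define STRIPE_SIZE 8                /* sectors per stripe, for the demo */
    static long dirty[16];               /* per-stripe dirty-sector counts */

    static void sectors_dirty_add(unsigned long offset, int nr_sectors)
    {
        unsigned long stripe = offset / STRIPE_SIZE;
        unsigned long stripe_offset = offset % STRIPE_SIZE;

        while (nr_sectors) {
            long room = STRIPE_SIZE - stripe_offset;
            int s = abs(nr_sectors) < room ? abs(nr_sectors) : (int)room;

            if (nr_sectors < 0)
                s = -s;                  /* sectors becoming clean */
            dirty[stripe] += s;
            nr_sectors -= s;
            stripe_offset = 0;
            stripe++;
        }
    }

    int main(void)
    {
        sectors_dirty_add(5, 10);        /* 3 sectors in stripe 0, 7 in stripe 1 */
        printf("stripe0=%ld stripe1=%ld\n", dirty[0], dirty[1]);
        return 0;
    }
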
/kernel/linux/linux-6.6/block/partitions/
ibm.c
201 sector_t nr_sectors, in find_lnx1_partitions()
216 * 'size based on geo == size based on nr_sectors' is true, then in find_lnx1_partitions()
223 size = nr_sectors; in find_lnx1_partitions()
232 /* else keep size based on nr_sectors */ in find_lnx1_partitions()
297 sector_t nr_sectors; in ibm_partition() local
312 nr_sectors = bdev_nr_sectors(bdev); in ibm_partition()
313 if (nr_sectors == 0) in ibm_partition()
340 label, labelsect, nr_sectors, in ibm_partition()
357 size = nr_sectors; in ibm_partition()
195 find_lnx1_partitions(struct parsed_partitions *state, struct hd_geometry *geo, int blocksize, char name[], union label_t *label, sector_t labelsect, sector_t nr_sectors, dasd_information2_t *info) find_lnx1_partitions() argument
/kernel/linux/linux-6.6/drivers/block/null_blk/
zoned.c
235 unsigned int nr_sectors = len >> SECTOR_SHIFT; in null_zone_valid_read_len() local
239 sector + nr_sectors <= zone->wp) in null_zone_valid_read_len()
364 unsigned int nr_sectors, bool append) in null_zone_write()
376 return null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors); in null_zone_write()
406 if (zone->wp + nr_sectors > zone->start + zone->capacity) { in null_zone_write()
433 ret = null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors); in null_zone_write()
437 zone->wp += nr_sectors; in null_zone_write()
662 sector_t sector, sector_t nr_sectors) in null_process_zoned_cmd()
670 return null_zone_write(cmd, sector, nr_sectors, false); in null_process_zoned_cmd()
672 return null_zone_write(cmd, sector, nr_sectors, true); in null_process_zoned_cmd()
363 null_zone_write(struct nullb_cmd *cmd, sector_t sector, unsigned int nr_sectors, bool append) null_zone_write() argument
661 null_process_zoned_cmd(struct nullb_cmd *cmd, enum req_op op, sector_t sector, sector_t nr_sectors) null_process_zoned_cmd() argument
[all...]
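
The null_zone_write() hits in both trees show the core sequential-write rule for zoned devices: a write that would advance the write pointer past start + capacity is rejected, and a successful write moves the pointer by nr_sectors. A standalone model (struct layout illustrative):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct zone { uint64_t start, wp, capacity; };

    static bool zone_write(struct zone *z, uint32_t nr_sectors)
    {
        if (z->wp + nr_sectors > z->start + z->capacity)
            return false;                /* would overflow the zone */
        /* ... data would be written at z->wp here ... */
        z->wp += nr_sectors;
        return true;
    }

    int main(void)
    {
        struct zone z = { .start = 0, .wp = 0, .capacity = 16 };
        printf("%d wp=%llu\n", zone_write(&z, 8),
               (unsigned long long)z.wp);   /* 1 wp=8 */
        printf("%d wp=%llu\n", zone_write(&z, 16),
               (unsigned long long)z.wp);   /* 0 wp=8: rejected */
        return 0;
    }
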
null_blk.h
145 sector_t nr_sectors);
147 sector_t sector, unsigned int nr_sectors);
156 sector_t sector, sector_t nr_sectors);
174 enum req_op op, sector_t sector, sector_t nr_sectors) in null_process_zoned_cmd()
173 null_process_zoned_cmd(struct nullb_cmd *cmd, enum req_op op, sector_t sector, sector_t nr_sectors) null_process_zoned_cmd() argument
main.c
1203 sector_t sector, sector_t nr_sectors) in null_handle_discard()
1206 size_t n = nr_sectors << SECTOR_SHIFT; in null_handle_discard()
1366 sector_t nr_sectors) in null_handle_badblocks()
1372 if (badblocks_check(bb, sector, nr_sectors, &first_bad, &bad_sectors)) in null_handle_badblocks()
1381 sector_t nr_sectors) in null_handle_memory_backed()
1387 return null_handle_discard(dev, sector, nr_sectors); in null_handle_memory_backed()
1450 sector_t sector, unsigned int nr_sectors) in null_process_cmd()
1456 ret = null_handle_badblocks(cmd, sector, nr_sectors); in null_process_cmd()
1462 return null_handle_memory_backed(cmd, op, sector, nr_sectors); in null_process_cmd()
1468 sector_t nr_sectors, enum req_op op) in null_handle_cmd()
1202 null_handle_discard(struct nullb_device *dev, sector_t sector, sector_t nr_sectors) null_handle_discard() argument
1364 null_handle_badblocks(struct nullb_cmd *cmd, sector_t sector, sector_t nr_sectors) null_handle_badblocks() argument
1378 null_handle_memory_backed(struct nullb_cmd *cmd, enum req_op op, sector_t sector, sector_t nr_sectors) null_handle_memory_backed() argument
1449 null_process_cmd(struct nullb_cmd *cmd, enum req_op op, sector_t sector, unsigned int nr_sectors) null_process_cmd() argument
1467 null_handle_cmd(struct nullb_cmd *cmd, sector_t sector, sector_t nr_sectors, enum req_op op) null_handle_cmd() argument
1539 sector_t nr_sectors = bio_sectors(bio); null_submit_bio() local
1543 null_handle_cmd(alloc_cmd(nq, bio), sector, nr_sectors, bio_op(bio)); null_submit_bio() local
1711 sector_t nr_sectors = blk_rq_sectors(rq); null_queue_rq() local
[all...]
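
The null_handle_discard() hit converts a sector count to bytes with nr_sectors << SECTOR_SHIFT. SECTOR_SHIFT is 9 because the block layer's sector unit is fixed at 512 bytes regardless of the device's logical block size; a quick check:

    #include <stdio.h>

    #define SECTOR_SHIFT 9               /* 512-byte block-layer sectors */

    int main(void)
    {
        unsigned long long nr_sectors = 8;
        printf("%llu sectors = %llu bytes\n",
               nr_sectors, nr_sectors << SECTOR_SHIFT);  /* 8 * 512 = 4096 */
        return 0;
    }
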
/kernel/linux/linux-6.6/fs/btrfs/
scrub.c
122 u16 nr_sectors; member
257 stripe->nr_sectors = BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits; in init_scrub_stripe()
269 stripe->sectors = kcalloc(stripe->nr_sectors, in init_scrub_stripe()
700 ASSERT(sector_nr >= 0 && sector_nr < stripe->nr_sectors); in scrub_verify_one_sector()
720 if (unlikely(sector_nr + sectors_per_tree > stripe->nr_sectors)) { in scrub_verify_one_sector()
758 for_each_set_bit(sector_nr, &bitmap, stripe->nr_sectors) { in scrub_verify_one_stripe()
769 for (i = 0; i < stripe->nr_sectors; i++) { in calc_sector_number()
774 ASSERT(i < stripe->nr_sectors); in calc_sector_number()
793 ASSERT(sector_nr < stripe->nr_sectors); in scrub_repair_read_endio()
829 for_each_set_bit(i, &old_error_bitmap, stripe->nr_sectors) { in scrub_stripe_submit_repair_read()
1653 unsigned int nr_sectors = min_t(u64, BTRFS_STRIPE_LEN, stripe->bg->start + scrub_submit_initial_read() local
[all...]
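
In the scrub.c hits, init_scrub_stripe() sizes each stripe's sector array as BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits. With the 64 KiB stripe length and a common 4 KiB sectorsize (sectorsize_bits = 12) that works out to 16 sectors per stripe:

    #include <stdio.h>

    int main(void)
    {
        unsigned int stripe_len = 64 * 1024;  /* BTRFS_STRIPE_LEN */
        unsigned int sectorsize_bits = 12;    /* 4 KiB fs sectors (assumed) */
        printf("nr_sectors = %u\n",
               stripe_len >> sectorsize_bits); /* 16 */
        return 0;
    }
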
raid56.c
176 for (i = 0; i < rbio->nr_sectors; i++) { in cache_rbio_pages()
247 for (i = 0, offset = 0; i < rbio->nr_sectors; i++, offset += sectorsize) { in index_stripe_sectors()
882 ASSERT(index >= 0 && index < rbio->nr_sectors); in sector_in_rbio()
949 rbio->nr_sectors = num_sectors; in alloc_rbio()
1244 bitmap_clear(rbio->error_bitmap, 0, rbio->nr_sectors); in rmw_assemble_write_bios()
1250 for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors; in rmw_assemble_write_bios()
1285 for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors; in rmw_assemble_write_bios()
1376 for (i = 0; i < rbio->nr_sectors; i++) { in find_stripe_sector()
1416 for (i = 0; i < rbio->nr_sectors; i++) { in get_bio_sector_nr()
1426 ASSERT(i < rbio->nr_sectors); in get_bio_sector_nr()
[all...]
/kernel/linux/linux-6.6/fs/zonefs/
trace.h
31 __field(sector_t, nr_sectors)
39 __entry->nr_sectors = z->z_size >> SECTOR_SHIFT;
41 TP_printk("bdev=(%d,%d), ino=%lu op=%s, sector=%llu, nr_sectors=%llu",
44 __entry->nr_sectors
/kernel/linux/linux-6.6/drivers/md/
dm-zone.c
359 unsigned int nr_sectors; member
425 unsigned int nr_sectors) in dm_zone_map_bio_end()
444 WRITE_ONCE(md->zwp_offset[zno], zwp_offset + nr_sectors); in dm_zone_map_bio_end()
451 if (nr_sectors != orig_bio_details->nr_sectors) { in dm_zone_map_bio_end()
455 WRITE_ONCE(md->zwp_offset[zno], zwp_offset + nr_sectors); in dm_zone_map_bio_end()
534 orig_bio_details.nr_sectors = bio_sectors(clone); in dm_zone_map_bio()
423 dm_zone_map_bio_end(struct mapped_device *md, unsigned int zno, struct orig_bio_details *orig_bio_details, unsigned int nr_sectors) dm_zone_map_bio_end() argument
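
The dm-zone.c hits are part of device-mapper's zone-append emulation: on completion, dm_zone_map_bio_end() advances the cached per-zone write pointer offset by the completed nr_sectors, and the mismatch test at line 451 appears to catch a target that truncated the emulated append. A hedged standalone model of that completion rule:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t zwp_offset;  /* cached write pointer offset of one zone */

    static bool zone_append_end(uint32_t done, uint32_t orig_nr_sectors)
    {
        if (done != orig_nr_sectors)
            return false;        /* truncated write: fail, don't advance */
        zwp_offset += done;
        return true;
    }

    int main(void)
    {
        printf("%d offset=%u\n", zone_append_end(8, 8), zwp_offset); /* 1, 8 */
        printf("%d offset=%u\n", zone_append_end(4, 8), zwp_offset); /* 0, 8 */
        return 0;
    }
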
md.h
606 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors) in md_sync_acct() argument
608 atomic_add(nr_sectors, &bdev->bd_disk->sync_io); in md_sync_acct()
611 static inline void md_sync_acct_bio(struct bio *bio, unsigned long nr_sectors) in md_sync_acct_bio() argument
613 md_sync_acct(bio->bi_bdev, nr_sectors); in md_sync_acct_bio()
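
md_sync_acct() is just an atomic add of nr_sectors into the disk's sync_io counter, which the resync code later reads to gauge sync throughput. A one-file model using C11 atomics:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_long sync_io;  /* models bdev->bd_disk->sync_io */

    static void md_sync_acct_model(unsigned long nr_sectors)
    {
        atomic_fetch_add(&sync_io, (long)nr_sectors);
    }

    int main(void)
    {
        md_sync_acct_model(8);
        md_sync_acct_model(16);
        printf("sync_io=%ld\n", atomic_load(&sync_io)); /* 24 */
        return 0;
    }
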
dm-log-writes.c
72 * [ 1 sector ][ entry->nr_sectors ]
91 * nr_sectors - the number of sectors we wrote.
98 __le64 nr_sectors; member
127 sector_t nr_sectors; member
327 entry.nr_sectors = cpu_to_le64(block->nr_sectors); in log_one_block()
451 lc->next_sector += dev_to_bio_sectors(lc, block->nr_sectors); in log_writes_kthread()
704 block->nr_sectors = bio_to_dev_sectors(lc, bio_sectors(bio)); in log_writes_map()
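
The dm-log-writes layout comment and the cpu_to_le64() call together pin down the on-disk format: each log entry is one metadata sector followed by entry->nr_sectors of data, with nr_sectors persisted little-endian. A sketch using glibc's htole64() as the userspace stand-in for cpu_to_le64():

    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t nr_sectors = 8;
        uint64_t on_disk = htole64(nr_sectors);    /* fixed byte order */
        uint64_t footprint = 1 + nr_sectors;       /* metadata sector + data */

        printf("on_disk=0x%llx footprint=%llu sectors\n",
               (unsigned long long)on_disk,
               (unsigned long long)footprint);
        return 0;
    }
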
dm-zoned-target.c
631 unsigned int nr_sectors = bio_sectors(bio); in dmz_map() local
640 bio_op(bio), (unsigned long long)sector, nr_sectors, in dmz_map()
645 if (!nr_sectors && bio_op(bio) != REQ_OP_WRITE) in dmz_map()
649 if ((nr_sectors & DMZ_BLOCK_SECTORS_MASK) || (sector & DMZ_BLOCK_SECTORS_MASK)) in dmz_map()
659 if (!nr_sectors && bio_op(bio) == REQ_OP_WRITE) { in dmz_map()
669 if (chunk_sector + nr_sectors > dmz_zone_nr_sectors(zmd)) in dmz_map()
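
The dmz_map() hit enforces dm-zoned's alignment rule: both the starting sector and nr_sectors must be multiples of the 4 KiB block (8 sectors), i.e. have the low three bits clear. A standalone model of that mask test:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define DMZ_BLOCK_SECTORS_MASK 7u    /* 4 KiB block = 8 x 512 B sectors */

    static bool dmz_aligned(uint64_t sector, uint32_t nr_sectors)
    {
        return !(sector & DMZ_BLOCK_SECTORS_MASK) &&
               !(nr_sectors & DMZ_BLOCK_SECTORS_MASK);
    }

    int main(void)
    {
        printf("%d\n", dmz_aligned(0, 8));  /* 1 */
        printf("%d\n", dmz_aligned(4, 8));  /* 0: start not block-aligned */
        return 0;
    }
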
/kernel/linux/linux-5.10/drivers/block/xen-blkback/
common.h
93 uint64_t nr_sectors; member
147 uint64_t nr_sectors; member
424 dst->u.discard.nr_sectors = src->u.discard.nr_sectors; in blkif_get_x86_32_req()
472 dst->u.discard.nr_sectors = src->u.discard.nr_sectors; in blkif_get_x86_64_req()
/kernel/linux/linux-5.10/drivers/md/
dm-log-writes.c
71 * [ 1 sector ][ entry->nr_sectors ]
90 * nr_sectors - the number of sectors we wrote.
97 __le64 nr_sectors; member
126 sector_t nr_sectors; member
340 entry.nr_sectors = cpu_to_le64(block->nr_sectors); in log_one_block()
473 lc->next_sector += dev_to_bio_sectors(lc, block->nr_sectors); in log_writes_kthread()
726 block->nr_sectors = bio_to_dev_sectors(lc, bio_sectors(bio)); in log_writes_map()
937 block->nr_sectors = ALIGN(bytes, lc->sectorsize) >> lc->sectorshift; in log_dax()
md.h
551 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors) in md_sync_acct() argument
553 atomic_add(nr_sectors, &bdev->bd_disk->sync_io); in md_sync_acct()
556 static inline void md_sync_acct_bio(struct bio *bio, unsigned long nr_sectors) in md_sync_acct_bio() argument
558 atomic_add(nr_sectors, &bio->bi_disk->sync_io); in md_sync_acct_bio()
/kernel/linux/patches/linux-4.19/prebuilts/usr/include/linux/
blkzoned.h
43 __u64 nr_sectors; member
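
This nr_sectors member belongs to the UAPI struct blk_zone_range, the argument block for the zone-management ioctls serviced by the blkdev_zone_mgmt_ioctl() path seen earlier. A userspace sketch resetting the first zone; the device path and the 256 MiB zone size are assumptions for the example:

    #include <fcntl.h>
    #include <linux/blkzoned.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    int main(void)
    {
        int fd = open("/dev/nullb0", O_RDWR);
        if (fd < 0) {
            perror("open");
            return 1;
        }

        struct blk_zone_range range = {
            .sector = 0,
            .nr_sectors = 256 * 2048,  /* one 256 MiB zone, 512 B sectors */
        };
        if (ioctl(fd, BLKRESETZONE, &range))
            perror("BLKRESETZONE");
        close(fd);
        return 0;
    }
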
