Lines Matching refs:sector
13 static sector_t bio_discard_limit(struct block_device *bdev, sector_t sector)
19 sector += bdev->bd_start_sect;
22 round_up(sector, discard_granularity >> SECTOR_SHIFT);
28 if (granularity_aligned_sector != sector)
29 return granularity_aligned_sector - sector;
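The three matches above are the alignment fix-up in bio_discard_limit(): the start sector is rebased to the whole device, rounded up to the discard granularity (converted from bytes to sectors), and, if that moved it, the distance to the boundary becomes the cap for the first bio. A minimal user-space sketch of that arithmetic, assuming 512-byte sectors and made-up values for the start sector and granularity; round_up() here mirrors the kernel macro for power-of-two alignment:

#include <stdint.h>
#include <stdio.h>

#define SECTOR_SHIFT 9
/* power-of-two round up, equivalent to the kernel's round_up() */
#define round_up(x, y) ((((x) - 1) | ((y) - 1)) + 1)

int main(void)
{
	uint64_t sector = 1000;               /* hypothetical start, already rebased by bd_start_sect */
	uint64_t discard_granularity = 65536; /* 64 KiB granularity, in bytes */
	uint64_t gran_sectors = discard_granularity >> SECTOR_SHIFT; /* 128 sectors */
	uint64_t aligned = round_up(sector, gran_sectors);

	/* A misaligned start caps the first bio at the gap to the next
	 * boundary, so every later bio starts granularity-aligned. */
	if (aligned != sector)
		printf("first bio limited to %llu sectors\n",
		       (unsigned long long)(aligned - sector));
	return 0;
}

With these values the first discard bio is capped at 24 sectors, after which the range sits on a 64 KiB boundary.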
38 int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
57 if ((sector | nr_sects) & bs_mask)
65 min(nr_sects, bio_discard_limit(bdev, sector));
68 bio->bi_iter.bi_sector = sector;
70 sector += req_sects;
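The matches above are the splitting loop in __blkdev_issue_discard(): each iteration caps the bio at min(nr_sects, bio_discard_limit(...)), points it at the current sector, then advances. A rough stand-alone sketch of that chunking pattern, with a hypothetical limit() helper standing in for bio_discard_limit():

#include <stdint.h>
#include <stdio.h>

/* Stand-in for bio_discard_limit(): cap each chunk at the distance to
 * the next 128-sector boundary (illustrative policy only). */
static uint64_t limit(uint64_t sector)
{
	return 128 - (sector % 128);
}

int main(void)
{
	uint64_t sector = 1000, nr_sects = 300;

	while (nr_sects) {
		uint64_t cap = limit(sector);
		uint64_t req_sects = nr_sects < cap ? nr_sects : cap;

		/* one bio would cover [sector, sector + req_sects) */
		printf("bio: start=%llu len=%llu\n",
		       (unsigned long long)sector,
		       (unsigned long long)req_sects);

		sector += req_sects;
		nr_sects -= req_sects;
	}
	return 0;
}

With the sample values the range is emitted as 24 + 128 + 128 + 20 sectors: only the first bio is short, the rest stay aligned.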
90 * @sector: start sector
97 int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
105 ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, &bio);
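blkdev_issue_discard() is the synchronous wrapper around the loop above. A minimal kernel-side sketch of calling it, assuming the four-argument signature implied by the call at line 105, with illustrative sector values and only a basic support check:

#include <linux/blkdev.h>

/* Discard 1 MiB starting at sector 2048 (illustrative values). */
static int example_discard(struct block_device *bdev)
{
	sector_t start = 2048;
	sector_t nr = (1024 * 1024) >> SECTOR_SHIFT;

	if (!bdev_max_discard_sectors(bdev))
		return -EOPNOTSUPP;	/* device does not support discard */

	return blkdev_issue_discard(bdev, start, nr, GFP_KERNEL);
}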
119 sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
136 bio->bi_iter.bi_sector = sector;
143 sector += max_write_zeroes_sectors;
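The match at line 143 shows __blkdev_issue_write_zeroes() advancing in steps of the device's write-zeroes limit, so each bio is as large as the hardware allows. A small sketch of probing that limit before relying on this path, using the stock bdev_write_zeroes_sectors() helper; a zero return means REQ_OP_WRITE_ZEROES is unsupported and only the zero-pages fallback below remains:

#include <linux/blkdev.h>

/* True when the device advertises a non-zero write-zeroes limit,
 * i.e. REQ_OP_WRITE_ZEROES bios can be issued at all. */
static bool example_supports_write_zeroes(struct block_device *bdev)
{
	return bdev_write_zeroes_sectors(bdev) != 0;
}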
169 sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
182 bio->bi_iter.bi_sector = sector;
188 sector += bi_size >> 9;
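__blkdev_issue_zero_pages() has no hardware offload: it queues zero pages into each bio and, at line 188, converts however many bytes were queued (bi_size) back into a sector count to advance the cursor. A user-space sketch of that accumulate-then-advance pattern, with a made-up four-page cap standing in for a full bio:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE	4096ULL
#define SECTOR_SHIFT	9

int main(void)
{
	uint64_t sector = 0, nr_sects = 24;	/* 12 KiB left to zero */

	while (nr_sects) {
		uint64_t bi_size = 0;		/* bytes queued into one "bio" */

		/* Add zero pages until the request is covered or the
		 * pretend bio is full (4 pages, purely illustrative). */
		while ((bi_size >> SECTOR_SHIFT) < nr_sects &&
		       bi_size < 4 * PAGE_SIZE) {
			uint64_t want = (nr_sects << SECTOR_SHIFT) - bi_size;

			bi_size += want < PAGE_SIZE ? want : PAGE_SIZE;
		}

		printf("bio at sector %llu covers %llu sectors\n",
		       (unsigned long long)sector,
		       (unsigned long long)(bi_size >> SECTOR_SHIFT));

		sector += bi_size >> SECTOR_SHIFT;	/* the line-188 advance */
		nr_sects -= bi_size >> SECTOR_SHIFT;
	}
	return 0;
}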
202 * @sector: start sector
218 int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
226 if ((sector | nr_sects) & bs_mask)
229 ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp_mask,
234 return __blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp_mask,
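Lines 218-234 are __blkdev_issue_zeroout(): validate alignment against the logical block size, try the write-zeroes path, and fall back to zero pages only when that path is unavailable and the caller allows it. A rough kernel-side sketch of driving this chained variant from a caller that submits the bio chain itself; the function name and values are illustrative, and the plug/submit_bio_wait pattern roughly mirrors what the synchronous wrapper below does:

#include <linux/bio.h>
#include <linux/blkdev.h>

static int example_zeroout_chain(struct block_device *bdev,
				 sector_t sector, sector_t nr_sects)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_KERNEL,
				     &bio, BLKDEV_ZERO_NOUNMAP);
	if (ret == 0 && bio) {
		ret = submit_bio_wait(bio);	/* wait for the whole chain */
		bio_put(bio);
	}
	blk_finish_plug(&plug);
	return ret;
}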
242 * @sector: start sector
252 int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
262 if ((sector | nr_sects) & bs_mask)
269 ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects,
272 ret = __blkdev_issue_zero_pages(bdev, sector, nr_sects,
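blkdev_issue_zeroout() wraps the same two paths synchronously, retrying with the zero-pages fallback when the write-zeroes attempt fails and BLKDEV_ZERO_NOFALLBACK is not set. A minimal caller sketch with illustrative values; a caller that can tolerate the slow fallback would simply pass 0 for the flags argument:

#include <linux/blkdev.h>

/* Zero 8 sectors at the start of the device, refusing the slow
 * page-writing fallback (illustrative range and flags). */
static int example_zeroout(struct block_device *bdev)
{
	return blkdev_issue_zeroout(bdev, 0, 8, GFP_KERNEL,
				    BLKDEV_ZERO_NOFALLBACK);
}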
303 int blkdev_issue_secure_erase(struct block_device *bdev, sector_t sector,
319 if ((sector | nr_sects) & bs_mask)
329 bio->bi_iter.bi_sector = sector;
332 sector += len;
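The final group (lines 303-332) is blkdev_issue_secure_erase(), which follows the same validate-then-chunk pattern, advancing by len each iteration. A minimal kernel-side sketch of calling it, assuming the four-argument signature suggested by line 303 and using bdev_max_secure_erase_sectors() as the support probe; the range is illustrative:

#include <linux/blkdev.h>

/* Securely erase the first 1 MiB of the device, bailing out when the
 * device does not advertise secure-erase support. */
static int example_secure_erase(struct block_device *bdev)
{
	if (!bdev_max_secure_erase_sectors(bdev))
		return -EOPNOTSUPP;

	return blkdev_issue_secure_erase(bdev, 0,
					 (1024 * 1024) >> SECTOR_SHIFT,
					 GFP_KERNEL);
}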