Lines Matching defs:limits

29 * blk_set_default_limits - reset limits to default values
65 * blk_set_stacking_limits - set default limits for stacking devices
70 * by stacking drivers like DM that have no internal limits.
76 /* Inherit limits from component devices */
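
A minimal usage sketch (not among the matches above): a hypothetical stacking driver starts from the permissive stacking defaults and then narrows them per component device. The name stacker_init_limits and the pr_warn text are illustrative; the setters used here are all declared in linux/blkdev.h.

    #include <linux/blkdev.h>

    static void stacker_init_limits(struct request_queue *q,
                                    struct block_device *component)
    {
            /* Start wide open; stacking only ever narrows limits */
            blk_set_stacking_limits(&q->limits);

            /* Inherit limits from component devices */
            if (blk_stack_limits(&q->limits,
                                 &bdev_get_queue(component)->limits,
                                 get_start_sect(component)) < 0)
                    pr_warn("stacker: component device is misaligned\n");
    }
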
100 q->limits.bounce = bounce;
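
The bounce setting only records whether highmem pages must be copied to DMA-able memory before I/O. A one-line sketch, assuming hardware that cannot address highmem:

    blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);    /* bounce highmem pages */
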
125 struct queue_limits *limits = &q->limits;
135 limits->logical_block_size >> SECTOR_SHIFT);
136 limits->max_hw_sectors = max_hw_sectors;
138 max_sectors = min_not_zero(max_hw_sectors, limits->max_dev_sectors);
140 if (limits->max_user_sectors)
141 max_sectors = min(max_sectors, limits->max_user_sectors);
146 limits->logical_block_size >> SECTOR_SHIFT);
147 limits->max_sectors = max_sectors;
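
The derivation above: the hardware cap is rounded down to a logical-block multiple, then the soft max_sectors is the smallest non-zero of the hardware and per-device caps, further clamped by any user-set limit (or the kernel default when none is set) and rounded down again. Sketch with an illustrative 1 MiB hardware cap:

    /* 2048 x 512-byte sectors = 1 MiB per request at the hardware
     * level; max_sectors ends up <= this after the clamps above. */
    blk_queue_max_hw_sectors(q, 2048);
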
169 q->limits.chunk_sectors = chunk_sectors;
181 q->limits.max_hw_discard_sectors = max_discard_sectors;
182 q->limits.max_discard_sectors = max_discard_sectors;
194 q->limits.max_secure_erase_sectors = max_sectors;
207 q->limits.max_write_zeroes_sectors = max_write_zeroes_sectors;
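
The discard, secure-erase and write-zeroes caps follow the same pattern; note that the discard setter fills both the hardware and the effective limit (lines 181-182). Sketch for a device supporting all three, values illustrative:

    blk_queue_max_discard_sectors(q, UINT_MAX);
    blk_queue_max_secure_erase_sectors(q, UINT_MAX);
    blk_queue_max_write_zeroes_sectors(q, UINT_MAX);
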
224 max_sectors = min(q->limits.max_hw_sectors, max_zone_append_sectors);
225 max_sectors = min(q->limits.chunk_sectors, max_sectors);
234 q->limits.max_zone_append_sectors = max_sectors;
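
A zone-append write may never cross a zone, so the requested cap is clamped to both the hardware limit and chunk_sectors, which holds the zone size on zoned devices (lines 224-225). Sketch, names illustrative:

    static void foo_set_zone_limits(struct request_queue *q,
                                    unsigned int zone_sectors)
    {
            /* chunk_sectors doubles as the zone size on zoned devices */
            blk_queue_chunk_sectors(q, zone_sectors);
            /* clamped internally to min(max_hw_sectors, chunk_sectors) */
            blk_queue_max_zone_append_sectors(q, zone_sectors);
    }
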
255 q->limits.max_segments = max_segments;
271 q->limits.max_discard_segments = max_segments;
293 WARN_ON_ONCE(q->limits.virt_boundary_mask);
295 q->limits.max_segment_size = max_size;
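
The WARN_ON_ONCE at line 293 fires because a virt boundary already forces the segment size to UINT_MAX (line 771 below); a finite segment size and a virt boundary are mutually exclusive. Sketch for a controller with a 128-entry scatter-gather table, values illustrative:

    blk_queue_max_segments(q, 128);          /* SG table entries */
    blk_queue_max_discard_segments(q, 1);    /* one range per discard */
    blk_queue_max_segment_size(q, 65536);    /* 64 KiB per SG element */
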
311 struct queue_limits *limits = &q->limits;
313 limits->logical_block_size = size;
315 if (limits->physical_block_size < size)
316 limits->physical_block_size = size;
318 if (limits->io_min < limits->physical_block_size)
319 limits->io_min = limits->physical_block_size;
321 limits->max_hw_sectors =
322 round_down(limits->max_hw_sectors, size >> SECTOR_SHIFT);
323 limits->max_sectors =
324 round_down(limits->max_sectors, size >> SECTOR_SHIFT);
340 q->limits.physical_block_size = size;
342 if (q->limits.physical_block_size < q->limits.logical_block_size)
343 q->limits.physical_block_size = q->limits.logical_block_size;
345 if (q->limits.io_min < q->limits.physical_block_size)
346 q->limits.io_min = q->limits.physical_block_size;
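
Both block-size setters enforce the invariant logical <= physical <= io_min, and the logical setter additionally rounds the sector caps down to logical-block multiples (lines 321-324). Sketch for a 4K-native device:

    /* Pulls physical_block_size and io_min up to at least 4096 and
     * trims max_hw_sectors/max_sectors to 4 KiB multiples. */
    blk_queue_logical_block_size(q, 4096);
    blk_queue_physical_block_size(q, 4096);
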
365 q->limits.zone_write_granularity = size;
367 if (q->limits.zone_write_granularity < q->limits.logical_block_size)
368 q->limits.zone_write_granularity = q->limits.logical_block_size;
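
Zone write granularity is likewise floored at the logical block size. Sketch, value illustrative:

    /* e.g. a host-managed SMR drive requiring 4 KiB-aligned writes
     * within its zones */
    blk_queue_zone_write_granularity(q, 4096);
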
385 q->limits.alignment_offset =
386 offset & (q->limits.physical_block_size - 1);
387 q->limits.misaligned = 0;
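
The offset is reduced modulo the physical block size and the misaligned flag is cleared. Sketch, value illustrative:

    /* e.g. a 512e drive whose natural 4 KiB boundaries fall 3584
     * bytes after LBA 0 */
    blk_queue_alignment_offset(q, 3584);
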
407 * @limits: the queue limits
416 void blk_limits_io_min(struct queue_limits *limits, unsigned int min)
418 limits->io_min = min;
420 if (limits->io_min < limits->logical_block_size)
421 limits->io_min = limits->logical_block_size;
423 if (limits->io_min < limits->physical_block_size)
424 limits->io_min = limits->physical_block_size;
444 blk_limits_io_min(&q->limits, min);
450 * @limits: the queue limits
461 void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt)
463 limits->io_opt = opt;
482 blk_limits_io_opt(&q->limits, opt);
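
io_min is floored at both block sizes (lines 420-424), while io_opt is stored as given. Sketch for a hypothetical RAID5 set with 64 KiB chunks over four data disks:

    blk_queue_io_min(q, 64 * 1024);        /* preferred minimum: one chunk */
    blk_queue_io_opt(q, 4 * 64 * 1024);    /* optimal: one full stripe */
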
534 * @t: the stacking driver limits (top device)
535 * @b: the underlying queue limits (bottom, component device)
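
The merge keeps the most restrictive of each pair: transfer caps take the smaller value, granularities the larger, and io_opt the least common multiple. An abridged paraphrase of the body (from memory, not from the matches above):

    t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
    t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
    t->logical_block_size = max(t->logical_block_size,
                                b->logical_block_size);
    t->physical_block_size = max(t->physical_block_size,
                                 b->physical_block_size);
    t->io_min = max(t->io_min, b->io_min);
    t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);
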
698 * disk_stack_limits - adjust queue limits for stacked drivers
704 * Merges the limits for a top level gendisk and a bottom level
712 if (blk_stack_limits(&t->limits, &bdev_get_queue(bdev)->limits,
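
A usage sketch for the wrapper: the third argument is the byte offset at which this component's data begins inside the bdev; the partition start sector is added internally, as the call at line 712 suggests. Names illustrative:

    static void stacker_add_component(struct gendisk *disk,
                                      struct block_device *bdev,
                                      sector_t data_start_sect)
    {
            disk_stack_limits(disk, bdev, data_start_sect << SECTOR_SHIFT);
    }
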
751 q->limits.seg_boundary_mask = mask;
762 q->limits.virt_boundary_mask = mask;
771 q->limits.max_segment_size = UINT_MAX;
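
The segment boundary mask keeps a single segment from crossing a DMA boundary; the virt boundary mask constrains the alignment of every scatter-gather element and, per line 771, implies an unlimited segment size. Sketches, values illustrative:

    blk_queue_segment_boundary(q, 0xffffffffUL);    /* never cross 4 GiB */
    blk_queue_virt_boundary(q, PAGE_SIZE - 1);      /* NVMe PRP-style rule */
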
787 q->limits.dma_alignment = mask;
809 if (mask > q->limits.dma_alignment)
810 q->limits.dma_alignment = mask;
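
The DMA alignment mask is the required alignment minus one; the update variant only ever tightens it (line 809). Sketch:

    blk_queue_dma_alignment(q, 512 - 1);            /* require 512-byte alignment */
    blk_queue_update_dma_alignment(q, 4096 - 1);    /* raise, never lower */
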
925 unsigned int old_model = q->limits.zoned;
955 q->limits.zoned = model;
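
The old model is captured (line 925) so revalidation can run only when the model actually changes. Sketch, assuming a kernel of this vintage where the zoned model is still an enum:

    disk_set_zoned(disk, BLK_ZONED_HM);    /* declare host-managed zones */
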
973 if (q->limits.misaligned)
976 return queue_limit_alignment_offset(&q->limits,
978 return q->limits.alignment_offset;
987 return queue_limit_discard_alignment(&q->limits,
989 return q->limits.discard_alignment;
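
Consumer-side sketch: the bdev-level query returns -1 for a misaligned device and otherwise folds in the partition start sector. The policy shown is illustrative:

    int off = bdev_alignment_offset(bdev);

    if (off < 0)
            pr_warn("misaligned device, expect degraded throughput\n");
    else if (off)
            pr_info("natural alignment begins %d bytes in\n", off);
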