Lines Matching defs:depth
161 * q_usage_counter, otherwise this increases the freeze depth
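The match at 161 is part of the comment on queue freezing: only the first freeze actually drains q_usage_counter, while nested calls just bump a depth count. A minimal sketch of that nesting pattern, assuming a plain counter under a mutex; every name here is illustrative, not the blk-mq code:

    /* Sketch: nested freeze via a depth counter. Only the 0 -> 1
     * transition does the real work; deeper calls simply nest, and
     * only the final unfreeze undoes it. */
    struct sketch_queue {
            struct mutex freeze_lock;
            unsigned int freeze_depth;
    };

    static void sketch_freeze_start(struct sketch_queue *q)
    {
            mutex_lock(&q->freeze_lock);
            if (q->freeze_depth++ == 0)
                    sketch_drain_usage_counter(q);  /* hypothetical */
            mutex_unlock(&q->freeze_lock);
    }

    static void sketch_unfreeze(struct sketch_queue *q)
    {
            mutex_lock(&q->freeze_lock);
            if (--q->freeze_depth == 0)
                    sketch_reinit_usage_counter(q); /* hypothetical */
            mutex_unlock(&q->freeze_lock);
    }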
443 /* alloc_time includes depth and tag waits */
626 /* alloc_time includes depth and tag waits */
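443 and 626 are the same comment in the two request-allocation paths, and the point is ordering: the timestamp is taken before the allocation may block on queue depth or on a free tag, so that wait time is charged to allocation. A rough sketch of that ordering; only ktime_get_ns() and the alloc_time_ns field are taken from the kernel, the other names are illustrative:

    u64 alloc_time_ns = 0;

    if (track_alloc_time)                   /* illustrative gate */
            alloc_time_ns = ktime_get_ns(); /* stamp before any waiting */

    tag = sketch_get_tag(q);                /* may sleep on depth/tag limits */
    rq = sketch_init_request(q, tag);
    rq->alloc_time_ns = alloc_time_ns;      /* so the waits are included */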
2764 unsigned int depth = 0;
2781 depth++;
2785 trace_block_unplug(this_hctx->queue, depth, !from_sched);
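The 2764..2785 cluster is the plug flush path: it pops requests off the plug list, counts them, and reports the count as the unplug depth in the tracepoint. A condensed sketch of the counting loop, keeping the trace_block_unplug() call from the listing and hedging the list helpers (the pop/add names are illustrative):

    unsigned int depth = 0;
    struct request *rq;

    while ((rq = sketch_pop_plugged(plug)) != NULL) {   /* hypothetical pop */
            sketch_add_to_batch(&batch, rq);            /* hypothetical */
            depth++;
    }
    trace_block_unplug(this_hctx->queue, depth, !from_sched);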
3415 unsigned int hctx_idx, unsigned int depth)
3432 left = rq_size * depth;
3434 for (i = 0; i < depth; ) {
3468 to_do = min(entries_per_page, depth - i);
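At 3415..3468, blk_mq_alloc_rqs() carves `depth` fixed-size request structures out of page-sized chunks: `left` tracks the bytes still needed, and each chunk yields at most `entries_per_page` requests. A standalone userspace rendering of the same chunking arithmetic; the sizes and names are illustrative, not the kernel's:

    #include <stdio.h>

    int main(void)
    {
            unsigned int depth = 62, rq_size = 384;  /* example sizes */
            unsigned int chunk = 4096;               /* one page */
            unsigned int left = rq_size * depth;     /* bytes still needed */
            unsigned int i;

            for (i = 0; i < depth; ) {
                    unsigned int entries_per_page = chunk / rq_size;
                    unsigned int to_do = entries_per_page < depth - i ?
                                         entries_per_page : depth - i;

                    printf("chunk fits %u, initializing %u (left %u bytes)\n",
                           entries_per_page, to_do, left);
                    left -= to_do * rq_size;
                    i += to_do;
            }
            return 0;
    }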
3820 unsigned int depth)
3825 tags = blk_mq_alloc_rq_map(set, hctx_idx, depth, set->reserved_tags);
3829 ret = blk_mq_alloc_rqs(set, tags, hctx_idx, depth);
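3820..3829 show blk_mq_alloc_map_and_rqs() as a two-step allocation: the tag map first, then the request structures backed by it. The grep omits the failure path; a sketch of the full shape, where the unwind via blk_mq_free_rq_map() is my assumption about the unmatched lines:

    tags = blk_mq_alloc_rq_map(set, hctx_idx, depth, set->reserved_tags);
    if (!tags)
            return NULL;

    ret = blk_mq_alloc_rqs(set, tags, hctx_idx, depth);
    if (ret) {
            blk_mq_free_rq_map(tags);  /* assumed unwind of step one */
            return NULL;
    }
    return tags;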
4370 * may reduce the depth asked for, if memory is tight. set->queue_depth
4371 * will be updated to reflect the allocated depth.
4375 unsigned int depth;
4378 depth = set->queue_depth;
4396 if (depth != set->queue_depth)
4397 pr_info("blk-mq: reduced tag depth (%u -> %u)\n",
4398 depth, set->queue_depth);
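The 4370..4398 cluster documents the fallback policy: start at set->queue_depth and, when allocation fails because memory is tight, halve the depth and retry until it either succeeds or hits a floor. A sketch of that loop; the halving and the reserved_tags + BLK_MQ_TAG_MIN floor follow the kernel's pattern, while the inner allocation helper is illustrative:

    unsigned int depth = set->queue_depth;
    int err;

    do {
            err = sketch_alloc_all_rq_maps(set);   /* hypothetical step */
            if (!err)
                    break;

            set->queue_depth >>= 1;                /* memory tight: halve */
            if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) {
                    err = -ENOMEM;
                    break;
            }
    } while (set->queue_depth);

    if (depth != set->queue_depth)
            pr_info("blk-mq: reduced tag depth (%u -> %u)\n",
                    depth, set->queue_depth);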
4477 * requested depth down, if it's too large. In that case, the set
4500 pr_info("blk-mq: reduced tag depth to %u\n",
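4477 and 4500 are the other depth adjustment, made up front when the tag set is created: a requested depth above the supported maximum is clamped rather than rejected, with a pr_info noting the new value. A sketch, assuming the BLK_MQ_MAX_DEPTH limit that pairs with this message:

    if (set->queue_depth > BLK_MQ_MAX_DEPTH) {
            pr_info("blk-mq: reduced tag depth to %u\n",
                    BLK_MQ_MAX_DEPTH);
            set->queue_depth = BLK_MQ_MAX_DEPTH;
    }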
4644 * queue depth. This is similar to what the old code would do.