Lines Matching refs:depth
171 * q_usage_counter, otherwise this increases the freeze depth
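The match at 171 appears to be the comment above the queue-freeze path: the first freeze is the one that actually blocks q_usage_counter, and any further freeze only bumps a nesting depth, with the queue unblocked again only when the depth drops back to zero. A minimal userspace sketch of that nesting pattern, assuming exactly that behaviour; the names (queue_freeze, queue_unfreeze, usage_blocked) are illustrative, not the kernel's:

#include <stdio.h>

/* Illustrative stand-in for a request queue with a freeze nesting depth. */
struct queue {
	int mq_freeze_depth;	/* how many nested freezes are outstanding */
	int usage_blocked;	/* stand-in for the blocked q_usage_counter */
};

/* The first freeze blocks new users; later freezes only bump the depth. */
static void queue_freeze(struct queue *q)
{
	if (q->mq_freeze_depth++ == 0)
		q->usage_blocked = 1;
}

/* Only the unfreeze that drops the depth back to zero unblocks users. */
static void queue_unfreeze(struct queue *q)
{
	if (--q->mq_freeze_depth == 0)
		q->usage_blocked = 0;
}

int main(void)
{
	struct queue q = { 0, 0 };

	queue_freeze(&q);	/* depth 0 -> 1: blocks new requests */
	queue_freeze(&q);	/* depth 1 -> 2: no further effect   */
	queue_unfreeze(&q);	/* depth 2 -> 1: still blocked       */
	printf("blocked=%d depth=%d\n", q.usage_blocked, q.mq_freeze_depth);
	queue_unfreeze(&q);	/* depth 1 -> 0: unblocked again     */
	printf("blocked=%d depth=%d\n", q.usage_blocked, q.mq_freeze_depth);
	return 0;
}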
370 /* alloc_time includes depth and tag waits */
457 /* alloc_time includes depth and tag waits */
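The two "alloc_time includes depth and tag waits" comments (370, 457) suggest the allocation start time is sampled before any sleeping on queue depth or tag availability, so those waits are charged to the allocation latency. A small sketch of that measurement idea; wait_for_free_tag() is an illustrative stand-in, not a kernel function:

#include <stdio.h>
#include <time.h>

static long long now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (long long)ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

/* Pretend we had to sleep until another request freed a tag. */
static void wait_for_free_tag(void)
{
	struct timespec d = { 0, 2 * 1000 * 1000 };	/* 2 ms */

	nanosleep(&d, NULL);
}

int main(void)
{
	long long alloc_time = now_ns();	/* sampled *before* the waits */

	wait_for_free_tag();
	printf("alloc latency including tag wait: %lld ns\n",
	       now_ns() - alloc_time);
	return 0;
}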
1943 unsigned int depth = 1;
1950 depth++;
1954 trace_block_unplug(head_rq->q, depth, !from_schedule);
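The matches at 1943-1954 look like the plug-flush path: the requests sitting on a plug list are counted per queue, and that count is passed to trace_block_unplug() as the unplug depth. A minimal userspace sketch of that counting over a simple linked list; all types and helpers here are illustrative stand-ins, not the kernel's:

#include <stdio.h>
#include <stddef.h>

struct queue { int id; };
struct request {
	struct queue *q;
	struct request *next;
};

/* Stand-in for trace_block_unplug(): report how many requests were
 * queued for one queue before the plug was flushed. */
static void trace_unplug(struct queue *q, unsigned int depth)
{
	printf("unplug queue %d, depth %u\n", q->id, depth);
}

/* Count the run of plugged requests that share the head request's
 * queue, mirroring the depth counting around the match above. */
static void flush_plug_list(struct request *head)
{
	while (head) {
		unsigned int depth = 1;
		struct request *rq = head->next;

		while (rq && rq->q == head->q) {	/* same queue: extend the batch */
			depth++;
			rq = rq->next;
		}
		trace_unplug(head->q, depth);
		head = rq;	/* the next batch starts at a different queue */
	}
}

int main(void)
{
	struct queue qa = { 0 }, qb = { 1 };
	struct request r3 = { &qb, NULL };
	struct request r2 = { &qa, &r3 };
	struct request r1 = { &qa, &r2 };

	flush_plug_list(&r1);	/* depth 2 for queue 0, then depth 1 for queue 1 */
	return 0;
}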
2444 unsigned int hctx_idx, unsigned int depth)
2462 left = rq_size * depth;
2464 for (i = 0; i < depth; ) {
2498 to_do = min(entries_per_page, depth - i);
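The matches at 2444-2498 point at the request-map allocation: "depth" request slots of rq_size bytes each are carved out of backing pages, with at most entries_per_page slots taken from one page so no slot straddles a page boundary. A self-contained sketch of that arithmetic, assuming fixed 4 KiB pages; the names are illustrative and error/cleanup handling is omitted:

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE	4096u

/* Lay out "depth" slots of "rq_size" bytes page by page, never
 * splitting a slot across two pages. */
static void **alloc_rq_slots(unsigned int rq_size, unsigned int depth)
{
	void **slots = calloc(depth, sizeof(*slots));
	unsigned int i = 0;

	if (!slots)
		return NULL;

	while (i < depth) {
		unsigned int entries_per_page = PAGE_SIZE / rq_size;
		unsigned int remaining = depth - i;
		unsigned int to_do = entries_per_page < remaining ?
				     entries_per_page : remaining;
		char *page = malloc(PAGE_SIZE);	/* one backing page per batch */
		unsigned int j;

		if (!page)
			break;	/* a caller could retry with a smaller depth */
		for (j = 0; j < to_do; j++)
			slots[i + j] = page + j * rq_size;
		i += to_do;
	}
	return slots;
}

int main(void)
{
	unsigned int rq_size = 320, depth = 64;
	void **slots = alloc_rq_slots(rq_size, depth);

	if (slots)
		printf("%u slots of %u bytes, %u per %u-byte page\n",
		       depth, rq_size, PAGE_SIZE / rq_size, PAGE_SIZE);
	return 0;
}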
3147 * Helper for setting up a queue with mq ops, given queue depth, and
3392 * may reduce the depth asked for, if memory is tight. set->queue_depth
3393 * will be updated to reflect the allocated depth.
3397 unsigned int depth;
3400 depth = set->queue_depth;
3418 if (depth != set->queue_depth)
3419 pr_info("blk-mq: reduced tag depth (%u -> %u)\n",
3420 depth, set->queue_depth);
3488 * requested depth down, if it's too large. In that case, the set
3511 pr_info("blk-mq: reduced tag depth to %u\n",
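The remaining matches around 3392-3420 and 3488-3511 describe the fallback when memory is tight: the requested tag depth may be reduced, set->queue_depth is updated to the depth that was actually allocated, and a "reduced tag depth" message is logged. A rough sketch of that halve-and-retry shape; alloc_rq_maps() and set_up_tag_set() are illustrative names, and the minimum depth of 4 is a stand-in, not the kernel's limit:

#include <stdio.h>
#include <stdbool.h>

/* Stand-in for allocating the per-hctx request maps at a given depth;
 * pretend anything larger than 64 fails due to memory pressure. */
static bool alloc_rq_maps(unsigned int depth)
{
	return depth <= 64;
}

/* Keep halving the requested depth until the allocation succeeds or
 * the depth becomes too small to be useful, then report any reduction
 * back through *queue_depth, like the pr_info() matches above. */
static int set_up_tag_set(unsigned int *queue_depth)
{
	unsigned int requested = *queue_depth;

	while (!alloc_rq_maps(*queue_depth)) {
		*queue_depth >>= 1;
		if (*queue_depth < 4)
			return -1;	/* give up: too shallow to be useful */
	}

	if (*queue_depth != requested)
		printf("reduced tag depth (%u -> %u)\n", requested, *queue_depth);
	return 0;
}

int main(void)
{
	unsigned int depth = 256;

	if (set_up_tag_set(&depth) == 0)
		printf("final queue depth: %u\n", depth);
	return 0;
}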
3628 * queue depth. This is similar to what the old code would do.