Lines matching references to q_depth (Linux NVMe PCIe driver, drivers/nvme/host/pci.c)
35 #define SQ_SIZE(q) ((q)->q_depth << (q)->sqes)
36 #define CQ_SIZE(q) ((q)->q_depth * sizeof(struct nvme_completion))
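The two macros above size the per-queue DMA buffers: a submission queue entry is 1 << sqes bytes (sqes is 6 for the standard 64-byte NVMe SQE), while a completion queue entry is the fixed 16-byte struct nvme_completion. A minimal sketch of the arithmetic, with demo_* names standing in for the driver's structures:

    #include <stdio.h>

    struct demo_completion { unsigned char bytes[16]; }; /* struct nvme_completion is 16 bytes */

    struct demo_queue {
            unsigned int  q_depth; /* entries per queue */
            unsigned char sqes;    /* log2 of the SQ entry size; 6 -> 64-byte SQEs */
    };

    #define SQ_SIZE(q) ((q)->q_depth << (q)->sqes)
    #define CQ_SIZE(q) ((q)->q_depth * sizeof(struct demo_completion))

    int main(void)
    {
            struct demo_queue q = { .q_depth = 1024, .sqes = 6 };

            printf("SQ bytes: %u\n", SQ_SIZE(&q));  /* 1024 << 6 = 65536 */
            printf("CQ bytes: %zu\n", CQ_SIZE(&q)); /* 1024 * 16 = 16384 */
            return 0;
    }

The shift mirrors how the spec encodes the SQ entry size as a power of two (CC.IOSQES is a log2 value), so q_depth << sqes is the whole ring in bytes.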
130 u32 q_depth;
199 u32 q_depth;
476 if (next_tail == nvmeq->q_depth)
493 if (++nvmeq->sq_tail == nvmeq->q_depth)
1050 if (tmp == nvmeq->q_depth) {
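The matches at 476, 493, and 1050 are the ring wrap-arounds: q_depth (declared per device at 130 and per queue at 199) bounds the SQ tail and CQ head indices, which reset to slot 0 on reaching it, and the CQ side also flips the phase tag so completions the controller writes on the next lap are distinguishable from stale entries. A sketch of both advances, assuming a trimmed-down demo_ring in place of struct nvme_queue:

    struct demo_ring {
            unsigned short sq_tail;
            unsigned short cq_head;
            unsigned char  cq_phase;
            unsigned int   q_depth;
    };

    /* Advance the SQ tail; wrap back to slot 0 once it reaches q_depth. */
    static void demo_advance_sq_tail(struct demo_ring *q)
    {
            if (++q->sq_tail == q->q_depth)
                    q->sq_tail = 0;
    }

    /* Advance the CQ head; on wrap, also toggle the phase tag so entries
     * the controller writes on the next lap read as new. */
    static void demo_update_cq_head(struct demo_ring *q)
    {
            unsigned int tmp = q->cq_head + 1;

            if (tmp == q->q_depth) {
                    q->cq_head = 0;
                    q->cq_phase ^= 1;
            } else {
                    q->cq_head = tmp;
            }
    }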
1172 c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
1201 c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
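The qsize fields at 1172 and 1201 go into the Create I/O CQ/SQ admin commands, which per the NVMe spec take a 0's-based queue size; a queue with q_depth slots is therefore advertised as q_depth - 1. A hedged one-liner of the conversion (demo_cpu_to_le16() stands in for the kernel's cpu_to_le16()):

    #include <stdint.h>

    /* Assumes a little-endian host; the kernel uses cpu_to_le16() here. */
    static uint16_t demo_cpu_to_le16(uint16_t v) { return v; }

    /* Queue size as the Create I/O SQ/CQ commands want it: 0's-based. */
    static uint16_t demo_qsize_field(uint32_t q_depth)
    {
            return demo_cpu_to_le16((uint16_t)(q_depth - 1));
    }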
1471 int q_depth = dev->q_depth;
1472 unsigned q_size_aligned = roundup(q_depth * entry_size,
1479 q_depth = div_u64(mem_per_q, entry_size);
1482 * Ensure the reduced q_depth is above some threshold where it
1486 if (q_depth < 64)
1490 return q_depth;
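Lines 1471-1490 are from the helper that shrinks the queue depth when submission queues live in the controller memory buffer (CMB): if the requested depth times the entry size, page-aligned, does not fit for all I/O queues, each queue gets an even share of the CMB and the depth is recomputed from that share; below 64 entries the driver gives up and keeps the queues in host memory at the original depth. A simplified reconstruction, with the page size and rounding helpers hard-coded for the sketch:

    #include <stdint.h>

    #define DEMO_PAGE_SIZE 4096u

    static uint64_t demo_round_up(uint64_t v, uint64_t a)   { return (v + a - 1) / a * a; }
    static uint64_t demo_round_down(uint64_t v, uint64_t a) { return v / a * a; }

    static int demo_cmb_qdepth(uint64_t cmb_size, int nr_io_queues,
                               int q_depth, int entry_size)
    {
            uint64_t q_size_aligned = demo_round_up((uint64_t)q_depth * entry_size,
                                                    DEMO_PAGE_SIZE);

            if (q_size_aligned * nr_io_queues > cmb_size) {
                    /* Split the CMB evenly; size each queue to its share. */
                    uint64_t mem_per_q = demo_round_down(cmb_size / nr_io_queues,
                                                         DEMO_PAGE_SIZE);

                    q_depth = (int)(mem_per_q / entry_size);

                    /* Below this threshold, host-memory queues at the
                     * original depth are the better trade-off. */
                    if (q_depth < 64)
                            return -1; /* the driver returns -ENOMEM */
            }
            return q_depth;
    }

The result feeds back into dev->q_depth at 2297 below.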
1527 nvmeq->q_depth = depth;
1751 aqa = nvmeq->q_depth - 1;
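The admin queue is not created by command; its size is programmed through the AQA register during controller init. Both the admin SQ size (bits 11:0) and admin CQ size (bits 27:16) are 0's-based, which is why the same q_depth - 1 value gets duplicated into both halves. A sketch, with demo_writel() standing in for the MMIO write to the AQA offset in BAR0:

    #include <stdint.h>

    /* Stand-in for writel() to the AQA offset in BAR0. */
    static void demo_writel(uint32_t val, volatile uint32_t *reg) { *reg = val; }

    static void demo_program_aqa(volatile uint32_t *aqa_reg, uint32_t q_depth)
    {
            uint32_t aqa = q_depth - 1;     /* 0's-based size */

            aqa |= aqa << 16;               /* same depth for admin SQ and CQ */
            demo_writel(aqa, aqa_reg);
    }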
1780 if (nvme_alloc_queue(dev, i, dev->q_depth)) {
2297 dev->q_depth = result;
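1527 stores the chosen depth into the per-queue structure at allocation time, 1780 creates every I/O queue with the single device-wide dev->q_depth, and 2297 writes back the CMB-reduced value computed by the helper above. A rough sketch of that allocation flow (fixed queue-array size and stubbed-out DMA allocation; not the driver's real signatures):

    struct demo_nvmeq { unsigned int q_depth; /* ... SQ/CQ buffers ... */ };

    struct demo_dev {
            unsigned int q_depth;
            struct demo_nvmeq queues[32];   /* fixed count, sketch only */
    };

    static int demo_alloc_queue(struct demo_dev *dev, int qid, unsigned int depth)
    {
            dev->queues[qid].q_depth = depth;       /* cf. 1527 */
            /* SQ_SIZE()/CQ_SIZE() bytes of DMA memory would be allocated here. */
            return 0;
    }

    static int demo_create_io_queues(struct demo_dev *dev, int nr)
    {
            for (int i = 1; i <= nr; i++)   /* qid 0 is the admin queue; nr <= 31 assumed */
                    if (demo_alloc_queue(dev, i, dev->q_depth)) /* cf. 1780 */
                            return -1;
            return 0;
    }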
2494 dev->q_depth = min_t(u32, NVME_CAP_MQES(dev->ctrl.cap) + 1,
2514 dev->q_depth = 2;
2517 dev->q_depth);
2521 dev->q_depth = 64;
2523 "set queue depth=%u\n", dev->q_depth);
2531 (dev->q_depth < (NVME_AQ_DEPTH + 2))) {
2532 dev->q_depth = NVME_AQ_DEPTH + 2;
2534 dev->q_depth);
2536 dev->ctrl.sqsize = dev->q_depth - 1; /* 0's based queue depth */
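The final cluster is the depth negotiation at probe time: CAP.MQES is 0's-based, so the starting point at 2494 is MQES + 1 capped by the io_queue_depth module parameter; 2514 and 2521 are per-device quirk overrides that pin known-broken controllers to fixed depths (2 and 64 in the listing); 2531-2532 guarantee room for admin commands on controllers that share one tag set between the admin and I/O queues; and 2536 converts back to the 0's-based sqsize the core expects. A condensed sketch with the quirks reduced to a flag:

    #include <stdint.h>

    #define DEMO_AQ_DEPTH 32        /* stand-in for NVME_AQ_DEPTH */

    struct demo_caps {
            uint64_t cap;                   /* controller CAP register */
            uint32_t io_queue_depth;        /* module-parameter cap */
            int      shared_tags_quirk;
    };

    static uint32_t demo_cap_mqes(uint64_t cap) { return cap & 0xffff; }

    static uint32_t demo_pick_q_depth(const struct demo_caps *c)
    {
            /* MQES is 0's-based, hence the +1; then cap by the module param. */
            uint32_t q_depth = demo_cap_mqes(c->cap) + 1;

            if (q_depth > c->io_queue_depth)
                    q_depth = c->io_queue_depth;

            /* Quirked controllers would be pinned to 2 or 64 here. */

            /* Shared admin/IO tag sets need headroom for admin commands. */
            if (c->shared_tags_quirk && q_depth < DEMO_AQ_DEPTH + 2)
                    q_depth = DEMO_AQ_DEPTH + 2;

            return q_depth;     /* ctrl.sqsize then becomes q_depth - 1 */
    }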