Lines Matching refs:nvmeq

228 struct nvme_queue *nvmeq;
286 struct nvme_queue *nvmeq, int qid)
291 nvmeq->dbbuf_sq_db = &dev->dbbuf_dbs[sq_idx(qid, dev->db_stride)];
292 nvmeq->dbbuf_cq_db = &dev->dbbuf_dbs[cq_idx(qid, dev->db_stride)];
293 nvmeq->dbbuf_sq_ei = &dev->dbbuf_eis[sq_idx(qid, dev->db_stride)];
294 nvmeq->dbbuf_cq_ei = &dev->dbbuf_eis[cq_idx(qid, dev->db_stride)];
297 static void nvme_dbbuf_free(struct nvme_queue *nvmeq)
299 if (!nvmeq->qid)
302 nvmeq->dbbuf_sq_db = NULL;
303 nvmeq->dbbuf_cq_db = NULL;
304 nvmeq->dbbuf_sq_ei = NULL;
305 nvmeq->dbbuf_cq_ei = NULL;
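
The dbbuf lines above (291-294) index dev->dbbuf_dbs and dev->dbbuf_eis with sq_idx() and cq_idx(), which are not part of this listing. A minimal standalone sketch of the assumed layout, with each queue's SQ and CQ shadow-doorbell slots interleaved and scaled by db_stride; the helper names mirror the driver but this demo program is illustrative, not driver code:

#include <stdio.h>

/* Assumed layout of the dbbuf arrays: one 32-bit slot per doorbell, a
 * queue's SQ slot followed by its CQ slot, spacing scaled by the
 * controller's doorbell stride (db_stride). */
static unsigned int sq_idx(unsigned int qid, unsigned int stride)
{
	return qid * 2 * stride;
}

static unsigned int cq_idx(unsigned int qid, unsigned int stride)
{
	return (qid * 2 + 1) * stride;
}

int main(void)
{
	unsigned int stride = 1;	/* db_stride == 1 when CAP.DSTRD == 0 */

	for (unsigned int qid = 0; qid <= 3; qid++)
		printf("qid %u: sq slot %u, cq slot %u\n",
		       qid, sq_idx(qid, stride), cq_idx(qid, stride));
	return 0;
}
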
394 struct nvme_queue *nvmeq = &dev->queues[0];
399 hctx->driver_data = nvmeq;
407 struct nvme_queue *nvmeq = &dev->queues[hctx_idx + 1];
410 hctx->driver_data = nvmeq;
420 struct nvme_queue *nvmeq = &dev->queues[queue_idx];
422 BUG_ON(!nvmeq);
423 iod->nvmeq = nvmeq;
472 static inline void nvme_write_sq_db(struct nvme_queue *nvmeq, bool write_sq)
475 u16 next_tail = nvmeq->sq_tail + 1;
477 if (next_tail == nvmeq->q_depth)
479 if (next_tail != nvmeq->last_sq_tail)
483 if (nvme_dbbuf_update_and_check_event(nvmeq->sq_tail,
484 nvmeq->dbbuf_sq_db, nvmeq->dbbuf_sq_ei))
485 writel(nvmeq->sq_tail, nvmeq->q_db);
486 nvmeq->last_sq_tail = nvmeq->sq_tail;
491 * @nvmeq: The queue to use
495 static void nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd,
498 spin_lock(&nvmeq->sq_lock);
499 memcpy(nvmeq->sq_cmds + (nvmeq->sq_tail << nvmeq->sqes),
501 if (++nvmeq->sq_tail == nvmeq->q_depth)
502 nvmeq->sq_tail = 0;
503 nvme_write_sq_db(nvmeq, write_sq);
504 spin_unlock(&nvmeq->sq_lock);
509 struct nvme_queue *nvmeq = hctx->driver_data;
511 spin_lock(&nvmeq->sq_lock);
512 if (nvmeq->sq_tail != nvmeq->last_sq_tail)
513 nvme_write_sq_db(nvmeq, true);
514 spin_unlock(&nvmeq->sq_lock);
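
The submission-path fragments above (nvme_write_sq_db, nvme_submit_cmd, and the commit_rqs path at 509-514) implement a deferred doorbell: each command advances sq_tail, but the SQ tail doorbell is only written when the caller flags the last request of a batch or when the next slot would catch up with last_sq_tail. A simplified userspace model of that logic, with the command copy, locking, and the dbbuf check dropped; struct sq_model and the printf stand-in for writel() are illustrative:

#include <stdint.h>
#include <stdio.h>

/* Simplified model of the submission-side state in struct nvme_queue:
 * sq_tail is where the next command goes, last_sq_tail is the value the
 * device was last told about via the doorbell register. */
struct sq_model {
	uint16_t sq_tail;
	uint16_t last_sq_tail;
	uint16_t q_depth;
};

/* Mirror of nvme_write_sq_db(): only touch the doorbell when asked to
 * (write_sq) or when the ring is about to catch up with last_sq_tail. */
static void write_sq_db(struct sq_model *q, int write_sq)
{
	if (!write_sq) {
		uint16_t next_tail = q->sq_tail + 1;

		if (next_tail == q->q_depth)
			next_tail = 0;
		if (next_tail != q->last_sq_tail)
			return;
	}
	printf("doorbell write: tail=%u\n", q->sq_tail);	/* stands in for writel() */
	q->last_sq_tail = q->sq_tail;
}

/* Mirror of nvme_submit_cmd(): memcpy of the command omitted, tail wraps
 * at q_depth. */
static void submit_cmd(struct sq_model *q, int last)
{
	if (++q->sq_tail == q->q_depth)
		q->sq_tail = 0;
	write_sq_db(q, last);
}

int main(void)
{
	struct sq_model q = { .q_depth = 4 };

	submit_cmd(&q, 0);	/* batched: no doorbell yet */
	submit_cmd(&q, 0);
	submit_cmd(&q, 1);	/* bd->last: flush the tail to the device */
	return 0;
}
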
533 if (!iod->nvmeq->qid)
851 if (iod->nvmeq->qid && sgl_threshold &&
912 struct nvme_queue *nvmeq = hctx->driver_data;
913 struct nvme_dev *dev = nvmeq->dev;
927 if (unlikely(!test_bit(NVMEQ_ENABLED, &nvmeq->flags)))
947 nvme_submit_cmd(nvmeq, cmnd, bd->last);
960 struct nvme_dev *dev = iod->nvmeq->dev;
972 static inline bool nvme_cqe_pending(struct nvme_queue *nvmeq)
974 struct nvme_completion *hcqe = &nvmeq->cqes[nvmeq->cq_head];
976 return (le16_to_cpu(READ_ONCE(hcqe->status)) & 1) == nvmeq->cq_phase;
979 static inline void nvme_ring_cq_doorbell(struct nvme_queue *nvmeq)
981 u16 head = nvmeq->cq_head;
983 if (nvme_dbbuf_update_and_check_event(head, nvmeq->dbbuf_cq_db,
984 nvmeq->dbbuf_cq_ei))
985 writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
988 static inline struct blk_mq_tags *nvme_queue_tagset(struct nvme_queue *nvmeq)
990 if (!nvmeq->qid)
991 return nvmeq->dev->admin_tagset.tags[0];
992 return nvmeq->dev->tagset.tags[nvmeq->qid - 1];
995 static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
997 struct nvme_completion *cqe = &nvmeq->cqes[idx];
1007 if (unlikely(nvme_is_aen_req(nvmeq->qid, command_id))) {
1008 nvme_complete_async_event(&nvmeq->dev->ctrl,
1013 req = nvme_find_rq(nvme_queue_tagset(nvmeq), command_id);
1015 dev_warn(nvmeq->dev->ctrl.device,
1021 trace_nvme_sq(req, cqe->sq_head, nvmeq->sq_tail);
1026 static inline void nvme_update_cq_head(struct nvme_queue *nvmeq)
1028 u32 tmp = nvmeq->cq_head + 1;
1030 if (tmp == nvmeq->q_depth) {
1031 nvmeq->cq_head = 0;
1032 nvmeq->cq_phase ^= 1;
1034 nvmeq->cq_head = tmp;
1038 static inline int nvme_process_cq(struct nvme_queue *nvmeq)
1042 while (nvme_cqe_pending(nvmeq)) {
1049 nvme_handle_cqe(nvmeq, nvmeq->cq_head);
1050 nvme_update_cq_head(nvmeq);
1054 nvme_ring_cq_doorbell(nvmeq);
1060 struct nvme_queue *nvmeq = data;
1068 if (nvme_process_cq(nvmeq))
1077 struct nvme_queue *nvmeq = data;
1079 if (nvme_cqe_pending(nvmeq))
1088 static void nvme_poll_irqdisable(struct nvme_queue *nvmeq)
1090 struct pci_dev *pdev = to_pci_dev(nvmeq->dev->dev);
1092 WARN_ON_ONCE(test_bit(NVMEQ_POLLED, &nvmeq->flags));
1094 disable_irq(pci_irq_vector(pdev, nvmeq->cq_vector));
1095 nvme_process_cq(nvmeq);
1096 enable_irq(pci_irq_vector(pdev, nvmeq->cq_vector));
1101 struct nvme_queue *nvmeq = hctx->driver_data;
1104 if (!nvme_cqe_pending(nvmeq))
1107 spin_lock(&nvmeq->cq_poll_lock);
1108 found = nvme_process_cq(nvmeq);
1109 spin_unlock(&nvmeq->cq_poll_lock);
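
The completion-path fragments above (nvme_cqe_pending, nvme_update_cq_head, nvme_process_cq, nvme_ring_cq_doorbell, and the irq/poll entry points) rely on the NVMe phase bit: bit 0 of each CQ entry's status flips every time the controller wraps the queue, so an entry is new only while that bit matches cq_phase, and the driver flips its expected phase when cq_head wraps. A compact userspace model of that loop, with request lookup, barriers, and the real doorbell write omitted; struct cq_model and cqe_model are illustrative:

#include <stdint.h>
#include <stdio.h>

/* Simplified completion-queue state: the controller posts entries whose
 * phase bit alternates on every wrap, so the driver needs no counter. */
struct cqe_model {
	uint16_t status;	/* bit 0 is the phase bit */
};

struct cq_model {
	struct cqe_model cqes[4];
	uint16_t cq_head;
	uint16_t q_depth;
	uint8_t cq_phase;
};

/* Mirror of nvme_cqe_pending(): an entry is valid while its phase bit
 * matches the phase the driver expects. */
static int cqe_pending(struct cq_model *q)
{
	return (q->cqes[q->cq_head].status & 1) == q->cq_phase;
}

/* Mirror of nvme_update_cq_head(): flip the expected phase on wrap. */
static void update_cq_head(struct cq_model *q)
{
	uint16_t tmp = q->cq_head + 1;

	if (tmp == q->q_depth) {
		q->cq_head = 0;
		q->cq_phase ^= 1;
	} else {
		q->cq_head = tmp;
	}
}

/* Mirror of the nvme_process_cq() loop, with nvme_handle_cqe() dropped
 * and the CQ head doorbell replaced by a printf. */
static int process_cq(struct cq_model *q)
{
	int found = 0;

	while (cqe_pending(q)) {
		found++;
		update_cq_head(q);
	}
	if (found)
		printf("ring CQ doorbell: head=%u\n", q->cq_head);
	return found;
}

int main(void)
{
	struct cq_model q = { .q_depth = 4, .cq_phase = 1 };

	/* Pretend the controller posted two completions in phase 1. */
	q.cqes[0].status = 1;
	q.cqes[1].status = 1;
	printf("processed %d completions\n", process_cq(&q));
	return 0;
}
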
1117 struct nvme_queue *nvmeq = &dev->queues[0];
1123 nvme_submit_cmd(nvmeq, &c, true);
1138 struct nvme_queue *nvmeq, s16 vector)
1143 if (!test_bit(NVMEQ_POLLED, &nvmeq->flags))
1152 c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr);
1154 c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
1162 struct nvme_queue *nvmeq)
1182 c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr);
1184 c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
1204 struct nvme_queue *nvmeq = iod->nvmeq;
1206 dev_warn(nvmeq->dev->ctrl.device,
1208 atomic_inc(&nvmeq->dev->ctrl.abort_limit);
1258 struct nvme_queue *nvmeq = iod->nvmeq;
1259 struct nvme_dev *dev = nvmeq->dev;
1284 if (test_bit(NVMEQ_POLLED, &nvmeq->flags))
1287 nvme_poll_irqdisable(nvmeq);
1292 req->tag, nvmeq->qid);
1309 req->tag, nvmeq->qid);
1324 if (!nvmeq->qid || iod->aborted) {
1327 req->tag, nvmeq->qid);
1344 cmd.abort.sqid = cpu_to_le16(nvmeq->qid);
1346 dev_warn(nvmeq->dev->ctrl.device,
1348 req->tag, nvmeq->qid);
1368 static void nvme_free_queue(struct nvme_queue *nvmeq)
1370 dma_free_coherent(nvmeq->dev->dev, CQ_SIZE(nvmeq),
1371 (void *)nvmeq->cqes, nvmeq->cq_dma_addr);
1372 if (!nvmeq->sq_cmds)
1375 if (test_and_clear_bit(NVMEQ_SQ_CMB, &nvmeq->flags)) {
1376 pci_free_p2pmem(to_pci_dev(nvmeq->dev->dev),
1377 nvmeq->sq_cmds, SQ_SIZE(nvmeq));
1379 dma_free_coherent(nvmeq->dev->dev, SQ_SIZE(nvmeq),
1380 nvmeq->sq_cmds, nvmeq->sq_dma_addr);
1396 * @nvmeq: queue to suspend
1398 static int nvme_suspend_queue(struct nvme_queue *nvmeq)
1400 if (!test_and_clear_bit(NVMEQ_ENABLED, &nvmeq->flags))
1406 nvmeq->dev->online_queues--;
1407 if (!nvmeq->qid && nvmeq->dev->ctrl.admin_q)
1408 nvme_stop_admin_queue(&nvmeq->dev->ctrl);
1409 if (!test_and_clear_bit(NVMEQ_POLLED, &nvmeq->flags))
1410 pci_free_irq(to_pci_dev(nvmeq->dev->dev), nvmeq->cq_vector, nvmeq);
1424 struct nvme_queue *nvmeq = &dev->queues[0];
1431 nvme_poll_irqdisable(nvmeq);
1476 static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
1482 nvmeq->sq_cmds = pci_alloc_p2pmem(pdev, SQ_SIZE(nvmeq));
1483 if (nvmeq->sq_cmds) {
1484 nvmeq->sq_dma_addr = pci_p2pmem_virt_to_bus(pdev,
1485 nvmeq->sq_cmds);
1486 if (nvmeq->sq_dma_addr) {
1487 set_bit(NVMEQ_SQ_CMB, &nvmeq->flags);
1491 pci_free_p2pmem(pdev, nvmeq->sq_cmds, SQ_SIZE(nvmeq));
1495 nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(nvmeq),
1496 &nvmeq->sq_dma_addr, GFP_KERNEL);
1497 if (!nvmeq->sq_cmds)
1504 struct nvme_queue *nvmeq = &dev->queues[qid];
1509 nvmeq->sqes = qid ? dev->io_sqes : NVME_ADM_SQES;
1510 nvmeq->q_depth = depth;
1511 nvmeq->cqes = dma_alloc_coherent(dev->dev, CQ_SIZE(nvmeq),
1512 &nvmeq->cq_dma_addr, GFP_KERNEL);
1513 if (!nvmeq->cqes)
1516 if (nvme_alloc_sq_cmds(dev, nvmeq, qid))
1519 nvmeq->dev = dev;
1520 spin_lock_init(&nvmeq->sq_lock);
1521 spin_lock_init(&nvmeq->cq_poll_lock);
1522 nvmeq->cq_head = 0;
1523 nvmeq->cq_phase = 1;
1524 nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
1525 nvmeq->qid = qid;
1531 dma_free_coherent(dev->dev, CQ_SIZE(nvmeq), (void *)nvmeq->cqes,
1532 nvmeq->cq_dma_addr);
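
nvme_alloc_queue() above points q_db at &dev->dbs[qid * 2 * dev->db_stride], and the completion path rings q_db + db_stride (line 985). That matches the NVMe register layout in which doorbells start at BAR offset 0x1000 and each queue owns an SQ tail doorbell followed by a CQ head doorbell, spaced by 4 << CAP.DSTRD bytes. A small sketch of the resulting byte offsets; db_stride here is counted in 32-bit slots as in the driver, and the helper names are illustrative:

#include <stdint.h>
#include <stdio.h>

/* Doorbell registers begin at BAR offset 0x1000; slot index times 4
 * bytes gives the offset of a particular 32-bit doorbell. */
static uint32_t sq_doorbell_offset(uint16_t qid, uint32_t db_stride)
{
	return 0x1000 + (qid * 2 * db_stride) * 4;
}

static uint32_t cq_doorbell_offset(uint16_t qid, uint32_t db_stride)
{
	return 0x1000 + ((qid * 2 + 1) * db_stride) * 4;
}

int main(void)
{
	for (uint16_t qid = 0; qid <= 2; qid++)
		printf("qid %u: SQ tail db @ 0x%x, CQ head db @ 0x%x\n",
		       qid, sq_doorbell_offset(qid, 1),
		       cq_doorbell_offset(qid, 1));
	return 0;
}
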
1537 static int queue_request_irq(struct nvme_queue *nvmeq)
1539 struct pci_dev *pdev = to_pci_dev(nvmeq->dev->dev);
1540 int nr = nvmeq->dev->ctrl.instance;
1543 return pci_request_irq(pdev, nvmeq->cq_vector, nvme_irq_check,
1544 nvme_irq, nvmeq, "nvme%dq%d", nr, nvmeq->qid);
1546 return pci_request_irq(pdev, nvmeq->cq_vector, nvme_irq,
1547 NULL, nvmeq, "nvme%dq%d", nr, nvmeq->qid);
1551 static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
1553 struct nvme_dev *dev = nvmeq->dev;
1555 nvmeq->sq_tail = 0;
1556 nvmeq->last_sq_tail = 0;
1557 nvmeq->cq_head = 0;
1558 nvmeq->cq_phase = 1;
1559 nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
1560 memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq));
1561 nvme_dbbuf_init(dev, nvmeq, qid);
1566 static int nvme_create_queue(struct nvme_queue *nvmeq, int qid, bool polled)
1568 struct nvme_dev *dev = nvmeq->dev;
1572 clear_bit(NVMEQ_DELETE_ERROR, &nvmeq->flags);
1581 set_bit(NVMEQ_POLLED, &nvmeq->flags);
1583 result = adapter_alloc_cq(dev, qid, nvmeq, vector);
1587 result = adapter_alloc_sq(dev, qid, nvmeq);
1593 nvmeq->cq_vector = vector;
1594 nvme_init_queue(nvmeq, qid);
1597 result = queue_request_irq(nvmeq);
1602 set_bit(NVMEQ_ENABLED, &nvmeq->flags);
1710 struct nvme_queue *nvmeq;
1733 nvmeq = &dev->queues[0];
1734 aqa = nvmeq->q_depth - 1;
1738 lo_hi_writeq(nvmeq->sq_dma_addr, dev->bar + NVME_REG_ASQ);
1739 lo_hi_writeq(nvmeq->cq_dma_addr, dev->bar + NVME_REG_ACQ);
1745 nvmeq->cq_vector = 0;
1746 nvme_init_queue(nvmeq, 0);
1747 result = queue_request_irq(nvmeq);
1753 set_bit(NVMEQ_ENABLED, &nvmeq->flags);
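
Admin queue bring-up above (1710-1753) programs AQA with the zero-based queue size and writes the admin SQ/CQ DMA addresses to the ASQ and ACQ registers before the controller is enabled. The listing only shows aqa = nvmeq->q_depth - 1; the packing sketched below follows the AQA layout from the NVMe spec (ASQS in bits 11:0, ACQS in bits 27:16), which the driver gets with aqa |= aqa << 16 since both admin queues share one depth. build_aqa() is an illustrative helper, not driver code:

#include <stdint.h>
#include <stdio.h>

/* Sketch of the AQA (Admin Queue Attributes) encoding: both fields hold
 * the queue size minus one, and the driver uses the same depth for the
 * admin SQ and CQ. */
static uint32_t build_aqa(uint16_t depth)
{
	uint32_t aqa = depth - 1;	/* zero-based queue size */

	aqa |= aqa << 16;		/* same size for ACQS and ASQS */
	return aqa;
}

int main(void)
{
	printf("AQA for depth 32: 0x%08x\n", build_aqa(32));
	return 0;
}
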
2253 struct nvme_queue *nvmeq = req->end_io_data;
2256 complete(&nvmeq->delete_done);
2261 struct nvme_queue *nvmeq = req->end_io_data;
2264 set_bit(NVMEQ_DELETE_ERROR, &nvmeq->flags);
2269 static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
2271 struct request_queue *q = nvmeq->dev->ctrl.admin_q;
2277 cmd.delete_queue.qid = cpu_to_le16(nvmeq->qid);
2283 req->end_io_data = nvmeq;
2285 init_completion(&nvmeq->delete_done);
2306 struct nvme_queue *nvmeq = &dev->queues[nr_queues + sent];
2308 timeout = wait_for_completion_io_timeout(&nvmeq->delete_done,