Lines matching refs: nvmeq (references to struct nvme_queue in the NVMe PCI driver, drivers/nvme/host/pci.c). The leading number on each line below is the line number in that source file.
302 struct nvme_queue *nvmeq, int qid)
307 nvmeq->dbbuf_sq_db = &dev->dbbuf_dbs[sq_idx(qid, dev->db_stride)];
308 nvmeq->dbbuf_cq_db = &dev->dbbuf_dbs[cq_idx(qid, dev->db_stride)];
309 nvmeq->dbbuf_sq_ei = &dev->dbbuf_eis[sq_idx(qid, dev->db_stride)];
310 nvmeq->dbbuf_cq_ei = &dev->dbbuf_eis[cq_idx(qid, dev->db_stride)];
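The four assignments at 307-310 carve one submission slot and one completion slot per queue out of the shared shadow-doorbell (dbbuf_dbs) and EventIdx (dbbuf_eis) buffers. Below is a minimal, standalone sketch of that interleaved layout, assuming the same qid * 2 * stride arithmetic the driver's sq_idx()/cq_idx() helpers use; the main() harness and the stride value are illustrative only.

#include <stdio.h>

/* Shadow-doorbell layout model: every queue owns one submission slot and one
 * completion slot, spaced by the doorbell stride the controller reports. */
static unsigned int sq_idx(unsigned int qid, unsigned int stride)
{
        return qid * 2 * stride;          /* SQ shadow slot for this qid */
}

static unsigned int cq_idx(unsigned int qid, unsigned int stride)
{
        return (qid * 2 + 1) * stride;    /* CQ shadow slot, one stride later */
}

int main(void)
{
        unsigned int stride = 1;          /* assumed stride of one entry */
        unsigned int qid;

        for (qid = 0; qid <= 3; qid++)
                printf("qid %u -> sq slot %u, cq slot %u\n",
                       qid, sq_idx(qid, stride), cq_idx(qid, stride));
        return 0;
}

nvme_dbbuf_update_and_check_event() then compares the value written into these shadow slots against the matching EventIdx entry to decide whether the MMIO doorbell writes at 484 and 1002 can be skipped.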
313 static void nvme_dbbuf_free(struct nvme_queue *nvmeq)
315 if (!nvmeq->qid)
318 nvmeq->dbbuf_sq_db = NULL;
319 nvmeq->dbbuf_cq_db = NULL;
320 nvmeq->dbbuf_sq_ei = NULL;
321 nvmeq->dbbuf_cq_ei = NULL;
399 struct nvme_queue *nvmeq = &dev->queues[0];
404 hctx->driver_data = nvmeq;
412 struct nvme_queue *nvmeq = &dev->queues[hctx_idx + 1];
415 hctx->driver_data = nvmeq;
471 static inline void nvme_write_sq_db(struct nvme_queue *nvmeq, bool write_sq)
474 u16 next_tail = nvmeq->sq_tail + 1;
476 if (next_tail == nvmeq->q_depth)
478 if (next_tail != nvmeq->last_sq_tail)
482 if (nvme_dbbuf_update_and_check_event(nvmeq->sq_tail,
483 nvmeq->dbbuf_sq_db, nvmeq->dbbuf_sq_ei))
484 writel(nvmeq->sq_tail, nvmeq->q_db);
485 nvmeq->last_sq_tail = nvmeq->sq_tail;
488 static inline void nvme_sq_copy_cmd(struct nvme_queue *nvmeq,
491 memcpy(nvmeq->sq_cmds + (nvmeq->sq_tail << nvmeq->sqes),
493 if (++nvmeq->sq_tail == nvmeq->q_depth)
494 nvmeq->sq_tail = 0;
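Lines 488-494 (nvme_sq_copy_cmd) place each command at sq_tail << sqes and wrap the tail at q_depth, while lines 471-485 (nvme_write_sq_db) defer the MMIO doorbell write until the last command of a batch. The small userspace model below mirrors that tail/wrap/doorbell logic done under sq_lock; it assumes a fixed 64-byte command, a plain variable in place of the doorbell register, and it leaves out the shadow-doorbell (dbbuf) check. It is not the driver code itself.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define Q_DEPTH 4
#define SQES    6                       /* log2(64): one 64-byte command per slot */

struct sq_model {
        uint8_t  cmds[Q_DEPTH << SQES]; /* stands in for nvmeq->sq_cmds */
        uint16_t tail;                  /* nvmeq->sq_tail */
        uint16_t last_tail;             /* nvmeq->last_sq_tail */
        uint32_t doorbell;              /* stands in for the MMIO SQ tail doorbell */
};

/* Copy one command into the slot selected by the tail, then advance the tail
 * with wrap-around, as nvme_sq_copy_cmd() does. */
static void sq_copy_cmd(struct sq_model *sq, const void *cmd)
{
        memcpy(&sq->cmds[(size_t)sq->tail << SQES], cmd, 1u << SQES);
        if (++sq->tail == Q_DEPTH)
                sq->tail = 0;
}

/* Ring the doorbell only for the last command of a batch, or when deferring
 * any further would hide a nearly full ring from the device. */
static void write_sq_db(struct sq_model *sq, int write_sq)
{
        if (!write_sq) {
                uint16_t next_tail = sq->tail + 1;

                if (next_tail == Q_DEPTH)
                        next_tail = 0;
                if (next_tail != sq->last_tail)
                        return;         /* defer: more commands may follow */
        }
        sq->doorbell = sq->tail;        /* real driver: writel(sq_tail, q_db) */
        sq->last_tail = sq->tail;
}

int main(void)
{
        struct sq_model sq = { .tail = 0 };
        uint8_t cmd[1u << SQES] = { 0 };

        sq_copy_cmd(&sq, cmd);
        write_sq_db(&sq, 0);            /* deferred, no doorbell write */
        sq_copy_cmd(&sq, cmd);
        write_sq_db(&sq, 1);            /* end of batch: doorbell written once */
        printf("tail=%u doorbell=%u\n", (unsigned)sq.tail, (unsigned)sq.doorbell);
        return 0;
}

Deferring the write is what lets the batched path at 903-913 submit an entire rqlist and pay for a single doorbell update.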
499 struct nvme_queue *nvmeq = hctx->driver_data;
501 spin_lock(&nvmeq->sq_lock);
502 if (nvmeq->sq_tail != nvmeq->last_sq_tail)
503 nvme_write_sq_db(nvmeq, true);
504 spin_unlock(&nvmeq->sq_lock);
510 struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
517 if (!nvmeq->qid)
777 struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
785 if (nvmeq->qid && sgl_threshold &&
877 struct nvme_queue *nvmeq = hctx->driver_data;
878 struct nvme_dev *dev = nvmeq->dev;
887 if (unlikely(!test_bit(NVMEQ_ENABLED, &nvmeq->flags)))
896 spin_lock(&nvmeq->sq_lock);
897 nvme_sq_copy_cmd(nvmeq, &iod->cmd);
898 nvme_write_sq_db(nvmeq, bd->last);
899 spin_unlock(&nvmeq->sq_lock);
903 static void nvme_submit_cmds(struct nvme_queue *nvmeq, struct request **rqlist)
905 spin_lock(&nvmeq->sq_lock);
910 nvme_sq_copy_cmd(nvmeq, &iod->cmd);
912 nvme_write_sq_db(nvmeq, true);
913 spin_unlock(&nvmeq->sq_lock);
916 static bool nvme_prep_rq_batch(struct nvme_queue *nvmeq, struct request *req)
922 if (unlikely(!test_bit(NVMEQ_ENABLED, &nvmeq->flags)))
924 if (unlikely(!nvme_check_ready(&nvmeq->dev->ctrl, req, true)))
928 return nvme_prep_rq(nvmeq->dev, req) == BLK_STS_OK;
937 struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
939 if (!nvme_prep_rq_batch(nvmeq, req)) {
951 nvme_submit_cmds(nvmeq, rqlist);
963 struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
964 struct nvme_dev *dev = nvmeq->dev;
989 static inline bool nvme_cqe_pending(struct nvme_queue *nvmeq)
991 struct nvme_completion *hcqe = &nvmeq->cqes[nvmeq->cq_head];
993 return (le16_to_cpu(READ_ONCE(hcqe->status)) & 1) == nvmeq->cq_phase;
996 static inline void nvme_ring_cq_doorbell(struct nvme_queue *nvmeq)
998 u16 head = nvmeq->cq_head;
1000 if (nvme_dbbuf_update_and_check_event(head, nvmeq->dbbuf_cq_db,
1001 nvmeq->dbbuf_cq_ei))
1002 writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
1005 static inline struct blk_mq_tags *nvme_queue_tagset(struct nvme_queue *nvmeq)
1007 if (!nvmeq->qid)
1008 return nvmeq->dev->admin_tagset.tags[0];
1009 return nvmeq->dev->tagset.tags[nvmeq->qid - 1];
1012 static inline void nvme_handle_cqe(struct nvme_queue *nvmeq,
1015 struct nvme_completion *cqe = &nvmeq->cqes[idx];
1025 if (unlikely(nvme_is_aen_req(nvmeq->qid, command_id))) {
1026 nvme_complete_async_event(&nvmeq->dev->ctrl,
1031 req = nvme_find_rq(nvme_queue_tagset(nvmeq), command_id);
1033 dev_warn(nvmeq->dev->ctrl.device,
1039 trace_nvme_sq(req, cqe->sq_head, nvmeq->sq_tail);
1046 static inline void nvme_update_cq_head(struct nvme_queue *nvmeq)
1048 u32 tmp = nvmeq->cq_head + 1;
1050 if (tmp == nvmeq->q_depth) {
1051 nvmeq->cq_head = 0;
1052 nvmeq->cq_phase ^= 1;
1054 nvmeq->cq_head = tmp;
1058 static inline int nvme_poll_cq(struct nvme_queue *nvmeq,
1063 while (nvme_cqe_pending(nvmeq)) {
1070 nvme_handle_cqe(nvmeq, iob, nvmeq->cq_head);
1071 nvme_update_cq_head(nvmeq);
1075 nvme_ring_cq_doorbell(nvmeq);
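The completion side at 989-1002 and 1046-1075 relies on the NVMe phase-bit protocol: a CQE is new when bit 0 of its status word matches the queue's cq_phase, the head wraps at q_depth and flips the phase, and the head doorbell is rung once per poll batch. The standalone model below illustrates just that protocol, with a simulated producer standing in for the controller and the initial phase assumed to be 1, as set up in nvme_init_queue at 1575.

#include <stdint.h>
#include <stdio.h>

#define Q_DEPTH 4

struct cqe_model {
        uint16_t status;                /* bit 0 carries the phase tag */
};

struct cq_model {
        struct cqe_model cqes[Q_DEPTH]; /* stands in for nvmeq->cqes */
        uint16_t head;                  /* nvmeq->cq_head */
        uint8_t  phase;                 /* nvmeq->cq_phase, starts at 1 */
};

/* A CQE is new when its phase bit matches the phase the consumer expects. */
static int cqe_pending(const struct cq_model *cq)
{
        return (cq->cqes[cq->head].status & 1) == cq->phase;
}

/* Advance the head; on wrap, flip the expected phase so entries written on
 * the controller's next lap become visible as new again. */
static void update_cq_head(struct cq_model *cq)
{
        uint16_t next = cq->head + 1;

        if (next == Q_DEPTH) {
                cq->head = 0;
                cq->phase ^= 1;
        } else {
                cq->head = next;
        }
}

int main(void)
{
        struct cq_model cq = { .head = 0, .phase = 1 };
        int handled = 0;
        int i;

        /* Simulated controller: post three completions tagged with phase 1. */
        for (i = 0; i < 3; i++)
                cq.cqes[i].status = 1;

        while (cqe_pending(&cq)) {      /* drain everything that is ready */
                handled++;
                update_cq_head(&cq);
        }
        printf("handled %d completions, head=%u, phase=%u\n",
               handled, (unsigned)cq.head, (unsigned)cq.phase);
        return 0;
}

Because the controller flips the tag it writes on each lap of the ring, the host needs no separate producer index to tell stale entries from fresh ones.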
1081 struct nvme_queue *nvmeq = data;
1084 if (nvme_poll_cq(nvmeq, &iob)) {
1094 struct nvme_queue *nvmeq = data;
1096 if (nvme_cqe_pending(nvmeq))
1105 static void nvme_poll_irqdisable(struct nvme_queue *nvmeq)
1107 struct pci_dev *pdev = to_pci_dev(nvmeq->dev->dev);
1109 WARN_ON_ONCE(test_bit(NVMEQ_POLLED, &nvmeq->flags));
1111 disable_irq(pci_irq_vector(pdev, nvmeq->cq_vector));
1112 nvme_poll_cq(nvmeq, NULL);
1113 enable_irq(pci_irq_vector(pdev, nvmeq->cq_vector));
1118 struct nvme_queue *nvmeq = hctx->driver_data;
1121 if (!nvme_cqe_pending(nvmeq))
1124 spin_lock(&nvmeq->cq_poll_lock);
1125 found = nvme_poll_cq(nvmeq, iob);
1126 spin_unlock(&nvmeq->cq_poll_lock);
1134 struct nvme_queue *nvmeq = &dev->queues[0];
1140 spin_lock(&nvmeq->sq_lock);
1141 nvme_sq_copy_cmd(nvmeq, &c);
1142 nvme_write_sq_db(nvmeq, true);
1143 spin_unlock(&nvmeq->sq_lock);
1157 struct nvme_queue *nvmeq, s16 vector)
1162 if (!test_bit(NVMEQ_POLLED, &nvmeq->flags))
1170 c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr);
1172 c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
1180 struct nvme_queue *nvmeq)
1199 c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr);
1201 c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
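Lines 1157-1201 build the Create I/O Completion Queue and Create I/O Submission Queue admin commands: PRP1 carries the queue's DMA address and QSIZE is the zero-based depth. The fragment below sketches the CQ side in kernel style; create_io_cq_sketch is a hypothetical name (upstream, this logic lives in adapter_alloc_cq()), struct nvme_dev and struct nvme_queue are the driver's pci.c-local types, and the field names come from the nvme_command layout in <linux/nvme.h> rather than from the listing above.

/* Sketch only: a hypothetical rendering of the Create I/O CQ command build,
 * to be read in pci.c's context. */
#include <linux/nvme.h>

static int create_io_cq_sketch(struct nvme_dev *dev, u16 qid,
                struct nvme_queue *nvmeq, s16 vector)
{
        struct nvme_command c = { };
        int flags = NVME_QUEUE_PHYS_CONTIG;

        /* Interrupt-driven unless the queue is marked for polling. */
        if (!test_bit(NVMEQ_POLLED, &nvmeq->flags))
                flags |= NVME_CQ_IRQ_ENABLED;

        c.create_cq.opcode = nvme_admin_create_cq;
        c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr);    /* ring memory */
        c.create_cq.cqid = cpu_to_le16(qid);
        c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);   /* zero-based */
        c.create_cq.cq_flags = cpu_to_le16(flags);
        c.create_cq.irq_vector = cpu_to_le16(vector);

        return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
}

The SQ variant at 1180-1201 is symmetric: prp1 points at sq_dma_addr and the command names the completion queue the new submission queue will complete into.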
1220 struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
1222 dev_warn(nvmeq->dev->ctrl.device,
1224 atomic_inc(&nvmeq->dev->ctrl.abort_limit);
1283 struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
1284 struct nvme_dev *dev = nvmeq->dev;
1307 if (test_bit(NVMEQ_POLLED, &nvmeq->flags))
1310 nvme_poll_irqdisable(nvmeq);
1315 req->tag, nvmeq->qid);
1332 req->tag, nvmeq->qid);
1347 if (!nvmeq->qid || iod->aborted) {
1350 req->tag, nvmeq->qid);
1363 cmd.abort.sqid = cpu_to_le16(nvmeq->qid);
1365 dev_warn(nvmeq->dev->ctrl.device,
1369 nvmeq->qid);
1400 static void nvme_free_queue(struct nvme_queue *nvmeq)
1402 dma_free_coherent(nvmeq->dev->dev, CQ_SIZE(nvmeq),
1403 (void *)nvmeq->cqes, nvmeq->cq_dma_addr);
1404 if (!nvmeq->sq_cmds)
1407 if (test_and_clear_bit(NVMEQ_SQ_CMB, &nvmeq->flags)) {
1408 pci_free_p2pmem(to_pci_dev(nvmeq->dev->dev),
1409 nvmeq->sq_cmds, SQ_SIZE(nvmeq));
1411 dma_free_coherent(nvmeq->dev->dev, SQ_SIZE(nvmeq),
1412 nvmeq->sq_cmds, nvmeq->sq_dma_addr);
1428 struct nvme_queue *nvmeq = &dev->queues[qid];
1430 if (!test_and_clear_bit(NVMEQ_ENABLED, &nvmeq->flags))
1436 nvmeq->dev->online_queues--;
1437 if (!nvmeq->qid && nvmeq->dev->ctrl.admin_q)
1438 nvme_quiesce_admin_queue(&nvmeq->dev->ctrl);
1439 if (!test_and_clear_bit(NVMEQ_POLLED, &nvmeq->flags))
1440 pci_free_irq(to_pci_dev(dev->dev), nvmeq->cq_vector, nvmeq);
1493 static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
1499 nvmeq->sq_cmds = pci_alloc_p2pmem(pdev, SQ_SIZE(nvmeq));
1500 if (nvmeq->sq_cmds) {
1501 nvmeq->sq_dma_addr = pci_p2pmem_virt_to_bus(pdev,
1502 nvmeq->sq_cmds);
1503 if (nvmeq->sq_dma_addr) {
1504 set_bit(NVMEQ_SQ_CMB, &nvmeq->flags);
1508 pci_free_p2pmem(pdev, nvmeq->sq_cmds, SQ_SIZE(nvmeq));
1512 nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(nvmeq),
1513 &nvmeq->sq_dma_addr, GFP_KERNEL);
1514 if (!nvmeq->sq_cmds)
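Lines 1493-1514 show a two-step allocation for the SQ command ring: try peer-to-peer memory inside the controller memory buffer (CMB) first, and fall back to ordinary coherent DMA memory otherwise. The sketch below is a hedged reconstruction of that order, to be read in the driver's context; nvme_cmb_can_hold_sqes() is a hypothetical stand-in for the driver's CMB-capability guard, which does not reference nvmeq and so does not appear in the listing.

/* Sketch only: CMB-first SQ allocation with a coherent-DMA fallback. */
static int alloc_sq_cmds_sketch(struct nvme_dev *dev, struct nvme_queue *nvmeq,
                                int qid)
{
        struct pci_dev *pdev = to_pci_dev(dev->dev);

        /* I/O queues may live in the CMB: the SQ ring is then device memory
         * reached via peer-to-peer DMA. */
        if (qid && nvme_cmb_can_hold_sqes(dev)) {       /* hypothetical helper */
                nvmeq->sq_cmds = pci_alloc_p2pmem(pdev, SQ_SIZE(nvmeq));
                if (nvmeq->sq_cmds) {
                        nvmeq->sq_dma_addr = pci_p2pmem_virt_to_bus(pdev,
                                                        nvmeq->sq_cmds);
                        if (nvmeq->sq_dma_addr) {
                                set_bit(NVMEQ_SQ_CMB, &nvmeq->flags);
                                return 0;
                        }
                        /* No usable bus address: hand the CMB memory back. */
                        pci_free_p2pmem(pdev, nvmeq->sq_cmds, SQ_SIZE(nvmeq));
                }
        }

        /* Fallback: ordinary host memory the device can DMA to. */
        nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(nvmeq),
                                            &nvmeq->sq_dma_addr, GFP_KERNEL);
        return nvmeq->sq_cmds ? 0 : -ENOMEM;
}

Whether the CMB path was taken is recorded in NVMEQ_SQ_CMB, so nvme_free_queue() at 1400-1412 knows which allocator to return the memory to.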
1521 struct nvme_queue *nvmeq = &dev->queues[qid];
1526 nvmeq->sqes = qid ? dev->io_sqes : NVME_ADM_SQES;
1527 nvmeq->q_depth = depth;
1528 nvmeq->cqes = dma_alloc_coherent(dev->dev, CQ_SIZE(nvmeq),
1529 &nvmeq->cq_dma_addr, GFP_KERNEL);
1530 if (!nvmeq->cqes)
1533 if (nvme_alloc_sq_cmds(dev, nvmeq, qid))
1536 nvmeq->dev = dev;
1537 spin_lock_init(&nvmeq->sq_lock);
1538 spin_lock_init(&nvmeq->cq_poll_lock);
1539 nvmeq->cq_head = 0;
1540 nvmeq->cq_phase = 1;
1541 nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
1542 nvmeq->qid = qid;
1548 dma_free_coherent(dev->dev, CQ_SIZE(nvmeq), (void *)nvmeq->cqes,
1549 nvmeq->cq_dma_addr);
1554 static int queue_request_irq(struct nvme_queue *nvmeq)
1556 struct pci_dev *pdev = to_pci_dev(nvmeq->dev->dev);
1557 int nr = nvmeq->dev->ctrl.instance;
1560 return pci_request_irq(pdev, nvmeq->cq_vector, nvme_irq_check,
1561 nvme_irq, nvmeq, "nvme%dq%d", nr, nvmeq->qid);
1563 return pci_request_irq(pdev, nvmeq->cq_vector, nvme_irq,
1564 NULL, nvmeq, "nvme%dq%d", nr, nvmeq->qid);
1568 static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
1570 struct nvme_dev *dev = nvmeq->dev;
1572 nvmeq->sq_tail = 0;
1573 nvmeq->last_sq_tail = 0;
1574 nvmeq->cq_head = 0;
1575 nvmeq->cq_phase = 1;
1576 nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
1577 memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq));
1578 nvme_dbbuf_init(dev, nvmeq, qid);
1605 static int nvme_create_queue(struct nvme_queue *nvmeq, int qid, bool polled)
1607 struct nvme_dev *dev = nvmeq->dev;
1611 clear_bit(NVMEQ_DELETE_ERROR, &nvmeq->flags);
1620 set_bit(NVMEQ_POLLED, &nvmeq->flags);
1622 result = adapter_alloc_cq(dev, qid, nvmeq, vector);
1626 result = adapter_alloc_sq(dev, qid, nvmeq);
1632 nvmeq->cq_vector = vector;
1637 nvme_init_queue(nvmeq, qid);
1639 result = queue_request_irq(nvmeq);
1644 set_bit(NVMEQ_ENABLED, &nvmeq->flags);
1720 struct nvme_queue *nvmeq;
1750 nvmeq = &dev->queues[0];
1751 aqa = nvmeq->q_depth - 1;
1755 lo_hi_writeq(nvmeq->sq_dma_addr, dev->bar + NVME_REG_ASQ);
1756 lo_hi_writeq(nvmeq->cq_dma_addr, dev->bar + NVME_REG_ACQ);
1762 nvmeq->cq_vector = 0;
1763 nvme_init_queue(nvmeq, 0);
1764 result = queue_request_irq(nvmeq);
1770 set_bit(NVMEQ_ENABLED, &nvmeq->flags);
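Lines 1750-1770 program the admin queue: AQA takes the zero-based admin queue size, ASQ/ACQ take the ring DMA addresses, the queue is tied to interrupt vector 0, and NVMEQ_ENABLED is set only if the IRQ request succeeds. The sketch below wraps those steps in a hypothetical helper; the AQA packing (SQ size in the low half, CQ size in the high half) and the omitted controller enable/readiness steps are paraphrased from the driver and the NVMe spec, not quoted from the listing.

/* Hypothetical helper; upstream this is done inline in
 * nvme_pci_configure_admin_queue(). Controller disable/enable and readiness
 * polling around these register writes are intentionally omitted. */
static int program_admin_queue_sketch(struct nvme_dev *dev)
{
        struct nvme_queue *nvmeq = &dev->queues[0];
        u32 aqa = nvmeq->q_depth - 1;           /* zero-based queue size */
        int result;

        aqa |= aqa << 16;                       /* low half: SQ size, high half: CQ size */
        writel(aqa, dev->bar + NVME_REG_AQA);
        lo_hi_writeq(nvmeq->sq_dma_addr, dev->bar + NVME_REG_ASQ);
        lo_hi_writeq(nvmeq->cq_dma_addr, dev->bar + NVME_REG_ACQ);

        nvmeq->cq_vector = 0;                   /* admin completions on vector 0 */
        nvme_init_queue(nvmeq, 0);
        result = queue_request_irq(nvmeq);
        if (!result)
                set_bit(NVMEQ_ENABLED, &nvmeq->flags);
        return result;
}

I/O queues are created later through the admin commands shown at 1605-1644 (nvme_create_queue), once the admin queue set up here is usable.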
2375 struct nvme_queue *nvmeq = req->end_io_data;
2378 complete(&nvmeq->delete_done);
2385 struct nvme_queue *nvmeq = req->end_io_data;
2388 set_bit(NVMEQ_DELETE_ERROR, &nvmeq->flags);
2393 static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
2395 struct request_queue *q = nvmeq->dev->ctrl.admin_q;
2400 cmd.delete_queue.qid = cpu_to_le16(nvmeq->qid);
2411 req->end_io_data = nvmeq;
2413 init_completion(&nvmeq->delete_done);
2432 struct nvme_queue *nvmeq = &dev->queues[nr_queues + sent];
2434 timeout = wait_for_completion_io_timeout(&nvmeq->delete_done,