Lines matching references to req in the NVMe PCI host driver (drivers/nvme/host/pci.c); the number at the start of each line is the line number in that file.
226 struct nvme_request req;
414 static int nvme_init_request(struct blk_mq_tag_set *set, struct request *req,
418 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
425 nvme_req(req)->ctrl = &dev->ctrl;
517 static void **nvme_pci_iod_list(struct request *req)
519 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
520 return (void **)(iod->sg + blk_rq_nr_phys_segments(req));
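
The nvme_pci_iod_list() hits above rely on a layout trick: each request's per-driver nvme_iod allocation holds the scatterlist array immediately followed by an array of pointers to the PRP-list/SGL pages, so the pointer array is reached by stepping past blk_rq_nr_phys_segments() scatterlist entries, exactly as in the (void **)(iod->sg + ...) expression. Below is a minimal userspace sketch of that pointer arithmetic; the struct and helper names are simplified stand-ins, not the kernel types.

/*
 * One buffer holds nseg scatterlist entries followed by npages pointers;
 * the page-pointer list starts right after the scatterlist array.
 */
#include <stdio.h>
#include <stdlib.h>

struct mock_scatterlist { unsigned long page_link; unsigned int offset, length; };

/* hypothetical stand-in for the per-request iod allocation */
static void *alloc_iod_buffer(int nseg, int npages)
{
	return malloc(nseg * sizeof(struct mock_scatterlist) + npages * sizeof(void *));
}

/* mirrors (void **)(iod->sg + blk_rq_nr_phys_segments(req)) */
static void **iod_list(struct mock_scatterlist *sg, int nseg)
{
	return (void **)(sg + nseg);
}

int main(void)
{
	int nseg = 4, npages = 2;
	struct mock_scatterlist *sg = alloc_iod_buffer(nseg, npages);
	void **list;

	if (!sg)
		return 1;
	list = iod_list(sg, nseg);
	printf("sg array at %p, page-pointer list at %p\n", (void *)sg, (void *)list);
	free(sg);
	return 0;
}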
523 static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req)
525 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
526 int nseg = blk_rq_nr_phys_segments(req);
529 avg_seg_size = DIV_ROUND_UP(blk_rq_payload_bytes(req), nseg);
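
The avg_seg_size computation above drives the PRP-versus-SGL choice in nvme_pci_use_sgls(). Below is a small userspace sketch of that heuristic, assuming the usual semantics of the driver's sgl_threshold module parameter (32 KiB by default) and using stand-in names for the controller and queue checks.

#include <stdbool.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static unsigned int sgl_threshold = 32768;	/* module parameter in the driver */

static bool use_sgls(bool ctrl_supports_sgls, int qid,
		     unsigned int payload_bytes, int nseg)
{
	unsigned int avg_seg_size = DIV_ROUND_UP(payload_bytes, nseg);

	if (!ctrl_supports_sgls)
		return false;
	if (!qid)			/* admin queue: always PRPs */
		return false;
	if (!sgl_threshold || avg_seg_size < sgl_threshold)
		return false;
	return true;
}

int main(void)
{
	printf("128K in 2 segments -> %s\n",
	       use_sgls(true, 1, 131072, 2) ? "SGL" : "PRP");
	printf("8K in 2 segments   -> %s\n",
	       use_sgls(true, 1, 8192, 2) ? "SGL" : "PRP");
	return 0;
}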
540 static void nvme_free_prps(struct nvme_dev *dev, struct request *req)
543 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
548 __le64 *prp_list = nvme_pci_iod_list(req)[i];
557 static void nvme_free_sgls(struct nvme_dev *dev, struct request *req)
560 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
565 struct nvme_sgl_desc *sg_list = nvme_pci_iod_list(req)[i];
574 static void nvme_unmap_sg(struct nvme_dev *dev, struct request *req)
576 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
580 rq_dma_dir(req));
582 dma_unmap_sg(dev->dev, iod->sg, iod->nents, rq_dma_dir(req));
585 static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
587 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
591 rq_dma_dir(req));
597 nvme_unmap_sg(dev, req);
599 dma_pool_free(dev->prp_small_pool, nvme_pci_iod_list(req)[0],
602 nvme_free_sgls(dev, req);
604 nvme_free_prps(dev, req);
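
Taken together, the nvme_unmap_data()/nvme_unmap_sg()/nvme_free_sgls()/nvme_free_prps() hits above spell out the teardown branching once a transfer is done. Below is a stub userspace sketch of that decision tree; every helper is a placeholder for the corresponding kernel call, not real API.

#include <stdbool.h>
#include <stdio.h>

static void unmap_single_bvec(void)   { puts("dma_unmap_page(first_dma)"); }
static void unmap_scatterlist(void)   { puts("dma_unmap_sg(iod->sg)"); }
static void free_small_pool_buf(void) { puts("dma_pool_free(prp_small_pool)"); }
static void free_sgl_pages(void)      { puts("free per-page SGL segments"); }
static void free_prp_pages(void)      { puts("free per-page PRP lists"); }

static void unmap_data(bool single_segment, int npages, bool used_sgl)
{
	if (single_segment) {		/* fast path set up by the *_simple helpers */
		unmap_single_bvec();
		return;
	}
	unmap_scatterlist();
	if (npages == 0)		/* descriptors fit in the small pool buffer */
		free_small_pool_buf();
	else if (used_sgl)
		free_sgl_pages();
	else
		free_prp_pages();
}

int main(void)
{
	unmap_data(true, 0, false);	/* single bvec */
	unmap_data(false, 2, false);	/* multi-segment PRP */
	return 0;
}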
623 struct request *req, struct nvme_rw_command *cmnd)
625 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
627 int length = blk_rq_payload_bytes(req);
633 void **list = nvme_pci_iod_list(req);
705 nvme_free_prps(dev, req);
710 blk_rq_payload_bytes(req), iod->nents);
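
The nvme_pci_setup_prps() hits above build the PRP entries for the general path. Below is a worked userspace example of the underlying accounting: PRP1 covers the remainder of the first controller page, and the rest of the payload either fits in PRP2 or needs a PRP list. The 4096-byte page matches NVME_CTRL_PAGE_SIZE; everything else is simplified.

#include <stdio.h>

#define PAGE_SZ		4096u
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static void prp_plan(unsigned int offset, unsigned int len)
{
	unsigned int first = PAGE_SZ - (offset & (PAGE_SZ - 1));
	unsigned int rest = len > first ? len - first : 0;
	unsigned int nprps = DIV_ROUND_UP(rest, PAGE_SZ);

	if (rest == 0)
		printf("%6u bytes @%4u: PRP1 only\n", len, offset);
	else if (nprps == 1)
		printf("%6u bytes @%4u: PRP1 + PRP2\n", len, offset);
	else
		printf("%6u bytes @%4u: PRP1 + PRP list with %u entries\n",
		       len, offset, nprps);
}

int main(void)
{
	prp_plan(0, 4096);	/* single page		*/
	prp_plan(512, 8192);	/* crosses three pages	*/
	prp_plan(0, 131072);	/* needs a PRP list	*/
	return 0;
}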
736 struct request *req, struct nvme_rw_command *cmd, int entries)
738 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
767 nvme_pci_iod_list(req)[0] = sg_list;
782 nvme_pci_iod_list(req)[iod->npages++] = sg_list;
793 nvme_free_sgls(dev, req);
798 struct request *req, struct nvme_rw_command *cmnd,
801 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
805 iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0);
819 struct request *req, struct nvme_rw_command *cmnd,
822 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
824 iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0);
836 static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
839 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
843 if (blk_rq_nr_phys_segments(req) == 1) {
844 struct bio_vec bv = req_bvec(req);
848 return nvme_setup_prp_simple(dev, req,
853 return nvme_setup_sgl_simple(dev, req,
862 sg_init_table(iod->sg, blk_rq_nr_phys_segments(req));
863 iod->nents = blk_rq_map_sg(req->q, req, iod->sg);
869 iod->nents, rq_dma_dir(req), DMA_ATTR_NO_WARN);
872 rq_dma_dir(req), DMA_ATTR_NO_WARN);
876 iod->use_sgl = nvme_pci_use_sgls(dev, req);
878 ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw, nr_mapped);
880 ret = nvme_pci_setup_prps(dev, req, &cmnd->rw);
886 nvme_unmap_sg(dev, req);
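
The nvme_map_data() hits above show a single-segment fast path that skips the scatterlist entirely: the bvec is described by PRPs if it fits within two controller pages, or by one inline SGL descriptor when SGLs are usable. Below is a compact sketch of that decision, with the PCI peer-to-peer check omitted and stand-in names.

#include <stdbool.h>
#include <stdio.h>

#define CTRL_PAGE_SZ 4096u

enum map_path { MAP_PRP_SIMPLE, MAP_SGL_SIMPLE, MAP_SCATTERLIST };

static enum map_path pick_path(int nseg, unsigned int bv_offset,
			       unsigned int bv_len, bool sgls_usable)
{
	if (nseg == 1) {
		/* fits in PRP1 (+ optional PRP2): no list page needed */
		if (bv_offset + bv_len <= 2 * CTRL_PAGE_SZ)
			return MAP_PRP_SIMPLE;
		if (sgls_usable)
			return MAP_SGL_SIMPLE;
	}
	return MAP_SCATTERLIST;	/* general path: map sg, then PRPs or SGLs */
}

int main(void)
{
	printf("%d\n", pick_path(1, 0, 8192, true));	/* PRP fast path  */
	printf("%d\n", pick_path(1, 0, 65536, true));	/* SGL fast path  */
	printf("%d\n", pick_path(4, 0, 65536, true));	/* scatterlist	  */
	return 0;
}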
892 static blk_status_t nvme_map_metadata(struct nvme_dev *dev, struct request *req,
895 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
897 iod->meta_dma = dma_map_bvec(dev->dev, rq_integrity_vec(req),
898 rq_dma_dir(req), 0);
914 struct request *req = bd->rq;
915 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
930 ret = nvme_setup_cmd(ns, req, cmnd);
934 if (blk_rq_nr_phys_segments(req)) {
935 ret = nvme_map_data(dev, req, cmnd);
940 if (blk_integrity_rq(req)) {
941 ret = nvme_map_metadata(dev, req, cmnd);
946 blk_mq_start_request(req);
950 if (blk_rq_nr_phys_segments(req))
951 nvme_unmap_data(dev, req);
953 nvme_cleanup_cmd(req);
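
The nvme_queue_rq() hits above fix the submission order and its error unwind: build the command, map data, map metadata, start the request, then submit; failures unwind in reverse. Below is a stub sketch of that flow; the helpers stand in for nvme_setup_cmd(), nvme_map_data(), nvme_map_metadata(), blk_mq_start_request() and friends, and are not kernel API.

#include <stdbool.h>
#include <stdio.h>

static bool setup_cmd(void)      { return true; }
static bool map_data(void)       { return true; }
static bool map_metadata(void)   { return false; }	/* simulate a failure */
static void start_request(void)  { puts("blk_mq_start_request"); }
static void submit(void)         { puts("write SQ entry, ring doorbell"); }
static void unmap_data(void)     { puts("nvme_unmap_data"); }
static void cleanup_cmd(void)    { puts("nvme_cleanup_cmd"); }

static int queue_rq(bool has_data, bool has_integrity)
{
	if (!setup_cmd())
		return -1;
	if (has_data && !map_data())
		goto out_free_cmd;
	if (has_integrity && !map_metadata())
		goto out_unmap_data;

	start_request();
	submit();
	return 0;

out_unmap_data:
	if (has_data)
		unmap_data();
out_free_cmd:
	cleanup_cmd();
	return -1;
}

int main(void)
{
	queue_rq(true, true);	/* metadata map "fails", so the unwind runs */
	return 0;
}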
957 static void nvme_pci_complete_rq(struct request *req)
959 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
962 if (blk_integrity_rq(req))
964 rq_integrity_vec(req)->bv_len, rq_dma_dir(req));
966 if (blk_rq_nr_phys_segments(req))
967 nvme_unmap_data(dev, req);
968 nvme_complete_rq(req);
999 struct request *req;
1013 req = nvme_find_rq(nvme_queue_tagset(nvmeq), command_id);
1014 if (unlikely(!req)) {
1021 trace_nvme_sq(req, cqe->sq_head, nvmeq->sq_tail);
1022 if (!nvme_try_complete_req(req, cqe->status, cqe->result))
1023 nvme_pci_complete_rq(req);
1201 static void abort_endio(struct request *req, blk_status_t error)
1203 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
1207 "Abort status: 0x%x", nvme_req(req)->status);
1209 blk_mq_free_request(req);
1255 static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
1257 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
1285 nvme_poll(req->mq_hctx);
1289 if (blk_mq_rq_state(req) != MQ_RQ_IN_FLIGHT) {
1292 req->tag, nvmeq->qid);
1309 req->tag, nvmeq->qid);
1310 nvme_req(req)->flags |= NVME_REQ_CANCELLED;
1327 req->tag, nvmeq->qid);
1328 nvme_req(req)->flags |= NVME_REQ_CANCELLED;
1343 cmd.abort.cid = nvme_cid(req);
1348 req->tag, nvmeq->qid);
1361 * The aborted req will be completed on receiving the abort req.
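
The nvme_timeout() hits above trace an escalation ladder: poll the queue first, give up (marking the request cancelled) if it already completed or the controller is being torn down, send a single admin Abort carrying the command id and submission-queue id for an I/O command, and otherwise fall back to a controller reset. Below is a stub model of that ladder; the states and helpers are placeholders, not the kernel's.

#include <stdbool.h>
#include <stdio.h>

enum timeout_action { COMPLETED_BY_POLL, GIVE_UP_CANCELLED, SEND_ABORT, RESET_CTRL };

static enum timeout_action on_timeout(bool completed_after_poll,
				      bool ctrl_dead_or_resetting,
				      bool is_admin_queue, bool already_aborted)
{
	if (completed_after_poll)
		return COMPLETED_BY_POLL;	/* missed interrupt		  */
	if (ctrl_dead_or_resetting)
		return GIVE_UP_CANCELLED;	/* mark NVME_REQ_CANCELLED	  */
	if (is_admin_queue || already_aborted)
		return RESET_CTRL;		/* an abort would not help	  */
	return SEND_ABORT;	/* admin Abort with the command id and sqid */
}

int main(void)
{
	printf("%d\n", on_timeout(false, false, false, false));	/* -> abort */
	printf("%d\n", on_timeout(false, false, false, true));		/* -> reset */
	return 0;
}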
2251 static void nvme_del_queue_end(struct request *req, blk_status_t error)
2253 struct nvme_queue *nvmeq = req->end_io_data;
2255 blk_mq_free_request(req);
2259 static void nvme_del_cq_end(struct request *req, blk_status_t error)
2261 struct nvme_queue *nvmeq = req->end_io_data;
2266 nvme_del_queue_end(req, error);
2272 struct request *req;
2279 req = nvme_alloc_request(q, &cmd, BLK_MQ_REQ_NOWAIT);
2280 if (IS_ERR(req))
2281 return PTR_ERR(req);
2283 req->end_io_data = nvmeq;
2286 blk_execute_rq_nowait(q, NULL, req, false,