Lines matching refs: req
231 struct nvme_request req;
420 struct request *req, unsigned int hctx_idx,
423 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
425 nvme_req(req)->ctrl = set->driver_data;
426 nvme_req(req)->cmd = &iod->cmd;
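These first matches appear to come from the NVMe PCI driver (drivers/nvme/host/pci.c): struct nvme_iod, the per-request blk-mq PDU, embeds a struct nvme_request, and the .init_request callback points nvme_req(req)->ctrl and nvme_req(req)->cmd at state inside that PDU. A minimal sketch reconstructed from the matched lines; the surrounding field layout and the callback name are assumptions rather than quoted source:

struct nvme_iod {
	struct nvme_request req;	/* generic NVMe request state (line 231 above) */
	struct nvme_command cmd;	/* SQE built in place for this request */
	/* ... DMA state: first_dma, meta_dma, sg_table, etc. (assumed) ... */
};

/* blk_mq_ops.init_request callback; the name is assumed here */
static int nvme_pci_init_request(struct blk_mq_tag_set *set,
		struct request *req, unsigned int hctx_idx,
		unsigned int numa_node)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);

	nvme_req(req)->ctrl = set->driver_data;
	nvme_req(req)->cmd = &iod->cmd;	/* SQE lives in the PDU */
	return 0;
}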
507 static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req,
510 struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
513 avg_seg_size = DIV_ROUND_UP(blk_rq_payload_bytes(req), nseg);
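nvme_pci_use_sgls() picks between PRP lists and SGL descriptors based on the average segment size of the request. A hedged reconstruction: only the avg_seg_size computation is in the matched lines; the remaining checks (controller SGL support, I/O queue only, the sgl_threshold module parameter) follow my reading of the driver and are not confirmed by the matches:

static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req,
				     int nseg)
{
	struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
	unsigned int avg_seg_size;

	avg_seg_size = DIV_ROUND_UP(blk_rq_payload_bytes(req), nseg);

	/* assumed checks, not part of the matched lines */
	if (!nvme_ctrl_sgl_supported(&dev->ctrl))
		return false;
	if (!nvmeq->qid)		/* admin queue: always PRPs */
		return false;
	if (!sgl_threshold || avg_seg_size < sgl_threshold)
		return false;
	return true;
}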
524 static void nvme_free_prps(struct nvme_dev *dev, struct request *req)
527 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
540 static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
542 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
546 rq_dma_dir(req));
552 dma_unmap_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), 0);
561 nvme_free_prps(dev, req);
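nvme_unmap_data() undoes whatever nvme_map_data() chose: a request that was mapped as a single bio_vec is unmapped directly, everything else tears down the sg table and frees the PRP-list pages. A simplified sketch; iod->dma_len as the fast-path marker and the dma_unmap_page() call are assumptions, and the SGL-descriptor free and sg-list mempool return are omitted:

static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);

	if (iod->dma_len) {
		/* single bio_vec mapped with dma_map_bvec() */
		dma_unmap_page(dev->dev, iod->first_dma, iod->dma_len,
			       rq_dma_dir(req));
		return;
	}

	dma_unmap_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), 0);
	nvme_free_prps(dev, req);	/* return dma_pool pages holding PRP lists */
}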
580 struct request *req, struct nvme_rw_command *cmnd)
582 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
584 int length = blk_rq_payload_bytes(req);
660 nvme_free_prps(dev, req);
665 blk_rq_payload_bytes(req), iod->sgt.nents);
686 struct request *req, struct nvme_rw_command *cmd)
688 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
731 struct request *req, struct nvme_rw_command *cmnd,
734 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
738 iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0);
752 struct request *req, struct nvme_rw_command *cmnd,
755 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
757 iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0);
769 static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
772 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
776 if (blk_rq_nr_phys_segments(req) == 1) {
777 struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
778 struct bio_vec bv = req_bvec(req);
782 return nvme_setup_prp_simple(dev, req,
787 return nvme_setup_sgl_simple(dev, req,
796 sg_init_table(iod->sgt.sgl, blk_rq_nr_phys_segments(req));
797 iod->sgt.orig_nents = blk_rq_map_sg(req->q, req, iod->sgt.sgl);
801 rc = dma_map_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req),
809 if (nvme_pci_use_sgls(dev, req, iod->sgt.nents))
810 ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw);
812 ret = nvme_pci_setup_prps(dev, req, &cmnd->rw);
818 dma_unmap_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), 0);
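Taken together, the nvme_map_data() matches show the mapping decision tree: a one-segment request takes a fast path that DMA-maps the lone bio_vec and emits either a simple PRP pair or an inline data SGL; larger requests go through a scatter/gather table and are then described with PRPs or SGLs depending on nvme_pci_use_sgls(). A condensed sketch; the fast-path size/P2PDMA checks, the sg-list allocation, and DMA_ATTR_NO_WARN are recalled rather than quoted from the matches:

static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
		struct nvme_command *cmnd)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	blk_status_t ret;
	int rc;

	if (blk_rq_nr_phys_segments(req) == 1) {
		struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
		struct bio_vec bv = req_bvec(req);

		if (!is_pci_p2pdma_page(bv.bv_page)) {
			/* fits in PRP1/PRP2 without a PRP list? */
			if (bv.bv_offset + bv.bv_len <= NVME_CTRL_PAGE_SIZE * 2)
				return nvme_setup_prp_simple(dev, req,
							     &cmnd->rw, &bv);
			/* otherwise an inline SGL, if the queue allows it */
			if (nvmeq->qid && nvme_ctrl_sgl_supported(&dev->ctrl))
				return nvme_setup_sgl_simple(dev, req,
							     &cmnd->rw, &bv);
		}
	}

	/* (mempool allocation of iod->sgt.sgl omitted from this sketch) */
	sg_init_table(iod->sgt.sgl, blk_rq_nr_phys_segments(req));
	iod->sgt.orig_nents = blk_rq_map_sg(req->q, req, iod->sgt.sgl);
	rc = dma_map_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req),
			     DMA_ATTR_NO_WARN);
	if (rc)
		return BLK_STS_RESOURCE;

	if (nvme_pci_use_sgls(dev, req, iod->sgt.nents))
		ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw);
	else
		ret = nvme_pci_setup_prps(dev, req, &cmnd->rw);
	if (ret != BLK_STS_OK)
		dma_unmap_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), 0);
	return ret;
}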
824 static blk_status_t nvme_map_metadata(struct nvme_dev *dev, struct request *req,
827 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
829 iod->meta_dma = dma_map_bvec(dev->dev, rq_integrity_vec(req),
830 rq_dma_dir(req), 0);
837 static blk_status_t nvme_prep_rq(struct nvme_dev *dev, struct request *req)
839 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
846 ret = nvme_setup_cmd(req->q->queuedata, req);
850 if (blk_rq_nr_phys_segments(req)) {
851 ret = nvme_map_data(dev, req, &iod->cmd);
856 if (blk_integrity_rq(req)) {
857 ret = nvme_map_metadata(dev, req, &iod->cmd);
862 nvme_start_request(req);
865 nvme_unmap_data(dev, req);
867 nvme_cleanup_cmd(req);
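nvme_prep_rq() is the common preparation step used by both the single-request and the batched submission paths: build the SQE, map the data and (when present) the integrity metadata, then start the request, unwinding in reverse order on failure. A sketch assembled from the matched lines; the goto label names are assumptions:

static blk_status_t nvme_prep_rq(struct nvme_dev *dev, struct request *req)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	blk_status_t ret;

	ret = nvme_setup_cmd(req->q->queuedata, req);
	if (ret)
		return ret;

	if (blk_rq_nr_phys_segments(req)) {
		ret = nvme_map_data(dev, req, &iod->cmd);
		if (ret)
			goto out_free_cmd;
	}

	if (blk_integrity_rq(req)) {
		ret = nvme_map_metadata(dev, req, &iod->cmd);
		if (ret)
			goto out_unmap_data;
	}

	nvme_start_request(req);
	return BLK_STS_OK;

out_unmap_data:
	nvme_unmap_data(dev, req);
out_free_cmd:
	nvme_cleanup_cmd(req);
	return ret;
}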
879 struct request *req = bd->rq;
880 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
890 if (unlikely(!nvme_check_ready(&dev->ctrl, req, true)))
891 return nvme_fail_nonready_command(&dev->ctrl, req);
893 ret = nvme_prep_rq(dev, req);
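The .queue_rq path for a single request gates on controller readiness, runs nvme_prep_rq(), and then writes the SQE to the submission queue; the SQE copy and doorbell write are not among the matched lines, so they are only indicated by a comment in this sketch:

static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
				  const struct blk_mq_queue_data *bd)
{
	struct nvme_queue *nvmeq = hctx->driver_data;
	struct nvme_dev *dev = nvmeq->dev;
	struct request *req = bd->rq;
	blk_status_t ret;

	if (unlikely(!nvme_check_ready(&dev->ctrl, req, true)))
		return nvme_fail_nonready_command(&dev->ctrl, req);

	ret = nvme_prep_rq(dev, req);
	if (unlikely(ret))
		return ret;

	/* copy the SQE prepared in the PDU into the SQ ring and ring the
	 * doorbell (submission helpers are not among the matched lines) */
	return BLK_STS_OK;
}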
907 struct request *req = rq_list_pop(rqlist);
908 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
916 static bool nvme_prep_rq_batch(struct nvme_queue *nvmeq, struct request *req)
924 if (unlikely(!nvme_check_ready(&nvmeq->dev->ctrl, req, true)))
927 req->mq_hctx->tags->rqs[req->tag] = req;
928 return nvme_prep_rq(nvmeq->dev, req) == BLK_STS_OK;
933 struct request *req, *next, *prev = NULL;
936 rq_list_for_each_safe(rqlist, req, next) {
937 struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
939 if (!nvme_prep_rq_batch(nvmeq, req)) {
940 /* detach 'req' and add to remainder list */
941 rq_list_move(rqlist, &requeue_list, req, prev);
943 req = prev;
944 if (!req)
948 if (!next || req->mq_hctx != next->mq_hctx) {
950 req->rq_next = NULL;
955 prev = req;
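nvme_queue_rqs() is the batched submission path: it walks the plugged request list, preps each request, detaches failures onto a requeue list, and submits the accumulated run whenever the hardware context changes or the list ends. The matched lines cover almost the whole function; the sketch below fills in only the requeue-list variable and the per-hctx submit call, whose helper name is an assumption:

static void nvme_queue_rqs(struct request **rqlist)
{
	struct request *req, *next, *prev = NULL;
	struct request *requeue_list = NULL;

	rq_list_for_each_safe(rqlist, req, next) {
		struct nvme_queue *nvmeq = req->mq_hctx->driver_data;

		if (!nvme_prep_rq_batch(nvmeq, req)) {
			/* detach 'req' and add to remainder list */
			rq_list_move(rqlist, &requeue_list, req, prev);

			req = prev;
			if (!req)
				continue;
		}

		if (!next || req->mq_hctx != next->mq_hctx) {
			/* end of a run for this hctx: terminate and submit */
			req->rq_next = NULL;
			nvme_submit_cmds(nvmeq, rqlist);	/* helper name assumed */
			*rqlist = next;
			prev = NULL;
		} else {
			prev = req;
		}
	}

	*rqlist = requeue_list;	/* hand back what could not be prepped */
}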
961 static __always_inline void nvme_pci_unmap_rq(struct request *req)
963 struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
966 if (blk_integrity_rq(req)) {
967 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
970 rq_integrity_vec(req)->bv_len, rq_dma_dir(req));
973 if (blk_rq_nr_phys_segments(req))
974 nvme_unmap_data(dev, req);
977 static void nvme_pci_complete_rq(struct request *req)
979 nvme_pci_unmap_rq(req);
980 nvme_complete_rq(req);
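On the completion side the mapping is undone in the opposite order: the integrity bio_vec is unmapped first, then the data, and the request is handed back to the core. A sketch; the dma_unmap_page() call for the metadata is inferred from the matched continuation line and from iod->meta_dma being set in nvme_map_metadata():

static __always_inline void nvme_pci_unmap_rq(struct request *req)
{
	struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
	struct nvme_dev *dev = nvmeq->dev;

	if (blk_integrity_rq(req)) {
		struct nvme_iod *iod = blk_mq_rq_to_pdu(req);

		dma_unmap_page(dev->dev, iod->meta_dma,
			       rq_integrity_vec(req)->bv_len, rq_dma_dir(req));
	}

	if (blk_rq_nr_phys_segments(req))
		nvme_unmap_data(dev, req);
}

static void nvme_pci_complete_rq(struct request *req)
{
	nvme_pci_unmap_rq(req);
	nvme_complete_rq(req);
}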
1017 struct request *req;
1031 req = nvme_find_rq(nvme_queue_tagset(nvmeq), command_id);
1032 if (unlikely(!req)) {
1039 trace_nvme_sq(req, cqe->sq_head, nvmeq->sq_tail);
1040 if (!nvme_try_complete_req(req, cqe->status, cqe->result) &&
1041 !blk_mq_add_to_batch(req, iob, nvme_req(req)->status,
1043 nvme_pci_complete_rq(req);
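The CQE handler (nvme_handle_cqe in my reading) looks the request up by command id, traces it, and then either lets the core complete it inline via nvme_try_complete_req(), adds it to the I/O completion batch, or falls back to the PCI-specific completion. A sketch; the AEN special case and the invalid-id warning are omitted, and the batch-completion callback on the unmatched line is assumed to be nvme_pci_complete_batch:

static inline void nvme_handle_cqe(struct nvme_queue *nvmeq,
				   struct io_comp_batch *iob, u16 idx)
{
	struct nvme_completion *cqe = &nvmeq->cqes[idx];
	__u16 command_id = READ_ONCE(cqe->command_id);
	struct request *req;

	req = nvme_find_rq(nvme_queue_tagset(nvmeq), command_id);
	if (unlikely(!req))
		return;		/* invalid id: warning omitted in this sketch */

	trace_nvme_sq(req, cqe->sq_head, nvmeq->sq_tail);
	if (!nvme_try_complete_req(req, cqe->status, cqe->result) &&
	    !blk_mq_add_to_batch(req, iob, nvme_req(req)->status,
				 nvme_pci_complete_batch))	/* callback name assumed */
		nvme_pci_complete_rq(req);
}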
1218 static enum rq_end_io_ret abort_endio(struct request *req, blk_status_t error)
1220 struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
1223 "Abort status: 0x%x", nvme_req(req)->status);
1225 blk_mq_free_request(req);
1280 static enum blk_eh_timer_return nvme_timeout(struct request *req)
1282 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
1283 struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
1308 nvme_poll(req->mq_hctx, NULL);
1312 if (blk_mq_rq_state(req) != MQ_RQ_IN_FLIGHT) {
1315 req->tag, nvmeq->qid);
1332 req->tag, nvmeq->qid);
1333 nvme_req(req)->flags |= NVME_REQ_CANCELLED;
1350 req->tag, nvmeq->qid);
1351 nvme_req(req)->flags |= NVME_REQ_CANCELLED;
1362 cmd.abort.cid = nvme_cid(req);
1367 req->tag,
1368 nvme_get_opcode_str(nvme_req(req)->cmd->common.opcode),
1384 * The aborted req will be completed on receiving the abort req.
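The timeout handler first polls the completion queue in case the interrupt was merely missed, gives up if the request has already left the in-flight state, marks the request NVME_REQ_CANCELLED on the reset/shutdown branches, and otherwise sends an admin Abort addressed by the request's command id, with abort_endio() as its end_io callback. A heavily condensed sketch; the reset/disable branches, the abort_limit accounting, and the admin-queue details follow my reading of the driver rather than the matched lines:

static enum blk_eh_timer_return nvme_timeout(struct request *req)
{
	struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
	struct nvme_dev *dev = nvmeq->dev;
	struct nvme_command cmd = { };
	struct request *abort_req;

	/* maybe the completion was just missed: poll once and re-check */
	nvme_poll(req->mq_hctx, NULL);
	if (blk_mq_rq_state(req) != MQ_RQ_IN_FLIGHT)
		return BLK_EH_DONE;

	/* (controller reset / shutdown branches that set NVME_REQ_CANCELLED
	 *  and disable the device are omitted from this sketch) */

	/* ask the controller to abort this command by CID and SQ id */
	cmd.abort.opcode = nvme_admin_abort_cmd;
	cmd.abort.cid = nvme_cid(req);
	cmd.abort.sqid = cpu_to_le16(nvmeq->qid);

	abort_req = blk_mq_alloc_request(dev->ctrl.admin_q, nvme_req_op(&cmd),
					 BLK_MQ_REQ_NOWAIT);
	if (IS_ERR(abort_req))
		return BLK_EH_RESET_TIMER;
	nvme_init_request(abort_req, &cmd);

	abort_req->end_io = abort_endio;
	abort_req->end_io_data = NULL;
	blk_execute_rq_nowait(abort_req, false);

	/* the aborted req will be completed on receiving the abort req */
	return BLK_EH_RESET_TIMER;
}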
2372 static enum rq_end_io_ret nvme_del_queue_end(struct request *req,
2375 struct nvme_queue *nvmeq = req->end_io_data;
2377 blk_mq_free_request(req);
2382 static enum rq_end_io_ret nvme_del_cq_end(struct request *req,
2385 struct nvme_queue *nvmeq = req->end_io_data;
2390 return nvme_del_queue_end(req, error);
2396 struct request *req;
2402 req = blk_mq_alloc_request(q, nvme_req_op(&cmd), BLK_MQ_REQ_NOWAIT);
2403 if (IS_ERR(req))
2404 return PTR_ERR(req);
2405 nvme_init_request(req, &cmd);
2408 req->end_io = nvme_del_cq_end;
2410 req->end_io = nvme_del_queue_end;
2411 req->end_io_data = nvmeq;
2414 blk_execute_rq_nowait(req, false);
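Queue teardown issues Delete SQ / Delete CQ admin commands asynchronously: the request is allocated with BLK_MQ_REQ_NOWAIT, given one of the two end_io callbacks matched above (nvme_del_cq_end chains into nvme_del_queue_end), and executed without waiting. A sketch; the command setup and the opcode test on the unmatched lines are reconstructed from my reading of the driver:

static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
{
	struct request_queue *q = nvmeq->dev->ctrl.admin_q;
	struct nvme_command cmd = { };
	struct request *req;

	cmd.delete_queue.opcode = opcode;	/* delete SQ or delete CQ */
	cmd.delete_queue.qid = cpu_to_le16(nvmeq->qid);

	req = blk_mq_alloc_request(q, nvme_req_op(&cmd), BLK_MQ_REQ_NOWAIT);
	if (IS_ERR(req))
		return PTR_ERR(req);
	nvme_init_request(req, &cmd);

	if (opcode == nvme_admin_delete_cq)
		req->end_io = nvme_del_cq_end;
	else
		req->end_io = nvme_del_queue_end;
	req->end_io_data = nvmeq;

	blk_execute_rq_nowait(req, false);
	return 0;
}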