Lines matching refs: iod

423 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
426 nvme_req(req)->cmd = &iod->cmd;
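
Lines 423-426 come from the request-init hook. For orientation, the fields referenced throughout this listing suggest a per-request context roughly like the sketch below. This is a reconstruction inferred from the matches, not a verbatim copy of the driver; the array bound NVME_MAX_NR_ALLOCATIONS and the exact field types are assumptions.

    /* Sketch (assumed layout): the per-request PDU these matches index into. */
    union nvme_descriptor {
        struct nvme_sgl_desc  *sg_list;   /* SGL descriptor page (line 718) */
        __le64                *prp_list;  /* PRP list page (lines 532, 627, 636) */
    };

    struct nvme_iod {
        struct nvme_request   req;
        struct nvme_command   cmd;            /* staged here, copied into the SQ (lines 897, 910) */
        bool                  aborted;        /* timeout/abort bookkeeping (lines 842, 1347, 1359) */
        s8                    nr_allocations; /* descriptor pages held: -1 none, 0 small pool, >=1 page pool */
        unsigned int          dma_len;        /* non-zero only on the single-bvec fast path */
        dma_addr_t            first_dma;      /* first descriptor page, or the lone mapped segment */
        dma_addr_t            meta_dma;       /* mapped integrity buffer (line 829) */
        struct sg_table       sgt;            /* scatterlist for the multi-segment path */
        union nvme_descriptor list[NVME_MAX_NR_ALLOCATIONS];
    };
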
527 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
528 dma_addr_t dma_addr = iod->first_dma;
531 for (i = 0; i < iod->nr_allocations; i++) {
532 __le64 *prp_list = iod->list[i].prp_list;
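
Lines 527-532 are the loop that tears down a chained PRP list. Filled out, it presumably reads roughly as below; the function name and the last_prp constant, which assumes the final slot of each page holds the bus address of the next page, are inferences.

    /* Sketch: return each PRP list page to its DMA pool, following the
     * chain pointer stored in the last entry of every page. */
    static void nvme_free_prps(struct nvme_dev *dev, struct request *req)
    {
        const int last_prp = NVME_CTRL_PAGE_SIZE / sizeof(__le64) - 1;
        struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
        dma_addr_t dma_addr = iod->first_dma;
        int i;

        for (i = 0; i < iod->nr_allocations; i++) {
            __le64 *prp_list = iod->list[i].prp_list;
            dma_addr_t next_dma_addr = le64_to_cpu(prp_list[last_prp]);

            dma_pool_free(dev->prp_page_pool, prp_list, dma_addr);
            dma_addr = next_dma_addr;
        }
    }
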
542 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
544 if (iod->dma_len) {
545 dma_unmap_page(dev->dev, iod->first_dma, iod->dma_len,
550 WARN_ON_ONCE(!iod->sgt.nents);
552 dma_unmap_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), 0);
554 if (iod->nr_allocations == 0)
555 dma_pool_free(dev->prp_small_pool, iod->list[0].sg_list,
556 iod->first_dma);
557 else if (iod->nr_allocations == 1)
558 dma_pool_free(dev->prp_page_pool, iod->list[0].sg_list,
559 iod->first_dma);
562 mempool_free(iod->sgt.sgl, dev->iod_mempool);
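
Lines 542-562 show the data-unmap path. Its final else branch does not appear above because it carries no iod reference of its own; a sketch of the full branch structure (the chained-free call is assumed to be the helper sketched earlier):

    /* Sketch of the teardown order: the single-bvec fast path is undone with
     * dma_unmap_page(); otherwise unmap the sg_table and then return the
     * descriptor page(s) to whichever pool they came from. */
    if (iod->dma_len) {
        dma_unmap_page(dev->dev, iod->first_dma, iod->dma_len, rq_dma_dir(req));
        return;
    }

    WARN_ON_ONCE(!iod->sgt.nents);
    dma_unmap_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), 0);

    if (iod->nr_allocations == 0)          /* one descriptor from the small pool */
        dma_pool_free(dev->prp_small_pool, iod->list[0].sg_list, iod->first_dma);
    else if (iod->nr_allocations == 1)     /* one full descriptor page */
        dma_pool_free(dev->prp_page_pool, iod->list[0].sg_list, iod->first_dma);
    else                                   /* chained PRP pages (assumed helper) */
        nvme_free_prps(dev, req);
    mempool_free(iod->sgt.sgl, dev->iod_mempool);
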
582 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
585 struct scatterlist *sg = iod->sgt.sgl;
595 iod->first_dma = 0;
609 iod->first_dma = dma_addr;
616 iod->nr_allocations = 0;
619 iod->nr_allocations = 1;
624 iod->nr_allocations = -1;
627 iod->list[0].prp_list = prp_list;
628 iod->first_dma = prp_dma;
636 iod->list[iod->nr_allocations++].prp_list = prp_list;
656 cmnd->dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sgt.sgl));
657 cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma);
663 WARN(DO_ONCE(nvme_print_sgl, iod->sgt.sgl, iod->sgt.nents),
665 blk_rq_payload_bytes(req), iod->sgt.nents);
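
Lines 582-665 are the PRP construction path. Two things the matches alone do not make obvious: what nr_allocations encodes, and what prp1/prp2 end up holding. Below is a condensed sketch of the allocation choice; the 256-byte small-pool threshold and the local variable names are assumptions.

    /* Sketch (body fragment): pick a descriptor pool for the PRP entries that
     * do not fit in prp1/prp2 themselves, then record it for teardown. */
    struct dma_pool *pool;
    int length = blk_rq_payload_bytes(req);   /* roughly: bytes left after prp1 */
    int nprps;
    __le64 *prp_list;
    dma_addr_t prp_dma;

    nprps = DIV_ROUND_UP(length, NVME_CTRL_PAGE_SIZE);
    if (nprps <= (256 / 8)) {
        pool = dev->prp_small_pool;    /* small transfer: 256-byte descriptor */
        iod->nr_allocations = 0;
    } else {
        pool = dev->prp_page_pool;     /* full page, may later be chained */
        iod->nr_allocations = 1;
    }

    prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
    if (!prp_list) {
        iod->nr_allocations = -1;      /* nothing for the unmap path to free */
        return BLK_STS_RESOURCE;
    }
    iod->list[0].prp_list = prp_list;
    iod->first_dma = prp_dma;

    /* When a page of PRPs fills up, a fresh page is allocated, recorded via
     * iod->list[iod->nr_allocations++], and chained through the previous
     * page's last slot.  The command then carries:
     *   prp1 = DMA address of the first data segment
     *   prp2 = 0, the second data page, or iod->first_dma (the list)  */

The WARN at lines 663-665 dumps the scatterlist via nvme_print_sgl() when the PRP walk detects a mapping that violates these rules.
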
688 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
691 struct scatterlist *sg = iod->sgt.sgl;
692 unsigned int entries = iod->sgt.nents;
706 iod->nr_allocations = 0;
709 iod->nr_allocations = 1;
714 iod->nr_allocations = -1;
718 iod->list[0].sg_list = sg_list;
719 iod->first_dma = sgl_dma;
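
Lines 688-719 are the SGL counterpart. The descriptor-filling helpers do not reference iod, so they are absent above; they are sketched here under the assumption that they follow the standard NVMe SGL data and last-segment descriptor formats.

    /* Sketch: one data descriptor per DMA-mapped scatterlist element ... */
    static void nvme_pci_sgl_set_data(struct nvme_sgl_desc *sge, struct scatterlist *sg)
    {
        sge->addr = cpu_to_le64(sg_dma_address(sg));
        sge->length = cpu_to_le32(sg_dma_len(sg));
        sge->type = NVME_SGL_FMT_DATA_DESC << 4;
    }

    /* ... and, when there is more than one element, a last-segment descriptor
     * in the command itself pointing at the array of data descriptors. */
    static void nvme_pci_sgl_set_seg(struct nvme_sgl_desc *sge, dma_addr_t dma_addr, int entries)
    {
        sge->addr = cpu_to_le64(dma_addr);
        sge->length = cpu_to_le32(entries * sizeof(*sge));
        sge->type = NVME_SGL_FMT_LAST_SEG_DESC << 4;
    }

As with the PRP path, lines 706-714 record which pool the descriptor array came from (0 = small pool, 1 = full page, -1 = allocation failed), and lines 718-719 stash the array and its bus address so the unmap path can return it.
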
734 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
738 iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0);
739 if (dma_mapping_error(dev->dev, iod->first_dma))
741 iod->dma_len = bv->bv_len;
743 cmnd->dptr.prp1 = cpu_to_le64(iod->first_dma);
745 cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma + first_prp_len);
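
Lines 734-745 are the single-bvec PRP fast path: a request with one physical segment that fits within two controller pages is mapped directly, with no scatterlist and no descriptor pool. A sketch with the pieces the matches skip filled in (the first_prp_len arithmetic is the usual PRP offset rule, assumed here):

    /* Sketch (body fragment): map one bio_vec and encode it as prp1, plus
     * prp2 if it crosses into a second controller page.  Setting iod->dma_len
     * is what steers the unmap path onto dma_unmap_page() later. */
    unsigned int offset = bv->bv_offset & (NVME_CTRL_PAGE_SIZE - 1);
    unsigned int first_prp_len = NVME_CTRL_PAGE_SIZE - offset;

    iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0);
    if (dma_mapping_error(dev->dev, iod->first_dma))
        return BLK_STS_RESOURCE;
    iod->dma_len = bv->bv_len;

    cmnd->dptr.prp1 = cpu_to_le64(iod->first_dma);
    if (bv->bv_len > first_prp_len)
        cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma + first_prp_len);
    else
        cmnd->dptr.prp2 = 0;
    return BLK_STS_OK;
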
755 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
757 iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0);
758 if (dma_mapping_error(dev->dev, iod->first_dma))
760 iod->dma_len = bv->bv_len;
763 cmnd->dptr.sgl.addr = cpu_to_le64(iod->first_dma);
764 cmnd->dptr.sgl.length = cpu_to_le32(iod->dma_len);
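
Lines 755-764 are the same fast path expressed as a single SGL data descriptor. The two lines the match omits (they do not mention iod) presumably set the SGL flag and descriptor type, roughly:

    cmnd->flags = NVME_CMD_SGL_METABUF;                 /* assumed: mark dptr as an SGL */
    cmnd->dptr.sgl.addr = cpu_to_le64(iod->first_dma);
    cmnd->dptr.sgl.length = cpu_to_le32(iod->dma_len);
    cmnd->dptr.sgl.type = NVME_SGL_FMT_DATA_DESC << 4;  /* assumed */
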
772 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
792 iod->dma_len = 0;
793 iod->sgt.sgl = mempool_alloc(dev->iod_mempool, GFP_ATOMIC);
794 if (!iod->sgt.sgl)
796 sg_init_table(iod->sgt.sgl, blk_rq_nr_phys_segments(req));
797 iod->sgt.orig_nents = blk_rq_map_sg(req->q, req, iod->sgt.sgl);
798 if (!iod->sgt.orig_nents)
801 rc = dma_map_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req),
809 if (nvme_pci_use_sgls(dev, req, iod->sgt.nents))
818 dma_unmap_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), 0);
820 mempool_free(iod->sgt.sgl, dev->iod_mempool);
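
Lines 772-820 are the top-level data-mapping routine. The matches only show the scatterlist slow path; the single-segment fast paths sit in the gap between lines 772 and 792. A sketch of the overall flow follows; the fast-path gating conditions are assumptions (thresholds differ across kernel versions), and the helper names other than nvme_pci_use_sgls (line 809) are reconstructed from the calls visible elsewhere in this listing.

    /* Sketch (body fragment): try the cheap single-bvec encodings first, else
     * fall back to a mempool-backed scatterlist mapped as one sg_table. */
    if (blk_rq_nr_phys_segments(req) == 1) {
        struct bio_vec bv = req_bvec(req);

        if (bv.bv_offset + bv.bv_len <= NVME_CTRL_PAGE_SIZE * 2)
            return nvme_setup_prp_simple(dev, req, &cmnd->rw, &bv);
        if (sgl_supported)                       /* assumed gate */
            return nvme_setup_sgl_simple(dev, req, &cmnd->rw, &bv);
    }

    iod->dma_len = 0;                            /* marks the sg_table path for unmap */
    iod->sgt.sgl = mempool_alloc(dev->iod_mempool, GFP_ATOMIC);
    if (!iod->sgt.sgl)
        return BLK_STS_RESOURCE;
    sg_init_table(iod->sgt.sgl, blk_rq_nr_phys_segments(req));
    iod->sgt.orig_nents = blk_rq_map_sg(req->q, req, iod->sgt.sgl);
    if (!iod->sgt.orig_nents)
        goto out_free_sg;

    rc = dma_map_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), DMA_ATTR_NO_WARN);
    if (rc)
        goto out_free_sg;

    /* Pick the on-the-wire encoding for the mapped list. */
    if (nvme_pci_use_sgls(dev, req, iod->sgt.nents))
        ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw);
    else
        ret = nvme_pci_setup_prps(dev, req, &cmnd->rw);
    if (ret != BLK_STS_OK)
        goto out_unmap_sg;
    return BLK_STS_OK;

    out_unmap_sg:
        dma_unmap_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), 0);
    out_free_sg:
        mempool_free(iod->sgt.sgl, dev->iod_mempool);
        return ret;
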
827 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
829 iod->meta_dma = dma_map_bvec(dev->dev, rq_integrity_vec(req),
831 if (dma_mapping_error(dev->dev, iod->meta_dma))
833 cmnd->rw.metadata = cpu_to_le64(iod->meta_dma);
839 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
842 iod->aborted = false;
843 iod->nr_allocations = -1;
844 iod->sgt.nents = 0;
851 ret = nvme_map_data(dev, req, &iod->cmd);
857 ret = nvme_map_metadata(dev, req, &iod->cmd);
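
Lines 839-857 are the per-request preparation step. The ordering matters: the teardown bookkeeping is reset before anything is mapped, so a failure at any point unwinds cleanly. A sketch of that sequence (nvme_setup_cmd is the core-driver helper; nvme_start_request and the unwind labels are assumptions):

    /* Sketch (body fragment): reset iod state, build the command, then map
     * data and, if present, integrity metadata. */
    iod->aborted = false;
    iod->nr_allocations = -1;        /* "no descriptor pages held yet" */
    iod->sgt.nents = 0;

    ret = nvme_setup_cmd(req->q->queuedata, req);   /* fills iod->cmd */
    if (ret)
        return ret;

    if (blk_rq_nr_phys_segments(req)) {
        ret = nvme_map_data(dev, req, &iod->cmd);
        if (ret)
            goto out_free_cmd;
    }
    if (blk_integrity_rq(req)) {
        ret = nvme_map_metadata(dev, req, &iod->cmd);
        if (ret)
            goto out_unmap_data;
    }
    nvme_start_request(req);         /* assumed */
    return BLK_STS_OK;
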
880 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
897 nvme_sq_copy_cmd(nvmeq, &iod->cmd);
908 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
910 nvme_sq_copy_cmd(nvmeq, &iod->cmd);
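
Lines 880-910 show why the command is staged in the PDU at all: both the single-request and the batched submission paths copy iod->cmd into the submission queue ring. A sketch of what that copy presumably does (the nvme_queue field names are assumptions):

    /* Sketch: memcpy the command into the SQ ring slot at the tail, then
     * advance the tail with wrap-around.  The doorbell write happens
     * separately, after one or more commands have been copied. */
    static inline void nvme_sq_copy_cmd(struct nvme_queue *nvmeq,
                                        struct nvme_command *cmd)
    {
        memcpy(nvmeq->sq_cmds + (nvmeq->sq_tail << nvmeq->sqes), cmd, sizeof(*cmd));
        if (++nvmeq->sq_tail == nvmeq->q_depth)
            nvmeq->sq_tail = 0;
    }
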
967 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
969 dma_unmap_page(dev->dev, iod->meta_dma,
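
Lines 967-969 are the completion-side counterpart of the metadata mapping at lines 827-833; the length argument cut off by the match is presumably the integrity bvec length:

    dma_unmap_page(dev->dev, iod->meta_dma,
                   rq_integrity_vec(req)->bv_len, rq_dma_dir(req));
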
1282 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
1347 if (!nvmeq->qid || iod->aborted) {
1359 iod->aborted = true;
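
Lines 1282-1359 come from the timeout handler. The policy the two matches imply: the first timeout of an I/O-queue command issues an Abort and marks the iod; an already-aborted command, or any admin-queue command, escalates instead. Sketch (the escalation call is an assumption):

    if (!nvmeq->qid || iod->aborted) {
        /* Second timeout, or admin queue: give up on aborting and
         * schedule a controller reset instead (assumed escalation). */
        nvme_reset_ctrl(&dev->ctrl);
        return BLK_EH_DONE;
    }
    iod->aborted = true;    /* only one Abort attempt per command */
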
2972 * Limit the max command size to prevent iod->sg allocations going