Lines matching refs: iod
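All of the matches below dereference fields of the per-request I/O descriptor that the NVMe PCI driver (drivers/nvme/host/pci.c, from which these lines are drawn) keeps in the blk-mq PDU. For context, here is a sketch of struct nvme_iod reconstructed from the fields this listing uses; it reflects the ~5.x-era layout and exact members may differ between kernel versions:

	struct nvme_iod {
		struct nvme_request req;	/* core nvme per-request state */
		struct nvme_command cmd;	/* SQE built in place (line 916) */
		struct nvme_queue *nvmeq;	/* owning queue (line 423) */
		bool use_sgl;			/* command uses SGLs rather than PRPs (line 876) */
		int aborted;			/* abort already sent for this request (line 1339) */
		int npages;			/* PRP/SGL list pages; -1 none, 0 small-pool entry (lines 598, 660-669) */
		int nents;			/* scatterlist entries actually mapped (line 863) */
		dma_addr_t first_dma;		/* first list page, or the lone mapped segment (lines 544, 805) */
		unsigned int dma_len;		/* non-zero on the single-segment fast path (line 808) */
		dma_addr_t meta_dma;		/* mapped integrity/metadata buffer (line 897) */
		struct scatterlist *sg;		/* mempool-backed scatterlist (line 859) */
	};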
418 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
423 iod->nvmeq = nvmeq;
519 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
520 return (void **)(iod->sg + blk_rq_nr_phys_segments(req));
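Line 520 works because the per-request buffer allocated from dev->iod_mempool (line 859) holds the scatterlist entries first, immediately followed by an array of page pointers for the PRP/SGL list pages; the helper simply returns the address one past the last possible scatterlist entry. A minimal userspace sketch of that single-allocation layout (all names here are stand-ins, not kernel identifiers):

	#include <stdio.h>
	#include <stdlib.h>

	/* Stand-ins for the kernel types; sizes are illustrative only. */
	struct fake_sg { void *page; unsigned int off, len; };

	struct fake_iod {
		struct fake_sg *sg;	/* start of the shared buffer */
		int nents;
	};

	/* One allocation holds nseg scatterlist entries followed by nptr page
	 * pointers; the pointer list begins right after the sg array, which is
	 * exactly what (void **)(iod->sg + nseg) computes. */
	static void **iod_list(struct fake_iod *iod, int nseg)
	{
		return (void **)(iod->sg + nseg);
	}

	int main(void)
	{
		int nseg = 4, nptr = 2;
		struct fake_iod iod;

		iod.sg = malloc(sizeof(struct fake_sg) * nseg + sizeof(void *) * nptr);
		iod.nents = nseg;

		void **list = iod_list(&iod, nseg);
		list[0] = NULL;		/* would hold a PRP/SGL list page */
		printf("sg array at %p, pointer list at %p\n", (void *)iod.sg, (void *)list);

		free(iod.sg);
		return 0;
	}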
525 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
533 if (!iod->nvmeq->qid)
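Lines 525 and 533 sit in the PRP-versus-SGL decision: admin-queue commands (qid 0) always use PRPs, and SGLs are picked only when the controller advertises SGL support and the average segment size of the request reaches the sgl_threshold module parameter. A hedged sketch of that heuristic, with the controller-capability test reduced to a boolean (paraphrased, not copied from any particular kernel release):

	#include <stdbool.h>

	/* Rough shape of the decision the listed checks belong to.
	 * nseg is at least 1 by the time the driver evaluates this. */
	bool use_sgls(bool ctrl_supports_sgls, int qid,
		      unsigned int payload_bytes, unsigned int nseg,
		      unsigned int sgl_threshold)
	{
		unsigned int avg_seg_size = (payload_bytes + nseg - 1) / nseg;

		if (!ctrl_supports_sgls)
			return false;
		if (!qid)			/* admin queue: PRPs only */
			return false;
		if (!sgl_threshold || avg_seg_size < sgl_threshold)
			return false;
		return true;
	}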
543 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
544 dma_addr_t dma_addr = iod->first_dma;
547 for (i = 0; i < iod->npages; i++) {
560 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
561 dma_addr_t dma_addr = iod->first_dma;
564 for (i = 0; i < iod->npages; i++) {
576 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
578 if (is_pci_p2pdma_page(sg_page(iod->sg)))
579 pci_p2pdma_unmap_sg(dev->dev, iod->sg, iod->nents,
582 dma_unmap_sg(dev->dev, iod->sg, iod->nents, rq_dma_dir(req));
587 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
589 if (iod->dma_len) {
590 dma_unmap_page(dev->dev, iod->first_dma, iod->dma_len,
595 WARN_ON_ONCE(!iod->nents);
598 if (iod->npages == 0)
600 iod->first_dma);
601 else if (iod->use_sgl)
605 mempool_free(iod->sg, dev->iod_mempool);
625 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
628 struct scatterlist *sg = iod->sg;
639 iod->first_dma = 0;
653 iod->first_dma = dma_addr;
660 iod->npages = 0;
663 iod->npages = 1;
668 iod->first_dma = dma_addr;
669 iod->npages = -1;
673 iod->first_dma = prp_dma;
681 list[iod->npages++] = prp_list;
701 cmnd->dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
702 cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma);
708 WARN(DO_ONCE(nvme_print_sgl, iod->sg, iod->nents),
710 blk_rq_payload_bytes(req), iod->nents);
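The group at lines 625-710 builds the command's PRP data pointers: prp1 covers the first (possibly offset) page, and prp2 is either the second page directly, a pointer to a single PRP list page, or the first of several chained list pages whose last slot points at the next page. A self-contained sketch of just the bookkeeping (page size and chaining rule per the NVMe spec; the kernel additionally serves short lists from a small 256-byte dma pool, which is ignored here):

	#include <stdio.h>

	#define PRP_PAGE_SIZE	4096u
	#define PRPS_PER_PAGE	(PRP_PAGE_SIZE / 8)	/* 512 8-byte entries per list page */

	/* PRP entries needed beyond the prp1 page for a transfer of len bytes
	 * starting at the given offset into its first page. */
	static unsigned int prp_entries(unsigned int len, unsigned int offset)
	{
		unsigned int total = offset + len;

		return total <= PRP_PAGE_SIZE ? 0 : (total - 1) / PRP_PAGE_SIZE;
	}

	/* List pages needed, given that every page followed by another one
	 * donates its last slot to chain to that next page. */
	static unsigned int prp_list_pages(unsigned int nprps)
	{
		if (nprps <= 1)			/* a single extra entry fits in prp2 itself */
			return 0;
		if (nprps <= PRPS_PER_PAGE)
			return 1;
		return 1 + (nprps - PRPS_PER_PAGE + PRPS_PER_PAGE - 2) / (PRPS_PER_PAGE - 1);
	}

	int main(void)
	{
		unsigned int sizes[] = { 4096, 8192, 65536, 2097152, 4194304 };

		for (unsigned int i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
			unsigned int n = prp_entries(sizes[i], 0);
			printf("%7u bytes -> %4u PRP entries, %u list page(s)\n",
			       sizes[i], n, prp_list_pages(n));
		}
		return 0;
	}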
738 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
741 struct scatterlist *sg = iod->sg;
755 iod->npages = 0;
758 iod->npages = 1;
763 iod->npages = -1;
768 iod->first_dma = sgl_dma;
782 nvme_pci_iod_list(req)[iod->npages++] = sg_list;
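Lines 738-782 fill NVMe SGL descriptors: one 16-byte data block descriptor per mapped scatterlist entry, written into list pages that the descriptor embedded in the command points at as a (last) segment. A compact sketch of the descriptor layout and the two fill operations, using the spec's type codes; field names mirror the kernel's struct nvme_sgl_desc but are reproduced from memory, and little-endian conversion is omitted:

	#include <stdint.h>
	#include <string.h>

	/* 16-byte SGL descriptor as defined by the NVMe spec. */
	struct sgl_desc {
		uint64_t addr;
		uint32_t length;
		uint8_t  rsvd[3];
		uint8_t  type;			/* descriptor type in bits 7:4 */
	};

	#define SGL_DATA_DESC		(0x0 << 4)	/* data block */
	#define SGL_SEG_DESC		(0x2 << 4)	/* segment, more follow */
	#define SGL_LAST_SEG_DESC	(0x3 << 4)	/* last segment */

	/* One data block descriptor per mapped scatterlist entry. */
	void set_data_desc(struct sgl_desc *sge, uint64_t dma_addr, uint32_t dma_len)
	{
		sge->addr = dma_addr;
		sge->length = dma_len;
		memset(sge->rsvd, 0, sizeof(sge->rsvd));
		sge->type = SGL_DATA_DESC;
	}

	/* Descriptor placed in the command's dptr, pointing at a list page
	 * holding `entries` further descriptors. */
	void set_seg_desc(struct sgl_desc *sge, uint64_t list_dma,
			  unsigned int entries, int more_segments_follow)
	{
		sge->addr = list_dma;
		sge->length = entries * sizeof(struct sgl_desc);
		memset(sge->rsvd, 0, sizeof(sge->rsvd));
		sge->type = more_segments_follow ? SGL_SEG_DESC : SGL_LAST_SEG_DESC;
	}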
801 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
805 iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0);
806 if (dma_mapping_error(dev->dev, iod->first_dma))
808 iod->dma_len = bv->bv_len;
810 cmnd->dptr.prp1 = cpu_to_le64(iod->first_dma);
812 cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma + first_prp_len);
822 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
824 iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0);
825 if (dma_mapping_error(dev->dev, iod->first_dma))
827 iod->dma_len = bv->bv_len;
830 cmnd->dptr.sgl.addr = cpu_to_le64(iod->first_dma);
831 cmnd->dptr.sgl.length = cpu_to_le32(iod->dma_len);
839 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
851 if (iod->nvmeq->qid && sgl_threshold &&
858 iod->dma_len = 0;
859 iod->sg = mempool_alloc(dev->iod_mempool, GFP_ATOMIC);
860 if (!iod->sg)
862 sg_init_table(iod->sg, blk_rq_nr_phys_segments(req));
863 iod->nents = blk_rq_map_sg(req->q, req, iod->sg);
864 if (!iod->nents)
867 if (is_pci_p2pdma_page(sg_page(iod->sg)))
868 nr_mapped = pci_p2pdma_map_sg_attrs(dev->dev, iod->sg,
869 iod->nents, rq_dma_dir(req), DMA_ATTR_NO_WARN);
871 nr_mapped = dma_map_sg_attrs(dev->dev, iod->sg, iod->nents,
876 iod->use_sgl = nvme_pci_use_sgls(dev, req);
877 if (iod->use_sgl)
888 mempool_free(iod->sg, dev->iod_mempool);
895 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
897 iod->meta_dma = dma_map_bvec(dev->dev, rq_integrity_vec(req),
899 if (dma_mapping_error(dev->dev, iod->meta_dma))
901 cmnd->rw.metadata = cpu_to_le64(iod->meta_dma);
915 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
916 struct nvme_command *cmnd = &iod->cmd;
919 iod->aborted = 0;
920 iod->npages = -1;
921 iod->nents = 0;
959 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
960 struct nvme_dev *dev = iod->nvmeq->dev;
963 dma_unmap_page(dev->dev, iod->meta_dma,
1203 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
1204 struct nvme_queue *nvmeq = iod->nvmeq;
1257 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
1258 struct nvme_queue *nvmeq = iod->nvmeq;
1324 if (!nvmeq->qid || iod->aborted) {
1339 iod->aborted = 1;
2644 * Limit the max command size to prevent iod->sg allocations going
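In the kernel source this comment continues "... over a single page": max_hw_sectors and max_segments are capped so that the iod_mempool buffer (scatterlist entries plus the page-pointer list returned at line 520) stays within one page. A back-of-envelope check, assuming this era's NVME_MAX_SEGS of 127 and a 32-byte struct scatterlist on 64-bit x86 (both constants are assumptions, not taken from this listing):

	#include <stdio.h>

	int main(void)
	{
		const unsigned int max_segs = 127;	/* assumed NVME_MAX_SEGS */
		const unsigned int sg_entry_size = 32;	/* assumed sizeof(struct scatterlist), x86-64 */
		const unsigned int list_ptrs = 4;	/* handful of list-page pointers (assumed bound) */

		unsigned int sg_bytes = max_segs * sg_entry_size;
		unsigned int total = sg_bytes + list_ptrs * (unsigned int)sizeof(void *);

		printf("scatterlist: %u bytes, plus pointer list: %u bytes (page: 4096)\n",
		       sg_bytes, total);
		return 0;
	}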