Lines matching refs: iod — cross-reference hits for the iod identifier. The number at the start of each line below is the source line number in the Apple NVMe driver (drivers/nvme/host/apple.c, judging by the apple_nvme_* symbols); only the matching line of each function is shown.
337 struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
339 return (void **)(iod->sg + blk_rq_nr_phys_segments(req));
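Lines 337–339 are the body of the iod_list accessor: the per-request PDU stores the scatterlist array first, and the array of PRP-list page pointers is stashed immediately after the last scatterlist entry. A minimal sketch of the layout these lines imply (field names beyond those visible in the hits are assumptions):

static void **apple_nvme_iod_list(struct request *req)
{
        struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);

        /* The PRP-list pointer array lives right after the sg entries. */
        return (void **)(iod->sg + blk_rq_nr_phys_segments(req));
}

/* Assumed per-request private data, reconstructed from the hits below. */
struct apple_nvme_iod {
        struct nvme_request req;  /* assumed: nvme core expects this first */
        struct nvme_command cmd;
        struct apple_nvme_queue *q;
        int npages;               /* -1: no PRP pages, 0: small pool, >0: full pages */
        int nents;                /* mapped scatterlist entries */
        dma_addr_t first_dma;
        unsigned int dma_len;     /* non-zero only on the single-bvec fast path */
        struct scatterlist *sg;   /* followed in memory by the PRP page pointers */
};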
345 struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
346 dma_addr_t dma_addr = iod->first_dma;
349 for (i = 0; i < iod->npages; i++) {
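Lines 345–349 are from the PRP-page free path. Each PRP list page ends with the DMA address of the next page, so the loop can walk and free npages pages starting at first_dma. A hedged reconstruction (the prp_page_pool name and NVME_CTRL_PAGE_SIZE arithmetic are assumptions modeled on the nvme-pci driver):

static void apple_nvme_free_prps(struct apple_nvme *anv, struct request *req)
{
        const int last_prp = NVME_CTRL_PAGE_SIZE / sizeof(__le64) - 1;
        struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
        dma_addr_t dma_addr = iod->first_dma;
        int i;

        for (i = 0; i < iod->npages; i++) {
                __le64 *prp_list = apple_nvme_iod_list(req)[i];
                /* The last slot of each page chains to the next PRP page. */
                dma_addr_t next_dma_addr = le64_to_cpu(prp_list[last_prp]);

                dma_pool_free(anv->prp_page_pool, prp_list, dma_addr);
                dma_addr = next_dma_addr;
        }
}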
360 struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
362 if (iod->dma_len) {
363 dma_unmap_page(anv->dev, iod->first_dma, iod->dma_len,
368 WARN_ON_ONCE(!iod->nents);
370 dma_unmap_sg(anv->dev, iod->sg, iod->nents, rq_dma_dir(req));
371 if (iod->npages == 0)
373 iod->first_dma);
376 mempool_free(iod->sg, anv->iod_mempool);
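Lines 360–376 cover both teardown paths: a non-zero dma_len marks the single-segment fast path (undone with dma_unmap_page), while everything else unmaps the scatterlist, releases the PRP pages (npages == 0 apparently meaning a single small-pool entry), and returns the sg array to the mempool. A sketch under those assumptions:

static void apple_nvme_unmap_data(struct apple_nvme *anv, struct request *req)
{
        struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);

        if (iod->dma_len) {
                /* Fast path: a single bio_vec was mapped directly. */
                dma_unmap_page(anv->dev, iod->first_dma, iod->dma_len,
                               rq_dma_dir(req));
                return;
        }

        WARN_ON_ONCE(!iod->nents);

        dma_unmap_sg(anv->dev, iod->sg, iod->nents, rq_dma_dir(req));
        if (iod->npages == 0)
                dma_pool_free(anv->prp_small_pool, apple_nvme_iod_list(req)[0],
                              iod->first_dma);
        else
                apple_nvme_free_prps(anv, req);
        mempool_free(iod->sg, anv->iod_mempool);
}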
397 struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
400 struct scatterlist *sg = iod->sg;
411 iod->first_dma = 0;
425 iod->first_dma = dma_addr;
432 iod->npages = 0;
435 iod->npages = 1;
440 iod->first_dma = dma_addr;
441 iod->npages = -1;
445 iod->first_dma = prp_dma;
454 list[iod->npages++] = prp_list;
474 cmnd->dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
475 cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma);
481 WARN(DO_ONCE(apple_nvme_print_sgl, iod->sg, iod->nents),
483 iod->nents);
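Lines 397–483 belong to the PRP builder, and the visible assignments spell out the npages convention: -1 when no PRP list is needed (line 441 is the two-PRP case, where first_dma goes straight into prp2), 0 when one small-pool allocation suffices, and a positive count when full pages are chained; lines 481–483 are the error path that dumps the scatterlist once. An illustrative, deliberately simplified reconstruction of the core loop (the payload is assumed pre-split into NVME_CTRL_PAGE_SIZE chunks, the small-pool case is skipped, error unwinding is omitted, and the function name is hypothetical):

static int sketch_build_prps(struct apple_nvme *anv, struct request *req,
                             struct nvme_rw_command *cmnd,
                             dma_addr_t *chunk, int nchunks)
{
        const int last_prp = NVME_CTRL_PAGE_SIZE / sizeof(__le64) - 1;
        struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
        __le64 *prp_list;
        dma_addr_t prp_dma;
        int i, k = 0;

        cmnd->dptr.prp1 = cpu_to_le64(chunk[k++]);
        if (nchunks == 1) {             /* one PRP: no list, no prp2 */
                iod->first_dma = 0;
                iod->npages = -1;
                return 0;
        }
        if (nchunks == 2) {             /* two PRPs: prp2 is a plain address */
                iod->first_dma = chunk[k];
                iod->npages = -1;
                cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma);
                return 0;
        }

        /* Three or more PRPs: prp2 points at a chained list of pages. */
        prp_list = dma_pool_alloc(anv->prp_page_pool, GFP_ATOMIC, &prp_dma);
        if (!prp_list)
                return -ENOMEM;
        apple_nvme_iod_list(req)[0] = prp_list;
        iod->npages = 1;
        iod->first_dma = prp_dma;

        i = 0;
        while (k < nchunks) {
                if (i == last_prp) {
                        /* Page full: chain a fresh page via the last slot. */
                        __le64 *old = prp_list;

                        prp_list = dma_pool_alloc(anv->prp_page_pool,
                                                  GFP_ATOMIC, &prp_dma);
                        if (!prp_list)
                                return -ENOMEM;
                        apple_nvme_iod_list(req)[iod->npages++] = prp_list;
                        old[i] = cpu_to_le64(prp_dma);
                        i = 0;
                }
                prp_list[i++] = cpu_to_le64(chunk[k++]);
        }
        cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma);
        return 0;
}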
492 struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
496 iod->first_dma = dma_map_bvec(anv->dev, bv, rq_dma_dir(req), 0);
497 if (dma_mapping_error(anv->dev, iod->first_dma))
499 iod->dma_len = bv->bv_len;
501 cmnd->dptr.prp1 = cpu_to_le64(iod->first_dma);
503 cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma + first_prp_len);
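Lines 492–503 are the fast path for a request carried by a single bio_vec: the bvec is DMA-mapped directly, dma_len records that fact for the unmap path above, prp1 takes the mapped address, and prp2 is only filled when the buffer crosses a device-page boundary. A sketch (the first_prp_len computation is assumed from context):

static blk_status_t apple_nvme_setup_prp_simple(struct apple_nvme *anv,
                                                struct request *req,
                                                struct nvme_rw_command *cmnd,
                                                struct bio_vec *bv)
{
        struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
        unsigned int offset = bv->bv_offset & (NVME_CTRL_PAGE_SIZE - 1);
        unsigned int first_prp_len = NVME_CTRL_PAGE_SIZE - offset;

        iod->first_dma = dma_map_bvec(anv->dev, bv, rq_dma_dir(req), 0);
        if (dma_mapping_error(anv->dev, iod->first_dma))
                return BLK_STS_RESOURCE;
        iod->dma_len = bv->bv_len;

        cmnd->dptr.prp1 = cpu_to_le64(iod->first_dma);
        if (bv->bv_len > first_prp_len)
                cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma + first_prp_len);
        return BLK_STS_OK;
}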
511 struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
523 iod->dma_len = 0;
524 iod->sg = mempool_alloc(anv->iod_mempool, GFP_ATOMIC);
525 if (!iod->sg)
527 sg_init_table(iod->sg, blk_rq_nr_phys_segments(req));
528 iod->nents = blk_rq_map_sg(req->q, req, iod->sg);
529 if (!iod->nents)
532 nr_mapped = dma_map_sg_attrs(anv->dev, iod->sg, iod->nents,
543 dma_unmap_sg(anv->dev, iod->sg, iod->nents, rq_dma_dir(req));
545 mempool_free(iod->sg, anv->iod_mempool);
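Lines 511–545 tie the pieces together: after ruling out the simple case, the scatterlist is drawn from the mempool with GFP_ATOMIC (this runs in the I/O path), populated with blk_rq_map_sg(), DMA-mapped, and handed to the PRP builder; the two trailing hits (543, 545) are the error unwind. A sketch with the branch structure filled in (the simple-path gate is an assumption based on the fast path above):

static blk_status_t apple_nvme_map_data(struct apple_nvme *anv,
                                        struct request *req,
                                        struct nvme_command *cmnd)
{
        struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
        blk_status_t ret = BLK_STS_RESOURCE;
        int nr_mapped;

        if (blk_rq_nr_phys_segments(req) == 1) {
                struct bio_vec bv = req_bvec(req);

                /* Assumed gate: a small unsplit buffer skips the sg path. */
                if (bv.bv_offset + bv.bv_len <= NVME_CTRL_PAGE_SIZE * 2)
                        return apple_nvme_setup_prp_simple(anv, req,
                                                           &cmnd->rw, &bv);
        }

        iod->dma_len = 0;
        iod->sg = mempool_alloc(anv->iod_mempool, GFP_ATOMIC);
        if (!iod->sg)
                return BLK_STS_RESOURCE;
        sg_init_table(iod->sg, blk_rq_nr_phys_segments(req));
        iod->nents = blk_rq_map_sg(req->q, req, iod->sg);
        if (!iod->nents)
                goto out_free_sg;

        nr_mapped = dma_map_sg_attrs(anv->dev, iod->sg, iod->nents,
                                     rq_dma_dir(req), DMA_ATTR_NO_WARN);
        if (!nr_mapped)
                goto out_free_sg;

        ret = apple_nvme_setup_prps(anv, req, &cmnd->rw);
        if (ret != BLK_STS_OK)
                goto out_unmap_sg;
        return BLK_STS_OK;

out_unmap_sg:
        dma_unmap_sg(anv->dev, iod->sg, iod->nents, rq_dma_dir(req));
out_free_sg:
        mempool_free(iod->sg, anv->iod_mempool);
        return ret;
}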
551 struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
552 struct apple_nvme *anv = queue_to_apple_nvme(iod->q);
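Lines 551–552 show why the iod keeps a queue back-pointer: the completion path receives only the request, so iod->q is the route back to the owning queue and, via queue_to_apple_nvme(), to the controller that must unmap the data. Roughly (the body past these two lines is an assumption):

static void apple_nvme_complete_rq(struct request *req)
{
        struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
        struct apple_nvme *anv = queue_to_apple_nvme(iod->q);

        /* Assumed body: tear down the mapping, then complete upward. */
        if (blk_rq_nr_phys_segments(req))
                apple_nvme_unmap_data(anv, req);
        nvme_complete_rq(req);
}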
739 struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
740 struct nvme_command *cmnd = &iod->cmd;
743 iod->npages = -1;
744 iod->nents = 0;
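Lines 739–744 are from the submission side: before any mapping happens, the iod is reset so the error and teardown paths above always see consistent state; npages = -1 says "no PRP pages allocated yet" and nents = 0 says "nothing mapped yet". A sketched opening of the queue_rq handler (everything after the reset is elided):

static blk_status_t apple_nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
                                        const struct blk_mq_queue_data *bd)
{
        struct request *req = bd->rq;
        struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
        struct nvme_command *cmnd = &iod->cmd;  /* filled by nvme_setup_cmd() */

        /* Reset per-request state so unmap/free stay well-defined even if
         * mapping fails partway through. */
        iod->npages = -1;
        iod->nents = 0;
        /* ... assumed: nvme_setup_cmd(), apple_nvme_map_data(cmnd), submit ... */
        return BLK_STS_OK;
}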
788 struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
791 iod->q = q;
793 nreq->cmd = &iod->cmd;
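Lines 788–793 run once per request at tag-set init time, not per I/O: the queue pointer is cached in the PDU and the nvme core is pointed at the embedded command buffer, so the hot path never has to look either one up. A plausible shape (the set->driver_data source of q is an assumption):

static int apple_nvme_init_request(struct blk_mq_tag_set *set,
                                   struct request *req, unsigned int hctx_idx,
                                   unsigned int numa_node)
{
        struct apple_nvme_queue *q = set->driver_data;  /* assumed source */
        struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
        struct nvme_request *nreq = nvme_req(req);

        iod->q = q;               /* back-pointer used at completion time */
        nreq->cmd = &iod->cmd;    /* core builds the command in place */
        return 0;
}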
878 struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
879 struct apple_nvme_queue *q = iod->q;
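Lines 878–879 repeat the same back-pointer pattern in what is most likely the timeout handler: given only the stuck request, iod->q recovers the owning queue so the handler can decide how to recover. The opening would look something like this (function name and return value are assumptions):

static enum blk_eh_timer_return apple_nvme_timeout(struct request *req)
{
        struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);
        struct apple_nvme_queue *q = iod->q;  /* queue that owns the request */

        /* ... assumed: abort/controller-reset policy elided ... */
        return BLK_EH_RESET_TIMER;
}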
1049 * Limit the max command size to prevent iod->sg allocations going
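Line 1049 is part of a setup comment (the sentence presumably continues "...over a single page" on the next source line): iod->sg comes from a fixed-size mempool, so the maximum transfer size and segment count must be capped such that the scatterlist plus the trailing PRP-pointer array (the layout from lines 337–339) fit in one allocation. A sketch of the sizing arithmetic this implies, modeled on nvme-pci (both constants are hypothetical):

#define APPLE_NVME_MAX_KB   2048  /* hypothetical transfer cap, in KiB */
#define APPLE_NVME_MAX_SEGS 127   /* hypothetical segment cap */

static size_t apple_nvme_iod_alloc_size(void)
{
        /* Worst-case PRP entries, then PRP-list pages (one __le64 per page
         * is lost to the chain pointer). */
        unsigned int nprps = DIV_ROUND_UP(APPLE_NVME_MAX_KB * 1024 +
                                          NVME_CTRL_PAGE_SIZE,
                                          NVME_CTRL_PAGE_SIZE);
        unsigned int npages = DIV_ROUND_UP(sizeof(__le64) * nprps,
                                           NVME_CTRL_PAGE_SIZE - sizeof(__le64));

        return sizeof(__le64 *) * npages +
               sizeof(struct scatterlist) * APPLE_NVME_MAX_SEGS;
}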