Lines matching refs:iova
20 #include <linux/iova.h>
38 dma_addr_t iova;
257 pr_warn("iova flush queue initialization failed\n");
417 msi_page->iova = start;
664 unsigned long shift, iova_len, iova;
691 iova = alloc_iova_fast(iovad, iova_len,
693 if (iova)
700 iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift, true);
702 return (dma_addr_t)iova << shift;
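
A minimal sketch of the allocation pattern these fragments come from (source lines 664-702). example_alloc_iova() is a hypothetical name; the real iommu_dma_alloc_iova() additionally tries a 32-bit limit first and rounds the length for the IOVA rcaches, which is omitted here. Only the retry-with-rcache-flush and the PFN-to-dma_addr_t shift are kept.

#include <linux/iova.h>

static dma_addr_t example_alloc_iova(struct iova_domain *iovad,
                                     size_t size, u64 dma_limit)
{
        unsigned long shift = iova_shift(iovad);
        unsigned long iova_len = iova_align(iovad, size) >> shift;
        unsigned long iova;

        /* Cheap attempt first: leave the per-CPU IOVA rcaches alone. */
        iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift, false);
        if (!iova)
                /* Retry, this time allowing cached ranges to be flushed. */
                iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift, true);

        /* alloc_iova_fast() hands back a page-frame number; shift it
         * back into a bus address. 0 (failure) stays 0. */
        return (dma_addr_t)iova << shift;
}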
706 dma_addr_t iova, size_t size, struct iommu_iotlb_gather *gather)
714 queue_iova(cookie, iova_pfn(iovad, iova),
718 free_iova_fast(iovad, iova_pfn(iovad, iova),
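
The release side (source lines 706-718) converts the handle back into PFN-and-length form. This hypothetical example_free_iova() keeps only the immediate free; the real code prefers queue_iova() so the IOTLB flush can be batched, and only falls back to free_iova_fast() when no flush queue is in use.

#include <linux/iova.h>

static void example_free_iova(struct iova_domain *iovad,
                              dma_addr_t iova, size_t size)
{
        /* Back from bus address to page-frame number and granule count. */
        free_iova_fast(iovad, iova_pfn(iovad, iova),
                       size >> iova_shift(iovad));
}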
752 dma_addr_t iova;
760 iova = iommu_dma_alloc_iova(domain, size, dma_mask, dev);
761 if (!iova)
764 if (iommu_map(domain, iova, phys - iova_off, size, prot, GFP_ATOMIC)) {
765 iommu_dma_free_iova(cookie, iova, size, NULL);
768 return iova + iova_off;
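
Source lines 752-768 are the single-buffer map path. The sketch below is written as if it sat inside the same file, next to the static helpers named above (iommu_dma_alloc_iova(), iommu_dma_free_iova()); example_map_single() and its explicit cookie/iovad parameters are simplifications, not kernel symbols. The point it illustrates is the offset handling: the IOMMU mapping starts at the granule-aligned physical base, and the intra-granule offset is added back onto the returned DMA handle.

static dma_addr_t example_map_single(struct iommu_domain *domain,
                                     struct iommu_dma_cookie *cookie,
                                     struct iova_domain *iovad,
                                     phys_addr_t phys, size_t size,
                                     int prot, u64 dma_mask,
                                     struct device *dev)
{
        size_t iova_off = iova_offset(iovad, phys);
        dma_addr_t iova;

        /* Grow the mapping so that offset + size fills whole granules. */
        size = iova_align(iovad, size + iova_off);

        iova = iommu_dma_alloc_iova(domain, size, dma_mask, dev);
        if (!iova)
                return DMA_MAPPING_ERROR;

        /* Map from the aligned physical base ... */
        if (iommu_map(domain, iova, phys - iova_off, size, prot, GFP_ATOMIC)) {
                iommu_dma_free_iova(cookie, iova, size, NULL);
                return DMA_MAPPING_ERROR;
        }
        /* ... and return the handle with the offset folded back in. */
        return iova + iova_off;
}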
845 dma_addr_t iova;
869 iova = iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev);
870 if (!iova)
891 ret = iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents, ioprot,
896 sgt->sgl->dma_address = iova;
903 iommu_dma_free_iova(cookie, iova, size, NULL);
1040 dma_addr_t iova, dma_mask = dma_get_mask(dev);
1081 iova = __iommu_dma_map(dev, phys, size, prot, dma_mask);
1082 if (iova == DMA_MAPPING_ERROR && is_swiotlb_buffer(dev, phys))
1084 return iova;
1251 dma_addr_t iova;
1335 iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
1336 if (!iova) {
1345 ret = iommu_map_sg(domain, iova, sg, nents, prot, GFP_ATOMIC);
1349 return __finalise_sg(dev, sg, nents, iova);
1352 iommu_dma_free_iova(cookie, iova, iova_len, NULL);
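
Source lines 1251-1352 are the scatter-gather equivalent: one IOVA window for the whole list, filled by a single iommu_map_sg() call. The sketch assumes the same file-local helpers and includes; example_map_sg() is a hypothetical name, iova_len stands for the granule-aligned total of all segments (computed while pre-processing the list in the real code), and the per-segment rewrite done by __finalise_sg() is reduced to a comment.

static int example_map_sg(struct iommu_domain *domain,
                          struct iommu_dma_cookie *cookie,
                          struct device *dev, struct scatterlist *sg,
                          int nents, size_t iova_len, int prot)
{
        dma_addr_t iova;
        ssize_t ret;

        /* One contiguous IOVA window covers the whole scatterlist. */
        iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
        if (!iova)
                return 0;

        ret = iommu_map_sg(domain, iova, sg, nents, prot, GFP_ATOMIC);
        if (ret < 0 || ret < iova_len) {
                /* Partial or failed mapping: hand the window back. */
                iommu_dma_free_iova(cookie, iova, iova_len, NULL);
                return 0;
        }

        /* The real code now points each segment's sg_dma_address() /
         * sg_dma_len() into the new window (__finalise_sg()). */
        return nents;
}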
1662 dma_addr_t iova;
1675 iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
1676 if (!iova)
1679 if (iommu_map(domain, iova, msi_addr, size, prot, GFP_KERNEL))
1684 msi_page->iova = iova;
1689 iommu_dma_free_iova(cookie, iova, size, NULL);
1746 msg->address_hi = upper_32_bits(msi_page->iova);
1748 msg->address_lo += lower_32_bits(msi_page->iova);
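
The last two fragments (source lines 1746-1748) rewrite an MSI message so the device targets the remapped doorbell: msi_page->iova, set up at source lines 1662-1689, replaces the physical doorbell address while the offset inside the MSI granule is preserved. Below is a hedged restatement of that arithmetic; example_compose_msi_msg() and the explicit granule parameter are stand-ins for the cookie plumbing.

#include <linux/msi.h>

static void example_compose_msi_msg(struct msi_msg *msg,
                                    dma_addr_t msi_iova, u64 granule)
{
        msg->address_hi = upper_32_bits(msi_iova);
        msg->address_lo &= granule - 1;             /* keep the doorbell offset */
        msg->address_lo += lower_32_bits(msi_iova); /* splice in the IOVA       */
}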