Lines matching references to iova
764 dma_addr_t iova;
808 iova = mapping->base + (mapping_size * i);
809 iova += start << PAGE_SHIFT;
811 return iova;
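
Read together, the hits at 764-811 are the tail of the iova allocator (this listing matches the ARM IOMMU dma-mapping code, arch/arm/mm/dma-mapping.c, as far as I can tell): once a free run is found at bit start of bitmap extension i, the returned address is the mapping base, plus i whole extensions, plus the bit offset scaled up to pages. A minimal userspace sketch of just that arithmetic; struct iova_mapping and iova_for_hit are simplified stand-ins, not the kernel's real dma_iommu_mapping:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

/* simplified stand-in for the kernel's per-device mapping state */
struct iova_mapping {
    uint64_t base;         /* lowest iova the allocator hands out */
    uint64_t mapping_size; /* bytes covered by one bitmap extension */
};

/* iova for a free run starting at bit 'start' of extension 'i' */
static uint64_t iova_for_hit(const struct iova_mapping *m,
                             unsigned int i, unsigned int start)
{
    uint64_t iova = m->base + (m->mapping_size * i);

    iova += (uint64_t)start << PAGE_SHIFT;
    return iova;
}

int main(void)
{
    struct iova_mapping m = { .base = 0x80000000ULL,
                              .mapping_size = 1ULL << 20 };

    /* bit 3 of extension 2: 0x80000000 + 2 MiB + 3 pages = 0x80203000 */
    printf("0x%llx\n", (unsigned long long)iova_for_hit(&m, 2, 3));
    return 0;
}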
835 * The address range to be freed reaches into the iova
970 dma_addr_t dma_addr, iova;
977 iova = dma_addr;
990 ret = iommu_map(mapping->domain, iova, phys, len,
995 iova += len;
1000 iommu_unmap(mapping->domain, dma_addr, iova-dma_addr);
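
The hits at 970-1000 show the map loop's rollback discipline: each physically contiguous run is mapped at the cursor iova, the cursor advances by len, and if any iommu_map() fails, exactly iova - dma_addr bytes starting at dma_addr (everything mapped so far, and nothing more) are torn down. A self-contained sketch of that pattern with the IOMMU calls stubbed out; my_map and my_unmap are illustrative stand-ins for iommu_map()/iommu_unmap(), and the forced failure on the third chunk is only there to exercise the error path:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t dma_addr_t;

/* stand-in for iommu_map(); fails on the third call */
static int my_map(dma_addr_t iova, uint64_t phys, size_t len)
{
    static int calls;

    printf("map   0x%llx -> phys 0x%llx +0x%zx\n",
           (unsigned long long)iova, (unsigned long long)phys, len);
    return ++calls == 3 ? -1 : 0;
}

/* stand-in for iommu_unmap() */
static void my_unmap(dma_addr_t iova, size_t len)
{
    printf("unmap 0x%llx +0x%zx\n", (unsigned long long)iova, len);
}

static int map_chunks(dma_addr_t dma_addr, const uint64_t *phys,
                      const size_t *len, int n)
{
    dma_addr_t iova = dma_addr;
    int i;

    for (i = 0; i < n; i++) {
        if (my_map(iova, phys[i], len[i]) != 0)
            goto fail;
        iova += len[i];    /* advance the cursor past this chunk */
    }
    return 0;
fail:
    /* undo only what was actually mapped: [dma_addr, iova) */
    my_unmap(dma_addr, iova - dma_addr);
    return -1;
}

int main(void)
{
    uint64_t phys[] = { 0x1000, 0x8000, 0x20000 };
    size_t len[]    = { 0x1000, 0x2000, 0x1000 };

    return map_chunks(0x80000000ULL, phys, len, 3) ? 1 : 0;
}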
1005 static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size)
1010 * add optional in-page offset from iova to size and align
1013 size = PAGE_ALIGN((iova & ~PAGE_MASK) + size);
1014 iova &= PAGE_MASK;
1016 iommu_unmap(mapping->domain, iova, size);
1017 __free_iova(mapping, iova, size);
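
Lines 1013-1014 make the unmap cover whole pages: the in-page offset of iova is folded into size before rounding up, and iova itself is then rounded down. A quick standalone check of that arithmetic, with PAGE_ALIGN/PAGE_MASK spelled out for an assumed 4 KiB page and made-up input values:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 0x1000ULL
#define PAGE_MASK (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
    uint64_t iova = 0x80001234, size = 0x0100;

    /* fold the in-page offset into the length, then align both */
    size = PAGE_ALIGN((iova & ~PAGE_MASK) + size);
    iova &= PAGE_MASK;

    /* prints iova=0x80001000 size=0x1000: one whole page */
    printf("iova=0x%llx size=0x%llx\n",
           (unsigned long long)iova, (unsigned long long)size);
    return 0;
}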
1193 dma_addr_t iova, iova_base;
1202 iova_base = iova = __alloc_iova(mapping, size);
1203 if (iova == DMA_MAPPING_ERROR)
1215 ret = iommu_map(mapping->domain, iova, phys, len, prot,
1220 iova += len;
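
Lines 1193-1220 repeat the cursor pattern for one scatterlist chunk, with a detail worth noticing: the whole range is reserved up front, and the untouched copy kept in iova_base is both what the caller gets back as the chunk's DMA address and what the error path hands to __free_iova(). A compressed sketch of that shape; alloc_range, free_range and map_all are stand-ins for __alloc_iova(), __free_iova() and the inner mapping loop:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t dma_addr_t;
#define DMA_MAPPING_ERROR ((dma_addr_t)~0ULL)

/* stand-ins for __alloc_iova(), __free_iova() and the map loop */
static dma_addr_t alloc_range(size_t size) { (void)size; return 0x80000000ULL; }
static void free_range(dma_addr_t b, size_t s) { (void)b; (void)s; }
static int map_all(dma_addr_t iova, size_t size) { (void)iova; (void)size; return 0; }

static int map_sg_chunk(size_t size, dma_addr_t *dma)
{
    dma_addr_t iova_base, iova;

    /* reserve the whole chunk first; keep the base for the caller */
    iova_base = iova = alloc_range(size);
    if (iova == DMA_MAPPING_ERROR)
        return -1;

    if (map_all(iova, size) != 0) {
        free_range(iova_base, size);    /* base, not the cursor */
        return -1;
    }
    *dma = iova_base;
    return 0;
}

int main(void)
{
    dma_addr_t dma;

    if (map_sg_chunk(0x3000, &dma) == 0)
        printf("chunk mapped at 0x%llx\n", (unsigned long long)dma);
    return 0;
}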
1412 dma_addr_t iova = handle & PAGE_MASK;
1417 if (!iova)
1421 page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
1425 iommu_unmap(mapping->domain, iova, len);
1426 __free_iova(mapping, iova, len);
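
Lines 1412-1426 are the teardown for a single mapped page: the DMA handle is masked down to its page base before being used as an iova, the backing page is recovered via iommu_iova_to_phys() for CPU cache maintenance, and only then is the range unmapped and the iova returned to the allocator. The masking is worth seeing in isolation (4 KiB page assumed; values are made up):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 0x1000ULL
#define PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
    uint64_t handle = 0x80203abc;          /* DMA address with in-page offset */
    uint64_t iova   = handle & PAGE_MASK;  /* the page the IOMMU mapped */
    uint64_t offset = handle & ~PAGE_MASK; /* offset for cache maintenance */

    /* prints iova=0x80203000 offset=0xabc; a zero iova is the
     * "nothing mapped" sentinel the listing tests with if (!iova) */
    printf("iova=0x%llx offset=0x%llx\n",
           (unsigned long long)iova, (unsigned long long)offset);
    return 0;
}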
1475 dma_addr_t iova = dma_handle & PAGE_MASK;
1479 if (!iova)
1482 iommu_unmap(mapping->domain, iova, len);
1483 __free_iova(mapping, iova, len);
1490 dma_addr_t iova = handle & PAGE_MASK;
1494 if (dev->dma_coherent || !iova)
1497 page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
1505 dma_addr_t iova = handle & PAGE_MASK;
1509 if (dev->dma_coherent || !iova)
1512 page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
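
The last two groups of hits (1490-1512) are the mirror-image sync hooks: both bail out early for a coherent device or a NULL handle, then translate the iova back to a physical page so the CPU cache can be maintained around the device's access. A schematic of that shape; iova_to_phys and cache_op below are stand-ins for iommu_iova_to_phys() plus phys_to_page() and the __dma_page_* helpers, with an identity iova-to-phys map assumed purely for the demo:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_MASK (~0xfffULL)

struct device { bool dma_coherent; };

/* demo stand-in for iommu_iova_to_phys(): identity mapping */
static uint64_t iova_to_phys(uint64_t iova) { return iova; }

/* demo stand-in for phys_to_page() + __dma_page_* cache helpers */
static void cache_op(uint64_t phys, unsigned int offset, size_t len)
{
    printf("cache maintain phys=0x%llx off=0x%x len=0x%zx\n",
           (unsigned long long)phys, offset, len);
}

static void sync_for_cpu(struct device *dev, uint64_t handle, size_t len)
{
    uint64_t iova = handle & PAGE_MASK;

    /* coherent devices need no cache maintenance; !iova means
     * nothing was mapped at this handle */
    if (dev->dma_coherent || !iova)
        return;

    cache_op(iova_to_phys(iova), (unsigned int)(handle & ~PAGE_MASK), len);
}

int main(void)
{
    struct device dev = { .dma_coherent = false };

    sync_for_cpu(&dev, 0x80203abcULL, 0x40);
    return 0;
}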