Lines matching references to iova (the number on each line is the position of the match in the source file; a sketch of the overall alloc/map/unmap/free pattern follows the listing):
1109 dma_addr_t iova;
1153 iova = mapping->base + (mapping_size * i);
1154 iova += start << PAGE_SHIFT;
1156 return iova;
1180 * The address range to be freed reaches into the iova
1315 dma_addr_t dma_addr, iova;
1322 iova = dma_addr;
1335 ret = iommu_map(mapping->domain, iova, phys, len,
1339 iova += len;
1344 iommu_unmap(mapping->domain, dma_addr, iova-dma_addr);
1349 static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size)
1354 * add optional in-page offset from iova to size and align
1357 size = PAGE_ALIGN((iova & ~PAGE_MASK) + size);
1358 iova &= PAGE_MASK;
1360 iommu_unmap(mapping->domain, iova, size);
1361 __free_iova(mapping, iova, size);
1583 dma_addr_t iova, iova_base;
1592 iova_base = iova = __alloc_iova(mapping, size);
1593 if (iova == DMA_MAPPING_ERROR)
1605 ret = iommu_map(mapping->domain, iova, phys, len, prot);
1609 iova += len;
1859 dma_addr_t iova = handle & PAGE_MASK;
1863 if (!iova)
1866 iommu_unmap(mapping->domain, iova, len);
1867 __free_iova(mapping, iova, len);
1883 dma_addr_t iova = handle & PAGE_MASK;
1884 struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
1888 if (!iova)
1894 iommu_unmap(mapping->domain, iova, len);
1895 __free_iova(mapping, iova, len);
1944 dma_addr_t iova = dma_handle & PAGE_MASK;
1948 if (!iova)
1951 iommu_unmap(mapping->domain, iova, len);
1952 __free_iova(mapping, iova, len);
1959 dma_addr_t iova = handle & PAGE_MASK;
1960 struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
1963 if (!iova)
1973 dma_addr_t iova = handle & PAGE_MASK;
1974 struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
1977 if (!iova)
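Taken together, the matches trace one lifecycle: a contiguous iova range is reserved from a per-mapping allocator (iova = mapping->base + (mapping_size * i); iova += start << PAGE_SHIFT), the range is populated with iommu_map() one chunk at a time (unwinding with iommu_unmap(..., iova - dma_addr) on failure), and teardown page-aligns the range before iommu_unmap() and __free_iova(). Below is a minimal user-space model of that pattern, assuming these matches come from the ARM IOMMU dma-mapping code; the mock_mapping struct, the mock_iommu_map()/mock_iommu_unmap() stubs, and the constants are hypothetical stand-ins for illustration, not the kernel's API.

/* Compile with: gcc -std=c11 iova_sketch.c */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & PAGE_MASK)
#define NR_PAGES	64
#define DMA_MAPPING_ERROR	((dma_addr_t)-1)

typedef uint64_t dma_addr_t;

struct mock_mapping {
	dma_addr_t base;		/* first iova the allocator hands out */
	unsigned char used[NR_PAGES];	/* one byte per page slot, for clarity */
};

/* Find a free run of pages, mark it used, return base + (slot << PAGE_SHIFT). */
static dma_addr_t alloc_iova(struct mock_mapping *m, size_t size)
{
	size_t npages = PAGE_ALIGN(size) >> PAGE_SHIFT;

	for (size_t start = 0; start + npages <= NR_PAGES; start++) {
		size_t i;

		for (i = 0; i < npages && !m->used[start + i]; i++)
			;
		if (i == npages) {
			memset(&m->used[start], 1, npages);
			return m->base + ((dma_addr_t)start << PAGE_SHIFT);
		}
	}
	return DMA_MAPPING_ERROR;
}

/* Mirror the free path: fold the in-page offset into size, align, release. */
static void free_iova(struct mock_mapping *m, dma_addr_t iova, size_t size)
{
	size = PAGE_ALIGN((iova & ~PAGE_MASK) + size);
	iova &= PAGE_MASK;
	memset(&m->used[(iova - m->base) >> PAGE_SHIFT], 0, size >> PAGE_SHIFT);
}

/* Stubs standing in for iommu_map()/iommu_unmap() on a real domain. */
static int mock_iommu_map(dma_addr_t iova, uint64_t phys, size_t len)
{
	printf("map   iova=%#llx phys=%#llx len=%zu\n",
	       (unsigned long long)iova, (unsigned long long)phys, len);
	return 0;	/* return nonzero here to exercise the unwind path */
}

static void mock_iommu_unmap(dma_addr_t iova, size_t len)
{
	printf("unmap iova=%#llx len=%zu\n", (unsigned long long)iova, len);
}

int main(void)
{
	struct mock_mapping m = { .base = 0x10000000ULL };
	size_t size = 3 * PAGE_SIZE;
	uint64_t phys = 0x80000000ULL;
	dma_addr_t dma_addr, iova;

	dma_addr = alloc_iova(&m, size);
	if (dma_addr == DMA_MAPPING_ERROR)
		return 1;

	/* Populate the range page by page, keeping iova as the running cursor. */
	iova = dma_addr;
	for (size_t off = 0; off < size; off += PAGE_SIZE) {
		if (mock_iommu_map(iova, phys + off, PAGE_SIZE)) {
			/* Unwind only what was mapped so far, then the range. */
			mock_iommu_unmap(dma_addr, iova - dma_addr);
			free_iova(&m, dma_addr, size);
			return 1;
		}
		iova += PAGE_SIZE;
	}

	/* Teardown: unmap the whole range, then return it to the allocator. */
	mock_iommu_unmap(dma_addr, size);
	free_iova(&m, dma_addr, size);
	return 0;
}

The unwind in the map loop mirrors line 1344 above: on a failed map, only the portion already mapped (iova - dma_addr bytes starting at dma_addr) is unmapped before the range is released. The free path mirrors lines 1357-1358: the in-page offset is folded into the size and the iova is truncated to a page boundary, so whole pages are always unmapped and freed.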