Lines Matching refs:iova (cross-reference hits for 'iova' in the kernel's drivers/iommu/iommu.c)
2355 phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
2358 return iova;
2363 return domain->ops->iova_to_phys(domain, iova);
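iommu_iova_to_phys() resolves a previously mapped IOVA through the driver's iova_to_phys op (line 2363); for identity-mapped domains the translation is trivial and the helper just returns iova unchanged (line 2358). A minimal caller sketch, assuming a device with an attached domain (the function name here is illustrative; iommu_get_domain_for_dev() is the real core API):

#include <linux/iommu.h>

/* Hedged sketch: look up the physical address behind a mapped IOVA. */
static phys_addr_t peek_translation(struct device *dev, dma_addr_t iova)
{
        struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

        if (!domain)
                return 0;

        /* Identity domains short-circuit and hand back iova (line 2358). */
        return iommu_iova_to_phys(domain, iova);
}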
2367 static size_t iommu_pgsize(struct iommu_domain *domain, unsigned long iova,
2373 unsigned long addr_merge = paddr | iova;
2403 if ((iova ^ paddr) & (pgsize_next - 1))
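The size selection in iommu_pgsize() hinges on a compact alignment trick: merging paddr and iova (line 2373) means the lowest set bit of paddr | iova bounds every page size both addresses are aligned to, and the XOR test at line 2403 checks that both addresses sit at the same offset within the speculative next-larger size. A paraphrase of the core math as a sketch; biggest_pgsize() is an illustrative stand-in, and the real helper additionally reports how many consecutive chunks of that size fit:

#include <linux/bitops.h>
#include <linux/bits.h>

static size_t biggest_pgsize(unsigned long pgsize_bitmap, unsigned long iova,
                             phys_addr_t paddr, size_t size)
{
        unsigned long pgsizes, addr_merge = paddr | iova;

        /* Page sizes larger than the remaining length are out. */
        pgsizes = pgsize_bitmap & GENMASK(__fls(size), 0);

        /* So are sizes the merged address is not aligned to (line 2373). */
        if (addr_merge)
                pgsizes &= GENMASK(__ffs(addr_merge), 0);

        /* Callers guarantee at least one supported size survives;
         * the real code BUG()s otherwise. Pick the biggest one. */
        return BIT(__fls(pgsizes));
}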
2421 static int __iommu_map_pages(struct iommu_domain *domain, unsigned long iova,
2429 pgsize = iommu_pgsize(domain, iova, paddr, size, &count);
2431 pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx count %zu\n",
2432 iova, &paddr, pgsize, count);
2435 ret = ops->map_pages(domain, iova, paddr, pgsize, count, prot,
2438 ret = ops->map(domain, iova, paddr, pgsize, prot, gfp);
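Lines 2435-2438 are the dispatch between the batched and the legacy single-chunk driver callbacks. Paraphrased as a sketch; do_map_chunk() is an illustrative name, and the op signatures follow struct iommu_ops as shown above:

#include <linux/iommu.h>

static int do_map_chunk(struct iommu_domain *domain, unsigned long iova,
                        phys_addr_t paddr, size_t pgsize, size_t count,
                        int prot, gfp_t gfp, size_t *mapped)
{
        const struct iommu_ops *ops = domain->ops;
        int ret;

        if (ops->map_pages) {
                /* Batched path: one call covers `count` chunks (line 2435). */
                ret = ops->map_pages(domain, iova, paddr, pgsize, count,
                                     prot, gfp, mapped);
        } else {
                /* Fallback: one pgsize chunk per call (line 2438). */
                ret = ops->map(domain, iova, paddr, pgsize, prot, gfp);
                *mapped = ret ? 0 : pgsize;
        }
        return ret;
}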
2445 static int __iommu_map(struct iommu_domain *domain, unsigned long iova,
2449 unsigned long orig_iova = iova;
2470 if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
2471 pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n",
2472 iova, &paddr, size, min_pagesz);
2476 pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size);
2481 ret = __iommu_map_pages(domain, iova, paddr, size, prot, gfp,
2492 iova += mapped;
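__iommu_map() drives that chunk helper in a loop, advancing iova and paddr by whatever each iteration actually mapped (line 2492) and unwinding from orig_iova (line 2449) if any step fails. A hedged paraphrase; map_chunk() stands in for __iommu_map_pages(), i.e. the combination of the two previous sketches:

#include <linux/iommu.h>

/* Hypothetical helper: pick a hardware size and call the driver;
 * sets *mapped to the bytes established on success. */
static int map_chunk(struct iommu_domain *domain, unsigned long iova,
                     phys_addr_t paddr, size_t size, int prot, gfp_t gfp,
                     size_t *mapped);

static int map_range(struct iommu_domain *domain, unsigned long iova,
                     phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
        unsigned long orig_iova = iova;
        size_t orig_size = size, mapped = 0;
        int ret = 0;

        while (size) {
                ret = map_chunk(domain, iova, paddr, size, prot, gfp,
                                &mapped);       /* line 2481 */
                if (ret)
                        break;
                iova += mapped;                 /* line 2492 */
                paddr += mapped;
                size -= mapped;
        }

        if (ret)        /* unwind whatever was already established */
                iommu_unmap(domain, orig_iova, orig_size - size);
        return ret;
}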
2505 int iommu_map(struct iommu_domain *domain, unsigned long iova,
2518 ret = __iommu_map(domain, iova, paddr, size, prot, gfp);
2520 ops->iotlb_sync_map(domain, iova, size);
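The public iommu_map() (line 2505) adds argument validation plus a single IOTLB sync over the whole mapped range (line 2520) on top of the walk above. A hedged usage sketch; note that later kernels added a trailing gfp_t argument to iommu_map(), so this five-argument call matches the signature era shown here:

#include <linux/iommu.h>

/* Map one page read-write and sanity-check the translation.
 * Assumes `domain` is already attached to a device. */
static int map_one_page(struct iommu_domain *domain, unsigned long iova,
                        phys_addr_t paddr)
{
        int ret;

        ret = iommu_map(domain, iova, paddr, PAGE_SIZE,
                        IOMMU_READ | IOMMU_WRITE);
        if (ret)        /* e.g. alignment rejected at line 2470 */
                return ret;

        WARN_ON(iommu_iova_to_phys(domain, iova) != paddr);
        return 0;
}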
2527 unsigned long iova, size_t size,
2533 pgsize = iommu_pgsize(domain, iova, iova, size, &count);
2535 ops->unmap_pages(domain, iova, pgsize, count, iotlb_gather) :
2536 ops->unmap(domain, iova, pgsize, iotlb_gather);
2540 unsigned long iova, size_t size,
2545 unsigned long orig_iova = iova;
2563 if (!IS_ALIGNED(iova | size, min_pagesz)) {
2564 pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
2565 iova, size, min_pagesz);
2569 pr_debug("unmap this: iova 0x%lx size 0x%zx\n", iova, size);
2576 unmapped_page = __iommu_unmap_pages(domain, iova,
2582 pr_debug("unmapped: iova 0x%lx size 0x%zx\n",
2583 iova, unmapped_page);
2585 iova += unmapped_page;
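The unmap walk mirrors the map walk; note that line 2533 passes iova for both address arguments of iommu_pgsize(), since there is no separate physical address on this path, and the loop ends early if the driver returns 0 (no progress). A hedged paraphrase, with do_unmap_chunk() as an illustrative stand-in for __iommu_unmap_pages():

#include <linux/iommu.h>

/* Hypothetical helper: unmap one driver-sized chunk, returning the
 * number of bytes actually unmapped (0 on failure). */
static size_t do_unmap_chunk(struct iommu_domain *domain, unsigned long iova,
                             size_t size, struct iommu_iotlb_gather *gather);

static size_t unmap_range(struct iommu_domain *domain, unsigned long iova,
                          size_t size, struct iommu_iotlb_gather *gather)
{
        size_t unmapped = 0, step;

        while (unmapped < size) {
                step = do_unmap_chunk(domain, iova, size - unmapped, gather);
                if (!step)
                        break;          /* driver made no progress: stop */
                iova += step;           /* line 2585 */
                unmapped += step;
        }
        return unmapped;
}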
2594 unsigned long iova, size_t size)
2600 ret = __iommu_unmap(domain, iova, size, &iotlb_gather);
2608 unsigned long iova, size_t size,
2611 return __iommu_unmap(domain, iova, size, iotlb_gather);
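Lines 2594-2611 expose two flavours: iommu_unmap() builds and syncs its own iommu_iotlb_gather around the walk (line 2600), while iommu_unmap_fast() (line 2611) takes a caller-owned gather so several unmaps can share one TLB flush. A hedged usage sketch of the fast path, with the two ranges assumed for illustration:

#include <linux/iommu.h>

static void unmap_two_ranges(struct iommu_domain *domain,
                             unsigned long iova_a, unsigned long iova_b,
                             size_t size)
{
        struct iommu_iotlb_gather gather;

        iommu_iotlb_gather_init(&gather);
        iommu_unmap_fast(domain, iova_a, size, &gather);
        iommu_unmap_fast(domain, iova_b, size, &gather);
        iommu_iotlb_sync(domain, &gather);      /* one flush covers both */
}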
2615 ssize_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
2636 ret = __iommu_map(domain, iova + mapped, start,
2662 ops->iotlb_sync_map(domain, iova, mapped);
2667 iommu_unmap(domain, iova, mapped);
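iommu_map_sg() (line 2615) walks the scatterlist, merging physically contiguous entries before handing each run to __iommu_map() at iova + mapped (line 2636), syncs the IOTLB once over the whole range (line 2662), and unwinds everything already mapped on failure (line 2667). A hedged usage sketch, assuming a populated sg_table and the ssize_t-returning signature shown here:

#include <linux/iommu.h>
#include <linux/scatterlist.h>

static ssize_t map_sgt(struct iommu_domain *domain, unsigned long iova,
                       struct sg_table *sgt)
{
        /* Negative return is an errno; otherwise bytes actually mapped. */
        return iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents,
                            IOMMU_READ | IOMMU_WRITE);
}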
2677 * @iova: the faulting address
2698 unsigned long iova, int flags)
2707 ret = domain->handler(domain, dev, iova, flags,
2710 trace_io_page_fault(dev, iova, flags);
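report_iommu_fault() (documented at line 2677) first offers the fault to the handler installed on the domain (line 2707) and then hits the io_page_fault tracepoint (line 2710). A hedged sketch of a handler wired up with iommu_set_fault_handler(); the handler signature matches the invocation above plus the opaque token:

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/iommu.h>

static int demo_fault_handler(struct iommu_domain *domain,
                              struct device *dev, unsigned long iova,
                              int flags, void *token)
{
        dev_err(dev, "iommu fault at iova %#lx, flags %#x\n", iova, flags);
        return -ENOSYS; /* not handled: let the caller keep reporting */
}

/* Registration, e.g. right after allocating the domain:
 *      iommu_set_fault_handler(domain, demo_fault_handler, NULL);
 */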