// SPDX-License-Identifier: GPL-2.0-only
/*
 * A fairly generic DMA-API to IOMMU-API glue layer.
 *
 * Copyright (C) 2014-2015 ARM Ltd.
 *
 * based in part on arch/arm/mm/dma-mapping.c:
 * Copyright (C) 2000-2004 Russell King
 */

#include <linux/acpi_iort.h>
#include <linux/device.h>
#include <linux/dma-map-ops.h>
#include <linux/dma-iommu.h>
#include <linux/gfp.h>
#include <linux/huge_mm.h>
#include <linux/iommu.h>
#include <linux/iova.h>
#include <linux/irq.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>
#include <linux/crash_dump.h>

struct iommu_dma_msi_page {
	struct list_head	list;
	dma_addr_t		iova;
	phys_addr_t		phys;
};

enum iommu_dma_cookie_type {
	IOMMU_DMA_IOVA_COOKIE,
	IOMMU_DMA_MSI_COOKIE,
};

struct iommu_dma_cookie {
	enum iommu_dma_cookie_type	type;
	union {
		/* Full allocator for IOMMU_DMA_IOVA_COOKIE */
		struct iova_domain	iovad;
		/* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
		dma_addr_t		msi_iova;
	};
	struct list_head		msi_page_list;

	/* Domain for flush queue callback; NULL if flush queue not in use */
	struct iommu_domain		*fq_domain;
};

static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
{
	if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
		return cookie->iovad.granule;
	return PAGE_SIZE;
}

static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
{
	struct iommu_dma_cookie *cookie;

	cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
	if (cookie) {
		INIT_LIST_HEAD(&cookie->msi_page_list);
		cookie->type = type;
	}
	return cookie;
}

/**
 * iommu_get_dma_cookie - Acquire DMA-API resources for a domain
 * @domain: IOMMU domain to prepare for DMA-API usage
 *
 * IOMMU drivers should normally call this from their domain_alloc
 * callback when domain->type == IOMMU_DOMAIN_DMA.
 */
int iommu_get_dma_cookie(struct iommu_domain *domain)
{
	if (domain->iova_cookie)
		return -EEXIST;

	domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE);
	if (!domain->iova_cookie)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL(iommu_get_dma_cookie);

/**
 * iommu_get_msi_cookie - Acquire just MSI remapping resources
 * @domain: IOMMU domain to prepare
 * @base: Start address of IOVA region for MSI mappings
 *
 * Users who manage their own IOVA allocation and do not want DMA API support,
 * but would still like to take advantage of automatic MSI remapping, can use
 * this to initialise their own domain appropriately. Users should reserve a
 * contiguous IOVA region, starting at @base, large enough to accommodate the
 * number of PAGE_SIZE mappings necessary to cover every MSI doorbell address
 * used by the devices attached to @domain.
 */
int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
{
	struct iommu_dma_cookie *cookie;

	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
		return -EINVAL;

	if (domain->iova_cookie)
		return -EEXIST;

	cookie = cookie_alloc(IOMMU_DMA_MSI_COOKIE);
	if (!cookie)
		return -ENOMEM;

	cookie->msi_iova = base;
	domain->iova_cookie = cookie;
	return 0;
}
EXPORT_SYMBOL(iommu_get_msi_cookie);

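/*
 * Example (illustrative sketch, not part of this file): a hypothetical IOMMU
 * driver would typically pair the cookie helpers with its domain lifecycle
 * callbacks, roughly as follows. The "my_" names are assumptions made purely
 * for illustration.
 *
 *	static struct iommu_domain *my_domain_alloc(unsigned int type)
 *	{
 *		struct my_domain *d = kzalloc(sizeof(*d), GFP_KERNEL);
 *
 *		if (!d)
 *			return NULL;
 *		if (type == IOMMU_DOMAIN_DMA &&
 *		    iommu_get_dma_cookie(&d->domain)) {
 *			kfree(d);
 *			return NULL;
 *		}
 *		return &d->domain;
 *	}
 *
 *	static void my_domain_free(struct iommu_domain *domain)
 *	{
 *		iommu_put_dma_cookie(domain);
 *		kfree(to_my_domain(domain));
 *	}
 */
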
/**
 * iommu_put_dma_cookie - Release a domain's DMA mapping resources
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() or
 *          iommu_get_msi_cookie()
 *
 * IOMMU drivers should normally call this from their domain_free callback.
 */
void iommu_put_dma_cookie(struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi, *tmp;

	if (!cookie)
		return;

	if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule)
		put_iova_domain(&cookie->iovad);

	list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
		list_del(&msi->list);
		kfree(msi);
	}
	kfree(cookie);
	domain->iova_cookie = NULL;
}
EXPORT_SYMBOL(iommu_put_dma_cookie);

/**
 * iommu_dma_get_resv_regions - Reserved region driver helper
 * @dev: Device from iommu_get_resv_regions()
 * @list: Reserved region list from iommu_get_resv_regions()
 *
 * IOMMU drivers can use this to implement their .get_resv_regions callback
 * for general non-IOMMU-specific reservations. Currently, this covers GICv3
 * ITS region reservation on ACPI based ARM platforms that may require HW MSI
 * reservation.
 */
void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
{
	if (!is_of_node(dev_iommu_fwspec_get(dev)->iommu_fwnode))
		iort_iommu_msi_get_resv_regions(dev, list);
}
EXPORT_SYMBOL(iommu_dma_get_resv_regions);

static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
		phys_addr_t start, phys_addr_t end)
{
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_dma_msi_page *msi_page;
	int i, num_pages;

	start -= iova_offset(iovad, start);
	num_pages = iova_align(iovad, end - start) >> iova_shift(iovad);

	for (i = 0; i < num_pages; i++) {
		msi_page = kmalloc(sizeof(*msi_page), GFP_KERNEL);
		if (!msi_page)
			return -ENOMEM;

		msi_page->phys = start;
		msi_page->iova = start;
		INIT_LIST_HEAD(&msi_page->list);
		list_add(&msi_page->list, &cookie->msi_page_list);
		start += iovad->granule;
	}

	return 0;
}

static int iova_reserve_pci_windows(struct pci_dev *dev,
		struct iova_domain *iovad)
{
	struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
	struct resource_entry *window;
	unsigned long lo, hi;
	phys_addr_t start = 0, end;

	resource_list_for_each_entry(window, &bridge->windows) {
		if (resource_type(window->res) != IORESOURCE_MEM)
			continue;

		lo = iova_pfn(iovad, window->res->start - window->offset);
		hi = iova_pfn(iovad, window->res->end - window->offset);
		reserve_iova(iovad, lo, hi);
	}

	/* Get reserved DMA windows from host bridge */
	resource_list_for_each_entry(window, &bridge->dma_ranges) {
		end = window->res->start - window->offset;
resv_iova:
		if (end > start) {
			lo = iova_pfn(iovad, start);
			hi = iova_pfn(iovad, end);
			reserve_iova(iovad, lo, hi);
		} else if (end < start) {
			/* dma_ranges list should be sorted */
			dev_err(&dev->dev,
				"Failed to reserve IOVA [%pa-%pa]\n",
				&start, &end);
			return -EINVAL;
		}

		start = window->res->end - window->offset + 1;
		/* If window is last entry */
		if (window->node.next == &bridge->dma_ranges &&
		    end != ~(phys_addr_t)0) {
			end = ~(phys_addr_t)0;
			goto resv_iova;
		}
	}

	return 0;
}

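/*
 * Worked example (illustrative assumption, not from the original file): with
 * two sorted dma_ranges entries covering bus addresses [0x0 - 0x3fffffff]
 * and [0x80000000 - 0xffffffff], the first iteration reserves nothing (end
 * equals start at 0), the second reserves the gap between the two windows,
 * and the final resv_iova pass reserves everything above the last window,
 * leaving only the usable DMA windows unreserved in the IOVA space.
 */
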
static int iova_reserve_iommu_regions(struct device *dev,
		struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_resv_region *region;
	LIST_HEAD(resv_regions);
	int ret = 0;

	if (dev_is_pci(dev)) {
		ret = iova_reserve_pci_windows(to_pci_dev(dev), iovad);
		if (ret)
			return ret;
	}

	iommu_get_resv_regions(dev, &resv_regions);
	list_for_each_entry(region, &resv_regions, list) {
		unsigned long lo, hi;

		/* We ARE the software that manages these! */
		if (region->type == IOMMU_RESV_SW_MSI)
			continue;

		lo = iova_pfn(iovad, region->start);
		hi = iova_pfn(iovad, region->start + region->length - 1);
		reserve_iova(iovad, lo, hi);

		if (region->type == IOMMU_RESV_MSI)
			ret = cookie_init_hw_msi_region(cookie, region->start,
					region->start + region->length);
		if (ret)
			break;
	}
	iommu_put_resv_regions(dev, &resv_regions);

	return ret;
}

static void iommu_dma_flush_iotlb_all(struct iova_domain *iovad)
{
	struct iommu_dma_cookie *cookie;
	struct iommu_domain *domain;

	cookie = container_of(iovad, struct iommu_dma_cookie, iovad);
	domain = cookie->fq_domain;
	/*
	 * The IOMMU driver supporting DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE
	 * implies that ops->flush_iotlb_all must be non-NULL.
	 */
	domain->ops->flush_iotlb_all(domain);
}

/**
 * iommu_dma_init_domain - Initialise a DMA mapping domain
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
 * @base: IOVA at which the mappable address space starts
 * @size: Size of IOVA space
 * @dev: Device the domain is being initialised for
 *
 * @base and @size should be exact multiples of IOMMU page granularity to
 * avoid rounding surprises. If necessary, we reserve the page at address 0
 * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
 * any change which could make prior IOVAs invalid will fail.
 */
static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
		u64 size, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	unsigned long order, base_pfn;
	struct iova_domain *iovad;
	int attr;

	if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
		return -EINVAL;

	iovad = &cookie->iovad;

	/* Use the smallest supported page size for IOVA granularity */
	order = __ffs(domain->pgsize_bitmap);
	base_pfn = max_t(unsigned long, 1, base >> order);

	/* Check the domain allows at least some access to the device... */
	if (domain->geometry.force_aperture) {
		if (base > domain->geometry.aperture_end ||
		    base + size <= domain->geometry.aperture_start) {
			pr_warn("specified DMA range outside IOMMU capability\n");
			return -EFAULT;
		}
		/* ...then finally give it a kicking to make sure it fits */
		base_pfn = max_t(unsigned long, base_pfn,
				domain->geometry.aperture_start >> order);
	}

	/* start_pfn is always nonzero for an already-initialised domain */
	if (iovad->start_pfn) {
		if (1UL << order != iovad->granule ||
		    base_pfn != iovad->start_pfn) {
			pr_warn("Incompatible range for DMA domain\n");
			return -EFAULT;
		}

		return 0;
	}

	init_iova_domain(iovad, 1UL << order, base_pfn);

	if (!cookie->fq_domain && !iommu_domain_get_attr(domain,
			DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, &attr) && attr) {
		if (init_iova_flush_queue(iovad, iommu_dma_flush_iotlb_all,
					NULL))
			pr_warn("iova flush queue initialization failed\n");
		else
			cookie->fq_domain = domain;
	}

	if (!dev)
		return 0;

	return iova_reserve_iommu_regions(dev, domain);
}

static int iommu_dma_deferred_attach(struct device *dev,
		struct iommu_domain *domain)
{
	const struct iommu_ops *ops = domain->ops;

	if (!is_kdump_kernel())
		return 0;

	if (unlikely(ops->is_attach_deferred &&
			ops->is_attach_deferred(domain, dev)))
		return iommu_attach_device(domain, dev);

	return 0;
}

/**
 * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
 *                    page flags.
 * @dir: Direction of DMA transfer
 * @coherent: Is the DMA master cache-coherent?
 * @attrs: DMA attributes for the mapping
 *
 * Return: corresponding IOMMU API page protection flags
 */
static int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
		unsigned long attrs)
{
	int prot = coherent ? IOMMU_CACHE : 0;

	if (attrs & DMA_ATTR_PRIVILEGED)
		prot |= IOMMU_PRIV;

	switch (dir) {
	case DMA_BIDIRECTIONAL:
		return prot | IOMMU_READ | IOMMU_WRITE;
	case DMA_TO_DEVICE:
		return prot | IOMMU_READ;
	case DMA_FROM_DEVICE:
		return prot | IOMMU_WRITE;
	default:
		return 0;
	}
}

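/*
 * For illustration (not part of the original file): a cache-coherent device
 * doing a device-to-memory transfer gets
 *
 *	dma_info_to_prot(DMA_FROM_DEVICE, true, 0)
 *		== IOMMU_WRITE | IOMMU_CACHE
 *
 * while a non-coherent, privileged, bidirectional mapping gets
 *
 *	dma_info_to_prot(DMA_BIDIRECTIONAL, false, DMA_ATTR_PRIVILEGED)
 *		== IOMMU_READ | IOMMU_WRITE | IOMMU_PRIV
 */
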
static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
		size_t size, u64 dma_limit, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	unsigned long shift, iova_len, iova = 0;

	if (cookie->type == IOMMU_DMA_MSI_COOKIE) {
		cookie->msi_iova += size;
		return cookie->msi_iova - size;
	}

	shift = iova_shift(iovad);
	iova_len = size >> shift;
	/*
	 * Freeing non-power-of-two-sized allocations back into the IOVA caches
	 * will come back to bite us badly, so we have to waste a bit of space
	 * rounding up anything cacheable to make sure that can't happen. The
	 * order of the unadjusted size will still match upon freeing.
	 */
	if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
		iova_len = roundup_pow_of_two(iova_len);

	dma_limit = min_not_zero(dma_limit, dev->bus_dma_limit);

	if (domain->geometry.force_aperture)
		dma_limit = min(dma_limit, (u64)domain->geometry.aperture_end);

	/* Try to get PCI devices a SAC address */
	if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev))
		iova = alloc_iova_fast(iovad, iova_len,
				       DMA_BIT_MASK(32) >> shift, false);

	if (!iova)
		iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift,
				       true);

	return (dma_addr_t)iova << shift;
}

static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
		dma_addr_t iova, size_t size)
{
	struct iova_domain *iovad = &cookie->iovad;

	/* The MSI case is only ever cleaning up its most recent allocation */
	if (cookie->type == IOMMU_DMA_MSI_COOKIE)
		cookie->msi_iova -= size;
	else if (cookie->fq_domain)	/* non-strict mode */
		queue_iova(iovad, iova_pfn(iovad, iova),
				size >> iova_shift(iovad), 0);
	else
		free_iova_fast(iovad, iova_pfn(iovad, iova),
				size >> iova_shift(iovad));
}

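/*
 * Illustrative example of the rounding above (assuming a typical 4K granule,
 * an assumption not stated in this file): a 20K request gives iova_len == 5;
 * that is below 1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1), so the allocation is
 * rounded up to 8 granules. Per the comment above, the power-of-two order
 * that the range cache keys on still matches when the unadjusted size is
 * eventually freed, so the cached range is recycled rather than leaked.
 */
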
static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
		size_t size)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	size_t iova_off = iova_offset(iovad, dma_addr);
	struct iommu_iotlb_gather iotlb_gather;
	size_t unmapped;

	dma_addr -= iova_off;
	size = iova_align(iovad, size + iova_off);
	iommu_iotlb_gather_init(&iotlb_gather);

	unmapped = iommu_unmap_fast(domain, dma_addr, size, &iotlb_gather);
	WARN_ON(unmapped != size);

	if (!cookie->fq_domain)
		iommu_iotlb_sync(domain, &iotlb_gather);
	iommu_dma_free_iova(cookie, dma_addr, size);
}

static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
		size_t size, int prot, u64 dma_mask)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	size_t iova_off = iova_offset(iovad, phys);
	dma_addr_t iova;

	if (unlikely(iommu_dma_deferred_attach(dev, domain)))
		return DMA_MAPPING_ERROR;

	size = iova_align(iovad, size + iova_off);

	iova = iommu_dma_alloc_iova(domain, size, dma_mask, dev);
	if (!iova)
		return DMA_MAPPING_ERROR;

	if (iommu_map_atomic(domain, iova, phys - iova_off, size, prot)) {
		iommu_dma_free_iova(cookie, iova, size);
		return DMA_MAPPING_ERROR;
	}
	return iova + iova_off;
}

static void __iommu_dma_free_pages(struct page **pages, int count)
{
	while (count--)
		__free_page(pages[count]);
	kvfree(pages);
}

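/*
 * Worked example of the offset handling in __iommu_dma_map() (illustrative,
 * assuming a 4K granule): mapping 0x100 bytes at phys 0x80000c00 gives
 * iova_off == 0xc00, so the whole page at phys 0x80000000 is mapped to a
 * granule-aligned IOVA and the caller gets back iova + 0xc00. The matching
 * __iommu_dma_unmap() re-derives the same alignment from the handle, so the
 * full page is unmapped and the full IOVA range freed.
 */
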
static struct page **__iommu_dma_alloc_pages(struct device *dev,
		unsigned int count, unsigned long order_mask, gfp_t gfp)
{
	struct page **pages;
	unsigned int i = 0, nid = dev_to_node(dev);

	order_mask &= (2U << MAX_ORDER) - 1;
	if (!order_mask)
		return NULL;

	pages = kvzalloc(count * sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return NULL;

	/* IOMMU can map any pages, so highmem can also be used here */
	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;

	/* It makes no sense to muck about with huge pages */
	gfp &= ~__GFP_COMP;

	while (count) {
		struct page *page = NULL;
		unsigned int order_size;

		/*
		 * Higher-order allocations are a convenience rather
		 * than a necessity, hence using __GFP_NORETRY until
		 * falling back to minimum-order allocations.
		 */
		for (order_mask &= (2U << __fls(count)) - 1;
		     order_mask; order_mask &= ~order_size) {
			unsigned int order = __fls(order_mask);
			gfp_t alloc_flags = gfp;

			order_size = 1U << order;
			if (order_mask > order_size)
				alloc_flags |= __GFP_NORETRY;
			page = alloc_pages_node(nid, alloc_flags, order);
			if (!page)
				continue;
			if (order)
				split_page(page, order);
			break;
		}
		if (!page) {
			__iommu_dma_free_pages(pages, i);
			return NULL;
		}
		count -= order_size;
		while (order_size--)
			pages[i++] = page++;
	}
	return pages;
}

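/*
 * Walkthrough (illustrative): asking for count == 5 pages with an order_mask
 * allowing orders {0, 2} first clamps the mask to orders no larger than the
 * remaining count, then tries a single order-2 (4-page) allocation with
 * __GFP_NORETRY; on success that block is split into singles, and the last
 * page comes from a plain order-0 attempt, so the buffer ends up as one
 * 4-page run plus one standalone page, stitched together by the IOMMU.
 */
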
/**
 * iommu_dma_alloc_remap - Allocate and map a buffer contiguous in IOVA space
 * @dev: Device to allocate memory for. Must be a real device
 *	 attached to an iommu_dma_domain
 * @size: Size of buffer in bytes
 * @dma_handle: Out argument for allocated DMA handle
 * @gfp: Allocation flags
 * @prot: pgprot_t to use for the remapped mapping
 * @attrs: DMA attributes for this allocation
 *
 * If @size is less than PAGE_SIZE, then a full CPU page will be allocated,
 * but an IOMMU which supports smaller pages might not map the whole thing.
 *
 * Return: Mapped virtual address, or NULL on failure.
 */
static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, pgprot_t prot,
		unsigned long attrs)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	bool coherent = dev_is_dma_coherent(dev);
	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
	unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
	struct page **pages;
	struct sg_table sgt;
	dma_addr_t iova;
	void *vaddr;

	*dma_handle = DMA_MAPPING_ERROR;

	if (unlikely(iommu_dma_deferred_attach(dev, domain)))
		return NULL;

	min_size = alloc_sizes & -alloc_sizes;
	if (min_size < PAGE_SIZE) {
		min_size = PAGE_SIZE;
		alloc_sizes |= PAGE_SIZE;
	} else {
		size = ALIGN(size, min_size);
	}
	if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
		alloc_sizes = min_size;

	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	pages = __iommu_dma_alloc_pages(dev, count, alloc_sizes >> PAGE_SHIFT,
					gfp);
	if (!pages)
		return NULL;

	size = iova_align(iovad, size);
	iova = iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev);
	if (!iova)
		goto out_free_pages;

	if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL))
		goto out_free_iova;

	if (!(ioprot & IOMMU_CACHE)) {
		struct scatterlist *sg;
		int i;

		for_each_sg(sgt.sgl, sg, sgt.orig_nents, i)
			arch_dma_prep_coherent(sg_page(sg), sg->length);
	}

	if (iommu_map_sg_atomic(domain, iova, sgt.sgl, sgt.orig_nents, ioprot)
			< size)
		goto out_free_sg;

	vaddr = dma_common_pages_remap(pages, size, prot,
			__builtin_return_address(0));
	if (!vaddr)
		goto out_unmap;

	*dma_handle = iova;
	sg_free_table(&sgt);
	return vaddr;

out_unmap:
	__iommu_dma_unmap(dev, iova, size);
out_free_sg:
	sg_free_table(&sgt);
out_free_iova:
	iommu_dma_free_iova(cookie, iova, size);
out_free_pages:
	__iommu_dma_free_pages(pages, count);
	return NULL;
}

/**
 * __iommu_dma_mmap - Map a buffer into provided user VMA
 * @pages: Array representing buffer from __iommu_dma_alloc()
 * @size: Size of buffer in bytes
 * @vma: VMA describing requested userspace mapping
 *
 * Maps the pages of the buffer in @pages into @vma. The caller is responsible
 * for verifying the correct size and protection of @vma beforehand.
 */
static int __iommu_dma_mmap(struct page **pages, size_t size,
		struct vm_area_struct *vma)
{
	return vm_map_pages(vma, pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
}

static void iommu_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (dev_is_dma_coherent(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
	arch_sync_dma_for_cpu(phys, size, dir);
}

static void iommu_dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (dev_is_dma_coherent(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
	arch_sync_dma_for_device(phys, size, dir);
}

static void iommu_dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (dev_is_dma_coherent(dev))
		return;

	for_each_sg(sgl, sg, nelems, i)
		arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
}

static void iommu_dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (dev_is_dma_coherent(dev))
		return;

	for_each_sg(sgl, sg, nelems, i)
		arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
}

static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	phys_addr_t phys = page_to_phys(page) + offset;
	bool coherent = dev_is_dma_coherent(dev);
	int prot = dma_info_to_prot(dir, coherent, attrs);
	dma_addr_t dma_handle;

	dma_handle = __iommu_dma_map(dev, phys, size, prot, dma_get_mask(dev));
	if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
	    dma_handle != DMA_MAPPING_ERROR)
		arch_sync_dma_for_device(phys, size, dir);
	return dma_handle;
}

static void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		iommu_dma_sync_single_for_cpu(dev, dma_handle, size, dir);
	__iommu_dma_unmap(dev, dma_handle, size);
}

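/*
 * For reference (illustrative sketch, not part of this file): drivers never
 * call these entry points directly; they are reached through the generic
 * DMA API once iommu_dma_ops is installed, e.g.:
 *
 *	dma_addr_t dma = dma_map_page(dev, page, 0, len, DMA_TO_DEVICE);
 *
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	// ... point the device at 'dma' and run the transfer ...
 *	dma_unmap_page(dev, dma, len, DMA_TO_DEVICE);
 */
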
/*
 * Prepare a successfully-mapped scatterlist to give back to the caller.
 *
 * At this point the segments are already laid out by iommu_dma_map_sg() to
 * avoid individually crossing any boundaries, so we merely need to check a
 * segment's start address to avoid concatenating across one.
 */
static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
		dma_addr_t dma_addr)
{
	struct scatterlist *s, *cur = sg;
	unsigned long seg_mask = dma_get_seg_boundary(dev);
	unsigned int cur_len = 0, max_len = dma_get_max_seg_size(dev);
	int i, count = 0;

	for_each_sg(sg, s, nents, i) {
		/* Restore this segment's original unaligned fields first */
		unsigned int s_iova_off = sg_dma_address(s);
		unsigned int s_length = sg_dma_len(s);
		unsigned int s_iova_len = s->length;

		s->offset += s_iova_off;
		s->length = s_length;
		sg_dma_address(s) = DMA_MAPPING_ERROR;
		sg_dma_len(s) = 0;

		/*
		 * Now fill in the real DMA data. If...
		 * - there is a valid output segment to append to
		 * - and this segment starts on an IOVA page boundary
		 * - but doesn't fall at a segment boundary
		 * - and wouldn't make the resulting output segment too long
		 */
		if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
		    (max_len - cur_len >= s_length)) {
			/* ...then concatenate it with the previous one */
			cur_len += s_length;
		} else {
			/* Otherwise start the next output segment */
			if (i > 0)
				cur = sg_next(cur);
			cur_len = s_length;
			count++;

			sg_dma_address(cur) = dma_addr + s_iova_off;
		}

		sg_dma_len(cur) = cur_len;
		dma_addr += s_iova_len;

		if (s_length + s_iova_off < s_iova_len)
			cur_len = 0;
	}
	return count;
}

/*
 * If mapping failed, then just restore the original list,
 * but making sure the DMA fields are invalidated.
 */
static void __invalidate_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (sg_dma_address(s) != DMA_MAPPING_ERROR)
			s->offset += sg_dma_address(s);
		if (sg_dma_len(s))
			s->length = sg_dma_len(s);
		sg_dma_address(s) = DMA_MAPPING_ERROR;
		sg_dma_len(s) = 0;
	}
}

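/*
 * Illustrative example of the stash/restore trick (assuming a 4K granule):
 * a segment of 0x1200 bytes at offset 0x200 is padded by iommu_dma_map_sg()
 * below to s->length == 0x2000 and s->offset == 0, with the original
 * 0x200/0x1200 values parked in the unused DMA address/length fields.
 * __finalise_sg() above first undoes that swap, and only then writes the
 * real bus address and length back into the DMA fields.
 */
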
/*
 * The DMA API client is passing in a scatterlist which could describe
 * any old buffer layout, but the IOMMU API requires everything to be
 * aligned to IOMMU pages. Hence the need for this complicated bit of
 * impedance-matching, to be able to hand off a suitably-aligned list,
 * but still preserve the original offsets and sizes for the caller.
 */
static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct scatterlist *s, *prev = NULL;
	int prot = dma_info_to_prot(dir, dev_is_dma_coherent(dev), attrs);
	dma_addr_t iova;
	size_t iova_len = 0;
	unsigned long mask = dma_get_seg_boundary(dev);
	int i;

	if (unlikely(iommu_dma_deferred_attach(dev, domain)))
		return 0;

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		iommu_dma_sync_sg_for_device(dev, sg, nents, dir);

	/*
	 * Work out how much IOVA space we need, and align the segments to
	 * IOVA granules for the IOMMU driver to handle. With some clever
	 * trickery we can modify the list in-place, but reversibly, by
	 * stashing the unaligned parts in the as-yet-unused DMA fields.
	 */
	for_each_sg(sg, s, nents, i) {
		size_t s_iova_off = iova_offset(iovad, s->offset);
		size_t s_length = s->length;
		size_t pad_len = (mask - iova_len + 1) & mask;

		sg_dma_address(s) = s_iova_off;
		sg_dma_len(s) = s_length;
		s->offset -= s_iova_off;
		s_length = iova_align(iovad, s_length + s_iova_off);
		s->length = s_length;

		/*
		 * Due to the alignment of our single IOVA allocation, we can
		 * depend on these assumptions about the segment boundary mask:
		 * - If mask size >= IOVA size, then the IOVA range cannot
		 *   possibly fall across a boundary, so we don't care.
		 * - If mask size < IOVA size, then the IOVA range must start
		 *   exactly on a boundary, therefore we can lay things out
		 *   based purely on segment lengths without needing to know
		 *   the actual addresses beforehand.
		 * - The mask must be a power of 2, so pad_len == 0 if
		 *   iova_len == 0, thus we cannot dereference prev the first
		 *   time through here (i.e. before it has a meaningful value).
		 */
		if (pad_len && pad_len < s_length - 1) {
			prev->length += pad_len;
			iova_len += pad_len;
		}

		iova_len += s_length;
		prev = s;
	}

	iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
	if (!iova)
		goto out_restore_sg;

	/*
	 * We'll leave any physical concatenation to the IOMMU driver's
	 * implementation - it knows better than we do.
	 */
	if (iommu_map_sg_atomic(domain, iova, sg, nents, prot) < iova_len)
		goto out_free_iova;

	return __finalise_sg(dev, sg, nents, iova);

out_free_iova:
	iommu_dma_free_iova(cookie, iova, iova_len);
out_restore_sg:
	__invalidate_sg(sg, nents);
	return 0;
}

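/*
 * Typical caller's view (illustrative sketch): the round trip through the
 * machinery above is hidden behind the generic scatterlist DMA API, e.g.:
 *
 *	int n = dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);
 *
 *	if (!n)
 *		return -ENOMEM;
 *	// ... program the device with the n coalesced segments ...
 *	dma_unmap_sg(dev, sgl, nents, DMA_FROM_DEVICE);
 *
 * Note that the returned count may be smaller than nents, because
 * __finalise_sg() concatenates segments that ended up IOVA-contiguous;
 * unmapping still takes the original nents.
 */
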
static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	dma_addr_t start, end;
	struct scatterlist *tmp;
	int i;

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		iommu_dma_sync_sg_for_cpu(dev, sg, nents, dir);

	/*
	 * The scatterlist segments are mapped into a single
	 * contiguous IOVA allocation, so this is incredibly easy.
	 */
	start = sg_dma_address(sg);
	for_each_sg(sg_next(sg), tmp, nents - 1, i) {
		if (sg_dma_len(tmp) == 0)
			break;
		sg = tmp;
	}
	end = sg_dma_address(sg) + sg_dma_len(sg);
	__iommu_dma_unmap(dev, start, end - start);
}

static dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	return __iommu_dma_map(dev, phys, size,
			dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO,
			dma_get_mask(dev));
}

static void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	__iommu_dma_unmap(dev, handle, size);
}

static void __iommu_dma_free(struct device *dev, size_t size, void *cpu_addr)
{
	size_t alloc_size = PAGE_ALIGN(size);
	int count = alloc_size >> PAGE_SHIFT;
	struct page *page = NULL, **pages = NULL;

	/* Non-coherent atomic allocation? Easy */
	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
	    dma_free_from_pool(dev, cpu_addr, alloc_size))
		return;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
		/*
		 * If the address is remapped, then it's either non-coherent
		 * or highmem CMA, or an iommu_dma_alloc_remap() construction.
		 */
		pages = dma_common_find_pages(cpu_addr);
		if (!pages)
			page = vmalloc_to_page(cpu_addr);
		dma_common_free_remap(cpu_addr, alloc_size);
	} else {
		/* Lowmem means a coherent atomic or CMA allocation */
		page = virt_to_page(cpu_addr);
	}

	if (pages)
		__iommu_dma_free_pages(pages, count);
	if (page)
		dma_free_contiguous(dev, page, alloc_size);
}

static void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t handle, unsigned long attrs)
{
	__iommu_dma_unmap(dev, handle, size);
	__iommu_dma_free(dev, size, cpu_addr);
}

static void *iommu_dma_alloc_pages(struct device *dev, size_t size,
		struct page **pagep, gfp_t gfp, unsigned long attrs)
{
	bool coherent = dev_is_dma_coherent(dev);
	size_t alloc_size = PAGE_ALIGN(size);
	int node = dev_to_node(dev);
	struct page *page = NULL;
	void *cpu_addr;

	page = dma_alloc_contiguous(dev, alloc_size, gfp);
	if (!page)
		page = alloc_pages_node(node, gfp, get_order(alloc_size));
	if (!page)
		return NULL;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && (!coherent || PageHighMem(page))) {
		pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);

		cpu_addr = dma_common_contiguous_remap(page, alloc_size,
				prot, __builtin_return_address(0));
		if (!cpu_addr)
			goto out_free_pages;

		if (!coherent)
			arch_dma_prep_coherent(page, size);
	} else {
		cpu_addr = page_address(page);
	}

	*pagep = page;
	memset(cpu_addr, 0, alloc_size);
	return cpu_addr;
out_free_pages:
	dma_free_contiguous(dev, page, alloc_size);
	return NULL;
}

static void *iommu_dma_alloc(struct device *dev, size_t size,
		dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
{
	bool coherent = dev_is_dma_coherent(dev);
	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
	struct page *page = NULL;
	void *cpu_addr;

	gfp |= __GFP_ZERO;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && gfpflags_allow_blocking(gfp) &&
	    !(attrs & DMA_ATTR_FORCE_CONTIGUOUS)) {
		return iommu_dma_alloc_remap(dev, size, handle, gfp,
				dma_pgprot(dev, PAGE_KERNEL, attrs), attrs);
	}

	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
	    !gfpflags_allow_blocking(gfp) && !coherent)
		page = dma_alloc_from_pool(dev, PAGE_ALIGN(size), &cpu_addr,
					   gfp, NULL);
	else
		cpu_addr = iommu_dma_alloc_pages(dev, size, &page, gfp, attrs);
	if (!cpu_addr)
		return NULL;

	*handle = __iommu_dma_map(dev, page_to_phys(page), size, ioprot,
			dev->coherent_dma_mask);
	if (*handle == DMA_MAPPING_ERROR) {
		__iommu_dma_free(dev, size, cpu_addr);
		return NULL;
	}

	return cpu_addr;
}

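/*
 * Caller's view (illustrative sketch): the allocation strategies above all
 * sit behind dma_alloc_coherent()/dma_free_coherent(), e.g.:
 *
 *	void *buf = dma_alloc_coherent(dev, 8192, &dma, GFP_KERNEL);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	// ... share 'buf' with the device via the 'dma' handle ...
 *	dma_free_coherent(dev, 8192, buf, dma);
 *
 * Whether the buffer comes from the atomic pool, CMA, or the remapped
 * page-array path is decided by the gfp flags, attrs and device coherency.
 */
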
#ifdef CONFIG_DMA_REMAP
static void *iommu_dma_alloc_noncoherent(struct device *dev, size_t size,
		dma_addr_t *handle, enum dma_data_direction dir, gfp_t gfp)
{
	if (!gfpflags_allow_blocking(gfp)) {
		struct page *page;

		page = dma_common_alloc_pages(dev, size, handle, dir, gfp);
		if (!page)
			return NULL;
		return page_address(page);
	}

	return iommu_dma_alloc_remap(dev, size, handle, gfp | __GFP_ZERO,
				     PAGE_KERNEL, 0);
}

static void iommu_dma_free_noncoherent(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t handle, enum dma_data_direction dir)
{
	__iommu_dma_unmap(dev, handle, size);
	__iommu_dma_free(dev, size, cpu_addr);
}
#else
#define iommu_dma_alloc_noncoherent		NULL
#define iommu_dma_free_noncoherent		NULL
#endif /* CONFIG_DMA_REMAP */


static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn, off = vma->vm_pgoff;
	int ret;

	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off >= nr_pages || vma_pages(vma) > nr_pages - off)
		return -ENXIO;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
		struct page **pages = dma_common_find_pages(cpu_addr);

		if (pages)
			return __iommu_dma_mmap(pages, size, vma);
		pfn = vmalloc_to_pfn(cpu_addr);
	} else {
		pfn = page_to_pfn(virt_to_page(cpu_addr));
	}

	return remap_pfn_range(vma, vma->vm_start, pfn + off,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}

static int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	struct page *page;
	int ret;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
		struct page **pages = dma_common_find_pages(cpu_addr);

		if (pages) {
			return sg_alloc_table_from_pages(sgt, pages,
					PAGE_ALIGN(size) >> PAGE_SHIFT,
					0, size, GFP_KERNEL);
		}

		page = vmalloc_to_page(cpu_addr);
	} else {
		page = virt_to_page(cpu_addr);
	}

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (!ret)
		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return ret;
}

static unsigned long iommu_dma_get_merge_boundary(struct device *dev)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);

	return (1UL << __ffs(domain->pgsize_bitmap)) - 1;
}
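
/*
 * Example (illustrative sketch, not built as part of this file): the merge
 * boundary above is the smallest IOMMU page size minus one. With
 * pgsize_bitmap = 0x40201000 (4K/2M/1G pages), __ffs() picks bit 12 and the
 * boundary is 0xfff. A block driver can feed this straight into its queue
 * limits so the block layer only merges segments the IOMMU can coalesce;
 * the request_queue below is hypothetical.
 */
#if 0
static void example_set_queue_limits(struct request_queue *q,
				     struct device *dev)
{
	/* dma_get_merge_boundary() ends up in iommu_dma_get_merge_boundary() */
	blk_queue_virt_boundary(q, dma_get_merge_boundary(dev));
}
#endif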
static const struct dma_map_ops iommu_dma_ops = {
	.alloc = iommu_dma_alloc,
	.free = iommu_dma_free,
	.alloc_pages = dma_common_alloc_pages,
	.free_pages = dma_common_free_pages,
	.alloc_noncoherent = iommu_dma_alloc_noncoherent,
	.free_noncoherent = iommu_dma_free_noncoherent,
	.mmap = iommu_dma_mmap,
	.get_sgtable = iommu_dma_get_sgtable,
	.map_page = iommu_dma_map_page,
	.unmap_page = iommu_dma_unmap_page,
	.map_sg = iommu_dma_map_sg,
	.unmap_sg = iommu_dma_unmap_sg,
	.sync_single_for_cpu = iommu_dma_sync_single_for_cpu,
	.sync_single_for_device = iommu_dma_sync_single_for_device,
	.sync_sg_for_cpu = iommu_dma_sync_sg_for_cpu,
	.sync_sg_for_device = iommu_dma_sync_sg_for_device,
	.map_resource = iommu_dma_map_resource,
	.unmap_resource = iommu_dma_unmap_resource,
	.get_merge_boundary = iommu_dma_get_merge_boundary,
};

void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

	if (!domain)
		goto out_err;

	/*
	 * The IOMMU core code allocates the default DMA domain, which the
	 * underlying IOMMU driver needs to support via the dma-iommu layer.
	 */
	if (domain->type == IOMMU_DOMAIN_DMA) {
		if (iommu_dma_init_domain(domain, dma_base, size, dev))
			goto out_err;
		dev->dma_ops = &iommu_dma_ops;
	}

	return;
out_err:
	pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
		dev_name(dev));
}
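
/*
 * Example (illustrative sketch, not built as part of this file): on arm64,
 * iommu_setup_dma_ops() is reached from the arch_setup_dma_ops() glue once
 * firmware (DT or ACPI IORT) has described the device's DMA window;
 * something along these lines, simplified:
 */
#if 0
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
			const struct iommu_ops *iommu, bool coherent)
{
	dev->dma_coherent = coherent;
	if (iommu)
		iommu_setup_dma_ops(dev, dma_base, size);
}
#endif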

static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
		phys_addr_t msi_addr, struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi_page;
	dma_addr_t iova;
	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
	size_t size = cookie_msi_granule(cookie);

	msi_addr &= ~(phys_addr_t)(size - 1);
	list_for_each_entry(msi_page, &cookie->msi_page_list, list)
		if (msi_page->phys == msi_addr)
			return msi_page;

	msi_page = kzalloc(sizeof(*msi_page), GFP_KERNEL);
	if (!msi_page)
		return NULL;

	iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
	if (!iova)
		goto out_free_page;

	if (iommu_map(domain, iova, msi_addr, size, prot))
		goto out_free_iova;

	INIT_LIST_HEAD(&msi_page->list);
	msi_page->phys = msi_addr;
	msi_page->iova = iova;
	list_add(&msi_page->list, &cookie->msi_page_list);
	return msi_page;

out_free_iova:
	iommu_dma_free_iova(cookie, iova, size);
out_free_page:
	kfree(msi_page);
	return NULL;
}

int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
{
	struct device *dev = msi_desc_to_dev(desc);
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iommu_dma_msi_page *msi_page;
	static DEFINE_MUTEX(msi_prepare_lock); /* see below */

	if (!domain || !domain->iova_cookie) {
		desc->iommu_cookie = NULL;
		return 0;
	}

	/*
	 * In fact the whole prepare operation should already be serialised by
	 * irq_domain_mutex further up the callchain, but that's pretty subtle
	 * on its own, so consider this locking as failsafe documentation...
	 */
	mutex_lock(&msi_prepare_lock);
	msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain);
	mutex_unlock(&msi_prepare_lock);

	msi_desc_set_iommu_cookie(desc, msi_page);

	if (!msi_page)
		return -ENOMEM;
	return 0;
}

void iommu_dma_compose_msi_msg(struct msi_desc *desc,
			       struct msi_msg *msg)
{
	struct device *dev = msi_desc_to_dev(desc);
	const struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	const struct iommu_dma_msi_page *msi_page;

	msi_page = msi_desc_get_iommu_cookie(desc);

	if (!domain || !domain->iova_cookie || WARN_ON(!msi_page))
		return;

	msg->address_hi = upper_32_bits(msi_page->iova);
	msg->address_lo &= cookie_msi_granule(domain->iova_cookie) - 1;
	msg->address_lo += lower_32_bits(msi_page->iova);
}

static int iommu_dma_init(void)
{
	return iova_cache_get();
}
arch_initcall(iommu_dma_init);
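
/*
 * Example (illustrative sketch, not built as part of this file): with a 4K
 * MSI granule, a doorbell at phys 0x08020040 mapped at IOVA 0xfffff000 makes
 * iommu_dma_compose_msi_msg() rewrite a message aimed at 0x08020040 to
 * 0xfffff040: the offset within the granule (0x040) is kept and the page
 * bits are replaced by the IOVA, which is what the masked add above computes.
 */
#if 0
static u64 example_rewrite_doorbell(u64 doorbell, u64 iova, u64 granule)
{
	/* Keep the offset within the granule, substitute the IOVA page */
	return (doorbell & (granule - 1)) + iova;
}
/* example_rewrite_doorbell(0x08020040, 0xfffff000, 0x1000) == 0xfffff040 */
#endif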