// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/dma-mapping.c
 *
 *  Copyright (C) 2000-2004 Russell King
 *
 *  DMA uncached mapping support.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/genalloc.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/highmem.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/sizes.h>
#include <linux/cma.h>

#include <asm/memory.h>
#include <asm/highmem.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/mach/arch.h>
#include <asm/dma-iommu.h>
#include <asm/mach/map.h>
#include <asm/system_info.h>
#include <xen/swiotlb-xen.h>

#include "dma.h"
#include "mm.h"

struct arm_dma_alloc_args {
	struct device *dev;
	size_t size;
	gfp_t gfp;
	pgprot_t prot;
	const void *caller;
	bool want_vaddr;
	int coherent_flag;
};

struct arm_dma_free_args {
	struct device *dev;
	size_t size;
	void *cpu_addr;
	struct page *page;
	bool want_vaddr;
};

#define NORMAL		0
#define COHERENT	1

struct arm_dma_allocator {
	void *(*alloc)(struct arm_dma_alloc_args *args,
		       struct page **ret_page);
	void (*free)(struct arm_dma_free_args *args);
};

struct arm_dma_buffer {
	struct list_head list;
	void *virt;
	struct arm_dma_allocator *allocator;
};

static LIST_HEAD(arm_dma_bufs);
static DEFINE_SPINLOCK(arm_dma_bufs_lock);

static struct arm_dma_buffer *arm_dma_buffer_find(void *virt)
{
	struct arm_dma_buffer *buf, *found = NULL;
	unsigned long flags;

	spin_lock_irqsave(&arm_dma_bufs_lock, flags);
	list_for_each_entry(buf, &arm_dma_bufs, list) {
		if (buf->virt == virt) {
			list_del(&buf->list);
			found = buf;
			break;
		}
	}
	spin_unlock_irqrestore(&arm_dma_bufs_lock, flags);
	return found;
}

/*
 * The DMA API is built upon the notion of "buffer ownership".  A buffer
 * is either exclusively owned by the CPU (and therefore may be accessed
 * by it) or exclusively owned by the DMA device.  These helper functions
 * represent the transitions between these two ownership states.
 *
 * Note, however, that on later ARMs, this notion does not work due to
 * speculative prefetches.  We model our approach on the assumption that
 * the CPU does do speculative prefetches, which means we clean caches
 * before transfers and delay cache invalidation until transfer completion.
 */
static void __dma_page_cpu_to_dev(struct page *, unsigned long,
		size_t, enum dma_data_direction);
static void __dma_page_dev_to_cpu(struct page *, unsigned long,
		size_t, enum dma_data_direction);
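
/*
 * For illustration only: a typical driver-side streaming DMA sequence and
 * the ownership transitions it implies.  The buffer, length and device
 * names below are made up and are not part of this file:
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_page(dev, page, 0, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 *	// device owns the buffer: kick off the transfer and wait for it
 *	dma_unmap_page(dev, handle, len, DMA_TO_DEVICE);
 *	// CPU owns the buffer again and may read or write it
 *
 * For a device using arm_dma_ops, dma_map_page() ends up in
 * arm_dma_map_page() below (caches are cleaned before the device gets the
 * buffer) and dma_unmap_page() ends up in arm_dma_unmap_page() (caches are
 * invalidated once the device is done).
 */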

/**
 * arm_dma_map_page - map a portion of a page for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
 * can regain ownership by calling dma_unmap_page().
 */
static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     unsigned long attrs)
{
	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__dma_page_cpu_to_dev(page, offset, size, dir);
	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}

static dma_addr_t arm_coherent_dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     unsigned long attrs)
{
	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}

/**
 * arm_dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * Unmap a page streaming mode DMA translation.  The handle and size
 * must match what was provided in the previous dma_map_page() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static void arm_dma_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
				      handle & ~PAGE_MASK, size, dir);
}

static void arm_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	unsigned int offset = handle & (PAGE_SIZE - 1);
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle - offset));

	__dma_page_dev_to_cpu(page, offset, size, dir);
}

static void arm_dma_sync_single_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	unsigned int offset = handle & (PAGE_SIZE - 1);
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle - offset));

	__dma_page_cpu_to_dev(page, offset, size, dir);
}

/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask
 * to this function.
 */
static int arm_dma_supported(struct device *dev, u64 mask)
{
	unsigned long max_dma_pfn = min(max_pfn - 1, arm_dma_pfn_limit);

	/*
	 * Translate the device's DMA mask to a PFN limit.  This
	 * PFN number includes the page which we can DMA to.
	 */
	return dma_to_pfn(dev, mask) >= max_dma_pfn;
}

const struct dma_map_ops arm_dma_ops = {
	.alloc			= arm_dma_alloc,
	.free			= arm_dma_free,
	.alloc_pages		= dma_direct_alloc_pages,
	.free_pages		= dma_direct_free_pages,
	.mmap			= arm_dma_mmap,
	.get_sgtable		= arm_dma_get_sgtable,
	.map_page		= arm_dma_map_page,
	.unmap_page		= arm_dma_unmap_page,
	.map_sg			= arm_dma_map_sg,
	.unmap_sg		= arm_dma_unmap_sg,
	.map_resource		= dma_direct_map_resource,
	.sync_single_for_cpu	= arm_dma_sync_single_for_cpu,
	.sync_single_for_device	= arm_dma_sync_single_for_device,
	.sync_sg_for_cpu	= arm_dma_sync_sg_for_cpu,
	.sync_sg_for_device	= arm_dma_sync_sg_for_device,
	.dma_supported		= arm_dma_supported,
	.get_required_mask	= dma_direct_get_required_mask,
};
EXPORT_SYMBOL(arm_dma_ops);

static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
	dma_addr_t *handle, gfp_t gfp, unsigned long attrs);
static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
				  dma_addr_t handle, unsigned long attrs);
static int arm_coherent_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		 void *cpu_addr, dma_addr_t dma_addr, size_t size,
		 unsigned long attrs);

const struct dma_map_ops arm_coherent_dma_ops = {
	.alloc			= arm_coherent_dma_alloc,
	.free			= arm_coherent_dma_free,
	.alloc_pages		= dma_direct_alloc_pages,
	.free_pages		= dma_direct_free_pages,
	.mmap			= arm_coherent_dma_mmap,
	.get_sgtable		= arm_dma_get_sgtable,
	.map_page		= arm_coherent_dma_map_page,
	.map_sg			= arm_dma_map_sg,
	.map_resource		= dma_direct_map_resource,
	.dma_supported		= arm_dma_supported,
	.get_required_mask	= dma_direct_get_required_mask,
};
EXPORT_SYMBOL(arm_coherent_dma_ops);

static void __dma_clear_buffer(struct page *page, size_t size, int coherent_flag)
{
	/*
	 * Ensure that the allocated pages are zeroed, and that any data
	 * lurking in the kernel direct-mapped region is invalidated.
	 */
	if (PageHighMem(page)) {
		phys_addr_t base = __pfn_to_phys(page_to_pfn(page));
		phys_addr_t end = base + size;

		while (size > 0) {
			void *ptr = kmap_atomic(page);

			memset(ptr, 0, PAGE_SIZE);
			if (coherent_flag != COHERENT)
				dmac_flush_range(ptr, ptr + PAGE_SIZE);
			kunmap_atomic(ptr);
			page++;
			size -= PAGE_SIZE;
		}
		if (coherent_flag != COHERENT)
			outer_flush_range(base, end);
	} else {
		void *ptr = page_address(page);

		memset(ptr, 0, size);
		if (coherent_flag != COHERENT) {
			dmac_flush_range(ptr, ptr + size);
			outer_flush_range(__pa(ptr), __pa(ptr) + size);
		}
	}
}

/*
 * Allocate a DMA buffer for 'dev' of size 'size' using the
 * specified gfp mask.  Note that 'size' must be page aligned.
 */
static struct page *__dma_alloc_buffer(struct device *dev, size_t size,
				       gfp_t gfp, int coherent_flag)
{
	unsigned long order = get_order(size);
	struct page *page, *p, *e;

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;

	/*
	 * Now split the huge page and free the excess pages
	 */
	split_page(page, order);
	for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
		__free_page(p);

	__dma_clear_buffer(page, size, coherent_flag);

	return page;
}
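
/*
 * Worked example for the allocator above (the request size is made up, for
 * illustration only): a 20 KiB request is five 4 KiB pages, so get_order()
 * rounds it up to order 3 (eight pages).  split_page() turns that order-3
 * block into eight independent order-0 pages, and the loop then frees the
 * three surplus pages, so only the five pages actually needed stay
 * allocated.
 */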

/*
 * Free a DMA buffer.  'size' must be page aligned.
 */
static void __dma_free_buffer(struct page *page, size_t size)
{
	struct page *e = page + (size >> PAGE_SHIFT);

	while (page < e) {
		__free_page(page);
		page++;
	}
}

static void *__alloc_from_contiguous(struct device *dev, size_t size,
				     pgprot_t prot, struct page **ret_page,
				     const void *caller, bool want_vaddr,
				     int coherent_flag, gfp_t gfp);

static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
				  pgprot_t prot, struct page **ret_page,
				  const void *caller, bool want_vaddr);

#define DEFAULT_DMA_COHERENT_POOL_SIZE	SZ_256K
static struct gen_pool *atomic_pool __ro_after_init;

static size_t atomic_pool_size __initdata = DEFAULT_DMA_COHERENT_POOL_SIZE;

static int __init early_coherent_pool(char *p)
{
	atomic_pool_size = memparse(p, &p);
	return 0;
}
early_param("coherent_pool", early_coherent_pool);

/*
 * Initialise the coherent pool for atomic allocations.
 */
static int __init atomic_pool_init(void)
{
	pgprot_t prot = pgprot_dmacoherent(PAGE_KERNEL);
	gfp_t gfp = GFP_KERNEL | GFP_DMA;
	struct page *page;
	void *ptr;

	atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
	if (!atomic_pool)
		goto out;
	/*
	 * The atomic pool is only used for non-coherent allocations
	 * so we must pass NORMAL for coherent_flag.
	 */
	if (dev_get_cma_area(NULL))
		ptr = __alloc_from_contiguous(NULL, atomic_pool_size, prot,
					      &page, atomic_pool_init, true,
					      NORMAL, GFP_KERNEL);
	else
		ptr = __alloc_remap_buffer(NULL, atomic_pool_size, gfp, prot,
					   &page, atomic_pool_init, true);
	if (ptr) {
		int ret;

		ret = gen_pool_add_virt(atomic_pool, (unsigned long)ptr,
					page_to_phys(page),
					atomic_pool_size, -1);
		if (ret)
			goto destroy_genpool;

		gen_pool_set_algo(atomic_pool,
				  gen_pool_first_fit_order_align,
				  NULL);
		pr_info("DMA: preallocated %zu KiB pool for atomic coherent allocations\n",
			atomic_pool_size / 1024);
		return 0;
	}

destroy_genpool:
	gen_pool_destroy(atomic_pool);
	atomic_pool = NULL;
out:
	pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n",
	       atomic_pool_size / 1024);
	return -ENOMEM;
}
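
/*
 * The pool size can be tuned from the kernel command line through the
 * "coherent_pool" early parameter registered above; memparse() accepts the
 * usual size suffixes, so, as an illustrative (made-up) example,
 *
 *	coherent_pool=1M
 *
 * would grow the atomic pool from the default SZ_256K to 1 MiB.
 */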

/*
 * CMA is activated by core_initcall, so we must be called after it.
 */
postcore_initcall(atomic_pool_init);

struct dma_contig_early_reserve {
	phys_addr_t base;
	unsigned long size;
};

static struct dma_contig_early_reserve dma_mmu_remap[MAX_CMA_AREAS] __initdata;

static int dma_mmu_remap_num __initdata;

void __init dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)
{
	dma_mmu_remap[dma_mmu_remap_num].base = base;
	dma_mmu_remap[dma_mmu_remap_num].size = size;
	dma_mmu_remap_num++;
}

void __init dma_contiguous_remap(void)
{
	int i;

	for (i = 0; i < dma_mmu_remap_num; i++) {
		phys_addr_t start = dma_mmu_remap[i].base;
		phys_addr_t end = start + dma_mmu_remap[i].size;
		struct map_desc map;
		unsigned long addr;

		if (end > arm_lowmem_limit)
			end = arm_lowmem_limit;
		if (start >= end)
			continue;

		map.pfn = __phys_to_pfn(start);
		map.virtual = __phys_to_virt(start);
		map.length = end - start;
		map.type = MT_MEMORY_DMA_READY;

		/*
		 * Clear previous low-memory mapping to ensure that the
		 * TLB does not see any conflicting entries, then flush
		 * the TLB of the old entries before creating new mappings.
		 *
		 * This ensures that any speculatively loaded TLB entries
		 * (even though they may be rare) can not cause any problems,
		 * and ensures that this code is architecturally compliant.
		 */
		for (addr = __phys_to_virt(start); addr < __phys_to_virt(end);
		     addr += PMD_SIZE)
			pmd_clear(pmd_off_k(addr));

		flush_tlb_kernel_range(__phys_to_virt(start),
				       __phys_to_virt(end));

		iotable_init(&map, 1);
	}
}

static int __dma_update_pte(pte_t *pte, unsigned long addr, void *data)
{
	struct page *page = virt_to_page(addr);
	pgprot_t prot = *(pgprot_t *)data;

	set_pte_ext(pte, mk_pte(page, prot), 0);
	return 0;
}

static void __dma_remap(struct page *page, size_t size, pgprot_t prot)
{
	unsigned long start = (unsigned long) page_address(page);
	unsigned end = start + size;

	apply_to_page_range(&init_mm, start, size, __dma_update_pte, &prot);
	flush_tlb_kernel_range(start, end);
}

static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
				  pgprot_t prot, struct page **ret_page,
				  const void *caller, bool want_vaddr)
{
	struct page *page;
	void *ptr = NULL;

	/*
	 * __alloc_remap_buffer is only called when the device is
	 * non-coherent
	 */
	page = __dma_alloc_buffer(dev, size, gfp, NORMAL);
	if (!page)
		return NULL;
	if (!want_vaddr)
		goto out;

	ptr = dma_common_contiguous_remap(page, size, prot, caller);
	if (!ptr) {
		__dma_free_buffer(page, size);
		return NULL;
	}

 out:
	*ret_page = page;
	return ptr;
}

static void *__alloc_from_pool(size_t size, struct page **ret_page)
{
	unsigned long val;
	void *ptr = NULL;

	if (!atomic_pool) {
		WARN(1, "coherent pool not initialised!\n");
		return NULL;
	}

	val = gen_pool_alloc(atomic_pool, size);
	if (val) {
		phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);

		*ret_page = phys_to_page(phys);
		ptr = (void *)val;
	}

	return ptr;
}

static bool __in_atomic_pool(void *start, size_t size)
{
	return gen_pool_has_addr(atomic_pool, (unsigned long)start, size);
}

static int __free_from_pool(void *start, size_t size)
{
	if (!__in_atomic_pool(start, size))
		return 0;

	gen_pool_free(atomic_pool, (unsigned long)start, size);

	return 1;
}

static void *__alloc_from_contiguous(struct device *dev, size_t size,
				     pgprot_t prot, struct page **ret_page,
				     const void *caller, bool want_vaddr,
				     int coherent_flag, gfp_t gfp)
{
	unsigned long order = get_order(size);
	size_t count = size >> PAGE_SHIFT;
	struct page *page;
	void *ptr = NULL;

	page = dma_alloc_from_contiguous(dev, count, order, gfp & __GFP_NOWARN);
	if (!page)
		return NULL;

	__dma_clear_buffer(page, size, coherent_flag);

	if (!want_vaddr)
		goto out;

	if (PageHighMem(page)) {
		ptr = dma_common_contiguous_remap(page, size, prot, caller);
		if (!ptr) {
			dma_release_from_contiguous(dev, page, count);
			return NULL;
		}
	} else {
		__dma_remap(page, size, prot);
		ptr = page_address(page);
	}

 out:
	*ret_page = page;
	return ptr;
}

static void __free_from_contiguous(struct device *dev, struct page *page,
				   void *cpu_addr, size_t size, bool want_vaddr)
{
	if (want_vaddr) {
		if (PageHighMem(page))
			dma_common_free_remap(cpu_addr, size);
		else
			__dma_remap(page, size, PAGE_KERNEL);
	}
	dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
}

static inline pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot)
{
	prot = (attrs & DMA_ATTR_WRITE_COMBINE) ?
			pgprot_writecombine(prot) :
			pgprot_dmacoherent(prot);
	return prot;
}

static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp,
				   struct page **ret_page)
{
	struct page *page;

	/* __alloc_simple_buffer is only called when the device is coherent */
	page = __dma_alloc_buffer(dev, size, gfp, COHERENT);
	if (!page)
		return NULL;

	*ret_page = page;
	return page_address(page);
}

static void *simple_allocator_alloc(struct arm_dma_alloc_args *args,
				    struct page **ret_page)
{
	return __alloc_simple_buffer(args->dev, args->size, args->gfp,
				     ret_page);
}

static void simple_allocator_free(struct arm_dma_free_args *args)
{
	__dma_free_buffer(args->page, args->size);
}

static struct arm_dma_allocator simple_allocator = {
	.alloc = simple_allocator_alloc,
	.free = simple_allocator_free,
};

static void *cma_allocator_alloc(struct arm_dma_alloc_args *args,
				 struct page **ret_page)
{
	return __alloc_from_contiguous(args->dev, args->size, args->prot,
				       ret_page, args->caller,
				       args->want_vaddr, args->coherent_flag,
				       args->gfp);
}

static void cma_allocator_free(struct arm_dma_free_args *args)
{
	__free_from_contiguous(args->dev, args->page, args->cpu_addr,
			       args->size, args->want_vaddr);
}

static struct arm_dma_allocator cma_allocator = {
	.alloc = cma_allocator_alloc,
	.free = cma_allocator_free,
};

static void *pool_allocator_alloc(struct arm_dma_alloc_args *args,
				  struct page **ret_page)
{
	return __alloc_from_pool(args->size, ret_page);
}

static void pool_allocator_free(struct arm_dma_free_args *args)
{
	__free_from_pool(args->cpu_addr, args->size);
}

static struct arm_dma_allocator pool_allocator = {
	.alloc = pool_allocator_alloc,
	.free = pool_allocator_free,
};
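
/*
 * Rough summary of how __dma_alloc() below picks one of these allocators,
 * derived from the selection logic in that function:
 *
 *	gfp allows blocking and dev has a CMA area	-> cma_allocator
 *	device is DMA coherent				-> simple_allocator
 *	gfp allows blocking, no CMA area		-> remap_allocator (below)
 *	atomic context (no blocking allowed)		-> pool_allocator
 */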

static void *remap_allocator_alloc(struct arm_dma_alloc_args *args,
				   struct page **ret_page)
{
	return __alloc_remap_buffer(args->dev, args->size, args->gfp,
				    args->prot, ret_page, args->caller,
				    args->want_vaddr);
}

static void remap_allocator_free(struct arm_dma_free_args *args)
{
	if (args->want_vaddr)
		dma_common_free_remap(args->cpu_addr, args->size);

	__dma_free_buffer(args->page, args->size);
}

static struct arm_dma_allocator remap_allocator = {
	.alloc = remap_allocator_alloc,
	.free = remap_allocator_free,
};

static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
			 gfp_t gfp, pgprot_t prot, bool is_coherent,
			 unsigned long attrs, const void *caller)
{
	u64 mask = min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
	struct page *page = NULL;
	void *addr;
	bool allowblock, cma;
	struct arm_dma_buffer *buf;
	struct arm_dma_alloc_args args = {
		.dev = dev,
		.size = PAGE_ALIGN(size),
		.gfp = gfp,
		.prot = prot,
		.caller = caller,
		.want_vaddr = ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0),
		.coherent_flag = is_coherent ? COHERENT : NORMAL,
	};

#ifdef CONFIG_DMA_API_DEBUG
	u64 limit = (mask + 1) & ~mask;
	if (limit && size >= limit) {
		dev_warn(dev, "coherent allocation too big (requested %#x mask %#llx)\n",
			 size, mask);
		return NULL;
	}
#endif

	buf = kzalloc(sizeof(*buf),
		      gfp & ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM));
	if (!buf)
		return NULL;

	if (mask < 0xffffffffULL)
		gfp |= GFP_DMA;

	/*
	 * Following is a work-around (a.k.a. hack) to prevent pages
	 * with __GFP_COMP being passed to split_page() which cannot
	 * handle them.  The real problem is that this flag probably
	 * should be 0 on ARM as it is not supported on this
	 * platform; see CONFIG_HUGETLBFS.
	 */
	gfp &= ~(__GFP_COMP);
	args.gfp = gfp;

	*handle = DMA_MAPPING_ERROR;
	allowblock = gfpflags_allow_blocking(gfp);
	cma = allowblock ? dev_get_cma_area(dev) : false;

	if (cma)
		buf->allocator = &cma_allocator;
	else if (is_coherent)
		buf->allocator = &simple_allocator;
	else if (allowblock)
		buf->allocator = &remap_allocator;
	else
		buf->allocator = &pool_allocator;

	addr = buf->allocator->alloc(&args, &page);

	if (page) {
		unsigned long flags;

		*handle = pfn_to_dma(dev, page_to_pfn(page));
		buf->virt = args.want_vaddr ? addr : page;

		spin_lock_irqsave(&arm_dma_bufs_lock, flags);
		list_add(&buf->list, &arm_dma_bufs);
		spin_unlock_irqrestore(&arm_dma_bufs_lock, flags);
	} else {
		kfree(buf);
	}

	return args.want_vaddr ? addr : page;
}

/*
 * Allocate DMA-coherent memory space and return both the kernel remapped
 * virtual and bus address for that space.
 */
void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
		    gfp_t gfp, unsigned long attrs)
{
	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);

	return __dma_alloc(dev, size, handle, gfp, prot, false,
			   attrs, __builtin_return_address(0));
}

static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
	dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
{
	return __dma_alloc(dev, size, handle, gfp, PAGE_KERNEL, true,
			   attrs, __builtin_return_address(0));
}

static int __arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		 void *cpu_addr, dma_addr_t dma_addr, size_t size,
		 unsigned long attrs)
{
	int ret = -ENXIO;
	unsigned long nr_vma_pages = vma_pages(vma);
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn = dma_to_pfn(dev, dma_addr);
	unsigned long off = vma->vm_pgoff;

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
		ret = remap_pfn_range(vma, vma->vm_start,
				      pfn + off,
				      vma->vm_end - vma->vm_start,
				      vma->vm_page_prot);
	}

	return ret;
}

/*
 * Create userspace mapping for the DMA-coherent memory.
 */
static int arm_coherent_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		 void *cpu_addr, dma_addr_t dma_addr, size_t size,
		 unsigned long attrs)
{
	return __arm_dma_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}

int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		 void *cpu_addr, dma_addr_t dma_addr, size_t size,
		 unsigned long attrs)
{
	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
	return __arm_dma_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}
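
/*
 * For illustration only, how a driver would typically consume the coherent
 * allocation and mmap paths above (the buffer size and variable names here
 * are made up):
 *
 *	void *cpu;
 *	dma_addr_t dma;
 *
 *	cpu = dma_alloc_coherent(dev, SZ_64K, &dma, GFP_KERNEL);
 *	...
 *	// from the driver's mmap file operation, export it to user space:
 *	dma_mmap_coherent(dev, vma, cpu, dma, SZ_64K);
 *
 * For a device using arm_dma_ops those calls land in arm_dma_alloc() and
 * arm_dma_mmap() above.
 */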

/*
 * Free a buffer as defined by the above mapping.
 */
static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
			   dma_addr_t handle, unsigned long attrs,
			   bool is_coherent)
{
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
	struct arm_dma_buffer *buf;
	struct arm_dma_free_args args = {
		.dev = dev,
		.size = PAGE_ALIGN(size),
		.cpu_addr = cpu_addr,
		.page = page,
		.want_vaddr = ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0),
	};

	buf = arm_dma_buffer_find(cpu_addr);
	if (WARN(!buf, "Freeing invalid buffer %p\n", cpu_addr))
		return;

	buf->allocator->free(&args);
	kfree(buf);
}

void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
		  dma_addr_t handle, unsigned long attrs)
{
	__arm_dma_free(dev, size, cpu_addr, handle, attrs, false);
}

static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
				  dma_addr_t handle, unsigned long attrs)
{
	__arm_dma_free(dev, size, cpu_addr, handle, attrs, true);
}

int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
		 void *cpu_addr, dma_addr_t handle, size_t size,
		 unsigned long attrs)
{
	unsigned long pfn = dma_to_pfn(dev, handle);
	struct page *page;
	int ret;

	/* If the PFN is not valid, we do not have a struct page */
	if (!pfn_valid(pfn))
		return -ENXIO;

	page = pfn_to_page(pfn);

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (unlikely(ret))
		return ret;

	sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return 0;
}

static void dma_cache_maint_page(struct page *page, unsigned long offset,
	size_t size, enum dma_data_direction dir,
	void (*op)(const void *, size_t, int))
{
	unsigned long pfn;
	size_t left = size;

	pfn = page_to_pfn(page) + offset / PAGE_SIZE;
	offset %= PAGE_SIZE;

	/*
	 * A single sg entry may refer to multiple physically contiguous
	 * pages.  But we still need to process highmem pages individually.
	 * If highmem is not configured then the bulk of this loop gets
	 * optimized out.
	 */
	do {
		size_t len = left;
		void *vaddr;

		page = pfn_to_page(pfn);

		if (PageHighMem(page)) {
			if (len + offset > PAGE_SIZE)
				len = PAGE_SIZE - offset;

			if (cache_is_vipt_nonaliasing()) {
				vaddr = kmap_atomic(page);
				op(vaddr + offset, len, dir);
				kunmap_atomic(vaddr);
			} else {
				vaddr = kmap_high_get(page);
				if (vaddr) {
					op(vaddr + offset, len, dir);
					kunmap_high(page);
				}
			}
		} else {
			vaddr = page_address(page) + offset;
			op(vaddr, len, dir);
		}
		offset = 0;
		pfn++;
		left -= len;
	} while (left);
}

/*
 * Make an area consistent for devices.
 * Note: Drivers should NOT use this function directly, as it will break
 * platforms with CONFIG_DMABOUNCE.
 * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
 */
static void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr;

	dma_cache_maint_page(page, off, size, dir, dmac_map_area);

	paddr = page_to_phys(page) + off;
	if (dir == DMA_FROM_DEVICE) {
		outer_inv_range(paddr, paddr + size);
	} else {
		outer_clean_range(paddr, paddr + size);
	}
	/* FIXME: non-speculating: flush on bidirectional mappings? */
}

static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = page_to_phys(page) + off;

	/* FIXME: non-speculating: not required */
	/* in any case, don't bother invalidating if DMA to device */
	if (dir != DMA_TO_DEVICE) {
		outer_inv_range(paddr, paddr + size);

		dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);
	}

	/*
	 * Mark the D-cache clean for these pages to avoid extra flushing.
	 */
	if (dir != DMA_TO_DEVICE && size >= PAGE_SIZE) {
		unsigned long pfn;
		size_t left = size;

		pfn = page_to_pfn(page) + off / PAGE_SIZE;
		off %= PAGE_SIZE;
		if (off) {
			pfn++;
			left -= PAGE_SIZE - off;
		}
		while (left >= PAGE_SIZE) {
			page = pfn_to_page(pfn++);
			set_bit(PG_dcache_clean, &page->flags);
			left -= PAGE_SIZE;
		}
	}
}
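
/*
 * Rough summary of the cache maintenance performed by the two helpers
 * above, by transfer direction (the outer cache is handled alongside the
 * inner cache in each case):
 *
 *	DMA_TO_DEVICE:		clean before the transfer, nothing after
 *	DMA_FROM_DEVICE:	invalidate before and after the transfer
 *	DMA_BIDIRECTIONAL:	clean before, invalidate after
 */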

/**
 * arm_dma_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the dma_map_single interface.
 * Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}.
 *
 * Device ownership issues as mentioned for dma_map_single are the same
 * here.
 */
int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i, j;

	for_each_sg(sg, s, nents, i) {
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		s->dma_length = s->length;
#endif
		s->dma_address = ops->map_page(dev, sg_page(s), s->offset,
						s->length, dir, attrs);
		if (dma_mapping_error(dev, s->dma_address))
			goto bad_mapping;
	}
	return nents;

 bad_mapping:
	for_each_sg(sg, s, i, j)
		ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
	return 0;
}

/**
 * arm_dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 *
 * Unmap a set of streaming mode DMA translations.  Again, CPU access
 * rules concerning calls here are the same as for dma_unmap_single().
 */
void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
}

/**
 * arm_dma_sync_sg_for_cpu
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void arm_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		ops->sync_single_for_cpu(dev, sg_dma_address(s), s->length,
					 dir);
}

/**
 * arm_dma_sync_sg_for_device
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		ops->sync_single_for_device(dev, sg_dma_address(s), s->length,
					    dir);
}

static const struct dma_map_ops *arm_get_dma_map_ops(bool coherent)
{
	/*
	 * When CONFIG_ARM_LPAE is set, physical address can extend above
	 * 32-bits, which then can't be addressed by devices that only support
	 * 32-bit DMA.
	 * Use the generic dma-direct / swiotlb ops code in that case, as that
	 * handles bounce buffering for us.
	 */
	if (IS_ENABLED(CONFIG_ARM_LPAE))
		return NULL;
	return coherent ? &arm_coherent_dma_ops : &arm_dma_ops;
}

#ifdef CONFIG_ARM_DMA_USE_IOMMU

static int __dma_info_to_prot(enum dma_data_direction dir, unsigned long attrs)
{
	int prot = 0;

	if (attrs & DMA_ATTR_PRIVILEGED)
		prot |= IOMMU_PRIV;

	switch (dir) {
	case DMA_BIDIRECTIONAL:
		return prot | IOMMU_READ | IOMMU_WRITE;
	case DMA_TO_DEVICE:
		return prot | IOMMU_READ;
	case DMA_FROM_DEVICE:
		return prot | IOMMU_WRITE;
	default:
		return prot;
	}
}

/* IOMMU */

static int extend_iommu_mapping(struct dma_iommu_mapping *mapping);

static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
				      size_t size)
{
	unsigned int order = get_order(size);
	unsigned int align = 0;
	unsigned int count, start;
	size_t mapping_size = mapping->bits << PAGE_SHIFT;
	unsigned long flags;
	dma_addr_t iova;
	int i;

	if (order > CONFIG_ARM_DMA_IOMMU_ALIGNMENT)
		order = CONFIG_ARM_DMA_IOMMU_ALIGNMENT;

	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	align = (1 << order) - 1;

	spin_lock_irqsave(&mapping->lock, flags);
	for (i = 0; i < mapping->nr_bitmaps; i++) {
		start = bitmap_find_next_zero_area(mapping->bitmaps[i],
				mapping->bits, 0, count, align);

		if (start > mapping->bits)
			continue;

		bitmap_set(mapping->bitmaps[i], start, count);
		break;
	}

	/*
	 * No unused range found. Try to extend the existing mapping
	 * and perform a second attempt to reserve an IO virtual
	 * address range of size bytes.
11348c2ecf20Sopenharmony_ci */ 11358c2ecf20Sopenharmony_ci if (i == mapping->nr_bitmaps) { 11368c2ecf20Sopenharmony_ci if (extend_iommu_mapping(mapping)) { 11378c2ecf20Sopenharmony_ci spin_unlock_irqrestore(&mapping->lock, flags); 11388c2ecf20Sopenharmony_ci return DMA_MAPPING_ERROR; 11398c2ecf20Sopenharmony_ci } 11408c2ecf20Sopenharmony_ci 11418c2ecf20Sopenharmony_ci start = bitmap_find_next_zero_area(mapping->bitmaps[i], 11428c2ecf20Sopenharmony_ci mapping->bits, 0, count, align); 11438c2ecf20Sopenharmony_ci 11448c2ecf20Sopenharmony_ci if (start > mapping->bits) { 11458c2ecf20Sopenharmony_ci spin_unlock_irqrestore(&mapping->lock, flags); 11468c2ecf20Sopenharmony_ci return DMA_MAPPING_ERROR; 11478c2ecf20Sopenharmony_ci } 11488c2ecf20Sopenharmony_ci 11498c2ecf20Sopenharmony_ci bitmap_set(mapping->bitmaps[i], start, count); 11508c2ecf20Sopenharmony_ci } 11518c2ecf20Sopenharmony_ci spin_unlock_irqrestore(&mapping->lock, flags); 11528c2ecf20Sopenharmony_ci 11538c2ecf20Sopenharmony_ci iova = mapping->base + (mapping_size * i); 11548c2ecf20Sopenharmony_ci iova += start << PAGE_SHIFT; 11558c2ecf20Sopenharmony_ci 11568c2ecf20Sopenharmony_ci return iova; 11578c2ecf20Sopenharmony_ci} 11588c2ecf20Sopenharmony_ci 11598c2ecf20Sopenharmony_cistatic inline void __free_iova(struct dma_iommu_mapping *mapping, 11608c2ecf20Sopenharmony_ci dma_addr_t addr, size_t size) 11618c2ecf20Sopenharmony_ci{ 11628c2ecf20Sopenharmony_ci unsigned int start, count; 11638c2ecf20Sopenharmony_ci size_t mapping_size = mapping->bits << PAGE_SHIFT; 11648c2ecf20Sopenharmony_ci unsigned long flags; 11658c2ecf20Sopenharmony_ci dma_addr_t bitmap_base; 11668c2ecf20Sopenharmony_ci u32 bitmap_index; 11678c2ecf20Sopenharmony_ci 11688c2ecf20Sopenharmony_ci if (!size) 11698c2ecf20Sopenharmony_ci return; 11708c2ecf20Sopenharmony_ci 11718c2ecf20Sopenharmony_ci bitmap_index = (u32) (addr - mapping->base) / (u32) mapping_size; 11728c2ecf20Sopenharmony_ci BUG_ON(addr < mapping->base || bitmap_index > mapping->extensions); 11738c2ecf20Sopenharmony_ci 11748c2ecf20Sopenharmony_ci bitmap_base = mapping->base + mapping_size * bitmap_index; 11758c2ecf20Sopenharmony_ci 11768c2ecf20Sopenharmony_ci start = (addr - bitmap_base) >> PAGE_SHIFT; 11778c2ecf20Sopenharmony_ci 11788c2ecf20Sopenharmony_ci if (addr + size > bitmap_base + mapping_size) { 11798c2ecf20Sopenharmony_ci /* 11808c2ecf20Sopenharmony_ci * The address range to be freed reaches into the iova 11818c2ecf20Sopenharmony_ci * range of the next bitmap. This should not happen as 11828c2ecf20Sopenharmony_ci * we don't allow this in __alloc_iova (at the 11838c2ecf20Sopenharmony_ci * moment). 11848c2ecf20Sopenharmony_ci */ 11858c2ecf20Sopenharmony_ci BUG(); 11868c2ecf20Sopenharmony_ci } else 11878c2ecf20Sopenharmony_ci count = size >> PAGE_SHIFT; 11888c2ecf20Sopenharmony_ci 11898c2ecf20Sopenharmony_ci spin_lock_irqsave(&mapping->lock, flags); 11908c2ecf20Sopenharmony_ci bitmap_clear(mapping->bitmaps[bitmap_index], start, count); 11918c2ecf20Sopenharmony_ci spin_unlock_irqrestore(&mapping->lock, flags); 11928c2ecf20Sopenharmony_ci} 11938c2ecf20Sopenharmony_ci 11948c2ecf20Sopenharmony_ci/* We'll try 2M, 1M, 64K, and finally 4K; array must end with 0! 
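 * As a worked check of the orders above, assuming the usual 4 KiB ARM page size: order 9 covers 2^9 pages = 2 MiB, order 8 covers 1 MiB, order 4 covers 64 KiB and order 0 covers 4 KiB.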
*/ 11958c2ecf20Sopenharmony_cistatic const int iommu_order_array[] = { 9, 8, 4, 0 }; 11968c2ecf20Sopenharmony_ci 11978c2ecf20Sopenharmony_cistatic struct page **__iommu_alloc_buffer(struct device *dev, size_t size, 11988c2ecf20Sopenharmony_ci gfp_t gfp, unsigned long attrs, 11998c2ecf20Sopenharmony_ci int coherent_flag) 12008c2ecf20Sopenharmony_ci{ 12018c2ecf20Sopenharmony_ci struct page **pages; 12028c2ecf20Sopenharmony_ci int count = size >> PAGE_SHIFT; 12038c2ecf20Sopenharmony_ci int array_size = count * sizeof(struct page *); 12048c2ecf20Sopenharmony_ci int i = 0; 12058c2ecf20Sopenharmony_ci int order_idx = 0; 12068c2ecf20Sopenharmony_ci 12078c2ecf20Sopenharmony_ci if (array_size <= PAGE_SIZE) 12088c2ecf20Sopenharmony_ci pages = kzalloc(array_size, GFP_KERNEL); 12098c2ecf20Sopenharmony_ci else 12108c2ecf20Sopenharmony_ci pages = vzalloc(array_size); 12118c2ecf20Sopenharmony_ci if (!pages) 12128c2ecf20Sopenharmony_ci return NULL; 12138c2ecf20Sopenharmony_ci 12148c2ecf20Sopenharmony_ci if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) 12158c2ecf20Sopenharmony_ci { 12168c2ecf20Sopenharmony_ci unsigned long order = get_order(size); 12178c2ecf20Sopenharmony_ci struct page *page; 12188c2ecf20Sopenharmony_ci 12198c2ecf20Sopenharmony_ci page = dma_alloc_from_contiguous(dev, count, order, 12208c2ecf20Sopenharmony_ci gfp & __GFP_NOWARN); 12218c2ecf20Sopenharmony_ci if (!page) 12228c2ecf20Sopenharmony_ci goto error; 12238c2ecf20Sopenharmony_ci 12248c2ecf20Sopenharmony_ci __dma_clear_buffer(page, size, coherent_flag); 12258c2ecf20Sopenharmony_ci 12268c2ecf20Sopenharmony_ci for (i = 0; i < count; i++) 12278c2ecf20Sopenharmony_ci pages[i] = page + i; 12288c2ecf20Sopenharmony_ci 12298c2ecf20Sopenharmony_ci return pages; 12308c2ecf20Sopenharmony_ci } 12318c2ecf20Sopenharmony_ci 12328c2ecf20Sopenharmony_ci /* Go straight to 4K chunks if caller says it's OK. 
*/ 12338c2ecf20Sopenharmony_ci if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES) 12348c2ecf20Sopenharmony_ci order_idx = ARRAY_SIZE(iommu_order_array) - 1; 12358c2ecf20Sopenharmony_ci 12368c2ecf20Sopenharmony_ci /* 12378c2ecf20Sopenharmony_ci * The IOMMU can map any pages, so highmem can also be used here 12388c2ecf20Sopenharmony_ci */ 12398c2ecf20Sopenharmony_ci gfp |= __GFP_NOWARN | __GFP_HIGHMEM; 12408c2ecf20Sopenharmony_ci 12418c2ecf20Sopenharmony_ci while (count) { 12428c2ecf20Sopenharmony_ci int j, order; 12438c2ecf20Sopenharmony_ci 12448c2ecf20Sopenharmony_ci order = iommu_order_array[order_idx]; 12458c2ecf20Sopenharmony_ci 12468c2ecf20Sopenharmony_ci /* Drop down when we get small */ 12478c2ecf20Sopenharmony_ci if (__fls(count) < order) { 12488c2ecf20Sopenharmony_ci order_idx++; 12498c2ecf20Sopenharmony_ci continue; 12508c2ecf20Sopenharmony_ci } 12518c2ecf20Sopenharmony_ci 12528c2ecf20Sopenharmony_ci if (order) { 12538c2ecf20Sopenharmony_ci /* See if it's easy to allocate a high-order chunk */ 12548c2ecf20Sopenharmony_ci pages[i] = alloc_pages(gfp | __GFP_NORETRY, order); 12558c2ecf20Sopenharmony_ci 12568c2ecf20Sopenharmony_ci /* Go down a notch at first sign of pressure */ 12578c2ecf20Sopenharmony_ci if (!pages[i]) { 12588c2ecf20Sopenharmony_ci order_idx++; 12598c2ecf20Sopenharmony_ci continue; 12608c2ecf20Sopenharmony_ci } 12618c2ecf20Sopenharmony_ci } else { 12628c2ecf20Sopenharmony_ci pages[i] = alloc_pages(gfp, 0); 12638c2ecf20Sopenharmony_ci if (!pages[i]) 12648c2ecf20Sopenharmony_ci goto error; 12658c2ecf20Sopenharmony_ci } 12668c2ecf20Sopenharmony_ci 12678c2ecf20Sopenharmony_ci if (order) { 12688c2ecf20Sopenharmony_ci split_page(pages[i], order); 12698c2ecf20Sopenharmony_ci j = 1 << order; 12708c2ecf20Sopenharmony_ci while (--j) 12718c2ecf20Sopenharmony_ci pages[i + j] = pages[i] + j; 12728c2ecf20Sopenharmony_ci } 12738c2ecf20Sopenharmony_ci 12748c2ecf20Sopenharmony_ci __dma_clear_buffer(pages[i], PAGE_SIZE << order, coherent_flag); 12758c2ecf20Sopenharmony_ci i += 1 << order; 12768c2ecf20Sopenharmony_ci count -= 1 << order; 12778c2ecf20Sopenharmony_ci } 12788c2ecf20Sopenharmony_ci 12798c2ecf20Sopenharmony_ci return pages; 12808c2ecf20Sopenharmony_cierror: 12818c2ecf20Sopenharmony_ci while (i--) 12828c2ecf20Sopenharmony_ci if (pages[i]) 12838c2ecf20Sopenharmony_ci __free_pages(pages[i], 0); 12848c2ecf20Sopenharmony_ci kvfree(pages); 12858c2ecf20Sopenharmony_ci return NULL; 12868c2ecf20Sopenharmony_ci} 12878c2ecf20Sopenharmony_ci 12888c2ecf20Sopenharmony_cistatic int __iommu_free_buffer(struct device *dev, struct page **pages, 12898c2ecf20Sopenharmony_ci size_t size, unsigned long attrs) 12908c2ecf20Sopenharmony_ci{ 12918c2ecf20Sopenharmony_ci int count = size >> PAGE_SHIFT; 12928c2ecf20Sopenharmony_ci int i; 12938c2ecf20Sopenharmony_ci 12948c2ecf20Sopenharmony_ci if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) { 12958c2ecf20Sopenharmony_ci dma_release_from_contiguous(dev, pages[0], count); 12968c2ecf20Sopenharmony_ci } else { 12978c2ecf20Sopenharmony_ci for (i = 0; i < count; i++) 12988c2ecf20Sopenharmony_ci if (pages[i]) 12998c2ecf20Sopenharmony_ci __free_pages(pages[i], 0); 13008c2ecf20Sopenharmony_ci } 13018c2ecf20Sopenharmony_ci 13028c2ecf20Sopenharmony_ci kvfree(pages); 13038c2ecf20Sopenharmony_ci return 0; 13048c2ecf20Sopenharmony_ci} 13058c2ecf20Sopenharmony_ci 13068c2ecf20Sopenharmony_ci/* 13078c2ecf20Sopenharmony_ci * Create a mapping in device IO address space for specified pages 13088c2ecf20Sopenharmony_ci */ 13098c2ecf20Sopenharmony_cistatic dma_addr_t
13108c2ecf20Sopenharmony_ci__iommu_create_mapping(struct device *dev, struct page **pages, size_t size, 13118c2ecf20Sopenharmony_ci unsigned long attrs) 13128c2ecf20Sopenharmony_ci{ 13138c2ecf20Sopenharmony_ci struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); 13148c2ecf20Sopenharmony_ci unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; 13158c2ecf20Sopenharmony_ci dma_addr_t dma_addr, iova; 13168c2ecf20Sopenharmony_ci int i; 13178c2ecf20Sopenharmony_ci 13188c2ecf20Sopenharmony_ci dma_addr = __alloc_iova(mapping, size); 13198c2ecf20Sopenharmony_ci if (dma_addr == DMA_MAPPING_ERROR) 13208c2ecf20Sopenharmony_ci return dma_addr; 13218c2ecf20Sopenharmony_ci 13228c2ecf20Sopenharmony_ci iova = dma_addr; 13238c2ecf20Sopenharmony_ci for (i = 0; i < count; ) { 13248c2ecf20Sopenharmony_ci int ret; 13258c2ecf20Sopenharmony_ci 13268c2ecf20Sopenharmony_ci unsigned int next_pfn = page_to_pfn(pages[i]) + 1; 13278c2ecf20Sopenharmony_ci phys_addr_t phys = page_to_phys(pages[i]); 13288c2ecf20Sopenharmony_ci unsigned int len, j; 13298c2ecf20Sopenharmony_ci 13308c2ecf20Sopenharmony_ci for (j = i + 1; j < count; j++, next_pfn++) 13318c2ecf20Sopenharmony_ci if (page_to_pfn(pages[j]) != next_pfn) 13328c2ecf20Sopenharmony_ci break; 13338c2ecf20Sopenharmony_ci 13348c2ecf20Sopenharmony_ci len = (j - i) << PAGE_SHIFT; 13358c2ecf20Sopenharmony_ci ret = iommu_map(mapping->domain, iova, phys, len, 13368c2ecf20Sopenharmony_ci __dma_info_to_prot(DMA_BIDIRECTIONAL, attrs)); 13378c2ecf20Sopenharmony_ci if (ret < 0) 13388c2ecf20Sopenharmony_ci goto fail; 13398c2ecf20Sopenharmony_ci iova += len; 13408c2ecf20Sopenharmony_ci i = j; 13418c2ecf20Sopenharmony_ci } 13428c2ecf20Sopenharmony_ci return dma_addr; 13438c2ecf20Sopenharmony_cifail: 13448c2ecf20Sopenharmony_ci iommu_unmap(mapping->domain, dma_addr, iova-dma_addr); 13458c2ecf20Sopenharmony_ci __free_iova(mapping, dma_addr, size); 13468c2ecf20Sopenharmony_ci return DMA_MAPPING_ERROR; 13478c2ecf20Sopenharmony_ci} 13488c2ecf20Sopenharmony_ci 13498c2ecf20Sopenharmony_cistatic int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size) 13508c2ecf20Sopenharmony_ci{ 13518c2ecf20Sopenharmony_ci struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); 13528c2ecf20Sopenharmony_ci 13538c2ecf20Sopenharmony_ci /* 13548c2ecf20Sopenharmony_ci * add optional in-page offset from iova to size and align 13558c2ecf20Sopenharmony_ci * result to page size 13568c2ecf20Sopenharmony_ci */ 13578c2ecf20Sopenharmony_ci size = PAGE_ALIGN((iova & ~PAGE_MASK) + size); 13588c2ecf20Sopenharmony_ci iova &= PAGE_MASK; 13598c2ecf20Sopenharmony_ci 13608c2ecf20Sopenharmony_ci iommu_unmap(mapping->domain, iova, size); 13618c2ecf20Sopenharmony_ci __free_iova(mapping, iova, size); 13628c2ecf20Sopenharmony_ci return 0; 13638c2ecf20Sopenharmony_ci} 13648c2ecf20Sopenharmony_ci 13658c2ecf20Sopenharmony_cistatic struct page **__atomic_get_pages(void *addr) 13668c2ecf20Sopenharmony_ci{ 13678c2ecf20Sopenharmony_ci struct page *page; 13688c2ecf20Sopenharmony_ci phys_addr_t phys; 13698c2ecf20Sopenharmony_ci 13708c2ecf20Sopenharmony_ci phys = gen_pool_virt_to_phys(atomic_pool, (unsigned long)addr); 13718c2ecf20Sopenharmony_ci page = phys_to_page(phys); 13728c2ecf20Sopenharmony_ci 13738c2ecf20Sopenharmony_ci return (struct page **)page; 13748c2ecf20Sopenharmony_ci} 13758c2ecf20Sopenharmony_ci 13768c2ecf20Sopenharmony_cistatic struct page **__iommu_get_pages(void *cpu_addr, unsigned long attrs) 13778c2ecf20Sopenharmony_ci{ 13788c2ecf20Sopenharmony_ci if (__in_atomic_pool(cpu_addr, 
PAGE_SIZE)) 13798c2ecf20Sopenharmony_ci return __atomic_get_pages(cpu_addr); 13808c2ecf20Sopenharmony_ci 13818c2ecf20Sopenharmony_ci if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) 13828c2ecf20Sopenharmony_ci return cpu_addr; 13838c2ecf20Sopenharmony_ci 13848c2ecf20Sopenharmony_ci return dma_common_find_pages(cpu_addr); 13858c2ecf20Sopenharmony_ci} 13868c2ecf20Sopenharmony_ci 13878c2ecf20Sopenharmony_cistatic void *__iommu_alloc_simple(struct device *dev, size_t size, gfp_t gfp, 13888c2ecf20Sopenharmony_ci dma_addr_t *handle, int coherent_flag, 13898c2ecf20Sopenharmony_ci unsigned long attrs) 13908c2ecf20Sopenharmony_ci{ 13918c2ecf20Sopenharmony_ci struct page *page; 13928c2ecf20Sopenharmony_ci void *addr; 13938c2ecf20Sopenharmony_ci 13948c2ecf20Sopenharmony_ci if (coherent_flag == COHERENT) 13958c2ecf20Sopenharmony_ci addr = __alloc_simple_buffer(dev, size, gfp, &page); 13968c2ecf20Sopenharmony_ci else 13978c2ecf20Sopenharmony_ci addr = __alloc_from_pool(size, &page); 13988c2ecf20Sopenharmony_ci if (!addr) 13998c2ecf20Sopenharmony_ci return NULL; 14008c2ecf20Sopenharmony_ci 14018c2ecf20Sopenharmony_ci *handle = __iommu_create_mapping(dev, &page, size, attrs); 14028c2ecf20Sopenharmony_ci if (*handle == DMA_MAPPING_ERROR) 14038c2ecf20Sopenharmony_ci goto err_mapping; 14048c2ecf20Sopenharmony_ci 14058c2ecf20Sopenharmony_ci return addr; 14068c2ecf20Sopenharmony_ci 14078c2ecf20Sopenharmony_cierr_mapping: 14088c2ecf20Sopenharmony_ci __free_from_pool(addr, size); 14098c2ecf20Sopenharmony_ci return NULL; 14108c2ecf20Sopenharmony_ci} 14118c2ecf20Sopenharmony_ci 14128c2ecf20Sopenharmony_cistatic void __iommu_free_atomic(struct device *dev, void *cpu_addr, 14138c2ecf20Sopenharmony_ci dma_addr_t handle, size_t size, int coherent_flag) 14148c2ecf20Sopenharmony_ci{ 14158c2ecf20Sopenharmony_ci __iommu_remove_mapping(dev, handle, size); 14168c2ecf20Sopenharmony_ci if (coherent_flag == COHERENT) 14178c2ecf20Sopenharmony_ci __dma_free_buffer(virt_to_page(cpu_addr), size); 14188c2ecf20Sopenharmony_ci else 14198c2ecf20Sopenharmony_ci __free_from_pool(cpu_addr, size); 14208c2ecf20Sopenharmony_ci} 14218c2ecf20Sopenharmony_ci 14228c2ecf20Sopenharmony_cistatic void *__arm_iommu_alloc_attrs(struct device *dev, size_t size, 14238c2ecf20Sopenharmony_ci dma_addr_t *handle, gfp_t gfp, unsigned long attrs, 14248c2ecf20Sopenharmony_ci int coherent_flag) 14258c2ecf20Sopenharmony_ci{ 14268c2ecf20Sopenharmony_ci pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL); 14278c2ecf20Sopenharmony_ci struct page **pages; 14288c2ecf20Sopenharmony_ci void *addr = NULL; 14298c2ecf20Sopenharmony_ci 14308c2ecf20Sopenharmony_ci *handle = DMA_MAPPING_ERROR; 14318c2ecf20Sopenharmony_ci size = PAGE_ALIGN(size); 14328c2ecf20Sopenharmony_ci 14338c2ecf20Sopenharmony_ci if (coherent_flag == COHERENT || !gfpflags_allow_blocking(gfp)) 14348c2ecf20Sopenharmony_ci return __iommu_alloc_simple(dev, size, gfp, handle, 14358c2ecf20Sopenharmony_ci coherent_flag, attrs); 14368c2ecf20Sopenharmony_ci 14378c2ecf20Sopenharmony_ci /* 14388c2ecf20Sopenharmony_ci * Following is a work-around (a.k.a. hack) to prevent pages 14398c2ecf20Sopenharmony_ci * with __GFP_COMP being passed to split_page() which cannot 14408c2ecf20Sopenharmony_ci * handle them. The real problem is that this flag probably 14418c2ecf20Sopenharmony_ci * should be 0 on ARM as it is not supported on this 14428c2ecf20Sopenharmony_ci * platform; see CONFIG_HUGETLBFS. 
14438c2ecf20Sopenharmony_ci */ 14448c2ecf20Sopenharmony_ci gfp &= ~(__GFP_COMP); 14458c2ecf20Sopenharmony_ci 14468c2ecf20Sopenharmony_ci pages = __iommu_alloc_buffer(dev, size, gfp, attrs, coherent_flag); 14478c2ecf20Sopenharmony_ci if (!pages) 14488c2ecf20Sopenharmony_ci return NULL; 14498c2ecf20Sopenharmony_ci 14508c2ecf20Sopenharmony_ci *handle = __iommu_create_mapping(dev, pages, size, attrs); 14518c2ecf20Sopenharmony_ci if (*handle == DMA_MAPPING_ERROR) 14528c2ecf20Sopenharmony_ci goto err_buffer; 14538c2ecf20Sopenharmony_ci 14548c2ecf20Sopenharmony_ci if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) 14558c2ecf20Sopenharmony_ci return pages; 14568c2ecf20Sopenharmony_ci 14578c2ecf20Sopenharmony_ci addr = dma_common_pages_remap(pages, size, prot, 14588c2ecf20Sopenharmony_ci __builtin_return_address(0)); 14598c2ecf20Sopenharmony_ci if (!addr) 14608c2ecf20Sopenharmony_ci goto err_mapping; 14618c2ecf20Sopenharmony_ci 14628c2ecf20Sopenharmony_ci return addr; 14638c2ecf20Sopenharmony_ci 14648c2ecf20Sopenharmony_cierr_mapping: 14658c2ecf20Sopenharmony_ci __iommu_remove_mapping(dev, *handle, size); 14668c2ecf20Sopenharmony_cierr_buffer: 14678c2ecf20Sopenharmony_ci __iommu_free_buffer(dev, pages, size, attrs); 14688c2ecf20Sopenharmony_ci return NULL; 14698c2ecf20Sopenharmony_ci} 14708c2ecf20Sopenharmony_ci 14718c2ecf20Sopenharmony_cistatic void *arm_iommu_alloc_attrs(struct device *dev, size_t size, 14728c2ecf20Sopenharmony_ci dma_addr_t *handle, gfp_t gfp, unsigned long attrs) 14738c2ecf20Sopenharmony_ci{ 14748c2ecf20Sopenharmony_ci return __arm_iommu_alloc_attrs(dev, size, handle, gfp, attrs, NORMAL); 14758c2ecf20Sopenharmony_ci} 14768c2ecf20Sopenharmony_ci 14778c2ecf20Sopenharmony_cistatic void *arm_coherent_iommu_alloc_attrs(struct device *dev, size_t size, 14788c2ecf20Sopenharmony_ci dma_addr_t *handle, gfp_t gfp, unsigned long attrs) 14798c2ecf20Sopenharmony_ci{ 14808c2ecf20Sopenharmony_ci return __arm_iommu_alloc_attrs(dev, size, handle, gfp, attrs, COHERENT); 14818c2ecf20Sopenharmony_ci} 14828c2ecf20Sopenharmony_ci 14838c2ecf20Sopenharmony_cistatic int __arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma, 14848c2ecf20Sopenharmony_ci void *cpu_addr, dma_addr_t dma_addr, size_t size, 14858c2ecf20Sopenharmony_ci unsigned long attrs) 14868c2ecf20Sopenharmony_ci{ 14878c2ecf20Sopenharmony_ci struct page **pages = __iommu_get_pages(cpu_addr, attrs); 14888c2ecf20Sopenharmony_ci unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT; 14898c2ecf20Sopenharmony_ci int err; 14908c2ecf20Sopenharmony_ci 14918c2ecf20Sopenharmony_ci if (!pages) 14928c2ecf20Sopenharmony_ci return -ENXIO; 14938c2ecf20Sopenharmony_ci 14948c2ecf20Sopenharmony_ci if (vma->vm_pgoff >= nr_pages) 14958c2ecf20Sopenharmony_ci return -ENXIO; 14968c2ecf20Sopenharmony_ci 14978c2ecf20Sopenharmony_ci err = vm_map_pages(vma, pages, nr_pages); 14988c2ecf20Sopenharmony_ci if (err) 14998c2ecf20Sopenharmony_ci pr_err("Remapping memory failed: %d\n", err); 15008c2ecf20Sopenharmony_ci 15018c2ecf20Sopenharmony_ci return err; 15028c2ecf20Sopenharmony_ci} 15038c2ecf20Sopenharmony_cistatic int arm_iommu_mmap_attrs(struct device *dev, 15048c2ecf20Sopenharmony_ci struct vm_area_struct *vma, void *cpu_addr, 15058c2ecf20Sopenharmony_ci dma_addr_t dma_addr, size_t size, unsigned long attrs) 15068c2ecf20Sopenharmony_ci{ 15078c2ecf20Sopenharmony_ci vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot); 15088c2ecf20Sopenharmony_ci 15098c2ecf20Sopenharmony_ci return __arm_iommu_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, attrs); 
15108c2ecf20Sopenharmony_ci} 15118c2ecf20Sopenharmony_ci 15128c2ecf20Sopenharmony_cistatic int arm_coherent_iommu_mmap_attrs(struct device *dev, 15138c2ecf20Sopenharmony_ci struct vm_area_struct *vma, void *cpu_addr, 15148c2ecf20Sopenharmony_ci dma_addr_t dma_addr, size_t size, unsigned long attrs) 15158c2ecf20Sopenharmony_ci{ 15168c2ecf20Sopenharmony_ci return __arm_iommu_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, attrs); 15178c2ecf20Sopenharmony_ci} 15188c2ecf20Sopenharmony_ci 15198c2ecf20Sopenharmony_ci/* 15208c2ecf20Sopenharmony_ci * free a page as defined by the above mapping. 15218c2ecf20Sopenharmony_ci * Must not be called with IRQs disabled. 15228c2ecf20Sopenharmony_ci */ 15238c2ecf20Sopenharmony_cistatic void __arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr, 15248c2ecf20Sopenharmony_ci dma_addr_t handle, unsigned long attrs, int coherent_flag) 15258c2ecf20Sopenharmony_ci{ 15268c2ecf20Sopenharmony_ci struct page **pages; 15278c2ecf20Sopenharmony_ci size = PAGE_ALIGN(size); 15288c2ecf20Sopenharmony_ci 15298c2ecf20Sopenharmony_ci if (coherent_flag == COHERENT || __in_atomic_pool(cpu_addr, size)) { 15308c2ecf20Sopenharmony_ci __iommu_free_atomic(dev, cpu_addr, handle, size, coherent_flag); 15318c2ecf20Sopenharmony_ci return; 15328c2ecf20Sopenharmony_ci } 15338c2ecf20Sopenharmony_ci 15348c2ecf20Sopenharmony_ci pages = __iommu_get_pages(cpu_addr, attrs); 15358c2ecf20Sopenharmony_ci if (!pages) { 15368c2ecf20Sopenharmony_ci WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr); 15378c2ecf20Sopenharmony_ci return; 15388c2ecf20Sopenharmony_ci } 15398c2ecf20Sopenharmony_ci 15408c2ecf20Sopenharmony_ci if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0) 15418c2ecf20Sopenharmony_ci dma_common_free_remap(cpu_addr, size); 15428c2ecf20Sopenharmony_ci 15438c2ecf20Sopenharmony_ci __iommu_remove_mapping(dev, handle, size); 15448c2ecf20Sopenharmony_ci __iommu_free_buffer(dev, pages, size, attrs); 15458c2ecf20Sopenharmony_ci} 15468c2ecf20Sopenharmony_ci 15478c2ecf20Sopenharmony_cistatic void arm_iommu_free_attrs(struct device *dev, size_t size, 15488c2ecf20Sopenharmony_ci void *cpu_addr, dma_addr_t handle, 15498c2ecf20Sopenharmony_ci unsigned long attrs) 15508c2ecf20Sopenharmony_ci{ 15518c2ecf20Sopenharmony_ci __arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs, NORMAL); 15528c2ecf20Sopenharmony_ci} 15538c2ecf20Sopenharmony_ci 15548c2ecf20Sopenharmony_cistatic void arm_coherent_iommu_free_attrs(struct device *dev, size_t size, 15558c2ecf20Sopenharmony_ci void *cpu_addr, dma_addr_t handle, unsigned long attrs) 15568c2ecf20Sopenharmony_ci{ 15578c2ecf20Sopenharmony_ci __arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs, COHERENT); 15588c2ecf20Sopenharmony_ci} 15598c2ecf20Sopenharmony_ci 15608c2ecf20Sopenharmony_cistatic int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt, 15618c2ecf20Sopenharmony_ci void *cpu_addr, dma_addr_t dma_addr, 15628c2ecf20Sopenharmony_ci size_t size, unsigned long attrs) 15638c2ecf20Sopenharmony_ci{ 15648c2ecf20Sopenharmony_ci unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; 15658c2ecf20Sopenharmony_ci struct page **pages = __iommu_get_pages(cpu_addr, attrs); 15668c2ecf20Sopenharmony_ci 15678c2ecf20Sopenharmony_ci if (!pages) 15688c2ecf20Sopenharmony_ci return -ENXIO; 15698c2ecf20Sopenharmony_ci 15708c2ecf20Sopenharmony_ci return sg_alloc_table_from_pages(sgt, pages, count, 0, size, 15718c2ecf20Sopenharmony_ci GFP_KERNEL); 15728c2ecf20Sopenharmony_ci} 15738c2ecf20Sopenharmony_ci 15748c2ecf20Sopenharmony_ci/* 
15758c2ecf20Sopenharmony_ci * Map a part of the scatter-gather list into contiguous io address space 15768c2ecf20Sopenharmony_ci */ 15778c2ecf20Sopenharmony_cistatic int __map_sg_chunk(struct device *dev, struct scatterlist *sg, 15788c2ecf20Sopenharmony_ci size_t size, dma_addr_t *handle, 15798c2ecf20Sopenharmony_ci enum dma_data_direction dir, unsigned long attrs, 15808c2ecf20Sopenharmony_ci bool is_coherent) 15818c2ecf20Sopenharmony_ci{ 15828c2ecf20Sopenharmony_ci struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); 15838c2ecf20Sopenharmony_ci dma_addr_t iova, iova_base; 15848c2ecf20Sopenharmony_ci int ret = 0; 15858c2ecf20Sopenharmony_ci unsigned int count; 15868c2ecf20Sopenharmony_ci struct scatterlist *s; 15878c2ecf20Sopenharmony_ci int prot; 15888c2ecf20Sopenharmony_ci 15898c2ecf20Sopenharmony_ci size = PAGE_ALIGN(size); 15908c2ecf20Sopenharmony_ci *handle = DMA_MAPPING_ERROR; 15918c2ecf20Sopenharmony_ci 15928c2ecf20Sopenharmony_ci iova_base = iova = __alloc_iova(mapping, size); 15938c2ecf20Sopenharmony_ci if (iova == DMA_MAPPING_ERROR) 15948c2ecf20Sopenharmony_ci return -ENOMEM; 15958c2ecf20Sopenharmony_ci 15968c2ecf20Sopenharmony_ci for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) { 15978c2ecf20Sopenharmony_ci phys_addr_t phys = page_to_phys(sg_page(s)); 15988c2ecf20Sopenharmony_ci unsigned int len = PAGE_ALIGN(s->offset + s->length); 15998c2ecf20Sopenharmony_ci 16008c2ecf20Sopenharmony_ci if (!is_coherent && (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) 16018c2ecf20Sopenharmony_ci __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir); 16028c2ecf20Sopenharmony_ci 16038c2ecf20Sopenharmony_ci prot = __dma_info_to_prot(dir, attrs); 16048c2ecf20Sopenharmony_ci 16058c2ecf20Sopenharmony_ci ret = iommu_map(mapping->domain, iova, phys, len, prot); 16068c2ecf20Sopenharmony_ci if (ret < 0) 16078c2ecf20Sopenharmony_ci goto fail; 16088c2ecf20Sopenharmony_ci count += len >> PAGE_SHIFT; 16098c2ecf20Sopenharmony_ci iova += len; 16108c2ecf20Sopenharmony_ci } 16118c2ecf20Sopenharmony_ci *handle = iova_base; 16128c2ecf20Sopenharmony_ci 16138c2ecf20Sopenharmony_ci return 0; 16148c2ecf20Sopenharmony_cifail: 16158c2ecf20Sopenharmony_ci iommu_unmap(mapping->domain, iova_base, count * PAGE_SIZE); 16168c2ecf20Sopenharmony_ci __free_iova(mapping, iova_base, size); 16178c2ecf20Sopenharmony_ci return ret; 16188c2ecf20Sopenharmony_ci} 16198c2ecf20Sopenharmony_ci 16208c2ecf20Sopenharmony_cistatic int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents, 16218c2ecf20Sopenharmony_ci enum dma_data_direction dir, unsigned long attrs, 16228c2ecf20Sopenharmony_ci bool is_coherent) 16238c2ecf20Sopenharmony_ci{ 16248c2ecf20Sopenharmony_ci struct scatterlist *s = sg, *dma = sg, *start = sg; 16258c2ecf20Sopenharmony_ci int i, count = 0; 16268c2ecf20Sopenharmony_ci unsigned int offset = s->offset; 16278c2ecf20Sopenharmony_ci unsigned int size = s->offset + s->length; 16288c2ecf20Sopenharmony_ci unsigned int max = dma_get_max_seg_size(dev); 16298c2ecf20Sopenharmony_ci 16308c2ecf20Sopenharmony_ci for (i = 1; i < nents; i++) { 16318c2ecf20Sopenharmony_ci s = sg_next(s); 16328c2ecf20Sopenharmony_ci 16338c2ecf20Sopenharmony_ci s->dma_address = DMA_MAPPING_ERROR; 16348c2ecf20Sopenharmony_ci s->dma_length = 0; 16358c2ecf20Sopenharmony_ci 16368c2ecf20Sopenharmony_ci if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) { 16378c2ecf20Sopenharmony_ci if (__map_sg_chunk(dev, start, size, &dma->dma_address, 16388c2ecf20Sopenharmony_ci dir, attrs, is_coherent) < 0) 
16398c2ecf20Sopenharmony_ci goto bad_mapping; 16408c2ecf20Sopenharmony_ci 16418c2ecf20Sopenharmony_ci dma->dma_address += offset; 16428c2ecf20Sopenharmony_ci dma->dma_length = size - offset; 16438c2ecf20Sopenharmony_ci 16448c2ecf20Sopenharmony_ci size = offset = s->offset; 16458c2ecf20Sopenharmony_ci start = s; 16468c2ecf20Sopenharmony_ci dma = sg_next(dma); 16478c2ecf20Sopenharmony_ci count += 1; 16488c2ecf20Sopenharmony_ci } 16498c2ecf20Sopenharmony_ci size += s->length; 16508c2ecf20Sopenharmony_ci } 16518c2ecf20Sopenharmony_ci if (__map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs, 16528c2ecf20Sopenharmony_ci is_coherent) < 0) 16538c2ecf20Sopenharmony_ci goto bad_mapping; 16548c2ecf20Sopenharmony_ci 16558c2ecf20Sopenharmony_ci dma->dma_address += offset; 16568c2ecf20Sopenharmony_ci dma->dma_length = size - offset; 16578c2ecf20Sopenharmony_ci 16588c2ecf20Sopenharmony_ci return count+1; 16598c2ecf20Sopenharmony_ci 16608c2ecf20Sopenharmony_cibad_mapping: 16618c2ecf20Sopenharmony_ci for_each_sg(sg, s, count, i) 16628c2ecf20Sopenharmony_ci __iommu_remove_mapping(dev, sg_dma_address(s), sg_dma_len(s)); 16638c2ecf20Sopenharmony_ci return 0; 16648c2ecf20Sopenharmony_ci} 16658c2ecf20Sopenharmony_ci 16668c2ecf20Sopenharmony_ci/** 16678c2ecf20Sopenharmony_ci * arm_coherent_iommu_map_sg - map a set of SG buffers for streaming mode DMA 16688c2ecf20Sopenharmony_ci * @dev: valid struct device pointer 16698c2ecf20Sopenharmony_ci * @sg: list of buffers 16708c2ecf20Sopenharmony_ci * @nents: number of buffers to map 16718c2ecf20Sopenharmony_ci * @dir: DMA transfer direction 16728c2ecf20Sopenharmony_ci * 16738c2ecf20Sopenharmony_ci * Map a set of i/o coherent buffers described by scatterlist in streaming 16748c2ecf20Sopenharmony_ci * mode for DMA. The scatter gather list elements are merged together (if 16758c2ecf20Sopenharmony_ci * possible) and tagged with the appropriate dma address and length. They are 16768c2ecf20Sopenharmony_ci * obtained via sg_dma_{address,length}. 16778c2ecf20Sopenharmony_ci */ 16788c2ecf20Sopenharmony_cistatic int arm_coherent_iommu_map_sg(struct device *dev, struct scatterlist *sg, 16798c2ecf20Sopenharmony_ci int nents, enum dma_data_direction dir, unsigned long attrs) 16808c2ecf20Sopenharmony_ci{ 16818c2ecf20Sopenharmony_ci return __iommu_map_sg(dev, sg, nents, dir, attrs, true); 16828c2ecf20Sopenharmony_ci} 16838c2ecf20Sopenharmony_ci 16848c2ecf20Sopenharmony_ci/** 16858c2ecf20Sopenharmony_ci * arm_iommu_map_sg - map a set of SG buffers for streaming mode DMA 16868c2ecf20Sopenharmony_ci * @dev: valid struct device pointer 16878c2ecf20Sopenharmony_ci * @sg: list of buffers 16888c2ecf20Sopenharmony_ci * @nents: number of buffers to map 16898c2ecf20Sopenharmony_ci * @dir: DMA transfer direction 16908c2ecf20Sopenharmony_ci * 16918c2ecf20Sopenharmony_ci * Map a set of buffers described by scatterlist in streaming mode for DMA. 16928c2ecf20Sopenharmony_ci * The scatter gather list elements are merged together (if possible) and 16938c2ecf20Sopenharmony_ci * tagged with the appropriate dma address and length. They are obtained via 16948c2ecf20Sopenharmony_ci * sg_dma_{address,length}. 
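 *
 * A minimal driver-side sketch of how a mapped list is consumed (illustrative
 * only; drivers reach this path through the generic dma_map_sg() call rather
 * than by invoking this helper directly, and "dev", "sgt" and
 * program_hw_segment() below are assumptions/placeholders):
 *
 *	struct scatterlist *s;
 *	int i, count;
 *
 *	count = dma_map_sg(dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
 *	if (!count)
 *		return -ENOMEM;
 *	for_each_sg(sgt->sgl, s, count, i)
 *		program_hw_segment(sg_dma_address(s), sg_dma_len(s));
 *	... after the transfer completes ...
 *	dma_unmap_sg(dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE);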
16958c2ecf20Sopenharmony_ci */ 16968c2ecf20Sopenharmony_cistatic int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg, 16978c2ecf20Sopenharmony_ci int nents, enum dma_data_direction dir, unsigned long attrs) 16988c2ecf20Sopenharmony_ci{ 16998c2ecf20Sopenharmony_ci return __iommu_map_sg(dev, sg, nents, dir, attrs, false); 17008c2ecf20Sopenharmony_ci} 17018c2ecf20Sopenharmony_ci 17028c2ecf20Sopenharmony_cistatic void __iommu_unmap_sg(struct device *dev, struct scatterlist *sg, 17038c2ecf20Sopenharmony_ci int nents, enum dma_data_direction dir, 17048c2ecf20Sopenharmony_ci unsigned long attrs, bool is_coherent) 17058c2ecf20Sopenharmony_ci{ 17068c2ecf20Sopenharmony_ci struct scatterlist *s; 17078c2ecf20Sopenharmony_ci int i; 17088c2ecf20Sopenharmony_ci 17098c2ecf20Sopenharmony_ci for_each_sg(sg, s, nents, i) { 17108c2ecf20Sopenharmony_ci if (sg_dma_len(s)) 17118c2ecf20Sopenharmony_ci __iommu_remove_mapping(dev, sg_dma_address(s), 17128c2ecf20Sopenharmony_ci sg_dma_len(s)); 17138c2ecf20Sopenharmony_ci if (!is_coherent && (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) 17148c2ecf20Sopenharmony_ci __dma_page_dev_to_cpu(sg_page(s), s->offset, 17158c2ecf20Sopenharmony_ci s->length, dir); 17168c2ecf20Sopenharmony_ci } 17178c2ecf20Sopenharmony_ci} 17188c2ecf20Sopenharmony_ci 17198c2ecf20Sopenharmony_ci/** 17208c2ecf20Sopenharmony_ci * arm_coherent_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg 17218c2ecf20Sopenharmony_ci * @dev: valid struct device pointer 17228c2ecf20Sopenharmony_ci * @sg: list of buffers 17238c2ecf20Sopenharmony_ci * @nents: number of buffers to unmap (same as was passed to dma_map_sg) 17248c2ecf20Sopenharmony_ci * @dir: DMA transfer direction (same as was passed to dma_map_sg) 17258c2ecf20Sopenharmony_ci * 17268c2ecf20Sopenharmony_ci * Unmap a set of streaming mode DMA translations. Again, CPU access 17278c2ecf20Sopenharmony_ci * rules concerning calls here are the same as for dma_unmap_single(). 17288c2ecf20Sopenharmony_ci */ 17298c2ecf20Sopenharmony_cistatic void arm_coherent_iommu_unmap_sg(struct device *dev, 17308c2ecf20Sopenharmony_ci struct scatterlist *sg, int nents, enum dma_data_direction dir, 17318c2ecf20Sopenharmony_ci unsigned long attrs) 17328c2ecf20Sopenharmony_ci{ 17338c2ecf20Sopenharmony_ci __iommu_unmap_sg(dev, sg, nents, dir, attrs, true); 17348c2ecf20Sopenharmony_ci} 17358c2ecf20Sopenharmony_ci 17368c2ecf20Sopenharmony_ci/** 17378c2ecf20Sopenharmony_ci * arm_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg 17388c2ecf20Sopenharmony_ci * @dev: valid struct device pointer 17398c2ecf20Sopenharmony_ci * @sg: list of buffers 17408c2ecf20Sopenharmony_ci * @nents: number of buffers to unmap (same as was passed to dma_map_sg) 17418c2ecf20Sopenharmony_ci * @dir: DMA transfer direction (same as was passed to dma_map_sg) 17428c2ecf20Sopenharmony_ci * 17438c2ecf20Sopenharmony_ci * Unmap a set of streaming mode DMA translations. Again, CPU access 17448c2ecf20Sopenharmony_ci * rules concerning calls here are the same as for dma_unmap_single(). 
17458c2ecf20Sopenharmony_ci */ 17468c2ecf20Sopenharmony_cistatic void arm_iommu_unmap_sg(struct device *dev, 17478c2ecf20Sopenharmony_ci struct scatterlist *sg, int nents, 17488c2ecf20Sopenharmony_ci enum dma_data_direction dir, 17498c2ecf20Sopenharmony_ci unsigned long attrs) 17508c2ecf20Sopenharmony_ci{ 17518c2ecf20Sopenharmony_ci __iommu_unmap_sg(dev, sg, nents, dir, attrs, false); 17528c2ecf20Sopenharmony_ci} 17538c2ecf20Sopenharmony_ci 17548c2ecf20Sopenharmony_ci/** 17558c2ecf20Sopenharmony_ci * arm_iommu_sync_sg_for_cpu 17568c2ecf20Sopenharmony_ci * @dev: valid struct device pointer 17578c2ecf20Sopenharmony_ci * @sg: list of buffers 17588c2ecf20Sopenharmony_ci * @nents: number of buffers to map (returned from dma_map_sg) 17598c2ecf20Sopenharmony_ci * @dir: DMA transfer direction (same as was passed to dma_map_sg) 17608c2ecf20Sopenharmony_ci */ 17618c2ecf20Sopenharmony_cistatic void arm_iommu_sync_sg_for_cpu(struct device *dev, 17628c2ecf20Sopenharmony_ci struct scatterlist *sg, 17638c2ecf20Sopenharmony_ci int nents, enum dma_data_direction dir) 17648c2ecf20Sopenharmony_ci{ 17658c2ecf20Sopenharmony_ci struct scatterlist *s; 17668c2ecf20Sopenharmony_ci int i; 17678c2ecf20Sopenharmony_ci 17688c2ecf20Sopenharmony_ci for_each_sg(sg, s, nents, i) 17698c2ecf20Sopenharmony_ci __dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir); 17708c2ecf20Sopenharmony_ci 17718c2ecf20Sopenharmony_ci} 17728c2ecf20Sopenharmony_ci 17738c2ecf20Sopenharmony_ci/** 17748c2ecf20Sopenharmony_ci * arm_iommu_sync_sg_for_device 17758c2ecf20Sopenharmony_ci * @dev: valid struct device pointer 17768c2ecf20Sopenharmony_ci * @sg: list of buffers 17778c2ecf20Sopenharmony_ci * @nents: number of buffers to map (returned from dma_map_sg) 17788c2ecf20Sopenharmony_ci * @dir: DMA transfer direction (same as was passed to dma_map_sg) 17798c2ecf20Sopenharmony_ci */ 17808c2ecf20Sopenharmony_cistatic void arm_iommu_sync_sg_for_device(struct device *dev, 17818c2ecf20Sopenharmony_ci struct scatterlist *sg, 17828c2ecf20Sopenharmony_ci int nents, enum dma_data_direction dir) 17838c2ecf20Sopenharmony_ci{ 17848c2ecf20Sopenharmony_ci struct scatterlist *s; 17858c2ecf20Sopenharmony_ci int i; 17868c2ecf20Sopenharmony_ci 17878c2ecf20Sopenharmony_ci for_each_sg(sg, s, nents, i) 17888c2ecf20Sopenharmony_ci __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir); 17898c2ecf20Sopenharmony_ci} 17908c2ecf20Sopenharmony_ci 17918c2ecf20Sopenharmony_ci 17928c2ecf20Sopenharmony_ci/** 17938c2ecf20Sopenharmony_ci * arm_coherent_iommu_map_page 17948c2ecf20Sopenharmony_ci * @dev: valid struct device pointer 17958c2ecf20Sopenharmony_ci * @page: page that buffer resides in 17968c2ecf20Sopenharmony_ci * @offset: offset into page for start of buffer 17978c2ecf20Sopenharmony_ci * @size: size of buffer to map 17988c2ecf20Sopenharmony_ci * @dir: DMA transfer direction 17998c2ecf20Sopenharmony_ci * 18008c2ecf20Sopenharmony_ci * Coherent IOMMU aware version of arm_dma_map_page() 18018c2ecf20Sopenharmony_ci */ 18028c2ecf20Sopenharmony_cistatic dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *page, 18038c2ecf20Sopenharmony_ci unsigned long offset, size_t size, enum dma_data_direction dir, 18048c2ecf20Sopenharmony_ci unsigned long attrs) 18058c2ecf20Sopenharmony_ci{ 18068c2ecf20Sopenharmony_ci struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); 18078c2ecf20Sopenharmony_ci dma_addr_t dma_addr; 18088c2ecf20Sopenharmony_ci int ret, prot, len = PAGE_ALIGN(size + offset); 18098c2ecf20Sopenharmony_ci 18108c2ecf20Sopenharmony_ci 
dma_addr = __alloc_iova(mapping, len); 18118c2ecf20Sopenharmony_ci if (dma_addr == DMA_MAPPING_ERROR) 18128c2ecf20Sopenharmony_ci return dma_addr; 18138c2ecf20Sopenharmony_ci 18148c2ecf20Sopenharmony_ci prot = __dma_info_to_prot(dir, attrs); 18158c2ecf20Sopenharmony_ci 18168c2ecf20Sopenharmony_ci ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, prot); 18178c2ecf20Sopenharmony_ci if (ret < 0) 18188c2ecf20Sopenharmony_ci goto fail; 18198c2ecf20Sopenharmony_ci 18208c2ecf20Sopenharmony_ci return dma_addr + offset; 18218c2ecf20Sopenharmony_cifail: 18228c2ecf20Sopenharmony_ci __free_iova(mapping, dma_addr, len); 18238c2ecf20Sopenharmony_ci return DMA_MAPPING_ERROR; 18248c2ecf20Sopenharmony_ci} 18258c2ecf20Sopenharmony_ci 18268c2ecf20Sopenharmony_ci/** 18278c2ecf20Sopenharmony_ci * arm_iommu_map_page 18288c2ecf20Sopenharmony_ci * @dev: valid struct device pointer 18298c2ecf20Sopenharmony_ci * @page: page that buffer resides in 18308c2ecf20Sopenharmony_ci * @offset: offset into page for start of buffer 18318c2ecf20Sopenharmony_ci * @size: size of buffer to map 18328c2ecf20Sopenharmony_ci * @dir: DMA transfer direction 18338c2ecf20Sopenharmony_ci * 18348c2ecf20Sopenharmony_ci * IOMMU aware version of arm_dma_map_page() 18358c2ecf20Sopenharmony_ci */ 18368c2ecf20Sopenharmony_cistatic dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page, 18378c2ecf20Sopenharmony_ci unsigned long offset, size_t size, enum dma_data_direction dir, 18388c2ecf20Sopenharmony_ci unsigned long attrs) 18398c2ecf20Sopenharmony_ci{ 18408c2ecf20Sopenharmony_ci if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) 18418c2ecf20Sopenharmony_ci __dma_page_cpu_to_dev(page, offset, size, dir); 18428c2ecf20Sopenharmony_ci 18438c2ecf20Sopenharmony_ci return arm_coherent_iommu_map_page(dev, page, offset, size, dir, attrs); 18448c2ecf20Sopenharmony_ci} 18458c2ecf20Sopenharmony_ci 18468c2ecf20Sopenharmony_ci/** 18478c2ecf20Sopenharmony_ci * arm_coherent_iommu_unmap_page 18488c2ecf20Sopenharmony_ci * @dev: valid struct device pointer 18498c2ecf20Sopenharmony_ci * @handle: DMA address of buffer 18508c2ecf20Sopenharmony_ci * @size: size of buffer (same as passed to dma_map_page) 18518c2ecf20Sopenharmony_ci * @dir: DMA transfer direction (same as passed to dma_map_page) 18528c2ecf20Sopenharmony_ci * 18538c2ecf20Sopenharmony_ci * Coherent IOMMU aware version of arm_dma_unmap_page() 18548c2ecf20Sopenharmony_ci */ 18558c2ecf20Sopenharmony_cistatic void arm_coherent_iommu_unmap_page(struct device *dev, dma_addr_t handle, 18568c2ecf20Sopenharmony_ci size_t size, enum dma_data_direction dir, unsigned long attrs) 18578c2ecf20Sopenharmony_ci{ 18588c2ecf20Sopenharmony_ci struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); 18598c2ecf20Sopenharmony_ci dma_addr_t iova = handle & PAGE_MASK; 18608c2ecf20Sopenharmony_ci int offset = handle & ~PAGE_MASK; 18618c2ecf20Sopenharmony_ci int len = PAGE_ALIGN(size + offset); 18628c2ecf20Sopenharmony_ci 18638c2ecf20Sopenharmony_ci if (!iova) 18648c2ecf20Sopenharmony_ci return; 18658c2ecf20Sopenharmony_ci 18668c2ecf20Sopenharmony_ci iommu_unmap(mapping->domain, iova, len); 18678c2ecf20Sopenharmony_ci __free_iova(mapping, iova, len); 18688c2ecf20Sopenharmony_ci} 18698c2ecf20Sopenharmony_ci 18708c2ecf20Sopenharmony_ci/** 18718c2ecf20Sopenharmony_ci * arm_iommu_unmap_page 18728c2ecf20Sopenharmony_ci * @dev: valid struct device pointer 18738c2ecf20Sopenharmony_ci * @handle: DMA address of buffer 18748c2ecf20Sopenharmony_ci * @size: size of buffer (same as passed to dma_map_page) 
18758c2ecf20Sopenharmony_ci * @dir: DMA transfer direction (same as passed to dma_map_page) 18768c2ecf20Sopenharmony_ci * 18778c2ecf20Sopenharmony_ci * IOMMU aware version of arm_dma_unmap_page() 18788c2ecf20Sopenharmony_ci */ 18798c2ecf20Sopenharmony_cistatic void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle, 18808c2ecf20Sopenharmony_ci size_t size, enum dma_data_direction dir, unsigned long attrs) 18818c2ecf20Sopenharmony_ci{ 18828c2ecf20Sopenharmony_ci struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); 18838c2ecf20Sopenharmony_ci dma_addr_t iova = handle & PAGE_MASK; 18848c2ecf20Sopenharmony_ci struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova)); 18858c2ecf20Sopenharmony_ci int offset = handle & ~PAGE_MASK; 18868c2ecf20Sopenharmony_ci int len = PAGE_ALIGN(size + offset); 18878c2ecf20Sopenharmony_ci 18888c2ecf20Sopenharmony_ci if (!iova) 18898c2ecf20Sopenharmony_ci return; 18908c2ecf20Sopenharmony_ci 18918c2ecf20Sopenharmony_ci if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) 18928c2ecf20Sopenharmony_ci __dma_page_dev_to_cpu(page, offset, size, dir); 18938c2ecf20Sopenharmony_ci 18948c2ecf20Sopenharmony_ci iommu_unmap(mapping->domain, iova, len); 18958c2ecf20Sopenharmony_ci __free_iova(mapping, iova, len); 18968c2ecf20Sopenharmony_ci} 18978c2ecf20Sopenharmony_ci 18988c2ecf20Sopenharmony_ci/** 18998c2ecf20Sopenharmony_ci * arm_iommu_map_resource - map a device resource for DMA 19008c2ecf20Sopenharmony_ci * @dev: valid struct device pointer 19018c2ecf20Sopenharmony_ci * @phys_addr: physical address of resource 19028c2ecf20Sopenharmony_ci * @size: size of resource to map 19038c2ecf20Sopenharmony_ci * @dir: DMA transfer direction 19048c2ecf20Sopenharmony_ci */ 19058c2ecf20Sopenharmony_cistatic dma_addr_t arm_iommu_map_resource(struct device *dev, 19068c2ecf20Sopenharmony_ci phys_addr_t phys_addr, size_t size, 19078c2ecf20Sopenharmony_ci enum dma_data_direction dir, unsigned long attrs) 19088c2ecf20Sopenharmony_ci{ 19098c2ecf20Sopenharmony_ci struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); 19108c2ecf20Sopenharmony_ci dma_addr_t dma_addr; 19118c2ecf20Sopenharmony_ci int ret, prot; 19128c2ecf20Sopenharmony_ci phys_addr_t addr = phys_addr & PAGE_MASK; 19138c2ecf20Sopenharmony_ci unsigned int offset = phys_addr & ~PAGE_MASK; 19148c2ecf20Sopenharmony_ci size_t len = PAGE_ALIGN(size + offset); 19158c2ecf20Sopenharmony_ci 19168c2ecf20Sopenharmony_ci dma_addr = __alloc_iova(mapping, len); 19178c2ecf20Sopenharmony_ci if (dma_addr == DMA_MAPPING_ERROR) 19188c2ecf20Sopenharmony_ci return dma_addr; 19198c2ecf20Sopenharmony_ci 19208c2ecf20Sopenharmony_ci prot = __dma_info_to_prot(dir, attrs) | IOMMU_MMIO; 19218c2ecf20Sopenharmony_ci 19228c2ecf20Sopenharmony_ci ret = iommu_map(mapping->domain, dma_addr, addr, len, prot); 19238c2ecf20Sopenharmony_ci if (ret < 0) 19248c2ecf20Sopenharmony_ci goto fail; 19258c2ecf20Sopenharmony_ci 19268c2ecf20Sopenharmony_ci return dma_addr + offset; 19278c2ecf20Sopenharmony_cifail: 19288c2ecf20Sopenharmony_ci __free_iova(mapping, dma_addr, len); 19298c2ecf20Sopenharmony_ci return DMA_MAPPING_ERROR; 19308c2ecf20Sopenharmony_ci} 19318c2ecf20Sopenharmony_ci 19328c2ecf20Sopenharmony_ci/** 19338c2ecf20Sopenharmony_ci * arm_iommu_unmap_resource - unmap a device DMA resource 19348c2ecf20Sopenharmony_ci * @dev: valid struct device pointer 19358c2ecf20Sopenharmony_ci * @dma_handle: DMA address to resource 19368c2ecf20Sopenharmony_ci * @size: size of resource to map 19378c2ecf20Sopenharmony_ci * @dir: DMA transfer direction 
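 *
 * A hypothetical sketch of the map/unmap pair as seen from a driver through
 * the generic dma_map_resource() interface ("dev", "slave_fifo_phys" and the
 * DMA_TO_DEVICE direction below are illustrative assumptions):
 *
 *	dma_addr_t dma;
 *
 *	dma = dma_map_resource(dev, slave_fifo_phys, SZ_4K, DMA_TO_DEVICE, 0);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	... program the peer device with "dma" and run the transfer ...
 *	dma_unmap_resource(dev, dma, SZ_4K, DMA_TO_DEVICE, 0);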
19388c2ecf20Sopenharmony_ci */ 19398c2ecf20Sopenharmony_cistatic void arm_iommu_unmap_resource(struct device *dev, dma_addr_t dma_handle, 19408c2ecf20Sopenharmony_ci size_t size, enum dma_data_direction dir, 19418c2ecf20Sopenharmony_ci unsigned long attrs) 19428c2ecf20Sopenharmony_ci{ 19438c2ecf20Sopenharmony_ci struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); 19448c2ecf20Sopenharmony_ci dma_addr_t iova = dma_handle & PAGE_MASK; 19458c2ecf20Sopenharmony_ci unsigned int offset = dma_handle & ~PAGE_MASK; 19468c2ecf20Sopenharmony_ci size_t len = PAGE_ALIGN(size + offset); 19478c2ecf20Sopenharmony_ci 19488c2ecf20Sopenharmony_ci if (!iova) 19498c2ecf20Sopenharmony_ci return; 19508c2ecf20Sopenharmony_ci 19518c2ecf20Sopenharmony_ci iommu_unmap(mapping->domain, iova, len); 19528c2ecf20Sopenharmony_ci __free_iova(mapping, iova, len); 19538c2ecf20Sopenharmony_ci} 19548c2ecf20Sopenharmony_ci 19558c2ecf20Sopenharmony_cistatic void arm_iommu_sync_single_for_cpu(struct device *dev, 19568c2ecf20Sopenharmony_ci dma_addr_t handle, size_t size, enum dma_data_direction dir) 19578c2ecf20Sopenharmony_ci{ 19588c2ecf20Sopenharmony_ci struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); 19598c2ecf20Sopenharmony_ci dma_addr_t iova = handle & PAGE_MASK; 19608c2ecf20Sopenharmony_ci struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova)); 19618c2ecf20Sopenharmony_ci unsigned int offset = handle & ~PAGE_MASK; 19628c2ecf20Sopenharmony_ci 19638c2ecf20Sopenharmony_ci if (!iova) 19648c2ecf20Sopenharmony_ci return; 19658c2ecf20Sopenharmony_ci 19668c2ecf20Sopenharmony_ci __dma_page_dev_to_cpu(page, offset, size, dir); 19678c2ecf20Sopenharmony_ci} 19688c2ecf20Sopenharmony_ci 19698c2ecf20Sopenharmony_cistatic void arm_iommu_sync_single_for_device(struct device *dev, 19708c2ecf20Sopenharmony_ci dma_addr_t handle, size_t size, enum dma_data_direction dir) 19718c2ecf20Sopenharmony_ci{ 19728c2ecf20Sopenharmony_ci struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); 19738c2ecf20Sopenharmony_ci dma_addr_t iova = handle & PAGE_MASK; 19748c2ecf20Sopenharmony_ci struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova)); 19758c2ecf20Sopenharmony_ci unsigned int offset = handle & ~PAGE_MASK; 19768c2ecf20Sopenharmony_ci 19778c2ecf20Sopenharmony_ci if (!iova) 19788c2ecf20Sopenharmony_ci return; 19798c2ecf20Sopenharmony_ci 19808c2ecf20Sopenharmony_ci __dma_page_cpu_to_dev(page, offset, size, dir); 19818c2ecf20Sopenharmony_ci} 19828c2ecf20Sopenharmony_ci 19838c2ecf20Sopenharmony_cistatic const struct dma_map_ops iommu_ops = { 19848c2ecf20Sopenharmony_ci .alloc = arm_iommu_alloc_attrs, 19858c2ecf20Sopenharmony_ci .free = arm_iommu_free_attrs, 19868c2ecf20Sopenharmony_ci .mmap = arm_iommu_mmap_attrs, 19878c2ecf20Sopenharmony_ci .get_sgtable = arm_iommu_get_sgtable, 19888c2ecf20Sopenharmony_ci 19898c2ecf20Sopenharmony_ci .map_page = arm_iommu_map_page, 19908c2ecf20Sopenharmony_ci .unmap_page = arm_iommu_unmap_page, 19918c2ecf20Sopenharmony_ci .sync_single_for_cpu = arm_iommu_sync_single_for_cpu, 19928c2ecf20Sopenharmony_ci .sync_single_for_device = arm_iommu_sync_single_for_device, 19938c2ecf20Sopenharmony_ci 19948c2ecf20Sopenharmony_ci .map_sg = arm_iommu_map_sg, 19958c2ecf20Sopenharmony_ci .unmap_sg = arm_iommu_unmap_sg, 19968c2ecf20Sopenharmony_ci .sync_sg_for_cpu = arm_iommu_sync_sg_for_cpu, 19978c2ecf20Sopenharmony_ci .sync_sg_for_device = arm_iommu_sync_sg_for_device, 19988c2ecf20Sopenharmony_ci 19998c2ecf20Sopenharmony_ci .map_resource = arm_iommu_map_resource, 
20008c2ecf20Sopenharmony_ci .unmap_resource = arm_iommu_unmap_resource, 20018c2ecf20Sopenharmony_ci 20028c2ecf20Sopenharmony_ci .dma_supported = arm_dma_supported, 20038c2ecf20Sopenharmony_ci}; 20048c2ecf20Sopenharmony_ci 20058c2ecf20Sopenharmony_cistatic const struct dma_map_ops iommu_coherent_ops = { 20068c2ecf20Sopenharmony_ci .alloc = arm_coherent_iommu_alloc_attrs, 20078c2ecf20Sopenharmony_ci .free = arm_coherent_iommu_free_attrs, 20088c2ecf20Sopenharmony_ci .mmap = arm_coherent_iommu_mmap_attrs, 20098c2ecf20Sopenharmony_ci .get_sgtable = arm_iommu_get_sgtable, 20108c2ecf20Sopenharmony_ci 20118c2ecf20Sopenharmony_ci .map_page = arm_coherent_iommu_map_page, 20128c2ecf20Sopenharmony_ci .unmap_page = arm_coherent_iommu_unmap_page, 20138c2ecf20Sopenharmony_ci 20148c2ecf20Sopenharmony_ci .map_sg = arm_coherent_iommu_map_sg, 20158c2ecf20Sopenharmony_ci .unmap_sg = arm_coherent_iommu_unmap_sg, 20168c2ecf20Sopenharmony_ci 20178c2ecf20Sopenharmony_ci .map_resource = arm_iommu_map_resource, 20188c2ecf20Sopenharmony_ci .unmap_resource = arm_iommu_unmap_resource, 20198c2ecf20Sopenharmony_ci 20208c2ecf20Sopenharmony_ci .dma_supported = arm_dma_supported, 20218c2ecf20Sopenharmony_ci}; 20228c2ecf20Sopenharmony_ci 20238c2ecf20Sopenharmony_ci/** 20248c2ecf20Sopenharmony_ci * arm_iommu_create_mapping 20258c2ecf20Sopenharmony_ci * @bus: pointer to the bus holding the client device (for IOMMU calls) 20268c2ecf20Sopenharmony_ci * @base: start address of the valid IO address space 20278c2ecf20Sopenharmony_ci * @size: maximum size of the valid IO address space 20288c2ecf20Sopenharmony_ci * 20298c2ecf20Sopenharmony_ci * Creates a mapping structure which holds information about used/unused 20308c2ecf20Sopenharmony_ci * IO address ranges, which is required to perform memory allocation and 20318c2ecf20Sopenharmony_ci * mapping with IOMMU aware functions. 20328c2ecf20Sopenharmony_ci * 20338c2ecf20Sopenharmony_ci * The client device needs to be attached to the mapping with the 20348c2ecf20Sopenharmony_ci * arm_iommu_attach_device() function.
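 *
 * A minimal lifecycle sketch (illustrative only; the platform bus, the
 * 0x80000000 base, the SZ_128M window and "dev" are assumptions rather than
 * requirements of this interface):
 *
 *	struct dma_iommu_mapping *mapping;
 *
 *	mapping = arm_iommu_create_mapping(&platform_bus_type, 0x80000000, SZ_128M);
 *	if (IS_ERR(mapping))
 *		return PTR_ERR(mapping);
 *	if (arm_iommu_attach_device(dev, mapping)) {
 *		arm_iommu_release_mapping(mapping);
 *		return -ENODEV;
 *	}
 *	... use the streaming and coherent DMA API as usual ...
 *	arm_iommu_detach_device(dev);
 *	arm_iommu_release_mapping(mapping);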
20358c2ecf20Sopenharmony_ci */ 20368c2ecf20Sopenharmony_cistruct dma_iommu_mapping * 20378c2ecf20Sopenharmony_ciarm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, u64 size) 20388c2ecf20Sopenharmony_ci{ 20398c2ecf20Sopenharmony_ci unsigned int bits = size >> PAGE_SHIFT; 20408c2ecf20Sopenharmony_ci unsigned int bitmap_size = BITS_TO_LONGS(bits) * sizeof(long); 20418c2ecf20Sopenharmony_ci struct dma_iommu_mapping *mapping; 20428c2ecf20Sopenharmony_ci int extensions = 1; 20438c2ecf20Sopenharmony_ci int err = -ENOMEM; 20448c2ecf20Sopenharmony_ci 20458c2ecf20Sopenharmony_ci /* currently only 32-bit DMA address space is supported */ 20468c2ecf20Sopenharmony_ci if (size > DMA_BIT_MASK(32) + 1) 20478c2ecf20Sopenharmony_ci return ERR_PTR(-ERANGE); 20488c2ecf20Sopenharmony_ci 20498c2ecf20Sopenharmony_ci if (!bitmap_size) 20508c2ecf20Sopenharmony_ci return ERR_PTR(-EINVAL); 20518c2ecf20Sopenharmony_ci 20528c2ecf20Sopenharmony_ci if (bitmap_size > PAGE_SIZE) { 20538c2ecf20Sopenharmony_ci extensions = bitmap_size / PAGE_SIZE; 20548c2ecf20Sopenharmony_ci bitmap_size = PAGE_SIZE; 20558c2ecf20Sopenharmony_ci } 20568c2ecf20Sopenharmony_ci 20578c2ecf20Sopenharmony_ci mapping = kzalloc(sizeof(struct dma_iommu_mapping), GFP_KERNEL); 20588c2ecf20Sopenharmony_ci if (!mapping) 20598c2ecf20Sopenharmony_ci goto err; 20608c2ecf20Sopenharmony_ci 20618c2ecf20Sopenharmony_ci mapping->bitmap_size = bitmap_size; 20628c2ecf20Sopenharmony_ci mapping->bitmaps = kcalloc(extensions, sizeof(unsigned long *), 20638c2ecf20Sopenharmony_ci GFP_KERNEL); 20648c2ecf20Sopenharmony_ci if (!mapping->bitmaps) 20658c2ecf20Sopenharmony_ci goto err2; 20668c2ecf20Sopenharmony_ci 20678c2ecf20Sopenharmony_ci mapping->bitmaps[0] = kzalloc(bitmap_size, GFP_KERNEL); 20688c2ecf20Sopenharmony_ci if (!mapping->bitmaps[0]) 20698c2ecf20Sopenharmony_ci goto err3; 20708c2ecf20Sopenharmony_ci 20718c2ecf20Sopenharmony_ci mapping->nr_bitmaps = 1; 20728c2ecf20Sopenharmony_ci mapping->extensions = extensions; 20738c2ecf20Sopenharmony_ci mapping->base = base; 20748c2ecf20Sopenharmony_ci mapping->bits = BITS_PER_BYTE * bitmap_size; 20758c2ecf20Sopenharmony_ci 20768c2ecf20Sopenharmony_ci spin_lock_init(&mapping->lock); 20778c2ecf20Sopenharmony_ci 20788c2ecf20Sopenharmony_ci mapping->domain = iommu_domain_alloc(bus); 20798c2ecf20Sopenharmony_ci if (!mapping->domain) 20808c2ecf20Sopenharmony_ci goto err4; 20818c2ecf20Sopenharmony_ci 20828c2ecf20Sopenharmony_ci kref_init(&mapping->kref); 20838c2ecf20Sopenharmony_ci return mapping; 20848c2ecf20Sopenharmony_cierr4: 20858c2ecf20Sopenharmony_ci kfree(mapping->bitmaps[0]); 20868c2ecf20Sopenharmony_cierr3: 20878c2ecf20Sopenharmony_ci kfree(mapping->bitmaps); 20888c2ecf20Sopenharmony_cierr2: 20898c2ecf20Sopenharmony_ci kfree(mapping); 20908c2ecf20Sopenharmony_cierr: 20918c2ecf20Sopenharmony_ci return ERR_PTR(err); 20928c2ecf20Sopenharmony_ci} 20938c2ecf20Sopenharmony_ciEXPORT_SYMBOL_GPL(arm_iommu_create_mapping); 20948c2ecf20Sopenharmony_ci 20958c2ecf20Sopenharmony_cistatic void release_iommu_mapping(struct kref *kref) 20968c2ecf20Sopenharmony_ci{ 20978c2ecf20Sopenharmony_ci int i; 20988c2ecf20Sopenharmony_ci struct dma_iommu_mapping *mapping = 20998c2ecf20Sopenharmony_ci container_of(kref, struct dma_iommu_mapping, kref); 21008c2ecf20Sopenharmony_ci 21018c2ecf20Sopenharmony_ci iommu_domain_free(mapping->domain); 21028c2ecf20Sopenharmony_ci for (i = 0; i < mapping->nr_bitmaps; i++) 21038c2ecf20Sopenharmony_ci kfree(mapping->bitmaps[i]); 21048c2ecf20Sopenharmony_ci kfree(mapping->bitmaps); 
21058c2ecf20Sopenharmony_ci kfree(mapping); 21068c2ecf20Sopenharmony_ci} 21078c2ecf20Sopenharmony_ci 21088c2ecf20Sopenharmony_cistatic int extend_iommu_mapping(struct dma_iommu_mapping *mapping) 21098c2ecf20Sopenharmony_ci{ 21108c2ecf20Sopenharmony_ci int next_bitmap; 21118c2ecf20Sopenharmony_ci 21128c2ecf20Sopenharmony_ci if (mapping->nr_bitmaps >= mapping->extensions) 21138c2ecf20Sopenharmony_ci return -EINVAL; 21148c2ecf20Sopenharmony_ci 21158c2ecf20Sopenharmony_ci next_bitmap = mapping->nr_bitmaps; 21168c2ecf20Sopenharmony_ci mapping->bitmaps[next_bitmap] = kzalloc(mapping->bitmap_size, 21178c2ecf20Sopenharmony_ci GFP_ATOMIC); 21188c2ecf20Sopenharmony_ci if (!mapping->bitmaps[next_bitmap]) 21198c2ecf20Sopenharmony_ci return -ENOMEM; 21208c2ecf20Sopenharmony_ci 21218c2ecf20Sopenharmony_ci mapping->nr_bitmaps++; 21228c2ecf20Sopenharmony_ci 21238c2ecf20Sopenharmony_ci return 0; 21248c2ecf20Sopenharmony_ci} 21258c2ecf20Sopenharmony_ci 21268c2ecf20Sopenharmony_civoid arm_iommu_release_mapping(struct dma_iommu_mapping *mapping) 21278c2ecf20Sopenharmony_ci{ 21288c2ecf20Sopenharmony_ci if (mapping) 21298c2ecf20Sopenharmony_ci kref_put(&mapping->kref, release_iommu_mapping); 21308c2ecf20Sopenharmony_ci} 21318c2ecf20Sopenharmony_ciEXPORT_SYMBOL_GPL(arm_iommu_release_mapping); 21328c2ecf20Sopenharmony_ci 21338c2ecf20Sopenharmony_cistatic int __arm_iommu_attach_device(struct device *dev, 21348c2ecf20Sopenharmony_ci struct dma_iommu_mapping *mapping) 21358c2ecf20Sopenharmony_ci{ 21368c2ecf20Sopenharmony_ci int err; 21378c2ecf20Sopenharmony_ci 21388c2ecf20Sopenharmony_ci err = iommu_attach_device(mapping->domain, dev); 21398c2ecf20Sopenharmony_ci if (err) 21408c2ecf20Sopenharmony_ci return err; 21418c2ecf20Sopenharmony_ci 21428c2ecf20Sopenharmony_ci kref_get(&mapping->kref); 21438c2ecf20Sopenharmony_ci to_dma_iommu_mapping(dev) = mapping; 21448c2ecf20Sopenharmony_ci 21458c2ecf20Sopenharmony_ci pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev)); 21468c2ecf20Sopenharmony_ci return 0; 21478c2ecf20Sopenharmony_ci} 21488c2ecf20Sopenharmony_ci 21498c2ecf20Sopenharmony_ci/** 21508c2ecf20Sopenharmony_ci * arm_iommu_attach_device 21518c2ecf20Sopenharmony_ci * @dev: valid struct device pointer 21528c2ecf20Sopenharmony_ci * @mapping: io address space mapping structure (returned from 21538c2ecf20Sopenharmony_ci * arm_iommu_create_mapping) 21548c2ecf20Sopenharmony_ci * 21558c2ecf20Sopenharmony_ci * Attaches specified io address space mapping to the provided device. 21568c2ecf20Sopenharmony_ci * This replaces the dma operations (dma_map_ops pointer) with the 21578c2ecf20Sopenharmony_ci * IOMMU aware version. 21588c2ecf20Sopenharmony_ci * 21598c2ecf20Sopenharmony_ci * More than one client might be attached to the same io address space 21608c2ecf20Sopenharmony_ci * mapping. 
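 *
 * After a successful attach the driver simply uses the ordinary DMA API and
 * the calls are routed through the IOMMU ops installed here (a sketch, with
 * "dev", "buf" and "len" as assumptions):
 *
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	... start the transfer and wait for completion ...
 *	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);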
21618c2ecf20Sopenharmony_ci */ 21628c2ecf20Sopenharmony_ciint arm_iommu_attach_device(struct device *dev, 21638c2ecf20Sopenharmony_ci struct dma_iommu_mapping *mapping) 21648c2ecf20Sopenharmony_ci{ 21658c2ecf20Sopenharmony_ci int err; 21668c2ecf20Sopenharmony_ci 21678c2ecf20Sopenharmony_ci err = __arm_iommu_attach_device(dev, mapping); 21688c2ecf20Sopenharmony_ci if (err) 21698c2ecf20Sopenharmony_ci return err; 21708c2ecf20Sopenharmony_ci 21718c2ecf20Sopenharmony_ci set_dma_ops(dev, &iommu_ops); 21728c2ecf20Sopenharmony_ci return 0; 21738c2ecf20Sopenharmony_ci} 21748c2ecf20Sopenharmony_ciEXPORT_SYMBOL_GPL(arm_iommu_attach_device); 21758c2ecf20Sopenharmony_ci 21768c2ecf20Sopenharmony_ci/** 21778c2ecf20Sopenharmony_ci * arm_iommu_detach_device 21788c2ecf20Sopenharmony_ci * @dev: valid struct device pointer 21798c2ecf20Sopenharmony_ci * 21808c2ecf20Sopenharmony_ci * Detaches the provided device from a previously attached map. 21818c2ecf20Sopenharmony_ci * This overwrites the dma_ops pointer with appropriate non-IOMMU ops. 21828c2ecf20Sopenharmony_ci */ 21838c2ecf20Sopenharmony_civoid arm_iommu_detach_device(struct device *dev) 21848c2ecf20Sopenharmony_ci{ 21858c2ecf20Sopenharmony_ci struct dma_iommu_mapping *mapping; 21868c2ecf20Sopenharmony_ci 21878c2ecf20Sopenharmony_ci mapping = to_dma_iommu_mapping(dev); 21888c2ecf20Sopenharmony_ci if (!mapping) { 21898c2ecf20Sopenharmony_ci dev_warn(dev, "Not attached\n"); 21908c2ecf20Sopenharmony_ci return; 21918c2ecf20Sopenharmony_ci } 21928c2ecf20Sopenharmony_ci 21938c2ecf20Sopenharmony_ci iommu_detach_device(mapping->domain, dev); 21948c2ecf20Sopenharmony_ci kref_put(&mapping->kref, release_iommu_mapping); 21958c2ecf20Sopenharmony_ci to_dma_iommu_mapping(dev) = NULL; 21968c2ecf20Sopenharmony_ci set_dma_ops(dev, arm_get_dma_map_ops(dev->archdata.dma_coherent)); 21978c2ecf20Sopenharmony_ci 21988c2ecf20Sopenharmony_ci pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev)); 21998c2ecf20Sopenharmony_ci} 22008c2ecf20Sopenharmony_ciEXPORT_SYMBOL_GPL(arm_iommu_detach_device); 22018c2ecf20Sopenharmony_ci 22028c2ecf20Sopenharmony_cistatic const struct dma_map_ops *arm_get_iommu_dma_map_ops(bool coherent) 22038c2ecf20Sopenharmony_ci{ 22048c2ecf20Sopenharmony_ci return coherent ? 
static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
				    const struct iommu_ops *iommu)
{
	struct dma_iommu_mapping *mapping;

	if (!iommu)
		return false;

	mapping = arm_iommu_create_mapping(dev->bus, dma_base, size);
	if (IS_ERR(mapping)) {
		pr_warn("Failed to create %llu-byte IOMMU mapping for device %s\n",
				size, dev_name(dev));
		return false;
	}

	if (__arm_iommu_attach_device(dev, mapping)) {
		pr_warn("Failed to attach device %s to IOMMU mapping\n",
				dev_name(dev));
		arm_iommu_release_mapping(mapping);
		return false;
	}

	return true;
}

static void arm_teardown_iommu_dma_ops(struct device *dev)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);

	if (!mapping)
		return;

	arm_iommu_detach_device(dev);
	arm_iommu_release_mapping(mapping);
}

#else

static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
				    const struct iommu_ops *iommu)
{
	return false;
}

static void arm_teardown_iommu_dma_ops(struct device *dev) { }

#define arm_get_iommu_dma_map_ops arm_get_dma_map_ops

#endif /* CONFIG_ARM_DMA_USE_IOMMU */

void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
			const struct iommu_ops *iommu, bool coherent)
{
	const struct dma_map_ops *dma_ops;

	dev->archdata.dma_coherent = coherent;
#ifdef CONFIG_SWIOTLB
	dev->dma_coherent = coherent;
#endif

	/*
	 * Don't override the dma_ops if they have already been set. Ideally
	 * this should be the only location where dma_ops are set; remove this
	 * check once all other callers of set_dma_ops have disappeared.
	 */
	if (dev->dma_ops)
		return;

	if (arm_setup_iommu_dma_ops(dev, dma_base, size, iommu))
		dma_ops = arm_get_iommu_dma_map_ops(coherent);
	else
		dma_ops = arm_get_dma_map_ops(coherent);

	set_dma_ops(dev, dma_ops);

#ifdef CONFIG_XEN
	if (xen_initial_domain())
		dev->dma_ops = &xen_swiotlb_dma_ops;
#endif
	dev->archdata.dma_ops_setup = true;
}
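/*
 * Illustrative sketch (not part of this file): drivers do not call
 * arch_setup_dma_ops() directly. The driver core invokes it while a device
 * is being probed, once the bus/firmware code (of_dma_configure() on
 * DT-based systems) has worked out the DMA window and coherency. The base,
 * size and coherency values below are hypothetical:
 *
 *	u64 dma_base = 0x80000000;
 *	u64 size = SZ_512M;
 *	bool coherent = true;
 *
 *	arch_setup_dma_ops(dev, dma_base, size, iommu, coherent);
 *
 * After this, set_dma_ops() has installed either the IOMMU-aware ops or
 * the plain arm ops, and arch_teardown_dma_ops() below undoes it when the
 * device is unbound.
 */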
void arch_teardown_dma_ops(struct device *dev)
{
	if (!dev->archdata.dma_ops_setup)
		return;

	arm_teardown_iommu_dma_ops(dev);
	/* Let arch_setup_dma_ops() start again from scratch upon re-probe */
	set_dma_ops(dev, NULL);
}

#ifdef CONFIG_SWIOTLB
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	__dma_page_cpu_to_dev(phys_to_page(paddr), paddr & (PAGE_SIZE - 1),
			      size, dir);
}

void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	__dma_page_dev_to_cpu(phys_to_page(paddr), paddr & (PAGE_SIZE - 1),
			      size, dir);
}

void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	return __dma_alloc(dev, size, dma_handle, gfp,
			   __get_dma_pgprot(attrs, PAGE_KERNEL), false,
			   attrs, __builtin_return_address(0));
}

void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	__arm_dma_free(dev, size, cpu_addr, dma_handle, attrs, false);
}
#endif /* CONFIG_SWIOTLB */
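/*
 * Illustrative sketch (not part of this file): with CONFIG_SWIOTLB the
 * generic dma-direct/swiotlb code calls the arch_sync_dma_for_*() hooks
 * above for non-coherent devices, so the usual streaming DMA pattern in a
 * driver ends up in __dma_page_cpu_to_dev()/__dma_page_dev_to_cpu(). The
 * device, buffer and length below are hypothetical:
 *
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *
 *	Start the transfer and wait for it to complete, then reclaim the
 *	buffer for the CPU before reading it:
 *
 *	dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
 *	... inspect the data ...
 *	dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);
 *
 *	dma_unmap_single(dev, dma, len, DMA_FROM_DEVICE);
 */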