162306a36Sopenharmony_ci// SPDX-License-Identifier: GPL-2.0-only
262306a36Sopenharmony_ci/*
362306a36Sopenharmony_ci *  linux/arch/arm/mm/dma-mapping.c
462306a36Sopenharmony_ci *
562306a36Sopenharmony_ci *  Copyright (C) 2000-2004 Russell King
662306a36Sopenharmony_ci *
762306a36Sopenharmony_ci *  DMA uncached mapping support.
862306a36Sopenharmony_ci */
962306a36Sopenharmony_ci#include <linux/module.h>
1062306a36Sopenharmony_ci#include <linux/mm.h>
1162306a36Sopenharmony_ci#include <linux/genalloc.h>
1262306a36Sopenharmony_ci#include <linux/gfp.h>
1362306a36Sopenharmony_ci#include <linux/errno.h>
1462306a36Sopenharmony_ci#include <linux/list.h>
1562306a36Sopenharmony_ci#include <linux/init.h>
1662306a36Sopenharmony_ci#include <linux/device.h>
1762306a36Sopenharmony_ci#include <linux/dma-direct.h>
1862306a36Sopenharmony_ci#include <linux/dma-map-ops.h>
1962306a36Sopenharmony_ci#include <linux/highmem.h>
2062306a36Sopenharmony_ci#include <linux/memblock.h>
2162306a36Sopenharmony_ci#include <linux/slab.h>
2262306a36Sopenharmony_ci#include <linux/iommu.h>
2362306a36Sopenharmony_ci#include <linux/io.h>
2462306a36Sopenharmony_ci#include <linux/vmalloc.h>
2562306a36Sopenharmony_ci#include <linux/sizes.h>
2662306a36Sopenharmony_ci#include <linux/cma.h>
2762306a36Sopenharmony_ci
2862306a36Sopenharmony_ci#include <asm/page.h>
2962306a36Sopenharmony_ci#include <asm/highmem.h>
3062306a36Sopenharmony_ci#include <asm/cacheflush.h>
3162306a36Sopenharmony_ci#include <asm/tlbflush.h>
3262306a36Sopenharmony_ci#include <asm/mach/arch.h>
3362306a36Sopenharmony_ci#include <asm/dma-iommu.h>
3462306a36Sopenharmony_ci#include <asm/mach/map.h>
3562306a36Sopenharmony_ci#include <asm/system_info.h>
3662306a36Sopenharmony_ci#include <asm/xen/xen-ops.h>
3762306a36Sopenharmony_ci
3862306a36Sopenharmony_ci#include "dma.h"
3962306a36Sopenharmony_ci#include "mm.h"
4062306a36Sopenharmony_ci
/*
 * Argument bundle handed to an arm_dma_allocator's ->alloc() hook;
 * mirrors the parameters of __dma_alloc().
 */
struct arm_dma_alloc_args {
	struct device *dev;	/* device the buffer is allocated for */
	size_t size;		/* page-aligned allocation size */
	gfp_t gfp;		/* allocation flags */
	pgprot_t prot;		/* protection used for any kernel remap */
	const void *caller;	/* allocation call site, passed to the remap code */
	bool want_vaddr;	/* false when DMA_ATTR_NO_KERNEL_MAPPING is set */
	int coherent_flag;	/* COHERENT or NORMAL */
};
5062306a36Sopenharmony_ci
/*
 * Argument bundle handed to an arm_dma_allocator's ->free() hook;
 * mirrors the parameters of __arm_dma_free().
 */
struct arm_dma_free_args {
	struct device *dev;	/* device the buffer was allocated for */
	size_t size;		/* page-aligned size of the buffer */
	void *cpu_addr;		/* kernel virtual address (may be the page ptr
				 * itself when no kernel mapping was made) */
	struct page *page;	/* first page of the buffer */
	bool want_vaddr;	/* false when DMA_ATTR_NO_KERNEL_MAPPING was set */
};
5862306a36Sopenharmony_ci
/* coherent_flag values: NORMAL buffers need cache maintenance, COHERENT don't */
#define NORMAL	    0
#define COHERENT    1
6162306a36Sopenharmony_ci
/* Strategy vtable: one allocator per buffer flavour (simple/cma/pool/remap). */
struct arm_dma_allocator {
	void *(*alloc)(struct arm_dma_alloc_args *args,
		       struct page **ret_page);
	void (*free)(struct arm_dma_free_args *args);
};
6762306a36Sopenharmony_ci
/* Per-allocation bookkeeping so the free path can find the right allocator. */
struct arm_dma_buffer {
	struct list_head list;		/* linked on arm_dma_bufs */
	void *virt;			/* lookup key used by arm_dma_buffer_find() */
	struct arm_dma_allocator *allocator;	/* how this buffer was obtained */
};
7362306a36Sopenharmony_ci
/* All live DMA buffers; maps a cpu address back to its arm_dma_buffer. */
static LIST_HEAD(arm_dma_bufs);
/* Protects arm_dma_bufs; IRQ-safe since alloc/free may run in atomic context. */
static DEFINE_SPINLOCK(arm_dma_bufs_lock);
7662306a36Sopenharmony_ci
7762306a36Sopenharmony_cistatic struct arm_dma_buffer *arm_dma_buffer_find(void *virt)
7862306a36Sopenharmony_ci{
7962306a36Sopenharmony_ci	struct arm_dma_buffer *buf, *found = NULL;
8062306a36Sopenharmony_ci	unsigned long flags;
8162306a36Sopenharmony_ci
8262306a36Sopenharmony_ci	spin_lock_irqsave(&arm_dma_bufs_lock, flags);
8362306a36Sopenharmony_ci	list_for_each_entry(buf, &arm_dma_bufs, list) {
8462306a36Sopenharmony_ci		if (buf->virt == virt) {
8562306a36Sopenharmony_ci			list_del(&buf->list);
8662306a36Sopenharmony_ci			found = buf;
8762306a36Sopenharmony_ci			break;
8862306a36Sopenharmony_ci		}
8962306a36Sopenharmony_ci	}
9062306a36Sopenharmony_ci	spin_unlock_irqrestore(&arm_dma_bufs_lock, flags);
9162306a36Sopenharmony_ci	return found;
9262306a36Sopenharmony_ci}
9362306a36Sopenharmony_ci
9462306a36Sopenharmony_ci/*
9562306a36Sopenharmony_ci * The DMA API is built upon the notion of "buffer ownership".  A buffer
9662306a36Sopenharmony_ci * is either exclusively owned by the CPU (and therefore may be accessed
9762306a36Sopenharmony_ci * by it) or exclusively owned by the DMA device.  These helper functions
9862306a36Sopenharmony_ci * represent the transitions between these two ownership states.
9962306a36Sopenharmony_ci *
10062306a36Sopenharmony_ci * Note, however, that on later ARMs, this notion does not work due to
10162306a36Sopenharmony_ci * speculative prefetches.  We model our approach on the assumption that
10262306a36Sopenharmony_ci * the CPU does do speculative prefetches, which means we clean caches
10362306a36Sopenharmony_ci * before transfers and delay cache invalidation until transfer completion.
10462306a36Sopenharmony_ci *
10562306a36Sopenharmony_ci */
10662306a36Sopenharmony_ci
/*
 * Zero a freshly allocated buffer and, for non-coherent use, flush the
 * zeroed data out of the CPU and outer caches so DMA never sees stale
 * lines from the kernel direct mapping.  'size' must be page aligned.
 */
static void __dma_clear_buffer(struct page *page, size_t size, int coherent_flag)
{
	/*
	 * Ensure that the allocated pages are zeroed, and that any data
	 * lurking in the kernel direct-mapped region is invalidated.
	 */
	if (PageHighMem(page)) {
		phys_addr_t base = __pfn_to_phys(page_to_pfn(page));
		phys_addr_t end = base + size;
		/* highmem pages must be mapped and cleaned one at a time */
		while (size > 0) {
			void *ptr = kmap_atomic(page);
			memset(ptr, 0, PAGE_SIZE);
			if (coherent_flag != COHERENT)
				dmac_flush_range(ptr, ptr + PAGE_SIZE);
			kunmap_atomic(ptr);
			page++;
			size -= PAGE_SIZE;
		}
		/* outer cache works on physical addresses: one flush for the lot */
		if (coherent_flag != COHERENT)
			outer_flush_range(base, end);
	} else {
		/* lowmem: permanently mapped, clear and flush in one go */
		void *ptr = page_address(page);
		memset(ptr, 0, size);
		if (coherent_flag != COHERENT) {
			dmac_flush_range(ptr, ptr + size);
			outer_flush_range(__pa(ptr), __pa(ptr) + size);
		}
	}
}
13662306a36Sopenharmony_ci
13762306a36Sopenharmony_ci/*
13862306a36Sopenharmony_ci * Allocate a DMA buffer for 'dev' of size 'size' using the
13962306a36Sopenharmony_ci * specified gfp mask.  Note that 'size' must be page aligned.
14062306a36Sopenharmony_ci */
14162306a36Sopenharmony_cistatic struct page *__dma_alloc_buffer(struct device *dev, size_t size,
14262306a36Sopenharmony_ci				       gfp_t gfp, int coherent_flag)
14362306a36Sopenharmony_ci{
14462306a36Sopenharmony_ci	unsigned long order = get_order(size);
14562306a36Sopenharmony_ci	struct page *page, *p, *e;
14662306a36Sopenharmony_ci
14762306a36Sopenharmony_ci	page = alloc_pages(gfp, order);
14862306a36Sopenharmony_ci	if (!page)
14962306a36Sopenharmony_ci		return NULL;
15062306a36Sopenharmony_ci
15162306a36Sopenharmony_ci	/*
15262306a36Sopenharmony_ci	 * Now split the huge page and free the excess pages
15362306a36Sopenharmony_ci	 */
15462306a36Sopenharmony_ci	split_page(page, order);
15562306a36Sopenharmony_ci	for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
15662306a36Sopenharmony_ci		__free_page(p);
15762306a36Sopenharmony_ci
15862306a36Sopenharmony_ci	__dma_clear_buffer(page, size, coherent_flag);
15962306a36Sopenharmony_ci
16062306a36Sopenharmony_ci	return page;
16162306a36Sopenharmony_ci}
16262306a36Sopenharmony_ci
16362306a36Sopenharmony_ci/*
16462306a36Sopenharmony_ci * Free a DMA buffer.  'size' must be page aligned.
16562306a36Sopenharmony_ci */
16662306a36Sopenharmony_cistatic void __dma_free_buffer(struct page *page, size_t size)
16762306a36Sopenharmony_ci{
16862306a36Sopenharmony_ci	struct page *e = page + (size >> PAGE_SHIFT);
16962306a36Sopenharmony_ci
17062306a36Sopenharmony_ci	while (page < e) {
17162306a36Sopenharmony_ci		__free_page(page);
17262306a36Sopenharmony_ci		page++;
17362306a36Sopenharmony_ci	}
17462306a36Sopenharmony_ci}
17562306a36Sopenharmony_ci
/* Forward declarations: atomic_pool_init() below needs both allocators. */
static void *__alloc_from_contiguous(struct device *dev, size_t size,
				     pgprot_t prot, struct page **ret_page,
				     const void *caller, bool want_vaddr,
				     int coherent_flag, gfp_t gfp);

static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
				 pgprot_t prot, struct page **ret_page,
				 const void *caller, bool want_vaddr);

/* Pool backing DMA allocations made from atomic (non-blocking) context. */
#define DEFAULT_DMA_COHERENT_POOL_SIZE	SZ_256K
static struct gen_pool *atomic_pool __ro_after_init;

/* Pool size; may be overridden on the command line via "coherent_pool=". */
static size_t atomic_pool_size __initdata = DEFAULT_DMA_COHERENT_POOL_SIZE;
18962306a36Sopenharmony_ci
/* Parse the "coherent_pool=<size>" early parameter (e.g. "coherent_pool=1M"). */
static int __init early_coherent_pool(char *p)
{
	atomic_pool_size = memparse(p, &p);
	return 0;
}
early_param("coherent_pool", early_coherent_pool);
19662306a36Sopenharmony_ci
19762306a36Sopenharmony_ci/*
19862306a36Sopenharmony_ci * Initialise the coherent pool for atomic allocations.
19962306a36Sopenharmony_ci */
/*
 * Initialise the coherent pool for atomic allocations.
 *
 * Carves out atomic_pool_size bytes (from CMA if available, otherwise
 * from the page allocator with a kernel remap) and hands them to a
 * genalloc pool so later GFP_ATOMIC allocations need not block.
 * Returns 0 on success, -ENOMEM on failure.
 */
static int __init atomic_pool_init(void)
{
	pgprot_t prot = pgprot_dmacoherent(PAGE_KERNEL);
	gfp_t gfp = GFP_KERNEL | GFP_DMA;
	struct page *page;
	void *ptr;

	atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
	if (!atomic_pool)
		goto out;
	/*
	 * The atomic pool is only used for non-coherent allocations
	 * so we must pass NORMAL for coherent_flag.
	 */
	if (dev_get_cma_area(NULL))
		ptr = __alloc_from_contiguous(NULL, atomic_pool_size, prot,
				      &page, atomic_pool_init, true, NORMAL,
				      GFP_KERNEL);
	else
		ptr = __alloc_remap_buffer(NULL, atomic_pool_size, gfp, prot,
					   &page, atomic_pool_init, true);
	if (ptr) {
		int ret;

		ret = gen_pool_add_virt(atomic_pool, (unsigned long)ptr,
					page_to_phys(page),
					atomic_pool_size, -1);
		if (ret)
			goto destroy_genpool;

		/* order-aligned first-fit keeps pool fragmentation down */
		gen_pool_set_algo(atomic_pool,
				gen_pool_first_fit_order_align,
				NULL);
		pr_info("DMA: preallocated %zu KiB pool for atomic coherent allocations\n",
		       atomic_pool_size / 1024);
		return 0;
	}

	/* NOTE: a failed backing allocation (ptr == NULL) falls through here */
destroy_genpool:
	gen_pool_destroy(atomic_pool);
	atomic_pool = NULL;
out:
	pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n",
	       atomic_pool_size / 1024);
	return -ENOMEM;
}
24662306a36Sopenharmony_ci/*
24762306a36Sopenharmony_ci * CMA is activated by core_initcall, so we must be called after it.
24862306a36Sopenharmony_ci */
24962306a36Sopenharmony_cipostcore_initcall(atomic_pool_init);
25062306a36Sopenharmony_ci
25162306a36Sopenharmony_ci#ifdef CONFIG_CMA_AREAS
/* One early-reserved CMA area whose linear mapping must be rebuilt later. */
struct dma_contig_early_reserve {
	phys_addr_t base;	/* physical start of the reserved area */
	unsigned long size;	/* length of the area in bytes */
};

/* Areas recorded at early boot, remapped by dma_contiguous_remap(). */
static struct dma_contig_early_reserve dma_mmu_remap[MAX_CMA_AREAS] __initdata;

/* Number of valid entries in dma_mmu_remap[]. */
static int dma_mmu_remap_num __initdata;
26062306a36Sopenharmony_ci
26162306a36Sopenharmony_ci#ifdef CONFIG_DMA_CMA
26262306a36Sopenharmony_civoid __init dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)
26362306a36Sopenharmony_ci{
26462306a36Sopenharmony_ci	dma_mmu_remap[dma_mmu_remap_num].base = base;
26562306a36Sopenharmony_ci	dma_mmu_remap[dma_mmu_remap_num].size = size;
26662306a36Sopenharmony_ci	dma_mmu_remap_num++;
26762306a36Sopenharmony_ci}
26862306a36Sopenharmony_ci#endif
26962306a36Sopenharmony_ci
/*
 * Rebuild the kernel linear mapping of every early-reserved CMA area
 * (recorded in dma_mmu_remap[]) with MT_MEMORY_DMA_READY attributes,
 * so the pages can later be remapped per-allocation without attribute
 * aliasing.  Only the lowmem portion of each area is touched.
 */
void __init dma_contiguous_remap(void)
{
	int i;
	for (i = 0; i < dma_mmu_remap_num; i++) {
		phys_addr_t start = dma_mmu_remap[i].base;
		phys_addr_t end = start + dma_mmu_remap[i].size;
		struct map_desc map;
		unsigned long addr;

		/* clamp to lowmem; highmem has no linear mapping to fix up */
		if (end > arm_lowmem_limit)
			end = arm_lowmem_limit;
		if (start >= end)
			continue;

		map.pfn = __phys_to_pfn(start);
		map.virtual = __phys_to_virt(start);
		map.length = end - start;
		map.type = MT_MEMORY_DMA_READY;

		/*
		 * Clear previous low-memory mapping to ensure that the
		 * TLB does not see any conflicting entries, then flush
		 * the TLB of the old entries before creating new mappings.
		 *
		 * This ensures that any speculatively loaded TLB entries
		 * (even though they may be rare) can not cause any problems,
		 * and ensures that this code is architecturally compliant.
		 */
		for (addr = __phys_to_virt(start); addr < __phys_to_virt(end);
		     addr += PMD_SIZE)
			pmd_clear(pmd_off_k(addr));

		flush_tlb_kernel_range(__phys_to_virt(start),
				       __phys_to_virt(end));

		iotable_init(&map, 1);
	}
}
30862306a36Sopenharmony_ci#endif
30962306a36Sopenharmony_ci
/*
 * apply_to_page_range() callback: rewrite one kernel PTE with the new
 * protection bits carried in *data, keeping the page it maps unchanged.
 * Always returns 0 so the walk continues over the whole range.
 */
static int __dma_update_pte(pte_t *pte, unsigned long addr, void *data)
{
	struct page *page = virt_to_page((void *)addr);
	pgprot_t prot = *(pgprot_t *)data;

	set_pte_ext(pte, mk_pte(page, prot), 0);
	return 0;
}
31862306a36Sopenharmony_ci
31962306a36Sopenharmony_cistatic void __dma_remap(struct page *page, size_t size, pgprot_t prot)
32062306a36Sopenharmony_ci{
32162306a36Sopenharmony_ci	unsigned long start = (unsigned long) page_address(page);
32262306a36Sopenharmony_ci	unsigned end = start + size;
32362306a36Sopenharmony_ci
32462306a36Sopenharmony_ci	apply_to_page_range(&init_mm, start, size, __dma_update_pte, &prot);
32562306a36Sopenharmony_ci	flush_tlb_kernel_range(start, end);
32662306a36Sopenharmony_ci}
32762306a36Sopenharmony_ci
32862306a36Sopenharmony_cistatic void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
32962306a36Sopenharmony_ci				 pgprot_t prot, struct page **ret_page,
33062306a36Sopenharmony_ci				 const void *caller, bool want_vaddr)
33162306a36Sopenharmony_ci{
33262306a36Sopenharmony_ci	struct page *page;
33362306a36Sopenharmony_ci	void *ptr = NULL;
33462306a36Sopenharmony_ci	/*
33562306a36Sopenharmony_ci	 * __alloc_remap_buffer is only called when the device is
33662306a36Sopenharmony_ci	 * non-coherent
33762306a36Sopenharmony_ci	 */
33862306a36Sopenharmony_ci	page = __dma_alloc_buffer(dev, size, gfp, NORMAL);
33962306a36Sopenharmony_ci	if (!page)
34062306a36Sopenharmony_ci		return NULL;
34162306a36Sopenharmony_ci	if (!want_vaddr)
34262306a36Sopenharmony_ci		goto out;
34362306a36Sopenharmony_ci
34462306a36Sopenharmony_ci	ptr = dma_common_contiguous_remap(page, size, prot, caller);
34562306a36Sopenharmony_ci	if (!ptr) {
34662306a36Sopenharmony_ci		__dma_free_buffer(page, size);
34762306a36Sopenharmony_ci		return NULL;
34862306a36Sopenharmony_ci	}
34962306a36Sopenharmony_ci
35062306a36Sopenharmony_ci out:
35162306a36Sopenharmony_ci	*ret_page = page;
35262306a36Sopenharmony_ci	return ptr;
35362306a36Sopenharmony_ci}
35462306a36Sopenharmony_ci
35562306a36Sopenharmony_cistatic void *__alloc_from_pool(size_t size, struct page **ret_page)
35662306a36Sopenharmony_ci{
35762306a36Sopenharmony_ci	unsigned long val;
35862306a36Sopenharmony_ci	void *ptr = NULL;
35962306a36Sopenharmony_ci
36062306a36Sopenharmony_ci	if (!atomic_pool) {
36162306a36Sopenharmony_ci		WARN(1, "coherent pool not initialised!\n");
36262306a36Sopenharmony_ci		return NULL;
36362306a36Sopenharmony_ci	}
36462306a36Sopenharmony_ci
36562306a36Sopenharmony_ci	val = gen_pool_alloc(atomic_pool, size);
36662306a36Sopenharmony_ci	if (val) {
36762306a36Sopenharmony_ci		phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);
36862306a36Sopenharmony_ci
36962306a36Sopenharmony_ci		*ret_page = phys_to_page(phys);
37062306a36Sopenharmony_ci		ptr = (void *)val;
37162306a36Sopenharmony_ci	}
37262306a36Sopenharmony_ci
37362306a36Sopenharmony_ci	return ptr;
37462306a36Sopenharmony_ci}
37562306a36Sopenharmony_ci
/* True when [start, start + size) lies entirely within the atomic pool. */
static bool __in_atomic_pool(void *start, size_t size)
{
	return gen_pool_has_addr(atomic_pool, (unsigned long)start, size);
}
38062306a36Sopenharmony_ci
38162306a36Sopenharmony_cistatic int __free_from_pool(void *start, size_t size)
38262306a36Sopenharmony_ci{
38362306a36Sopenharmony_ci	if (!__in_atomic_pool(start, size))
38462306a36Sopenharmony_ci		return 0;
38562306a36Sopenharmony_ci
38662306a36Sopenharmony_ci	gen_pool_free(atomic_pool, (unsigned long)start, size);
38762306a36Sopenharmony_ci
38862306a36Sopenharmony_ci	return 1;
38962306a36Sopenharmony_ci}
39062306a36Sopenharmony_ci
/*
 * Allocate a buffer from the device's CMA area.  The pages are zeroed
 * and cache-cleaned; when a kernel mapping is wanted, highmem pages are
 * remapped into vmalloc space while lowmem pages have their linear
 * mapping's attributes changed in place.  Sets *ret_page on success and
 * returns the virtual address (NULL when want_vaddr is false); returns
 * NULL without touching *ret_page on failure.
 */
static void *__alloc_from_contiguous(struct device *dev, size_t size,
				     pgprot_t prot, struct page **ret_page,
				     const void *caller, bool want_vaddr,
				     int coherent_flag, gfp_t gfp)
{
	unsigned long order = get_order(size);
	size_t count = size >> PAGE_SHIFT;
	struct page *page;
	void *ptr = NULL;

	page = dma_alloc_from_contiguous(dev, count, order, gfp & __GFP_NOWARN);
	if (!page)
		return NULL;

	__dma_clear_buffer(page, size, coherent_flag);

	if (!want_vaddr)
		goto out;

	if (PageHighMem(page)) {
		/* no linear mapping exists: create a fresh vmalloc one */
		ptr = dma_common_contiguous_remap(page, size, prot, caller);
		if (!ptr) {
			dma_release_from_contiguous(dev, page, count);
			return NULL;
		}
	} else {
		/* reuse the linear mapping with the DMA attributes applied */
		__dma_remap(page, size, prot);
		ptr = page_address(page);
	}

 out:
	*ret_page = page;
	return ptr;
}
42562306a36Sopenharmony_ci
42662306a36Sopenharmony_cistatic void __free_from_contiguous(struct device *dev, struct page *page,
42762306a36Sopenharmony_ci				   void *cpu_addr, size_t size, bool want_vaddr)
42862306a36Sopenharmony_ci{
42962306a36Sopenharmony_ci	if (want_vaddr) {
43062306a36Sopenharmony_ci		if (PageHighMem(page))
43162306a36Sopenharmony_ci			dma_common_free_remap(cpu_addr, size);
43262306a36Sopenharmony_ci		else
43362306a36Sopenharmony_ci			__dma_remap(page, size, PAGE_KERNEL);
43462306a36Sopenharmony_ci	}
43562306a36Sopenharmony_ci	dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
43662306a36Sopenharmony_ci}
43762306a36Sopenharmony_ci
43862306a36Sopenharmony_cistatic inline pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot)
43962306a36Sopenharmony_ci{
44062306a36Sopenharmony_ci	prot = (attrs & DMA_ATTR_WRITE_COMBINE) ?
44162306a36Sopenharmony_ci			pgprot_writecombine(prot) :
44262306a36Sopenharmony_ci			pgprot_dmacoherent(prot);
44362306a36Sopenharmony_ci	return prot;
44462306a36Sopenharmony_ci}
44562306a36Sopenharmony_ci
44662306a36Sopenharmony_cistatic void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp,
44762306a36Sopenharmony_ci				   struct page **ret_page)
44862306a36Sopenharmony_ci{
44962306a36Sopenharmony_ci	struct page *page;
45062306a36Sopenharmony_ci	/* __alloc_simple_buffer is only called when the device is coherent */
45162306a36Sopenharmony_ci	page = __dma_alloc_buffer(dev, size, gfp, COHERENT);
45262306a36Sopenharmony_ci	if (!page)
45362306a36Sopenharmony_ci		return NULL;
45462306a36Sopenharmony_ci
45562306a36Sopenharmony_ci	*ret_page = page;
45662306a36Sopenharmony_ci	return page_address(page);
45762306a36Sopenharmony_ci}
45862306a36Sopenharmony_ci
/* ->alloc hook: direct page-allocator buffers for coherent devices. */
static void *simple_allocator_alloc(struct arm_dma_alloc_args *args,
				    struct page **ret_page)
{
	return __alloc_simple_buffer(args->dev, args->size, args->gfp,
				     ret_page);
}

/* ->free hook: no mapping to tear down, just release the pages. */
static void simple_allocator_free(struct arm_dma_free_args *args)
{
	__dma_free_buffer(args->page, args->size);
}

static struct arm_dma_allocator simple_allocator = {
	.alloc = simple_allocator_alloc,
	.free = simple_allocator_free,
};
47562306a36Sopenharmony_ci
/* ->alloc hook: CMA-backed buffers; used whenever a CMA area exists. */
static void *cma_allocator_alloc(struct arm_dma_alloc_args *args,
				 struct page **ret_page)
{
	return __alloc_from_contiguous(args->dev, args->size, args->prot,
				       ret_page, args->caller,
				       args->want_vaddr, args->coherent_flag,
				       args->gfp);
}

/* ->free hook: unmap (if mapped) and return the pages to CMA. */
static void cma_allocator_free(struct arm_dma_free_args *args)
{
	__free_from_contiguous(args->dev, args->page, args->cpu_addr,
			       args->size, args->want_vaddr);
}

static struct arm_dma_allocator cma_allocator = {
	.alloc = cma_allocator_alloc,
	.free = cma_allocator_free,
};
49562306a36Sopenharmony_ci
/* ->alloc hook: atomic-pool buffers, for non-blocking (atomic) callers. */
static void *pool_allocator_alloc(struct arm_dma_alloc_args *args,
				  struct page **ret_page)
{
	return __alloc_from_pool(args->size, ret_page);
}

/* ->free hook: give the region back to the atomic pool. */
static void pool_allocator_free(struct arm_dma_free_args *args)
{
	__free_from_pool(args->cpu_addr, args->size);
}

static struct arm_dma_allocator pool_allocator = {
	.alloc = pool_allocator_alloc,
	.free = pool_allocator_free,
};
51162306a36Sopenharmony_ci
/* ->alloc hook: page-allocator buffers remapped with DMA attributes,
 * for non-coherent devices without CMA when blocking is allowed. */
static void *remap_allocator_alloc(struct arm_dma_alloc_args *args,
				   struct page **ret_page)
{
	return __alloc_remap_buffer(args->dev, args->size, args->gfp,
				    args->prot, ret_page, args->caller,
				    args->want_vaddr);
}

/* ->free hook: drop the vmalloc remap (if any), then free the pages. */
static void remap_allocator_free(struct arm_dma_free_args *args)
{
	if (args->want_vaddr)
		dma_common_free_remap(args->cpu_addr, args->size);

	__dma_free_buffer(args->page, args->size);
}

static struct arm_dma_allocator remap_allocator = {
	.alloc = remap_allocator_alloc,
	.free = remap_allocator_free,
};
53262306a36Sopenharmony_ci
53362306a36Sopenharmony_cistatic void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
53462306a36Sopenharmony_ci			 gfp_t gfp, pgprot_t prot, bool is_coherent,
53562306a36Sopenharmony_ci			 unsigned long attrs, const void *caller)
53662306a36Sopenharmony_ci{
53762306a36Sopenharmony_ci	u64 mask = min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
53862306a36Sopenharmony_ci	struct page *page = NULL;
53962306a36Sopenharmony_ci	void *addr;
54062306a36Sopenharmony_ci	bool allowblock, cma;
54162306a36Sopenharmony_ci	struct arm_dma_buffer *buf;
54262306a36Sopenharmony_ci	struct arm_dma_alloc_args args = {
54362306a36Sopenharmony_ci		.dev = dev,
54462306a36Sopenharmony_ci		.size = PAGE_ALIGN(size),
54562306a36Sopenharmony_ci		.gfp = gfp,
54662306a36Sopenharmony_ci		.prot = prot,
54762306a36Sopenharmony_ci		.caller = caller,
54862306a36Sopenharmony_ci		.want_vaddr = ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0),
54962306a36Sopenharmony_ci		.coherent_flag = is_coherent ? COHERENT : NORMAL,
55062306a36Sopenharmony_ci	};
55162306a36Sopenharmony_ci
55262306a36Sopenharmony_ci#ifdef CONFIG_DMA_API_DEBUG
55362306a36Sopenharmony_ci	u64 limit = (mask + 1) & ~mask;
55462306a36Sopenharmony_ci	if (limit && size >= limit) {
55562306a36Sopenharmony_ci		dev_warn(dev, "coherent allocation too big (requested %#x mask %#llx)\n",
55662306a36Sopenharmony_ci			size, mask);
55762306a36Sopenharmony_ci		return NULL;
55862306a36Sopenharmony_ci	}
55962306a36Sopenharmony_ci#endif
56062306a36Sopenharmony_ci
56162306a36Sopenharmony_ci	buf = kzalloc(sizeof(*buf),
56262306a36Sopenharmony_ci		      gfp & ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM));
56362306a36Sopenharmony_ci	if (!buf)
56462306a36Sopenharmony_ci		return NULL;
56562306a36Sopenharmony_ci
56662306a36Sopenharmony_ci	if (mask < 0xffffffffULL)
56762306a36Sopenharmony_ci		gfp |= GFP_DMA;
56862306a36Sopenharmony_ci
56962306a36Sopenharmony_ci	args.gfp = gfp;
57062306a36Sopenharmony_ci
57162306a36Sopenharmony_ci	*handle = DMA_MAPPING_ERROR;
57262306a36Sopenharmony_ci	allowblock = gfpflags_allow_blocking(gfp);
57362306a36Sopenharmony_ci	cma = allowblock ? dev_get_cma_area(dev) : NULL;
57462306a36Sopenharmony_ci
57562306a36Sopenharmony_ci	if (cma)
57662306a36Sopenharmony_ci		buf->allocator = &cma_allocator;
57762306a36Sopenharmony_ci	else if (is_coherent)
57862306a36Sopenharmony_ci		buf->allocator = &simple_allocator;
57962306a36Sopenharmony_ci	else if (allowblock)
58062306a36Sopenharmony_ci		buf->allocator = &remap_allocator;
58162306a36Sopenharmony_ci	else
58262306a36Sopenharmony_ci		buf->allocator = &pool_allocator;
58362306a36Sopenharmony_ci
58462306a36Sopenharmony_ci	addr = buf->allocator->alloc(&args, &page);
58562306a36Sopenharmony_ci
58662306a36Sopenharmony_ci	if (page) {
58762306a36Sopenharmony_ci		unsigned long flags;
58862306a36Sopenharmony_ci
58962306a36Sopenharmony_ci		*handle = phys_to_dma(dev, page_to_phys(page));
59062306a36Sopenharmony_ci		buf->virt = args.want_vaddr ? addr : page;
59162306a36Sopenharmony_ci
59262306a36Sopenharmony_ci		spin_lock_irqsave(&arm_dma_bufs_lock, flags);
59362306a36Sopenharmony_ci		list_add(&buf->list, &arm_dma_bufs);
59462306a36Sopenharmony_ci		spin_unlock_irqrestore(&arm_dma_bufs_lock, flags);
59562306a36Sopenharmony_ci	} else {
59662306a36Sopenharmony_ci		kfree(buf);
59762306a36Sopenharmony_ci	}
59862306a36Sopenharmony_ci
59962306a36Sopenharmony_ci	return args.want_vaddr ? addr : page;
60062306a36Sopenharmony_ci}
60162306a36Sopenharmony_ci
60262306a36Sopenharmony_ci/*
60362306a36Sopenharmony_ci * Free a buffer as defined by the above mapping.
60462306a36Sopenharmony_ci */
60562306a36Sopenharmony_cistatic void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
60662306a36Sopenharmony_ci			   dma_addr_t handle, unsigned long attrs,
60762306a36Sopenharmony_ci			   bool is_coherent)
60862306a36Sopenharmony_ci{
60962306a36Sopenharmony_ci	struct page *page = phys_to_page(dma_to_phys(dev, handle));
61062306a36Sopenharmony_ci	struct arm_dma_buffer *buf;
61162306a36Sopenharmony_ci	struct arm_dma_free_args args = {
61262306a36Sopenharmony_ci		.dev = dev,
61362306a36Sopenharmony_ci		.size = PAGE_ALIGN(size),
61462306a36Sopenharmony_ci		.cpu_addr = cpu_addr,
61562306a36Sopenharmony_ci		.page = page,
61662306a36Sopenharmony_ci		.want_vaddr = ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0),
61762306a36Sopenharmony_ci	};
61862306a36Sopenharmony_ci
61962306a36Sopenharmony_ci	buf = arm_dma_buffer_find(cpu_addr);
62062306a36Sopenharmony_ci	if (WARN(!buf, "Freeing invalid buffer %p\n", cpu_addr))
62162306a36Sopenharmony_ci		return;
62262306a36Sopenharmony_ci
62362306a36Sopenharmony_ci	buf->allocator->free(&args);
62462306a36Sopenharmony_ci	kfree(buf);
62562306a36Sopenharmony_ci}
62662306a36Sopenharmony_ci
/*
 * Apply a cache maintenance operation 'op' to 'size' bytes starting at
 * 'offset' within 'page'.  The region may span several physically
 * contiguous pages; highmem pages are mapped temporarily one at a time
 * since they have no permanent kernel mapping.
 */
static void dma_cache_maint_page(struct page *page, unsigned long offset,
	size_t size, enum dma_data_direction dir,
	void (*op)(const void *, size_t, int))
{
	unsigned long pfn;
	size_t left = size;

	/* normalise so that offset is always within the current page */
	pfn = page_to_pfn(page) + offset / PAGE_SIZE;
	offset %= PAGE_SIZE;

	/*
	 * A single sg entry may refer to multiple physically contiguous
	 * pages.  But we still need to process highmem pages individually.
	 * If highmem is not configured then the bulk of this loop gets
	 * optimized out.
	 */
	do {
		size_t len = left;
		void *vaddr;

		page = pfn_to_page(pfn);

		if (PageHighMem(page)) {
			/* clamp to the end of this page for the temporary map */
			if (len + offset > PAGE_SIZE)
				len = PAGE_SIZE - offset;

			if (cache_is_vipt_nonaliasing()) {
				vaddr = kmap_atomic(page);
				op(vaddr + offset, len, dir);
				kunmap_atomic(vaddr);
			} else {
				/*
				 * Aliasing VIPT cache: only operate through an
				 * existing mapping; kmap_high_get() returns NULL
				 * when the page is not currently mapped, in which
				 * case there is nothing in the cache to maintain.
				 */
				vaddr = kmap_high_get(page);
				if (vaddr) {
					op(vaddr + offset, len, dir);
					kunmap_high(page);
				}
			}
		} else {
			/* lowmem: permanently mapped, cover the full run at once */
			vaddr = page_address(page) + offset;
			op(vaddr, len, dir);
		}
		offset = 0;
		pfn++;
		left -= len;
	} while (left);
}
67362306a36Sopenharmony_ci
67462306a36Sopenharmony_ci/*
67562306a36Sopenharmony_ci * Make an area consistent for devices.
67662306a36Sopenharmony_ci * Note: Drivers should NOT use this function directly.
67762306a36Sopenharmony_ci * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
67862306a36Sopenharmony_ci */
67962306a36Sopenharmony_cistatic void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
68062306a36Sopenharmony_ci	size_t size, enum dma_data_direction dir)
68162306a36Sopenharmony_ci{
68262306a36Sopenharmony_ci	phys_addr_t paddr;
68362306a36Sopenharmony_ci
68462306a36Sopenharmony_ci	dma_cache_maint_page(page, off, size, dir, dmac_map_area);
68562306a36Sopenharmony_ci
68662306a36Sopenharmony_ci	paddr = page_to_phys(page) + off;
68762306a36Sopenharmony_ci	if (dir == DMA_FROM_DEVICE) {
68862306a36Sopenharmony_ci		outer_inv_range(paddr, paddr + size);
68962306a36Sopenharmony_ci	} else {
69062306a36Sopenharmony_ci		outer_clean_range(paddr, paddr + size);
69162306a36Sopenharmony_ci	}
69262306a36Sopenharmony_ci	/* FIXME: non-speculating: flush on bidirectional mappings? */
69362306a36Sopenharmony_ci}
69462306a36Sopenharmony_ci
static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = page_to_phys(page) + off;

	/* FIXME: non-speculating: not required */
	/* in any case, don't bother invalidating if DMA to device */
	if (dir != DMA_TO_DEVICE) {
		/* Outer cache is invalidated before the inner maintenance. */
		outer_inv_range(paddr, paddr + size);

		dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);
	}

	/*
	 * Mark the D-cache clean for these pages to avoid extra flushing.
	 */
	if (dir != DMA_TO_DEVICE && size >= PAGE_SIZE) {
		struct folio *folio = pfn_folio(paddr / PAGE_SIZE);
		size_t offset = offset_in_folio(folio, paddr);

		/* Walk every folio overlapping [paddr, paddr + size). */
		for (;;) {
			size_t sz = folio_size(folio) - offset;

			/* Tail folio only partially covered: stop, leave it unmarked. */
			if (size < sz)
				break;
			/*
			 * Only a folio covered from its very start may be
			 * flagged clean; the first folio can begin mid-way
			 * (offset != 0) and is then skipped.
			 */
			if (!offset)
				set_bit(PG_dcache_clean, &folio->flags);
			offset = 0;
			size -= sz;
			if (!size)
				break;
			folio = folio_next(folio);
		}
	}
}
73062306a36Sopenharmony_ci
73162306a36Sopenharmony_ci#ifdef CONFIG_ARM_DMA_USE_IOMMU
73262306a36Sopenharmony_ci
73362306a36Sopenharmony_cistatic int __dma_info_to_prot(enum dma_data_direction dir, unsigned long attrs)
73462306a36Sopenharmony_ci{
73562306a36Sopenharmony_ci	int prot = 0;
73662306a36Sopenharmony_ci
73762306a36Sopenharmony_ci	if (attrs & DMA_ATTR_PRIVILEGED)
73862306a36Sopenharmony_ci		prot |= IOMMU_PRIV;
73962306a36Sopenharmony_ci
74062306a36Sopenharmony_ci	switch (dir) {
74162306a36Sopenharmony_ci	case DMA_BIDIRECTIONAL:
74262306a36Sopenharmony_ci		return prot | IOMMU_READ | IOMMU_WRITE;
74362306a36Sopenharmony_ci	case DMA_TO_DEVICE:
74462306a36Sopenharmony_ci		return prot | IOMMU_READ;
74562306a36Sopenharmony_ci	case DMA_FROM_DEVICE:
74662306a36Sopenharmony_ci		return prot | IOMMU_WRITE;
74762306a36Sopenharmony_ci	default:
74862306a36Sopenharmony_ci		return prot;
74962306a36Sopenharmony_ci	}
75062306a36Sopenharmony_ci}
75162306a36Sopenharmony_ci
75262306a36Sopenharmony_ci/* IOMMU */
75362306a36Sopenharmony_ci
75462306a36Sopenharmony_cistatic int extend_iommu_mapping(struct dma_iommu_mapping *mapping);
75562306a36Sopenharmony_ci
/*
 * Reserve an IO virtual address range of @size bytes from @mapping's
 * bitmap allocator.  Returns the IOVA on success, DMA_MAPPING_ERROR if
 * the space is exhausted and cannot be extended.
 */
static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
				      size_t size)
{
	unsigned int order = get_order(size);
	unsigned int align = 0;
	unsigned int count, start;
	size_t mapping_size = mapping->bits << PAGE_SHIFT;
	unsigned long flags;
	dma_addr_t iova;
	int i;

	/* Cap the natural alignment order to limit fragmentation. */
	if (order > CONFIG_ARM_DMA_IOMMU_ALIGNMENT)
		order = CONFIG_ARM_DMA_IOMMU_ALIGNMENT;

	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	align = (1 << order) - 1;

	spin_lock_irqsave(&mapping->lock, flags);
	/* First pass: try each existing extension bitmap in turn. */
	for (i = 0; i < mapping->nr_bitmaps; i++) {
		start = bitmap_find_next_zero_area(mapping->bitmaps[i],
				mapping->bits, 0, count, align);

		/* bitmap_find_next_zero_area() returns > bits on failure. */
		if (start > mapping->bits)
			continue;

		bitmap_set(mapping->bitmaps[i], start, count);
		break;
	}

	/*
	 * No unused range found. Try to extend the existing mapping
	 * and perform a second attempt to reserve an IO virtual
	 * address range of size bytes.
	 */
	if (i == mapping->nr_bitmaps) {
		if (extend_iommu_mapping(mapping)) {
			spin_unlock_irqrestore(&mapping->lock, flags);
			return DMA_MAPPING_ERROR;
		}

		/*
		 * i equals the old nr_bitmaps here; assumes
		 * extend_iommu_mapping() appends exactly one bitmap —
		 * NOTE(review): confirm against its definition.
		 */
		start = bitmap_find_next_zero_area(mapping->bitmaps[i],
				mapping->bits, 0, count, align);

		if (start > mapping->bits) {
			spin_unlock_irqrestore(&mapping->lock, flags);
			return DMA_MAPPING_ERROR;
		}

		bitmap_set(mapping->bitmaps[i], start, count);
	}
	spin_unlock_irqrestore(&mapping->lock, flags);

	/* Each bitmap covers mapping_size bytes of contiguous IOVA space. */
	iova = mapping->base + (mapping_size * i);
	iova += start << PAGE_SHIFT;

	return iova;
}
81362306a36Sopenharmony_ci
81462306a36Sopenharmony_cistatic inline void __free_iova(struct dma_iommu_mapping *mapping,
81562306a36Sopenharmony_ci			       dma_addr_t addr, size_t size)
81662306a36Sopenharmony_ci{
81762306a36Sopenharmony_ci	unsigned int start, count;
81862306a36Sopenharmony_ci	size_t mapping_size = mapping->bits << PAGE_SHIFT;
81962306a36Sopenharmony_ci	unsigned long flags;
82062306a36Sopenharmony_ci	dma_addr_t bitmap_base;
82162306a36Sopenharmony_ci	u32 bitmap_index;
82262306a36Sopenharmony_ci
82362306a36Sopenharmony_ci	if (!size)
82462306a36Sopenharmony_ci		return;
82562306a36Sopenharmony_ci
82662306a36Sopenharmony_ci	bitmap_index = (u32) (addr - mapping->base) / (u32) mapping_size;
82762306a36Sopenharmony_ci	BUG_ON(addr < mapping->base || bitmap_index > mapping->extensions);
82862306a36Sopenharmony_ci
82962306a36Sopenharmony_ci	bitmap_base = mapping->base + mapping_size * bitmap_index;
83062306a36Sopenharmony_ci
83162306a36Sopenharmony_ci	start = (addr - bitmap_base) >>	PAGE_SHIFT;
83262306a36Sopenharmony_ci
83362306a36Sopenharmony_ci	if (addr + size > bitmap_base + mapping_size) {
83462306a36Sopenharmony_ci		/*
83562306a36Sopenharmony_ci		 * The address range to be freed reaches into the iova
83662306a36Sopenharmony_ci		 * range of the next bitmap. This should not happen as
83762306a36Sopenharmony_ci		 * we don't allow this in __alloc_iova (at the
83862306a36Sopenharmony_ci		 * moment).
83962306a36Sopenharmony_ci		 */
84062306a36Sopenharmony_ci		BUG();
84162306a36Sopenharmony_ci	} else
84262306a36Sopenharmony_ci		count = size >> PAGE_SHIFT;
84362306a36Sopenharmony_ci
84462306a36Sopenharmony_ci	spin_lock_irqsave(&mapping->lock, flags);
84562306a36Sopenharmony_ci	bitmap_clear(mapping->bitmaps[bitmap_index], start, count);
84662306a36Sopenharmony_ci	spin_unlock_irqrestore(&mapping->lock, flags);
84762306a36Sopenharmony_ci}
84862306a36Sopenharmony_ci
/* We'll try 2M, 1M, 64K, and finally 4K; array must end with 0! */
/* (sizes above assume 4K pages; the trailing 0 also terminates the walk) */
static const int iommu_order_array[] = { 9, 8, 4, 0 };
85162306a36Sopenharmony_ci
85262306a36Sopenharmony_cistatic struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
85362306a36Sopenharmony_ci					  gfp_t gfp, unsigned long attrs,
85462306a36Sopenharmony_ci					  int coherent_flag)
85562306a36Sopenharmony_ci{
85662306a36Sopenharmony_ci	struct page **pages;
85762306a36Sopenharmony_ci	int count = size >> PAGE_SHIFT;
85862306a36Sopenharmony_ci	int array_size = count * sizeof(struct page *);
85962306a36Sopenharmony_ci	int i = 0;
86062306a36Sopenharmony_ci	int order_idx = 0;
86162306a36Sopenharmony_ci
86262306a36Sopenharmony_ci	if (array_size <= PAGE_SIZE)
86362306a36Sopenharmony_ci		pages = kzalloc(array_size, GFP_KERNEL);
86462306a36Sopenharmony_ci	else
86562306a36Sopenharmony_ci		pages = vzalloc(array_size);
86662306a36Sopenharmony_ci	if (!pages)
86762306a36Sopenharmony_ci		return NULL;
86862306a36Sopenharmony_ci
86962306a36Sopenharmony_ci	if (attrs & DMA_ATTR_FORCE_CONTIGUOUS)
87062306a36Sopenharmony_ci	{
87162306a36Sopenharmony_ci		unsigned long order = get_order(size);
87262306a36Sopenharmony_ci		struct page *page;
87362306a36Sopenharmony_ci
87462306a36Sopenharmony_ci		page = dma_alloc_from_contiguous(dev, count, order,
87562306a36Sopenharmony_ci						 gfp & __GFP_NOWARN);
87662306a36Sopenharmony_ci		if (!page)
87762306a36Sopenharmony_ci			goto error;
87862306a36Sopenharmony_ci
87962306a36Sopenharmony_ci		__dma_clear_buffer(page, size, coherent_flag);
88062306a36Sopenharmony_ci
88162306a36Sopenharmony_ci		for (i = 0; i < count; i++)
88262306a36Sopenharmony_ci			pages[i] = page + i;
88362306a36Sopenharmony_ci
88462306a36Sopenharmony_ci		return pages;
88562306a36Sopenharmony_ci	}
88662306a36Sopenharmony_ci
88762306a36Sopenharmony_ci	/* Go straight to 4K chunks if caller says it's OK. */
88862306a36Sopenharmony_ci	if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
88962306a36Sopenharmony_ci		order_idx = ARRAY_SIZE(iommu_order_array) - 1;
89062306a36Sopenharmony_ci
89162306a36Sopenharmony_ci	/*
89262306a36Sopenharmony_ci	 * IOMMU can map any pages, so himem can also be used here
89362306a36Sopenharmony_ci	 */
89462306a36Sopenharmony_ci	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;
89562306a36Sopenharmony_ci
89662306a36Sopenharmony_ci	while (count) {
89762306a36Sopenharmony_ci		int j, order;
89862306a36Sopenharmony_ci
89962306a36Sopenharmony_ci		order = iommu_order_array[order_idx];
90062306a36Sopenharmony_ci
90162306a36Sopenharmony_ci		/* Drop down when we get small */
90262306a36Sopenharmony_ci		if (__fls(count) < order) {
90362306a36Sopenharmony_ci			order_idx++;
90462306a36Sopenharmony_ci			continue;
90562306a36Sopenharmony_ci		}
90662306a36Sopenharmony_ci
90762306a36Sopenharmony_ci		if (order) {
90862306a36Sopenharmony_ci			/* See if it's easy to allocate a high-order chunk */
90962306a36Sopenharmony_ci			pages[i] = alloc_pages(gfp | __GFP_NORETRY, order);
91062306a36Sopenharmony_ci
91162306a36Sopenharmony_ci			/* Go down a notch at first sign of pressure */
91262306a36Sopenharmony_ci			if (!pages[i]) {
91362306a36Sopenharmony_ci				order_idx++;
91462306a36Sopenharmony_ci				continue;
91562306a36Sopenharmony_ci			}
91662306a36Sopenharmony_ci		} else {
91762306a36Sopenharmony_ci			pages[i] = alloc_pages(gfp, 0);
91862306a36Sopenharmony_ci			if (!pages[i])
91962306a36Sopenharmony_ci				goto error;
92062306a36Sopenharmony_ci		}
92162306a36Sopenharmony_ci
92262306a36Sopenharmony_ci		if (order) {
92362306a36Sopenharmony_ci			split_page(pages[i], order);
92462306a36Sopenharmony_ci			j = 1 << order;
92562306a36Sopenharmony_ci			while (--j)
92662306a36Sopenharmony_ci				pages[i + j] = pages[i] + j;
92762306a36Sopenharmony_ci		}
92862306a36Sopenharmony_ci
92962306a36Sopenharmony_ci		__dma_clear_buffer(pages[i], PAGE_SIZE << order, coherent_flag);
93062306a36Sopenharmony_ci		i += 1 << order;
93162306a36Sopenharmony_ci		count -= 1 << order;
93262306a36Sopenharmony_ci	}
93362306a36Sopenharmony_ci
93462306a36Sopenharmony_ci	return pages;
93562306a36Sopenharmony_cierror:
93662306a36Sopenharmony_ci	while (i--)
93762306a36Sopenharmony_ci		if (pages[i])
93862306a36Sopenharmony_ci			__free_pages(pages[i], 0);
93962306a36Sopenharmony_ci	kvfree(pages);
94062306a36Sopenharmony_ci	return NULL;
94162306a36Sopenharmony_ci}
94262306a36Sopenharmony_ci
94362306a36Sopenharmony_cistatic int __iommu_free_buffer(struct device *dev, struct page **pages,
94462306a36Sopenharmony_ci			       size_t size, unsigned long attrs)
94562306a36Sopenharmony_ci{
94662306a36Sopenharmony_ci	int count = size >> PAGE_SHIFT;
94762306a36Sopenharmony_ci	int i;
94862306a36Sopenharmony_ci
94962306a36Sopenharmony_ci	if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
95062306a36Sopenharmony_ci		dma_release_from_contiguous(dev, pages[0], count);
95162306a36Sopenharmony_ci	} else {
95262306a36Sopenharmony_ci		for (i = 0; i < count; i++)
95362306a36Sopenharmony_ci			if (pages[i])
95462306a36Sopenharmony_ci				__free_pages(pages[i], 0);
95562306a36Sopenharmony_ci	}
95662306a36Sopenharmony_ci
95762306a36Sopenharmony_ci	kvfree(pages);
95862306a36Sopenharmony_ci	return 0;
95962306a36Sopenharmony_ci}
96062306a36Sopenharmony_ci
96162306a36Sopenharmony_ci/*
96262306a36Sopenharmony_ci * Create a mapping in device IO address space for specified pages
96362306a36Sopenharmony_ci */
static dma_addr_t
__iommu_create_mapping(struct device *dev, struct page **pages, size_t size,
		       unsigned long attrs)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	dma_addr_t dma_addr, iova;
	int i;

	/* Reserve one contiguous IOVA range for the whole buffer. */
	dma_addr = __alloc_iova(mapping, size);
	if (dma_addr == DMA_MAPPING_ERROR)
		return dma_addr;

	iova = dma_addr;
	for (i = 0; i < count; ) {
		int ret;

		/*
		 * Coalesce runs of physically contiguous pages so a single
		 * iommu_map() call covers as many of them as possible.
		 */
		unsigned int next_pfn = page_to_pfn(pages[i]) + 1;
		phys_addr_t phys = page_to_phys(pages[i]);
		unsigned int len, j;

		for (j = i + 1; j < count; j++, next_pfn++)
			if (page_to_pfn(pages[j]) != next_pfn)
				break;

		len = (j - i) << PAGE_SHIFT;
		ret = iommu_map(mapping->domain, iova, phys, len,
				__dma_info_to_prot(DMA_BIDIRECTIONAL, attrs),
				GFP_KERNEL);
		if (ret < 0)
			goto fail;
		iova += len;
		i = j;
	}
	return dma_addr;
fail:
	/* Undo the partial mapping and release the entire IOVA range. */
	iommu_unmap(mapping->domain, dma_addr, iova-dma_addr);
	__free_iova(mapping, dma_addr, size);
	return DMA_MAPPING_ERROR;
}
100462306a36Sopenharmony_ci
100562306a36Sopenharmony_cistatic int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size)
100662306a36Sopenharmony_ci{
100762306a36Sopenharmony_ci	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
100862306a36Sopenharmony_ci
100962306a36Sopenharmony_ci	/*
101062306a36Sopenharmony_ci	 * add optional in-page offset from iova to size and align
101162306a36Sopenharmony_ci	 * result to page size
101262306a36Sopenharmony_ci	 */
101362306a36Sopenharmony_ci	size = PAGE_ALIGN((iova & ~PAGE_MASK) + size);
101462306a36Sopenharmony_ci	iova &= PAGE_MASK;
101562306a36Sopenharmony_ci
101662306a36Sopenharmony_ci	iommu_unmap(mapping->domain, iova, size);
101762306a36Sopenharmony_ci	__free_iova(mapping, iova, size);
101862306a36Sopenharmony_ci	return 0;
101962306a36Sopenharmony_ci}
102062306a36Sopenharmony_ci
102162306a36Sopenharmony_cistatic struct page **__atomic_get_pages(void *addr)
102262306a36Sopenharmony_ci{
102362306a36Sopenharmony_ci	struct page *page;
102462306a36Sopenharmony_ci	phys_addr_t phys;
102562306a36Sopenharmony_ci
102662306a36Sopenharmony_ci	phys = gen_pool_virt_to_phys(atomic_pool, (unsigned long)addr);
102762306a36Sopenharmony_ci	page = phys_to_page(phys);
102862306a36Sopenharmony_ci
102962306a36Sopenharmony_ci	return (struct page **)page;
103062306a36Sopenharmony_ci}
103162306a36Sopenharmony_ci
103262306a36Sopenharmony_cistatic struct page **__iommu_get_pages(void *cpu_addr, unsigned long attrs)
103362306a36Sopenharmony_ci{
103462306a36Sopenharmony_ci	if (__in_atomic_pool(cpu_addr, PAGE_SIZE))
103562306a36Sopenharmony_ci		return __atomic_get_pages(cpu_addr);
103662306a36Sopenharmony_ci
103762306a36Sopenharmony_ci	if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
103862306a36Sopenharmony_ci		return cpu_addr;
103962306a36Sopenharmony_ci
104062306a36Sopenharmony_ci	return dma_common_find_pages(cpu_addr);
104162306a36Sopenharmony_ci}
104262306a36Sopenharmony_ci
104362306a36Sopenharmony_cistatic void *__iommu_alloc_simple(struct device *dev, size_t size, gfp_t gfp,
104462306a36Sopenharmony_ci				  dma_addr_t *handle, int coherent_flag,
104562306a36Sopenharmony_ci				  unsigned long attrs)
104662306a36Sopenharmony_ci{
104762306a36Sopenharmony_ci	struct page *page;
104862306a36Sopenharmony_ci	void *addr;
104962306a36Sopenharmony_ci
105062306a36Sopenharmony_ci	if (coherent_flag  == COHERENT)
105162306a36Sopenharmony_ci		addr = __alloc_simple_buffer(dev, size, gfp, &page);
105262306a36Sopenharmony_ci	else
105362306a36Sopenharmony_ci		addr = __alloc_from_pool(size, &page);
105462306a36Sopenharmony_ci	if (!addr)
105562306a36Sopenharmony_ci		return NULL;
105662306a36Sopenharmony_ci
105762306a36Sopenharmony_ci	*handle = __iommu_create_mapping(dev, &page, size, attrs);
105862306a36Sopenharmony_ci	if (*handle == DMA_MAPPING_ERROR)
105962306a36Sopenharmony_ci		goto err_mapping;
106062306a36Sopenharmony_ci
106162306a36Sopenharmony_ci	return addr;
106262306a36Sopenharmony_ci
106362306a36Sopenharmony_cierr_mapping:
106462306a36Sopenharmony_ci	__free_from_pool(addr, size);
106562306a36Sopenharmony_ci	return NULL;
106662306a36Sopenharmony_ci}
106762306a36Sopenharmony_ci
106862306a36Sopenharmony_cistatic void __iommu_free_atomic(struct device *dev, void *cpu_addr,
106962306a36Sopenharmony_ci			dma_addr_t handle, size_t size, int coherent_flag)
107062306a36Sopenharmony_ci{
107162306a36Sopenharmony_ci	__iommu_remove_mapping(dev, handle, size);
107262306a36Sopenharmony_ci	if (coherent_flag == COHERENT)
107362306a36Sopenharmony_ci		__dma_free_buffer(virt_to_page(cpu_addr), size);
107462306a36Sopenharmony_ci	else
107562306a36Sopenharmony_ci		__free_from_pool(cpu_addr, size);
107662306a36Sopenharmony_ci}
107762306a36Sopenharmony_ci
static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
	    dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
{
	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
	struct page **pages;
	void *addr = NULL;
	int coherent_flag = dev->dma_coherent ? COHERENT : NORMAL;

	*handle = DMA_MAPPING_ERROR;
	size = PAGE_ALIGN(size);

	/*
	 * Coherent devices and atomic (non-blocking) contexts take the
	 * simple single-buffer path instead of the page-array one.
	 */
	if (coherent_flag  == COHERENT || !gfpflags_allow_blocking(gfp))
		return __iommu_alloc_simple(dev, size, gfp, handle,
					    coherent_flag, attrs);

	pages = __iommu_alloc_buffer(dev, size, gfp, attrs, coherent_flag);
	if (!pages)
		return NULL;

	*handle = __iommu_create_mapping(dev, pages, size, attrs);
	if (*handle == DMA_MAPPING_ERROR)
		goto err_buffer;

	/* No kernel mapping requested: the page array itself is the cookie. */
	if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
		return pages;

	addr = dma_common_pages_remap(pages, size, prot,
				   __builtin_return_address(0));
	if (!addr)
		goto err_mapping;

	return addr;

	/* Unwind in reverse order of construction. */
err_mapping:
	__iommu_remove_mapping(dev, *handle, size);
err_buffer:
	__iommu_free_buffer(dev, pages, size, attrs);
	return NULL;
}
111762306a36Sopenharmony_ci
111862306a36Sopenharmony_cistatic int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
111962306a36Sopenharmony_ci		    void *cpu_addr, dma_addr_t dma_addr, size_t size,
112062306a36Sopenharmony_ci		    unsigned long attrs)
112162306a36Sopenharmony_ci{
112262306a36Sopenharmony_ci	struct page **pages = __iommu_get_pages(cpu_addr, attrs);
112362306a36Sopenharmony_ci	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
112462306a36Sopenharmony_ci	int err;
112562306a36Sopenharmony_ci
112662306a36Sopenharmony_ci	if (!pages)
112762306a36Sopenharmony_ci		return -ENXIO;
112862306a36Sopenharmony_ci
112962306a36Sopenharmony_ci	if (vma->vm_pgoff >= nr_pages)
113062306a36Sopenharmony_ci		return -ENXIO;
113162306a36Sopenharmony_ci
113262306a36Sopenharmony_ci	if (!dev->dma_coherent)
113362306a36Sopenharmony_ci		vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
113462306a36Sopenharmony_ci
113562306a36Sopenharmony_ci	err = vm_map_pages(vma, pages, nr_pages);
113662306a36Sopenharmony_ci	if (err)
113762306a36Sopenharmony_ci		pr_err("Remapping memory failed: %d\n", err);
113862306a36Sopenharmony_ci
113962306a36Sopenharmony_ci	return err;
114062306a36Sopenharmony_ci}
114162306a36Sopenharmony_ci
114262306a36Sopenharmony_ci/*
114362306a36Sopenharmony_ci * free a page as defined by the above mapping.
114462306a36Sopenharmony_ci * Must not be called with IRQs disabled.
114562306a36Sopenharmony_ci */
114662306a36Sopenharmony_cistatic void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
114762306a36Sopenharmony_ci	dma_addr_t handle, unsigned long attrs)
114862306a36Sopenharmony_ci{
114962306a36Sopenharmony_ci	int coherent_flag = dev->dma_coherent ? COHERENT : NORMAL;
115062306a36Sopenharmony_ci	struct page **pages;
115162306a36Sopenharmony_ci	size = PAGE_ALIGN(size);
115262306a36Sopenharmony_ci
115362306a36Sopenharmony_ci	if (coherent_flag == COHERENT || __in_atomic_pool(cpu_addr, size)) {
115462306a36Sopenharmony_ci		__iommu_free_atomic(dev, cpu_addr, handle, size, coherent_flag);
115562306a36Sopenharmony_ci		return;
115662306a36Sopenharmony_ci	}
115762306a36Sopenharmony_ci
115862306a36Sopenharmony_ci	pages = __iommu_get_pages(cpu_addr, attrs);
115962306a36Sopenharmony_ci	if (!pages) {
116062306a36Sopenharmony_ci		WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
116162306a36Sopenharmony_ci		return;
116262306a36Sopenharmony_ci	}
116362306a36Sopenharmony_ci
116462306a36Sopenharmony_ci	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0)
116562306a36Sopenharmony_ci		dma_common_free_remap(cpu_addr, size);
116662306a36Sopenharmony_ci
116762306a36Sopenharmony_ci	__iommu_remove_mapping(dev, handle, size);
116862306a36Sopenharmony_ci	__iommu_free_buffer(dev, pages, size, attrs);
116962306a36Sopenharmony_ci}
117062306a36Sopenharmony_ci
117162306a36Sopenharmony_cistatic int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
117262306a36Sopenharmony_ci				 void *cpu_addr, dma_addr_t dma_addr,
117362306a36Sopenharmony_ci				 size_t size, unsigned long attrs)
117462306a36Sopenharmony_ci{
117562306a36Sopenharmony_ci	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
117662306a36Sopenharmony_ci	struct page **pages = __iommu_get_pages(cpu_addr, attrs);
117762306a36Sopenharmony_ci
117862306a36Sopenharmony_ci	if (!pages)
117962306a36Sopenharmony_ci		return -ENXIO;
118062306a36Sopenharmony_ci
118162306a36Sopenharmony_ci	return sg_alloc_table_from_pages(sgt, pages, count, 0, size,
118262306a36Sopenharmony_ci					 GFP_KERNEL);
118362306a36Sopenharmony_ci}
118462306a36Sopenharmony_ci
118562306a36Sopenharmony_ci/*
118662306a36Sopenharmony_ci * Map a part of the scatter-gather list into contiguous io address space
118762306a36Sopenharmony_ci */
static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
			  size_t size, dma_addr_t *handle,
			  enum dma_data_direction dir, unsigned long attrs)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	dma_addr_t iova, iova_base;
	int ret = 0;
	unsigned int count;
	struct scatterlist *s;
	int prot;

	size = PAGE_ALIGN(size);
	*handle = DMA_MAPPING_ERROR;

	/* Reserve a single contiguous IOVA range covering the whole chunk. */
	iova_base = iova = __alloc_iova(mapping, size);
	if (iova == DMA_MAPPING_ERROR)
		return -ENOMEM;

	/* count tracks pages mapped so far, so a failure can unwind exactly. */
	for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) {
		phys_addr_t phys = page_to_phys(sg_page(s));
		unsigned int len = PAGE_ALIGN(s->offset + s->length);

		/* Maintain CPU caches before the device can see the buffer. */
		if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
			__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);

		prot = __dma_info_to_prot(dir, attrs);

		ret = iommu_map(mapping->domain, iova, phys, len, prot,
				GFP_KERNEL);
		if (ret < 0)
			goto fail;
		count += len >> PAGE_SHIFT;
		iova += len;
	}
	*handle = iova_base;

	return 0;
fail:
	/* Unmap only what was actually mapped, free the full reservation. */
	iommu_unmap(mapping->domain, iova_base, count * PAGE_SIZE);
	__free_iova(mapping, iova_base, size);
	return ret;
}
123062306a36Sopenharmony_ci
/**
 * arm_iommu_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 * @attrs: DMA mapping attributes (DMA_ATTR_*)
 *
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * The scatter gather list elements are merged together (if possible) and
 * tagged with the appropriate dma address and length. They are obtained via
 * sg_dma_{address,length}.
 *
 * Returns the number of mapped DMA segments on success, or a negative
 * errno on failure.
 */
static int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *s = sg, *dma = sg, *start = sg;
	int i, count = 0, ret;
	unsigned int offset = s->offset;
	unsigned int size = s->offset + s->length;
	unsigned int max = dma_get_max_seg_size(dev);

	for (i = 1; i < nents; i++) {
		s = sg_next(s);

		s->dma_length = 0;

		/*
		 * Flush the accumulated chunk when this entry cannot be
		 * merged into it: it has an in-page offset, the running
		 * size is not page aligned, or appending it would exceed
		 * the device's segment size limit.
		 */
		if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) {
			ret = __map_sg_chunk(dev, start, size,
					     &dma->dma_address, dir, attrs);
			if (ret < 0)
				goto bad_mapping;

			dma->dma_address += offset;
			dma->dma_length = size - offset;

			/* Start a fresh chunk from the current entry. */
			size = offset = s->offset;
			start = s;
			dma = sg_next(dma);
			count += 1;
		}
		size += s->length;
	}
	/* Flush the final (or only) chunk. */
	ret = __map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs);
	if (ret < 0)
		goto bad_mapping;

	dma->dma_address += offset;
	dma->dma_length = size - offset;

	return count+1;

bad_mapping:
	/* Tear down every chunk that was successfully mapped so far. */
	for_each_sg(sg, s, count, i)
		__iommu_remove_mapping(dev, sg_dma_address(s), sg_dma_len(s));
	if (ret == -ENOMEM)
		return ret;
	return -EINVAL;
}
128962306a36Sopenharmony_ci
129062306a36Sopenharmony_ci/**
129162306a36Sopenharmony_ci * arm_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
129262306a36Sopenharmony_ci * @dev: valid struct device pointer
129362306a36Sopenharmony_ci * @sg: list of buffers
129462306a36Sopenharmony_ci * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
129562306a36Sopenharmony_ci * @dir: DMA transfer direction (same as was passed to dma_map_sg)
129662306a36Sopenharmony_ci *
129762306a36Sopenharmony_ci * Unmap a set of streaming mode DMA translations.  Again, CPU access
129862306a36Sopenharmony_ci * rules concerning calls here are the same as for dma_unmap_single().
129962306a36Sopenharmony_ci */
130062306a36Sopenharmony_cistatic void arm_iommu_unmap_sg(struct device *dev,
130162306a36Sopenharmony_ci			       struct scatterlist *sg, int nents,
130262306a36Sopenharmony_ci			       enum dma_data_direction dir,
130362306a36Sopenharmony_ci			       unsigned long attrs)
130462306a36Sopenharmony_ci{
130562306a36Sopenharmony_ci	struct scatterlist *s;
130662306a36Sopenharmony_ci	int i;
130762306a36Sopenharmony_ci
130862306a36Sopenharmony_ci	for_each_sg(sg, s, nents, i) {
130962306a36Sopenharmony_ci		if (sg_dma_len(s))
131062306a36Sopenharmony_ci			__iommu_remove_mapping(dev, sg_dma_address(s),
131162306a36Sopenharmony_ci					       sg_dma_len(s));
131262306a36Sopenharmony_ci		if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
131362306a36Sopenharmony_ci			__dma_page_dev_to_cpu(sg_page(s), s->offset,
131462306a36Sopenharmony_ci					      s->length, dir);
131562306a36Sopenharmony_ci	}
131662306a36Sopenharmony_ci}
131762306a36Sopenharmony_ci
131862306a36Sopenharmony_ci/**
131962306a36Sopenharmony_ci * arm_iommu_sync_sg_for_cpu
132062306a36Sopenharmony_ci * @dev: valid struct device pointer
132162306a36Sopenharmony_ci * @sg: list of buffers
132262306a36Sopenharmony_ci * @nents: number of buffers to map (returned from dma_map_sg)
132362306a36Sopenharmony_ci * @dir: DMA transfer direction (same as was passed to dma_map_sg)
132462306a36Sopenharmony_ci */
132562306a36Sopenharmony_cistatic void arm_iommu_sync_sg_for_cpu(struct device *dev,
132662306a36Sopenharmony_ci			struct scatterlist *sg,
132762306a36Sopenharmony_ci			int nents, enum dma_data_direction dir)
132862306a36Sopenharmony_ci{
132962306a36Sopenharmony_ci	struct scatterlist *s;
133062306a36Sopenharmony_ci	int i;
133162306a36Sopenharmony_ci
133262306a36Sopenharmony_ci	if (dev->dma_coherent)
133362306a36Sopenharmony_ci		return;
133462306a36Sopenharmony_ci
133562306a36Sopenharmony_ci	for_each_sg(sg, s, nents, i)
133662306a36Sopenharmony_ci		__dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir);
133762306a36Sopenharmony_ci
133862306a36Sopenharmony_ci}
133962306a36Sopenharmony_ci
134062306a36Sopenharmony_ci/**
134162306a36Sopenharmony_ci * arm_iommu_sync_sg_for_device
134262306a36Sopenharmony_ci * @dev: valid struct device pointer
134362306a36Sopenharmony_ci * @sg: list of buffers
134462306a36Sopenharmony_ci * @nents: number of buffers to map (returned from dma_map_sg)
134562306a36Sopenharmony_ci * @dir: DMA transfer direction (same as was passed to dma_map_sg)
134662306a36Sopenharmony_ci */
134762306a36Sopenharmony_cistatic void arm_iommu_sync_sg_for_device(struct device *dev,
134862306a36Sopenharmony_ci			struct scatterlist *sg,
134962306a36Sopenharmony_ci			int nents, enum dma_data_direction dir)
135062306a36Sopenharmony_ci{
135162306a36Sopenharmony_ci	struct scatterlist *s;
135262306a36Sopenharmony_ci	int i;
135362306a36Sopenharmony_ci
135462306a36Sopenharmony_ci	if (dev->dma_coherent)
135562306a36Sopenharmony_ci		return;
135662306a36Sopenharmony_ci
135762306a36Sopenharmony_ci	for_each_sg(sg, s, nents, i)
135862306a36Sopenharmony_ci		__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
135962306a36Sopenharmony_ci}
136062306a36Sopenharmony_ci
136162306a36Sopenharmony_ci/**
136262306a36Sopenharmony_ci * arm_iommu_map_page
136362306a36Sopenharmony_ci * @dev: valid struct device pointer
136462306a36Sopenharmony_ci * @page: page that buffer resides in
136562306a36Sopenharmony_ci * @offset: offset into page for start of buffer
136662306a36Sopenharmony_ci * @size: size of buffer to map
136762306a36Sopenharmony_ci * @dir: DMA transfer direction
136862306a36Sopenharmony_ci *
136962306a36Sopenharmony_ci * IOMMU aware version of arm_dma_map_page()
137062306a36Sopenharmony_ci */
137162306a36Sopenharmony_cistatic dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
137262306a36Sopenharmony_ci	     unsigned long offset, size_t size, enum dma_data_direction dir,
137362306a36Sopenharmony_ci	     unsigned long attrs)
137462306a36Sopenharmony_ci{
137562306a36Sopenharmony_ci	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
137662306a36Sopenharmony_ci	dma_addr_t dma_addr;
137762306a36Sopenharmony_ci	int ret, prot, len = PAGE_ALIGN(size + offset);
137862306a36Sopenharmony_ci
137962306a36Sopenharmony_ci	if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
138062306a36Sopenharmony_ci		__dma_page_cpu_to_dev(page, offset, size, dir);
138162306a36Sopenharmony_ci
138262306a36Sopenharmony_ci	dma_addr = __alloc_iova(mapping, len);
138362306a36Sopenharmony_ci	if (dma_addr == DMA_MAPPING_ERROR)
138462306a36Sopenharmony_ci		return dma_addr;
138562306a36Sopenharmony_ci
138662306a36Sopenharmony_ci	prot = __dma_info_to_prot(dir, attrs);
138762306a36Sopenharmony_ci
138862306a36Sopenharmony_ci	ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len,
138962306a36Sopenharmony_ci			prot, GFP_KERNEL);
139062306a36Sopenharmony_ci	if (ret < 0)
139162306a36Sopenharmony_ci		goto fail;
139262306a36Sopenharmony_ci
139362306a36Sopenharmony_ci	return dma_addr + offset;
139462306a36Sopenharmony_cifail:
139562306a36Sopenharmony_ci	__free_iova(mapping, dma_addr, len);
139662306a36Sopenharmony_ci	return DMA_MAPPING_ERROR;
139762306a36Sopenharmony_ci}
139862306a36Sopenharmony_ci
139962306a36Sopenharmony_ci/**
140062306a36Sopenharmony_ci * arm_iommu_unmap_page
140162306a36Sopenharmony_ci * @dev: valid struct device pointer
140262306a36Sopenharmony_ci * @handle: DMA address of buffer
140362306a36Sopenharmony_ci * @size: size of buffer (same as passed to dma_map_page)
140462306a36Sopenharmony_ci * @dir: DMA transfer direction (same as passed to dma_map_page)
140562306a36Sopenharmony_ci *
140662306a36Sopenharmony_ci * IOMMU aware version of arm_dma_unmap_page()
140762306a36Sopenharmony_ci */
140862306a36Sopenharmony_cistatic void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
140962306a36Sopenharmony_ci		size_t size, enum dma_data_direction dir, unsigned long attrs)
141062306a36Sopenharmony_ci{
141162306a36Sopenharmony_ci	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
141262306a36Sopenharmony_ci	dma_addr_t iova = handle & PAGE_MASK;
141362306a36Sopenharmony_ci	struct page *page;
141462306a36Sopenharmony_ci	int offset = handle & ~PAGE_MASK;
141562306a36Sopenharmony_ci	int len = PAGE_ALIGN(size + offset);
141662306a36Sopenharmony_ci
141762306a36Sopenharmony_ci	if (!iova)
141862306a36Sopenharmony_ci		return;
141962306a36Sopenharmony_ci
142062306a36Sopenharmony_ci	if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
142162306a36Sopenharmony_ci		page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
142262306a36Sopenharmony_ci		__dma_page_dev_to_cpu(page, offset, size, dir);
142362306a36Sopenharmony_ci	}
142462306a36Sopenharmony_ci
142562306a36Sopenharmony_ci	iommu_unmap(mapping->domain, iova, len);
142662306a36Sopenharmony_ci	__free_iova(mapping, iova, len);
142762306a36Sopenharmony_ci}
142862306a36Sopenharmony_ci
142962306a36Sopenharmony_ci/**
143062306a36Sopenharmony_ci * arm_iommu_map_resource - map a device resource for DMA
143162306a36Sopenharmony_ci * @dev: valid struct device pointer
143262306a36Sopenharmony_ci * @phys_addr: physical address of resource
143362306a36Sopenharmony_ci * @size: size of resource to map
143462306a36Sopenharmony_ci * @dir: DMA transfer direction
143562306a36Sopenharmony_ci */
143662306a36Sopenharmony_cistatic dma_addr_t arm_iommu_map_resource(struct device *dev,
143762306a36Sopenharmony_ci		phys_addr_t phys_addr, size_t size,
143862306a36Sopenharmony_ci		enum dma_data_direction dir, unsigned long attrs)
143962306a36Sopenharmony_ci{
144062306a36Sopenharmony_ci	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
144162306a36Sopenharmony_ci	dma_addr_t dma_addr;
144262306a36Sopenharmony_ci	int ret, prot;
144362306a36Sopenharmony_ci	phys_addr_t addr = phys_addr & PAGE_MASK;
144462306a36Sopenharmony_ci	unsigned int offset = phys_addr & ~PAGE_MASK;
144562306a36Sopenharmony_ci	size_t len = PAGE_ALIGN(size + offset);
144662306a36Sopenharmony_ci
144762306a36Sopenharmony_ci	dma_addr = __alloc_iova(mapping, len);
144862306a36Sopenharmony_ci	if (dma_addr == DMA_MAPPING_ERROR)
144962306a36Sopenharmony_ci		return dma_addr;
145062306a36Sopenharmony_ci
145162306a36Sopenharmony_ci	prot = __dma_info_to_prot(dir, attrs) | IOMMU_MMIO;
145262306a36Sopenharmony_ci
145362306a36Sopenharmony_ci	ret = iommu_map(mapping->domain, dma_addr, addr, len, prot, GFP_KERNEL);
145462306a36Sopenharmony_ci	if (ret < 0)
145562306a36Sopenharmony_ci		goto fail;
145662306a36Sopenharmony_ci
145762306a36Sopenharmony_ci	return dma_addr + offset;
145862306a36Sopenharmony_cifail:
145962306a36Sopenharmony_ci	__free_iova(mapping, dma_addr, len);
146062306a36Sopenharmony_ci	return DMA_MAPPING_ERROR;
146162306a36Sopenharmony_ci}
146262306a36Sopenharmony_ci
146362306a36Sopenharmony_ci/**
146462306a36Sopenharmony_ci * arm_iommu_unmap_resource - unmap a device DMA resource
146562306a36Sopenharmony_ci * @dev: valid struct device pointer
146662306a36Sopenharmony_ci * @dma_handle: DMA address to resource
146762306a36Sopenharmony_ci * @size: size of resource to map
146862306a36Sopenharmony_ci * @dir: DMA transfer direction
146962306a36Sopenharmony_ci */
147062306a36Sopenharmony_cistatic void arm_iommu_unmap_resource(struct device *dev, dma_addr_t dma_handle,
147162306a36Sopenharmony_ci		size_t size, enum dma_data_direction dir,
147262306a36Sopenharmony_ci		unsigned long attrs)
147362306a36Sopenharmony_ci{
147462306a36Sopenharmony_ci	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
147562306a36Sopenharmony_ci	dma_addr_t iova = dma_handle & PAGE_MASK;
147662306a36Sopenharmony_ci	unsigned int offset = dma_handle & ~PAGE_MASK;
147762306a36Sopenharmony_ci	size_t len = PAGE_ALIGN(size + offset);
147862306a36Sopenharmony_ci
147962306a36Sopenharmony_ci	if (!iova)
148062306a36Sopenharmony_ci		return;
148162306a36Sopenharmony_ci
148262306a36Sopenharmony_ci	iommu_unmap(mapping->domain, iova, len);
148362306a36Sopenharmony_ci	__free_iova(mapping, iova, len);
148462306a36Sopenharmony_ci}
148562306a36Sopenharmony_ci
148662306a36Sopenharmony_cistatic void arm_iommu_sync_single_for_cpu(struct device *dev,
148762306a36Sopenharmony_ci		dma_addr_t handle, size_t size, enum dma_data_direction dir)
148862306a36Sopenharmony_ci{
148962306a36Sopenharmony_ci	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
149062306a36Sopenharmony_ci	dma_addr_t iova = handle & PAGE_MASK;
149162306a36Sopenharmony_ci	struct page *page;
149262306a36Sopenharmony_ci	unsigned int offset = handle & ~PAGE_MASK;
149362306a36Sopenharmony_ci
149462306a36Sopenharmony_ci	if (dev->dma_coherent || !iova)
149562306a36Sopenharmony_ci		return;
149662306a36Sopenharmony_ci
149762306a36Sopenharmony_ci	page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
149862306a36Sopenharmony_ci	__dma_page_dev_to_cpu(page, offset, size, dir);
149962306a36Sopenharmony_ci}
150062306a36Sopenharmony_ci
150162306a36Sopenharmony_cistatic void arm_iommu_sync_single_for_device(struct device *dev,
150262306a36Sopenharmony_ci		dma_addr_t handle, size_t size, enum dma_data_direction dir)
150362306a36Sopenharmony_ci{
150462306a36Sopenharmony_ci	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
150562306a36Sopenharmony_ci	dma_addr_t iova = handle & PAGE_MASK;
150662306a36Sopenharmony_ci	struct page *page;
150762306a36Sopenharmony_ci	unsigned int offset = handle & ~PAGE_MASK;
150862306a36Sopenharmony_ci
150962306a36Sopenharmony_ci	if (dev->dma_coherent || !iova)
151062306a36Sopenharmony_ci		return;
151162306a36Sopenharmony_ci
151262306a36Sopenharmony_ci	page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
151362306a36Sopenharmony_ci	__dma_page_cpu_to_dev(page, offset, size, dir);
151462306a36Sopenharmony_ci}
151562306a36Sopenharmony_ci
/*
 * dma_map_ops used for devices attached to an IOMMU mapping via
 * arm_iommu_attach_device() / arm_setup_iommu_dma_ops().
 */
static const struct dma_map_ops iommu_ops = {
	.alloc		= arm_iommu_alloc_attrs,
	.free		= arm_iommu_free_attrs,
	.mmap		= arm_iommu_mmap_attrs,
	.get_sgtable	= arm_iommu_get_sgtable,

	.map_page		= arm_iommu_map_page,
	.unmap_page		= arm_iommu_unmap_page,
	.sync_single_for_cpu	= arm_iommu_sync_single_for_cpu,
	.sync_single_for_device	= arm_iommu_sync_single_for_device,

	.map_sg			= arm_iommu_map_sg,
	.unmap_sg		= arm_iommu_unmap_sg,
	.sync_sg_for_cpu	= arm_iommu_sync_sg_for_cpu,
	.sync_sg_for_device	= arm_iommu_sync_sg_for_device,

	.map_resource		= arm_iommu_map_resource,
	.unmap_resource		= arm_iommu_unmap_resource,
};
153562306a36Sopenharmony_ci
/**
 * arm_iommu_create_mapping
 * @bus: pointer to the bus holding the client device (for IOMMU calls)
 * @base: start address of the valid IO address space
 * @size: maximum size of the valid IO address space
 *
 * Creates a mapping structure which holds information about used/unused
 * IO address ranges, which is required to perform memory allocation and
 * mapping with IOMMU aware functions.
 *
 * The client device need to be attached to the mapping with
 * arm_iommu_attach_device function.
 *
 * Returns the new mapping, or an ERR_PTR (-ERANGE for > 32-bit spaces,
 * -EINVAL for a zero-sized space, -ENOMEM on allocation failure).
 */
struct dma_iommu_mapping *
arm_iommu_create_mapping(const struct bus_type *bus, dma_addr_t base, u64 size)
{
	unsigned int bits = size >> PAGE_SHIFT;
	unsigned int bitmap_size = BITS_TO_LONGS(bits) * sizeof(long);
	struct dma_iommu_mapping *mapping;
	int extensions = 1;
	int err = -ENOMEM;

	/* currently only 32-bit DMA address space is supported */
	if (size > DMA_BIT_MASK(32) + 1)
		return ERR_PTR(-ERANGE);

	if (!bitmap_size)
		return ERR_PTR(-EINVAL);

	/*
	 * Cap each IOVA bitmap at one page; larger address spaces are
	 * covered by allocating further bitmaps lazily (see
	 * extend_iommu_mapping()), up to 'extensions' of them.
	 */
	if (bitmap_size > PAGE_SIZE) {
		extensions = bitmap_size / PAGE_SIZE;
		bitmap_size = PAGE_SIZE;
	}

	mapping = kzalloc(sizeof(struct dma_iommu_mapping), GFP_KERNEL);
	if (!mapping)
		goto err;

	mapping->bitmap_size = bitmap_size;
	/* array of bitmap pointers; only slot 0 is populated up front */
	mapping->bitmaps = kcalloc(extensions, sizeof(unsigned long *),
				   GFP_KERNEL);
	if (!mapping->bitmaps)
		goto err2;

	mapping->bitmaps[0] = kzalloc(bitmap_size, GFP_KERNEL);
	if (!mapping->bitmaps[0])
		goto err3;

	mapping->nr_bitmaps = 1;
	mapping->extensions = extensions;
	mapping->base = base;
	mapping->bits = BITS_PER_BYTE * bitmap_size;

	spin_lock_init(&mapping->lock);

	mapping->domain = iommu_domain_alloc(bus);
	if (!mapping->domain)
		goto err4;

	/* released via release_iommu_mapping() when the last ref drops */
	kref_init(&mapping->kref);
	return mapping;
err4:
	kfree(mapping->bitmaps[0]);
err3:
	kfree(mapping->bitmaps);
err2:
	kfree(mapping);
err:
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(arm_iommu_create_mapping);
160762306a36Sopenharmony_ci
160862306a36Sopenharmony_cistatic void release_iommu_mapping(struct kref *kref)
160962306a36Sopenharmony_ci{
161062306a36Sopenharmony_ci	int i;
161162306a36Sopenharmony_ci	struct dma_iommu_mapping *mapping =
161262306a36Sopenharmony_ci		container_of(kref, struct dma_iommu_mapping, kref);
161362306a36Sopenharmony_ci
161462306a36Sopenharmony_ci	iommu_domain_free(mapping->domain);
161562306a36Sopenharmony_ci	for (i = 0; i < mapping->nr_bitmaps; i++)
161662306a36Sopenharmony_ci		kfree(mapping->bitmaps[i]);
161762306a36Sopenharmony_ci	kfree(mapping->bitmaps);
161862306a36Sopenharmony_ci	kfree(mapping);
161962306a36Sopenharmony_ci}
162062306a36Sopenharmony_ci
162162306a36Sopenharmony_cistatic int extend_iommu_mapping(struct dma_iommu_mapping *mapping)
162262306a36Sopenharmony_ci{
162362306a36Sopenharmony_ci	int next_bitmap;
162462306a36Sopenharmony_ci
162562306a36Sopenharmony_ci	if (mapping->nr_bitmaps >= mapping->extensions)
162662306a36Sopenharmony_ci		return -EINVAL;
162762306a36Sopenharmony_ci
162862306a36Sopenharmony_ci	next_bitmap = mapping->nr_bitmaps;
162962306a36Sopenharmony_ci	mapping->bitmaps[next_bitmap] = kzalloc(mapping->bitmap_size,
163062306a36Sopenharmony_ci						GFP_ATOMIC);
163162306a36Sopenharmony_ci	if (!mapping->bitmaps[next_bitmap])
163262306a36Sopenharmony_ci		return -ENOMEM;
163362306a36Sopenharmony_ci
163462306a36Sopenharmony_ci	mapping->nr_bitmaps++;
163562306a36Sopenharmony_ci
163662306a36Sopenharmony_ci	return 0;
163762306a36Sopenharmony_ci}
163862306a36Sopenharmony_ci
163962306a36Sopenharmony_civoid arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
164062306a36Sopenharmony_ci{
164162306a36Sopenharmony_ci	if (mapping)
164262306a36Sopenharmony_ci		kref_put(&mapping->kref, release_iommu_mapping);
164362306a36Sopenharmony_ci}
164462306a36Sopenharmony_ciEXPORT_SYMBOL_GPL(arm_iommu_release_mapping);
164562306a36Sopenharmony_ci
164662306a36Sopenharmony_cistatic int __arm_iommu_attach_device(struct device *dev,
164762306a36Sopenharmony_ci				     struct dma_iommu_mapping *mapping)
164862306a36Sopenharmony_ci{
164962306a36Sopenharmony_ci	int err;
165062306a36Sopenharmony_ci
165162306a36Sopenharmony_ci	err = iommu_attach_device(mapping->domain, dev);
165262306a36Sopenharmony_ci	if (err)
165362306a36Sopenharmony_ci		return err;
165462306a36Sopenharmony_ci
165562306a36Sopenharmony_ci	kref_get(&mapping->kref);
165662306a36Sopenharmony_ci	to_dma_iommu_mapping(dev) = mapping;
165762306a36Sopenharmony_ci
165862306a36Sopenharmony_ci	pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev));
165962306a36Sopenharmony_ci	return 0;
166062306a36Sopenharmony_ci}
166162306a36Sopenharmony_ci
166262306a36Sopenharmony_ci/**
166362306a36Sopenharmony_ci * arm_iommu_attach_device
166462306a36Sopenharmony_ci * @dev: valid struct device pointer
166562306a36Sopenharmony_ci * @mapping: io address space mapping structure (returned from
166662306a36Sopenharmony_ci *	arm_iommu_create_mapping)
166762306a36Sopenharmony_ci *
166862306a36Sopenharmony_ci * Attaches specified io address space mapping to the provided device.
166962306a36Sopenharmony_ci * This replaces the dma operations (dma_map_ops pointer) with the
167062306a36Sopenharmony_ci * IOMMU aware version.
167162306a36Sopenharmony_ci *
167262306a36Sopenharmony_ci * More than one client might be attached to the same io address space
167362306a36Sopenharmony_ci * mapping.
167462306a36Sopenharmony_ci */
167562306a36Sopenharmony_ciint arm_iommu_attach_device(struct device *dev,
167662306a36Sopenharmony_ci			    struct dma_iommu_mapping *mapping)
167762306a36Sopenharmony_ci{
167862306a36Sopenharmony_ci	int err;
167962306a36Sopenharmony_ci
168062306a36Sopenharmony_ci	err = __arm_iommu_attach_device(dev, mapping);
168162306a36Sopenharmony_ci	if (err)
168262306a36Sopenharmony_ci		return err;
168362306a36Sopenharmony_ci
168462306a36Sopenharmony_ci	set_dma_ops(dev, &iommu_ops);
168562306a36Sopenharmony_ci	return 0;
168662306a36Sopenharmony_ci}
168762306a36Sopenharmony_ciEXPORT_SYMBOL_GPL(arm_iommu_attach_device);
168862306a36Sopenharmony_ci
168962306a36Sopenharmony_ci/**
169062306a36Sopenharmony_ci * arm_iommu_detach_device
169162306a36Sopenharmony_ci * @dev: valid struct device pointer
169262306a36Sopenharmony_ci *
169362306a36Sopenharmony_ci * Detaches the provided device from a previously attached map.
169462306a36Sopenharmony_ci * This overwrites the dma_ops pointer with appropriate non-IOMMU ops.
169562306a36Sopenharmony_ci */
169662306a36Sopenharmony_civoid arm_iommu_detach_device(struct device *dev)
169762306a36Sopenharmony_ci{
169862306a36Sopenharmony_ci	struct dma_iommu_mapping *mapping;
169962306a36Sopenharmony_ci
170062306a36Sopenharmony_ci	mapping = to_dma_iommu_mapping(dev);
170162306a36Sopenharmony_ci	if (!mapping) {
170262306a36Sopenharmony_ci		dev_warn(dev, "Not attached\n");
170362306a36Sopenharmony_ci		return;
170462306a36Sopenharmony_ci	}
170562306a36Sopenharmony_ci
170662306a36Sopenharmony_ci	iommu_detach_device(mapping->domain, dev);
170762306a36Sopenharmony_ci	kref_put(&mapping->kref, release_iommu_mapping);
170862306a36Sopenharmony_ci	to_dma_iommu_mapping(dev) = NULL;
170962306a36Sopenharmony_ci	set_dma_ops(dev, NULL);
171062306a36Sopenharmony_ci
171162306a36Sopenharmony_ci	pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev));
171262306a36Sopenharmony_ci}
171362306a36Sopenharmony_ciEXPORT_SYMBOL_GPL(arm_iommu_detach_device);
171462306a36Sopenharmony_ci
171562306a36Sopenharmony_cistatic void arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
171662306a36Sopenharmony_ci				    const struct iommu_ops *iommu, bool coherent)
171762306a36Sopenharmony_ci{
171862306a36Sopenharmony_ci	struct dma_iommu_mapping *mapping;
171962306a36Sopenharmony_ci
172062306a36Sopenharmony_ci	mapping = arm_iommu_create_mapping(dev->bus, dma_base, size);
172162306a36Sopenharmony_ci	if (IS_ERR(mapping)) {
172262306a36Sopenharmony_ci		pr_warn("Failed to create %llu-byte IOMMU mapping for device %s\n",
172362306a36Sopenharmony_ci				size, dev_name(dev));
172462306a36Sopenharmony_ci		return;
172562306a36Sopenharmony_ci	}
172662306a36Sopenharmony_ci
172762306a36Sopenharmony_ci	if (__arm_iommu_attach_device(dev, mapping)) {
172862306a36Sopenharmony_ci		pr_warn("Failed to attached device %s to IOMMU_mapping\n",
172962306a36Sopenharmony_ci				dev_name(dev));
173062306a36Sopenharmony_ci		arm_iommu_release_mapping(mapping);
173162306a36Sopenharmony_ci		return;
173262306a36Sopenharmony_ci	}
173362306a36Sopenharmony_ci
173462306a36Sopenharmony_ci	set_dma_ops(dev, &iommu_ops);
173562306a36Sopenharmony_ci}
173662306a36Sopenharmony_ci
/* Undo arm_setup_iommu_dma_ops(): detach the device and drop the mapping. */
static void arm_teardown_iommu_dma_ops(struct device *dev)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);

	if (!mapping)
		return;

	arm_iommu_detach_device(dev);
	/* release the reference held since mapping creation */
	arm_iommu_release_mapping(mapping);
}
174762306a36Sopenharmony_ci
#else

/*
 * !CONFIG_ARM_DMA_USE_IOMMU: no-op stubs so arch_setup_dma_ops() and
 * arch_teardown_dma_ops() compile unchanged without IOMMU support.
 */
static void arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
				    const struct iommu_ops *iommu, bool coherent)
{
}

static void arm_teardown_iommu_dma_ops(struct device *dev) { }

#endif	/* CONFIG_ARM_DMA_USE_IOMMU */
175862306a36Sopenharmony_ci
/*
 * Per-device DMA configuration hook, called by the driver core at probe
 * time.  Records coherency, optionally installs the IOMMU-aware dma_ops,
 * and lets Xen override the ops when running as a domain.
 */
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
			const struct iommu_ops *iommu, bool coherent)
{
	/*
	 * Due to legacy code that sets the ->dma_coherent flag from a bus
	 * notifier we can't just assign coherent to the ->dma_coherent flag
	 * here, but instead have to make sure we only set but never clear it
	 * for now.
	 */
	if (coherent)
		dev->dma_coherent = true;

	/*
	 * Don't override the dma_ops if they have already been set. Ideally
	 * this should be the only location where dma_ops are set, remove this
	 * check when all other callers of set_dma_ops will have disappeared.
	 */
	if (dev->dma_ops)
		return;

	if (iommu)
		arm_setup_iommu_dma_ops(dev, dma_base, size, iommu, coherent);

	/* may replace whatever ops are in place when running under Xen */
	xen_setup_dma_ops(dev);
	/* remembered so arch_teardown_dma_ops() knows there is work to undo */
	dev->archdata.dma_ops_setup = true;
}
178562306a36Sopenharmony_ci
/* Driver-core hook undoing arch_setup_dma_ops() when a device is unbound. */
void arch_teardown_dma_ops(struct device *dev)
{
	if (!dev->archdata.dma_ops_setup)
		return;

	arm_teardown_iommu_dma_ops(dev);
	/* Let arch_setup_dma_ops() start again from scratch upon re-probe */
	set_dma_ops(dev, NULL);
}
179562306a36Sopenharmony_ci
179662306a36Sopenharmony_civoid arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
179762306a36Sopenharmony_ci		enum dma_data_direction dir)
179862306a36Sopenharmony_ci{
179962306a36Sopenharmony_ci	__dma_page_cpu_to_dev(phys_to_page(paddr), paddr & (PAGE_SIZE - 1),
180062306a36Sopenharmony_ci			      size, dir);
180162306a36Sopenharmony_ci}
180262306a36Sopenharmony_ci
180362306a36Sopenharmony_civoid arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
180462306a36Sopenharmony_ci		enum dma_data_direction dir)
180562306a36Sopenharmony_ci{
180662306a36Sopenharmony_ci	__dma_page_dev_to_cpu(phys_to_page(paddr), paddr & (PAGE_SIZE - 1),
180762306a36Sopenharmony_ci			      size, dir);
180862306a36Sopenharmony_ci}
180962306a36Sopenharmony_ci
181062306a36Sopenharmony_civoid *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
181162306a36Sopenharmony_ci		gfp_t gfp, unsigned long attrs)
181262306a36Sopenharmony_ci{
181362306a36Sopenharmony_ci	return __dma_alloc(dev, size, dma_handle, gfp,
181462306a36Sopenharmony_ci			   __get_dma_pgprot(attrs, PAGE_KERNEL), false,
181562306a36Sopenharmony_ci			   attrs, __builtin_return_address(0));
181662306a36Sopenharmony_ci}
181762306a36Sopenharmony_ci
/* dma-direct free hook: delegate to the common ARM free path (non-coherent). */
void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	__arm_dma_free(dev, size, cpu_addr, dma_handle, attrs, false);
}
1823