xref: /kernel/linux/linux-6.6/kernel/dma/direct.c (revision 62306a36)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018-2020 Christoph Hellwig.
 *
 * DMA operations that map physical memory directly without using an IOMMU.
 */
#include <linux/memblock.h> /* for max_pfn */
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-map-ops.h>
#include <linux/scatterlist.h>
#include <linux/pfn.h>
#include <linux/vmalloc.h>
#include <linux/set_memory.h>
#include <linux/slab.h>
#include "direct.h"

/*
 * Most architectures use ZONE_DMA for the first 16 Megabytes, but some use
 * it for entirely different regions. In that case the arch code needs to
 * override the variable below for dma-direct to work properly.
 */
unsigned int zone_dma_bits __ro_after_init = 24;
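
/*
 * As a point of reference (values may differ between releases): s390 is
 * believed to set zone_dma_bits to 31 and arm64 computes a value at boot
 * from the platform's DMA zone limits; see the respective arch setup code
 * for the authoritative numbers.
 */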

static inline dma_addr_t phys_to_dma_direct(struct device *dev,
		phys_addr_t phys)
{
	if (force_dma_unencrypted(dev))
		return phys_to_dma_unencrypted(dev, phys);
	return phys_to_dma(dev, phys);
}

static inline struct page *dma_direct_to_page(struct device *dev,
		dma_addr_t dma_addr)
{
	return pfn_to_page(PHYS_PFN(dma_to_phys(dev, dma_addr)));
}

u64 dma_direct_get_required_mask(struct device *dev)
{
	phys_addr_t phys = (phys_addr_t)(max_pfn - 1) << PAGE_SHIFT;
	u64 max_dma = phys_to_dma_direct(dev, phys);

	return (1ULL << (fls64(max_dma) - 1)) * 2 - 1;
}
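
/*
 * Worked example for dma_direct_get_required_mask() above: if the last RAM
 * page maps to bus address 0xc000_0000, fls64() returns 32 and the
 * expression evaluates to (1ULL << 31) * 2 - 1 = 0xffff_ffff, i.e. a device
 * needs a full 32-bit mask to reach all of memory.
 */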

static gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 *phys_limit)
{
	u64 dma_limit = min_not_zero(
		dev->coherent_dma_mask,
		dev->bus_dma_limit);

	/*
	 * Optimistically try the zone that the physical address mask falls
	 * into first.  If that returns memory that isn't actually addressable
	 * we will fall back to the next lower zone and try again.
	 *
	 * Note that GFP_DMA32 and GFP_DMA are no-ops without the corresponding
	 * zones.
	 */
	*phys_limit = dma_to_phys(dev, dma_limit);
	if (*phys_limit <= DMA_BIT_MASK(zone_dma_bits))
		return GFP_DMA;
	if (*phys_limit <= DMA_BIT_MASK(32))
		return GFP_DMA32;
	return 0;
}
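
/*
 * Worked example for dma_direct_optimal_gfp_mask() above, assuming a 1:1
 * bus mapping and the default zone_dma_bits of 24: a device with a 30-bit
 * coherent mask yields *phys_limit = 0x3fff_ffff, which is above
 * DMA_BIT_MASK(24) but within DMA_BIT_MASK(32), so the first attempt is
 * made from ZONE_DMA32.
 */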

bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
{
	dma_addr_t dma_addr = phys_to_dma_direct(dev, phys);

	if (dma_addr == DMA_MAPPING_ERROR)
		return false;
	return dma_addr + size - 1 <=
		min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
}
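
/*
 * Worked example for dma_coherent_ok() above, again assuming a 1:1 bus
 * mapping: with a 32-bit coherent mask, a buffer at phys 0xffff_f000 of
 * size 0x2000 ends at 0x1_0000_0fff, which exceeds 0xffff_ffff, so the
 * check fails and the caller falls back to a lower zone.
 */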

static int dma_set_decrypted(struct device *dev, void *vaddr, size_t size)
{
	if (!force_dma_unencrypted(dev))
		return 0;
	return set_memory_decrypted((unsigned long)vaddr, PFN_UP(size));
}

static int dma_set_encrypted(struct device *dev, void *vaddr, size_t size)
{
	int ret;

	if (!force_dma_unencrypted(dev))
		return 0;
	ret = set_memory_encrypted((unsigned long)vaddr, PFN_UP(size));
	if (ret)
		pr_warn_ratelimited("leaking DMA memory that can't be re-encrypted\n");
	return ret;
}

static void __dma_direct_free_pages(struct device *dev, struct page *page,
				    size_t size)
{
	if (swiotlb_free(dev, page, size))
		return;
	dma_free_contiguous(dev, page, size);
}

static struct page *dma_direct_alloc_swiotlb(struct device *dev, size_t size)
{
	struct page *page = swiotlb_alloc(dev, size);

	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
		swiotlb_free(dev, page, size);
		return NULL;
	}

	return page;
}

static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
		gfp_t gfp, bool allow_highmem)
{
	int node = dev_to_node(dev);
	struct page *page = NULL;
	u64 phys_limit;

	WARN_ON_ONCE(!PAGE_ALIGNED(size));

	if (is_swiotlb_for_alloc(dev))
		return dma_direct_alloc_swiotlb(dev, size);

	gfp |= dma_direct_optimal_gfp_mask(dev, &phys_limit);
	page = dma_alloc_contiguous(dev, size, gfp);
	if (page) {
		if (!dma_coherent_ok(dev, page_to_phys(page), size) ||
		    (!allow_highmem && PageHighMem(page))) {
			dma_free_contiguous(dev, page, size);
			page = NULL;
		}
	}
again:
	if (!page)
		page = alloc_pages_node(node, gfp, get_order(size));
	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
		dma_free_contiguous(dev, page, size);
		page = NULL;

		if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
		    phys_limit < DMA_BIT_MASK(64) &&
		    !(gfp & (GFP_DMA32 | GFP_DMA))) {
			gfp |= GFP_DMA32;
			goto again;
		}

		if (IS_ENABLED(CONFIG_ZONE_DMA) && !(gfp & GFP_DMA)) {
			gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
			goto again;
		}
	}

	return page;
}
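
/*
 * Summary of the cascade in __dma_direct_alloc_pages() above: a restricted
 * device allocates from its swiotlb pool; everyone else tries
 * dma_alloc_contiguous() (CMA), then the page allocator, retrying with
 * GFP_DMA32 and finally GFP_DMA whenever the returned pages fail
 * dma_coherent_ok().
 */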

/*
 * Check if a potentially blocking operation needs to dip into the atomic
 * pools for the given device/gfp.
 */
static bool dma_direct_use_pool(struct device *dev, gfp_t gfp)
{
	return !gfpflags_allow_blocking(gfp) && !is_swiotlb_for_alloc(dev);
}

static void *dma_direct_alloc_from_pool(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{
	struct page *page;
	u64 phys_limit;
	void *ret;

	if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_DMA_COHERENT_POOL)))
		return NULL;

	gfp |= dma_direct_optimal_gfp_mask(dev, &phys_limit);
	page = dma_alloc_from_pool(dev, size, &ret, gfp, dma_coherent_ok);
	if (!page)
		return NULL;
	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
	return ret;
}

static void *dma_direct_alloc_no_mapping(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{
	struct page *page;

	page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO, true);
	if (!page)
		return NULL;

	/* remove any dirty cache lines on the kernel alias */
	if (!PageHighMem(page))
		arch_dma_prep_coherent(page, size);

	/* return the page pointer as the opaque cookie */
	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
	return page;
}

void *dma_direct_alloc(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	bool remap = false, set_uncached = false;
	struct page *page;
	void *ret;

	size = PAGE_ALIGN(size);
	if (attrs & DMA_ATTR_NO_WARN)
		gfp |= __GFP_NOWARN;

	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
	    !force_dma_unencrypted(dev) && !is_swiotlb_for_alloc(dev))
		return dma_direct_alloc_no_mapping(dev, size, dma_handle, gfp);

	if (!dev_is_dma_coherent(dev)) {
		/*
		 * Fallback to the arch handler if it exists.  This should
		 * eventually go away.
		 */
		if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
		    !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
		    !IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) &&
		    !is_swiotlb_for_alloc(dev))
			return arch_dma_alloc(dev, size, dma_handle, gfp,
					      attrs);

		/*
		 * If there is a global pool, always allocate from it for
		 * non-coherent devices.
		 */
		if (IS_ENABLED(CONFIG_DMA_GLOBAL_POOL))
			return dma_alloc_from_global_coherent(dev, size,
					dma_handle);

		/*
		 * Otherwise remap if the architecture is asking for it.  But
		 * given that remapping memory is a blocking operation we'll
		 * instead have to dip into the atomic pools.
		 */
		remap = IS_ENABLED(CONFIG_DMA_DIRECT_REMAP);
		if (remap) {
			if (dma_direct_use_pool(dev, gfp))
				return dma_direct_alloc_from_pool(dev, size,
						dma_handle, gfp);
		} else {
			if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED))
				return NULL;
			set_uncached = true;
		}
	}

	/*
	 * Decrypting memory may block, so allocate the memory from the atomic
	 * pools if we can't block.
	 */
	if (force_dma_unencrypted(dev) && dma_direct_use_pool(dev, gfp))
		return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);

	/* we always manually zero the memory once we are done */
	page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO, true);
	if (!page)
		return NULL;

	/*
	 * dma_alloc_contiguous can return highmem pages depending on a
	 * combination of the cma= arguments and per-arch setup.  These need
	 * to be remapped to return a kernel virtual address.
	 */
	if (PageHighMem(page)) {
		remap = true;
		set_uncached = false;
	}

	if (remap) {
		pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);

		if (force_dma_unencrypted(dev))
			prot = pgprot_decrypted(prot);

		/* remove any dirty cache lines on the kernel alias */
		arch_dma_prep_coherent(page, size);

		/* create a coherent mapping */
		ret = dma_common_contiguous_remap(page, size, prot,
				__builtin_return_address(0));
		if (!ret)
			goto out_free_pages;
	} else {
		ret = page_address(page);
		if (dma_set_decrypted(dev, ret, size))
			goto out_free_pages;
	}

	memset(ret, 0, size);

	if (set_uncached) {
		arch_dma_prep_coherent(page, size);
		ret = arch_dma_set_uncached(ret, size);
		if (IS_ERR(ret))
			goto out_encrypt_pages;
	}

	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
	return ret;

out_encrypt_pages:
	if (dma_set_encrypted(dev, page_address(page), size))
		return NULL;
out_free_pages:
	__dma_direct_free_pages(dev, page, size);
	return NULL;
}
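
/*
 * Drivers do not call dma_direct_alloc() directly; it is reached via
 * dma_alloc_attrs()/dma_alloc_coherent() when the device has no IOMMU ops.
 * A minimal caller sketch (device and size are illustrative):
 *
 *	dma_addr_t bus;
 *	void *buf = dma_alloc_coherent(&pdev->dev, SZ_4K, &bus, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	// ... program the device with "bus", use "buf" from the CPU ...
 *	dma_free_coherent(&pdev->dev, SZ_4K, buf, bus);
 */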

void dma_direct_free(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs)
{
	unsigned int page_order = get_order(size);

	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
	    !force_dma_unencrypted(dev) && !is_swiotlb_for_alloc(dev)) {
		/* cpu_addr is a struct page cookie, not a kernel address */
		dma_free_contiguous(dev, cpu_addr, size);
		return;
	}

	if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
	    !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
	    !IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) &&
	    !dev_is_dma_coherent(dev) &&
	    !is_swiotlb_for_alloc(dev)) {
		arch_dma_free(dev, size, cpu_addr, dma_addr, attrs);
		return;
	}

	if (IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) &&
	    !dev_is_dma_coherent(dev)) {
		if (!dma_release_from_global_coherent(page_order, cpu_addr))
			WARN_ON_ONCE(1);
		return;
	}

	/* If cpu_addr is not from an atomic pool, dma_free_from_pool() fails */
	if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
	    dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size)))
		return;

	if (is_vmalloc_addr(cpu_addr)) {
		vunmap(cpu_addr);
	} else {
		if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_CLEAR_UNCACHED))
			arch_dma_clear_uncached(cpu_addr, size);
		if (dma_set_encrypted(dev, cpu_addr, size))
			return;
	}

	__dma_direct_free_pages(dev, dma_direct_to_page(dev, dma_addr), size);
}
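
/*
 * The branches in dma_direct_free() above mirror the allocation paths: the
 * is_vmalloc_addr() test distinguishes remapped buffers (torn down with
 * vunmap()) from linearly mapped ones, which are re-encrypted and have any
 * uncached alias cleared before the pages are returned.
 */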

struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
	struct page *page;
	void *ret;

	if (force_dma_unencrypted(dev) && dma_direct_use_pool(dev, gfp))
		return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);

	page = __dma_direct_alloc_pages(dev, size, gfp, false);
	if (!page)
		return NULL;

	ret = page_address(page);
	if (dma_set_decrypted(dev, ret, size))
		goto out_free_pages;
	memset(ret, 0, size);
	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
	return page;
out_free_pages:
	__dma_direct_free_pages(dev, page, size);
	return NULL;
}

void dma_direct_free_pages(struct device *dev, size_t size,
		struct page *page, dma_addr_t dma_addr,
		enum dma_data_direction dir)
{
	void *vaddr = page_address(page);

	/* If cpu_addr is not from an atomic pool, dma_free_from_pool() fails */
	if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
	    dma_free_from_pool(dev, vaddr, size))
		return;

	if (dma_set_encrypted(dev, vaddr, size))
		return;
	__dma_direct_free_pages(dev, page, size);
}

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		phys_addr_t paddr = dma_to_phys(dev, sg_dma_address(sg));

		if (unlikely(is_swiotlb_buffer(dev, paddr)))
			swiotlb_sync_single_for_device(dev, paddr, sg->length,
						       dir);

		if (!dev_is_dma_coherent(dev))
			arch_sync_dma_for_device(paddr, sg->length,
					dir);
	}
}
#endif

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		phys_addr_t paddr = dma_to_phys(dev, sg_dma_address(sg));

		if (!dev_is_dma_coherent(dev))
			arch_sync_dma_for_cpu(paddr, sg->length, dir);

		if (unlikely(is_swiotlb_buffer(dev, paddr)))
			swiotlb_sync_single_for_cpu(dev, paddr, sg->length,
						    dir);

		if (dir == DMA_FROM_DEVICE)
			arch_dma_mark_clean(paddr, sg->length);
	}

	if (!dev_is_dma_coherent(dev))
		arch_sync_dma_for_cpu_all();
}
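
/*
 * Note the mirrored ordering in the two sync helpers above: for the device,
 * data is bounced into the swiotlb buffer before the cache maintenance; for
 * the CPU, the cache is synced first so that the copy out of the bounce
 * buffer observes the data the device actually wrote.
 */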

/*
 * Unmaps segments, except for ones marked as pci_p2pdma which do not
 * require any further action as they contain a bus address.
 */
void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		if (sg_dma_is_bus_address(sg))
			sg_dma_unmark_bus_address(sg);
		else
			dma_direct_unmap_page(dev, sg->dma_address,
					      sg_dma_len(sg), dir, attrs);
	}
}
#endif

int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
		enum dma_data_direction dir, unsigned long attrs)
{
	struct pci_p2pdma_map_state p2pdma_state = {};
	enum pci_p2pdma_map_type map;
	struct scatterlist *sg;
	int i, ret;

	for_each_sg(sgl, sg, nents, i) {
		if (is_pci_p2pdma_page(sg_page(sg))) {
			map = pci_p2pdma_map_segment(&p2pdma_state, dev, sg);
			switch (map) {
			case PCI_P2PDMA_MAP_BUS_ADDR:
				continue;
			case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
				/*
				 * Any P2P mapping that traverses the PCI
				 * host bridge must be mapped with CPU physical
				 * addresses and not PCI bus addresses. This is
				 * done with dma_direct_map_page() below.
				 */
				break;
			default:
				ret = -EREMOTEIO;
				goto out_unmap;
			}
		}

		sg->dma_address = dma_direct_map_page(dev, sg_page(sg),
				sg->offset, sg->length, dir, attrs);
		if (sg->dma_address == DMA_MAPPING_ERROR) {
			ret = -EIO;
			goto out_unmap;
		}
		sg_dma_len(sg) = sg->length;
	}

	return nents;

out_unmap:
	dma_direct_unmap_sg(dev, sgl, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
	return ret;
}
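
/*
 * Like the coherent allocator, this is reached through the generic
 * dma_map_sg() API.  A minimal streaming-DMA sketch (names illustrative):
 *
 *	int n = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *	if (!n)
 *		return -EIO;
 *	// ... hand sg_dma_address()/sg_dma_len() of each entry to the device ...
 *	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
 */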

dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	dma_addr_t dma_addr = paddr;

	if (unlikely(!dma_capable(dev, dma_addr, size, false))) {
		dev_err_once(dev,
			     "DMA addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
			     &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
		WARN_ON_ONCE(1);
		return DMA_MAPPING_ERROR;
	}

	return dma_addr;
}
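
/*
 * Note that dma_direct_map_resource() above deliberately uses the physical
 * address unchanged instead of translating it through phys_to_dma(): it
 * maps MMIO resources rather than RAM, so only the addressability check is
 * applied.
 */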

int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	struct page *page = dma_direct_to_page(dev, dma_addr);
	int ret;

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (!ret)
		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return ret;
}

bool dma_direct_can_mmap(struct device *dev)
{
	return dev_is_dma_coherent(dev) ||
		IS_ENABLED(CONFIG_DMA_NONCOHERENT_MMAP);
}

int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	unsigned long user_count = vma_pages(vma);
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn = PHYS_PFN(dma_to_phys(dev, dma_addr));
	int ret = -ENXIO;

	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);
	if (force_dma_unencrypted(dev))
		vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;
	if (dma_mmap_from_global_coherent(vma, cpu_addr, size, &ret))
		return ret;

	if (vma->vm_pgoff >= count || user_count > count - vma->vm_pgoff)
		return -ENXIO;
	return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
			user_count << PAGE_SHIFT, vma->vm_page_prot);
}

int dma_direct_supported(struct device *dev, u64 mask)
{
	u64 min_mask = (max_pfn - 1) << PAGE_SHIFT;

	/*
	 * Because 32-bit DMA masks are so common we expect every architecture
	 * to be able to satisfy them - either by not supporting more physical
	 * memory, or by providing a ZONE_DMA32.  If neither is the case, the
	 * architecture needs to use an IOMMU instead of the direct mapping.
	 */
	if (mask >= DMA_BIT_MASK(32))
		return 1;

	/*
	 * This check needs to be against the actual bit mask value, so use
	 * phys_to_dma_unencrypted() here so that the SME encryption mask isn't
	 * part of the check.
	 */
	if (IS_ENABLED(CONFIG_ZONE_DMA))
		min_mask = min_t(u64, min_mask, DMA_BIT_MASK(zone_dma_bits));
	return mask >= phys_to_dma_unencrypted(dev, min_mask);
}
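
/*
 * Worked example for dma_direct_supported() above: a device requesting a
 * 24-bit mask on a kernel with CONFIG_ZONE_DMA and the default
 * zone_dma_bits of 24 clamps min_mask to DMA_BIT_MASK(24), so the request
 * is accepted as long as that range maps 1:1 onto the bus.
 */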

size_t dma_direct_max_mapping_size(struct device *dev)
{
	/* If SWIOTLB is active, use its maximum mapping size */
	if (is_swiotlb_active(dev) &&
	    (dma_addressing_limited(dev) || is_swiotlb_force_bounce(dev)))
		return swiotlb_max_mapping_size(dev);
	return SIZE_MAX;
}

bool dma_direct_need_sync(struct device *dev, dma_addr_t dma_addr)
{
	return !dev_is_dma_coherent(dev) ||
	       is_swiotlb_buffer(dev, dma_to_phys(dev, dma_addr));
}

/**
 * dma_direct_set_offset - Assign scalar offset for a single DMA range.
 * @dev:	device pointer; needed to "own" the allocated memory.
 * @cpu_start:  beginning of memory region covered by this offset.
 * @dma_start:  beginning of DMA/PCI region covered by this offset.
 * @size:	size of the region.
 *
 * This is for the simple case of a uniform offset which cannot
 * be discovered by "dma-ranges".
 *
 * It returns -ENOMEM if out of memory, -EINVAL if a map
 * already exists, 0 otherwise.
 *
 * Note: any call to this from a driver is a bug.  The mapping needs
 * to be described by the device tree or other firmware interfaces.
 */
int dma_direct_set_offset(struct device *dev, phys_addr_t cpu_start,
			 dma_addr_t dma_start, u64 size)
{
	struct bus_dma_region *map;
	u64 offset = (u64)cpu_start - (u64)dma_start;

	if (dev->dma_range_map) {
		dev_err(dev, "attempt to add DMA range to existing map\n");
		return -EINVAL;
	}

	if (!offset)
		return 0;

	map = kcalloc(2, sizeof(*map), GFP_KERNEL);
	if (!map)
		return -ENOMEM;
	map[0].cpu_start = cpu_start;
	map[0].dma_start = dma_start;
	map[0].offset = offset;
	map[0].size = size;
	dev->dma_range_map = map;
	return 0;
}
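
/*
 * Worked example for dma_direct_set_offset() above (addresses and size are
 * illustrative): on a platform where RAM at CPU address 0x8000_0000 appears
 * to devices at bus address 0x0, early arch/firmware code would call
 *
 *	dma_direct_set_offset(dev, 0x80000000, 0x00000000, SZ_1G);
 *
 * recording an offset of 0x8000_0000 that phys_to_dma() subtracts from
 * every physical address inside the range.
 */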