/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_DMA_MAPPING_H
#define _LINUX_DMA_MAPPING_H

#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
#include <linux/bug.h>
#include <linux/mem_encrypt.h>

/**
 * List of possible attributes associated with a DMA mapping. The semantics
 * of each attribute should be defined in Documentation/core-api/dma-attributes.rst.
 */

/*
 * DMA_ATTR_WEAK_ORDERING: Specifies that reads and writes to the mapping
 * may be weakly ordered, that is that reads and writes may pass each other.
 */
#define DMA_ATTR_WEAK_ORDERING (1UL << 1)
/*
 * DMA_ATTR_WRITE_COMBINE: Specifies that writes to the mapping may be
 * buffered to improve performance.
 */
#define DMA_ATTR_WRITE_COMBINE (1UL << 2)
/*
 * DMA_ATTR_NO_KERNEL_MAPPING: Lets the platform avoid creating a kernel
 * virtual mapping for the allocated buffer.
 */
#define DMA_ATTR_NO_KERNEL_MAPPING (1UL << 4)
/*
 * DMA_ATTR_SKIP_CPU_SYNC: Allows platform code to skip synchronization of
 * the CPU cache for the given buffer assuming that it has been already
 * transferred to 'device' domain.
 */
#define DMA_ATTR_SKIP_CPU_SYNC (1UL << 5)
/*
 * DMA_ATTR_FORCE_CONTIGUOUS: Forces contiguous allocation of the buffer
 * in physical memory.
 */
#define DMA_ATTR_FORCE_CONTIGUOUS (1UL << 6)
/*
 * DMA_ATTR_ALLOC_SINGLE_PAGES: This is a hint to the DMA-mapping subsystem
 * that it's probably not worth the time to try to allocate memory in a way
 * that gives better TLB efficiency.
 */
#define DMA_ATTR_ALLOC_SINGLE_PAGES (1UL << 7)
/*
 * DMA_ATTR_NO_WARN: This tells the DMA-mapping subsystem to suppress
 * allocation failure reports (similarly to __GFP_NOWARN).
 */
#define DMA_ATTR_NO_WARN (1UL << 8)

/*
 * DMA_ATTR_PRIVILEGED: used to indicate that the buffer is fully
 * accessible at an elevated privilege level (and ideally inaccessible or
 * at least read-only at lesser-privileged levels).
 */
#define DMA_ATTR_PRIVILEGED (1UL << 9)

/*
 * DMA_ATTR_SYS_CACHE_ONLY: used to indicate that the buffer should be mapped
 * with the correct memory attributes so that it can be cached in the system
 * or last level cache. This is useful for buffers that are being mapped for
 * devices that are non-coherent, but can use the system cache.
 */
#define DMA_ATTR_SYS_CACHE_ONLY (1UL << 10)

/*
 * DMA_ATTR_SYS_CACHE_ONLY_NWA: used to indicate that the buffer should be
 * mapped with the correct memory attributes so that it can be cached in the
 * system or last level cache, with a no write allocate cache policy. This is
 * useful for buffers that are being mapped for devices that are non-coherent,
 * but can use the system cache.
 */
#define DMA_ATTR_SYS_CACHE_ONLY_NWA (1UL << 11)

/*
 * A dma_addr_t can hold any valid DMA or bus address for the platform.  It can
 * be given to a device to use as a DMA source or target.  It is specific to a
 * given device and there may be a translation between the CPU physical address
 * space and the bus address space.
 *
 * DMA_MAPPING_ERROR is the magic error code if a mapping failed.  It should not
 * be used directly in drivers, but checked for using dma_mapping_error()
 * instead.
 */
#define DMA_MAPPING_ERROR (~(dma_addr_t)0)

#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

#ifdef CONFIG_DMA_API_DEBUG
void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
void debug_dma_map_single(struct device *dev, const void *addr, unsigned long len);
#else
static inline void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
}
static inline void debug_dma_map_single(struct device *dev, const void *addr, unsigned long len)
{
}
#endif /* CONFIG_DMA_API_DEBUG */

#ifdef CONFIG_HAS_DMA
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
    debug_dma_mapping_error(dev, dma_addr);

    if (dma_addr == DMA_MAPPING_ERROR) {
        return -ENOMEM;
    }
    return 0;
}

dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page, size_t offset, size_t size,
                              enum dma_data_direction dir, unsigned long attrs);
void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir,
                          unsigned long attrs);
int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir,
                     unsigned long attrs);
void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir,
                        unsigned long attrs);
dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr, size_t size, enum dma_data_direction dir,
                            unsigned long attrs);
void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir,
                        unsigned long attrs);
void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir);
void dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir);
void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems, enum dma_data_direction dir);
void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems, enum dma_data_direction dir);
void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs);
void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs);
void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs);
void dmam_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle);
int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr, dma_addr_t dma_addr, size_t size,
                          unsigned long attrs);
int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, dma_addr_t dma_addr, size_t size,
                   unsigned long attrs);
bool dma_can_mmap(struct device *dev);
int dma_supported(struct device *dev, u64 mask);
int dma_set_mask(struct device *dev, u64 mask);
int dma_set_coherent_mask(struct device *dev, u64 mask);
u64 dma_get_required_mask(struct device *dev);
size_t dma_max_mapping_size(struct device *dev);
bool dma_need_sync(struct device *dev, dma_addr_t dma_addr);
unsigned long dma_get_merge_boundary(struct device *dev);
#else  /* CONFIG_HAS_DMA */
static inline dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page, size_t offset, size_t size,
                                            enum dma_data_direction dir, unsigned long attrs)
{
    return DMA_MAPPING_ERROR;
}
static inline void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir,
                                        unsigned long attrs)
{
}
static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir,
                                   unsigned long attrs)
{
    return 0;
}
static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg, int nents,
                                      enum dma_data_direction dir, unsigned long attrs)
{
}
static inline dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr, size_t size,
                                          enum dma_data_direction dir, unsigned long attrs)
{
    return DMA_MAPPING_ERROR;
}
static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir,
                                      unsigned long attrs)
{
}
static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
                                           enum dma_data_direction dir)
{
}
static inline void dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size,
                                              enum dma_data_direction dir)
{
}
static inline void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
                                       enum dma_data_direction dir)
{
}
static inline void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
                                          enum dma_data_direction dir)
{
}
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
    return -ENOMEM;
}
static inline void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag,
                                    unsigned long attrs)
{
    return NULL;
}
static inline void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr, dma_addr_t dma_handle,
                                  unsigned long attrs)
{
}
static inline void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp,
                                     unsigned long attrs)
{
    return NULL;
}
static inline void dmam_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle)
{
}
static inline int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr, dma_addr_t dma_addr,
                                        size_t size, unsigned long attrs)
{
    return -ENXIO;
}
static inline int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, dma_addr_t dma_addr,
                                 size_t size, unsigned long attrs)
{
    return -ENXIO;
}
static inline bool dma_can_mmap(struct device *dev)
{
    return false;
}
static inline int dma_supported(struct device *dev, u64 mask)
{
    return 0;
}
static inline int dma_set_mask(struct device *dev, u64 mask)
{
    return -EIO;
}
static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
{
    return -EIO;
}
static inline u64 dma_get_required_mask(struct device *dev)
{
    return 0;
}
static inline size_t dma_max_mapping_size(struct device *dev)
{
    return 0;
}
static inline bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
{
    return false;
}
static inline unsigned long dma_get_merge_boundary(struct device *dev)
{
    return 0;
}
#endif /* CONFIG_HAS_DMA */

struct page *dma_alloc_pages(struct device *dev, size_t size, dma_addr_t *dma_handle, enum dma_data_direction dir,
                             gfp_t gfp);
void dma_free_pages(struct device *dev, size_t size, struct page *page, dma_addr_t dma_handle,
                    enum dma_data_direction dir);
void *dma_alloc_noncoherent(struct device *dev, size_t size, dma_addr_t *dma_handle, enum dma_data_direction dir,
                            gfp_t gfp);
void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle,
                          enum dma_data_direction dir);

static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr, size_t size, enum dma_data_direction dir,
                                              unsigned long attrs)
{
    /* DMA must never operate on areas that might be remapped. */
    if (dev_WARN_ONCE(dev, is_vmalloc_addr(ptr), "rejecting DMA map of vmalloc memory\n")) {
        return DMA_MAPPING_ERROR;
    }
    debug_dma_map_single(dev, ptr, size);
    return dma_map_page_attrs(dev, virt_to_page(ptr), offset_in_page(ptr), size, dir, attrs);
}

static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir,
                                          unsigned long attrs)
{
    return dma_unmap_page_attrs(dev, addr, size, dir, attrs);
}
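
/*
 * A minimal usage sketch for a single streaming mapping (illustrative driver
 * code only, not part of this API; "buf", "len" and the error path are
 * hypothetical).  The handle must be checked with dma_mapping_error() and
 * released with dma_unmap_single() once the device has finished with it:
 *
 *    dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *
 *    if (dma_mapping_error(dev, handle))
 *        return -ENOMEM;
 *    start_transfer(dev, handle, len);
 *    dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 *
 * start_transfer() stands in for whatever device-specific code kicks off the
 * DMA and waits for its completion.
 */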

static inline void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t addr, unsigned long offset, size_t size,
                                                 enum dma_data_direction dir)
{
    return dma_sync_single_for_cpu(dev, addr + offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev, dma_addr_t addr, unsigned long offset,
                                                    size_t size, enum dma_data_direction dir)
{
    return dma_sync_single_for_device(dev, addr + offset, size, dir);
}
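
/*
 * Illustrative sketch (hypothetical names, not part of this API): when a
 * streaming mapping is kept across several transfers, ownership has to be
 * handed back and forth around each CPU access, e.g. for a receive buffer:
 *
 *    dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *    process_rx(buf, len);
 *    dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 *
 * process_rx() stands in for any CPU-side handling of the received data.
 */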

/**
 * dma_map_sgtable - Map the given buffer for DMA
 * @dev:    The device for which to perform the DMA operation
 * @sgt:    The sg_table object describing the buffer
 * @dir:    DMA direction
 * @attrs:    Optional DMA attributes for the map operation
 *
 * Maps a buffer described by a scatterlist stored in the given sg_table
 * object for the @dir DMA operation by the @dev device. After success the
 * ownership for the buffer is transferred to the DMA domain.  One has to
 * call dma_sync_sgtable_for_cpu() or dma_unmap_sgtable() to move the
 * ownership of the buffer back to the CPU domain before the CPU touches
 * the buffer.
 *
 * Returns 0 on success or -EINVAL if mapping the buffer fails.
 */
static inline int dma_map_sgtable(struct device *dev, struct sg_table *sgt, enum dma_data_direction dir,
                                  unsigned long attrs)
{
    int nents;

    nents = dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
    if (nents <= 0) {
        return -EINVAL;
    }
    sgt->nents = nents;
    return 0;
}
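
/*
 * A minimal sketch of mapping a buffer through an sg_table (illustrative
 * only; the sg_table is assumed to have been built beforehand, e.g. with
 * sg_alloc_table_from_pages()):
 *
 *    if (dma_map_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0))
 *        return -EINVAL;
 *    program_device(dev, sgt);
 *    dma_unmap_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
 *
 * program_device() stands in for whatever walks the first sgt->nents entries
 * of sgt->sgl and hands the DMA addresses to the hardware.
 */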

/**
 * dma_unmap_sgtable - Unmap the given buffer for DMA
 * @dev:    The device for which to perform the DMA operation
 * @sgt:    The sg_table object describing the buffer
 * @dir:    DMA direction
 * @attrs:    Optional DMA attributes for the unmap operation
 *
 * Unmaps a buffer described by a scatterlist stored in the given sg_table
 * object for the @dir DMA operation by the @dev device. After this function
 * the ownership of the buffer is transferred back to the CPU domain.
 */
static inline void dma_unmap_sgtable(struct device *dev, struct sg_table *sgt, enum dma_data_direction dir,
                                     unsigned long attrs)
{
    dma_unmap_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
}

/**
 * dma_sync_sgtable_for_cpu - Synchronize the given buffer for CPU access
 * @dev:    The device for which to perform the DMA operation
 * @sgt:    The sg_table object describing the buffer
 * @dir:    DMA direction
 *
 * Performs the needed cache synchronization and moves the ownership of the
 * buffer back to the CPU domain, so it is safe to perform any access to it
 * by the CPU. Before doing any further DMA operations, one has to transfer
 * the ownership of the buffer back to the DMA domain by calling
 * dma_sync_sgtable_for_device().
 */
static inline void dma_sync_sgtable_for_cpu(struct device *dev, struct sg_table *sgt, enum dma_data_direction dir)
{
    dma_sync_sg_for_cpu(dev, sgt->sgl, sgt->orig_nents, dir);
}

/**
 * dma_sync_sgtable_for_device - Synchronize the given buffer for DMA
 * @dev:    The device for which to perform the DMA operation
 * @sgt:    The sg_table object describing the buffer
 * @dir:    DMA direction
 *
 * Performs the needed cache synchronization and moves the ownership of the
 * buffer back to the DMA domain, so it is safe to perform the DMA operation.
 * Once finished, one has to call dma_sync_sgtable_for_cpu() or
 * dma_unmap_sgtable().
 */
static inline void dma_sync_sgtable_for_device(struct device *dev, struct sg_table *sgt, enum dma_data_direction dir)
{
    dma_sync_sg_for_device(dev, sgt->sgl, sgt->orig_nents, dir);
}

#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)
#define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0)
#define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0)
#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)
#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)

static inline void *dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp)
{
    return dma_alloc_attrs(dev, size, dma_handle, gfp, (gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
}

static inline void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t dma_handle)
{
    return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0);
}
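
/*
 * Illustrative coherent allocation sketch (hypothetical ring-buffer names,
 * not part of this API).  The returned CPU pointer and the DMA handle refer
 * to the same buffer, which stays coherent for the lifetime of the
 * allocation:
 *
 *    dma_addr_t ring_dma;
 *    void *ring = dma_alloc_coherent(dev, ring_size, &ring_dma, GFP_KERNEL);
 *
 *    if (!ring)
 *        return -ENOMEM;
 *    setup_rings(dev, ring, ring_dma);
 *    dma_free_coherent(dev, ring_size, ring, ring_dma);
 *
 * setup_rings() is a stand-in for programming ring_dma into the device while
 * the CPU initialises the ring through "ring"; the free normally happens at
 * teardown time.
 */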

static inline u64 dma_get_mask(struct device *dev)
{
    if (dev->dma_mask && *dev->dma_mask) {
        return *dev->dma_mask;
    }
    return DMA_BIT_MASK(32);
}

/*
 * Set both the DMA mask and the coherent DMA mask to the same thing.
 * Note that we don't check the return value from dma_set_coherent_mask()
 * as the DMA API guarantees that the coherent DMA mask can be set to
 * the same or smaller than the streaming DMA mask.
 */
static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
{
    int rc = dma_set_mask(dev, mask);
    if (rc == 0) {
        dma_set_coherent_mask(dev, mask);
    }
    return rc;
}
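
/*
 * Typical probe-time sketch (illustrative only): ask for 64-bit addressing
 * and fall back to a 32-bit mask if the platform cannot provide it:
 *
 *    if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
 *        dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
 *        return -EIO;
 */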

/*
 * Similar to the above, except it deals with the case where the device
 * does not have dev->dma_mask appropriately set up.
 */
static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
{
    dev->dma_mask = &dev->coherent_dma_mask;
    return dma_set_mask_and_coherent(dev, mask);
}

/**
 * dma_addressing_limited - return if the device is addressing limited
 * @dev:    device to check
 *
 * Return %true if the device's DMA mask is too small to address all memory in
 * the system, else %false.  Lack of addressing bits is the prime reason for
 * bounce buffering, but might not be the only one.
 */
static inline bool dma_addressing_limited(struct device *dev)
{
    return min_not_zero(dma_get_mask(dev), dev->bus_dma_limit) < dma_get_required_mask(dev);
}

static inline unsigned int dma_get_max_seg_size(struct device *dev)
{
    if (dev->dma_parms && dev->dma_parms->max_segment_size) {
        return dev->dma_parms->max_segment_size;
    }
    return SZ_64K;
}

static inline int dma_set_max_seg_size(struct device *dev, unsigned int size)
{
    if (dev->dma_parms) {
        dev->dma_parms->max_segment_size = size;
        return 0;
    }
    return -EIO;
}

static inline unsigned long dma_get_seg_boundary(struct device *dev)
{
    if (dev->dma_parms && dev->dma_parms->segment_boundary_mask) {
        return dev->dma_parms->segment_boundary_mask;
    }
    return ULONG_MAX;
}

/**
 * dma_get_seg_boundary_nr_pages - return the segment boundary in "page" units
 * @dev: device to query the boundary for
 * @page_shift: ilog2() of the IOMMU page size
 *
 * Return the segment boundary in IOMMU page units (which may be different from
 * the CPU page size) for the passed in device.
 *
 * If @dev is NULL a boundary of U32_MAX is assumed; this case is just for
 * non-DMA API callers.
 */
static inline unsigned long dma_get_seg_boundary_nr_pages(struct device *dev, unsigned int page_shift)
{
    if (!dev) {
        return (U32_MAX >> page_shift) + 1;
    }
    return (dma_get_seg_boundary(dev) >> page_shift) + 1;
}

static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
{
    if (dev->dma_parms) {
        dev->dma_parms->segment_boundary_mask = mask;
        return 0;
    }
    return -EIO;
}

static inline unsigned int dma_get_min_align_mask(struct device *dev)
{
    if (dev->dma_parms) {
        return dev->dma_parms->min_align_mask;
    }
    return 0;
}

static inline int dma_set_min_align_mask(struct device *dev, unsigned int min_align_mask)
{
    if (WARN_ON_ONCE(!dev->dma_parms)) {
        return -EIO;
    }
    dev->dma_parms->min_align_mask = min_align_mask;
    return 0;
}

static inline int dma_get_cache_alignment(void)
{
#ifdef ARCH_DMA_MINALIGN
    return ARCH_DMA_MINALIGN;
#endif
    return 1;
}

static inline void *dmam_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp)
{
    return dmam_alloc_attrs(dev, size, dma_handle, gfp, (gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
}

static inline void *dma_alloc_wc(struct device *dev, size_t size, dma_addr_t *dma_addr, gfp_t gfp)
{
    unsigned long attrs = DMA_ATTR_WRITE_COMBINE;

    if (gfp & __GFP_NOWARN) {
        attrs |= DMA_ATTR_NO_WARN;
    }

    return dma_alloc_attrs(dev, size, dma_addr, gfp, attrs);
}

static inline void dma_free_wc(struct device *dev, size_t size, void *cpu_addr, dma_addr_t dma_addr)
{
    return dma_free_attrs(dev, size, cpu_addr, dma_addr, DMA_ATTR_WRITE_COMBINE);
}
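
/*
 * Illustrative write-combined allocation sketch (hypothetical frame-buffer
 * names): write combining suits buffers the CPU mostly fills sequentially
 * and rarely reads back, such as display scanout memory:
 *
 *    dma_addr_t fb_dma;
 *    void *fb = dma_alloc_wc(dev, fb_size, &fb_dma, GFP_KERNEL);
 *
 *    if (!fb)
 *        return -ENOMEM;
 *    memset(fb, 0, fb_size);
 *    dma_free_wc(dev, fb_size, fb, fb_dma);
 */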

static inline int dma_mmap_wc(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, dma_addr_t dma_addr,
                              size_t size)
{
    return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, DMA_ATTR_WRITE_COMBINE);
}

#ifdef CONFIG_NEED_DMA_MAP_STATE
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME) __u32 LEN_NAME
#define dma_unmap_addr(PTR, ADDR_NAME) ((PTR)->ADDR_NAME)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) (((PTR)->ADDR_NAME) = (VAL))
#define dma_unmap_len(PTR, LEN_NAME) ((PTR)->LEN_NAME)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL) (((PTR)->LEN_NAME) = (VAL))
#else
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
#define dma_unmap_addr(PTR, ADDR_NAME) (0)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)                                                                        \
    do {                                                                                                               \
    } while (0)
#define dma_unmap_len(PTR, LEN_NAME) (0)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)                                                                          \
    do {                                                                                                               \
    } while (0)
#endif
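
/*
 * Illustrative sketch (hypothetical structure and field names): the macros
 * above let a driver keep per-buffer unmap state that compiles away when
 * CONFIG_NEED_DMA_MAP_STATE is not set:
 *
 *    struct tx_desc {
 *        DEFINE_DMA_UNMAP_ADDR(mapping);
 *        DEFINE_DMA_UNMAP_LEN(len);
 *    };
 *
 *    dma_unmap_addr_set(desc, mapping, handle);
 *    dma_unmap_len_set(desc, len, size);
 *
 *    dma_unmap_single(dev, dma_unmap_addr(desc, mapping),
 *                     dma_unmap_len(desc, len), DMA_TO_DEVICE);
 */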

/*
 * Legacy interface to set up the dma offset map.  Drivers really should not
 * actually use it, but we have a few legacy cases left.
 */
int dma_direct_set_offset(struct device *dev, phys_addr_t cpu_start, dma_addr_t dma_start, u64 size);

extern const struct dma_map_ops dma_virt_ops;

#endif /* _LINUX_DMA_MAPPING_H */