/* SPDX-License-Identifier: GPL-2.0 */
/*
 * This header is for implementations of dma_map_ops and related code.
 * It should not be included in drivers just using the DMA API.
 */
#ifndef _LINUX_DMA_MAP_OPS_H
#define _LINUX_DMA_MAP_OPS_H

#include <linux/dma-mapping.h>
#include <linux/pgtable.h>
#include <linux/android_kabi.h>

struct cma;

struct dma_map_ops {
    void *(*alloc)(struct device *dev, size_t size, dma_addr_t *dma_handle,
                   gfp_t gfp, unsigned long attrs);
    void (*free)(struct device *dev, size_t size, void *vaddr,
                 dma_addr_t dma_handle, unsigned long attrs);
    struct page *(*alloc_pages)(struct device *dev, size_t size,
                                dma_addr_t *dma_handle,
                                enum dma_data_direction dir, gfp_t gfp);
    void (*free_pages)(struct device *dev, size_t size, struct page *vaddr,
                       dma_addr_t dma_handle, enum dma_data_direction dir);
    void *(*alloc_noncoherent)(struct device *dev, size_t size,
                               dma_addr_t *dma_handle,
                               enum dma_data_direction dir, gfp_t gfp);
    void (*free_noncoherent)(struct device *dev, size_t size, void *vaddr,
                             dma_addr_t dma_handle,
                             enum dma_data_direction dir);
    int (*mmap)(struct device *, struct vm_area_struct *, void *, dma_addr_t,
                size_t, unsigned long attrs);

    int (*get_sgtable)(struct device *dev, struct sg_table *sgt, void *cpu_addr,
                       dma_addr_t dma_addr, size_t size, unsigned long attrs);

    dma_addr_t (*map_page)(struct device *dev, struct page *page,
                           unsigned long offset, size_t size,
                           enum dma_data_direction dir, unsigned long attrs);
    void (*unmap_page)(struct device *dev, dma_addr_t dma_handle, size_t size,
                       enum dma_data_direction dir, unsigned long attrs);
    /*
     * map_sg returns 0 on error and a value > 0 on success.
     * It should never return a value < 0.
     */
    int (*map_sg)(struct device *dev, struct scatterlist *sg, int nents,
                  enum dma_data_direction dir, unsigned long attrs);
    void (*unmap_sg)(struct device *dev, struct scatterlist *sg, int nents,
                     enum dma_data_direction dir, unsigned long attrs);
    dma_addr_t (*map_resource)(struct device *dev, phys_addr_t phys_addr,
                               size_t size, enum dma_data_direction dir,
                               unsigned long attrs);
    void (*unmap_resource)(struct device *dev, dma_addr_t dma_handle,
                           size_t size, enum dma_data_direction dir,
                           unsigned long attrs);
    void (*sync_single_for_cpu)(struct device *dev, dma_addr_t dma_handle,
                                size_t size, enum dma_data_direction dir);
    void (*sync_single_for_device)(struct device *dev, dma_addr_t dma_handle,
                                   size_t size, enum dma_data_direction dir);
    void (*sync_sg_for_cpu)(struct device *dev, struct scatterlist *sg,
                            int nents, enum dma_data_direction dir);
    void (*sync_sg_for_device)(struct device *dev, struct scatterlist *sg,
                               int nents, enum dma_data_direction dir);
    void (*cache_sync)(struct device *dev, void *vaddr, size_t size,
                       enum dma_data_direction direction);
    int (*dma_supported)(struct device *dev, u64 mask);
    u64 (*get_required_mask)(struct device *dev);
    size_t (*max_mapping_size)(struct device *dev);
    unsigned long (*get_merge_boundary)(struct device *dev);

    ANDROID_KABI_RESERVE(1);
    ANDROID_KABI_RESERVE(2);
    ANDROID_KABI_RESERVE(3);
    ANDROID_KABI_RESERVE(4);
};
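
/*
 * A minimal sketch of an ops table, assuming a bus whose device-visible
 * addresses equal CPU physical addresses; the "foo_" names below are
 * hypothetical and not part of this header:
 *
 *   static dma_addr_t foo_map_page(struct device *dev, struct page *page,
 *                                  unsigned long offset, size_t size,
 *                                  enum dma_data_direction dir,
 *                                  unsigned long attrs)
 *   {
 *       return page_to_phys(page) + offset;
 *   }
 *
 *   static const struct dma_map_ops foo_dma_ops = {
 *       .map_page = foo_map_page,
 *   };
 *
 * Hooks left NULL are either skipped or make the corresponding DMA API
 * call fail; see kernel/dma/mapping.c for the per-hook behavior.
 */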

#ifdef CONFIG_DMA_OPS
#include <asm/dma-mapping.h>

static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
    if (dev->dma_ops)
        return dev->dma_ops;
    return get_arch_dma_ops(dev->bus);
}

static inline void set_dma_ops(struct device *dev,
                               const struct dma_map_ops *dma_ops)
{
    dev->dma_ops = dma_ops;
}
#else  /* CONFIG_DMA_OPS */
static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
    return NULL;
}
static inline void set_dma_ops(struct device *dev,
                               const struct dma_map_ops *dma_ops)
{
}
#endif /* CONFIG_DMA_OPS */
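
/*
 * Typical use from bus or platform code (sketch; "foo_dma_ops" is the
 * hypothetical table from the example above):
 *
 *   set_dma_ops(dev, &foo_dma_ops);
 *
 * With CONFIG_DMA_OPS=n both helpers compile away and every device
 * takes the dma-direct path.
 */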

#ifdef CONFIG_DMA_CMA
extern struct cma *dma_contiguous_default_area;

static inline struct cma *dev_get_cma_area(struct device *dev)
{
    if (dev && dev->cma_area)
        return dev->cma_area;
    return dma_contiguous_default_area;
}

void dma_contiguous_reserve(phys_addr_t addr_limit);
int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
                                       phys_addr_t limit, struct cma **res_cma,
                                       bool fixed);

struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
                                       unsigned int order, bool no_warn);
bool dma_release_from_contiguous(struct device *dev, struct page *pages,
                                 int count);
struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp);
void dma_free_contiguous(struct device *dev, struct page *page, size_t size);

void dma_contiguous_early_fixup(phys_addr_t base, unsigned long size);
#else  /* CONFIG_DMA_CMA */
static inline struct cma *dev_get_cma_area(struct device *dev)
{
    return NULL;
}
static inline void dma_contiguous_reserve(phys_addr_t limit)
{
}
static inline int dma_contiguous_reserve_area(phys_addr_t size,
                                              phys_addr_t base,
                                              phys_addr_t limit,
                                              struct cma **res_cma, bool fixed)
{
    return -ENOSYS;
}
static inline struct page *dma_alloc_from_contiguous(struct device *dev,
                                                     size_t count,
                                                     unsigned int order,
                                                     bool no_warn)
{
    return NULL;
}
static inline bool dma_release_from_contiguous(struct device *dev,
                                               struct page *pages, int count)
{
    return false;
}
/* Use fallback alloc() and free() when CONFIG_DMA_CMA=n */
static inline struct page *dma_alloc_contiguous(struct device *dev, size_t size,
                                                gfp_t gfp)
{
    return NULL;
}
static inline void dma_free_contiguous(struct device *dev, struct page *page,
                                       size_t size)
{
    __free_pages(page, get_order(size));
}
#endif /* CONFIG_DMA_CMA */
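
/*
 * Allocator backends pair these helpers with a plain page-allocation
 * fallback, roughly as dma-direct does (sketch; error handling
 * omitted). The stub dma_alloc_contiguous() above returns NULL when
 * CONFIG_DMA_CMA=n, so the fallback still runs:
 *
 *   struct page *page = dma_alloc_contiguous(dev, size, gfp);
 *
 *   if (!page)
 *       page = alloc_pages(gfp, get_order(size));
 *   ...
 *   dma_free_contiguous(dev, page, size);
 */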

#ifdef CONFIG_DMA_PERNUMA_CMA
void dma_pernuma_cma_reserve(void);
#else
static inline void dma_pernuma_cma_reserve(void)
{
}
#endif /* CONFIG_DMA_PERNUMA_CMA */

#ifdef CONFIG_DMA_DECLARE_COHERENT
int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
                                dma_addr_t device_addr, size_t size);
int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
                                dma_addr_t *dma_handle, void **ret);
int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr);
int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
                               void *cpu_addr, size_t size, int *ret);

void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size,
                                     dma_addr_t *dma_handle);
int dma_release_from_global_coherent(int order, void *vaddr);
int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr,
                                  size_t size, int *ret);

#else
static inline int dma_declare_coherent_memory(struct device *dev,
                                              phys_addr_t phys_addr,
                                              dma_addr_t device_addr,
                                              size_t size)
{
    return -ENOSYS;
}
#define dma_alloc_from_dev_coherent(dev, size, handle, ret) (0)
#define dma_release_from_dev_coherent(dev, order, vaddr) (0)
#define dma_mmap_from_dev_coherent(dev, vma, vaddr, order, ret) (0)

static inline void *dma_alloc_from_global_coherent(struct device *dev,
                                                   ssize_t size,
                                                   dma_addr_t *dma_handle)
{
    return NULL;
}
static inline int dma_release_from_global_coherent(int order, void *vaddr)
{
    return 0;
}
static inline int dma_mmap_from_global_coherent(struct vm_area_struct *vma,
                                                void *cpu_addr, size_t size,
                                                int *ret)
{
    return 0;
}
#endif /* CONFIG_DMA_DECLARE_COHERENT */
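
/*
 * The dev-coherent hooks return nonzero when the device owns a
 * per-device pool and the request was consumed there, so callers try
 * them first, e.g. (sketch along the lines of dma_alloc_attrs()):
 *
 *   void *cpu_addr;
 *
 *   if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
 *       return cpu_addr;  (may be NULL if the pool is exhausted)
 *   ... fall through to the regular allocator ...
 */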

int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
                           void *cpu_addr, dma_addr_t dma_addr, size_t size,
                           unsigned long attrs);
int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
                    void *cpu_addr, dma_addr_t dma_addr, size_t size,
                    unsigned long attrs);
struct page *dma_common_alloc_pages(struct device *dev, size_t size,
                                    dma_addr_t *dma_handle,
                                    enum dma_data_direction dir, gfp_t gfp);
void dma_common_free_pages(struct device *dev, size_t size, struct page *vaddr,
                           dma_addr_t dma_handle, enum dma_data_direction dir);

struct page **dma_common_find_pages(void *cpu_addr);
void *dma_common_contiguous_remap(struct page *page, size_t size, pgprot_t prot,
                                  const void *caller);
void *dma_common_pages_remap(struct page **pages, size_t size, pgprot_t prot,
                             const void *caller);
void dma_common_free_remap(void *cpu_addr, size_t size);

struct page *dma_alloc_from_pool(struct device *dev, size_t size,
                                 void **cpu_addr, gfp_t flags,
                                 bool (*phys_addr_ok)(struct device *,
                                                      phys_addr_t, size_t));
bool dma_free_from_pool(struct device *dev, void *start, size_t size);
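
/*
 * The atomic pools serve allocations from non-blocking contexts; a
 * sketch of how an alloc path might use them (a NULL phys_addr_ok
 * accepts any physical address, and dma_free_from_pool() returns false
 * when the address did not come from a pool):
 *
 *   if (!gfpflags_allow_blocking(gfp))
 *       page = dma_alloc_from_pool(dev, size, &cpu_addr, gfp, NULL);
 *   ...
 *   if (!dma_free_from_pool(dev, cpu_addr, size))
 *       ... free through the regular path instead ...
 */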

#ifdef CONFIG_ARCH_HAS_DMA_COHERENCE_H
#include <asm/dma-coherence.h>
#elif defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
static inline bool dev_is_dma_coherent(struct device *dev)
{
    return dev->dma_coherent;
}
#else
static inline bool dev_is_dma_coherent(struct device *dev)
{
    return true;
}
#endif /* CONFIG_ARCH_HAS_DMA_COHERENCE_H */
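
/*
 * Arch and IOMMU code consult this per-device flag to decide whether
 * cache maintenance can be skipped, e.g. (sketch):
 *
 *   if (!dev_is_dma_coherent(dev))
 *       arch_sync_dma_for_device(paddr, size, dir);
 */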

void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
                     gfp_t gfp, unsigned long attrs);
void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
                   dma_addr_t dma_addr, unsigned long attrs);

#ifdef CONFIG_MMU
/*
 * Page protection so that devices that can't snoop CPU caches can use the
 * memory coherently.  We default to pgprot_noncached, which is usually used
 * for ioremap, as a safe bet, but architectures can override this with less
 * strict semantics if possible.
 */
#ifndef pgprot_dmacoherent
#define pgprot_dmacoherent(prot) pgprot_noncached(prot)
#endif

/*
 * If there is no system-cache pgprot, fall back to the dmacoherent pgprot,
 * as the expectation is that the device is not coherent.
 */
#ifndef pgprot_syscached
#define pgprot_syscached(prot) pgprot_dmacoherent(prot)
#endif

pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs);
#else
static inline pgprot_t dma_pgprot(struct device *dev, pgprot_t prot,
                                  unsigned long attrs)
{
    return prot; /* no protection bits supported without page tables */
}
#endif /* CONFIG_MMU */
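
/*
 * dma_pgprot() is what mmap and remap implementations use to pick the
 * vma protection, e.g. (sketch of what dma_common_mmap() does):
 *
 *   vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);
 */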

#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
                              enum dma_data_direction dir);
#else
static inline void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
                                            enum dma_data_direction dir)
{
}
#endif /* CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE */

#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
                           enum dma_data_direction dir);
#else
static inline void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
                                         enum dma_data_direction dir)
{
}
#endif /* CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU */

#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL
void arch_sync_dma_for_cpu_all(void);
#else
static inline void arch_sync_dma_for_cpu_all(void)
{
}
#endif /* CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL */
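
/*
 * Non-coherent streaming DMA brackets device access with these hooks,
 * roughly as the dma-direct path does (sketch; paddr is the buffer's
 * physical address):
 *
 *   map / sync_for_device:
 *       arch_sync_dma_for_device(paddr, size, dir);
 *   unmap / sync_for_cpu:
 *       arch_sync_dma_for_cpu(paddr, size, dir);
 *       arch_sync_dma_for_cpu_all();
 */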

#ifdef CONFIG_ARCH_HAS_DMA_PREP_COHERENT
void arch_dma_prep_coherent(struct page *page, size_t size);
#else
static inline void arch_dma_prep_coherent(struct page *page, size_t size)
{
}
#endif /* CONFIG_ARCH_HAS_DMA_PREP_COHERENT */

#ifdef CONFIG_ARCH_HAS_DMA_MARK_CLEAN
void arch_dma_mark_clean(phys_addr_t paddr, size_t size);
#else
static inline void arch_dma_mark_clean(phys_addr_t paddr, size_t size)
{
}
#endif /* CONFIG_ARCH_HAS_DMA_MARK_CLEAN */

void *arch_dma_set_uncached(void *addr, size_t size);
void arch_dma_clear_uncached(void *addr, size_t size);

#ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
                        const struct iommu_ops *iommu, bool coherent);
#else
static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
                                      u64 size, const struct iommu_ops *iommu,
                                      bool coherent)
{
}
#endif /* CONFIG_ARCH_HAS_SETUP_DMA_OPS */

#ifdef CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS
void arch_teardown_dma_ops(struct device *dev);
#else
static inline void arch_teardown_dma_ops(struct device *dev)
{
}
#endif /* CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS */

#ifdef CONFIG_DMA_API_DEBUG
void dma_debug_add_bus(struct bus_type *bus);
void debug_dma_dump_mappings(struct device *dev);
#else
static inline void dma_debug_add_bus(struct bus_type *bus)
{
}
static inline void debug_dma_dump_mappings(struct device *dev)
{
}
#endif /* CONFIG_DMA_API_DEBUG */

extern const struct dma_map_ops dma_dummy_ops;

#endif /* _LINUX_DMA_MAP_OPS_H */