// SPDX-License-Identifier: GPL-2.0
/*
 * DMABUF CMA heap exporter
 *
 * Copyright (C) 2012, 2019, 2020 Linaro Ltd.
 * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
 *
 * Also utilizing parts of Andrew Davis' SRAM heap:
 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
 *    Andrew F. Davis <afd@ti.com>
 */
#include <linux/cma.h>
#include <linux/dma-buf.h>
#include <linux/dma-heap.h>
#include <linux/dma-map-ops.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

struct cma_heap {
    struct dma_heap *heap;
    struct cma *cma;
};

struct cma_heap_buffer {
    struct cma_heap *heap;
    struct list_head attachments;
    struct mutex lock;
    unsigned long len;
    struct page *cma_pages;
    struct page **pages;
    pgoff_t pagecount;
    int vmap_cnt;
    void *vaddr;
};

struct dma_heap_attachment {
    struct device *dev;
    struct sg_table table;
    struct list_head list;
    bool mapped;
};

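/*
 * Build a per-attachment scatter/gather table over the buffer's backing
 * pages and track the attachment so CPU-access syncs can reach it.
 */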
static int cma_heap_attach(struct dma_buf *dmabuf, struct dma_buf_attachment *attachment)
{
    struct cma_heap_buffer *buffer = dmabuf->priv;
    struct dma_heap_attachment *a;
    int ret;

    a = kzalloc(sizeof(*a), GFP_KERNEL);
    if (!a) {
        return -ENOMEM;
    }

    ret = sg_alloc_table_from_pages(&a->table, buffer->pages, buffer->pagecount, 0, buffer->pagecount << PAGE_SHIFT,
                                    GFP_KERNEL);
    if (ret) {
        kfree(a);
        return ret;
    }

    a->dev = attachment->dev;
    INIT_LIST_HEAD(&a->list);
    a->mapped = false;

    attachment->priv = a;

    mutex_lock(&buffer->lock);
    list_add(&a->list, &buffer->attachments);
    mutex_unlock(&buffer->lock);

    return 0;
}

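/* Drop the attachment from the buffer's list and free its sg table. */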
static void cma_heap_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attachment)
{
    struct cma_heap_buffer *buffer = dmabuf->priv;
    struct dma_heap_attachment *a = attachment->priv;

    mutex_lock(&buffer->lock);
    list_del(&a->list);
    mutex_unlock(&buffer->lock);

    sg_free_table(&a->table);
    kfree(a);
}

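/* Map the attachment's sg table into the attaching device's DMA address space. */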
static struct sg_table *cma_heap_map_dma_buf(struct dma_buf_attachment *attachment, enum dma_data_direction direction)
{
    struct dma_heap_attachment *a = attachment->priv;
    struct sg_table *table = &a->table;
    int ret;

    ret = dma_map_sgtable(attachment->dev, table, direction, 0);
    if (ret) {
        return ERR_PTR(-ENOMEM);
    }
    a->mapped = true;
    return table;
}

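/* Undo cma_heap_map_dma_buf(): mark the attachment idle and unmap its sg table. */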
static void cma_heap_unmap_dma_buf(struct dma_buf_attachment *attachment, struct sg_table *table,
                                   enum dma_data_direction direction)
{
    struct dma_heap_attachment *a = attachment->priv;

    a->mapped = false;
    dma_unmap_sgtable(attachment->dev, table, direction, 0);
}

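/*
 * Prepare the buffer for CPU access: invalidate any kernel vmap range
 * and sync every currently device-mapped attachment for the CPU.
 */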
static int cma_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, enum dma_data_direction direction)
{
    struct cma_heap_buffer *buffer = dmabuf->priv;
    struct dma_heap_attachment *a;

    if (buffer->vmap_cnt) {
        invalidate_kernel_vmap_range(buffer->vaddr, buffer->len);
    }

    mutex_lock(&buffer->lock);
    list_for_each_entry(a, &buffer->attachments, list)
    {
        if (!a->mapped) {
            continue;
        }
        dma_sync_sgtable_for_cpu(a->dev, &a->table, direction);
    }
    mutex_unlock(&buffer->lock);

    return 0;
}

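/*
 * Finish CPU access: flush any kernel vmap range and sync every
 * currently device-mapped attachment back for device access.
 */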
static int cma_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf, enum dma_data_direction direction)
{
    struct cma_heap_buffer *buffer = dmabuf->priv;
    struct dma_heap_attachment *a;

    if (buffer->vmap_cnt) {
        flush_kernel_vmap_range(buffer->vaddr, buffer->len);
    }

    mutex_lock(&buffer->lock);
    list_for_each_entry(a, &buffer->attachments, list)
    {
        if (!a->mapped) {
            continue;
        }
        dma_sync_sgtable_for_device(a->dev, &a->table, direction);
    }
    mutex_unlock(&buffer->lock);

    return 0;
}

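/*
 * Fault handler for userspace mmaps: valid offsets are 0..pagecount - 1,
 * so anything past the buffer gets SIGBUS; otherwise the fault is backed
 * by the corresponding CMA page.
 */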
static vm_fault_t cma_heap_vm_fault(struct vm_fault *vmf)
{
    struct vm_area_struct *vma = vmf->vma;
    struct cma_heap_buffer *buffer = vma->vm_private_data;

    if (vmf->pgoff >= buffer->pagecount) {
        return VM_FAULT_SIGBUS;
    }

    vmf->page = buffer->pages[vmf->pgoff];
    get_page(vmf->page);

    return 0;
}

static const struct vm_operations_struct dma_heap_vm_ops = {
    .fault = cma_heap_vm_fault,
};

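/* Only shared mappings make sense for a dma-buf; pages are filled in on fault. */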
static int cma_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
    struct cma_heap_buffer *buffer = dmabuf->priv;

    if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0) {
        return -EINVAL;
    }

    vma->vm_ops = &dma_heap_vm_ops;
    vma->vm_private_data = buffer;

    return 0;
}

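/* Map the buffer's pages into one contiguous kernel virtual range. */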
static void *cma_heap_do_vmap(struct cma_heap_buffer *buffer)
{
    void *vaddr;

    vaddr = vmap(buffer->pages, buffer->pagecount, VM_MAP, PAGE_KERNEL);
    if (!vaddr) {
        return ERR_PTR(-ENOMEM);
    }

    return vaddr;
}

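/*
 * Return a kernel mapping of the buffer, creating it on first use and
 * refcounting it via vmap_cnt thereafter.
 */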
static void *cma_heap_vmap(struct dma_buf *dmabuf)
{
    struct cma_heap_buffer *buffer = dmabuf->priv;
    void *vaddr;

    mutex_lock(&buffer->lock);
    if (buffer->vmap_cnt) {
        buffer->vmap_cnt++;
        vaddr = buffer->vaddr;
        goto out;
    }

    vaddr = cma_heap_do_vmap(buffer);
    if (IS_ERR(vaddr)) {
        goto out;
    }

    buffer->vaddr = vaddr;
    buffer->vmap_cnt++;
out:
    mutex_unlock(&buffer->lock);

    return vaddr;
}

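/* Drop one kernel-mapping reference; tear the mapping down on the last put. */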
static void cma_heap_vunmap(struct dma_buf *dmabuf, void *vaddr)
{
    struct cma_heap_buffer *buffer = dmabuf->priv;

    mutex_lock(&buffer->lock);
    if (!--buffer->vmap_cnt) {
        vunmap(buffer->vaddr);
        buffer->vaddr = NULL;
    }
    mutex_unlock(&buffer->lock);
}

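/*
 * Called once the last reference to the dma-buf is gone: warn about a
 * leaked kernel mapping, then return the pages to the CMA area.
 */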
static void cma_heap_dma_buf_release(struct dma_buf *dmabuf)
{
    struct cma_heap_buffer *buffer = dmabuf->priv;
    struct cma_heap *cma_heap = buffer->heap;

    if (buffer->vmap_cnt > 0) {
        WARN(1, "%s: buffer still mapped in the kernel\n", __func__);
        vunmap(buffer->vaddr);
    }

    /* free page list */
    kfree(buffer->pages);
    /* release memory */
    cma_release(cma_heap->cma, buffer->cma_pages, buffer->pagecount);
    kfree(buffer);
}

static const struct dma_buf_ops cma_heap_buf_ops = {
    .attach = cma_heap_attach,
    .detach = cma_heap_detach,
    .map_dma_buf = cma_heap_map_dma_buf,
    .unmap_dma_buf = cma_heap_unmap_dma_buf,
    .begin_cpu_access = cma_heap_dma_buf_begin_cpu_access,
    .end_cpu_access = cma_heap_dma_buf_end_cpu_access,
    .mmap = cma_heap_mmap,
    .vmap = cma_heap_vmap,
    .vunmap = cma_heap_vunmap,
    .release = cma_heap_dma_buf_release,
};

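/*
 * Allocate a physically contiguous, zeroed buffer from the heap's CMA
 * area, build the page array used by attach/mmap/vmap, and export the
 * result as a dma-buf.
 */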
static struct dma_buf *cma_heap_allocate(struct dma_heap *heap, unsigned long len, unsigned long fd_flags,
                                         unsigned long heap_flags)
{
    struct cma_heap *cma_heap = dma_heap_get_drvdata(heap);
    struct cma_heap_buffer *buffer;
    DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
    size_t size = PAGE_ALIGN(len);
    pgoff_t pagecount = size >> PAGE_SHIFT;
    unsigned long align = get_order(size);
    struct page *cma_pages;
    struct dma_buf *dmabuf;
    int ret = -ENOMEM;
    pgoff_t pg;

    buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
    if (!buffer) {
        return ERR_PTR(-ENOMEM);
    }

    INIT_LIST_HEAD(&buffer->attachments);
    mutex_init(&buffer->lock);
    buffer->len = size;

    if (align > CONFIG_CMA_ALIGNMENT) {
        align = CONFIG_CMA_ALIGNMENT;
    }

    cma_pages = cma_alloc(cma_heap->cma, pagecount, align, GFP_KERNEL);
    if (!cma_pages) {
        goto free_buffer;
    }

    /* Clear the cma pages */
    if (PageHighMem(cma_pages)) {
        unsigned long nr_clear_pages = pagecount;
        struct page *page = cma_pages;

        while (nr_clear_pages > 0) {
            void *vaddr = kmap_atomic(page);

            memset(vaddr, 0, PAGE_SIZE);
            kunmap_atomic(vaddr);
            /*
             * Avoid wasting time zeroing memory if the process
             * has been killed by SIGKILL.
             */
            if (fatal_signal_pending(current)) {
                goto free_cma;
            }
            page++;
            nr_clear_pages--;
        }
    } else {
        memset(page_address(cma_pages), 0, size);
    }

    buffer->pages = kmalloc_array(pagecount, sizeof(*buffer->pages), GFP_KERNEL);
    if (!buffer->pages) {
        ret = -ENOMEM;
        goto free_cma;
    }

    for (pg = 0; pg < pagecount; pg++) {
        buffer->pages[pg] = &cma_pages[pg];
    }

    buffer->cma_pages = cma_pages;
    buffer->heap = cma_heap;
    buffer->pagecount = pagecount;

    /* create the dmabuf */
    exp_info.exp_name = dma_heap_get_name(heap);
    exp_info.ops = &cma_heap_buf_ops;
    exp_info.size = buffer->len;
    exp_info.flags = fd_flags;
    exp_info.priv = buffer;
    dmabuf = dma_buf_export(&exp_info);
    if (IS_ERR(dmabuf)) {
        ret = PTR_ERR(dmabuf);
        goto free_pages;
    }

    return dmabuf;

free_pages:
    kfree(buffer->pages);
free_cma:
    cma_release(cma_heap->cma, cma_pages, pagecount);
free_buffer:
    kfree(buffer);

    return ERR_PTR(ret);
}

static const struct dma_heap_ops cma_heap_ops = {
    .allocate = cma_heap_allocate,
};

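/* Register a single CMA area as a dma-buf heap. */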
static int _add_cma_heap(struct cma *cma, void *data)
{
    struct cma_heap *cma_heap;
    struct dma_heap_export_info exp_info;

    cma_heap = kzalloc(sizeof(*cma_heap), GFP_KERNEL);
    if (!cma_heap) {
        return -ENOMEM;
    }
    cma_heap->cma = cma;

    exp_info.name = cma_get_name(cma);
    exp_info.ops = &cma_heap_ops;
    exp_info.priv = cma_heap;

    cma_heap->heap = dma_heap_add(&exp_info);
    if (IS_ERR(cma_heap->heap)) {
        int ret = PTR_ERR(cma_heap->heap);

        kfree(cma_heap);
        return ret;
    }

    return 0;
}

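/* Expose the default CMA area (if the platform has one) as a heap at init. */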
static int add_default_cma_heap(void)
{
    struct cma *default_cma = dev_get_cma_area(NULL);
    int ret = 0;

    if (default_cma) {
        ret = _add_cma_heap(default_cma, NULL);
    }

    return ret;
}
module_init(add_default_cma_heap);
MODULE_DESCRIPTION("DMA-BUF CMA Heap");
MODULE_LICENSE("GPL v2");