// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
 * Author:Mark Yao <mark.yao@rock-chips.com>
 */

#include <linux/dma-buf.h>
#include <linux/iommu.h>
#include <linux/vmalloc.h>

#include <drm/drm.h>
#include <drm/drm_gem.h>
#include <drm/drm_prime.h>
#include <drm/drm_vma_manager.h>

#include "rockchip_drm_drv.h"
#include "rockchip_drm_gem.h"

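/*
 * Reserve an I/O virtual address range from the driver's drm_mm allocator and
 * map the object's backing pages into the shared IOMMU domain.
 */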
static int rockchip_gem_iommu_map(struct rockchip_gem_object *rk_obj)
{
	struct drm_device *drm = rk_obj->base.dev;
	struct rockchip_drm_private *private = drm->dev_private;
	int prot = IOMMU_READ | IOMMU_WRITE;
	ssize_t ret;

	mutex_lock(&private->mm_lock);
	ret = drm_mm_insert_node_generic(&private->mm, &rk_obj->mm,
					 rk_obj->base.size, PAGE_SIZE,
					 0, 0);
	mutex_unlock(&private->mm_lock);

	if (ret < 0) {
		DRM_ERROR("out of I/O virtual memory: %zd\n", ret);
		return ret;
	}

	rk_obj->dma_addr = rk_obj->mm.start;

	ret = iommu_map_sgtable(private->domain, rk_obj->dma_addr, rk_obj->sgt,
				prot);
	if (ret < (ssize_t)rk_obj->base.size) {
		DRM_ERROR("failed to map buffer: size=%zd request_size=%zd\n",
			  ret, rk_obj->base.size);
		ret = -ENOMEM;
		goto err_remove_node;
	}

	rk_obj->size = ret;

	return 0;

err_remove_node:
	mutex_lock(&private->mm_lock);
	drm_mm_remove_node(&rk_obj->mm);
	mutex_unlock(&private->mm_lock);

	return ret;
}

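/*
 * Undo rockchip_gem_iommu_map(): unmap the buffer from the IOMMU domain and
 * give the I/O virtual address range back to the drm_mm allocator.
 */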
static int rockchip_gem_iommu_unmap(struct rockchip_gem_object *rk_obj)
{
	struct drm_device *drm = rk_obj->base.dev;
	struct rockchip_drm_private *private = drm->dev_private;

	iommu_unmap(private->domain, rk_obj->dma_addr, rk_obj->size);

	mutex_lock(&private->mm_lock);

	drm_mm_remove_node(&rk_obj->mm);

	mutex_unlock(&private->mm_lock);

	return 0;
}

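/*
 * Allocate shmem-backed pages for the object, build an sg_table for them and
 * flush the pages so the device sees up-to-date data.
 */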
static int rockchip_gem_get_pages(struct rockchip_gem_object *rk_obj)
{
	struct drm_device *drm = rk_obj->base.dev;
	int ret, i;
	struct scatterlist *s;

	rk_obj->pages = drm_gem_get_pages(&rk_obj->base);
	if (IS_ERR(rk_obj->pages))
		return PTR_ERR(rk_obj->pages);

	rk_obj->num_pages = rk_obj->base.size >> PAGE_SHIFT;

	rk_obj->sgt = drm_prime_pages_to_sg(rk_obj->base.dev,
					    rk_obj->pages, rk_obj->num_pages);
	if (IS_ERR(rk_obj->sgt)) {
		ret = PTR_ERR(rk_obj->sgt);
		goto err_put_pages;
	}

	/*
	 * Fake up the SG table so that dma_sync_sg_for_device() can be used
	 * to flush the pages associated with it.
	 *
	 * TODO: Replace this by drm_clflush_sg() once it can be implemented
	 * without relying on symbols that are not exported.
	 */
	for_each_sgtable_sg(rk_obj->sgt, s, i)
		sg_dma_address(s) = sg_phys(s);

	dma_sync_sgtable_for_device(drm->dev, rk_obj->sgt, DMA_TO_DEVICE);

	return 0;

err_put_pages:
	drm_gem_put_pages(&rk_obj->base, rk_obj->pages, false, false);
	return ret;
}

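/* Release the sg_table and the pages allocated by rockchip_gem_get_pages(). */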
static void rockchip_gem_put_pages(struct rockchip_gem_object *rk_obj)
{
	sg_free_table(rk_obj->sgt);
	kfree(rk_obj->sgt);
	drm_gem_put_pages(&rk_obj->base, rk_obj->pages, true, true);
}

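/*
 * Allocate an IOMMU-backed buffer: get pages, map them into the IOMMU domain
 * and optionally create a write-combined kernel mapping.
 */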
static int rockchip_gem_alloc_iommu(struct rockchip_gem_object *rk_obj,
				    bool alloc_kmap)
{
	int ret;

	ret = rockchip_gem_get_pages(rk_obj);
	if (ret < 0)
		return ret;

	ret = rockchip_gem_iommu_map(rk_obj);
	if (ret < 0)
		goto err_free;

	if (alloc_kmap) {
		rk_obj->kvaddr = vmap(rk_obj->pages, rk_obj->num_pages, VM_MAP,
				      pgprot_writecombine(PAGE_KERNEL));
		if (!rk_obj->kvaddr) {
			DRM_ERROR("failed to vmap() buffer\n");
			ret = -ENOMEM;
			goto err_unmap;
		}
	}

	return 0;

err_unmap:
	rockchip_gem_iommu_unmap(rk_obj);
err_free:
	rockchip_gem_put_pages(rk_obj);

	return ret;
}

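/*
 * Allocate a physically contiguous buffer through the DMA mapping API, used
 * when no IOMMU domain is available. The kernel mapping is skipped unless
 * alloc_kmap is set.
 */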
static int rockchip_gem_alloc_dma(struct rockchip_gem_object *rk_obj,
				  bool alloc_kmap)
{
	struct drm_gem_object *obj = &rk_obj->base;
	struct drm_device *drm = obj->dev;

	rk_obj->dma_attrs = DMA_ATTR_WRITE_COMBINE;

	if (!alloc_kmap)
		rk_obj->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING;

	rk_obj->kvaddr = dma_alloc_attrs(drm->dev, obj->size,
					 &rk_obj->dma_addr, GFP_KERNEL,
					 rk_obj->dma_attrs);
	if (!rk_obj->kvaddr) {
		DRM_ERROR("failed to allocate %zu byte dma buffer\n", obj->size);
		return -ENOMEM;
	}

	return 0;
}

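/* Pick the allocation path depending on whether an IOMMU domain is attached. */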
static int rockchip_gem_alloc_buf(struct rockchip_gem_object *rk_obj,
				  bool alloc_kmap)
{
	struct drm_gem_object *obj = &rk_obj->base;
	struct drm_device *drm = obj->dev;
	struct rockchip_drm_private *private = drm->dev_private;

	if (private->domain)
		return rockchip_gem_alloc_iommu(rk_obj, alloc_kmap);
	else
		return rockchip_gem_alloc_dma(rk_obj, alloc_kmap);
}

static void rockchip_gem_free_iommu(struct rockchip_gem_object *rk_obj)
{
	vunmap(rk_obj->kvaddr);
	rockchip_gem_iommu_unmap(rk_obj);
	rockchip_gem_put_pages(rk_obj);
}

static void rockchip_gem_free_dma(struct rockchip_gem_object *rk_obj)
{
	struct drm_gem_object *obj = &rk_obj->base;
	struct drm_device *drm = obj->dev;

	dma_free_attrs(drm->dev, obj->size, rk_obj->kvaddr, rk_obj->dma_addr,
		       rk_obj->dma_attrs);
}

static void rockchip_gem_free_buf(struct rockchip_gem_object *rk_obj)
{
	if (rk_obj->pages)
		rockchip_gem_free_iommu(rk_obj);
	else
		rockchip_gem_free_dma(rk_obj);
}

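/* Map an IOMMU-backed (page-by-page) buffer into userspace. */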
static int rockchip_drm_gem_object_mmap_iommu(struct drm_gem_object *obj,
					      struct vm_area_struct *vma)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
	unsigned int count = obj->size >> PAGE_SHIFT;
	unsigned long user_count = vma_pages(vma);

	if (user_count == 0)
		return -ENXIO;

	return vm_map_pages(vma, rk_obj->pages, count);
}

static int rockchip_drm_gem_object_mmap_dma(struct drm_gem_object *obj,
					    struct vm_area_struct *vma)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
	struct drm_device *drm = obj->dev;

	return dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr,
			      obj->size, rk_obj->dma_attrs);
}

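/* Common mmap path shared by rockchip_gem_mmap() and rockchip_gem_mmap_buf(). */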
static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj,
					struct vm_area_struct *vma)
{
	int ret;
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);

	/*
	 * We allocated a struct page table for rk_obj, so clear the
	 * VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap().
	 */
	vma->vm_flags &= ~VM_PFNMAP;

	if (rk_obj->pages)
		ret = rockchip_drm_gem_object_mmap_iommu(obj, vma);
	else
		ret = rockchip_drm_gem_object_mmap_dma(obj, vma);

	return ret;
}

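/*
 * Set up a userspace mapping for an object the caller has already looked up;
 * the usual drm_gem_mmap() checks are applied via drm_gem_mmap_obj().
 */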
int rockchip_gem_mmap_buf(struct drm_gem_object *obj,
			  struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap_obj(obj, obj->size, vma);
	if (ret)
		return ret;

	return rockchip_drm_gem_object_mmap(obj, vma);
}

/* drm driver mmap file operations */
int rockchip_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_gem_object *obj;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret)
		return ret;

	/*
	 * Set vm_pgoff (used as a fake buffer offset by DRM) to 0 and map the
	 * whole buffer from the start.
	 */
	vma->vm_pgoff = 0;

	obj = vma->vm_private_data;

	return rockchip_drm_gem_object_mmap(obj, vma);
}

static void rockchip_gem_release_object(struct rockchip_gem_object *rk_obj)
{
	drm_gem_object_release(&rk_obj->base);
	kfree(rk_obj);
}

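/* Allocate and initialise the GEM object metadata; backing storage comes later. */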
static struct rockchip_gem_object *
	rockchip_gem_alloc_object(struct drm_device *drm, unsigned int size)
{
	struct rockchip_gem_object *rk_obj;
	struct drm_gem_object *obj;

	size = round_up(size, PAGE_SIZE);

	rk_obj = kzalloc(sizeof(*rk_obj), GFP_KERNEL);
	if (!rk_obj)
		return ERR_PTR(-ENOMEM);

	obj = &rk_obj->base;

	drm_gem_object_init(drm, obj, size);

	return rk_obj;
}

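/*
 * Allocate a rockchip GEM object together with its backing storage.
 * alloc_kmap additionally requests a kernel virtual mapping of the buffer.
 */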
struct rockchip_gem_object *
rockchip_gem_create_object(struct drm_device *drm, unsigned int size,
			   bool alloc_kmap)
{
	struct rockchip_gem_object *rk_obj;
	int ret;

	rk_obj = rockchip_gem_alloc_object(drm, size);
	if (IS_ERR(rk_obj))
		return rk_obj;

	ret = rockchip_gem_alloc_buf(rk_obj, alloc_kmap);
	if (ret)
		goto err_free_rk_obj;

	return rk_obj;

err_free_rk_obj:
	rockchip_gem_release_object(rk_obj);
	return ERR_PTR(ret);
}

/*
 * rockchip_gem_free_object - (struct drm_driver)->gem_free_object_unlocked
 * callback function
 */
void rockchip_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *drm = obj->dev;
	struct rockchip_drm_private *private = drm->dev_private;
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);

	if (obj->import_attach) {
		if (private->domain) {
			rockchip_gem_iommu_unmap(rk_obj);
		} else {
			dma_unmap_sgtable(drm->dev, rk_obj->sgt,
					  DMA_BIDIRECTIONAL, 0);
		}
		drm_prime_gem_destroy(obj, rk_obj->sgt);
	} else {
		rockchip_gem_free_buf(rk_obj);
	}

	rockchip_gem_release_object(rk_obj);
}

/*
 * rockchip_gem_create_with_handle - allocate an object with the given
 * size and create a gem handle on it
 *
 * Returns a struct rockchip_gem_object * on success or an ERR_PTR() value
 * on failure.
 */
static struct rockchip_gem_object *
rockchip_gem_create_with_handle(struct drm_file *file_priv,
				struct drm_device *drm, unsigned int size,
				unsigned int *handle)
{
	struct rockchip_gem_object *rk_obj;
	struct drm_gem_object *obj;
	int ret;

	rk_obj = rockchip_gem_create_object(drm, size, false);
	if (IS_ERR(rk_obj))
		return ERR_CAST(rk_obj);

	obj = &rk_obj->base;

	/*
	 * Allocate an ID in the IDR table where the object is registered;
	 * the handle returned to userspace is that ID.
	 */
	ret = drm_gem_handle_create(file_priv, obj, handle);
	if (ret)
		goto err_handle_create;

	/* Drop the reference from allocation - the handle holds it now. */
	drm_gem_object_put(obj);

	return rk_obj;

err_handle_create:
	rockchip_gem_free_object(obj);

	return ERR_PTR(ret);
}

/*
 * rockchip_gem_dumb_create - (struct drm_driver)->dumb_create callback
 * function
 *
 * This aligns the pitch and size arguments to the minimum required. Wrap
 * this in your own function if you need bigger alignment.
 */
int rockchip_gem_dumb_create(struct drm_file *file_priv,
			     struct drm_device *dev,
			     struct drm_mode_create_dumb *args)
{
	struct rockchip_gem_object *rk_obj;
	int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);

	/*
	 * Align the pitch to 64 bytes, since the Mali GPU requires it.
	 */
	args->pitch = ALIGN(min_pitch, 64);
	args->size = args->pitch * args->height;

	rk_obj = rockchip_gem_create_with_handle(file_priv, dev, args->size,
						 &args->handle);

	return PTR_ERR_OR_ZERO(rk_obj);
}

/*
 * Allocate a sg_table for this GEM object.
 * Note: Both the table's contents and the sg_table itself must be freed by
 *       the caller.
 * Returns a pointer to the newly allocated sg_table, or an ERR_PTR() error.
 */
struct sg_table *rockchip_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
	struct drm_device *drm = obj->dev;
	struct sg_table *sgt;
	int ret;

	if (rk_obj->pages)
		return drm_prime_pages_to_sg(obj->dev, rk_obj->pages, rk_obj->num_pages);

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	ret = dma_get_sgtable_attrs(drm->dev, sgt, rk_obj->kvaddr,
				    rk_obj->dma_addr, obj->size,
				    rk_obj->dma_attrs);
	if (ret) {
		DRM_ERROR("failed to allocate sgt, %d\n", ret);
		kfree(sgt);
		return ERR_PTR(ret);
	}

	return sgt;
}

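/* Import helper: map an imported sg_table through the shared IOMMU domain. */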
static int
rockchip_gem_iommu_map_sg(struct drm_device *drm,
			  struct dma_buf_attachment *attach,
			  struct sg_table *sg,
			  struct rockchip_gem_object *rk_obj)
{
	rk_obj->sgt = sg;
	return rockchip_gem_iommu_map(rk_obj);
}

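/*
 * Import helper for the no-IOMMU case: the imported buffer must map to a
 * contiguous range of DMA addresses.
 */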
static int
rockchip_gem_dma_map_sg(struct drm_device *drm,
			struct dma_buf_attachment *attach,
			struct sg_table *sg,
			struct rockchip_gem_object *rk_obj)
{
	int err = dma_map_sgtable(drm->dev, sg, DMA_BIDIRECTIONAL, 0);

	if (err)
		return err;

	if (drm_prime_get_contiguous_size(sg) < attach->dmabuf->size) {
		DRM_ERROR("failed to map sg_table to contiguous linear address.\n");
		dma_unmap_sgtable(drm->dev, sg, DMA_BIDIRECTIONAL, 0);
		return -EINVAL;
	}

	rk_obj->dma_addr = sg_dma_address(sg->sgl);
	rk_obj->sgt = sg;
	return 0;
}

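/*
 * Import a dma-buf sg_table: map it through the IOMMU domain when one exists,
 * otherwise require the sg_table to map to a contiguous DMA address range.
 */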
struct drm_gem_object *
rockchip_gem_prime_import_sg_table(struct drm_device *drm,
				   struct dma_buf_attachment *attach,
				   struct sg_table *sg)
{
	struct rockchip_drm_private *private = drm->dev_private;
	struct rockchip_gem_object *rk_obj;
	int ret;

	rk_obj = rockchip_gem_alloc_object(drm, attach->dmabuf->size);
	if (IS_ERR(rk_obj))
		return ERR_CAST(rk_obj);

	if (private->domain)
		ret = rockchip_gem_iommu_map_sg(drm, attach, sg, rk_obj);
	else
		ret = rockchip_gem_dma_map_sg(drm, attach, sg, rk_obj);

	if (ret < 0) {
		DRM_ERROR("failed to import sg table: %d\n", ret);
		goto err_free_rk_obj;
	}

	return &rk_obj->base;

err_free_rk_obj:
	rockchip_gem_release_object(rk_obj);
	return ERR_PTR(ret);
}

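/*
 * Return a kernel virtual address for the buffer: vmap() the pages when the
 * object is IOMMU-backed, otherwise reuse the DMA allocation's kernel mapping
 * (or NULL if none was requested).
 */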
void *rockchip_gem_prime_vmap(struct drm_gem_object *obj)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);

	if (rk_obj->pages)
		return vmap(rk_obj->pages, rk_obj->num_pages, VM_MAP,
			    pgprot_writecombine(PAGE_KERNEL));

	if (rk_obj->dma_attrs & DMA_ATTR_NO_KERNEL_MAPPING)
		return NULL;

	return rk_obj->kvaddr;
}

void rockchip_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);

	if (rk_obj->pages) {
		vunmap(vaddr);
		return;
	}

	/* Nothing to do if allocated by DMA mapping API. */
}