// SPDX-License-Identifier: GPL-2.0-or-later
/* exynos_drm_gem.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
 */


#include <linux/dma-buf.h>
#include <linux/pfn_t.h>
#include <linux/shmem_fs.h>

#include <drm/drm_prime.h>
#include <drm/drm_vma_manager.h>
#include <drm/exynos_drm.h>

#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"

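/*
 * allocate the backing storage for @exynos_gem through the DMA mapping API,
 * translating the EXYNOS_BO_* flags into DMA attributes. when @kvmap is
 * true, a kernel virtual mapping is kept in exynos_gem->kvaddr.
 */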
static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem, bool kvmap)
{
	struct drm_device *dev = exynos_gem->base.dev;
	unsigned long attr = 0;

	if (exynos_gem->dma_addr) {
		DRM_DEV_DEBUG_KMS(to_dma_dev(dev), "already allocated.\n");
		return 0;
	}

	/*
	 * if EXYNOS_BO_CONTIG is set, a fully physically contiguous memory
	 * region is allocated; otherwise the allocation is made as
	 * physically contiguous as possible.
	 */
	if (!(exynos_gem->flags & EXYNOS_BO_NONCONTIG))
		attr |= DMA_ATTR_FORCE_CONTIGUOUS;

	/*
	 * if EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE is set, use a
	 * write-combined mapping; otherwise use a cacheable mapping.
	 */
	if (exynos_gem->flags & EXYNOS_BO_WC ||
			!(exynos_gem->flags & EXYNOS_BO_CACHABLE))
		attr |= DMA_ATTR_WRITE_COMBINE;

	/* FBDev emulation requires a kernel mapping */
	if (!kvmap)
		attr |= DMA_ATTR_NO_KERNEL_MAPPING;

	exynos_gem->dma_attrs = attr;
	exynos_gem->cookie = dma_alloc_attrs(to_dma_dev(dev), exynos_gem->size,
					     &exynos_gem->dma_addr, GFP_KERNEL,
					     exynos_gem->dma_attrs);
	if (!exynos_gem->cookie) {
		DRM_DEV_ERROR(to_dma_dev(dev), "failed to allocate buffer.\n");
		return -ENOMEM;
	}

	if (kvmap)
		exynos_gem->kvaddr = exynos_gem->cookie;

	DRM_DEV_DEBUG_KMS(to_dma_dev(dev), "dma_addr(0x%lx), size(0x%lx)\n",
			(unsigned long)exynos_gem->dma_addr, exynos_gem->size);
	return 0;
}

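/* release the backing storage allocated by exynos_drm_alloc_buf(). */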
static void exynos_drm_free_buf(struct exynos_drm_gem *exynos_gem)
{
	struct drm_device *dev = exynos_gem->base.dev;

	if (!exynos_gem->dma_addr) {
		DRM_DEV_DEBUG_KMS(dev->dev, "dma_addr is invalid.\n");
		return;
	}

	DRM_DEV_DEBUG_KMS(dev->dev, "dma_addr(0x%lx), size(0x%lx)\n",
			(unsigned long)exynos_gem->dma_addr, exynos_gem->size);

	dma_free_attrs(to_dma_dev(dev), exynos_gem->size, exynos_gem->cookie,
			(dma_addr_t)exynos_gem->dma_addr,
			exynos_gem->dma_attrs);
}

static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
					struct drm_file *file_priv,
					unsigned int *handle)
{
	int ret;

	/*
	 * allocate an id in the idr table where the object is registered;
	 * the handle carries the id that userspace sees.
	 */
	ret = drm_gem_handle_create(file_priv, obj, handle);
	if (ret)
		return ret;

	DRM_DEV_DEBUG_KMS(to_dma_dev(obj->dev), "gem handle = 0x%x\n", *handle);

	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put(obj);

	return 0;
}

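/*
 * final teardown of a gem object: buffers imported through PRIME are left
 * to their exporter, locally allocated ones are freed, then the gem object
 * itself is released.
 */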
void exynos_drm_gem_destroy(struct exynos_drm_gem *exynos_gem)
{
	struct drm_gem_object *obj = &exynos_gem->base;

	DRM_DEV_DEBUG_KMS(to_dma_dev(obj->dev), "handle count = %d\n",
			  obj->handle_count);

	/*
	 * do not release a memory region obtained from an exporter.
	 *
	 * the exporter releases the region once the dmabuf's refcount
	 * drops to zero.
	 */
	if (obj->import_attach)
		drm_prime_gem_destroy(obj, exynos_gem->sgt);
	else
		exynos_drm_free_buf(exynos_gem);

	/* release file pointer to gem object. */
	drm_gem_object_release(obj);

	kfree(exynos_gem);
}

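/*
 * allocate the exynos_drm_gem wrapper, initialize the embedded gem object
 * and create the fake mmap offset used by userspace to map the buffer.
 */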
static struct exynos_drm_gem *exynos_drm_gem_init(struct drm_device *dev,
						  unsigned long size)
{
	struct exynos_drm_gem *exynos_gem;
	struct drm_gem_object *obj;
	int ret;

	exynos_gem = kzalloc(sizeof(*exynos_gem), GFP_KERNEL);
	if (!exynos_gem)
		return ERR_PTR(-ENOMEM);

	exynos_gem->size = size;
	obj = &exynos_gem->base;

	ret = drm_gem_object_init(dev, obj, size);
	if (ret < 0) {
		DRM_DEV_ERROR(dev->dev, "failed to initialize gem object\n");
		kfree(exynos_gem);
		return ERR_PTR(ret);
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret < 0) {
		drm_gem_object_release(obj);
		kfree(exynos_gem);
		return ERR_PTR(ret);
	}

	DRM_DEV_DEBUG_KMS(dev->dev, "created file object = %pK\n", obj->filp);

	return exynos_gem;
}

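/*
 * create a gem object of @size bytes with the given EXYNOS_BO_* @flags and
 * allocate its backing storage; EXYNOS_BO_NONCONTIG is dropped when no
 * IOMMU is available because such allocations are contiguous anyway.
 */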
struct exynos_drm_gem *exynos_drm_gem_create(struct drm_device *dev,
					     unsigned int flags,
					     unsigned long size,
					     bool kvmap)
{
	struct exynos_drm_gem *exynos_gem;
	int ret;

	if (flags & ~(EXYNOS_BO_MASK)) {
		DRM_DEV_ERROR(dev->dev,
			      "invalid GEM buffer flags: %u\n", flags);
		return ERR_PTR(-EINVAL);
	}

	if (!size) {
		DRM_DEV_ERROR(dev->dev, "invalid GEM buffer size: %lu\n", size);
		return ERR_PTR(-EINVAL);
	}

	size = roundup(size, PAGE_SIZE);

	exynos_gem = exynos_drm_gem_init(dev, size);
	if (IS_ERR(exynos_gem))
		return exynos_gem;

	if (!is_drm_iommu_supported(dev) && (flags & EXYNOS_BO_NONCONTIG)) {
		/*
		 * when no IOMMU is available, all allocated buffers are
		 * contiguous anyway, so drop EXYNOS_BO_NONCONTIG flag
		 */
		flags &= ~EXYNOS_BO_NONCONTIG;
		DRM_WARN("Non-contiguous allocation is not supported without IOMMU, falling back to contiguous buffer\n");
	}

	/* set the memory type and cache attributes requested by userspace. */
	exynos_gem->flags = flags;

	ret = exynos_drm_alloc_buf(exynos_gem, kvmap);
	if (ret < 0) {
		drm_gem_object_release(&exynos_gem->base);
		kfree(exynos_gem);
		return ERR_PTR(ret);
	}

	return exynos_gem;
}

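/*
 * ioctl handler for EXYNOS_GEM_CREATE: allocates a buffer and returns a
 * gem handle for it in @args->handle.
 *
 * a minimal userspace sketch (illustration only, not part of this file;
 * it assumes a DRM fd opened on an Exynos device, libdrm's drmIoctl() and
 * the DRM_IOCTL_EXYNOS_GEM_CREATE request from <drm/exynos_drm.h>):
 *
 *	struct drm_exynos_gem_create req = {
 *		.size  = 4096,
 *		.flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC,
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_EXYNOS_GEM_CREATE, &req) == 0)
 *		printf("gem handle = %u\n", req.handle);
 */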
int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_exynos_gem_create *args = data;
	struct exynos_drm_gem *exynos_gem;
	int ret;

	exynos_gem = exynos_drm_gem_create(dev, args->flags, args->size, false);
	if (IS_ERR(exynos_gem))
		return PTR_ERR(exynos_gem);

	ret = exynos_drm_gem_handle_create(&exynos_gem->base, file_priv,
					   &args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem);
		return ret;
	}

	return 0;
}

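/*
 * ioctl handler for EXYNOS_GEM_MAP: looks up the fake mmap offset of the
 * buffer referenced by @args->handle so userspace can mmap() it through
 * the DRM fd.
 */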
int exynos_drm_gem_map_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct drm_exynos_gem_map *args = data;

	return drm_gem_dumb_map_offset(file_priv, dev, args->handle,
				       &args->offset);
}

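/*
 * look up a gem handle and return the exynos wrapper; the caller holds a
 * reference on the object and must drop it when done.
 */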
struct exynos_drm_gem *exynos_drm_gem_get(struct drm_file *filp,
					  unsigned int gem_handle)
{
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(filp, gem_handle);
	if (!obj)
		return NULL;
	return to_exynos_gem(obj);
}

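/* map the DMA buffer into @vma after validating the requested size. */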
static int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem *exynos_gem,
				      struct vm_area_struct *vma)
{
	struct drm_device *drm_dev = exynos_gem->base.dev;
	unsigned long vm_size;
	int ret;

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_pgoff = 0;

	vm_size = vma->vm_end - vma->vm_start;

	/* check if user-requested size is valid. */
	if (vm_size > exynos_gem->size)
		return -EINVAL;

	ret = dma_mmap_attrs(to_dma_dev(drm_dev), vma, exynos_gem->cookie,
			     exynos_gem->dma_addr, exynos_gem->size,
			     exynos_gem->dma_attrs);
	if (ret < 0) {
		DRM_ERROR("failed to mmap.\n");
		return ret;
	}

	return 0;
}

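/*
 * ioctl handler for EXYNOS_GEM_GET: reports the flags and size of the
 * buffer referenced by @args->handle back to userspace.
 */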
int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
				      struct drm_file *file_priv)
{
	struct exynos_drm_gem *exynos_gem;
	struct drm_exynos_gem_info *args = data;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!obj) {
		DRM_DEV_ERROR(dev->dev, "failed to lookup gem object.\n");
		return -EINVAL;
	}

	exynos_gem = to_exynos_gem(obj);

	args->flags = exynos_gem->flags;
	args->size = exynos_gem->size;

	drm_gem_object_put(obj);

	return 0;
}

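/* gem free callback, invoked once the last reference to @obj is dropped. */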
void exynos_drm_gem_free_object(struct drm_gem_object *obj)
{
	exynos_drm_gem_destroy(to_exynos_gem(obj));
}

int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
			       struct drm_device *dev,
			       struct drm_mode_create_dumb *args)
{
	struct exynos_drm_gem *exynos_gem;
	unsigned int flags;
	int ret;

	/*
	 * allocate memory to be used as a framebuffer.
	 * - this callback is invoked by userspace through the
	 *	DRM_IOCTL_MODE_CREATE_DUMB ioctl.
	 */

	args->pitch = args->width * ((args->bpp + 7) / 8);
	args->size = args->pitch * args->height;

	if (is_drm_iommu_supported(dev))
		flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC;
	else
		flags = EXYNOS_BO_CONTIG | EXYNOS_BO_WC;

	exynos_gem = exynos_drm_gem_create(dev, flags, args->size, false);
	if (IS_ERR(exynos_gem)) {
		dev_warn(dev->dev, "FB allocation failed.\n");
		return PTR_ERR(exynos_gem);
	}

	ret = exynos_drm_gem_handle_create(&exynos_gem->base, file_priv,
					   &args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem);
		return ret;
	}

	return 0;
}

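/*
 * apply the page protection implied by the buffer flags (cacheable,
 * write-combined or non-cacheable) and map the buffer into @vma.
 */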
static int exynos_drm_gem_mmap_obj(struct drm_gem_object *obj,
				   struct vm_area_struct *vma)
{
	struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
	int ret;

	DRM_DEV_DEBUG_KMS(to_dma_dev(obj->dev), "flags = 0x%x\n",
			  exynos_gem->flags);

	/* non-cacheable mapping by default. */
	if (exynos_gem->flags & EXYNOS_BO_CACHABLE)
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	else if (exynos_gem->flags & EXYNOS_BO_WC)
		vma->vm_page_prot =
			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	else
		vma->vm_page_prot =
			pgprot_noncached(vm_get_page_prot(vma->vm_flags));

	ret = exynos_drm_gem_mmap_buffer(exynos_gem, vma);
	if (ret)
		goto err_close_vm;

	return ret;

err_close_vm:
	drm_gem_vm_close(vma);

	return ret;
}

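/*
 * mmap file operation: let drm_gem_mmap() set up @vma, then either forward
 * to the dma-buf exporter for imported buffers or map the local buffer.
 */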
int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_gem_object *obj;
	int ret;

	/* set vm_area_struct. */
	ret = drm_gem_mmap(filp, vma);
	if (ret < 0) {
		DRM_ERROR("failed to mmap.\n");
		return ret;
	}

	obj = vma->vm_private_data;

	if (obj->import_attach)
		return dma_buf_mmap(obj->dma_buf, vma, 0);

	return exynos_drm_gem_mmap_obj(obj, vma);
}

/* low-level interface prime helpers */
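/*
 * import using the device that really performs DMA (to_dma_dev()) rather
 * than the virtual exynos DRM device.
 */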
struct drm_gem_object *exynos_drm_gem_prime_import(struct drm_device *dev,
					    struct dma_buf *dma_buf)
{
	return drm_gem_prime_import_dev(dev, dma_buf, to_dma_dev(dev));
}

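/* build a scatter-gather table describing the buffer for PRIME export. */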
struct sg_table *exynos_drm_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
	struct drm_device *drm_dev = obj->dev;
	struct sg_table *sgt;
	int ret;

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	ret = dma_get_sgtable_attrs(to_dma_dev(drm_dev), sgt, exynos_gem->cookie,
				    exynos_gem->dma_addr, exynos_gem->size,
				    exynos_gem->dma_attrs);
	if (ret) {
		DRM_ERROR("failed to get sgtable, %d\n", ret);
		kfree(sgt);
		return ERR_PTR(ret);
	}

	return sgt;
}

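/*
 * wrap an imported dma-buf in a gem object; the import is rejected unless
 * the scatter-gather table is contiguous in DMA address space.
 */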
struct drm_gem_object *
exynos_drm_gem_prime_import_sg_table(struct drm_device *dev,
				     struct dma_buf_attachment *attach,
				     struct sg_table *sgt)
{
	struct exynos_drm_gem *exynos_gem;

	/* check if the entries in the sg_table are contiguous */
	if (drm_prime_get_contiguous_size(sgt) < attach->dmabuf->size) {
		DRM_ERROR("buffer chunks must be mapped contiguously");
		return ERR_PTR(-EINVAL);
	}

	exynos_gem = exynos_drm_gem_init(dev, attach->dmabuf->size);
	if (IS_ERR(exynos_gem))
		return ERR_CAST(exynos_gem);

	/*
	 * the buffer has been mapped contiguously into the DMA address
	 * space, but with an IOMMU the underlying pages may be either
	 * contiguous or non-contiguous; use the simplified logic below:
	 */
	if (is_drm_iommu_supported(dev))
		exynos_gem->flags |= EXYNOS_BO_NONCONTIG;
	else
		exynos_gem->flags |= EXYNOS_BO_CONTIG;

	exynos_gem->dma_addr = sg_dma_address(sgt->sgl);
	exynos_gem->sgt = sgt;
	return &exynos_gem->base;
}

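/*
 * kernel virtual mappings through the PRIME vmap interface are not
 * supported, so vmap returns NULL and the matching vunmap is a no-op.
 */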
void *exynos_drm_gem_prime_vmap(struct drm_gem_object *obj)
{
	return NULL;
}

void exynos_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	/* Nothing to do */
}

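/* PRIME mmap callback: set up @vma for the object, then map the buffer. */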
int exynos_drm_gem_prime_mmap(struct drm_gem_object *obj,
			      struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap_obj(obj, obj->size, vma);
	if (ret < 0)
		return ret;

	return exynos_drm_gem_mmap_obj(obj, vma);
}