/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include <linux/moduleparam.h>

#include "virtgpu_drv.h"

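/*
 * When enabled (the default), resource IDs come from a monotonically
 * increasing counter and are never recycled, to work around an ID-reuse
 * bug in old virglrenderer (see virtio_gpu_resource_id_get() below).
 */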
static int virtio_gpu_virglrenderer_workaround = 1;
module_param_named(virglhack, virtio_gpu_virglrenderer_workaround, int, 0400);

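/*
 * Allocate a host-visible resource handle.  Handles are 1-based: the
 * virtio-gpu protocol reserves resource id 0 to mean "no resource"
 * (e.g. to disable a scanout), hence the "+ 1" below.
 */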
static int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
				       uint32_t *resid)
{
	if (virtio_gpu_virglrenderer_workaround) {
		/*
		 * Hack to avoid re-using resource IDs.
		 *
		 * virglrenderer versions up to (and including) 0.7.0
		 * can't deal with that.  virglrenderer commit
		 * "f91a9dd35715 Fix unlinking resources from hash
		 * table." (Feb 2019) fixes the bug.
		 */
		static atomic_t seqno = ATOMIC_INIT(0);
		int handle = atomic_inc_return(&seqno);
		*resid = handle + 1;
	} else {
		int handle = ida_alloc(&vgdev->resource_ida, GFP_KERNEL);

		if (handle < 0)
			return handle;
		*resid = handle + 1;
	}
	return 0;
}

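/*
 * Release a resource handle.  With the virglrenderer workaround active
 * IDs are never recycled, so there is nothing to free; otherwise the
 * handle goes back to the IDA, shifted to the 0-based range it
 * allocates from.
 */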
static void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id)
{
	if (!virtio_gpu_virglrenderer_workaround)
		ida_free(&vgdev->resource_ida, id - 1);
}

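/*
 * Final teardown of a buffer object: release its resource id and, for
 * shmem-backed objects, undo the DMA mapping and free the scatter/gather
 * table set up by virtio_gpu_object_shmem_init() before freeing the GEM
 * object.  Called directly from virtio_gpu_free_object() for objects the
 * host never saw, or from the RESOURCE_UNREF completion callback.
 */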
void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo)
{
	struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;

	virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
	if (virtio_gpu_is_shmem(bo)) {
		struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);

		if (shmem->pages) {
			if (shmem->mapped) {
				dma_unmap_sgtable(vgdev->vdev->dev.parent,
						  shmem->pages, DMA_TO_DEVICE, 0);
				shmem->mapped = 0;
			}

			sg_free_table(shmem->pages);
			kfree(shmem->pages);
			shmem->pages = NULL;
			drm_gem_shmem_unpin(&bo->base.base);
		}

		drm_gem_shmem_free_object(&bo->base.base);
	}
}

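/*
 * GEM free callback.  If the resource exists on the host, cleanup has to
 * wait until the host drops its reference: queue a RESOURCE_UNREF command
 * whose completion handler finishes the teardown.
 */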
static void virtio_gpu_free_object(struct drm_gem_object *obj)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
	struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;

	if (bo->created) {
		virtio_gpu_cmd_unref_resource(vgdev, bo);
		virtio_gpu_notify(vgdev);
		/* completion handler calls virtio_gpu_cleanup_object() */
		return;
	}
	virtio_gpu_cleanup_object(bo);
}

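/*
 * Object hooks: free/open/close are driver-specific, everything else is
 * delegated to the generic shmem helpers.
 */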
static const struct drm_gem_object_funcs virtio_gpu_shmem_funcs = {
	.free = virtio_gpu_free_object,
	.open = virtio_gpu_gem_object_open,
	.close = virtio_gpu_gem_object_close,

	.print_info = drm_gem_shmem_print_info,
	.pin = drm_gem_shmem_pin,
	.unpin = drm_gem_shmem_unpin,
	.get_sg_table = drm_gem_shmem_get_sg_table,
	.vmap = drm_gem_shmem_vmap,
	.vunmap = drm_gem_shmem_vunmap,
	.mmap = drm_gem_shmem_mmap,
};

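/* Shmem-backed objects are recognized by their object-funcs table. */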
bool virtio_gpu_is_shmem(struct virtio_gpu_object *bo)
{
	return bo->base.base.funcs == &virtio_gpu_shmem_funcs;
}

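/*
 * Allocation hook for GEM objects (the driver's gem_create_object
 * callback): allocate the wrapping virtio_gpu_object_shmem and plug in
 * our object funcs so the shmem helpers call back into this file.
 */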
struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev,
						size_t size)
{
	struct virtio_gpu_object_shmem *shmem;
	struct drm_gem_shmem_object *dshmem;

	shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
	if (!shmem)
		return NULL;

	dshmem = &shmem->base.base;
	dshmem->base.funcs = &virtio_gpu_shmem_funcs;
	dshmem->map_cached = true;
	return &dshmem->base;
}

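/*
 * Pin the object's pages, build a scatter/gather table for them and
 * translate that into the virtio_gpu_mem_entry array which
 * RESOURCE_ATTACH_BACKING hands to the host: DMA addresses when the
 * transport uses the DMA API, guest-physical addresses otherwise.
 */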
static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev,
					struct virtio_gpu_object *bo,
					struct virtio_gpu_mem_entry **ents,
					unsigned int *nents)
{
	bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
	struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
	struct scatterlist *sg;
	int si, ret;

	ret = drm_gem_shmem_pin(&bo->base.base);
	if (ret < 0)
		return ret;

	/*
	 * virtio_gpu uses drm_gem_shmem_get_sg_table instead of
	 * drm_gem_shmem_get_pages_sgt because virtio has its own set of
	 * dma-ops.  This is discouraged for other drivers, but should be
	 * fine since virtio_gpu doesn't support dma-buf import from other
	 * devices.
	 */
	shmem->pages = drm_gem_shmem_get_sg_table(&bo->base.base);
	if (IS_ERR(shmem->pages)) {
		drm_gem_shmem_unpin(&bo->base.base);
		ret = PTR_ERR(shmem->pages);
		shmem->pages = NULL;
		return ret;
	}

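	/*
	 * dma_map_sgtable() may coalesce entries, so the mapped case
	 * reports pages->nents (the mapped length) while the unmapped
	 * case reports pages->orig_nents.
	 */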
	if (use_dma_api) {
		ret = dma_map_sgtable(vgdev->vdev->dev.parent,
				      shmem->pages, DMA_TO_DEVICE, 0);
		if (ret)
			return ret;
		*nents = shmem->mapped = shmem->pages->nents;
	} else {
		*nents = shmem->pages->orig_nents;
	}

	*ents = kvmalloc_array(*nents,
			       sizeof(struct virtio_gpu_mem_entry),
			       GFP_KERNEL);
	if (!(*ents)) {
		DRM_ERROR("failed to allocate ent list\n");
		return -ENOMEM;
	}

	if (use_dma_api) {
		for_each_sgtable_dma_sg(shmem->pages, sg, si) {
			(*ents)[si].addr = cpu_to_le64(sg_dma_address(sg));
			(*ents)[si].length = cpu_to_le32(sg_dma_len(sg));
			(*ents)[si].padding = 0;
		}
	} else {
		for_each_sgtable_sg(shmem->pages, sg, si) {
			(*ents)[si].addr = cpu_to_le64(sg_phys(sg));
			(*ents)[si].length = cpu_to_le32(sg->length);
			(*ents)[si].padding = 0;
		}
	}

	return 0;
}

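/*
 * Create a buffer object together with its host-side resource: allocate
 * the GEM object and a resource id, queue the create command (2D or 3D
 * depending on @params), then attach the backing pages.  If @fence is
 * non-NULL the create command is fenced against the object.
 */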
int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
			     struct virtio_gpu_object_params *params,
			     struct virtio_gpu_object **bo_ptr,
			     struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object_array *objs = NULL;
	struct drm_gem_shmem_object *shmem_obj;
	struct virtio_gpu_object *bo;
	struct virtio_gpu_mem_entry *ents;
	unsigned int nents;
	int ret;

	*bo_ptr = NULL;

	params->size = roundup(params->size, PAGE_SIZE);
	shmem_obj = drm_gem_shmem_create(vgdev->ddev, params->size);
	if (IS_ERR(shmem_obj))
		return PTR_ERR(shmem_obj);
	bo = gem_to_virtio_gpu_obj(&shmem_obj->base);

	ret = virtio_gpu_resource_id_get(vgdev, &bo->hw_res_handle);
	if (ret < 0)
		goto err_free_gem;

	bo->dumb = params->dumb;

	if (fence) {
		ret = -ENOMEM;
		objs = virtio_gpu_array_alloc(1);
		if (!objs)
			goto err_put_id;
		virtio_gpu_array_add_obj(objs, &bo->base.base);

		ret = virtio_gpu_array_lock_resv(objs);
		if (ret != 0)
			goto err_put_objs;
	}

	if (params->virgl) {
		virtio_gpu_cmd_resource_create_3d(vgdev, bo, params,
						  objs, fence);
	} else {
		virtio_gpu_cmd_create_resource(vgdev, bo, params,
					       objs, fence);
	}

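	/*
	 * The backing store is set up only after the create command has
	 * been queued; the ATTACH_BACKING below is what actually hands
	 * the pages to the host.
	 */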
	ret = virtio_gpu_object_shmem_init(vgdev, bo, &ents, &nents);
	if (ret != 0) {
		virtio_gpu_free_object(&shmem_obj->base);
		return ret;
	}

	virtio_gpu_object_attach(vgdev, bo, ents, nents);

	*bo_ptr = bo;
	return 0;

err_put_objs:
	virtio_gpu_array_put_free(objs);
err_put_id:
	virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
err_free_gem:
	drm_gem_shmem_free_object(&shmem_obj->base);
	return ret;
}