// SPDX-License-Identifier: GPL-2.0
/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>

#include <drm/panfrost_drm.h>
#include "panfrost_device.h"
#include "panfrost_gem.h"
#include "panfrost_mmu.h"

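/*
 * GEM object lifetime and per-file GPU address-space mappings.
 *
 * A BO can be mapped into several per-file (struct panfrost_mmu) address
 * spaces. Those mappings are refcounted separately from the BO itself, so a
 * mapping can outlive the handle that created it (for instance while a job
 * still references it).
 */
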
/* Called by DRM core on the last userspace/kernel unreference of the
 * BO.
 */
static void panfrost_gem_free_object(struct drm_gem_object *obj)
{
	struct panfrost_gem_object *bo = to_panfrost_bo(obj);
	struct panfrost_device *pfdev = obj->dev->dev_private;

	/*
	 * Make sure the BO is no longer inserted in the shrinker list before
	 * taking care of the destruction itself. If we don't do that we have a
	 * race condition between this function and what's done in
	 * panfrost_gem_shrinker_scan().
	 */
	mutex_lock(&pfdev->shrinker_lock);
	list_del_init(&bo->base.madv_list);
	mutex_unlock(&pfdev->shrinker_lock);

	/*
	 * If we still have mappings attached to the BO, there's a problem in
	 * our refcounting.
	 */
	WARN_ON_ONCE(!list_empty(&bo->mappings.list));

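	/*
	 * Heap BOs grow in 2MB chunks from the GPU fault handler, one
	 * sg_table per chunk; tear down only the chunks that were actually
	 * populated.
	 */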
	if (bo->sgts) {
		int i;
		int n_sgt = bo->base.base.size / SZ_2M;

		for (i = 0; i < n_sgt; i++) {
			if (bo->sgts[i].sgl) {
				dma_unmap_sgtable(pfdev->dev, &bo->sgts[i],
						  DMA_BIDIRECTIONAL, 0);
				sg_free_table(&bo->sgts[i]);
			}
		}
		kvfree(bo->sgts);
	}

	drm_gem_shmem_free_object(obj);
}

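/*
 * Look up the mapping of @bo in @priv's address space and take a reference
 * on it, or return NULL if the BO is not mapped there. A rough sketch of the
 * expected calling pattern (hypothetical caller, not taken from this file):
 *
 *	mapping = panfrost_gem_mapping_get(bo, priv);
 *	if (!mapping)
 *		return -EINVAL;
 *	... use mapping->mmnode.start << PAGE_SHIFT as the GPU VA ...
 *	panfrost_gem_mapping_put(mapping);
 */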
struct panfrost_gem_mapping *
panfrost_gem_mapping_get(struct panfrost_gem_object *bo,
			 struct panfrost_file_priv *priv)
{
	struct panfrost_gem_mapping *iter, *mapping = NULL;

	mutex_lock(&bo->mappings.lock);
	list_for_each_entry(iter, &bo->mappings.list, node) {
		if (iter->mmu == priv->mmu) {
			kref_get(&iter->refcount);
			mapping = iter;
			break;
		}
	}
	mutex_unlock(&bo->mappings.lock);

	return mapping;
}

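/*
 * Unmap the BO from the GPU if it is mapped, and return its VA range to the
 * address-space allocator.
 */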
static void
panfrost_gem_teardown_mapping(struct panfrost_gem_mapping *mapping)
{
	if (mapping->active)
		panfrost_mmu_unmap(mapping);

	spin_lock(&mapping->mmu->mm_lock);
	if (drm_mm_node_allocated(&mapping->mmnode))
		drm_mm_remove_node(&mapping->mmnode);
	spin_unlock(&mapping->mmu->mm_lock);
}

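/*
 * kref release handler: tears down the GPU mapping and drops the references
 * the mapping held on the BO and on the MMU context.
 */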
static void panfrost_gem_mapping_release(struct kref *kref)
{
	struct panfrost_gem_mapping *mapping;

	mapping = container_of(kref, struct panfrost_gem_mapping, refcount);

	panfrost_gem_teardown_mapping(mapping);
	drm_gem_object_put(&mapping->obj->base.base);
	panfrost_mmu_ctx_put(mapping->mmu);
	kfree(mapping);
}

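/* Drop a mapping reference. NULL-safe, so callers don't have to check. */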
void panfrost_gem_mapping_put(struct panfrost_gem_mapping *mapping)
{
	if (!mapping)
		return;

	kref_put(&mapping->refcount, panfrost_gem_mapping_release);
}

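/*
 * Tear down all GPU mappings of @bo. The caller must hold bo->mappings.lock.
 * The mappings keep their references and are freed later through
 * panfrost_gem_mapping_put().
 */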
void panfrost_gem_teardown_mappings_locked(struct panfrost_gem_object *bo)
{
	struct panfrost_gem_mapping *mapping;

	list_for_each_entry(mapping, &bo->mappings.list, node)
		panfrost_gem_teardown_mapping(mapping);
}

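/*
 * GEM ->open() hook, called for each drm_file that obtains a handle to the
 * BO: allocate a mapping object, carve out a GPU VA range in that file's
 * address space and, except for heap BOs, map the pages right away.
 */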
int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
	int ret;
	size_t size = obj->size;
	u64 align;
	struct panfrost_gem_object *bo = to_panfrost_bo(obj);
	unsigned long color = bo->noexec ? PANFROST_BO_NOEXEC : 0;
	struct panfrost_file_priv *priv = file_priv->driver_priv;
	struct panfrost_gem_mapping *mapping;

	mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
	if (!mapping)
		return -ENOMEM;

	INIT_LIST_HEAD(&mapping->node);
	kref_init(&mapping->refcount);
	drm_gem_object_get(obj);
	mapping->obj = bo;

	/*
	 * Executable buffers cannot cross a 16MB boundary as the program
	 * counter is 24-bits. We assume executable buffers will be less than
	 * 16MB and aligning executable buffers to their size will avoid
	 * crossing a 16MB boundary. For a power-of-two size this holds
	 * exactly: e.g. a 4MB BO placed on a 4MB boundary always falls within
	 * a single 16MB window.
	 */
	if (!bo->noexec)
		align = size >> PAGE_SHIFT;
	else
		align = size >= SZ_2M ? SZ_2M >> PAGE_SHIFT : 0;

	mapping->mmu = panfrost_mmu_ctx_get(priv->mmu);
	spin_lock(&mapping->mmu->mm_lock);
	ret = drm_mm_insert_node_generic(&mapping->mmu->mm, &mapping->mmnode,
					 size >> PAGE_SHIFT, align, color, 0);
	spin_unlock(&mapping->mmu->mm_lock);
	if (ret)
		goto err;

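	/*
	 * Heap BOs have no backing pages yet; they are mapped piecewise from
	 * the GPU fault handler as they grow, so only non-heap BOs get their
	 * pages mapped here.
	 */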
	if (!bo->is_heap) {
		ret = panfrost_mmu_map(mapping);
		if (ret)
			goto err;
	}

	mutex_lock(&bo->mappings.lock);
	WARN_ON(bo->base.madv != PANFROST_MADV_WILLNEED);
	list_add_tail(&mapping->node, &bo->mappings.list);
	mutex_unlock(&bo->mappings.lock);

err:
	if (ret)
		panfrost_gem_mapping_put(mapping);
	return ret;
}

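/*
 * GEM ->close() hook: detach this file's mapping from the BO. The actual
 * teardown happens in panfrost_gem_mapping_release() once the last reference
 * (possibly held by an in-flight job) is dropped.
 */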
void panfrost_gem_close(struct drm_gem_object *obj, struct drm_file *file_priv)
{
	struct panfrost_file_priv *priv = file_priv->driver_priv;
	struct panfrost_gem_object *bo = to_panfrost_bo(obj);
	struct panfrost_gem_mapping *mapping = NULL, *iter;

	mutex_lock(&bo->mappings.lock);
	list_for_each_entry(iter, &bo->mappings.list, node) {
		if (iter->mmu == priv->mmu) {
			mapping = iter;
			list_del(&iter->node);
			break;
		}
	}
	mutex_unlock(&bo->mappings.lock);

	panfrost_gem_mapping_put(mapping);
}

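/*
 * Heap BOs cannot be pinned (e.g. for dma-buf export) because their backing
 * pages are allocated lazily on GPU faults.
 */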
static int panfrost_gem_pin(struct drm_gem_object *obj)
{
	if (to_panfrost_bo(obj)->is_heap)
		return -EINVAL;

	return drm_gem_shmem_pin(obj);
}

static const struct drm_gem_object_funcs panfrost_gem_funcs = {
	.free = panfrost_gem_free_object,
	.open = panfrost_gem_open,
	.close = panfrost_gem_close,
	.print_info = drm_gem_shmem_print_info,
	.pin = panfrost_gem_pin,
	.unpin = drm_gem_shmem_unpin,
	.get_sg_table = drm_gem_shmem_get_sg_table,
	.vmap = drm_gem_shmem_vmap,
	.vunmap = drm_gem_shmem_vunmap,
	.mmap = drm_gem_shmem_mmap,
};

/**
 * panfrost_gem_create_object - Implementation of driver->gem_create_object.
 * @dev: DRM device
 * @size: Size in bytes of the memory the object will reference
 *
 * This lets the GEM helpers allocate object structs for us, and keep
 * our BO stats correct.
 */
struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t size)
{
	struct panfrost_device *pfdev = dev->dev_private;
	struct panfrost_gem_object *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return NULL;

	INIT_LIST_HEAD(&obj->mappings.list);
	mutex_init(&obj->mappings.lock);
	obj->base.base.funcs = &panfrost_gem_funcs;
	obj->base.map_wc = !pfdev->coherent;

	return &obj->base.base;
}

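/*
 * Allocate a shmem-backed BO and apply the PANFROST_BO_* flags. A rough
 * usage sketch from a hypothetical ioctl handler (error handling elided,
 * not taken from this file):
 *
 *	struct panfrost_gem_object *bo;
 *	u32 handle;
 *
 *	bo = panfrost_gem_create(ddev, args->size, PANFROST_BO_NOEXEC);
 *	if (IS_ERR(bo))
 *		return PTR_ERR(bo);
 *	ret = drm_gem_handle_create(file, &bo->base.base, &handle);
 */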
struct panfrost_gem_object *
panfrost_gem_create(struct drm_device *dev, size_t size, u32 flags)
{
	struct drm_gem_shmem_object *shmem;
	struct panfrost_gem_object *bo;

	/* Round up heap allocations to 2MB to keep fault handling simple */
	if (flags & PANFROST_BO_HEAP)
		size = roundup(size, SZ_2M);

	shmem = drm_gem_shmem_create(dev, size);
	if (IS_ERR(shmem))
		return ERR_CAST(shmem);

	bo = to_panfrost_bo(&shmem->base);
	bo->noexec = !!(flags & PANFROST_BO_NOEXEC);
	bo->is_heap = !!(flags & PANFROST_BO_HEAP);

	return bo;
}

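/*
 * dma-buf import path: wrap the imported sg_table in a shmem BO. Imported
 * buffers are conservatively marked no-exec.
 */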
struct drm_gem_object *
panfrost_gem_prime_import_sg_table(struct drm_device *dev,
				   struct dma_buf_attachment *attach,
				   struct sg_table *sgt)
{
	struct drm_gem_object *obj;
	struct panfrost_gem_object *bo;

	obj = drm_gem_shmem_prime_import_sg_table(dev, attach, sgt);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	bo = to_panfrost_bo(obj);
	bo->noexec = true;

	return obj;
}