// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2018 Etnaviv Project
 */

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

#include "common.xml.h"
#include "etnaviv_cmdbuf.h"
#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"

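/*
 * Unmap a virtually contiguous range from the MMU context, one 4K page at a
 * time, through the per-version pagetable ops. Both iova and size must be
 * page aligned; the loop stops early if the backend fails to unmap a page.
 */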
static void etnaviv_context_unmap(struct etnaviv_iommu_context *context,
				 unsigned long iova, size_t size)
{
	size_t unmapped_page, unmapped = 0;
	size_t pgsize = SZ_4K;

	if (!IS_ALIGNED(iova | size, pgsize)) {
		pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%zx\n",
		       iova, size, pgsize);
		return;
	}

	while (unmapped < size) {
		unmapped_page = context->global->ops->unmap(context, iova,
							    pgsize);
		if (!unmapped_page)
			break;

		iova += unmapped_page;
		unmapped += unmapped_page;
	}
}

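/*
 * Map a physically contiguous range into the MMU context at iova, one 4K page
 * at a time. If any page fails to map, the pages mapped so far are unmapped
 * again, so the range ends up either fully mapped or not mapped at all.
 */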
static int etnaviv_context_map(struct etnaviv_iommu_context *context,
			      unsigned long iova, phys_addr_t paddr,
			      size_t size, int prot)
{
	unsigned long orig_iova = iova;
	size_t pgsize = SZ_4K;
	size_t orig_size = size;
	int ret = 0;

	if (!IS_ALIGNED(iova | paddr | size, pgsize)) {
		pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%zx\n",
		       iova, &paddr, size, pgsize);
		return -EINVAL;
	}

	while (size) {
		ret = context->global->ops->map(context, iova, paddr, pgsize,
						prot);
		if (ret)
			break;

		iova += pgsize;
		paddr += pgsize;
		size -= pgsize;
	}

	/* unroll mapping in case something went wrong */
	if (ret)
		etnaviv_context_unmap(context, orig_iova, orig_size - size);

	return ret;
}

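/*
 * Map a DMA-mapped scatterlist into the context at consecutive addresses
 * starting at iova. Each segment is mapped with its leading offset folded in,
 * so the GPU sees one contiguous range. A failure unmaps the partial range.
 */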
static int etnaviv_iommu_map(struct etnaviv_iommu_context *context, u32 iova,
			     struct sg_table *sgt, unsigned len, int prot)
{
	struct scatterlist *sg;
	unsigned int da = iova;
	unsigned int i;
	int ret;

	if (!context || !sgt)
		return -EINVAL;

	for_each_sgtable_dma_sg(sgt, sg, i) {
		phys_addr_t pa = sg_dma_address(sg) - sg->offset;
		size_t bytes = sg_dma_len(sg) + sg->offset;

		VERB("map[%d]: %08x %pap(%zx)", i, iova, &pa, bytes);

		ret = etnaviv_context_map(context, da, pa, bytes, prot);
		if (ret)
			goto fail;

		da += bytes;
	}

	return 0;

fail:
	etnaviv_context_unmap(context, iova, da - iova);
	return ret;
}

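/* Unmap each scatterlist segment previously mapped by etnaviv_iommu_map(). */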
static void etnaviv_iommu_unmap(struct etnaviv_iommu_context *context, u32 iova,
				struct sg_table *sgt, unsigned len)
{
	struct scatterlist *sg;
	unsigned int da = iova;
	int i;

	for_each_sgtable_dma_sg(sgt, sg, i) {
		size_t bytes = sg_dma_len(sg) + sg->offset;

		etnaviv_context_unmap(context, da, bytes);

		VERB("unmap[%d]: %08x(%zx)", i, iova, bytes);

		BUG_ON(!PAGE_ALIGNED(bytes));

		da += bytes;
	}
}

static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu_context *context,
	struct etnaviv_vram_mapping *mapping)
{
	struct etnaviv_gem_object *etnaviv_obj = mapping->object;

	etnaviv_iommu_unmap(context, mapping->vram_node.start,
			    etnaviv_obj->sgt, etnaviv_obj->base.size);
	drm_mm_remove_node(&mapping->vram_node);
}

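/*
 * Allocate an address range from the context's drm_mm. If the address space
 * is full, run a drm_mm eviction scan over the currently unused (not pinned)
 * mappings, reap enough of them to make room and retry the allocation in
 * eviction mode. Must be called with context->lock held.
 */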
static int etnaviv_iommu_find_iova(struct etnaviv_iommu_context *context,
				   struct drm_mm_node *node, size_t size)
{
	struct etnaviv_vram_mapping *free = NULL;
	enum drm_mm_insert_mode mode = DRM_MM_INSERT_LOW;
	int ret;

	lockdep_assert_held(&context->lock);

	while (1) {
		struct etnaviv_vram_mapping *m, *n;
		struct drm_mm_scan scan;
		struct list_head list;
		bool found;

		ret = drm_mm_insert_node_in_range(&context->mm, node,
						  size, 0, 0, 0, U64_MAX, mode);
		if (ret != -ENOSPC)
			break;

		/* Try to retire some entries */
		drm_mm_scan_init(&scan, &context->mm, size, 0, 0, mode);

		found = false;
		INIT_LIST_HEAD(&list);
		list_for_each_entry(free, &context->mappings, mmu_node) {
			/* If this vram node has not been used, skip this. */
			if (!free->vram_node.mm)
				continue;

			/*
			 * If the iova is pinned, then it's in-use,
			 * so we must keep its mapping.
			 */
			if (free->use)
				continue;

			list_add(&free->scan_node, &list);
			if (drm_mm_scan_add_block(&scan, &free->vram_node)) {
				found = true;
				break;
			}
		}

		if (!found) {
			/* Nothing found, clean up and fail */
			list_for_each_entry_safe(m, n, &list, scan_node)
				BUG_ON(drm_mm_scan_remove_block(&scan, &m->vram_node));
			break;
		}

		/*
		 * drm_mm does not allow any other operations while
		 * scanning, so we have to remove all blocks first.
		 * If drm_mm_scan_remove_block() returns false, we
		 * can leave the block pinned.
		 */
		list_for_each_entry_safe(m, n, &list, scan_node)
			if (!drm_mm_scan_remove_block(&scan, &m->vram_node))
				list_del_init(&m->scan_node);

		/*
		 * Unmap the blocks which need to be reaped from the MMU.
		 * Clear the context pointer to prevent mapping_get from
		 * finding this mapping.
		 */
		list_for_each_entry_safe(m, n, &list, scan_node) {
			etnaviv_iommu_remove_mapping(context, m);
			etnaviv_iommu_context_put(m->context);
			m->context = NULL;
			list_del_init(&m->mmu_node);
			list_del_init(&m->scan_node);
		}

		mode = DRM_MM_INSERT_EVICT;

		/*
		 * We removed enough mappings so that the new allocation will
		 * succeed, retry the allocation one more time.
		 */
	}

	return ret;
}

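/* Place the node exactly at [va, va + size) in the context's address space. */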
static int etnaviv_iommu_insert_exact(struct etnaviv_iommu_context *context,
		   struct drm_mm_node *node, size_t size, u64 va)
{
	return drm_mm_insert_node_in_range(&context->mm, node, size, 0, 0, va,
					   va + size, DRM_MM_INSERT_LOWEST);
}

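/*
 * Map a GEM object into the MMU context and record the result in @mapping.
 * On MMUv1 a contiguous object (unless ETNA_BO_FORCE_MMU is set) is accessed
 * directly through the linear window, so the iova is simply the DMA address
 * relative to memory_base and no pagetable entries are created. Otherwise an
 * address range is allocated (at exactly @va if one is requested) and the
 * object's scatterlist is mapped into it. flush_seq is bumped to signal that
 * the translations changed. Called with the object lock held.
 */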
int etnaviv_iommu_map_gem(struct etnaviv_iommu_context *context,
	struct etnaviv_gem_object *etnaviv_obj, u32 memory_base,
	struct etnaviv_vram_mapping *mapping, u64 va)
{
	struct sg_table *sgt = etnaviv_obj->sgt;
	struct drm_mm_node *node;
	int ret;

	lockdep_assert_held(&etnaviv_obj->lock);

	mutex_lock(&context->lock);

	/* v1 MMU can optimize single entry (contiguous) scatterlists */
	if (context->global->version == ETNAVIV_IOMMU_V1 &&
	    sgt->nents == 1 && !(etnaviv_obj->flags & ETNA_BO_FORCE_MMU)) {
		u32 iova;

		iova = sg_dma_address(sgt->sgl) - memory_base;
		if (iova < 0x80000000 - sg_dma_len(sgt->sgl)) {
			mapping->iova = iova;
			list_add_tail(&mapping->mmu_node, &context->mappings);
			ret = 0;
			goto unlock;
		}
	}

	node = &mapping->vram_node;

	if (va)
		ret = etnaviv_iommu_insert_exact(context, node,
						 etnaviv_obj->base.size, va);
	else
		ret = etnaviv_iommu_find_iova(context, node,
					      etnaviv_obj->base.size);
	if (ret < 0)
		goto unlock;

	mapping->iova = node->start;
	ret = etnaviv_iommu_map(context, node->start, sgt, etnaviv_obj->base.size,
				ETNAVIV_PROT_READ | ETNAVIV_PROT_WRITE);

	if (ret < 0) {
		drm_mm_remove_node(node);
		goto unlock;
	}

	list_add_tail(&mapping->mmu_node, &context->mappings);
	context->flush_seq++;
unlock:
	mutex_unlock(&context->lock);

	return ret;
}

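/*
 * Drop a GEM object mapping from the MMU context. The mapping may already
 * have been reaped by the eviction scan in etnaviv_iommu_find_iova(), in
 * which case there is nothing left to do.
 */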
void etnaviv_iommu_unmap_gem(struct etnaviv_iommu_context *context,
	struct etnaviv_vram_mapping *mapping)
{
	WARN_ON(mapping->use);

	mutex_lock(&context->lock);

	/* Bail if the mapping has been reaped by another thread */
	if (!mapping->context) {
		mutex_unlock(&context->lock);
		return;
	}

	/* If the vram node is on the mm, unmap and remove the node */
	if (mapping->vram_node.mm == &context->mm)
		etnaviv_iommu_remove_mapping(context, mapping);

	list_del(&mapping->mmu_node);
	context->flush_seq++;
	mutex_unlock(&context->lock);
}

static void etnaviv_iommu_context_free(struct kref *kref)
{
	struct etnaviv_iommu_context *context =
		container_of(kref, struct etnaviv_iommu_context, refcount);

	etnaviv_cmdbuf_suballoc_unmap(context, &context->cmdbuf_mapping);

	context->global->ops->free(context);
}

void etnaviv_iommu_context_put(struct etnaviv_iommu_context *context)
{
	kref_put(&context->refcount, etnaviv_iommu_context_free);
}

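/*
 * Allocate a new MMU context (v1 or v2, matching the global MMU state) and
 * map the command buffer suballocator into it. On MMUv1 command buffers are
 * only reachable through the linear window, so a suballocator that ends up
 * above the low 2 GiB is rejected.
 */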
struct etnaviv_iommu_context *
etnaviv_iommu_context_init(struct etnaviv_iommu_global *global,
			   struct etnaviv_cmdbuf_suballoc *suballoc)
{
	struct etnaviv_iommu_context *ctx;
	int ret;

	if (global->version == ETNAVIV_IOMMU_V1)
		ctx = etnaviv_iommuv1_context_alloc(global);
	else
		ctx = etnaviv_iommuv2_context_alloc(global);

	if (!ctx)
		return NULL;

	ret = etnaviv_cmdbuf_suballoc_map(suballoc, ctx, &ctx->cmdbuf_mapping,
					  global->memory_base);
	if (ret)
		goto out_free;

	if (global->version == ETNAVIV_IOMMU_V1 &&
	    ctx->cmdbuf_mapping.iova > 0x80000000) {
		dev_err(global->dev,
			"command buffer outside valid memory window\n");
		goto out_unmap;
	}

	return ctx;

out_unmap:
	etnaviv_cmdbuf_suballoc_unmap(ctx, &ctx->cmdbuf_mapping);
out_free:
	global->ops->free(ctx);
	return NULL;
}

void etnaviv_iommu_restore(struct etnaviv_gpu *gpu,
			   struct etnaviv_iommu_context *context)
{
	context->global->ops->restore(gpu, context);
}

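/*
 * Map the command buffer suballocator region into the context and take a
 * reference on the mapping, so repeated callers share a single translation.
 */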
int etnaviv_iommu_get_suballoc_va(struct etnaviv_iommu_context *context,
				  struct etnaviv_vram_mapping *mapping,
				  u32 memory_base, dma_addr_t paddr,
				  size_t size)
{
	mutex_lock(&context->lock);

	if (mapping->use > 0) {
		mapping->use++;
		mutex_unlock(&context->lock);
		return 0;
	}

	/*
	 * For MMUv1 we don't add the suballoc region to the pagetables, as
	 * those GPUs can only work with cmdbufs accessed through the linear
	 * window. Instead we manufacture a mapping to make it look uniform
	 * to the upper layers.
	 */
	if (context->global->version == ETNAVIV_IOMMU_V1) {
		mapping->iova = paddr - memory_base;
	} else {
		struct drm_mm_node *node = &mapping->vram_node;
		int ret;

		ret = etnaviv_iommu_find_iova(context, node, size);
		if (ret < 0) {
			mutex_unlock(&context->lock);
			return ret;
		}

		mapping->iova = node->start;
		ret = etnaviv_context_map(context, node->start, paddr, size,
					  ETNAVIV_PROT_READ);
		if (ret < 0) {
			drm_mm_remove_node(node);
			mutex_unlock(&context->lock);
			return ret;
		}

		context->flush_seq++;
	}

	list_add_tail(&mapping->mmu_node, &context->mappings);
	mapping->use = 1;

	mutex_unlock(&context->lock);

	return 0;
}

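/*
 * Drop a reference on the suballocator mapping and unmap it once the last
 * user is gone. MMUv1 mappings only exist as an iova offset into the linear
 * window, so there is nothing to tear down for them.
 */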
void etnaviv_iommu_put_suballoc_va(struct etnaviv_iommu_context *context,
		  struct etnaviv_vram_mapping *mapping)
{
	struct drm_mm_node *node = &mapping->vram_node;

	mutex_lock(&context->lock);
	mapping->use--;

	if (mapping->use > 0 || context->global->version == ETNAVIV_IOMMU_V1) {
		mutex_unlock(&context->lock);
		return;
	}

	etnaviv_context_unmap(context, node->start, node->size);
	drm_mm_remove_node(node);
	mutex_unlock(&context->lock);
}

size_t etnaviv_iommu_dump_size(struct etnaviv_iommu_context *context)
{
	return context->global->ops->dump_size(context);
}

void etnaviv_iommu_dump(struct etnaviv_iommu_context *context, void *buf)
{
	context->global->ops->dump(context, buf);
}

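/*
 * Set up the MMU state shared by all GPU cores of a device: detect the MMU
 * version from the core's feature bits, allocate a poisoned scratch page
 * (presumably used by the MMU backends as a safe target for invalid
 * translations) and, for MMUv2, the page table array (PTA). The state is
 * refcounted through global->use, so additional cores just take a reference
 * after checking that their MMU version matches.
 */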
int etnaviv_iommu_global_init(struct etnaviv_gpu *gpu)
{
	enum etnaviv_iommu_version version = ETNAVIV_IOMMU_V1;
	struct etnaviv_drm_private *priv = gpu->drm->dev_private;
	struct etnaviv_iommu_global *global;
	struct device *dev = gpu->drm->dev;

	if (gpu->identity.minor_features1 & chipMinorFeatures1_MMU_VERSION)
		version = ETNAVIV_IOMMU_V2;

	if (priv->mmu_global) {
		if (priv->mmu_global->version != version) {
			dev_err(gpu->dev,
				"MMU version doesn't match global version\n");
			return -ENXIO;
		}

		priv->mmu_global->use++;
		return 0;
	}

	global = kzalloc(sizeof(*global), GFP_KERNEL);
	if (!global)
		return -ENOMEM;

	global->bad_page_cpu = dma_alloc_wc(dev, SZ_4K, &global->bad_page_dma,
					    GFP_KERNEL);
	if (!global->bad_page_cpu)
		goto free_global;

	memset32(global->bad_page_cpu, 0xdead55aa, SZ_4K / sizeof(u32));

	if (version == ETNAVIV_IOMMU_V2) {
		global->v2.pta_cpu = dma_alloc_wc(dev, ETNAVIV_PTA_SIZE,
					       &global->v2.pta_dma, GFP_KERNEL);
		if (!global->v2.pta_cpu)
			goto free_bad_page;
	}

	global->dev = dev;
	global->version = version;
	global->use = 1;
	mutex_init(&global->lock);

	if (version == ETNAVIV_IOMMU_V1)
		global->ops = &etnaviv_iommuv1_ops;
	else
		global->ops = &etnaviv_iommuv2_ops;

	priv->mmu_global = global;

	return 0;

free_bad_page:
	dma_free_wc(dev, SZ_4K, global->bad_page_cpu, global->bad_page_dma);
free_global:
	kfree(global);

	return -ENOMEM;
}

void etnaviv_iommu_global_fini(struct etnaviv_gpu *gpu)
{
	struct etnaviv_drm_private *priv = gpu->drm->dev_private;
	struct etnaviv_iommu_global *global = priv->mmu_global;

	if (--global->use > 0)
		return;

	if (global->v2.pta_cpu)
		dma_free_wc(global->dev, ETNAVIV_PTA_SIZE,
			    global->v2.pta_cpu, global->v2.pta_dma);

	if (global->bad_page_cpu)
		dma_free_wc(global->dev, SZ_4K,
			    global->bad_page_cpu, global->bad_page_dma);

	mutex_destroy(&global->lock);
	kfree(global);

	priv->mmu_global = NULL;
}