1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2015-2018 Etnaviv Project
4 */
5
6#include <linux/component.h>
7#include <linux/dma-mapping.h>
8#include <linux/module.h>
9#include <linux/of.h>
10#include <linux/of_device.h>
11#include <linux/platform_device.h>
12#include <linux/uaccess.h>
13
14#include <drm/drm_debugfs.h>
15#include <drm/drm_drv.h>
16#include <drm/drm_file.h>
17#include <drm/drm_ioctl.h>
18#include <drm/drm_of.h>
19#include <drm/drm_prime.h>
20
21#include "etnaviv_cmdbuf.h"
22#include "etnaviv_drv.h"
23#include "etnaviv_gpu.h"
24#include "etnaviv_gem.h"
25#include "etnaviv_mmu.h"
26#include "etnaviv_perfmon.h"
27
28/*
29 * DRM operations:
30 */
31
32
33static void load_gpu(struct drm_device *dev)
34{
35	struct etnaviv_drm_private *priv = dev->dev_private;
36	unsigned int i;
37
38	for (i = 0; i < ETNA_MAX_PIPES; i++) {
39		struct etnaviv_gpu *g = priv->gpu[i];
40
41		if (g) {
42			int ret;
43
44			ret = etnaviv_gpu_init(g);
45			if (ret)
46				priv->gpu[i] = NULL;
47		}
48	}
49}
50
51static int etnaviv_open(struct drm_device *dev, struct drm_file *file)
52{
53	struct etnaviv_drm_private *priv = dev->dev_private;
54	struct etnaviv_file_private *ctx;
55	int ret, i;
56
57	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
58	if (!ctx)
59		return -ENOMEM;
60
61	ret = xa_alloc_cyclic(&priv->active_contexts, &ctx->id, ctx,
62			      xa_limit_32b, &priv->next_context_id, GFP_KERNEL);
63	if (ret < 0)
64		goto out_free;
65
66	ctx->mmu = etnaviv_iommu_context_init(priv->mmu_global,
67					      priv->cmdbuf_suballoc);
68	if (!ctx->mmu) {
69		ret = -ENOMEM;
70		goto out_free;
71	}
72
73	for (i = 0; i < ETNA_MAX_PIPES; i++) {
74		struct etnaviv_gpu *gpu = priv->gpu[i];
75		struct drm_gpu_scheduler *sched;
76
77		if (gpu) {
78			sched = &gpu->sched;
79			drm_sched_entity_init(&ctx->sched_entity[i],
80					      DRM_SCHED_PRIORITY_NORMAL, &sched,
81					      1, NULL);
82			}
83	}
84
85	file->driver_priv = ctx;
86
87	return 0;
88
89out_free:
90	kfree(ctx);
91	return ret;
92}
93
94static void etnaviv_postclose(struct drm_device *dev, struct drm_file *file)
95{
96	struct etnaviv_drm_private *priv = dev->dev_private;
97	struct etnaviv_file_private *ctx = file->driver_priv;
98	unsigned int i;
99
100	for (i = 0; i < ETNA_MAX_PIPES; i++) {
101		struct etnaviv_gpu *gpu = priv->gpu[i];
102
103		if (gpu)
104			drm_sched_entity_destroy(&ctx->sched_entity[i]);
105	}
106
107	etnaviv_iommu_context_put(ctx->mmu);
108
109	xa_erase(&priv->active_contexts, ctx->id);
110
111	kfree(ctx);
112}
113
114/*
115 * DRM debugfs:
116 */
117
118#ifdef CONFIG_DEBUG_FS
119static int etnaviv_gem_show(struct drm_device *dev, struct seq_file *m)
120{
121	struct etnaviv_drm_private *priv = dev->dev_private;
122
123	etnaviv_gem_describe_objects(priv, m);
124
125	return 0;
126}
127
128static int etnaviv_mm_show(struct drm_device *dev, struct seq_file *m)
129{
130	struct drm_printer p = drm_seq_file_printer(m);
131
132	read_lock(&dev->vma_offset_manager->vm_lock);
133	drm_mm_print(&dev->vma_offset_manager->vm_addr_space_mm, &p);
134	read_unlock(&dev->vma_offset_manager->vm_lock);
135
136	return 0;
137}
138
139static int etnaviv_mmu_show(struct etnaviv_gpu *gpu, struct seq_file *m)
140{
141	struct drm_printer p = drm_seq_file_printer(m);
142	struct etnaviv_iommu_context *mmu_context;
143
144	seq_printf(m, "Active Objects (%s):\n", dev_name(gpu->dev));
145
146	/*
147	 * Lock the GPU to avoid a MMU context switch just now and elevate
148	 * the refcount of the current context to avoid it disappearing from
149	 * under our feet.
150	 */
151	mutex_lock(&gpu->lock);
152	mmu_context = gpu->mmu_context;
153	if (mmu_context)
154		etnaviv_iommu_context_get(mmu_context);
155	mutex_unlock(&gpu->lock);
156
157	if (!mmu_context)
158		return 0;
159
160	mutex_lock(&mmu_context->lock);
161	drm_mm_print(&mmu_context->mm, &p);
162	mutex_unlock(&mmu_context->lock);
163
164	etnaviv_iommu_context_put(mmu_context);
165
166	return 0;
167}
168
169static void etnaviv_buffer_dump(struct etnaviv_gpu *gpu, struct seq_file *m)
170{
171	struct etnaviv_cmdbuf *buf = &gpu->buffer;
172	u32 size = buf->size;
173	u32 *ptr = buf->vaddr;
174	u32 i;
175
176	seq_printf(m, "virt %p - phys 0x%llx - free 0x%08x\n",
177			buf->vaddr, (u64)etnaviv_cmdbuf_get_pa(buf),
178			size - buf->user_size);
179
180	for (i = 0; i < size / 4; i++) {
181		if (i && !(i % 4))
182			seq_puts(m, "\n");
183		if (i % 4 == 0)
184			seq_printf(m, "\t0x%p: ", ptr + i);
185		seq_printf(m, "%08x ", *(ptr + i));
186	}
187	seq_puts(m, "\n");
188}
189
/* debugfs "ring": dump the ring buffer of one GPU core. */
static int etnaviv_ring_show(struct etnaviv_gpu *gpu, struct seq_file *m)
{
	seq_printf(m, "Ring Buffer (%s): ", dev_name(gpu->dev));

	/* Hold the GPU lock so the buffer is not appended to while dumping. */
	mutex_lock(&gpu->lock);
	etnaviv_buffer_dump(gpu, m);
	mutex_unlock(&gpu->lock);

	return 0;
}
200
201static int show_unlocked(struct seq_file *m, void *arg)
202{
203	struct drm_info_node *node = (struct drm_info_node *) m->private;
204	struct drm_device *dev = node->minor->dev;
205	int (*show)(struct drm_device *dev, struct seq_file *m) =
206			node->info_ent->data;
207
208	return show(dev, m);
209}
210
211static int show_each_gpu(struct seq_file *m, void *arg)
212{
213	struct drm_info_node *node = (struct drm_info_node *) m->private;
214	struct drm_device *dev = node->minor->dev;
215	struct etnaviv_drm_private *priv = dev->dev_private;
216	struct etnaviv_gpu *gpu;
217	int (*show)(struct etnaviv_gpu *gpu, struct seq_file *m) =
218			node->info_ent->data;
219	unsigned int i;
220	int ret = 0;
221
222	for (i = 0; i < ETNA_MAX_PIPES; i++) {
223		gpu = priv->gpu[i];
224		if (!gpu)
225			continue;
226
227		ret = show(gpu, m);
228		if (ret < 0)
229			break;
230	}
231
232	return ret;
233}
234
235static struct drm_info_list etnaviv_debugfs_list[] = {
236		{"gpu", show_each_gpu, 0, etnaviv_gpu_debugfs},
237		{"gem", show_unlocked, 0, etnaviv_gem_show},
238		{ "mm", show_unlocked, 0, etnaviv_mm_show },
239		{"mmu", show_each_gpu, 0, etnaviv_mmu_show},
240		{"ring", show_each_gpu, 0, etnaviv_ring_show},
241};
242
/* Register the static debugfs file list on the given DRM minor. */
static void etnaviv_debugfs_init(struct drm_minor *minor)
{
	drm_debugfs_create_files(etnaviv_debugfs_list,
				 ARRAY_SIZE(etnaviv_debugfs_list),
				 minor->debugfs_root, minor);
}
249#endif
250
251/*
252 * DRM ioctls:
253 */
254
255static int etnaviv_ioctl_get_param(struct drm_device *dev, void *data,
256		struct drm_file *file)
257{
258	struct etnaviv_drm_private *priv = dev->dev_private;
259	struct drm_etnaviv_param *args = data;
260	struct etnaviv_gpu *gpu;
261
262	if (args->pipe >= ETNA_MAX_PIPES)
263		return -EINVAL;
264
265	gpu = priv->gpu[args->pipe];
266	if (!gpu)
267		return -ENXIO;
268
269	return etnaviv_gpu_get_param(gpu, args->param, &args->value);
270}
271
272static int etnaviv_ioctl_gem_new(struct drm_device *dev, void *data,
273		struct drm_file *file)
274{
275	struct drm_etnaviv_gem_new *args = data;
276
277	if (args->flags & ~(ETNA_BO_CACHED | ETNA_BO_WC | ETNA_BO_UNCACHED |
278			    ETNA_BO_FORCE_MMU))
279		return -EINVAL;
280
281	return etnaviv_gem_new_handle(dev, file, args->size,
282			args->flags, &args->handle);
283}
284
285static int etnaviv_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
286		struct drm_file *file)
287{
288	struct drm_etnaviv_gem_cpu_prep *args = data;
289	struct drm_gem_object *obj;
290	int ret;
291
292	if (args->op & ~(ETNA_PREP_READ | ETNA_PREP_WRITE | ETNA_PREP_NOSYNC))
293		return -EINVAL;
294
295	obj = drm_gem_object_lookup(file, args->handle);
296	if (!obj)
297		return -ENOENT;
298
299	ret = etnaviv_gem_cpu_prep(obj, args->op, &args->timeout);
300
301	drm_gem_object_put(obj);
302
303	return ret;
304}
305
306static int etnaviv_ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
307		struct drm_file *file)
308{
309	struct drm_etnaviv_gem_cpu_fini *args = data;
310	struct drm_gem_object *obj;
311	int ret;
312
313	if (args->flags)
314		return -EINVAL;
315
316	obj = drm_gem_object_lookup(file, args->handle);
317	if (!obj)
318		return -ENOENT;
319
320	ret = etnaviv_gem_cpu_fini(obj);
321
322	drm_gem_object_put(obj);
323
324	return ret;
325}
326
327static int etnaviv_ioctl_gem_info(struct drm_device *dev, void *data,
328		struct drm_file *file)
329{
330	struct drm_etnaviv_gem_info *args = data;
331	struct drm_gem_object *obj;
332	int ret;
333
334	if (args->pad)
335		return -EINVAL;
336
337	obj = drm_gem_object_lookup(file, args->handle);
338	if (!obj)
339		return -ENOENT;
340
341	ret = etnaviv_gem_mmap_offset(obj, &args->offset);
342	drm_gem_object_put(obj);
343
344	return ret;
345}
346
347static int etnaviv_ioctl_wait_fence(struct drm_device *dev, void *data,
348		struct drm_file *file)
349{
350	struct drm_etnaviv_wait_fence *args = data;
351	struct etnaviv_drm_private *priv = dev->dev_private;
352	struct drm_etnaviv_timespec *timeout = &args->timeout;
353	struct etnaviv_gpu *gpu;
354
355	if (args->flags & ~(ETNA_WAIT_NONBLOCK))
356		return -EINVAL;
357
358	if (args->pipe >= ETNA_MAX_PIPES)
359		return -EINVAL;
360
361	gpu = priv->gpu[args->pipe];
362	if (!gpu)
363		return -ENXIO;
364
365	if (args->flags & ETNA_WAIT_NONBLOCK)
366		timeout = NULL;
367
368	return etnaviv_gpu_wait_fence_interruptible(gpu, args->fence,
369						    timeout);
370}
371
372static int etnaviv_ioctl_gem_userptr(struct drm_device *dev, void *data,
373	struct drm_file *file)
374{
375	struct drm_etnaviv_gem_userptr *args = data;
376
377	if (args->flags & ~(ETNA_USERPTR_READ|ETNA_USERPTR_WRITE) ||
378	    args->flags == 0)
379		return -EINVAL;
380
381	if (offset_in_page(args->user_ptr | args->user_size) ||
382	    (uintptr_t)args->user_ptr != args->user_ptr ||
383	    (u32)args->user_size != args->user_size ||
384	    args->user_ptr & ~PAGE_MASK)
385		return -EINVAL;
386
387	if (!access_ok((void __user *)(unsigned long)args->user_ptr,
388		       args->user_size))
389		return -EFAULT;
390
391	return etnaviv_gem_new_userptr(dev, file, args->user_ptr,
392				       args->user_size, args->flags,
393				       &args->handle);
394}
395
396static int etnaviv_ioctl_gem_wait(struct drm_device *dev, void *data,
397	struct drm_file *file)
398{
399	struct etnaviv_drm_private *priv = dev->dev_private;
400	struct drm_etnaviv_gem_wait *args = data;
401	struct drm_etnaviv_timespec *timeout = &args->timeout;
402	struct drm_gem_object *obj;
403	struct etnaviv_gpu *gpu;
404	int ret;
405
406	if (args->flags & ~(ETNA_WAIT_NONBLOCK))
407		return -EINVAL;
408
409	if (args->pipe >= ETNA_MAX_PIPES)
410		return -EINVAL;
411
412	gpu = priv->gpu[args->pipe];
413	if (!gpu)
414		return -ENXIO;
415
416	obj = drm_gem_object_lookup(file, args->handle);
417	if (!obj)
418		return -ENOENT;
419
420	if (args->flags & ETNA_WAIT_NONBLOCK)
421		timeout = NULL;
422
423	ret = etnaviv_gem_wait_bo(gpu, obj, timeout);
424
425	drm_gem_object_put(obj);
426
427	return ret;
428}
429
430static int etnaviv_ioctl_pm_query_dom(struct drm_device *dev, void *data,
431	struct drm_file *file)
432{
433	struct etnaviv_drm_private *priv = dev->dev_private;
434	struct drm_etnaviv_pm_domain *args = data;
435	struct etnaviv_gpu *gpu;
436
437	if (args->pipe >= ETNA_MAX_PIPES)
438		return -EINVAL;
439
440	gpu = priv->gpu[args->pipe];
441	if (!gpu)
442		return -ENXIO;
443
444	return etnaviv_pm_query_dom(gpu, args);
445}
446
447static int etnaviv_ioctl_pm_query_sig(struct drm_device *dev, void *data,
448	struct drm_file *file)
449{
450	struct etnaviv_drm_private *priv = dev->dev_private;
451	struct drm_etnaviv_pm_signal *args = data;
452	struct etnaviv_gpu *gpu;
453
454	if (args->pipe >= ETNA_MAX_PIPES)
455		return -EINVAL;
456
457	gpu = priv->gpu[args->pipe];
458	if (!gpu)
459		return -ENXIO;
460
461	return etnaviv_pm_query_sig(gpu, args);
462}
463
/*
 * Driver-private ioctl table. Every entry is flagged DRM_RENDER_ALLOW,
 * so all operations are also available on render nodes.
 */
static const struct drm_ioctl_desc etnaviv_ioctls[] = {
#define ETNA_IOCTL(n, func, flags) \
	DRM_IOCTL_DEF_DRV(ETNAVIV_##n, etnaviv_ioctl_##func, flags)
	ETNA_IOCTL(GET_PARAM,    get_param,    DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_NEW,      gem_new,      DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_INFO,     gem_info,     DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_CPU_PREP, gem_cpu_prep, DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_CPU_FINI, gem_cpu_fini, DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_SUBMIT,   gem_submit,   DRM_RENDER_ALLOW),
	ETNA_IOCTL(WAIT_FENCE,   wait_fence,   DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_USERPTR,  gem_userptr,  DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_WAIT,     gem_wait,     DRM_RENDER_ALLOW),
	ETNA_IOCTL(PM_QUERY_DOM, pm_query_dom, DRM_RENDER_ALLOW),
	ETNA_IOCTL(PM_QUERY_SIG, pm_query_sig, DRM_RENDER_ALLOW),
};
479
480DEFINE_DRM_GEM_FOPS(fops);
481
482static const struct drm_driver etnaviv_drm_driver = {
483	.driver_features    = DRIVER_GEM | DRIVER_RENDER,
484	.open               = etnaviv_open,
485	.postclose           = etnaviv_postclose,
486	.gem_prime_import_sg_table = etnaviv_gem_prime_import_sg_table,
487#ifdef CONFIG_DEBUG_FS
488	.debugfs_init       = etnaviv_debugfs_init,
489#endif
490	.ioctls             = etnaviv_ioctls,
491	.num_ioctls         = DRM_ETNAVIV_NUM_IOCTLS,
492	.fops               = &fops,
493	.name               = "etnaviv",
494	.desc               = "etnaviv DRM",
495	.date               = "20151214",
496	.major              = 1,
497	.minor              = 4,
498};
499
500/*
501 * Platform driver:
502 */
/*
 * Component-master bind: create the DRM device, set up driver-private
 * state, bind all GPU core components and register with the DRM core.
 * The goto labels unwind in exact reverse order of setup.
 */
static int etnaviv_bind(struct device *dev)
{
	struct etnaviv_drm_private *priv;
	struct drm_device *drm;
	int ret;

	drm = drm_dev_alloc(&etnaviv_drm_driver, dev);
	if (IS_ERR(drm))
		return PTR_ERR(drm);

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		dev_err(dev, "failed to allocate private data\n");
		ret = -ENOMEM;
		goto out_put;
	}
	drm->dev_private = priv;

	/* Allow large contiguous scatterlist segments for GPU buffers. */
	dma_set_max_seg_size(dev, SZ_2G);

	/* ID allocator for per-open file contexts. */
	xa_init_flags(&priv->active_contexts, XA_FLAGS_ALLOC);

	mutex_init(&priv->gem_lock);
	INIT_LIST_HEAD(&priv->gem_list);
	priv->num_gpus = 0;
	/* Shmem allocations: retry hard but allow failure, without warnings. */
	priv->shm_gfp_mask = GFP_HIGHUSER | __GFP_RETRY_MAYFAIL | __GFP_NOWARN;

	priv->cmdbuf_suballoc = etnaviv_cmdbuf_suballoc_new(drm->dev);
	if (IS_ERR(priv->cmdbuf_suballoc)) {
		dev_err(drm->dev, "Failed to create cmdbuf suballocator\n");
		ret = PTR_ERR(priv->cmdbuf_suballoc);
		goto out_free_priv;
	}

	dev_set_drvdata(dev, drm);

	/* Bind all GPU core components to the DRM device. */
	ret = component_bind_all(dev, drm);
	if (ret < 0)
		goto out_destroy_suballoc;

	load_gpu(drm);

	ret = drm_dev_register(drm, 0);
	if (ret)
		goto out_unbind;

	return 0;

out_unbind:
	component_unbind_all(dev, drm);
out_destroy_suballoc:
	etnaviv_cmdbuf_suballoc_destroy(priv->cmdbuf_suballoc);
out_free_priv:
	kfree(priv);
out_put:
	drm_dev_put(drm);

	return ret;
}
562
/*
 * Component-master unbind: tear down in reverse order of etnaviv_bind().
 */
static void etnaviv_unbind(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);
	struct etnaviv_drm_private *priv = drm->dev_private;

	/* Cut off userspace access before dismantling anything. */
	drm_dev_unregister(drm);

	component_unbind_all(dev, drm);

	etnaviv_cmdbuf_suballoc_destroy(priv->cmdbuf_suballoc);

	xa_destroy(&priv->active_contexts);

	drm->dev_private = NULL;
	kfree(priv);

	drm_dev_put(drm);
}
581
/* Component framework callbacks for the aggregate etnaviv device. */
static const struct component_master_ops etnaviv_master_ops = {
	.bind = etnaviv_bind,
	.unbind = etnaviv_unbind,
};
586
587static int etnaviv_pdev_probe(struct platform_device *pdev)
588{
589	struct device *dev = &pdev->dev;
590	struct device_node *first_node = NULL;
591	struct component_match *match = NULL;
592
593	if (!dev->platform_data) {
594		struct device_node *core_node;
595
596		for_each_compatible_node(core_node, NULL, "vivante,gc") {
597			if (!of_device_is_available(core_node))
598				continue;
599
600			if (!first_node)
601				first_node = core_node;
602
603			drm_of_component_match_add(&pdev->dev, &match,
604						   component_compare_of, core_node);
605		}
606	} else {
607		char **names = dev->platform_data;
608		unsigned i;
609
610		for (i = 0; names[i]; i++)
611			component_match_add(dev, &match, component_compare_dev_name, names[i]);
612	}
613
614	/*
615	 * PTA and MTLB can have 40 bit base addresses, but
616	 * unfortunately, an entry in the MTLB can only point to a
617	 * 32 bit base address of a STLB. Moreover, to initialize the
618	 * MMU we need a command buffer with a 32 bit address because
619	 * without an MMU there is only an indentity mapping between
620	 * the internal 32 bit addresses and the bus addresses.
621	 *
622	 * To make things easy, we set the dma_coherent_mask to 32
623	 * bit to make sure we are allocating the command buffers and
624	 * TLBs in the lower 4 GiB address space.
625	 */
626	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(40)) ||
627	    dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32))) {
628		dev_dbg(&pdev->dev, "No suitable DMA available\n");
629		return -ENODEV;
630	}
631
632	/*
633	 * Apply the same DMA configuration to the virtual etnaviv
634	 * device as the GPU we found. This assumes that all Vivante
635	 * GPUs in the system share the same DMA constraints.
636	 */
637	if (first_node)
638		of_dma_configure(&pdev->dev, first_node, true);
639
640	return component_master_add_with_match(dev, &etnaviv_master_ops, match);
641}
642
static int etnaviv_pdev_remove(struct platform_device *pdev)
{
	/* Detach the component master registered via etnaviv_master_ops. */
	component_master_del(&pdev->dev, &etnaviv_master_ops);

	return 0;
}
649
/* Platform driver backing the virtual "etnaviv" device. */
static struct platform_driver etnaviv_platform_driver = {
	.probe      = etnaviv_pdev_probe,
	.remove     = etnaviv_pdev_remove,
	.driver     = {
		.name   = "etnaviv",
	},
};
657
/* The single virtual platform device instantiated by etnaviv_init(). */
static struct platform_device *etnaviv_drm;
659
/*
 * Module init: register the GPU core driver and the virtual platform
 * driver, then instantiate the virtual device if the DT has at least
 * one available "vivante,gc" node. Error paths unregister in reverse
 * order of registration.
 */
static int __init etnaviv_init(void)
{
	struct platform_device *pdev;
	int ret;
	struct device_node *np;

	etnaviv_validate_init();

	ret = platform_driver_register(&etnaviv_gpu_driver);
	if (ret != 0)
		return ret;

	ret = platform_driver_register(&etnaviv_platform_driver);
	if (ret != 0)
		goto unregister_gpu_driver;

	/*
	 * If the DT contains at least one available GPU device, instantiate
	 * the DRM platform device.
	 */
	for_each_compatible_node(np, NULL, "vivante,gc") {
		if (!of_device_is_available(np))
			continue;

		pdev = platform_device_alloc("etnaviv", PLATFORM_DEVID_NONE);
		if (!pdev) {
			ret = -ENOMEM;
			/* Leaving the loop early: drop the iterator's ref. */
			of_node_put(np);
			goto unregister_platform_driver;
		}

		ret = platform_device_add(pdev);
		if (ret) {
			platform_device_put(pdev);
			of_node_put(np);
			goto unregister_platform_driver;
		}

		etnaviv_drm = pdev;
		of_node_put(np);
		break;
	}

	return 0;

unregister_platform_driver:
	platform_driver_unregister(&etnaviv_platform_driver);
unregister_gpu_driver:
	platform_driver_unregister(&etnaviv_gpu_driver);
	return ret;
}
module_init(etnaviv_init);
712
/* Module exit: unwind etnaviv_init() in reverse order. */
static void __exit etnaviv_exit(void)
{
	platform_device_unregister(etnaviv_drm);
	platform_driver_unregister(&etnaviv_platform_driver);
	platform_driver_unregister(&etnaviv_gpu_driver);
}
module_exit(etnaviv_exit);
720
721MODULE_AUTHOR("Christian Gmeiner <christian.gmeiner@gmail.com>");
722MODULE_AUTHOR("Russell King <rmk+kernel@armlinux.org.uk>");
723MODULE_AUTHOR("Lucas Stach <l.stach@pengutronix.de>");
724MODULE_DESCRIPTION("etnaviv DRM Driver");
725MODULE_LICENSE("GPL v2");
726MODULE_ALIAS("platform:etnaviv");
727