// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
 * Copyright 2013-2016 Freescale Semiconductor Inc.
 * Copyright 2016-2017,2019-2020 NXP
 */

#include <linux/device.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/vfio.h>
#include <linux/fsl/mc.h>
#include <linux/delay.h>
#include <linux/io-64-nonatomic-hi-lo.h>

#include "vfio_fsl_mc_private.h"

static struct fsl_mc_driver vfio_fsl_mc_driver;

static DEFINE_MUTEX(reflck_lock);

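/*
 * A reflck is a kref-counted mutex shared between a DPRC (container)
 * device and all of its child devices, so that open/release state for
 * the whole container is serialized under a single lock. The global
 * reflck_lock guards allocation and lookup of that shared reflck.
 */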
static void vfio_fsl_mc_reflck_get(struct vfio_fsl_mc_reflck *reflck)
{
	kref_get(&reflck->kref);
}

static void vfio_fsl_mc_reflck_release(struct kref *kref)
{
	struct vfio_fsl_mc_reflck *reflck = container_of(kref,
						      struct vfio_fsl_mc_reflck,
						      kref);

	mutex_destroy(&reflck->lock);
	kfree(reflck);
	mutex_unlock(&reflck_lock);
}

static void vfio_fsl_mc_reflck_put(struct vfio_fsl_mc_reflck *reflck)
{
	kref_put_mutex(&reflck->kref, vfio_fsl_mc_reflck_release, &reflck_lock);
}

static struct vfio_fsl_mc_reflck *vfio_fsl_mc_reflck_alloc(void)
{
	struct vfio_fsl_mc_reflck *reflck;

	reflck = kzalloc(sizeof(*reflck), GFP_KERNEL);
	if (!reflck)
		return ERR_PTR(-ENOMEM);

	kref_init(&reflck->kref);
	mutex_init(&reflck->lock);

	return reflck;
}

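/*
 * A DPRC allocates its own reflck; any other object looks up the VFIO
 * device of its parent container and takes a reference on the parent's
 * reflck instead, so every device in a container shares the same lock.
 */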
static int vfio_fsl_mc_reflck_attach(struct vfio_fsl_mc_device *vdev)
{
	int ret = 0;

	mutex_lock(&reflck_lock);
	if (is_fsl_mc_bus_dprc(vdev->mc_dev)) {
		vdev->reflck = vfio_fsl_mc_reflck_alloc();
		ret = PTR_ERR_OR_ZERO(vdev->reflck);
	} else {
		struct device *mc_cont_dev = vdev->mc_dev->dev.parent;
		struct vfio_device *device;
		struct vfio_fsl_mc_device *cont_vdev;

		device = vfio_device_get_from_dev(mc_cont_dev);
		if (!device) {
			ret = -ENODEV;
			goto unlock;
		}

		cont_vdev = vfio_device_data(device);
		if (!cont_vdev || !cont_vdev->reflck) {
			vfio_device_put(device);
			ret = -ENODEV;
			goto unlock;
		}
		vfio_fsl_mc_reflck_get(cont_vdev->reflck);
		vdev->reflck = cont_vdev->reflck;
		vfio_device_put(device);
	}

unlock:
	mutex_unlock(&reflck_lock);
	return ret;
}

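/*
 * Note that DPRC regions (the MC command portals) are never mmap'ed:
 * the MMAP flag is withheld for them below, so they are only reachable
 * through the mediated read/write command interface further down.
 */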
static int vfio_fsl_mc_regions_init(struct vfio_fsl_mc_device *vdev)
{
	struct fsl_mc_device *mc_dev = vdev->mc_dev;
	int count = mc_dev->obj_desc.region_count;
	int i;

	vdev->regions = kcalloc(count, sizeof(struct vfio_fsl_mc_region),
				GFP_KERNEL);
	if (!vdev->regions)
		return -ENOMEM;

	for (i = 0; i < count; i++) {
		struct resource *res = &mc_dev->regions[i];
		int no_mmap = is_fsl_mc_bus_dprc(mc_dev);

		vdev->regions[i].addr = res->start;
		vdev->regions[i].size = resource_size(res);
		vdev->regions[i].type = mc_dev->regions[i].flags & IORESOURCE_BITS;
		/*
		 * Only regions addressed with PAGE granularity may be
		 * MMAPed securely.
		 */
		if (!no_mmap && !(vdev->regions[i].addr & ~PAGE_MASK) &&
				!(vdev->regions[i].size & ~PAGE_MASK))
			vdev->regions[i].flags |=
					VFIO_REGION_INFO_FLAG_MMAP;
		vdev->regions[i].flags |= VFIO_REGION_INFO_FLAG_READ;
		if (!(mc_dev->regions[i].flags & IORESOURCE_READONLY))
			vdev->regions[i].flags |= VFIO_REGION_INFO_FLAG_WRITE;
	}

	return 0;
}

static void vfio_fsl_mc_regions_cleanup(struct vfio_fsl_mc_device *vdev)
{
	struct fsl_mc_device *mc_dev = vdev->mc_dev;
	int i;

	for (i = 0; i < mc_dev->obj_desc.region_count; i++)
		iounmap(vdev->regions[i].ioaddr);
	kfree(vdev->regions);
}

static int vfio_fsl_mc_open(void *device_data)
{
	struct vfio_fsl_mc_device *vdev = device_data;
	int ret;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	mutex_lock(&vdev->reflck->lock);
	if (!vdev->refcnt) {
		ret = vfio_fsl_mc_regions_init(vdev);
		if (ret)
			goto err_reg_init;
	}
	vdev->refcnt++;

	mutex_unlock(&vdev->reflck->lock);

	return 0;

err_reg_init:
	mutex_unlock(&vdev->reflck->lock);
	module_put(THIS_MODULE);
	return ret;
}

static void vfio_fsl_mc_release(void *device_data)
{
	struct vfio_fsl_mc_device *vdev = device_data;
	int ret;

	mutex_lock(&vdev->reflck->lock);

	if (!(--vdev->refcnt)) {
		struct fsl_mc_device *mc_dev = vdev->mc_dev;
		struct device *cont_dev = fsl_mc_cont_dev(&mc_dev->dev);
		struct fsl_mc_device *mc_cont = to_fsl_mc_device(cont_dev);

		vfio_fsl_mc_regions_cleanup(vdev);

		/* reset the device before cleaning up the interrupts */
		ret = dprc_reset_container(mc_cont->mc_io, 0,
					   mc_cont->mc_handle,
					   mc_cont->obj_desc.id,
					   DPRC_RESET_OPTION_NON_RECURSIVE);

		if (ret) {
			dev_warn(&mc_cont->dev, "VFIO_FSL_MC: reset device failed (%d)\n",
				 ret);
			WARN_ON(1);
		}

		vfio_fsl_mc_irqs_cleanup(vdev);

		fsl_mc_cleanup_irq_pool(mc_cont);
	}

	mutex_unlock(&vdev->reflck->lock);

	module_put(THIS_MODULE);
}

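/*
 * The VFIO file offset encodes the region index in its upper bits
 * (VFIO_FSL_MC_OFFSET_SHIFT, defined in vfio_fsl_mc_private.h); the
 * INDEX_TO_OFFSET/OFFSET_TO_INDEX macros used below convert between
 * the two representations.
 */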
static long vfio_fsl_mc_ioctl(void *device_data, unsigned int cmd,
			      unsigned long arg)
{
	unsigned long minsz;
	struct vfio_fsl_mc_device *vdev = device_data;
	struct fsl_mc_device *mc_dev = vdev->mc_dev;

	switch (cmd) {
	case VFIO_DEVICE_GET_INFO:
	{
		struct vfio_device_info info;

		minsz = offsetofend(struct vfio_device_info, num_irqs);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		info.flags = VFIO_DEVICE_FLAGS_FSL_MC;

		if (is_fsl_mc_bus_dprc(mc_dev))
			info.flags |= VFIO_DEVICE_FLAGS_RESET;

		info.num_regions = mc_dev->obj_desc.region_count;
		info.num_irqs = mc_dev->obj_desc.irq_count;

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;
	}
	case VFIO_DEVICE_GET_REGION_INFO:
	{
		struct vfio_region_info info;

		minsz = offsetofend(struct vfio_region_info, offset);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		if (info.index >= mc_dev->obj_desc.region_count)
			return -EINVAL;

		/* map offset to the physical address */
		info.offset = VFIO_FSL_MC_INDEX_TO_OFFSET(info.index);
		info.size = vdev->regions[info.index].size;
		info.flags = vdev->regions[info.index].flags;

		if (copy_to_user((void __user *)arg, &info, minsz))
			return -EFAULT;
		return 0;
	}
	case VFIO_DEVICE_GET_IRQ_INFO:
	{
		struct vfio_irq_info info;

		minsz = offsetofend(struct vfio_irq_info, count);
		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		if (info.index >= mc_dev->obj_desc.irq_count)
			return -EINVAL;

		info.flags = VFIO_IRQ_INFO_EVENTFD;
		info.count = 1;

		if (copy_to_user((void __user *)arg, &info, minsz))
			return -EFAULT;
		return 0;
	}
	case VFIO_DEVICE_SET_IRQS:
	{
		struct vfio_irq_set hdr;
		u8 *data = NULL;
		int ret = 0;
		size_t data_size = 0;

		minsz = offsetofend(struct vfio_irq_set, count);

		if (copy_from_user(&hdr, (void __user *)arg, minsz))
			return -EFAULT;

		ret = vfio_set_irqs_validate_and_prepare(&hdr, mc_dev->obj_desc.irq_count,
							 mc_dev->obj_desc.irq_count, &data_size);
		if (ret)
			return ret;

		if (data_size) {
			data = memdup_user((void __user *)(arg + minsz),
					   data_size);
			if (IS_ERR(data))
				return PTR_ERR(data);
		}

		mutex_lock(&vdev->igate);
		ret = vfio_fsl_mc_set_irqs_ioctl(vdev, hdr.flags,
						 hdr.index, hdr.start,
						 hdr.count, data);
		mutex_unlock(&vdev->igate);
		kfree(data);

		return ret;
	}
	case VFIO_DEVICE_RESET:
	{
		int ret;

		/* reset is supported only for the DPRC */
		if (!is_fsl_mc_bus_dprc(mc_dev))
			return -ENOTTY;

		ret = dprc_reset_container(mc_dev->mc_io, 0,
					   mc_dev->mc_handle,
					   mc_dev->obj_desc.id,
					   DPRC_RESET_OPTION_NON_RECURSIVE);
		return ret;
	}
	default:
		return -ENOTTY;
	}
}

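/*
 * read() supports exactly one access pattern: a 64-byte read at offset
 * 0 of a region, which returns the current contents of an MC command
 * portal (the header word plus seven parameter words).
 */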
static ssize_t vfio_fsl_mc_read(void *device_data, char __user *buf,
				size_t count, loff_t *ppos)
{
	struct vfio_fsl_mc_device *vdev = device_data;
	unsigned int index = VFIO_FSL_MC_OFFSET_TO_INDEX(*ppos);
	loff_t off = *ppos & VFIO_FSL_MC_OFFSET_MASK;
	struct fsl_mc_device *mc_dev = vdev->mc_dev;
	struct vfio_fsl_mc_region *region;
	u64 data[8];
	int i;

	if (index >= mc_dev->obj_desc.region_count)
		return -EINVAL;

	region = &vdev->regions[index];

	if (!(region->flags & VFIO_REGION_INFO_FLAG_READ))
		return -EINVAL;

	if (!region->ioaddr) {
		region->ioaddr = ioremap(region->addr, region->size);
		if (!region->ioaddr)
			return -ENOMEM;
	}

	if (count != 64 || off != 0)
		return -EINVAL;

	for (i = 7; i >= 0; i--)
		data[i] = readq(region->ioaddr + i * sizeof(uint64_t));

	if (copy_to_user(buf, data, 64))
		return -EFAULT;

	return count;
}

#define MC_CMD_COMPLETION_TIMEOUT_MS    5000
#define MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS    500

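/*
 * Issue one command through an MC command portal: the seven parameter
 * words are written first with relaxed ordering, then the header word
 * is written last with a barrier, which submits the command to the MC
 * firmware. Completion is detected by polling the status field of the
 * header until it leaves the READY state.
 */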
static int vfio_fsl_mc_send_command(void __iomem *ioaddr, uint64_t *cmd_data)
{
	int i;
	enum mc_cmd_status status;
	unsigned long timeout_usecs = MC_CMD_COMPLETION_TIMEOUT_MS * 1000;

	/* Write the command parameters into the portal */
	for (i = 7; i >= 1; i--)
		writeq_relaxed(cmd_data[i], ioaddr + i * sizeof(uint64_t));

	/* Write the command header last to submit the command */
	writeq(cmd_data[0], ioaddr);

	/*
	 * Wait for the response before returning to user space. This
	 * could be optimized in the future to prepare the response
	 * before returning to user space and avoid the read ioctl.
	 */
	for (;;) {
		u64 header;
		struct mc_cmd_header *resp_hdr;

		header = cpu_to_le64(readq_relaxed(ioaddr));

		resp_hdr = (struct mc_cmd_header *)&header;
		status = (enum mc_cmd_status)resp_hdr->status;
		if (status != MC_CMD_STATUS_READY)
			break;

		udelay(MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS);
		timeout_usecs -= MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS;
		if (timeout_usecs == 0)
			return -ETIMEDOUT;
	}

	return 0;
}

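/*
 * A minimal sketch of the expected userspace sequence for issuing an
 * MC command through this interface (hypothetical values; assumes
 * "fd" is the VFIO device fd and region 0 is a command portal, with
 * the offset computed as index << VFIO_FSL_MC_OFFSET_SHIFT):
 *
 *	uint64_t cmd[8];	// cmd[0] = header, cmd[1..7] = parameters
 *	pwrite(fd, cmd, 64, offset_of_region_0);
 *	pread(fd, cmd, 64, offset_of_region_0);
 *
 * The write blocks until the command completes (or times out); the
 * read then returns the response from the portal.
 */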
static ssize_t vfio_fsl_mc_write(void *device_data, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct vfio_fsl_mc_device *vdev = device_data;
	unsigned int index = VFIO_FSL_MC_OFFSET_TO_INDEX(*ppos);
	loff_t off = *ppos & VFIO_FSL_MC_OFFSET_MASK;
	struct fsl_mc_device *mc_dev = vdev->mc_dev;
	struct vfio_fsl_mc_region *region;
	u64 data[8];
	int ret;

	if (index >= mc_dev->obj_desc.region_count)
		return -EINVAL;

	region = &vdev->regions[index];

	if (!(region->flags & VFIO_REGION_INFO_FLAG_WRITE))
		return -EINVAL;

	if (!region->ioaddr) {
		region->ioaddr = ioremap(region->addr, region->size);
		if (!region->ioaddr)
			return -ENOMEM;
	}

	if (count != 64 || off != 0)
		return -EINVAL;

	if (copy_from_user(&data, buf, 64))
		return -EFAULT;

	ret = vfio_fsl_mc_send_command(region->ioaddr, data);
	if (ret)
		return ret;

	return count;
}

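/*
 * Only regions that the MC firmware describes as both cacheable and
 * shareable may be mapped with cacheable attributes; all other regions
 * are forced to a non-cached mapping.
 */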
static int vfio_fsl_mc_mmap_mmio(struct vfio_fsl_mc_region region,
				 struct vm_area_struct *vma)
{
	u64 size = vma->vm_end - vma->vm_start;
	u64 pgoff, base;
	u8 region_cacheable;

	pgoff = vma->vm_pgoff &
		((1U << (VFIO_FSL_MC_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
	base = pgoff << PAGE_SHIFT;

	if (region.size < PAGE_SIZE || base + size > region.size)
		return -EINVAL;

	region_cacheable = (region.type & FSL_MC_REGION_CACHEABLE) &&
			   (region.type & FSL_MC_REGION_SHAREABLE);
	if (!region_cacheable)
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	vma->vm_pgoff = (region.addr >> PAGE_SHIFT) + pgoff;

	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			       size, vma->vm_page_prot);
}

static int vfio_fsl_mc_mmap(void *device_data, struct vm_area_struct *vma)
{
	struct vfio_fsl_mc_device *vdev = device_data;
	struct fsl_mc_device *mc_dev = vdev->mc_dev;
	unsigned int index;

	index = vma->vm_pgoff >> (VFIO_FSL_MC_OFFSET_SHIFT - PAGE_SHIFT);

	if (vma->vm_end < vma->vm_start)
		return -EINVAL;
	if (vma->vm_start & ~PAGE_MASK)
		return -EINVAL;
	if (vma->vm_end & ~PAGE_MASK)
		return -EINVAL;
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;
	if (index >= mc_dev->obj_desc.region_count)
		return -EINVAL;

	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_MMAP))
		return -EINVAL;

	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_READ)
			&& (vma->vm_flags & VM_READ))
		return -EINVAL;

	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_WRITE)
			&& (vma->vm_flags & VM_WRITE))
		return -EINVAL;

	vma->vm_private_data = mc_dev;

	return vfio_fsl_mc_mmap_mmio(vdev->regions[index], vma);
}

static const struct vfio_device_ops vfio_fsl_mc_ops = {
	.name		= "vfio-fsl-mc",
	.open		= vfio_fsl_mc_open,
	.release	= vfio_fsl_mc_release,
	.ioctl		= vfio_fsl_mc_ioctl,
	.read		= vfio_fsl_mc_read,
	.write		= vfio_fsl_mc_write,
	.mmap		= vfio_fsl_mc_mmap,
};

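/*
 * Once a DPRC is bound to vfio-fsl-mc, newly discovered objects inside
 * that container must not bind to their regular kernel drivers. The
 * bus notifier sets driver_override on each device added under the
 * container so it binds to vfio-fsl-mc as well, and warns if anything
 * else manages to bind.
 */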
static int vfio_fsl_mc_bus_notifier(struct notifier_block *nb,
				    unsigned long action, void *data)
{
	struct vfio_fsl_mc_device *vdev = container_of(nb,
					struct vfio_fsl_mc_device, nb);
	struct device *dev = data;
	struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
	struct fsl_mc_device *mc_cont = to_fsl_mc_device(mc_dev->dev.parent);

	if (action == BUS_NOTIFY_ADD_DEVICE &&
	    vdev->mc_dev == mc_cont) {
		mc_dev->driver_override = kasprintf(GFP_KERNEL, "%s",
						    vfio_fsl_mc_ops.name);
		if (!mc_dev->driver_override)
			dev_warn(dev, "VFIO_FSL_MC: Setting driver override for device in dprc %s failed\n",
				 dev_name(&mc_cont->dev));
		else
			dev_info(dev, "VFIO_FSL_MC: Setting driver override for device in dprc %s\n",
				 dev_name(&mc_cont->dev));
	} else if (action == BUS_NOTIFY_BOUND_DRIVER &&
		vdev->mc_dev == mc_cont) {
		struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(dev->driver);

		if (mc_drv && mc_drv != &vfio_fsl_mc_driver)
			dev_warn(dev, "VFIO_FSL_MC: Object %s bound to driver %s while DPRC bound to vfio-fsl-mc\n",
				 dev_name(dev), mc_drv->driver.name);
	}

	return 0;
}

static int vfio_fsl_mc_init_device(struct vfio_fsl_mc_device *vdev)
{
	struct fsl_mc_device *mc_dev = vdev->mc_dev;
	int ret;

	/* Non-DPRC devices share the mc_io of their parent container */
	if (!is_fsl_mc_bus_dprc(mc_dev)) {
		struct fsl_mc_device *mc_cont = to_fsl_mc_device(mc_dev->dev.parent);

		mc_dev->mc_io = mc_cont->mc_io;
		return 0;
	}

	vdev->nb.notifier_call = vfio_fsl_mc_bus_notifier;
	ret = bus_register_notifier(&fsl_mc_bus_type, &vdev->nb);
	if (ret)
		return ret;

	/* open the DPRC and allocate an MC portal for it */
	ret = dprc_setup(mc_dev);
	if (ret) {
		dev_err(&mc_dev->dev, "VFIO_FSL_MC: Failed to setup DPRC (%d)\n", ret);
		goto out_nc_unreg;
	}
	return 0;

out_nc_unreg:
	bus_unregister_notifier(&fsl_mc_bus_type, &vdev->nb);
	return ret;
}

static int vfio_fsl_mc_scan_container(struct fsl_mc_device *mc_dev)
{
	int ret;

	/* non-DPRC devices do not scan for other devices */
	if (!is_fsl_mc_bus_dprc(mc_dev))
		return 0;
	ret = dprc_scan_container(mc_dev, false);
	if (ret) {
		dev_err(&mc_dev->dev,
			"VFIO_FSL_MC: Container scanning failed (%d)\n", ret);
		dprc_remove_devices(mc_dev, NULL, 0);
		return ret;
	}
	return 0;
}

static void vfio_fsl_uninit_device(struct vfio_fsl_mc_device *vdev)
{
	struct fsl_mc_device *mc_dev = vdev->mc_dev;

	if (!is_fsl_mc_bus_dprc(mc_dev))
		return;

	dprc_cleanup(mc_dev);
	bus_unregister_notifier(&fsl_mc_bus_type, &vdev->nb);
}

static int vfio_fsl_mc_probe(struct fsl_mc_device *mc_dev)
{
	struct iommu_group *group;
	struct vfio_fsl_mc_device *vdev;
	struct device *dev = &mc_dev->dev;
	int ret;

	group = vfio_iommu_group_get(dev);
	if (!group) {
		dev_err(dev, "VFIO_FSL_MC: No IOMMU group\n");
		return -EINVAL;
	}

	vdev = devm_kzalloc(dev, sizeof(*vdev), GFP_KERNEL);
	if (!vdev) {
		ret = -ENOMEM;
		goto out_group_put;
	}

	vdev->mc_dev = mc_dev;
	mutex_init(&vdev->igate);

	ret = vfio_fsl_mc_reflck_attach(vdev);
	if (ret)
		goto out_group_put;

	ret = vfio_fsl_mc_init_device(vdev);
	if (ret)
		goto out_reflck;

	ret = vfio_add_group_dev(dev, &vfio_fsl_mc_ops, vdev);
	if (ret) {
		dev_err(dev, "VFIO_FSL_MC: Failed to add to vfio group\n");
		goto out_device;
	}

	/*
	 * This triggers recursion into vfio_fsl_mc_probe() on another device
	 * and the vfio_fsl_mc_reflck_attach() must succeed, which relies on
	 * the vfio_add_group_dev() above. It has no impact on this vdev, so
	 * it is safe to do after the vfio device is made live.
	 */
	ret = vfio_fsl_mc_scan_container(mc_dev);
	if (ret)
		goto out_group_dev;
	return 0;

out_group_dev:
	vfio_del_group_dev(dev);
out_device:
	vfio_fsl_uninit_device(vdev);
out_reflck:
	vfio_fsl_mc_reflck_put(vdev->reflck);
out_group_put:
	vfio_iommu_group_put(group, dev);
	return ret;
}

static int vfio_fsl_mc_remove(struct fsl_mc_device *mc_dev)
{
	struct vfio_fsl_mc_device *vdev;
	struct device *dev = &mc_dev->dev;

	vdev = vfio_del_group_dev(dev);
	if (!vdev)
		return -EINVAL;

	mutex_destroy(&vdev->igate);

	dprc_remove_devices(mc_dev, NULL, 0);
	vfio_fsl_uninit_device(vdev);
	vfio_fsl_mc_reflck_put(vdev->reflck);

	vfio_iommu_group_put(mc_dev->dev.iommu_group, dev);

	return 0;
}

static struct fsl_mc_driver vfio_fsl_mc_driver = {
	.probe		= vfio_fsl_mc_probe,
	.remove		= vfio_fsl_mc_remove,
	.driver	= {
		.name	= "vfio-fsl-mc",
		.owner	= THIS_MODULE,
	},
};

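/*
 * A minimal sketch of handing a container to this driver from
 * userspace (hypothetical container name "dprc.2", assuming it is not
 * already bound to another driver; its child objects are then rebound
 * automatically via the bus notifier above):
 *
 *	echo vfio-fsl-mc > /sys/bus/fsl-mc/devices/dprc.2/driver_override
 *	echo dprc.2 > /sys/bus/fsl-mc/drivers/vfio-fsl-mc/bind
 */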
static int __init vfio_fsl_mc_driver_init(void)
{
	return fsl_mc_driver_register(&vfio_fsl_mc_driver);
}

static void __exit vfio_fsl_mc_driver_exit(void)
{
	fsl_mc_driver_unregister(&vfio_fsl_mc_driver);
}

module_init(vfio_fsl_mc_driver_init);
module_exit(vfio_fsl_mc_driver_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("VFIO for FSL-MC devices - User Level meta-driver");