1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * VGIC: KVM DEVICE API
4 *
5 * Copyright (C) 2015 ARM Ltd.
6 * Author: Marc Zyngier <marc.zyngier@arm.com>
7 */
8#include <linux/kvm_host.h>
9#include <kvm/arm_vgic.h>
10#include <linux/uaccess.h>
11#include <asm/kvm_mmu.h>
12#include <asm/cputype.h>
13#include "vgic.h"
14
15/* common helpers */
16
17int vgic_check_iorange(struct kvm *kvm, phys_addr_t ioaddr,
18		       phys_addr_t addr, phys_addr_t alignment,
19		       phys_addr_t size)
20{
21	if (!IS_VGIC_ADDR_UNDEF(ioaddr))
22		return -EEXIST;
23
24	if (!IS_ALIGNED(addr, alignment) || !IS_ALIGNED(size, alignment))
25		return -EINVAL;
26
27	if (addr + size < addr)
28		return -EINVAL;
29
30	if (addr & ~kvm_phys_mask(kvm) || addr + size > kvm_phys_size(kvm))
31		return -E2BIG;
32
33	return 0;
34}
35
36static int vgic_check_type(struct kvm *kvm, int type_needed)
37{
38	if (kvm->arch.vgic.vgic_model != type_needed)
39		return -ENODEV;
40	else
41		return 0;
42}
43
/*
 * kvm_set_legacy_vgic_v2_addr - handle the legacy KVM_ARM_SET_DEVICE_ADDR
 * path for a GICv2 distributor or CPU interface base address.
 * @kvm:      the VM whose VGIC is being configured
 * @dev_addr: userspace-supplied device id (encodes the region type) and
 *            guest physical base address
 *
 * Returns 0 on success, -ENODEV if the VM's GIC model is not v2 or the
 * region type is unknown, or a negative error from vgic_check_iorange().
 */
int kvm_set_legacy_vgic_v2_addr(struct kvm *kvm, struct kvm_arm_device_addr *dev_addr)
{
	struct vgic_dist *vgic = &kvm->arch.vgic;
	int r;

	/* Serialise against concurrent VGIC configuration. */
	mutex_lock(&kvm->arch.config_lock);
	switch (FIELD_GET(KVM_ARM_DEVICE_TYPE_MASK, dev_addr->id)) {
	case KVM_VGIC_V2_ADDR_TYPE_DIST:
		/* Model check, range check, then commit — in that order. */
		r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
		if (!r)
			r = vgic_check_iorange(kvm, vgic->vgic_dist_base, dev_addr->addr,
					       SZ_4K, KVM_VGIC_V2_DIST_SIZE);
		if (!r)
			vgic->vgic_dist_base = dev_addr->addr;
		break;
	case KVM_VGIC_V2_ADDR_TYPE_CPU:
		r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
		if (!r)
			r = vgic_check_iorange(kvm, vgic->vgic_cpu_base, dev_addr->addr,
					       SZ_4K, KVM_VGIC_V2_CPU_SIZE);
		if (!r)
			vgic->vgic_cpu_base = dev_addr->addr;
		break;
	default:
		r = -ENODEV;
	}

	mutex_unlock(&kvm->arch.config_lock);

	return r;
}
75
/**
 * kvm_vgic_addr - set or get vgic VM base addresses
 * @kvm:   pointer to the vm struct
 * @attr:  pointer to the attribute being retrieved/updated
 * @write: if true set the address in the VM address space, if false read the
 *          address
 *
 * Set or get the vgic base addresses for the distributor and the virtual CPU
 * interface in the VM physical address space.  These addresses are properties
 * of the emulated core/SoC and therefore user space initially knows this
 * information.
 * Check them for sanity (alignment, double assignment). We can't check for
 * overlapping regions in case of a virtual GICv3 here, since we don't know
 * the number of VCPUs yet, so we defer this check to map_resources().
 *
 * Return: 0 on success; -EFAULT on user access failure; -ENODEV for a GIC
 * model mismatch or an unknown attribute; -ENOENT when reading a
 * redistributor region that does not exist; or a negative error from the
 * range checks / redistributor registration.
 */
static int kvm_vgic_addr(struct kvm *kvm, struct kvm_device_attr *attr, bool write)
{
	u64 __user *uaddr = (u64 __user *)attr->addr;
	struct vgic_dist *vgic = &kvm->arch.vgic;
	phys_addr_t *addr_ptr, alignment, size;
	u64 undef_value = VGIC_ADDR_UNDEF;
	u64 addr;
	int r;

	/* Reading a redistributor region addr implies getting the index */
	if (write || attr->attr == KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION)
		if (get_user(addr, uaddr))
			return -EFAULT;

	/*
	 * Since we can't hold config_lock while registering the redistributor
	 * iodevs, take the slots_lock immediately.
	 */
	mutex_lock(&kvm->slots_lock);
	switch (attr->attr) {
	case KVM_VGIC_V2_ADDR_TYPE_DIST:
		r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
		addr_ptr = &vgic->vgic_dist_base;
		alignment = SZ_4K;
		size = KVM_VGIC_V2_DIST_SIZE;
		break;
	case KVM_VGIC_V2_ADDR_TYPE_CPU:
		r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
		addr_ptr = &vgic->vgic_cpu_base;
		alignment = SZ_4K;
		size = KVM_VGIC_V2_CPU_SIZE;
		break;
	case KVM_VGIC_V3_ADDR_TYPE_DIST:
		r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V3);
		addr_ptr = &vgic->vgic_dist_base;
		alignment = SZ_64K;
		size = KVM_VGIC_V3_DIST_SIZE;
		break;
	case KVM_VGIC_V3_ADDR_TYPE_REDIST: {
		struct vgic_redist_region *rdreg;

		r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V3);
		if (r)
			break;
		if (write) {
			/* Legacy single-region write: index 0, count 0. */
			r = vgic_v3_set_redist_base(kvm, 0, addr, 0);
			goto out;
		}
		/* On read, report the first region, or VGIC_ADDR_UNDEF if none. */
		rdreg = list_first_entry_or_null(&vgic->rd_regions,
						 struct vgic_redist_region, list);
		if (!rdreg)
			addr_ptr = &undef_value;
		else
			addr_ptr = &rdreg->base;
		break;
	}
	case KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION:
	{
		struct vgic_redist_region *rdreg;
		u8 index;

		r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V3);
		if (r)
			break;

		index = addr & KVM_VGIC_V3_RDIST_INDEX_MASK;

		if (write) {
			gpa_t base = addr & KVM_VGIC_V3_RDIST_BASE_MASK;
			u32 count = FIELD_GET(KVM_VGIC_V3_RDIST_COUNT_MASK, addr);
			u8 flags = FIELD_GET(KVM_VGIC_V3_RDIST_FLAGS_MASK, addr);

			/* A zero count is meaningless; no flags are defined yet. */
			if (!count || flags)
				r = -EINVAL;
			else
				r = vgic_v3_set_redist_base(kvm, index,
							    base, count);
			goto out;
		}

		rdreg = vgic_v3_rdist_region_from_index(kvm, index);
		if (!rdreg) {
			r = -ENOENT;
			goto out;
		}

		/* Re-encode index, base and count into the returned u64. */
		addr = index;
		addr |= rdreg->base;
		addr |= (u64)rdreg->count << KVM_VGIC_V3_RDIST_COUNT_SHIFT;
		goto out;
	}
	default:
		r = -ENODEV;
	}

	if (r)
		goto out;

	/* Common path for the fixed-size regions set up above. */
	mutex_lock(&kvm->arch.config_lock);
	if (write) {
		r = vgic_check_iorange(kvm, *addr_ptr, addr, alignment, size);
		if (!r)
			*addr_ptr = addr;
	} else {
		addr = *addr_ptr;
	}
	mutex_unlock(&kvm->arch.config_lock);

out:
	mutex_unlock(&kvm->slots_lock);

	/* Copy the result back to userspace only after dropping the locks. */
	if (!r && !write)
		r =  put_user(addr, uaddr);

	return r;
}
207
/*
 * vgic_set_common_attr - handle SET_DEVICE_ATTR for the attribute groups
 * shared between the GICv2 and GICv3 KVM devices.
 * @dev:  the VGIC kvm device
 * @attr: the attribute being set
 *
 * Returns 0 on success, or a negative errno (-ENXIO for unhandled
 * groups/attributes).
 */
static int vgic_set_common_attr(struct kvm_device *dev,
				struct kvm_device_attr *attr)
{
	int r;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR:
		r = kvm_vgic_addr(dev->kvm, attr, true);
		/* Userspace expects -ENXIO, not -ENODEV, for a model mismatch. */
		return (r == -ENODEV) ? -ENXIO : r;
	case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
		u32 val;
		int ret = 0;

		if (get_user(val, uaddr))
			return -EFAULT;

		/*
		 * We require:
		 * - at least 32 SPIs on top of the 16 SGIs and 16 PPIs
		 * - at most 1024 interrupts
		 * - a multiple of 32 interrupts
		 */
		if (val < (VGIC_NR_PRIVATE_IRQS + 32) ||
		    val > VGIC_MAX_RESERVED ||
		    (val & 31))
			return -EINVAL;

		mutex_lock(&dev->kvm->arch.config_lock);

		/* nr_spis can only be set once, and only before the VGIC is ready. */
		if (vgic_ready(dev->kvm) || dev->kvm->arch.vgic.nr_spis)
			ret = -EBUSY;
		else
			dev->kvm->arch.vgic.nr_spis =
				val - VGIC_NR_PRIVATE_IRQS;

		mutex_unlock(&dev->kvm->arch.config_lock);

		return ret;
	}
	case KVM_DEV_ARM_VGIC_GRP_CTRL: {
		switch (attr->attr) {
		case KVM_DEV_ARM_VGIC_CTRL_INIT:
			mutex_lock(&dev->kvm->arch.config_lock);
			r = vgic_init(dev->kvm);
			mutex_unlock(&dev->kvm->arch.config_lock);
			return r;
		case KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES:
			/*
			 * OK, this one isn't common at all, but we
			 * want to handle all control group attributes
			 * in a single place.
			 */
			if (vgic_check_type(dev->kvm, KVM_DEV_TYPE_ARM_VGIC_V3))
				return -ENXIO;
			/* Lock order: kvm->lock, then all vcpus, then config_lock. */
			mutex_lock(&dev->kvm->lock);

			if (!lock_all_vcpus(dev->kvm)) {
				mutex_unlock(&dev->kvm->lock);
				return -EBUSY;
			}

			mutex_lock(&dev->kvm->arch.config_lock);
			r = vgic_v3_save_pending_tables(dev->kvm);
			mutex_unlock(&dev->kvm->arch.config_lock);
			unlock_all_vcpus(dev->kvm);
			mutex_unlock(&dev->kvm->lock);
			return r;
		}
		break;
	}
	}

	return -ENXIO;
}
283
284static int vgic_get_common_attr(struct kvm_device *dev,
285				struct kvm_device_attr *attr)
286{
287	int r = -ENXIO;
288
289	switch (attr->group) {
290	case KVM_DEV_ARM_VGIC_GRP_ADDR:
291		r = kvm_vgic_addr(dev->kvm, attr, false);
292		return (r == -ENODEV) ? -ENXIO : r;
293	case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: {
294		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
295
296		r = put_user(dev->kvm->arch.vgic.nr_spis +
297			     VGIC_NR_PRIVATE_IRQS, uaddr);
298		break;
299	}
300	}
301
302	return r;
303}
304
/* .create hook: instantiate the in-kernel VGIC of @type for this VM. */
static int vgic_create(struct kvm_device *dev, u32 type)
{
	return kvm_vgic_create(dev->kvm, type);
}
309
/*
 * .destroy hook: only the device wrapper is freed here; the VGIC state
 * itself lives in kvm->arch.vgic and is not torn down by this path.
 */
static void vgic_destroy(struct kvm_device *dev)
{
	kfree(dev);
}
314
315int kvm_register_vgic_device(unsigned long type)
316{
317	int ret = -ENODEV;
318
319	switch (type) {
320	case KVM_DEV_TYPE_ARM_VGIC_V2:
321		ret = kvm_register_device_ops(&kvm_arm_vgic_v2_ops,
322					      KVM_DEV_TYPE_ARM_VGIC_V2);
323		break;
324	case KVM_DEV_TYPE_ARM_VGIC_V3:
325		ret = kvm_register_device_ops(&kvm_arm_vgic_v3_ops,
326					      KVM_DEV_TYPE_ARM_VGIC_V3);
327
328		if (ret)
329			break;
330		ret = kvm_vgic_register_its_device();
331		break;
332	}
333
334	return ret;
335}
336
337int vgic_v2_parse_attr(struct kvm_device *dev, struct kvm_device_attr *attr,
338		       struct vgic_reg_attr *reg_attr)
339{
340	int cpuid;
341
342	cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >>
343		 KVM_DEV_ARM_VGIC_CPUID_SHIFT;
344
345	if (cpuid >= atomic_read(&dev->kvm->online_vcpus))
346		return -EINVAL;
347
348	reg_attr->vcpu = kvm_get_vcpu(dev->kvm, cpuid);
349	reg_attr->addr = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
350
351	return 0;
352}
353
/**
 * vgic_v2_attr_regs_access - allows user space to access VGIC v2 state
 *
 * @dev:      kvm device handle
 * @attr:     kvm device attribute
 * @is_write: true if userspace is writing a register
 *
 * Return: 0 on success; -EFAULT on user access failure; -EBUSY if the
 * vcpus could not all be locked; -EINVAL for an unknown group; or a
 * negative error from vgic_init() / the uaccess helpers.
 */
static int vgic_v2_attr_regs_access(struct kvm_device *dev,
				    struct kvm_device_attr *attr,
				    bool is_write)
{
	u32 __user *uaddr = (u32 __user *)(unsigned long)attr->addr;
	struct vgic_reg_attr reg_attr;
	gpa_t addr;
	struct kvm_vcpu *vcpu;
	int ret;
	u32 val;

	ret = vgic_v2_parse_attr(dev, attr, &reg_attr);
	if (ret)
		return ret;

	vcpu = reg_attr.vcpu;
	addr = reg_attr.addr;

	/* Fetch the value to write before taking any locks. */
	if (is_write)
		if (get_user(val, uaddr))
			return -EFAULT;

	/* Lock order: kvm->lock, then all vcpus, then config_lock. */
	mutex_lock(&dev->kvm->lock);

	if (!lock_all_vcpus(dev->kvm)) {
		mutex_unlock(&dev->kvm->lock);
		return -EBUSY;
	}

	mutex_lock(&dev->kvm->arch.config_lock);

	ret = vgic_init(dev->kvm);
	if (ret)
		goto out;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
		ret = vgic_v2_cpuif_uaccess(vcpu, is_write, addr, &val);
		break;
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
		ret = vgic_v2_dist_uaccess(vcpu, is_write, addr, &val);
		break;
	default:
		ret = -EINVAL;
		break;
	}

out:
	mutex_unlock(&dev->kvm->arch.config_lock);
	unlock_all_vcpus(dev->kvm);
	mutex_unlock(&dev->kvm->lock);

	/* Copy the result back to userspace only after dropping the locks. */
	if (!ret && !is_write)
		ret = put_user(val, uaddr);

	return ret;
}
418
419static int vgic_v2_set_attr(struct kvm_device *dev,
420			    struct kvm_device_attr *attr)
421{
422	switch (attr->group) {
423	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
424	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
425		return vgic_v2_attr_regs_access(dev, attr, true);
426	default:
427		return vgic_set_common_attr(dev, attr);
428	}
429}
430
431static int vgic_v2_get_attr(struct kvm_device *dev,
432			    struct kvm_device_attr *attr)
433{
434	switch (attr->group) {
435	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
436	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
437		return vgic_v2_attr_regs_access(dev, attr, false);
438	default:
439		return vgic_get_common_attr(dev, attr);
440	}
441}
442
443static int vgic_v2_has_attr(struct kvm_device *dev,
444			    struct kvm_device_attr *attr)
445{
446	switch (attr->group) {
447	case KVM_DEV_ARM_VGIC_GRP_ADDR:
448		switch (attr->attr) {
449		case KVM_VGIC_V2_ADDR_TYPE_DIST:
450		case KVM_VGIC_V2_ADDR_TYPE_CPU:
451			return 0;
452		}
453		break;
454	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
455	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
456		return vgic_v2_has_attr_regs(dev, attr);
457	case KVM_DEV_ARM_VGIC_GRP_NR_IRQS:
458		return 0;
459	case KVM_DEV_ARM_VGIC_GRP_CTRL:
460		switch (attr->attr) {
461		case KVM_DEV_ARM_VGIC_CTRL_INIT:
462			return 0;
463		}
464	}
465	return -ENXIO;
466}
467
/* KVM device ops for the GICv2 device (KVM_DEV_TYPE_ARM_VGIC_V2). */
struct kvm_device_ops kvm_arm_vgic_v2_ops = {
	.name = "kvm-arm-vgic-v2",
	.create = vgic_create,
	.destroy = vgic_destroy,
	.set_attr = vgic_v2_set_attr,
	.get_attr = vgic_v2_get_attr,
	.has_attr = vgic_v2_has_attr,
};
476
477int vgic_v3_parse_attr(struct kvm_device *dev, struct kvm_device_attr *attr,
478		       struct vgic_reg_attr *reg_attr)
479{
480	unsigned long vgic_mpidr, mpidr_reg;
481
482	/*
483	 * For KVM_DEV_ARM_VGIC_GRP_DIST_REGS group,
484	 * attr might not hold MPIDR. Hence assume vcpu0.
485	 */
486	if (attr->group != KVM_DEV_ARM_VGIC_GRP_DIST_REGS) {
487		vgic_mpidr = (attr->attr & KVM_DEV_ARM_VGIC_V3_MPIDR_MASK) >>
488			      KVM_DEV_ARM_VGIC_V3_MPIDR_SHIFT;
489
490		mpidr_reg = VGIC_TO_MPIDR(vgic_mpidr);
491		reg_attr->vcpu = kvm_mpidr_to_vcpu(dev->kvm, mpidr_reg);
492	} else {
493		reg_attr->vcpu = kvm_get_vcpu(dev->kvm, 0);
494	}
495
496	if (!reg_attr->vcpu)
497		return -EINVAL;
498
499	reg_attr->addr = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
500
501	return 0;
502}
503
/**
 * vgic_v3_attr_regs_access - allows user space to access VGIC v3 state
 *
 * @dev:      kvm device handle
 * @attr:     kvm device attribute
 * @is_write: true if userspace is writing a register
 *
 * Return: 0 on success; -EFAULT on user access failure; -EBUSY if the
 * VGIC is not initialized or the vcpus could not all be locked; -EINVAL
 * for an unknown group/attribute; or a negative error from the uaccess
 * helpers.
 */
static int vgic_v3_attr_regs_access(struct kvm_device *dev,
				    struct kvm_device_attr *attr,
				    bool is_write)
{
	struct vgic_reg_attr reg_attr;
	gpa_t addr;
	struct kvm_vcpu *vcpu;
	bool uaccess;
	u32 val;
	int ret;

	ret = vgic_v3_parse_attr(dev, attr, &reg_attr);
	if (ret)
		return ret;

	vcpu = reg_attr.vcpu;
	addr = reg_attr.addr;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS:
		/* Sysregs uaccess is performed by the sysreg handling code */
		uaccess = false;
		break;
	default:
		uaccess = true;
	}

	/* Fetch the value to write before taking any locks. */
	if (uaccess && is_write) {
		u32 __user *uaddr = (u32 __user *)(unsigned long)attr->addr;
		if (get_user(val, uaddr))
			return -EFAULT;
	}

	/* Lock order: kvm->lock, then all vcpus, then config_lock. */
	mutex_lock(&dev->kvm->lock);

	if (!lock_all_vcpus(dev->kvm)) {
		mutex_unlock(&dev->kvm->lock);
		return -EBUSY;
	}

	mutex_lock(&dev->kvm->arch.config_lock);

	/* Unlike the v2 path, the VGIC must already be initialized. */
	if (unlikely(!vgic_initialized(dev->kvm))) {
		ret = -EBUSY;
		goto out;
	}

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
		ret = vgic_v3_dist_uaccess(vcpu, is_write, addr, &val);
		break;
	case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS:
		ret = vgic_v3_redist_uaccess(vcpu, is_write, addr, &val);
		break;
	case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS:
		ret = vgic_v3_cpu_sysregs_uaccess(vcpu, attr, is_write);
		break;
	case KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO: {
		unsigned int info, intid;

		info = (attr->attr & KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_MASK) >>
			KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT;
		if (info == VGIC_LEVEL_INFO_LINE_LEVEL) {
			intid = attr->attr &
				KVM_DEV_ARM_VGIC_LINE_LEVEL_INTID_MASK;
			ret = vgic_v3_line_level_info_uaccess(vcpu, is_write,
							      intid, &val);
		} else {
			ret = -EINVAL;
		}
		break;
	}
	default:
		ret = -EINVAL;
		break;
	}

out:
	mutex_unlock(&dev->kvm->arch.config_lock);
	unlock_all_vcpus(dev->kvm);
	mutex_unlock(&dev->kvm->lock);

	/* Copy the result back to userspace only after dropping the locks. */
	if (!ret && uaccess && !is_write) {
		u32 __user *uaddr = (u32 __user *)(unsigned long)attr->addr;
		ret = put_user(val, uaddr);
	}

	return ret;
}
600
601static int vgic_v3_set_attr(struct kvm_device *dev,
602			    struct kvm_device_attr *attr)
603{
604	switch (attr->group) {
605	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
606	case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS:
607	case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS:
608	case KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO:
609		return vgic_v3_attr_regs_access(dev, attr, true);
610	default:
611		return vgic_set_common_attr(dev, attr);
612	}
613}
614
615static int vgic_v3_get_attr(struct kvm_device *dev,
616			    struct kvm_device_attr *attr)
617{
618	switch (attr->group) {
619	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
620	case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS:
621	case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS:
622	case KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO:
623		return vgic_v3_attr_regs_access(dev, attr, false);
624	default:
625		return vgic_get_common_attr(dev, attr);
626	}
627}
628
629static int vgic_v3_has_attr(struct kvm_device *dev,
630			    struct kvm_device_attr *attr)
631{
632	switch (attr->group) {
633	case KVM_DEV_ARM_VGIC_GRP_ADDR:
634		switch (attr->attr) {
635		case KVM_VGIC_V3_ADDR_TYPE_DIST:
636		case KVM_VGIC_V3_ADDR_TYPE_REDIST:
637		case KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION:
638			return 0;
639		}
640		break;
641	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
642	case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS:
643	case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS:
644		return vgic_v3_has_attr_regs(dev, attr);
645	case KVM_DEV_ARM_VGIC_GRP_NR_IRQS:
646		return 0;
647	case KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO: {
648		if (((attr->attr & KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_MASK) >>
649		      KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT) ==
650		      VGIC_LEVEL_INFO_LINE_LEVEL)
651			return 0;
652		break;
653	}
654	case KVM_DEV_ARM_VGIC_GRP_CTRL:
655		switch (attr->attr) {
656		case KVM_DEV_ARM_VGIC_CTRL_INIT:
657			return 0;
658		case KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES:
659			return 0;
660		}
661	}
662	return -ENXIO;
663}
664
/* KVM device ops for the GICv3 device (KVM_DEV_TYPE_ARM_VGIC_V3). */
struct kvm_device_ops kvm_arm_vgic_v3_ops = {
	.name = "kvm-arm-vgic-v3",
	.create = vgic_create,
	.destroy = vgic_destroy,
	.set_attr = vgic_v3_set_attr,
	.get_attr = vgic_v3_get_attr,
	.has_attr = vgic_v3_has_attr,
};
673