// SPDX-License-Identifier: GPL-2.0-only
/*
 * VGICv2 MMIO handling functions
 */

#include <linux/irqchip/arm-gic.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/nospec.h>

#include <kvm/iodev.h>
#include <kvm/arm_vgic.h>

#include "vgic.h"
#include "vgic-mmio.h"

/*
 * The Revision field in the IIDR has the following meanings:
 *
 * Revision 1: Report GICv2 interrupts as group 0 instead of group 1
 * Revision 2: Interrupt groups are guest-configurable and signaled using
 *	       their configured groups.
 */

static unsigned long vgic_mmio_read_v2_misc(struct kvm_vcpu *vcpu,
					    gpa_t addr, unsigned int len)
{
	struct vgic_dist *vgic = &vcpu->kvm->arch.vgic;
	u32 value;

	switch (addr & 0x0c) {
	case GIC_DIST_CTRL:
		value = vgic->enabled ? GICD_ENABLE : 0;
		break;
	case GIC_DIST_CTR:
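		/*
		 * GICD_TYPER: ITLinesNumber in bits [4:0] encodes the number
		 * of supported interrupts as 32 * (N + 1); CPUNumber in
		 * bits [7:5] is the number of implemented CPU interfaces
		 * minus one.
		 */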
		value = vgic->nr_spis + VGIC_NR_PRIVATE_IRQS;
		value = (value >> 5) - 1;
		value |= (atomic_read(&vcpu->kvm->online_vcpus) - 1) << 5;
		break;
	case GIC_DIST_IIDR:
		value = (PRODUCT_ID_KVM << GICD_IIDR_PRODUCT_ID_SHIFT) |
			(vgic->implementation_rev << GICD_IIDR_REVISION_SHIFT) |
			(IMPLEMENTER_ARM << GICD_IIDR_IMPLEMENTER_SHIFT);
		break;
	default:
		return 0;
	}

	return value;
}

static void vgic_mmio_write_v2_misc(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len,
				    unsigned long val)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	bool was_enabled = dist->enabled;

	switch (addr & 0x0c) {
	case GIC_DIST_CTRL:
		dist->enabled = val & GICD_ENABLE;
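		/* Kick the vCPUs so newly deliverable pending IRQs get serviced */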
		if (!was_enabled && dist->enabled)
			vgic_kick_vcpus(vcpu->kvm);
		break;
	case GIC_DIST_CTR:
	case GIC_DIST_IIDR:
		/* Nothing to do */
		return;
	}
}

static int vgic_mmio_uaccess_write_v2_misc(struct kvm_vcpu *vcpu,
					   gpa_t addr, unsigned int len,
					   unsigned long val)
{
	switch (addr & 0x0c) {
	case GIC_DIST_IIDR:
		if (val != vgic_mmio_read_v2_misc(vcpu, addr, len))
			return -EINVAL;

		/*
		 * If we observe a write to GICD_IIDR we know that userspace
		 * has been updated and has had a chance to cope with older
		 * kernels (VGICv2 IIDR.Revision == 0) incorrectly reporting
		 * interrupts as group 1, and therefore we now allow groups to
		 * be user writable.  Doing this by default would break
		 * migration from old kernels to new kernels with legacy
		 * userspace.
		 */
		vcpu->kvm->arch.vgic.v2_groups_user_writable = true;
		return 0;
	}

	vgic_mmio_write_v2_misc(vcpu, addr, len, val);
	return 0;
}

static int vgic_mmio_uaccess_write_v2_group(struct kvm_vcpu *vcpu,
					    gpa_t addr, unsigned int len,
					    unsigned long val)
{
	if (vcpu->kvm->arch.vgic.v2_groups_user_writable)
		vgic_mmio_write_group(vcpu, addr, len, val);

	return 0;
}

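/*
 * GICD_SGIR layout: TargetListFilter in bits [25:24], CPUTargetList in
 * bits [23:16] and SGIINTID in bits [3:0]. The filter selects between the
 * explicit target list, all-but-self and self-only delivery.
 */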
static void vgic_mmio_write_sgir(struct kvm_vcpu *source_vcpu,
				 gpa_t addr, unsigned int len,
				 unsigned long val)
{
	int nr_vcpus = atomic_read(&source_vcpu->kvm->online_vcpus);
	int intid = val & 0xf;
	int targets = (val >> 16) & 0xff;
	int mode = (val >> 24) & 0x03;
	int c;
	struct kvm_vcpu *vcpu;
	unsigned long flags;

	switch (mode) {
	case 0x0:		/* as specified by targets */
		break;
	case 0x1:
		targets = (1U << nr_vcpus) - 1;			/* all, ... */
		targets &= ~(1U << source_vcpu->vcpu_id);	/* but self */
		break;
	case 0x2:		/* this very vCPU only */
		targets = (1U << source_vcpu->vcpu_id);
		break;
	case 0x3:		/* reserved */
		return;
	}

	kvm_for_each_vcpu(c, vcpu, source_vcpu->kvm) {
		struct vgic_irq *irq;

		if (!(targets & (1U << c)))
			continue;

		irq = vgic_get_irq(source_vcpu->kvm, vcpu, intid);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		irq->pending_latch = true;
		irq->source |= 1U << source_vcpu->vcpu_id;

		vgic_queue_irq_unlock(source_vcpu->kvm, irq, flags);
		vgic_put_irq(source_vcpu->kvm, irq);
	}
}

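/*
 * GICD_ITARGETSR holds one byte per interrupt; each bit set in that byte
 * names one CPU interface as a delivery target for the interrupt.
 */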
static unsigned long vgic_mmio_read_target(struct kvm_vcpu *vcpu,
					   gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
	int i;
	u64 val = 0;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		val |= (u64)irq->targets << (i * 8);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return val;
}

static void vgic_mmio_write_target(struct kvm_vcpu *vcpu,
				   gpa_t addr, unsigned int len,
				   unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
	u8 cpu_mask = GENMASK(atomic_read(&vcpu->kvm->online_vcpus) - 1, 0);
	int i;
	unsigned long flags;

	/* GICD_ITARGETSR[0-7] are read-only */
	if (intid < VGIC_NR_PRIVATE_IRQS)
		return;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, NULL, intid + i);
		int target;

		raw_spin_lock_irqsave(&irq->irq_lock, flags);

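		/* Route to the lowest-numbered vCPU in the mask, or vCPU 0 if empty */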
		irq->targets = (val >> (i * 8)) & cpu_mask;
		target = irq->targets ? __ffs(irq->targets) : 0;
		irq->target_vcpu = kvm_get_vcpu(vcpu->kvm, target);

		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

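/*
 * GICD_CPENDSGIR/GICD_SPENDSGIR: one byte per SGI, with each bit in the
 * byte flagging a pending SGI from the corresponding source CPU.
 */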
static unsigned long vgic_mmio_read_sgipend(struct kvm_vcpu *vcpu,
					    gpa_t addr, unsigned int len)
{
	u32 intid = addr & 0x0f;
	int i;
	u64 val = 0;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		val |= (u64)irq->source << (i * 8);

		vgic_put_irq(vcpu->kvm, irq);
	}
	return val;
}

static void vgic_mmio_write_sgipendc(struct kvm_vcpu *vcpu,
				     gpa_t addr, unsigned int len,
				     unsigned long val)
{
	u32 intid = addr & 0x0f;
	int i;
	unsigned long flags;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);

		irq->source &= ~((val >> (i * 8)) & 0xff);
		if (!irq->source)
			irq->pending_latch = false;

		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

static void vgic_mmio_write_sgipends(struct kvm_vcpu *vcpu,
				     gpa_t addr, unsigned int len,
				     unsigned long val)
{
	u32 intid = addr & 0x0f;
	int i;
	unsigned long flags;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);

		irq->source |= (val >> (i * 8)) & 0xff;

		if (irq->source) {
			irq->pending_latch = true;
			vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
		} else {
			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		}
		vgic_put_irq(vcpu->kvm, irq);
	}
}

#define GICC_ARCH_VERSION_V2	0x2

/* These are for userland accesses only; there is no guest-facing emulation. */
static unsigned long vgic_mmio_read_vcpuif(struct kvm_vcpu *vcpu,
					   gpa_t addr, unsigned int len)
{
	struct vgic_vmcr vmcr;
	u32 val;

	vgic_get_vmcr(vcpu, &vmcr);

	switch (addr & 0xff) {
	case GIC_CPU_CTRL:
		val = vmcr.grpen0 << GIC_CPU_CTRL_EnableGrp0_SHIFT;
		val |= vmcr.grpen1 << GIC_CPU_CTRL_EnableGrp1_SHIFT;
		val |= vmcr.ackctl << GIC_CPU_CTRL_AckCtl_SHIFT;
		val |= vmcr.fiqen << GIC_CPU_CTRL_FIQEn_SHIFT;
		val |= vmcr.cbpr << GIC_CPU_CTRL_CBPR_SHIFT;
		val |= vmcr.eoim << GIC_CPU_CTRL_EOImodeNS_SHIFT;

		break;
	case GIC_CPU_PRIMASK:
		/*
		 * Our KVM_DEV_TYPE_ARM_VGIC_V2 device ABI exports the PMR
		 * field as GICH_VMCR.VMPriMask rather than
		 * GICC_PMR.Priority, so we expose the upper five bits of
		 * priority mask to userspace using the lower bits in the
		 * unsigned long.
		 */
		val = (vmcr.pmr & GICV_PMR_PRIORITY_MASK) >>
			GICV_PMR_PRIORITY_SHIFT;
		break;
	case GIC_CPU_BINPOINT:
		val = vmcr.bpr;
		break;
	case GIC_CPU_ALIAS_BINPOINT:
		val = vmcr.abpr;
		break;
	case GIC_CPU_IDENT:
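		/*
		 * GICC_IIDR fields: ProductID in bits [31:20], architecture
		 * version in bits [19:16], Implementer in bits [11:0]; the
		 * Revision field in bits [15:12] is left at zero.
		 */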
		val = ((PRODUCT_ID_KVM << 20) |
		       (GICC_ARCH_VERSION_V2 << 16) |
		       IMPLEMENTER_ARM);
		break;
	default:
		return 0;
	}

	return val;
}

static void vgic_mmio_write_vcpuif(struct kvm_vcpu *vcpu,
				   gpa_t addr, unsigned int len,
				   unsigned long val)
{
	struct vgic_vmcr vmcr;

	vgic_get_vmcr(vcpu, &vmcr);

	switch (addr & 0xff) {
	case GIC_CPU_CTRL:
		vmcr.grpen0 = !!(val & GIC_CPU_CTRL_EnableGrp0);
		vmcr.grpen1 = !!(val & GIC_CPU_CTRL_EnableGrp1);
		vmcr.ackctl = !!(val & GIC_CPU_CTRL_AckCtl);
		vmcr.fiqen = !!(val & GIC_CPU_CTRL_FIQEn);
		vmcr.cbpr = !!(val & GIC_CPU_CTRL_CBPR);
		vmcr.eoim = !!(val & GIC_CPU_CTRL_EOImodeNS);

		break;
	case GIC_CPU_PRIMASK:
		/*
		 * Our KVM_DEV_TYPE_ARM_VGIC_V2 device ABI exports the PMR
		 * field as GICH_VMCR.VMPriMask rather than
		 * GICC_PMR.Priority, so we expose the upper five bits of
		 * priority mask to userspace using the lower bits in the
		 * unsigned long.
		 */
		vmcr.pmr = (val << GICV_PMR_PRIORITY_SHIFT) &
			GICV_PMR_PRIORITY_MASK;
		break;
	case GIC_CPU_BINPOINT:
		vmcr.bpr = val;
		break;
	case GIC_CPU_ALIAS_BINPOINT:
		vmcr.abpr = val;
		break;
	}

	vgic_set_vmcr(vcpu, &vmcr);
}

static unsigned long vgic_mmio_read_apr(struct kvm_vcpu *vcpu,
					gpa_t addr, unsigned int len)
{
	int n; /* which APRn is this */

	n = (addr >> 2) & 0x3;

	if (kvm_vgic_global_state.type == VGIC_V2) {
		/* GICv2 hardware systems support max. 32 groups */
		if (n != 0)
			return 0;
		return vcpu->arch.vgic_cpu.vgic_v2.vgic_apr;
	} else {
		struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;

		if (n > vgic_v3_max_apr_idx(vcpu))
			return 0;

		n = array_index_nospec(n, 4);

		/* GICv3 only uses ICH_AP1Rn for memory mapped (GICv2) guests */
		return vgicv3->vgic_ap1r[n];
	}
}

static void vgic_mmio_write_apr(struct kvm_vcpu *vcpu,
				gpa_t addr, unsigned int len,
				unsigned long val)
{
	int n; /* which APRn is this */

	n = (addr >> 2) & 0x3;

	if (kvm_vgic_global_state.type == VGIC_V2) {
		/* GICv2 hardware systems support max. 32 groups */
		if (n != 0)
			return;
		vcpu->arch.vgic_cpu.vgic_v2.vgic_apr = val;
	} else {
		struct vgic_v3_cpu_if *vgicv3 = &vcpu->arch.vgic_cpu.vgic_v3;

		if (n > vgic_v3_max_apr_idx(vcpu))
			return;

		n = array_index_nospec(n, 4);

		/* GICv3 only uses ICH_AP1Rn for memory mapped (GICv2) guests */
		vgicv3->vgic_ap1r[n] = val;
	}
}

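/*
 * Each entry below maps a distributor register range to its guest MMIO
 * handlers and, where the userspace ABI differs from the guest view, to
 * separate userspace accessors.
 */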
static const struct vgic_register_region vgic_v2_dist_registers[] = {
	REGISTER_DESC_WITH_LENGTH_UACCESS(GIC_DIST_CTRL,
		vgic_mmio_read_v2_misc, vgic_mmio_write_v2_misc,
		NULL, vgic_mmio_uaccess_write_v2_misc,
		12, VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_IGROUP,
		vgic_mmio_read_group, vgic_mmio_write_group,
		NULL, vgic_mmio_uaccess_write_v2_group, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ENABLE_SET,
		vgic_mmio_read_enable, vgic_mmio_write_senable,
		NULL, vgic_uaccess_write_senable, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ENABLE_CLEAR,
		vgic_mmio_read_enable, vgic_mmio_write_cenable,
		NULL, vgic_uaccess_write_cenable, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PENDING_SET,
		vgic_mmio_read_pending, vgic_mmio_write_spending,
		vgic_uaccess_read_pending, vgic_uaccess_write_spending, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PENDING_CLEAR,
		vgic_mmio_read_pending, vgic_mmio_write_cpending,
		vgic_uaccess_read_pending, vgic_uaccess_write_cpending, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ACTIVE_SET,
		vgic_mmio_read_active, vgic_mmio_write_sactive,
		vgic_uaccess_read_active, vgic_mmio_uaccess_write_sactive, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ACTIVE_CLEAR,
		vgic_mmio_read_active, vgic_mmio_write_cactive,
		vgic_uaccess_read_active, vgic_mmio_uaccess_write_cactive, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PRI,
		vgic_mmio_read_priority, vgic_mmio_write_priority, NULL, NULL,
		8, VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_TARGET,
		vgic_mmio_read_target, vgic_mmio_write_target, NULL, NULL, 8,
		VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_CONFIG,
		vgic_mmio_read_config, vgic_mmio_write_config, NULL, NULL, 2,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GIC_DIST_SOFTINT,
		vgic_mmio_read_raz, vgic_mmio_write_sgir, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GIC_DIST_SGI_PENDING_CLEAR,
		vgic_mmio_read_sgipend, vgic_mmio_write_sgipendc, 16,
		VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
	REGISTER_DESC_WITH_LENGTH(GIC_DIST_SGI_PENDING_SET,
		vgic_mmio_read_sgipend, vgic_mmio_write_sgipends, 16,
		VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
};

static const struct vgic_register_region vgic_v2_cpu_registers[] = {
	REGISTER_DESC_WITH_LENGTH(GIC_CPU_CTRL,
		vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GIC_CPU_PRIMASK,
		vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GIC_CPU_BINPOINT,
		vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GIC_CPU_ALIAS_BINPOINT,
		vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GIC_CPU_ACTIVEPRIO,
		vgic_mmio_read_apr, vgic_mmio_write_apr, 16,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GIC_CPU_IDENT,
		vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
		VGIC_ACCESS_32bit),
};

unsigned int vgic_v2_init_dist_iodev(struct vgic_io_device *dev)
{
	dev->regions = vgic_v2_dist_registers;
	dev->nr_regions = ARRAY_SIZE(vgic_v2_dist_registers);

	kvm_iodevice_init(&dev->dev, &kvm_io_gic_ops);

	return SZ_4K;
}

int vgic_v2_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	const struct vgic_register_region *region;
	struct vgic_io_device iodev;
	struct vgic_reg_attr reg_attr;
	struct kvm_vcpu *vcpu;
	gpa_t addr;
	int ret;

	ret = vgic_v2_parse_attr(dev, attr, &reg_attr);
	if (ret)
		return ret;

	vcpu = reg_attr.vcpu;
	addr = reg_attr.addr;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
		iodev.regions = vgic_v2_dist_registers;
		iodev.nr_regions = ARRAY_SIZE(vgic_v2_dist_registers);
		iodev.base_addr = 0;
		break;
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
		iodev.regions = vgic_v2_cpu_registers;
		iodev.nr_regions = ARRAY_SIZE(vgic_v2_cpu_registers);
		iodev.base_addr = 0;
		break;
	default:
		return -ENXIO;
	}

	/* We only support aligned 32-bit accesses. */
	if (addr & 3)
		return -ENXIO;

	region = vgic_get_mmio_region(vcpu, &iodev, addr, sizeof(u32));
	if (!region)
		return -ENXIO;

	return 0;
}

int vgic_v2_cpuif_uaccess(struct kvm_vcpu *vcpu, bool is_write,
			  int offset, u32 *val)
{
	struct vgic_io_device dev = {
		.regions = vgic_v2_cpu_registers,
		.nr_regions = ARRAY_SIZE(vgic_v2_cpu_registers),
		.iodev_type = IODEV_CPUIF,
	};

	return vgic_uaccess(vcpu, &dev, is_write, offset, val);
}

int vgic_v2_dist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
			 int offset, u32 *val)
{
	struct vgic_io_device dev = {
		.regions = vgic_v2_dist_registers,
		.nr_regions = ARRAY_SIZE(vgic_v2_dist_registers),
		.iodev_type = IODEV_DIST,
	};

	return vgic_uaccess(vcpu, &dev, is_write, offset, val);
}