// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012-2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/compiler.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/kvm_host.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>

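/*
 * Field extraction helpers for ICH_VTR_EL2: ListRegs lives in bits [4:0]
 * (number of List Registers minus one) and PREbits in bits [28:26]
 * (preemption priority bits minus one). One 32-bit active-priority
 * register covers 32 preemption levels, so e.g. 7 preemption bits imply
 * four AP0Rn/AP1Rn registers.
 */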
#define vtr_to_max_lr_idx(v)		((v) & 0xf)
#define vtr_to_nr_pre_bits(v)		((((u32)(v) >> 26) & 7) + 1)
#define vtr_to_nr_apr_regs(v)		(1 << (vtr_to_nr_pre_bits(v) - 5))

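/*
 * The List Registers are individual system registers and cannot be indexed
 * at runtime, so accesses go through a switch on the LR number. Values are
 * masked to the architectural maximum of 16 LRs.
 */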
static u64 __gic_v3_get_lr(unsigned int lr)
{
	switch (lr & 0xf) {
	case 0:
		return read_gicreg(ICH_LR0_EL2);
	case 1:
		return read_gicreg(ICH_LR1_EL2);
	case 2:
		return read_gicreg(ICH_LR2_EL2);
	case 3:
		return read_gicreg(ICH_LR3_EL2);
	case 4:
		return read_gicreg(ICH_LR4_EL2);
	case 5:
		return read_gicreg(ICH_LR5_EL2);
	case 6:
		return read_gicreg(ICH_LR6_EL2);
	case 7:
		return read_gicreg(ICH_LR7_EL2);
	case 8:
		return read_gicreg(ICH_LR8_EL2);
	case 9:
		return read_gicreg(ICH_LR9_EL2);
	case 10:
		return read_gicreg(ICH_LR10_EL2);
	case 11:
		return read_gicreg(ICH_LR11_EL2);
	case 12:
		return read_gicreg(ICH_LR12_EL2);
	case 13:
		return read_gicreg(ICH_LR13_EL2);
	case 14:
		return read_gicreg(ICH_LR14_EL2);
	case 15:
		return read_gicreg(ICH_LR15_EL2);
	}

	unreachable();
}

static void __gic_v3_set_lr(u64 val, int lr)
{
	switch (lr & 0xf) {
	case 0:
		write_gicreg(val, ICH_LR0_EL2);
		break;
	case 1:
		write_gicreg(val, ICH_LR1_EL2);
		break;
	case 2:
		write_gicreg(val, ICH_LR2_EL2);
		break;
	case 3:
		write_gicreg(val, ICH_LR3_EL2);
		break;
	case 4:
		write_gicreg(val, ICH_LR4_EL2);
		break;
	case 5:
		write_gicreg(val, ICH_LR5_EL2);
		break;
	case 6:
		write_gicreg(val, ICH_LR6_EL2);
		break;
	case 7:
		write_gicreg(val, ICH_LR7_EL2);
		break;
	case 8:
		write_gicreg(val, ICH_LR8_EL2);
		break;
	case 9:
		write_gicreg(val, ICH_LR9_EL2);
		break;
	case 10:
		write_gicreg(val, ICH_LR10_EL2);
		break;
	case 11:
		write_gicreg(val, ICH_LR11_EL2);
		break;
	case 12:
		write_gicreg(val, ICH_LR12_EL2);
		break;
	case 13:
		write_gicreg(val, ICH_LR13_EL2);
		break;
	case 14:
		write_gicreg(val, ICH_LR14_EL2);
		break;
	case 15:
		write_gicreg(val, ICH_LR15_EL2);
		break;
	}
}

static void __vgic_v3_write_ap0rn(u32 val, int n)
{
	switch (n) {
	case 0:
		write_gicreg(val, ICH_AP0R0_EL2);
		break;
	case 1:
		write_gicreg(val, ICH_AP0R1_EL2);
		break;
	case 2:
		write_gicreg(val, ICH_AP0R2_EL2);
		break;
	case 3:
		write_gicreg(val, ICH_AP0R3_EL2);
		break;
	}
}

static void __vgic_v3_write_ap1rn(u32 val, int n)
{
	switch (n) {
	case 0:
		write_gicreg(val, ICH_AP1R0_EL2);
		break;
	case 1:
		write_gicreg(val, ICH_AP1R1_EL2);
		break;
	case 2:
		write_gicreg(val, ICH_AP1R2_EL2);
		break;
	case 3:
		write_gicreg(val, ICH_AP1R3_EL2);
		break;
	}
}

static u32 __vgic_v3_read_ap0rn(int n)
{
	u32 val;

	switch (n) {
	case 0:
		val = read_gicreg(ICH_AP0R0_EL2);
		break;
	case 1:
		val = read_gicreg(ICH_AP0R1_EL2);
		break;
	case 2:
		val = read_gicreg(ICH_AP0R2_EL2);
		break;
	case 3:
		val = read_gicreg(ICH_AP0R3_EL2);
		break;
	default:
		unreachable();
	}

	return val;
}

static u32 __vgic_v3_read_ap1rn(int n)
{
	u32 val;

	switch (n) {
	case 0:
		val = read_gicreg(ICH_AP1R0_EL2);
		break;
	case 1:
		val = read_gicreg(ICH_AP1R1_EL2);
		break;
	case 2:
		val = read_gicreg(ICH_AP1R2_EL2);
		break;
	case 3:
		val = read_gicreg(ICH_AP1R3_EL2);
		break;
	default:
		unreachable();
	}

	return val;
}

void __vgic_v3_save_state(struct vgic_v3_cpu_if *cpu_if)
{
	u64 used_lrs = cpu_if->used_lrs;

	/*
	 * Make sure stores to the GIC via the memory mapped interface
	 * are now visible to the system register interface when reading the
	 * LRs, and when reading back the VMCR on non-VHE systems.
	 */
	if (used_lrs || !has_vhe()) {
		if (!cpu_if->vgic_sre) {
			dsb(sy);
			isb();
		}
	}

	if (used_lrs || cpu_if->its_vpe.its_vm) {
		int i;
		u32 elrsr;

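		/*
		 * A set bit in ICH_ELRSR_EL2 means the corresponding LR
		 * holds no valid interrupt: only the state bits of its
		 * shadow copy need clearing, instead of re-reading the LR.
		 */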
		elrsr = read_gicreg(ICH_ELRSR_EL2);

		write_gicreg(cpu_if->vgic_hcr & ~ICH_HCR_EN, ICH_HCR_EL2);

		for (i = 0; i < used_lrs; i++) {
			if (elrsr & (1 << i))
				cpu_if->vgic_lr[i] &= ~ICH_LR_STATE;
			else
				cpu_if->vgic_lr[i] = __gic_v3_get_lr(i);

			__gic_v3_set_lr(0, i);
		}
	}
}

void __vgic_v3_restore_state(struct vgic_v3_cpu_if *cpu_if)
{
	u64 used_lrs = cpu_if->used_lrs;
	int i;

	if (used_lrs || cpu_if->its_vpe.its_vm) {
		write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);

		for (i = 0; i < used_lrs; i++)
			__gic_v3_set_lr(cpu_if->vgic_lr[i], i);
	}

	/*
	 * Ensure that writes to the LRs, and on non-VHE systems the write to
	 * the VMCR in __vgic_v3_activate_traps(), have reached the
	 * (re)distributors. This ensures the guest reads the correct values
	 * from the memory-mapped interface.
	 */
	if (used_lrs || !has_vhe()) {
		if (!cpu_if->vgic_sre) {
			isb();
			dsb(sy);
		}
	}
}

void __vgic_v3_activate_traps(struct vgic_v3_cpu_if *cpu_if)
{
	/*
	 * VFIQEn is RES1 if ICC_SRE_EL1.SRE is 1. This causes a
	 * Group0 interrupt (as generated in GICv2 mode) to be
	 * delivered as a FIQ to the guest, with potentially fatal
	 * consequences. So we must make sure that ICC_SRE_EL1 has
	 * been actually programmed with the value we want before
	 * starting to mess with the rest of the GIC, and VMCR_EL2 in
	 * particular. This logic must be called before
	 * __vgic_v3_restore_state().
	 */
	if (!cpu_if->vgic_sre) {
		write_gicreg(0, ICC_SRE_EL1);
		isb();
		write_gicreg(cpu_if->vgic_vmcr, ICH_VMCR_EL2);

		if (has_vhe()) {
			/*
			 * Ensure that the write to the VMCR will have reached
			 * the (re)distributors. This ensures the guest reads
			 * the correct values from the memory-mapped
			 * interface.
			 */
			isb();
			dsb(sy);
		}
	}

	/*
	 * Prevent the guest from touching the GIC system registers if
	 * SRE isn't enabled for GICv3 emulation.
	 */
	write_gicreg(read_gicreg(ICC_SRE_EL2) & ~ICC_SRE_EL2_ENABLE,
		     ICC_SRE_EL2);

	/*
	 * If we need to trap system registers, we must write
	 * ICH_HCR_EL2 anyway, even if no interrupts are being
	 * injected.
	 */
	if (static_branch_unlikely(&vgic_v3_cpuif_trap) ||
	    cpu_if->its_vpe.its_vm)
		write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);
}

void __vgic_v3_deactivate_traps(struct vgic_v3_cpu_if *cpu_if)
{
	u64 val;

	if (!cpu_if->vgic_sre) {
		cpu_if->vgic_vmcr = read_gicreg(ICH_VMCR_EL2);
	}

	val = read_gicreg(ICC_SRE_EL2);
	write_gicreg(val | ICC_SRE_EL2_ENABLE, ICC_SRE_EL2);

	if (!cpu_if->vgic_sre) {
		/* Make sure ENABLE is set at EL2 before setting SRE at EL1 */
		isb();
		write_gicreg(1, ICC_SRE_EL1);
	}

	/*
	 * If we were trapping system registers, we enabled the VGIC even if
	 * no interrupts were being injected, and we disable it again here.
	 */
	if (static_branch_unlikely(&vgic_v3_cpuif_trap) ||
	    cpu_if->its_vpe.its_vm)
		write_gicreg(0, ICH_HCR_EL2);
}

void __vgic_v3_save_aprs(struct vgic_v3_cpu_if *cpu_if)
{
	u64 val;
	u32 nr_pre_bits;

	val = read_gicreg(ICH_VTR_EL2);
	nr_pre_bits = vtr_to_nr_pre_bits(val);

	switch (nr_pre_bits) {
	case 7:
		cpu_if->vgic_ap0r[3] = __vgic_v3_read_ap0rn(3);
		cpu_if->vgic_ap0r[2] = __vgic_v3_read_ap0rn(2);
		fallthrough;
	case 6:
		cpu_if->vgic_ap0r[1] = __vgic_v3_read_ap0rn(1);
		fallthrough;
	default:
		cpu_if->vgic_ap0r[0] = __vgic_v3_read_ap0rn(0);
	}

	switch (nr_pre_bits) {
	case 7:
		cpu_if->vgic_ap1r[3] = __vgic_v3_read_ap1rn(3);
		cpu_if->vgic_ap1r[2] = __vgic_v3_read_ap1rn(2);
		fallthrough;
	case 6:
		cpu_if->vgic_ap1r[1] = __vgic_v3_read_ap1rn(1);
		fallthrough;
	default:
		cpu_if->vgic_ap1r[0] = __vgic_v3_read_ap1rn(0);
	}
}

void __vgic_v3_restore_aprs(struct vgic_v3_cpu_if *cpu_if)
{
	u64 val;
	u32 nr_pre_bits;

	val = read_gicreg(ICH_VTR_EL2);
	nr_pre_bits = vtr_to_nr_pre_bits(val);

	switch (nr_pre_bits) {
	case 7:
		__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[3], 3);
		__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[2], 2);
		fallthrough;
	case 6:
		__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[1], 1);
		fallthrough;
	default:
		__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[0], 0);
	}

	switch (nr_pre_bits) {
	case 7:
		__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[3], 3);
		__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[2], 2);
		fallthrough;
	case 6:
		__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[1], 1);
		fallthrough;
	default:
		__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[0], 0);
	}
}

void __vgic_v3_init_lrs(void)
{
	int max_lr_idx = vtr_to_max_lr_idx(read_gicreg(ICH_VTR_EL2));
	int i;

	for (i = 0; i <= max_lr_idx; i++)
		__gic_v3_set_lr(0, i);
}

u64 __vgic_v3_get_ich_vtr_el2(void)
{
	return read_gicreg(ICH_VTR_EL2);
}

u64 __vgic_v3_read_vmcr(void)
{
	return read_gicreg(ICH_VMCR_EL2);
}

void __vgic_v3_write_vmcr(u32 vmcr)
{
	write_gicreg(vmcr, ICH_VMCR_EL2);
}

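/*
 * The smallest programmable Binary Point value depends on how many
 * preemption bits the implementation provides: with N preemption bits the
 * minimum BPR is 8 - N (so 7 preemption bits give a minimum of 1).
 */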
static int __vgic_v3_bpr_min(void)
{
	/* See Pseudocode for VPriorityGroup */
	return 8 - vtr_to_nr_pre_bits(read_gicreg(ICH_VTR_EL2));
}

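/*
 * Infer the interrupt group from the trapped accessor: the Group-0
 * registers dispatched through this helper (ICC_IAR0, ICC_EOIR0,
 * ICC_HPPIR0 and ICC_AP0Rn) are all encoded with CRm == 8; any other
 * trapped encoding is treated as Group-1.
 */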
static int __vgic_v3_get_group(struct kvm_vcpu *vcpu)
{
	u32 esr = kvm_vcpu_get_esr(vcpu);
	u8 crm = (esr & ESR_ELx_SYS64_ISS_CRM_MASK) >> ESR_ELx_SYS64_ISS_CRM_SHIFT;

	return crm != 8;
}

#define GICv3_IDLE_PRIORITY	0xff

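/*
 * Scan the in-use LRs for the highest-priority interrupt that is pending
 * and whose group is enabled in the VMCR. Returns the LR index and its
 * contents, or -1 (with a spurious INTID) when nothing qualifies.
 */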
static int __vgic_v3_highest_priority_lr(struct kvm_vcpu *vcpu, u32 vmcr,
					 u64 *lr_val)
{
	unsigned int used_lrs = vcpu->arch.vgic_cpu.vgic_v3.used_lrs;
	u8 priority = GICv3_IDLE_PRIORITY;
	int i, lr = -1;

	for (i = 0; i < used_lrs; i++) {
		u64 val = __gic_v3_get_lr(i);
		u8 lr_prio = (val & ICH_LR_PRIORITY_MASK) >> ICH_LR_PRIORITY_SHIFT;

		/* Not pending in the state? */
		if ((val & ICH_LR_STATE) != ICH_LR_PENDING_BIT)
			continue;

		/* Group-0 interrupt, but Group-0 disabled? */
		if (!(val & ICH_LR_GROUP) && !(vmcr & ICH_VMCR_ENG0_MASK))
			continue;

		/* Group-1 interrupt, but Group-1 disabled? */
		if ((val & ICH_LR_GROUP) && !(vmcr & ICH_VMCR_ENG1_MASK))
			continue;

		/* Not the highest priority? */
		if (lr_prio >= priority)
			continue;

		/* This is a candidate */
		priority = lr_prio;
		*lr_val = val;
		lr = i;
	}

	if (lr == -1)
		*lr_val = ICC_IAR1_EL1_SPURIOUS;

	return lr;
}

static int __vgic_v3_find_active_lr(struct kvm_vcpu *vcpu, int intid,
				    u64 *lr_val)
{
	unsigned int used_lrs = vcpu->arch.vgic_cpu.vgic_v3.used_lrs;
	int i;

	for (i = 0; i < used_lrs; i++) {
		u64 val = __gic_v3_get_lr(i);

		if ((val & ICH_LR_VIRTUAL_ID_MASK) == intid &&
		    (val & ICH_LR_ACTIVE_BIT)) {
			*lr_val = val;
			return i;
		}
	}

	*lr_val = ICC_IAR1_EL1_SPURIOUS;
	return -1;
}

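/*
 * Walk the active-priority registers to find the highest active priority.
 * Each register tracks 32 preemption levels with the lowest set bit being
 * the highest priority, so an all-zero register skips 32 levels at once.
 */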
static int __vgic_v3_get_highest_active_priority(void)
{
	u8 nr_apr_regs = vtr_to_nr_apr_regs(read_gicreg(ICH_VTR_EL2));
	u32 hap = 0;
	int i;

	for (i = 0; i < nr_apr_regs; i++) {
		u32 val;

		/*
		 * The ICH_AP0Rn_EL2 and ICH_AP1Rn_EL2 registers
		 * contain the active priority levels for this VCPU
		 * for the maximum number of supported priority
		 * levels, and we return the full priority level only
		 * if the BPR is programmed to its minimum, otherwise
		 * we return a combination of the priority level and
		 * subpriority, as determined by the setting of the
		 * BPR, but without the full subpriority.
		 */
		val  = __vgic_v3_read_ap0rn(i);
		val |= __vgic_v3_read_ap1rn(i);
		if (!val) {
			hap += 32;
			continue;
		}

		return (hap + __ffs(val)) << __vgic_v3_bpr_min();
	}

	return GICv3_IDLE_PRIORITY;
}

static unsigned int __vgic_v3_get_bpr0(u32 vmcr)
{
	return (vmcr & ICH_VMCR_BPR0_MASK) >> ICH_VMCR_BPR0_SHIFT;
}

static unsigned int __vgic_v3_get_bpr1(u32 vmcr)
{
	unsigned int bpr;

	if (vmcr & ICH_VMCR_CBPR_MASK) {
		bpr = __vgic_v3_get_bpr0(vmcr);
		if (bpr < 7)
			bpr++;
	} else {
		bpr = (vmcr & ICH_VMCR_BPR1_MASK) >> ICH_VMCR_BPR1_SHIFT;
	}

	return bpr;
}

/*
 * Convert a priority to a preemption level, taking the relevant BPR
 * into account by zeroing the sub-priority bits.
 */
static u8 __vgic_v3_pri_to_pre(u8 pri, u32 vmcr, int grp)
{
	unsigned int bpr;

	if (!grp)
		bpr = __vgic_v3_get_bpr0(vmcr) + 1;
	else
		bpr = __vgic_v3_get_bpr1(vmcr);

	return pri & (GENMASK(7, 0) << bpr);
}

/*
 * The priority value is independent of any of the BPR values, so we
 * normalize it using the minimal BPR value. This guarantees that no
 * matter what the guest does with its BPR, we can always set/get the
 * same value of a priority.
 */
static void __vgic_v3_set_active_priority(u8 pri, u32 vmcr, int grp)
{
	u8 pre, ap;
	u32 val;
	int apr;

	pre = __vgic_v3_pri_to_pre(pri, vmcr, grp);
	ap = pre >> __vgic_v3_bpr_min();
	apr = ap / 32;

	if (!grp) {
		val = __vgic_v3_read_ap0rn(apr);
		__vgic_v3_write_ap0rn(val | BIT(ap % 32), apr);
	} else {
		val = __vgic_v3_read_ap1rn(apr);
		__vgic_v3_write_ap1rn(val | BIT(ap % 32), apr);
	}
}

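/*
 * Priority drop: clear the lowest set bit across AP0Rn/AP1Rn (i.e. the
 * highest active priority) and return that priority rescaled to the full
 * 8-bit range, or the idle priority if nothing was active.
 */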
static int __vgic_v3_clear_highest_active_priority(void)
{
	u8 nr_apr_regs = vtr_to_nr_apr_regs(read_gicreg(ICH_VTR_EL2));
	u32 hap = 0;
	int i;

	for (i = 0; i < nr_apr_regs; i++) {
		u32 ap0, ap1;
		int c0, c1;

		ap0 = __vgic_v3_read_ap0rn(i);
		ap1 = __vgic_v3_read_ap1rn(i);
		if (!ap0 && !ap1) {
			hap += 32;
			continue;
		}

		c0 = ap0 ? __ffs(ap0) : 32;
		c1 = ap1 ? __ffs(ap1) : 32;

		/* Always clear the LSB, which is the highest priority */
		if (c0 < c1) {
			ap0 &= ~BIT(c0);
			__vgic_v3_write_ap0rn(ap0, i);
			hap += c0;
		} else {
			ap1 &= ~BIT(c1);
			__vgic_v3_write_ap1rn(ap1, i);
			hap += c1;
		}

		/* Rescale to 8 bits of priority */
		return hap << __vgic_v3_bpr_min();
	}

	return GICv3_IDLE_PRIORITY;
}

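/*
 * Emulate a read of ICC_IAR{0,1}_EL1: pick the best pending LR, apply the
 * group, PMR and preemption checks, then mark the interrupt active (except
 * for LPIs, which have no active state) and return its INTID.
 */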
static void __vgic_v3_read_iar(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u64 lr_val;
	u8 lr_prio, pmr;
	int lr, grp;

	grp = __vgic_v3_get_group(vcpu);

	lr = __vgic_v3_highest_priority_lr(vcpu, vmcr, &lr_val);
	if (lr < 0)
		goto spurious;

	if (grp != !!(lr_val & ICH_LR_GROUP))
		goto spurious;

	pmr = (vmcr & ICH_VMCR_PMR_MASK) >> ICH_VMCR_PMR_SHIFT;
	lr_prio = (lr_val & ICH_LR_PRIORITY_MASK) >> ICH_LR_PRIORITY_SHIFT;
	if (pmr <= lr_prio)
		goto spurious;

	if (__vgic_v3_get_highest_active_priority() <= __vgic_v3_pri_to_pre(lr_prio, vmcr, grp))
		goto spurious;

	lr_val &= ~ICH_LR_STATE;
	/* No active state for LPIs */
	if ((lr_val & ICH_LR_VIRTUAL_ID_MASK) <= VGIC_MAX_SPI)
		lr_val |= ICH_LR_ACTIVE_BIT;
	__gic_v3_set_lr(lr_val, lr);
	__vgic_v3_set_active_priority(lr_prio, vmcr, grp);
	vcpu_set_reg(vcpu, rt, lr_val & ICH_LR_VIRTUAL_ID_MASK);
	return;

spurious:
	vcpu_set_reg(vcpu, rt, ICC_IAR1_EL1_SPURIOUS);
}

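/*
 * Deactivate the interrupt held in an LR. For hardware-backed interrupts
 * the deactivation is also forwarded to the physical GIC via ICC_DIR_EL1.
 */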
static void __vgic_v3_clear_active_lr(int lr, u64 lr_val)
{
	lr_val &= ~ICH_LR_ACTIVE_BIT;
	if (lr_val & ICH_LR_HW) {
		u32 pid;

		pid = (lr_val & ICH_LR_PHYS_ID_MASK) >> ICH_LR_PHYS_ID_SHIFT;
		gic_write_dir(pid);
	}

	__gic_v3_set_lr(lr_val, lr);
}

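/*
 * EOIcount in ICH_HCR_EL2 counts EOIs that did not match any LR; bump it
 * by hand, as the hardware would have done for an unmatched EOI.
 */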
static void __vgic_v3_bump_eoicount(void)
{
	u32 hcr;

	hcr = read_gicreg(ICH_HCR_EL2);
	hcr += 1 << ICH_HCR_EOIcount_SHIFT;
	write_gicreg(hcr, ICH_HCR_EL2);
}

static void __vgic_v3_write_dir(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u32 vid = vcpu_get_reg(vcpu, rt);
	u64 lr_val;
	int lr;

	/* EOImode == 0, nothing to be done here */
	if (!(vmcr & ICH_VMCR_EOIM_MASK))
		return;

	/* No deactivate to be performed on an LPI */
	if (vid >= VGIC_MIN_LPI)
		return;

	lr = __vgic_v3_find_active_lr(vcpu, vid, &lr_val);
	if (lr == -1) {
		__vgic_v3_bump_eoicount();
		return;
	}

	__vgic_v3_clear_active_lr(lr, lr_val);
}

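/*
 * Emulate a write to ICC_EOIR{0,1}_EL1: the priority drop always happens,
 * while the deactivation is only performed when EOImode == 0 and the EOI'd
 * interrupt matches an active LR of the right group and priority.
 */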
static void __vgic_v3_write_eoir(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u32 vid = vcpu_get_reg(vcpu, rt);
	u64 lr_val;
	u8 lr_prio, act_prio;
	int lr, grp;

	grp = __vgic_v3_get_group(vcpu);

	/* Drop priority in any case */
	act_prio = __vgic_v3_clear_highest_active_priority();

	/* If EOIing an LPI, no deactivate to be performed */
	if (vid >= VGIC_MIN_LPI)
		return;

	/* EOImode == 1, nothing to be done here */
	if (vmcr & ICH_VMCR_EOIM_MASK)
		return;

	lr = __vgic_v3_find_active_lr(vcpu, vid, &lr_val);
	if (lr == -1) {
		__vgic_v3_bump_eoicount();
		return;
	}

	lr_prio = (lr_val & ICH_LR_PRIORITY_MASK) >> ICH_LR_PRIORITY_SHIFT;

	/* If priorities or group do not match, the guest has fscked-up. */
	if (grp != !!(lr_val & ICH_LR_GROUP) ||
	    __vgic_v3_pri_to_pre(lr_prio, vmcr, grp) != act_prio)
		return;

	/* Let's now perform the deactivation */
	__vgic_v3_clear_active_lr(lr, lr_val);
}

static void __vgic_v3_read_igrpen0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	vcpu_set_reg(vcpu, rt, !!(vmcr & ICH_VMCR_ENG0_MASK));
}

static void __vgic_v3_read_igrpen1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	vcpu_set_reg(vcpu, rt, !!(vmcr & ICH_VMCR_ENG1_MASK));
}

static void __vgic_v3_write_igrpen0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u64 val = vcpu_get_reg(vcpu, rt);

	if (val & 1)
		vmcr |= ICH_VMCR_ENG0_MASK;
	else
		vmcr &= ~ICH_VMCR_ENG0_MASK;

	__vgic_v3_write_vmcr(vmcr);
}

static void __vgic_v3_write_igrpen1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u64 val = vcpu_get_reg(vcpu, rt);

	if (val & 1)
		vmcr |= ICH_VMCR_ENG1_MASK;
	else
		vmcr &= ~ICH_VMCR_ENG1_MASK;

	__vgic_v3_write_vmcr(vmcr);
}

static void __vgic_v3_read_bpr0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	vcpu_set_reg(vcpu, rt, __vgic_v3_get_bpr0(vmcr));
}

static void __vgic_v3_read_bpr1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	vcpu_set_reg(vcpu, rt, __vgic_v3_get_bpr1(vmcr));
}

static void __vgic_v3_write_bpr0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u64 val = vcpu_get_reg(vcpu, rt);
	u8 bpr_min = __vgic_v3_bpr_min() - 1;

	/* Enforce BPR limiting */
	if (val < bpr_min)
		val = bpr_min;

	val <<= ICH_VMCR_BPR0_SHIFT;
	val &= ICH_VMCR_BPR0_MASK;
	vmcr &= ~ICH_VMCR_BPR0_MASK;
	vmcr |= val;

	__vgic_v3_write_vmcr(vmcr);
}

static void __vgic_v3_write_bpr1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u64 val = vcpu_get_reg(vcpu, rt);
	u8 bpr_min = __vgic_v3_bpr_min();

	if (vmcr & ICH_VMCR_CBPR_MASK)
		return;

	/* Enforce BPR limiting */
	if (val < bpr_min)
		val = bpr_min;

	val <<= ICH_VMCR_BPR1_SHIFT;
	val &= ICH_VMCR_BPR1_MASK;
	vmcr &= ~ICH_VMCR_BPR1_MASK;
	vmcr |= val;

	__vgic_v3_write_vmcr(vmcr);
}

static void __vgic_v3_read_apxrn(struct kvm_vcpu *vcpu, int rt, int n)
{
	u32 val;

	if (!__vgic_v3_get_group(vcpu))
		val = __vgic_v3_read_ap0rn(n);
	else
		val = __vgic_v3_read_ap1rn(n);

	vcpu_set_reg(vcpu, rt, val);
}

static void __vgic_v3_write_apxrn(struct kvm_vcpu *vcpu, int rt, int n)
{
	u32 val = vcpu_get_reg(vcpu, rt);

	if (!__vgic_v3_get_group(vcpu))
		__vgic_v3_write_ap0rn(val, n);
	else
		__vgic_v3_write_ap1rn(val, n);
}

static void __vgic_v3_read_apxr0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	__vgic_v3_read_apxrn(vcpu, rt, 0);
}

static void __vgic_v3_read_apxr1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	__vgic_v3_read_apxrn(vcpu, rt, 1);
}

static void __vgic_v3_read_apxr2(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	__vgic_v3_read_apxrn(vcpu, rt, 2);
}

static void __vgic_v3_read_apxr3(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	__vgic_v3_read_apxrn(vcpu, rt, 3);
}

static void __vgic_v3_write_apxr0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	__vgic_v3_write_apxrn(vcpu, rt, 0);
}

static void __vgic_v3_write_apxr1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	__vgic_v3_write_apxrn(vcpu, rt, 1);
}

static void __vgic_v3_write_apxr2(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	__vgic_v3_write_apxrn(vcpu, rt, 2);
}

static void __vgic_v3_write_apxr3(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	__vgic_v3_write_apxrn(vcpu, rt, 3);
}

static void __vgic_v3_read_hppir(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u64 lr_val;
	int lr, lr_grp, grp;

	grp = __vgic_v3_get_group(vcpu);

	lr = __vgic_v3_highest_priority_lr(vcpu, vmcr, &lr_val);
	if (lr == -1)
		goto spurious;

	lr_grp = !!(lr_val & ICH_LR_GROUP);
	if (lr_grp != grp)
		lr_val = ICC_IAR1_EL1_SPURIOUS;

spurious:
	vcpu_set_reg(vcpu, rt, lr_val & ICH_LR_VIRTUAL_ID_MASK);
}

static void __vgic_v3_read_pmr(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	vmcr &= ICH_VMCR_PMR_MASK;
	vmcr >>= ICH_VMCR_PMR_SHIFT;
	vcpu_set_reg(vcpu, rt, vmcr);
}

static void __vgic_v3_write_pmr(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u32 val = vcpu_get_reg(vcpu, rt);

	val <<= ICH_VMCR_PMR_SHIFT;
	val &= ICH_VMCR_PMR_MASK;
	vmcr &= ~ICH_VMCR_PMR_MASK;
	vmcr |= val;

	write_gicreg(vmcr, ICH_VMCR_EL2);
}

static void __vgic_v3_read_rpr(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u32 val = __vgic_v3_get_highest_active_priority();

	vcpu_set_reg(vcpu, rt, val);
}

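/*
 * ICC_CTLR_EL1 is synthesized from the fixed fields of ICH_VTR_EL2
 * (PRIbits, IDbits, SEIS, A3V) and the guest-controlled EOImode/CBPR bits
 * held in the VMCR.
 */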
static void __vgic_v3_read_ctlr(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u32 vtr, val;

	vtr = read_gicreg(ICH_VTR_EL2);
	/* PRIbits */
	val = ((vtr >> 29) & 7) << ICC_CTLR_EL1_PRI_BITS_SHIFT;
	/* IDbits */
	val |= ((vtr >> 23) & 7) << ICC_CTLR_EL1_ID_BITS_SHIFT;
	/* SEIS */
	val |= ((vtr >> 22) & 1) << ICC_CTLR_EL1_SEIS_SHIFT;
	/* A3V */
	val |= ((vtr >> 21) & 1) << ICC_CTLR_EL1_A3V_SHIFT;
	/* EOImode */
	val |= ((vmcr & ICH_VMCR_EOIM_MASK) >> ICH_VMCR_EOIM_SHIFT) << ICC_CTLR_EL1_EOImode_SHIFT;
	/* CBPR */
	val |= (vmcr & ICH_VMCR_CBPR_MASK) >> ICH_VMCR_CBPR_SHIFT;

	vcpu_set_reg(vcpu, rt, val);
}

static void __vgic_v3_write_ctlr(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u32 val = vcpu_get_reg(vcpu, rt);

	if (val & ICC_CTLR_EL1_CBPR_MASK)
		vmcr |= ICH_VMCR_CBPR_MASK;
	else
		vmcr &= ~ICH_VMCR_CBPR_MASK;

	if (val & ICC_CTLR_EL1_EOImode_MASK)
		vmcr |= ICH_VMCR_EOIM_MASK;
	else
		vmcr &= ~ICH_VMCR_EOIM_MASK;

	write_gicreg(vmcr, ICH_VMCR_EL2);
}

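/*
 * Main trap handler for guest GICv3 system register accesses: decode the
 * trapped register from the ESR, dispatch to the matching emulation helper
 * with the current VMCR, then skip the trapped instruction. Returning 0
 * means the access is not handled here and must be dealt with elsewhere.
 */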
int __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu)
{
	int rt;
	u32 esr;
	u32 vmcr;
	void (*fn)(struct kvm_vcpu *, u32, int);
	bool is_read;
	u32 sysreg;

	esr = kvm_vcpu_get_esr(vcpu);
	if (vcpu_mode_is_32bit(vcpu)) {
		if (!kvm_condition_valid(vcpu)) {
			__kvm_skip_instr(vcpu);
			return 1;
		}

		sysreg = esr_cp15_to_sysreg(esr);
	} else {
		sysreg = esr_sys64_to_sysreg(esr);
	}

	is_read = (esr & ESR_ELx_SYS64_ISS_DIR_MASK) == ESR_ELx_SYS64_ISS_DIR_READ;

	switch (sysreg) {
	case SYS_ICC_IAR0_EL1:
	case SYS_ICC_IAR1_EL1:
		if (unlikely(!is_read))
			return 0;
		fn = __vgic_v3_read_iar;
		break;
	case SYS_ICC_EOIR0_EL1:
	case SYS_ICC_EOIR1_EL1:
		if (unlikely(is_read))
			return 0;
		fn = __vgic_v3_write_eoir;
		break;
	case SYS_ICC_IGRPEN1_EL1:
		if (is_read)
			fn = __vgic_v3_read_igrpen1;
		else
			fn = __vgic_v3_write_igrpen1;
		break;
	case SYS_ICC_BPR1_EL1:
		if (is_read)
			fn = __vgic_v3_read_bpr1;
		else
			fn = __vgic_v3_write_bpr1;
		break;
	case SYS_ICC_AP0Rn_EL1(0):
	case SYS_ICC_AP1Rn_EL1(0):
		if (is_read)
			fn = __vgic_v3_read_apxr0;
		else
			fn = __vgic_v3_write_apxr0;
		break;
	case SYS_ICC_AP0Rn_EL1(1):
	case SYS_ICC_AP1Rn_EL1(1):
		if (is_read)
			fn = __vgic_v3_read_apxr1;
		else
			fn = __vgic_v3_write_apxr1;
		break;
	case SYS_ICC_AP0Rn_EL1(2):
	case SYS_ICC_AP1Rn_EL1(2):
		if (is_read)
			fn = __vgic_v3_read_apxr2;
		else
			fn = __vgic_v3_write_apxr2;
		break;
	case SYS_ICC_AP0Rn_EL1(3):
	case SYS_ICC_AP1Rn_EL1(3):
		if (is_read)
			fn = __vgic_v3_read_apxr3;
		else
			fn = __vgic_v3_write_apxr3;
		break;
	case SYS_ICC_HPPIR0_EL1:
	case SYS_ICC_HPPIR1_EL1:
		if (unlikely(!is_read))
			return 0;
		fn = __vgic_v3_read_hppir;
		break;
	case SYS_ICC_IGRPEN0_EL1:
		if (is_read)
			fn = __vgic_v3_read_igrpen0;
		else
			fn = __vgic_v3_write_igrpen0;
		break;
	case SYS_ICC_BPR0_EL1:
		if (is_read)
			fn = __vgic_v3_read_bpr0;
		else
			fn = __vgic_v3_write_bpr0;
		break;
	case SYS_ICC_DIR_EL1:
		if (unlikely(is_read))
			return 0;
		fn = __vgic_v3_write_dir;
		break;
	case SYS_ICC_RPR_EL1:
		if (unlikely(!is_read))
			return 0;
		fn = __vgic_v3_read_rpr;
		break;
	case SYS_ICC_CTLR_EL1:
		if (is_read)
			fn = __vgic_v3_read_ctlr;
		else
			fn = __vgic_v3_write_ctlr;
		break;
	case SYS_ICC_PMR_EL1:
		if (is_read)
			fn = __vgic_v3_read_pmr;
		else
			fn = __vgic_v3_write_pmr;
		break;
	default:
		return 0;
	}

	vmcr = __vgic_v3_read_vmcr();
	rt = kvm_vcpu_sys_get_rt(vcpu);
	fn(vcpu, vmcr, rt);

	__kvm_skip_instr(vcpu);

	return 1;
}