// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kvm_host.h>
#include <linux/irqchip/arm-gic-v3.h>

#include "vgic.h"

/*
 * How KVM uses GICv4 (insert rude comments here):
 *
 * The vgic-v4 layer acts as a bridge between several entities:
 * - The GICv4 ITS representation offered by the ITS driver
 * - VFIO, which is in charge of the PCI endpoint
 * - The virtual ITS, which is the only thing the guest sees
 *
 * The configuration of VLPIs is triggered by a callback from VFIO,
 * instructing KVM that a PCI device has been configured to deliver
 * MSIs to a vITS.
 *
 * kvm_vgic_v4_set_forwarding() is thus called with the routing entry,
 * and this is used to find the corresponding vITS data structures
 * (ITS instance, device, event and irq) using a process that is
 * extremely similar to the injection of an MSI.
 *
 * At this stage, we can link the guest's view of an LPI (uniquely
 * identified by the routing entry) and the host irq, using the GICv4
 * driver mapping operation. Should the mapping succeed, we've then
 * successfully upgraded the guest's LPI to a VLPI. We can then start
 * with updating GICv4's view of the property table and generating an
 * INValidation in order to kickstart the delivery of this VLPI to the
 * guest directly, without software intervention. Well, almost.
 *
 * When the PCI endpoint is deconfigured, this operation is reversed
 * with VFIO calling kvm_vgic_v4_unset_forwarding().
 *
 * Once the VLPI has been mapped, it needs to follow any change the
 * guest performs on its LPI through the vITS. For that, a number of
 * command handlers have hooks to communicate these changes to the HW:
 * - Any invalidation triggers a call to its_prop_update_vlpi()
 * - The INT command results in an irq_set_irqchip_state(), which
 *   generates an INT on the corresponding VLPI.
 * - The CLEAR command results in an irq_set_irqchip_state(), which
 *   generates a CLEAR on the corresponding VLPI.
 * - DISCARD translates into an unmap, similar to a call to
 *   kvm_vgic_v4_unset_forwarding().
 * - MOVI is translated by an update of the existing mapping, changing
 *   the target vcpu, resulting in a VMOVI being generated.
 * - MOVALL is translated by a string of mapping updates (similar to
 *   the handling of MOVI). MOVALL is horrible.
 *
 * Note that a DISCARD/MAPTI sequence emitted from the guest without
 * reprogramming the PCI endpoint after MAPTI does not result in a
 * VLPI being mapped, as there is no callback from VFIO (the guest
 * will get the interrupt via the normal SW injection). Fixing this is
 * not trivial, and requires some horrible messing with the VFIO
 * internals. Not fun. Don't do that.
 *
 * Then there is the scheduling. Each time a vcpu is about to run on a
 * physical CPU, KVM must tell the corresponding redistributor about
 * it. And if we've migrated our vcpu from one CPU to another, we must
 * tell the ITS (so that the messages reach the right redistributor).
 * This is done in two steps: first issue an irq_set_affinity() on the
 * irq corresponding to the vcpu, then call its_make_vpe_resident().
 * You must be in a non-preemptible context. On exit, a call to
 * its_make_vpe_non_resident() tells the redistributor that we're done
 * with the vcpu.
 *
 * Finally, the doorbell handling: Each vcpu is allocated an interrupt
 * which will fire each time a VLPI is made pending whilst the vcpu is
 * not running. Each time the vcpu gets blocked, the doorbell
 * interrupt gets enabled. When the vcpu is unblocked (for whatever
 * reason), the doorbell interrupt is disabled.
 */
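
/*
 * As a rough, illustrative sketch only (the authoritative sequence is
 * implemented by vgic_v4_load() and vgic_v4_put() further down in this
 * file), the residency dance around each guest entry/exit boils down to:
 *
 *	vcpu about to run (non-preemptible context):
 *		irq_set_affinity(vpe->irq, cpumask_of(this_cpu));
 *		its_make_vpe_resident(vpe, ...);
 *	vcpu done running:
 *		its_make_vpe_non_resident(vpe, ...);
 */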

#define DB_IRQ_FLAGS	(IRQ_NOAUTOEN | IRQ_DISABLE_UNLAZY | IRQ_NO_BALANCING)

static irqreturn_t vgic_v4_doorbell_handler(int irq, void *info)
{
	struct kvm_vcpu *vcpu = info;

	/* We got the message, no need to fire again */
	if (!kvm_vgic_global_state.has_gicv4_1 &&
	    !irqd_irq_disabled(&irq_to_desc(irq)->irq_data))
		disable_irq_nosync(irq);

	/*
	 * The v4.1 doorbell can fire concurrently with the vPE being
	 * made non-resident. Ensure we only update pending_last
	 * *after* the non-residency sequence has completed.
	 */
	raw_spin_lock(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vpe_lock);
	vcpu->arch.vgic_cpu.vgic_v3.its_vpe.pending_last = true;
	raw_spin_unlock(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vpe_lock);

	kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
	kvm_vcpu_kick(vcpu);

	return IRQ_HANDLED;
}

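/*
 * Mirror the virtual SGI's current configuration (enable, group,
 * priority) into the vPE's per-SGI HW configuration.
 */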
static void vgic_v4_sync_sgi_config(struct its_vpe *vpe, struct vgic_irq *irq)
{
	vpe->sgi_config[irq->intid].enabled	= irq->enabled;
	vpe->sgi_config[irq->intid].group	= irq->group;
	vpe->sgi_config[irq->intid].priority	= irq->priority;
}

static void vgic_v4_enable_vsgis(struct kvm_vcpu *vcpu)
{
	struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
	int i;

	/*
	 * With GICv4.1, every virtual SGI can be directly injected. So
	 * let's pretend that they are HW interrupts, tied to a host
	 * IRQ. The SGI code will do its magic.
	 */
	for (i = 0; i < VGIC_NR_SGIS; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, i);
		struct irq_desc *desc;
		unsigned long flags;
		int ret;

		raw_spin_lock_irqsave(&irq->irq_lock, flags);

		if (irq->hw)
			goto unlock;

		irq->hw = true;
		irq->host_irq = irq_find_mapping(vpe->sgi_domain, i);

		/* Transfer the full irq state to the vPE */
		vgic_v4_sync_sgi_config(vpe, irq);
		desc = irq_to_desc(irq->host_irq);
		ret = irq_domain_activate_irq(irq_desc_get_irq_data(desc),
					      false);
		if (!WARN_ON(ret)) {
			/* Transfer pending state */
			ret = irq_set_irqchip_state(irq->host_irq,
						    IRQCHIP_STATE_PENDING,
						    irq->pending_latch);
			WARN_ON(ret);
			irq->pending_latch = false;
		}
	unlock:
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

static void vgic_v4_disable_vsgis(struct kvm_vcpu *vcpu)
{
	int i;

	for (i = 0; i < VGIC_NR_SGIS; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, i);
		struct irq_desc *desc;
		unsigned long flags;
		int ret;

		raw_spin_lock_irqsave(&irq->irq_lock, flags);

		if (!irq->hw)
			goto unlock;

		irq->hw = false;
		ret = irq_get_irqchip_state(irq->host_irq,
					    IRQCHIP_STATE_PENDING,
					    &irq->pending_latch);
		WARN_ON(ret);

		desc = irq_to_desc(irq->host_irq);
		irq_domain_deactivate_irq(irq_desc_get_irq_data(desc));
	unlock:
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

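/*
 * Switch all vcpus' virtual SGIs between SW emulation and directly
 * injected HW vSGIs, depending on dist->nassgireq. The guest is
 * briefly halted so the transition is atomic from its point of view.
 */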
void vgic_v4_configure_vsgis(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	unsigned long i;

	lockdep_assert_held(&kvm->arch.config_lock);

	kvm_arm_halt_guest(kvm);

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (dist->nassgireq)
			vgic_v4_enable_vsgis(vcpu);
		else
			vgic_v4_disable_vsgis(vcpu);
	}

	kvm_arm_resume_guest(kvm);
}

/*
 * Must be called with GICv4.1 and with the vPE unmapped, which
 * guarantees that any VPT caches associated with the vPE have been
 * invalidated; the VLPI state can then be obtained by peeking at the
 * VPT.
 */
void vgic_v4_get_vlpi_state(struct vgic_irq *irq, bool *val)
{
	struct its_vpe *vpe = &irq->target_vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
	int mask = BIT(irq->intid % BITS_PER_BYTE);
	void *va;
	u8 *ptr;

	va = page_address(vpe->vpt_page);
	ptr = va + irq->intid / BITS_PER_BYTE;

	*val = !!(*ptr & mask);
}

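/*
 * Wire up the per-vPE doorbell interrupt, with the vcpu as the cookie
 * handed to vgic_v4_doorbell_handler().
 */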
int vgic_v4_request_vpe_irq(struct kvm_vcpu *vcpu, int irq)
{
	return request_irq(irq, vgic_v4_doorbell_handler, 0, "vcpu", vcpu);
}

/**
 * vgic_v4_init - Initialize the GICv4 data structures
 * @kvm:	Pointer to the VM being initialized
 *
 * We may be called each time a vITS is created, or when the
 * vgic is initialized. In both cases, the number of vcpus
 * should now be fixed.
 */
int vgic_v4_init(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int nr_vcpus, ret;
	unsigned long i;

	lockdep_assert_held(&kvm->arch.config_lock);

	if (!kvm_vgic_global_state.has_gicv4)
		return 0; /* Nothing to see here... move along. */

	if (dist->its_vm.vpes)
		return 0;

	nr_vcpus = atomic_read(&kvm->online_vcpus);

	dist->its_vm.vpes = kcalloc(nr_vcpus, sizeof(*dist->its_vm.vpes),
				    GFP_KERNEL_ACCOUNT);
	if (!dist->its_vm.vpes)
		return -ENOMEM;

	dist->its_vm.nr_vpes = nr_vcpus;

	kvm_for_each_vcpu(i, vcpu, kvm)
		dist->its_vm.vpes[i] = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;

	ret = its_alloc_vcpu_irqs(&dist->its_vm);
	if (ret < 0) {
		kvm_err("VPE IRQ allocation failure\n");
		kfree(dist->its_vm.vpes);
		dist->its_vm.nr_vpes = 0;
		dist->its_vm.vpes = NULL;
		return ret;
	}

	kvm_for_each_vcpu(i, vcpu, kvm) {
		int irq = dist->its_vm.vpes[i]->irq;
		unsigned long irq_flags = DB_IRQ_FLAGS;

		/*
		 * Don't automatically enable the doorbell, as we're
		 * flipping it back and forth when the vcpu gets
		 * blocked. Also disable the lazy disabling, as the
		 * doorbell could kick us out of the guest too
		 * early...
		 *
		 * On GICv4.1, the doorbell is managed in HW and must
		 * be left enabled.
		 */
		if (kvm_vgic_global_state.has_gicv4_1)
			irq_flags &= ~IRQ_NOAUTOEN;
		irq_set_status_flags(irq, irq_flags);

		ret = vgic_v4_request_vpe_irq(vcpu, irq);
		if (ret) {
			kvm_err("failed to allocate vcpu IRQ%d\n", irq);
			/*
			 * Trick: adjust the number of vpes so we know
			 * how many to nuke on teardown...
			 */
			dist->its_vm.nr_vpes = i;
			break;
		}
	}

	if (ret)
		vgic_v4_teardown(kvm);

	return ret;
}

/**
 * vgic_v4_teardown - Free the GICv4 data structures
 * @kvm:	Pointer to the VM being destroyed
 */
void vgic_v4_teardown(struct kvm *kvm)
{
	struct its_vm *its_vm = &kvm->arch.vgic.its_vm;
	int i;

	lockdep_assert_held(&kvm->arch.config_lock);

	if (!its_vm->vpes)
		return;

	for (i = 0; i < its_vm->nr_vpes; i++) {
		struct kvm_vcpu *vcpu = kvm_get_vcpu(kvm, i);
		int irq = its_vm->vpes[i]->irq;

		irq_clear_status_flags(irq, DB_IRQ_FLAGS);
		free_irq(irq, vcpu);
	}

	its_free_vcpu_irqs(its_vm);
	kfree(its_vm->vpes);
	its_vm->nr_vpes = 0;
	its_vm->vpes = NULL;
}

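/*
 * Make the vPE non-resident, asking for a doorbell if the vcpu is
 * sitting in WFI, so that a pending VLPI can wake it up.
 */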
int vgic_v4_put(struct kvm_vcpu *vcpu)
{
	struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;

	if (!vgic_supports_direct_msis(vcpu->kvm) || !vpe->resident)
		return 0;

	return its_make_vpe_non_resident(vpe, !!vcpu_get_flag(vcpu, IN_WFI));
}

int vgic_v4_load(struct kvm_vcpu *vcpu)
{
	struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
	int err;

	if (!vgic_supports_direct_msis(vcpu->kvm) || vpe->resident)
		return 0;

	if (vcpu_get_flag(vcpu, IN_WFI))
		return 0;

	/*
	 * Before making the VPE resident, make sure the redistributor
	 * corresponding to our current CPU expects us here. See the
	 * doc in drivers/irqchip/irq-gic-v4.c to understand how this
	 * turns into a VMOVP command at the ITS level.
	 */
	err = irq_set_affinity(vpe->irq, cpumask_of(smp_processor_id()));
	if (err)
		return err;

	err = its_make_vpe_resident(vpe, false, vcpu->kvm->arch.vgic.enabled);
	if (err)
		return err;

	/*
	 * Now that the VPE is resident, let's get rid of a potential
	 * doorbell interrupt that would still be pending. This is a
	 * GICv4.0 only "feature"...
	 */
	if (!kvm_vgic_global_state.has_gicv4_1)
		err = irq_set_irqchip_state(vpe->irq, IRQCHIP_STATE_PENDING, false);

	return err;
}

void vgic_v4_commit(struct kvm_vcpu *vcpu)
{
	struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;

	/*
	 * No need to wait for the vPE to be ready across a shallow guest
	 * exit, as only a vcpu_put will invalidate it.
	 */
	if (!vpe->ready)
		its_commit_vpe(vpe);
}

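/*
 * Convert the routing entry into a kvm_msi and look up the vITS that
 * owns the doorbell address it targets.
 */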
static struct vgic_its *vgic_get_its(struct kvm *kvm,
				     struct kvm_kernel_irq_routing_entry *irq_entry)
{
	struct kvm_msi msi = (struct kvm_msi) {
		.address_lo	= irq_entry->msi.address_lo,
		.address_hi	= irq_entry->msi.address_hi,
		.data		= irq_entry->msi.data,
		.flags		= irq_entry->msi.flags,
		.devid		= irq_entry->msi.devid,
	};

	return vgic_msi_to_its(kvm, &msi);
}

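/*
 * Called when VFIO signals that a PCI device has been configured to
 * deliver MSIs to a vITS: resolve the routing entry to a guest LPI,
 * map it onto the host @virq, and transfer any pending state so the
 * interrupt is delivered as a VLPI from now on.
 */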
int kvm_vgic_v4_set_forwarding(struct kvm *kvm, int virq,
			       struct kvm_kernel_irq_routing_entry *irq_entry)
{
	struct vgic_its *its;
	struct vgic_irq *irq;
	struct its_vlpi_map map;
	unsigned long flags;
	int ret;

	if (!vgic_supports_direct_msis(kvm))
		return 0;

	/*
	 * Get the ITS, and escape early on error (not a valid
	 * doorbell for any of our vITSs).
	 */
	its = vgic_get_its(kvm, irq_entry);
	if (IS_ERR(its))
		return 0;

	mutex_lock(&its->its_lock);

	/* Perform the actual DevID/EventID -> LPI translation. */
	ret = vgic_its_resolve_lpi(kvm, its, irq_entry->msi.devid,
				   irq_entry->msi.data, &irq);
	if (ret)
		goto out;

	/*
	 * Emit the mapping request. If it fails, the ITS probably
	 * isn't v4 compatible, so let's silently bail out. Holding
	 * the ITS lock should ensure that nothing can modify the
	 * target vcpu.
	 */
	map = (struct its_vlpi_map) {
		.vm		= &kvm->arch.vgic.its_vm,
		.vpe		= &irq->target_vcpu->arch.vgic_cpu.vgic_v3.its_vpe,
		.vintid		= irq->intid,
		.properties	= ((irq->priority & 0xfc) |
				   (irq->enabled ? LPI_PROP_ENABLED : 0) |
				   LPI_PROP_GROUP1),
		.db_enabled	= true,
	};

	ret = its_map_vlpi(virq, &map);
	if (ret)
		goto out;

	irq->hw		= true;
	irq->host_irq	= virq;
	atomic_inc(&map.vpe->vlpi_count);

	/* Transfer pending state */
	raw_spin_lock_irqsave(&irq->irq_lock, flags);
	if (irq->pending_latch) {
		ret = irq_set_irqchip_state(irq->host_irq,
					    IRQCHIP_STATE_PENDING,
					    irq->pending_latch);
		WARN_RATELIMIT(ret, "IRQ %d", irq->host_irq);

		/*
		 * Clear pending_latch and communicate this state
		 * change via vgic_queue_irq_unlock.
		 */
		irq->pending_latch = false;
		vgic_queue_irq_unlock(kvm, irq, flags);
	} else {
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
	}

out:
	mutex_unlock(&its->its_lock);
	return ret;
}

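/*
 * Called when the PCI endpoint is deconfigured: undo the VLPI mapping
 * established by kvm_vgic_v4_set_forwarding() and fall back to SW
 * injection for this LPI.
 */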
int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int virq,
				 struct kvm_kernel_irq_routing_entry *irq_entry)
{
	struct vgic_its *its;
	struct vgic_irq *irq;
	int ret;

	if (!vgic_supports_direct_msis(kvm))
		return 0;

	/*
	 * Get the ITS, and escape early on error (not a valid
	 * doorbell for any of our vITSs).
	 */
	its = vgic_get_its(kvm, irq_entry);
	if (IS_ERR(its))
		return 0;

	mutex_lock(&its->its_lock);

	ret = vgic_its_resolve_lpi(kvm, its, irq_entry->msi.devid,
				   irq_entry->msi.data, &irq);
	if (ret)
		goto out;

	WARN_ON(!(irq->hw && irq->host_irq == virq));
	if (irq->hw) {
		atomic_dec(&irq->target_vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count);
		irq->hw = false;
		ret = its_unmap_vlpi(virq);
	}

out:
	mutex_unlock(&its->its_lock);
	return ret;
}