xref: /kernel/linux/linux-5.10/arch/arm64/kvm/vgic/vgic.c (revision 8c2ecf20)
18c2ecf20Sopenharmony_ci// SPDX-License-Identifier: GPL-2.0-only
28c2ecf20Sopenharmony_ci/*
38c2ecf20Sopenharmony_ci * Copyright (C) 2015, 2016 ARM Ltd.
48c2ecf20Sopenharmony_ci */
58c2ecf20Sopenharmony_ci
68c2ecf20Sopenharmony_ci#include <linux/interrupt.h>
78c2ecf20Sopenharmony_ci#include <linux/irq.h>
88c2ecf20Sopenharmony_ci#include <linux/kvm.h>
98c2ecf20Sopenharmony_ci#include <linux/kvm_host.h>
108c2ecf20Sopenharmony_ci#include <linux/list_sort.h>
118c2ecf20Sopenharmony_ci#include <linux/nospec.h>
128c2ecf20Sopenharmony_ci
138c2ecf20Sopenharmony_ci#include <asm/kvm_hyp.h>
148c2ecf20Sopenharmony_ci
158c2ecf20Sopenharmony_ci#include "vgic.h"
168c2ecf20Sopenharmony_ci
178c2ecf20Sopenharmony_ci#define CREATE_TRACE_POINTS
188c2ecf20Sopenharmony_ci#include "trace.h"
198c2ecf20Sopenharmony_ci
/*
 * Host-wide VGIC state shared across all VMs.  __ro_after_init: the
 * structure is written during initialization only and is read-only
 * afterwards.  The gicv3_cpuif static key starts out disabled here;
 * presumably it is flipped during probe when a GICv3 CPU interface is
 * in use -- confirm against the vgic init code.
 */
struct vgic_global kvm_vgic_global_state __ro_after_init = {
	.gicv3_cpuif = STATIC_KEY_FALSE_INIT,
};
238c2ecf20Sopenharmony_ci
248c2ecf20Sopenharmony_ci/*
258c2ecf20Sopenharmony_ci * Locking order is always:
268c2ecf20Sopenharmony_ci * kvm->lock (mutex)
278c2ecf20Sopenharmony_ci *   its->cmd_lock (mutex)
288c2ecf20Sopenharmony_ci *     its->its_lock (mutex)
298c2ecf20Sopenharmony_ci *       vgic_cpu->ap_list_lock		must be taken with IRQs disabled
308c2ecf20Sopenharmony_ci *         kvm->lpi_list_lock		must be taken with IRQs disabled
318c2ecf20Sopenharmony_ci *           vgic_irq->irq_lock		must be taken with IRQs disabled
328c2ecf20Sopenharmony_ci *
338c2ecf20Sopenharmony_ci * As the ap_list_lock might be taken from the timer interrupt handler,
348c2ecf20Sopenharmony_ci * we have to disable IRQs before taking this lock and everything lower
358c2ecf20Sopenharmony_ci * than it.
368c2ecf20Sopenharmony_ci *
378c2ecf20Sopenharmony_ci * If you need to take multiple locks, always take the upper lock first,
388c2ecf20Sopenharmony_ci * then the lower ones, e.g. first take the its_lock, then the irq_lock.
398c2ecf20Sopenharmony_ci * If you are already holding a lock and need to take a higher one, you
 * have to drop the lower ranking lock first and re-acquire it after having
418c2ecf20Sopenharmony_ci * taken the upper one.
428c2ecf20Sopenharmony_ci *
438c2ecf20Sopenharmony_ci * When taking more than one ap_list_lock at the same time, always take the
448c2ecf20Sopenharmony_ci * lowest numbered VCPU's ap_list_lock first, so:
458c2ecf20Sopenharmony_ci *   vcpuX->vcpu_id < vcpuY->vcpu_id:
468c2ecf20Sopenharmony_ci *     raw_spin_lock(vcpuX->arch.vgic_cpu.ap_list_lock);
478c2ecf20Sopenharmony_ci *     raw_spin_lock(vcpuY->arch.vgic_cpu.ap_list_lock);
488c2ecf20Sopenharmony_ci *
498c2ecf20Sopenharmony_ci * Since the VGIC must support injecting virtual interrupts from ISRs, we have
508c2ecf20Sopenharmony_ci * to use the raw_spin_lock_irqsave/raw_spin_unlock_irqrestore versions of outer
518c2ecf20Sopenharmony_ci * spinlocks for any lock that may be taken while injecting an interrupt.
528c2ecf20Sopenharmony_ci */
538c2ecf20Sopenharmony_ci
548c2ecf20Sopenharmony_ci/*
558c2ecf20Sopenharmony_ci * Iterate over the VM's list of mapped LPIs to find the one with a
568c2ecf20Sopenharmony_ci * matching interrupt ID and return a reference to the IRQ structure.
578c2ecf20Sopenharmony_ci */
588c2ecf20Sopenharmony_cistatic struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid)
598c2ecf20Sopenharmony_ci{
608c2ecf20Sopenharmony_ci	struct vgic_dist *dist = &kvm->arch.vgic;
618c2ecf20Sopenharmony_ci	struct vgic_irq *irq = NULL;
628c2ecf20Sopenharmony_ci	unsigned long flags;
638c2ecf20Sopenharmony_ci
648c2ecf20Sopenharmony_ci	raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
658c2ecf20Sopenharmony_ci
668c2ecf20Sopenharmony_ci	list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
678c2ecf20Sopenharmony_ci		if (irq->intid != intid)
688c2ecf20Sopenharmony_ci			continue;
698c2ecf20Sopenharmony_ci
708c2ecf20Sopenharmony_ci		/*
718c2ecf20Sopenharmony_ci		 * This increases the refcount, the caller is expected to
728c2ecf20Sopenharmony_ci		 * call vgic_put_irq() later once it's finished with the IRQ.
738c2ecf20Sopenharmony_ci		 */
748c2ecf20Sopenharmony_ci		vgic_get_irq_kref(irq);
758c2ecf20Sopenharmony_ci		goto out_unlock;
768c2ecf20Sopenharmony_ci	}
778c2ecf20Sopenharmony_ci	irq = NULL;
788c2ecf20Sopenharmony_ci
798c2ecf20Sopenharmony_ciout_unlock:
808c2ecf20Sopenharmony_ci	raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
818c2ecf20Sopenharmony_ci
828c2ecf20Sopenharmony_ci	return irq;
838c2ecf20Sopenharmony_ci}
848c2ecf20Sopenharmony_ci
/*
 * This looks up the virtual interrupt ID to get the corresponding
 * struct vgic_irq. It also increases the refcount, so any caller is expected
 * to call vgic_put_irq() once it's finished with this IRQ.
 *
 * Note: only the LPI path takes a real kref (via vgic_get_lpi());
 * vgic_put_irq() is a no-op for INTIDs below VGIC_MIN_LPI, so the
 * pairing is still safe for private IRQs and SPIs.
 */
struct vgic_irq *vgic_get_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,
			      u32 intid)
{
	/* SGIs and PPIs */
	if (intid <= VGIC_MAX_PRIVATE) {
		/* Clamp the index under speculation (Spectre-v1 hardening). */
		intid = array_index_nospec(intid, VGIC_MAX_PRIVATE + 1);
		return &vcpu->arch.vgic_cpu.private_irqs[intid];
	}

	/* SPIs */
	if (intid < (kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS)) {
		/* Same speculative-execution clamp for the shared SPI array. */
		intid = array_index_nospec(intid, kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS);
		return &kvm->arch.vgic.spis[intid - VGIC_NR_PRIVATE_IRQS];
	}

	/* LPIs */
	if (intid >= VGIC_MIN_LPI)
		return vgic_get_lpi(kvm, intid);

	/* INTIDs between the SPI range and VGIC_MIN_LPI are reserved. */
	WARN(1, "Looking up struct vgic_irq for reserved INTID");
	return NULL;
}
1128c2ecf20Sopenharmony_ci
/*
 * We can't do anything in here, because we lack the kvm pointer to
 * lock and remove the item from the lpi_list. So we keep this function
 * empty and use the return value of kref_put() to trigger the freeing.
 */
static void vgic_irq_release(struct kref *ref)
{
	/* Intentionally empty -- see __vgic_put_lpi_locked(). */
}
1218c2ecf20Sopenharmony_ci
1228c2ecf20Sopenharmony_ci/*
1238c2ecf20Sopenharmony_ci * Drop the refcount on the LPI. Must be called with lpi_list_lock held.
1248c2ecf20Sopenharmony_ci */
1258c2ecf20Sopenharmony_civoid __vgic_put_lpi_locked(struct kvm *kvm, struct vgic_irq *irq)
1268c2ecf20Sopenharmony_ci{
1278c2ecf20Sopenharmony_ci	struct vgic_dist *dist = &kvm->arch.vgic;
1288c2ecf20Sopenharmony_ci
1298c2ecf20Sopenharmony_ci	if (!kref_put(&irq->refcount, vgic_irq_release))
1308c2ecf20Sopenharmony_ci		return;
1318c2ecf20Sopenharmony_ci
1328c2ecf20Sopenharmony_ci	list_del(&irq->lpi_list);
1338c2ecf20Sopenharmony_ci	dist->lpi_list_count--;
1348c2ecf20Sopenharmony_ci
1358c2ecf20Sopenharmony_ci	kfree(irq);
1368c2ecf20Sopenharmony_ci}
1378c2ecf20Sopenharmony_ci
1388c2ecf20Sopenharmony_civoid vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
1398c2ecf20Sopenharmony_ci{
1408c2ecf20Sopenharmony_ci	struct vgic_dist *dist = &kvm->arch.vgic;
1418c2ecf20Sopenharmony_ci	unsigned long flags;
1428c2ecf20Sopenharmony_ci
1438c2ecf20Sopenharmony_ci	if (irq->intid < VGIC_MIN_LPI)
1448c2ecf20Sopenharmony_ci		return;
1458c2ecf20Sopenharmony_ci
1468c2ecf20Sopenharmony_ci	raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
1478c2ecf20Sopenharmony_ci	__vgic_put_lpi_locked(kvm, irq);
1488c2ecf20Sopenharmony_ci	raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
1498c2ecf20Sopenharmony_ci}
1508c2ecf20Sopenharmony_ci
1518c2ecf20Sopenharmony_civoid vgic_flush_pending_lpis(struct kvm_vcpu *vcpu)
1528c2ecf20Sopenharmony_ci{
1538c2ecf20Sopenharmony_ci	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
1548c2ecf20Sopenharmony_ci	struct vgic_irq *irq, *tmp;
1558c2ecf20Sopenharmony_ci	unsigned long flags;
1568c2ecf20Sopenharmony_ci
1578c2ecf20Sopenharmony_ci	raw_spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags);
1588c2ecf20Sopenharmony_ci
1598c2ecf20Sopenharmony_ci	list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) {
1608c2ecf20Sopenharmony_ci		if (irq->intid >= VGIC_MIN_LPI) {
1618c2ecf20Sopenharmony_ci			raw_spin_lock(&irq->irq_lock);
1628c2ecf20Sopenharmony_ci			list_del(&irq->ap_list);
1638c2ecf20Sopenharmony_ci			irq->vcpu = NULL;
1648c2ecf20Sopenharmony_ci			raw_spin_unlock(&irq->irq_lock);
1658c2ecf20Sopenharmony_ci			vgic_put_irq(vcpu->kvm, irq);
1668c2ecf20Sopenharmony_ci		}
1678c2ecf20Sopenharmony_ci	}
1688c2ecf20Sopenharmony_ci
1698c2ecf20Sopenharmony_ci	raw_spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);
1708c2ecf20Sopenharmony_ci}
1718c2ecf20Sopenharmony_ci
1728c2ecf20Sopenharmony_civoid vgic_irq_set_phys_pending(struct vgic_irq *irq, bool pending)
1738c2ecf20Sopenharmony_ci{
1748c2ecf20Sopenharmony_ci	WARN_ON(irq_set_irqchip_state(irq->host_irq,
1758c2ecf20Sopenharmony_ci				      IRQCHIP_STATE_PENDING,
1768c2ecf20Sopenharmony_ci				      pending));
1778c2ecf20Sopenharmony_ci}
1788c2ecf20Sopenharmony_ci
1798c2ecf20Sopenharmony_cibool vgic_get_phys_line_level(struct vgic_irq *irq)
1808c2ecf20Sopenharmony_ci{
1818c2ecf20Sopenharmony_ci	bool line_level;
1828c2ecf20Sopenharmony_ci
1838c2ecf20Sopenharmony_ci	BUG_ON(!irq->hw);
1848c2ecf20Sopenharmony_ci
1858c2ecf20Sopenharmony_ci	if (irq->get_input_level)
1868c2ecf20Sopenharmony_ci		return irq->get_input_level(irq->intid);
1878c2ecf20Sopenharmony_ci
1888c2ecf20Sopenharmony_ci	WARN_ON(irq_get_irqchip_state(irq->host_irq,
1898c2ecf20Sopenharmony_ci				      IRQCHIP_STATE_PENDING,
1908c2ecf20Sopenharmony_ci				      &line_level));
1918c2ecf20Sopenharmony_ci	return line_level;
1928c2ecf20Sopenharmony_ci}
1938c2ecf20Sopenharmony_ci
1948c2ecf20Sopenharmony_ci/* Set/Clear the physical active state */
1958c2ecf20Sopenharmony_civoid vgic_irq_set_phys_active(struct vgic_irq *irq, bool active)
1968c2ecf20Sopenharmony_ci{
1978c2ecf20Sopenharmony_ci
1988c2ecf20Sopenharmony_ci	BUG_ON(!irq->hw);
1998c2ecf20Sopenharmony_ci	WARN_ON(irq_set_irqchip_state(irq->host_irq,
2008c2ecf20Sopenharmony_ci				      IRQCHIP_STATE_ACTIVE,
2018c2ecf20Sopenharmony_ci				      active));
2028c2ecf20Sopenharmony_ci}
2038c2ecf20Sopenharmony_ci
/**
 * kvm_vgic_target_oracle - compute the target vcpu for an irq
 *
 * @irq:	The irq to route. Must be already locked.
 *
 * Based on the current state of the interrupt (enabled, pending,
 * active, vcpu and target_vcpu), compute the next vcpu this should be
 * given to. Return NULL if this shouldn't be injected at all.
 *
 * Requires the IRQ lock to be held.
 */
static struct kvm_vcpu *vgic_target_oracle(struct vgic_irq *irq)
{
	lockdep_assert_held(&irq->irq_lock);

	/* If the interrupt is active, it must stay on the current vcpu */
	if (irq->active)
		/* GNU "?:": irq->vcpu if non-NULL, else the configured target. */
		return irq->vcpu ? : irq->target_vcpu;

	/*
	 * If the IRQ is not active but enabled and pending, we should direct
	 * it to its configured target VCPU.
	 * If the distributor is disabled, pending interrupts shouldn't be
	 * forwarded.
	 */
	if (irq->enabled && irq_is_pending(irq)) {
		if (unlikely(irq->target_vcpu &&
			     !irq->target_vcpu->kvm->arch.vgic.enabled))
			return NULL;

		return irq->target_vcpu;
	}

	/* If neither active nor pending and enabled, then this IRQ should not
	 * be queued to any VCPU.
	 */
	return NULL;
}
2428c2ecf20Sopenharmony_ci
/*
 * The order of items in the ap_lists defines how we'll pack things in LRs as
 * well, the first items in the list being the first things populated in the
 * LRs.
 *
 * A hard rule is that active interrupts can never be pushed out of the LRs
 * (and therefore take priority) since we cannot reliably trap on deactivation
 * of IRQs and therefore they have to be present in the LRs.
 *
 * Otherwise things should be sorted by the priority field and the GIC
 * hardware support will take care of preemption of priority groups etc.
 *
 * Return negative if "a" sorts before "b", 0 to preserve order, and positive
 * to sort "b" before "a".
 */
static int vgic_irq_cmp(void *priv, const struct list_head *a,
			const struct list_head *b)
{
	struct vgic_irq *irqa = container_of(a, struct vgic_irq, ap_list);
	struct vgic_irq *irqb = container_of(b, struct vgic_irq, ap_list);
	bool penda, pendb;
	int ret;

	/*
	 * list_sort may call this function with the same element when
	 * the list is fairly long.
	 */
	if (unlikely(irqa == irqb))
		return 0;

	/*
	 * Take both irq_locks in list order; the _nested annotation tells
	 * lockdep this deliberate same-class nesting is not a deadlock.
	 */
	raw_spin_lock(&irqa->irq_lock);
	raw_spin_lock_nested(&irqb->irq_lock, SINGLE_DEPTH_NESTING);

	/* Active IRQs always sort before non-active ones (see rule above). */
	if (irqa->active || irqb->active) {
		ret = (int)irqb->active - (int)irqa->active;
		goto out;
	}

	penda = irqa->enabled && irq_is_pending(irqa);
	pendb = irqb->enabled && irq_is_pending(irqb);

	/* A pending-and-enabled IRQ sorts before one that is not. */
	if (!penda || !pendb) {
		ret = (int)pendb - (int)penda;
		goto out;
	}

	/* Both pending and enabled, sort by priority */
	ret = irqa->priority - irqb->priority;
out:
	raw_spin_unlock(&irqb->irq_lock);
	raw_spin_unlock(&irqa->irq_lock);
	return ret;
}
2968c2ecf20Sopenharmony_ci
2978c2ecf20Sopenharmony_ci/* Must be called with the ap_list_lock held */
2988c2ecf20Sopenharmony_cistatic void vgic_sort_ap_list(struct kvm_vcpu *vcpu)
2998c2ecf20Sopenharmony_ci{
3008c2ecf20Sopenharmony_ci	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
3018c2ecf20Sopenharmony_ci
3028c2ecf20Sopenharmony_ci	lockdep_assert_held(&vgic_cpu->ap_list_lock);
3038c2ecf20Sopenharmony_ci
3048c2ecf20Sopenharmony_ci	list_sort(NULL, &vgic_cpu->ap_list_head, vgic_irq_cmp);
3058c2ecf20Sopenharmony_ci}
3068c2ecf20Sopenharmony_ci
3078c2ecf20Sopenharmony_ci/*
3088c2ecf20Sopenharmony_ci * Only valid injection if changing level for level-triggered IRQs or for a
3098c2ecf20Sopenharmony_ci * rising edge, and in-kernel connected IRQ lines can only be controlled by
3108c2ecf20Sopenharmony_ci * their owner.
3118c2ecf20Sopenharmony_ci */
3128c2ecf20Sopenharmony_cistatic bool vgic_validate_injection(struct vgic_irq *irq, bool level, void *owner)
3138c2ecf20Sopenharmony_ci{
3148c2ecf20Sopenharmony_ci	if (irq->owner != owner)
3158c2ecf20Sopenharmony_ci		return false;
3168c2ecf20Sopenharmony_ci
3178c2ecf20Sopenharmony_ci	switch (irq->config) {
3188c2ecf20Sopenharmony_ci	case VGIC_CONFIG_LEVEL:
3198c2ecf20Sopenharmony_ci		return irq->line_level != level;
3208c2ecf20Sopenharmony_ci	case VGIC_CONFIG_EDGE:
3218c2ecf20Sopenharmony_ci		return level;
3228c2ecf20Sopenharmony_ci	}
3238c2ecf20Sopenharmony_ci
3248c2ecf20Sopenharmony_ci	return false;
3258c2ecf20Sopenharmony_ci}
3268c2ecf20Sopenharmony_ci
/*
 * Check whether an IRQ needs to (and can) be queued to a VCPU's ap list.
 * Do the queuing if necessary, taking the right locks in the right order.
 * Returns true when the IRQ was queued, false otherwise.
 *
 * Needs to be entered with the IRQ lock already held, but will return
 * with all locks dropped.
 *
 * @flags holds the IRQ state saved by the caller's
 * raw_spin_lock_irqsave() on irq->irq_lock; it is restored whenever
 * that lock is dropped here.
 */
bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq,
			   unsigned long flags)
{
	struct kvm_vcpu *vcpu;

	lockdep_assert_held(&irq->irq_lock);

retry:
	vcpu = vgic_target_oracle(irq);
	if (irq->vcpu || !vcpu) {
		/*
		 * If this IRQ is already on a VCPU's ap_list, then it
		 * cannot be moved or modified and there is no more work for
		 * us to do.
		 *
		 * Otherwise, if the irq is not pending and enabled, it does
		 * not need to be inserted into an ap_list and there is also
		 * no more work for us to do.
		 */
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

		/*
		 * We have to kick the VCPU here, because we could be
		 * queueing an edge-triggered interrupt for which we
		 * get no EOI maintenance interrupt. In that case,
		 * while the IRQ is already on the VCPU's AP list, the
		 * VCPU could have EOI'ed the original interrupt and
		 * won't see this one until it exits for some other
		 * reason.
		 */
		if (vcpu) {
			kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
			kvm_vcpu_kick(vcpu);
		}
		return false;
	}

	/*
	 * We must unlock the irq lock to take the ap_list_lock where
	 * we are going to insert this new pending interrupt.
	 * (Lock order: ap_list_lock must be taken before irq_lock.)
	 */
	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

	/* someone can do stuff here, which we re-check below */

	raw_spin_lock_irqsave(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
	raw_spin_lock(&irq->irq_lock);

	/*
	 * Did something change behind our backs?
	 *
	 * There are two cases:
	 * 1) The irq lost its pending state or was disabled behind our
	 *    backs and/or it was queued to another VCPU's ap_list.
	 * 2) Someone changed the affinity on this irq behind our
	 *    backs and we are now holding the wrong ap_list_lock.
	 *
	 * In both cases, drop the locks and retry.
	 */

	if (unlikely(irq->vcpu || vcpu != vgic_target_oracle(irq))) {
		raw_spin_unlock(&irq->irq_lock);
		raw_spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock,
					   flags);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		goto retry;
	}

	/*
	 * Grab a reference to the irq to reflect the fact that it is
	 * now in the ap_list.
	 */
	vgic_get_irq_kref(irq);
	list_add_tail(&irq->ap_list, &vcpu->arch.vgic_cpu.ap_list_head);
	irq->vcpu = vcpu;

	raw_spin_unlock(&irq->irq_lock);
	raw_spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags);

	/* Make the target VCPU notice the newly queued interrupt. */
	kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
	kvm_vcpu_kick(vcpu);

	return true;
}
4208c2ecf20Sopenharmony_ci
/**
 * kvm_vgic_inject_irq - Inject an IRQ from a device to the vgic
 * @kvm:     The VM structure pointer
 * @cpuid:   The CPU for PPIs
 * @intid:   The INTID to inject a new state to.
 * @level:   Edge-triggered:  true:  to trigger the interrupt
 *			      false: to ignore the call
 *	     Level-sensitive  true:  raise the input signal
 *			      false: lower the input signal
 * @owner:   The opaque pointer to the owner of the IRQ being raised to verify
 *           that the caller is allowed to inject this IRQ.  Userspace
 *           injections will have owner == NULL.
 *
 * The VGIC is not concerned with devices being active-LOW or active-HIGH for
 * level-sensitive interrupts.  You can think of the level parameter as 1
 * being HIGH and 0 being LOW and all devices being active-HIGH.
 *
 * Return: 0 on success (including "nothing to do"), a negative error code
 * if the vgic could not be initialized, the cpuid/intid is invalid, or the
 * INTID cannot be resolved to a struct vgic_irq.
 */
int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
			bool level, void *owner)
{
	struct kvm_vcpu *vcpu;
	struct vgic_irq *irq;
	unsigned long flags;
	int ret;

	trace_vgic_update_irq_pending(cpuid, intid, level);

	/* Make sure the vgic is fully set up before touching its state. */
	ret = vgic_lazy_init(kvm);
	if (ret)
		return ret;

	vcpu = kvm_get_vcpu(kvm, cpuid);
	/* Private interrupts (SGI/PPI) need a valid target VCPU. */
	if (!vcpu && intid < VGIC_NR_PRIVATE_IRQS)
		return -EINVAL;

	irq = vgic_get_irq(kvm, vcpu, intid);
	if (!irq)
		return -EINVAL;

	raw_spin_lock_irqsave(&irq->irq_lock, flags);

	if (!vgic_validate_injection(irq, level, owner)) {
		/* Nothing to see here, move along... */
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(kvm, irq);
		return 0;
	}

	/* Record the new input state before (possibly) queueing the IRQ. */
	if (irq->config == VGIC_CONFIG_LEVEL)
		irq->line_level = level;
	else
		irq->pending_latch = true;

	/* Drops irq_lock; queues and kicks the target VCPU if needed. */
	vgic_queue_irq_unlock(kvm, irq, flags);
	vgic_put_irq(kvm, irq);

	return 0;
}
4798c2ecf20Sopenharmony_ci
4808c2ecf20Sopenharmony_ci/* @irq->irq_lock must be held */
4818c2ecf20Sopenharmony_cistatic int kvm_vgic_map_irq(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
4828c2ecf20Sopenharmony_ci			    unsigned int host_irq,
4838c2ecf20Sopenharmony_ci			    bool (*get_input_level)(int vindid))
4848c2ecf20Sopenharmony_ci{
4858c2ecf20Sopenharmony_ci	struct irq_desc *desc;
4868c2ecf20Sopenharmony_ci	struct irq_data *data;
4878c2ecf20Sopenharmony_ci
4888c2ecf20Sopenharmony_ci	/*
4898c2ecf20Sopenharmony_ci	 * Find the physical IRQ number corresponding to @host_irq
4908c2ecf20Sopenharmony_ci	 */
4918c2ecf20Sopenharmony_ci	desc = irq_to_desc(host_irq);
4928c2ecf20Sopenharmony_ci	if (!desc) {
4938c2ecf20Sopenharmony_ci		kvm_err("%s: no interrupt descriptor\n", __func__);
4948c2ecf20Sopenharmony_ci		return -EINVAL;
4958c2ecf20Sopenharmony_ci	}
4968c2ecf20Sopenharmony_ci	data = irq_desc_get_irq_data(desc);
4978c2ecf20Sopenharmony_ci	while (data->parent_data)
4988c2ecf20Sopenharmony_ci		data = data->parent_data;
4998c2ecf20Sopenharmony_ci
5008c2ecf20Sopenharmony_ci	irq->hw = true;
5018c2ecf20Sopenharmony_ci	irq->host_irq = host_irq;
5028c2ecf20Sopenharmony_ci	irq->hwintid = data->hwirq;
5038c2ecf20Sopenharmony_ci	irq->get_input_level = get_input_level;
5048c2ecf20Sopenharmony_ci	return 0;
5058c2ecf20Sopenharmony_ci}
5068c2ecf20Sopenharmony_ci
/* @irq->irq_lock must be held */
static inline void kvm_vgic_unmap_irq(struct vgic_irq *irq)
{
	/* Sever the link to the physical IRQ; the vIRQ is purely virtual again. */
	irq->hw = false;
	irq->hwintid = 0;
	irq->get_input_level = NULL;
}
5148c2ecf20Sopenharmony_ci
5158c2ecf20Sopenharmony_ciint kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq,
5168c2ecf20Sopenharmony_ci			  u32 vintid, bool (*get_input_level)(int vindid))
5178c2ecf20Sopenharmony_ci{
5188c2ecf20Sopenharmony_ci	struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
5198c2ecf20Sopenharmony_ci	unsigned long flags;
5208c2ecf20Sopenharmony_ci	int ret;
5218c2ecf20Sopenharmony_ci
5228c2ecf20Sopenharmony_ci	BUG_ON(!irq);
5238c2ecf20Sopenharmony_ci
5248c2ecf20Sopenharmony_ci	raw_spin_lock_irqsave(&irq->irq_lock, flags);
5258c2ecf20Sopenharmony_ci	ret = kvm_vgic_map_irq(vcpu, irq, host_irq, get_input_level);
5268c2ecf20Sopenharmony_ci	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
5278c2ecf20Sopenharmony_ci	vgic_put_irq(vcpu->kvm, irq);
5288c2ecf20Sopenharmony_ci
5298c2ecf20Sopenharmony_ci	return ret;
5308c2ecf20Sopenharmony_ci}
5318c2ecf20Sopenharmony_ci
5328c2ecf20Sopenharmony_ci/**
5338c2ecf20Sopenharmony_ci * kvm_vgic_reset_mapped_irq - Reset a mapped IRQ
5348c2ecf20Sopenharmony_ci * @vcpu: The VCPU pointer
5358c2ecf20Sopenharmony_ci * @vintid: The INTID of the interrupt
5368c2ecf20Sopenharmony_ci *
5378c2ecf20Sopenharmony_ci * Reset the active and pending states of a mapped interrupt.  Kernel
5388c2ecf20Sopenharmony_ci * subsystems injecting mapped interrupts should reset their interrupt lines
5398c2ecf20Sopenharmony_ci * when we are doing a reset of the VM.
5408c2ecf20Sopenharmony_ci */
5418c2ecf20Sopenharmony_civoid kvm_vgic_reset_mapped_irq(struct kvm_vcpu *vcpu, u32 vintid)
5428c2ecf20Sopenharmony_ci{
5438c2ecf20Sopenharmony_ci	struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
5448c2ecf20Sopenharmony_ci	unsigned long flags;
5458c2ecf20Sopenharmony_ci
5468c2ecf20Sopenharmony_ci	if (!irq->hw)
5478c2ecf20Sopenharmony_ci		goto out;
5488c2ecf20Sopenharmony_ci
5498c2ecf20Sopenharmony_ci	raw_spin_lock_irqsave(&irq->irq_lock, flags);
5508c2ecf20Sopenharmony_ci	irq->active = false;
5518c2ecf20Sopenharmony_ci	irq->pending_latch = false;
5528c2ecf20Sopenharmony_ci	irq->line_level = false;
5538c2ecf20Sopenharmony_ci	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
5548c2ecf20Sopenharmony_ciout:
5558c2ecf20Sopenharmony_ci	vgic_put_irq(vcpu->kvm, irq);
5568c2ecf20Sopenharmony_ci}
5578c2ecf20Sopenharmony_ci
5588c2ecf20Sopenharmony_ciint kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid)
5598c2ecf20Sopenharmony_ci{
5608c2ecf20Sopenharmony_ci	struct vgic_irq *irq;
5618c2ecf20Sopenharmony_ci	unsigned long flags;
5628c2ecf20Sopenharmony_ci
5638c2ecf20Sopenharmony_ci	if (!vgic_initialized(vcpu->kvm))
5648c2ecf20Sopenharmony_ci		return -EAGAIN;
5658c2ecf20Sopenharmony_ci
5668c2ecf20Sopenharmony_ci	irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
5678c2ecf20Sopenharmony_ci	BUG_ON(!irq);
5688c2ecf20Sopenharmony_ci
5698c2ecf20Sopenharmony_ci	raw_spin_lock_irqsave(&irq->irq_lock, flags);
5708c2ecf20Sopenharmony_ci	kvm_vgic_unmap_irq(irq);
5718c2ecf20Sopenharmony_ci	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
5728c2ecf20Sopenharmony_ci	vgic_put_irq(vcpu->kvm, irq);
5738c2ecf20Sopenharmony_ci
5748c2ecf20Sopenharmony_ci	return 0;
5758c2ecf20Sopenharmony_ci}
5768c2ecf20Sopenharmony_ci
5778c2ecf20Sopenharmony_ci/**
5788c2ecf20Sopenharmony_ci * kvm_vgic_set_owner - Set the owner of an interrupt for a VM
5798c2ecf20Sopenharmony_ci *
5808c2ecf20Sopenharmony_ci * @vcpu:   Pointer to the VCPU (used for PPIs)
5818c2ecf20Sopenharmony_ci * @intid:  The virtual INTID identifying the interrupt (PPI or SPI)
5828c2ecf20Sopenharmony_ci * @owner:  Opaque pointer to the owner
5838c2ecf20Sopenharmony_ci *
5848c2ecf20Sopenharmony_ci * Returns 0 if intid is not already used by another in-kernel device and the
5858c2ecf20Sopenharmony_ci * owner is set, otherwise returns an error code.
5868c2ecf20Sopenharmony_ci */
5878c2ecf20Sopenharmony_ciint kvm_vgic_set_owner(struct kvm_vcpu *vcpu, unsigned int intid, void *owner)
5888c2ecf20Sopenharmony_ci{
5898c2ecf20Sopenharmony_ci	struct vgic_irq *irq;
5908c2ecf20Sopenharmony_ci	unsigned long flags;
5918c2ecf20Sopenharmony_ci	int ret = 0;
5928c2ecf20Sopenharmony_ci
5938c2ecf20Sopenharmony_ci	if (!vgic_initialized(vcpu->kvm))
5948c2ecf20Sopenharmony_ci		return -EAGAIN;
5958c2ecf20Sopenharmony_ci
5968c2ecf20Sopenharmony_ci	/* SGIs and LPIs cannot be wired up to any device */
5978c2ecf20Sopenharmony_ci	if (!irq_is_ppi(intid) && !vgic_valid_spi(vcpu->kvm, intid))
5988c2ecf20Sopenharmony_ci		return -EINVAL;
5998c2ecf20Sopenharmony_ci
6008c2ecf20Sopenharmony_ci	irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
6018c2ecf20Sopenharmony_ci	raw_spin_lock_irqsave(&irq->irq_lock, flags);
6028c2ecf20Sopenharmony_ci	if (irq->owner && irq->owner != owner)
6038c2ecf20Sopenharmony_ci		ret = -EEXIST;
6048c2ecf20Sopenharmony_ci	else
6058c2ecf20Sopenharmony_ci		irq->owner = owner;
6068c2ecf20Sopenharmony_ci	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
6078c2ecf20Sopenharmony_ci
6088c2ecf20Sopenharmony_ci	return ret;
6098c2ecf20Sopenharmony_ci}
6108c2ecf20Sopenharmony_ci
/**
 * vgic_prune_ap_list - Remove non-relevant interrupts from the list
 *
 * @vcpu: The VCPU pointer
 *
 * Go over the list of "interesting" interrupts, and prune those that we
 * won't have to consider in the near future.
 *
 * Must be called with IRQs disabled (see the locking-order comment at the
 * top of this file); takes the VCPU's ap_list_lock internally.
 */
static void vgic_prune_ap_list(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_irq *irq, *tmp;

	DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());

retry:
	raw_spin_lock(&vgic_cpu->ap_list_lock);

	list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) {
		struct kvm_vcpu *target_vcpu, *vcpuA, *vcpuB;
		bool target_vcpu_needs_kick = false;

		raw_spin_lock(&irq->irq_lock);

		/* An IRQ on this ap_list must point back at this VCPU. */
		BUG_ON(vcpu != irq->vcpu);

		target_vcpu = vgic_target_oracle(irq);

		if (!target_vcpu) {
			/*
			 * We don't need to process this interrupt any
			 * further, move it off the list.
			 */
			list_del(&irq->ap_list);
			irq->vcpu = NULL;
			raw_spin_unlock(&irq->irq_lock);

			/*
			 * This vgic_put_irq call matches the
			 * vgic_get_irq_kref in vgic_queue_irq_unlock,
			 * where we added the LPI to the ap_list. As
			 * we remove the irq from the list, we also
			 * drop the refcount.
			 */
			vgic_put_irq(vcpu->kvm, irq);
			continue;
		}

		if (target_vcpu == vcpu) {
			/* We're on the right CPU */
			raw_spin_unlock(&irq->irq_lock);
			continue;
		}

		/* This interrupt looks like it has to be migrated. */

		/*
		 * Drop both locks so we can re-take the two ap_list_locks
		 * in a deadlock-safe order below; the oracle decision is
		 * re-validated afterwards.
		 */
		raw_spin_unlock(&irq->irq_lock);
		raw_spin_unlock(&vgic_cpu->ap_list_lock);

		/*
		 * Ensure locking order by always locking the smallest
		 * ID first.
		 */
		if (vcpu->vcpu_id < target_vcpu->vcpu_id) {
			vcpuA = vcpu;
			vcpuB = target_vcpu;
		} else {
			vcpuA = target_vcpu;
			vcpuB = vcpu;
		}

		raw_spin_lock(&vcpuA->arch.vgic_cpu.ap_list_lock);
		raw_spin_lock_nested(&vcpuB->arch.vgic_cpu.ap_list_lock,
				      SINGLE_DEPTH_NESTING);
		raw_spin_lock(&irq->irq_lock);

		/*
		 * If the affinity has been preserved, move the
		 * interrupt around. Otherwise, it means things have
		 * changed while the interrupt was unlocked, and we
		 * need to replay this.
		 *
		 * In all cases, we cannot trust the list not to have
		 * changed, so we restart from the beginning.
		 */
		if (target_vcpu == vgic_target_oracle(irq)) {
			struct vgic_cpu *new_cpu = &target_vcpu->arch.vgic_cpu;

			list_del(&irq->ap_list);
			irq->vcpu = target_vcpu;
			list_add_tail(&irq->ap_list, &new_cpu->ap_list_head);
			target_vcpu_needs_kick = true;
		}

		raw_spin_unlock(&irq->irq_lock);
		raw_spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock);
		raw_spin_unlock(&vcpuA->arch.vgic_cpu.ap_list_lock);

		/* Make the new owner notice the migrated interrupt. */
		if (target_vcpu_needs_kick) {
			kvm_make_request(KVM_REQ_IRQ_PENDING, target_vcpu);
			kvm_vcpu_kick(target_vcpu);
		}

		goto retry;
	}

	raw_spin_unlock(&vgic_cpu->ap_list_lock);
}
7198c2ecf20Sopenharmony_ci
7208c2ecf20Sopenharmony_cistatic inline void vgic_fold_lr_state(struct kvm_vcpu *vcpu)
7218c2ecf20Sopenharmony_ci{
7228c2ecf20Sopenharmony_ci	if (kvm_vgic_global_state.type == VGIC_V2)
7238c2ecf20Sopenharmony_ci		vgic_v2_fold_lr_state(vcpu);
7248c2ecf20Sopenharmony_ci	else
7258c2ecf20Sopenharmony_ci		vgic_v3_fold_lr_state(vcpu);
7268c2ecf20Sopenharmony_ci}
7278c2ecf20Sopenharmony_ci
7288c2ecf20Sopenharmony_ci/* Requires the irq_lock to be held. */
7298c2ecf20Sopenharmony_cistatic inline void vgic_populate_lr(struct kvm_vcpu *vcpu,
7308c2ecf20Sopenharmony_ci				    struct vgic_irq *irq, int lr)
7318c2ecf20Sopenharmony_ci{
7328c2ecf20Sopenharmony_ci	lockdep_assert_held(&irq->irq_lock);
7338c2ecf20Sopenharmony_ci
7348c2ecf20Sopenharmony_ci	if (kvm_vgic_global_state.type == VGIC_V2)
7358c2ecf20Sopenharmony_ci		vgic_v2_populate_lr(vcpu, irq, lr);
7368c2ecf20Sopenharmony_ci	else
7378c2ecf20Sopenharmony_ci		vgic_v3_populate_lr(vcpu, irq, lr);
7388c2ecf20Sopenharmony_ci}
7398c2ecf20Sopenharmony_ci
7408c2ecf20Sopenharmony_cistatic inline void vgic_clear_lr(struct kvm_vcpu *vcpu, int lr)
7418c2ecf20Sopenharmony_ci{
7428c2ecf20Sopenharmony_ci	if (kvm_vgic_global_state.type == VGIC_V2)
7438c2ecf20Sopenharmony_ci		vgic_v2_clear_lr(vcpu, lr);
7448c2ecf20Sopenharmony_ci	else
7458c2ecf20Sopenharmony_ci		vgic_v3_clear_lr(vcpu, lr);
7468c2ecf20Sopenharmony_ci}
7478c2ecf20Sopenharmony_ci
7488c2ecf20Sopenharmony_cistatic inline void vgic_set_underflow(struct kvm_vcpu *vcpu)
7498c2ecf20Sopenharmony_ci{
7508c2ecf20Sopenharmony_ci	if (kvm_vgic_global_state.type == VGIC_V2)
7518c2ecf20Sopenharmony_ci		vgic_v2_set_underflow(vcpu);
7528c2ecf20Sopenharmony_ci	else
7538c2ecf20Sopenharmony_ci		vgic_v3_set_underflow(vcpu);
7548c2ecf20Sopenharmony_ci}
7558c2ecf20Sopenharmony_ci
7568c2ecf20Sopenharmony_ci/* Requires the ap_list_lock to be held. */
7578c2ecf20Sopenharmony_cistatic int compute_ap_list_depth(struct kvm_vcpu *vcpu,
7588c2ecf20Sopenharmony_ci				 bool *multi_sgi)
7598c2ecf20Sopenharmony_ci{
7608c2ecf20Sopenharmony_ci	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
7618c2ecf20Sopenharmony_ci	struct vgic_irq *irq;
7628c2ecf20Sopenharmony_ci	int count = 0;
7638c2ecf20Sopenharmony_ci
7648c2ecf20Sopenharmony_ci	*multi_sgi = false;
7658c2ecf20Sopenharmony_ci
7668c2ecf20Sopenharmony_ci	lockdep_assert_held(&vgic_cpu->ap_list_lock);
7678c2ecf20Sopenharmony_ci
7688c2ecf20Sopenharmony_ci	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
7698c2ecf20Sopenharmony_ci		int w;
7708c2ecf20Sopenharmony_ci
7718c2ecf20Sopenharmony_ci		raw_spin_lock(&irq->irq_lock);
7728c2ecf20Sopenharmony_ci		/* GICv2 SGIs can count for more than one... */
7738c2ecf20Sopenharmony_ci		w = vgic_irq_get_lr_count(irq);
7748c2ecf20Sopenharmony_ci		raw_spin_unlock(&irq->irq_lock);
7758c2ecf20Sopenharmony_ci
7768c2ecf20Sopenharmony_ci		count += w;
7778c2ecf20Sopenharmony_ci		*multi_sgi |= (w > 1);
7788c2ecf20Sopenharmony_ci	}
7798c2ecf20Sopenharmony_ci	return count;
7808c2ecf20Sopenharmony_ci}
7818c2ecf20Sopenharmony_ci
7828c2ecf20Sopenharmony_ci/* Requires the VCPU's ap_list_lock to be held. */
7838c2ecf20Sopenharmony_cistatic void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
7848c2ecf20Sopenharmony_ci{
7858c2ecf20Sopenharmony_ci	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
7868c2ecf20Sopenharmony_ci	struct vgic_irq *irq;
7878c2ecf20Sopenharmony_ci	int count;
7888c2ecf20Sopenharmony_ci	bool multi_sgi;
7898c2ecf20Sopenharmony_ci	u8 prio = 0xff;
7908c2ecf20Sopenharmony_ci	int i = 0;
7918c2ecf20Sopenharmony_ci
7928c2ecf20Sopenharmony_ci	lockdep_assert_held(&vgic_cpu->ap_list_lock);
7938c2ecf20Sopenharmony_ci
7948c2ecf20Sopenharmony_ci	count = compute_ap_list_depth(vcpu, &multi_sgi);
7958c2ecf20Sopenharmony_ci	if (count > kvm_vgic_global_state.nr_lr || multi_sgi)
7968c2ecf20Sopenharmony_ci		vgic_sort_ap_list(vcpu);
7978c2ecf20Sopenharmony_ci
7988c2ecf20Sopenharmony_ci	count = 0;
7998c2ecf20Sopenharmony_ci
8008c2ecf20Sopenharmony_ci	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
8018c2ecf20Sopenharmony_ci		raw_spin_lock(&irq->irq_lock);
8028c2ecf20Sopenharmony_ci
8038c2ecf20Sopenharmony_ci		/*
8048c2ecf20Sopenharmony_ci		 * If we have multi-SGIs in the pipeline, we need to
8058c2ecf20Sopenharmony_ci		 * guarantee that they are all seen before any IRQ of
8068c2ecf20Sopenharmony_ci		 * lower priority. In that case, we need to filter out
8078c2ecf20Sopenharmony_ci		 * these interrupts by exiting early. This is easy as
8088c2ecf20Sopenharmony_ci		 * the AP list has been sorted already.
8098c2ecf20Sopenharmony_ci		 */
8108c2ecf20Sopenharmony_ci		if (multi_sgi && irq->priority > prio) {
8118c2ecf20Sopenharmony_ci			_raw_spin_unlock(&irq->irq_lock);
8128c2ecf20Sopenharmony_ci			break;
8138c2ecf20Sopenharmony_ci		}
8148c2ecf20Sopenharmony_ci
8158c2ecf20Sopenharmony_ci		if (likely(vgic_target_oracle(irq) == vcpu)) {
8168c2ecf20Sopenharmony_ci			vgic_populate_lr(vcpu, irq, count++);
8178c2ecf20Sopenharmony_ci
8188c2ecf20Sopenharmony_ci			if (irq->source)
8198c2ecf20Sopenharmony_ci				prio = irq->priority;
8208c2ecf20Sopenharmony_ci		}
8218c2ecf20Sopenharmony_ci
8228c2ecf20Sopenharmony_ci		raw_spin_unlock(&irq->irq_lock);
8238c2ecf20Sopenharmony_ci
8248c2ecf20Sopenharmony_ci		if (count == kvm_vgic_global_state.nr_lr) {
8258c2ecf20Sopenharmony_ci			if (!list_is_last(&irq->ap_list,
8268c2ecf20Sopenharmony_ci					  &vgic_cpu->ap_list_head))
8278c2ecf20Sopenharmony_ci				vgic_set_underflow(vcpu);
8288c2ecf20Sopenharmony_ci			break;
8298c2ecf20Sopenharmony_ci		}
8308c2ecf20Sopenharmony_ci	}
8318c2ecf20Sopenharmony_ci
8328c2ecf20Sopenharmony_ci	/* Nuke remaining LRs */
8338c2ecf20Sopenharmony_ci	for (i = count ; i < kvm_vgic_global_state.nr_lr; i++)
8348c2ecf20Sopenharmony_ci		vgic_clear_lr(vcpu, i);
8358c2ecf20Sopenharmony_ci
8368c2ecf20Sopenharmony_ci	if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
8378c2ecf20Sopenharmony_ci		vcpu->arch.vgic_cpu.vgic_v2.used_lrs = count;
8388c2ecf20Sopenharmony_ci	else
8398c2ecf20Sopenharmony_ci		vcpu->arch.vgic_cpu.vgic_v3.used_lrs = count;
8408c2ecf20Sopenharmony_ci}
8418c2ecf20Sopenharmony_ci
8428c2ecf20Sopenharmony_cistatic inline bool can_access_vgic_from_kernel(void)
8438c2ecf20Sopenharmony_ci{
8448c2ecf20Sopenharmony_ci	/*
8458c2ecf20Sopenharmony_ci	 * GICv2 can always be accessed from the kernel because it is
8468c2ecf20Sopenharmony_ci	 * memory-mapped, and VHE systems can access GICv3 EL2 system
8478c2ecf20Sopenharmony_ci	 * registers.
8488c2ecf20Sopenharmony_ci	 */
8498c2ecf20Sopenharmony_ci	return !static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif) || has_vhe();
8508c2ecf20Sopenharmony_ci}
8518c2ecf20Sopenharmony_ci
8528c2ecf20Sopenharmony_cistatic inline void vgic_save_state(struct kvm_vcpu *vcpu)
8538c2ecf20Sopenharmony_ci{
8548c2ecf20Sopenharmony_ci	if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
8558c2ecf20Sopenharmony_ci		vgic_v2_save_state(vcpu);
8568c2ecf20Sopenharmony_ci	else
8578c2ecf20Sopenharmony_ci		__vgic_v3_save_state(&vcpu->arch.vgic_cpu.vgic_v3);
8588c2ecf20Sopenharmony_ci}
8598c2ecf20Sopenharmony_ci
8608c2ecf20Sopenharmony_ci/* Sync back the hardware VGIC state into our emulation after a guest's run. */
8618c2ecf20Sopenharmony_civoid kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
8628c2ecf20Sopenharmony_ci{
8638c2ecf20Sopenharmony_ci	int used_lrs;
8648c2ecf20Sopenharmony_ci
8658c2ecf20Sopenharmony_ci	/* An empty ap_list_head implies used_lrs == 0 */
8668c2ecf20Sopenharmony_ci	if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head))
8678c2ecf20Sopenharmony_ci		return;
8688c2ecf20Sopenharmony_ci
8698c2ecf20Sopenharmony_ci	if (can_access_vgic_from_kernel())
8708c2ecf20Sopenharmony_ci		vgic_save_state(vcpu);
8718c2ecf20Sopenharmony_ci
8728c2ecf20Sopenharmony_ci	if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
8738c2ecf20Sopenharmony_ci		used_lrs = vcpu->arch.vgic_cpu.vgic_v2.used_lrs;
8748c2ecf20Sopenharmony_ci	else
8758c2ecf20Sopenharmony_ci		used_lrs = vcpu->arch.vgic_cpu.vgic_v3.used_lrs;
8768c2ecf20Sopenharmony_ci
8778c2ecf20Sopenharmony_ci	if (used_lrs)
8788c2ecf20Sopenharmony_ci		vgic_fold_lr_state(vcpu);
8798c2ecf20Sopenharmony_ci	vgic_prune_ap_list(vcpu);
8808c2ecf20Sopenharmony_ci}
8818c2ecf20Sopenharmony_ci
8828c2ecf20Sopenharmony_cistatic inline void vgic_restore_state(struct kvm_vcpu *vcpu)
8838c2ecf20Sopenharmony_ci{
8848c2ecf20Sopenharmony_ci	if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
8858c2ecf20Sopenharmony_ci		vgic_v2_restore_state(vcpu);
8868c2ecf20Sopenharmony_ci	else
8878c2ecf20Sopenharmony_ci		__vgic_v3_restore_state(&vcpu->arch.vgic_cpu.vgic_v3);
8888c2ecf20Sopenharmony_ci}
8898c2ecf20Sopenharmony_ci
8908c2ecf20Sopenharmony_ci/* Flush our emulation state into the GIC hardware before entering the guest. */
8918c2ecf20Sopenharmony_civoid kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
8928c2ecf20Sopenharmony_ci{
8938c2ecf20Sopenharmony_ci	/*
8948c2ecf20Sopenharmony_ci	 * If there are no virtual interrupts active or pending for this
8958c2ecf20Sopenharmony_ci	 * VCPU, then there is no work to do and we can bail out without
8968c2ecf20Sopenharmony_ci	 * taking any lock.  There is a potential race with someone injecting
8978c2ecf20Sopenharmony_ci	 * interrupts to the VCPU, but it is a benign race as the VCPU will
8988c2ecf20Sopenharmony_ci	 * either observe the new interrupt before or after doing this check,
8998c2ecf20Sopenharmony_ci	 * and introducing additional synchronization mechanism doesn't change
9008c2ecf20Sopenharmony_ci	 * this.
9018c2ecf20Sopenharmony_ci	 *
9028c2ecf20Sopenharmony_ci	 * Note that we still need to go through the whole thing if anything
9038c2ecf20Sopenharmony_ci	 * can be directly injected (GICv4).
9048c2ecf20Sopenharmony_ci	 */
9058c2ecf20Sopenharmony_ci	if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head) &&
9068c2ecf20Sopenharmony_ci	    !vgic_supports_direct_msis(vcpu->kvm))
9078c2ecf20Sopenharmony_ci		return;
9088c2ecf20Sopenharmony_ci
9098c2ecf20Sopenharmony_ci	DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());
9108c2ecf20Sopenharmony_ci
9118c2ecf20Sopenharmony_ci	if (!list_empty(&vcpu->arch.vgic_cpu.ap_list_head)) {
9128c2ecf20Sopenharmony_ci		raw_spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
9138c2ecf20Sopenharmony_ci		vgic_flush_lr_state(vcpu);
9148c2ecf20Sopenharmony_ci		raw_spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
9158c2ecf20Sopenharmony_ci	}
9168c2ecf20Sopenharmony_ci
9178c2ecf20Sopenharmony_ci	if (can_access_vgic_from_kernel())
9188c2ecf20Sopenharmony_ci		vgic_restore_state(vcpu);
9198c2ecf20Sopenharmony_ci}
9208c2ecf20Sopenharmony_ci
9218c2ecf20Sopenharmony_civoid kvm_vgic_load(struct kvm_vcpu *vcpu)
9228c2ecf20Sopenharmony_ci{
9238c2ecf20Sopenharmony_ci	if (unlikely(!vgic_initialized(vcpu->kvm)))
9248c2ecf20Sopenharmony_ci		return;
9258c2ecf20Sopenharmony_ci
9268c2ecf20Sopenharmony_ci	if (kvm_vgic_global_state.type == VGIC_V2)
9278c2ecf20Sopenharmony_ci		vgic_v2_load(vcpu);
9288c2ecf20Sopenharmony_ci	else
9298c2ecf20Sopenharmony_ci		vgic_v3_load(vcpu);
9308c2ecf20Sopenharmony_ci}
9318c2ecf20Sopenharmony_ci
9328c2ecf20Sopenharmony_civoid kvm_vgic_put(struct kvm_vcpu *vcpu)
9338c2ecf20Sopenharmony_ci{
9348c2ecf20Sopenharmony_ci	if (unlikely(!vgic_initialized(vcpu->kvm)))
9358c2ecf20Sopenharmony_ci		return;
9368c2ecf20Sopenharmony_ci
9378c2ecf20Sopenharmony_ci	if (kvm_vgic_global_state.type == VGIC_V2)
9388c2ecf20Sopenharmony_ci		vgic_v2_put(vcpu);
9398c2ecf20Sopenharmony_ci	else
9408c2ecf20Sopenharmony_ci		vgic_v3_put(vcpu);
9418c2ecf20Sopenharmony_ci}
9428c2ecf20Sopenharmony_ci
9438c2ecf20Sopenharmony_civoid kvm_vgic_vmcr_sync(struct kvm_vcpu *vcpu)
9448c2ecf20Sopenharmony_ci{
9458c2ecf20Sopenharmony_ci	if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
9468c2ecf20Sopenharmony_ci		return;
9478c2ecf20Sopenharmony_ci
9488c2ecf20Sopenharmony_ci	if (kvm_vgic_global_state.type == VGIC_V2)
9498c2ecf20Sopenharmony_ci		vgic_v2_vmcr_sync(vcpu);
9508c2ecf20Sopenharmony_ci	else
9518c2ecf20Sopenharmony_ci		vgic_v3_vmcr_sync(vcpu);
9528c2ecf20Sopenharmony_ci}
9538c2ecf20Sopenharmony_ci
9548c2ecf20Sopenharmony_ciint kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
9558c2ecf20Sopenharmony_ci{
9568c2ecf20Sopenharmony_ci	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
9578c2ecf20Sopenharmony_ci	struct vgic_irq *irq;
9588c2ecf20Sopenharmony_ci	bool pending = false;
9598c2ecf20Sopenharmony_ci	unsigned long flags;
9608c2ecf20Sopenharmony_ci	struct vgic_vmcr vmcr;
9618c2ecf20Sopenharmony_ci
9628c2ecf20Sopenharmony_ci	if (!vcpu->kvm->arch.vgic.enabled)
9638c2ecf20Sopenharmony_ci		return false;
9648c2ecf20Sopenharmony_ci
9658c2ecf20Sopenharmony_ci	if (vcpu->arch.vgic_cpu.vgic_v3.its_vpe.pending_last)
9668c2ecf20Sopenharmony_ci		return true;
9678c2ecf20Sopenharmony_ci
9688c2ecf20Sopenharmony_ci	vgic_get_vmcr(vcpu, &vmcr);
9698c2ecf20Sopenharmony_ci
9708c2ecf20Sopenharmony_ci	raw_spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags);
9718c2ecf20Sopenharmony_ci
9728c2ecf20Sopenharmony_ci	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
9738c2ecf20Sopenharmony_ci		raw_spin_lock(&irq->irq_lock);
9748c2ecf20Sopenharmony_ci		pending = irq_is_pending(irq) && irq->enabled &&
9758c2ecf20Sopenharmony_ci			  !irq->active &&
9768c2ecf20Sopenharmony_ci			  irq->priority < vmcr.pmr;
9778c2ecf20Sopenharmony_ci		raw_spin_unlock(&irq->irq_lock);
9788c2ecf20Sopenharmony_ci
9798c2ecf20Sopenharmony_ci		if (pending)
9808c2ecf20Sopenharmony_ci			break;
9818c2ecf20Sopenharmony_ci	}
9828c2ecf20Sopenharmony_ci
9838c2ecf20Sopenharmony_ci	raw_spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);
9848c2ecf20Sopenharmony_ci
9858c2ecf20Sopenharmony_ci	return pending;
9868c2ecf20Sopenharmony_ci}
9878c2ecf20Sopenharmony_ci
9888c2ecf20Sopenharmony_civoid vgic_kick_vcpus(struct kvm *kvm)
9898c2ecf20Sopenharmony_ci{
9908c2ecf20Sopenharmony_ci	struct kvm_vcpu *vcpu;
9918c2ecf20Sopenharmony_ci	int c;
9928c2ecf20Sopenharmony_ci
9938c2ecf20Sopenharmony_ci	/*
9948c2ecf20Sopenharmony_ci	 * We've injected an interrupt, time to find out who deserves
9958c2ecf20Sopenharmony_ci	 * a good kick...
9968c2ecf20Sopenharmony_ci	 */
9978c2ecf20Sopenharmony_ci	kvm_for_each_vcpu(c, vcpu, kvm) {
9988c2ecf20Sopenharmony_ci		if (kvm_vgic_vcpu_pending_irq(vcpu)) {
9998c2ecf20Sopenharmony_ci			kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
10008c2ecf20Sopenharmony_ci			kvm_vcpu_kick(vcpu);
10018c2ecf20Sopenharmony_ci		}
10028c2ecf20Sopenharmony_ci	}
10038c2ecf20Sopenharmony_ci}
10048c2ecf20Sopenharmony_ci
10058c2ecf20Sopenharmony_cibool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int vintid)
10068c2ecf20Sopenharmony_ci{
10078c2ecf20Sopenharmony_ci	struct vgic_irq *irq;
10088c2ecf20Sopenharmony_ci	bool map_is_active;
10098c2ecf20Sopenharmony_ci	unsigned long flags;
10108c2ecf20Sopenharmony_ci
10118c2ecf20Sopenharmony_ci	if (!vgic_initialized(vcpu->kvm))
10128c2ecf20Sopenharmony_ci		return false;
10138c2ecf20Sopenharmony_ci
10148c2ecf20Sopenharmony_ci	irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
10158c2ecf20Sopenharmony_ci	raw_spin_lock_irqsave(&irq->irq_lock, flags);
10168c2ecf20Sopenharmony_ci	map_is_active = irq->hw && irq->active;
10178c2ecf20Sopenharmony_ci	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
10188c2ecf20Sopenharmony_ci	vgic_put_irq(vcpu->kvm, irq);
10198c2ecf20Sopenharmony_ci
10208c2ecf20Sopenharmony_ci	return map_is_active;
10218c2ecf20Sopenharmony_ci}
1022