162306a36Sopenharmony_ci// SPDX-License-Identifier: GPL-2.0-only 262306a36Sopenharmony_ci/* 362306a36Sopenharmony_ci * Copyright (C) 2015, 2016 ARM Ltd. 462306a36Sopenharmony_ci */ 562306a36Sopenharmony_ci 662306a36Sopenharmony_ci#include <linux/interrupt.h> 762306a36Sopenharmony_ci#include <linux/irq.h> 862306a36Sopenharmony_ci#include <linux/kvm.h> 962306a36Sopenharmony_ci#include <linux/kvm_host.h> 1062306a36Sopenharmony_ci#include <linux/list_sort.h> 1162306a36Sopenharmony_ci#include <linux/nospec.h> 1262306a36Sopenharmony_ci 1362306a36Sopenharmony_ci#include <asm/kvm_hyp.h> 1462306a36Sopenharmony_ci 1562306a36Sopenharmony_ci#include "vgic.h" 1662306a36Sopenharmony_ci 1762306a36Sopenharmony_ci#define CREATE_TRACE_POINTS 1862306a36Sopenharmony_ci#include "trace.h" 1962306a36Sopenharmony_ci 2062306a36Sopenharmony_cistruct vgic_global kvm_vgic_global_state __ro_after_init = { 2162306a36Sopenharmony_ci .gicv3_cpuif = STATIC_KEY_FALSE_INIT, 2262306a36Sopenharmony_ci}; 2362306a36Sopenharmony_ci 2462306a36Sopenharmony_ci/* 2562306a36Sopenharmony_ci * Locking order is always: 2662306a36Sopenharmony_ci * kvm->lock (mutex) 2762306a36Sopenharmony_ci * vcpu->mutex (mutex) 2862306a36Sopenharmony_ci * kvm->arch.config_lock (mutex) 2962306a36Sopenharmony_ci * its->cmd_lock (mutex) 3062306a36Sopenharmony_ci * its->its_lock (mutex) 3162306a36Sopenharmony_ci * vgic_cpu->ap_list_lock must be taken with IRQs disabled 3262306a36Sopenharmony_ci * kvm->lpi_list_lock must be taken with IRQs disabled 3362306a36Sopenharmony_ci * vgic_irq->irq_lock must be taken with IRQs disabled 3462306a36Sopenharmony_ci * 3562306a36Sopenharmony_ci * As the ap_list_lock might be taken from the timer interrupt handler, 3662306a36Sopenharmony_ci * we have to disable IRQs before taking this lock and everything lower 3762306a36Sopenharmony_ci * than it. 
3862306a36Sopenharmony_ci * 3962306a36Sopenharmony_ci * If you need to take multiple locks, always take the upper lock first, 4062306a36Sopenharmony_ci * then the lower ones, e.g. first take the its_lock, then the irq_lock. 4162306a36Sopenharmony_ci * If you are already holding a lock and need to take a higher one, you 4262306a36Sopenharmony_ci * have to drop the lower ranking lock first and re-acquire it after having 4362306a36Sopenharmony_ci * taken the upper one. 4462306a36Sopenharmony_ci * 4562306a36Sopenharmony_ci * When taking more than one ap_list_lock at the same time, always take the 4662306a36Sopenharmony_ci * lowest numbered VCPU's ap_list_lock first, so: 4762306a36Sopenharmony_ci * vcpuX->vcpu_id < vcpuY->vcpu_id: 4862306a36Sopenharmony_ci * raw_spin_lock(vcpuX->arch.vgic_cpu.ap_list_lock); 4962306a36Sopenharmony_ci * raw_spin_lock(vcpuY->arch.vgic_cpu.ap_list_lock); 5062306a36Sopenharmony_ci * 5162306a36Sopenharmony_ci * Since the VGIC must support injecting virtual interrupts from ISRs, we have 5262306a36Sopenharmony_ci * to use the raw_spin_lock_irqsave/raw_spin_unlock_irqrestore versions of outer 5362306a36Sopenharmony_ci * spinlocks for any lock that may be taken while injecting an interrupt. 5462306a36Sopenharmony_ci */ 5562306a36Sopenharmony_ci 5662306a36Sopenharmony_ci/* 5762306a36Sopenharmony_ci * Iterate over the VM's list of mapped LPIs to find the one with a 5862306a36Sopenharmony_ci * matching interrupt ID and return a reference to the IRQ structure. 
5962306a36Sopenharmony_ci */ 6062306a36Sopenharmony_cistatic struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid) 6162306a36Sopenharmony_ci{ 6262306a36Sopenharmony_ci struct vgic_dist *dist = &kvm->arch.vgic; 6362306a36Sopenharmony_ci struct vgic_irq *irq = NULL; 6462306a36Sopenharmony_ci unsigned long flags; 6562306a36Sopenharmony_ci 6662306a36Sopenharmony_ci raw_spin_lock_irqsave(&dist->lpi_list_lock, flags); 6762306a36Sopenharmony_ci 6862306a36Sopenharmony_ci list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) { 6962306a36Sopenharmony_ci if (irq->intid != intid) 7062306a36Sopenharmony_ci continue; 7162306a36Sopenharmony_ci 7262306a36Sopenharmony_ci /* 7362306a36Sopenharmony_ci * This increases the refcount, the caller is expected to 7462306a36Sopenharmony_ci * call vgic_put_irq() later once it's finished with the IRQ. 7562306a36Sopenharmony_ci */ 7662306a36Sopenharmony_ci vgic_get_irq_kref(irq); 7762306a36Sopenharmony_ci goto out_unlock; 7862306a36Sopenharmony_ci } 7962306a36Sopenharmony_ci irq = NULL; 8062306a36Sopenharmony_ci 8162306a36Sopenharmony_ciout_unlock: 8262306a36Sopenharmony_ci raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags); 8362306a36Sopenharmony_ci 8462306a36Sopenharmony_ci return irq; 8562306a36Sopenharmony_ci} 8662306a36Sopenharmony_ci 8762306a36Sopenharmony_ci/* 8862306a36Sopenharmony_ci * This looks up the virtual interrupt ID to get the corresponding 8962306a36Sopenharmony_ci * struct vgic_irq. It also increases the refcount, so any caller is expected 9062306a36Sopenharmony_ci * to call vgic_put_irq() once it's finished with this IRQ. 
9162306a36Sopenharmony_ci */ 9262306a36Sopenharmony_cistruct vgic_irq *vgic_get_irq(struct kvm *kvm, struct kvm_vcpu *vcpu, 9362306a36Sopenharmony_ci u32 intid) 9462306a36Sopenharmony_ci{ 9562306a36Sopenharmony_ci /* SGIs and PPIs */ 9662306a36Sopenharmony_ci if (intid <= VGIC_MAX_PRIVATE) { 9762306a36Sopenharmony_ci intid = array_index_nospec(intid, VGIC_MAX_PRIVATE + 1); 9862306a36Sopenharmony_ci return &vcpu->arch.vgic_cpu.private_irqs[intid]; 9962306a36Sopenharmony_ci } 10062306a36Sopenharmony_ci 10162306a36Sopenharmony_ci /* SPIs */ 10262306a36Sopenharmony_ci if (intid < (kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS)) { 10362306a36Sopenharmony_ci intid = array_index_nospec(intid, kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS); 10462306a36Sopenharmony_ci return &kvm->arch.vgic.spis[intid - VGIC_NR_PRIVATE_IRQS]; 10562306a36Sopenharmony_ci } 10662306a36Sopenharmony_ci 10762306a36Sopenharmony_ci /* LPIs */ 10862306a36Sopenharmony_ci if (intid >= VGIC_MIN_LPI) 10962306a36Sopenharmony_ci return vgic_get_lpi(kvm, intid); 11062306a36Sopenharmony_ci 11162306a36Sopenharmony_ci return NULL; 11262306a36Sopenharmony_ci} 11362306a36Sopenharmony_ci 11462306a36Sopenharmony_ci/* 11562306a36Sopenharmony_ci * We can't do anything in here, because we lack the kvm pointer to 11662306a36Sopenharmony_ci * lock and remove the item from the lpi_list. So we keep this function 11762306a36Sopenharmony_ci * empty and use the return value of kref_put() to trigger the freeing. 11862306a36Sopenharmony_ci */ 11962306a36Sopenharmony_cistatic void vgic_irq_release(struct kref *ref) 12062306a36Sopenharmony_ci{ 12162306a36Sopenharmony_ci} 12262306a36Sopenharmony_ci 12362306a36Sopenharmony_ci/* 12462306a36Sopenharmony_ci * Drop the refcount on the LPI. Must be called with lpi_list_lock held. 
 */
void __vgic_put_lpi_locked(struct kvm *kvm, struct vgic_irq *irq)
{
	struct vgic_dist *dist = &kvm->arch.vgic;

	/* Not the last reference: nothing to tear down yet. */
	if (!kref_put(&irq->refcount, vgic_irq_release))
		return;

	/*
	 * Last reference dropped: unlink the LPI from the per-VM list
	 * (the caller holds lpi_list_lock) and free its memory.
	 */
	list_del(&irq->lpi_list);
	dist->lpi_list_count--;

	kfree(irq);
}

/*
 * Drop a reference on @irq. Only LPIs (intid >= VGIC_MIN_LPI) are
 * refcounted and dynamically allocated; for all other interrupts this
 * is a no-op.
 */
void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	unsigned long flags;

	if (irq->intid < VGIC_MIN_LPI)
		return;

	raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
	__vgic_put_lpi_locked(kvm, irq);
	raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
}

/*
 * Remove every LPI from this VCPU's ap_list, dropping the reference that
 * was taken when each one was queued there.
 */
void vgic_flush_pending_lpis(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_irq *irq, *tmp;
	unsigned long flags;

	raw_spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags);

	list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) {
		if (irq->intid >= VGIC_MIN_LPI) {
			/* Lock order: ap_list_lock, then irq_lock. */
			raw_spin_lock(&irq->irq_lock);
			list_del(&irq->ap_list);
			irq->vcpu = NULL;
			raw_spin_unlock(&irq->irq_lock);
			vgic_put_irq(vcpu->kvm, irq);
		}
	}

	raw_spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);
}

/* Propagate @pending to the physical interrupt backing @irq. */
void vgic_irq_set_phys_pending(struct vgic_irq *irq, bool pending)
{
	WARN_ON(irq_set_irqchip_state(irq->host_irq,
				      IRQCHIP_STATE_PENDING,
				      pending));
}

/*
 * Read the input line level of the physical interrupt backing @irq,
 * preferring the registered ops callback over querying the host irqchip.
 * Only valid for HW-mapped interrupts (irq->hw).
 */
bool vgic_get_phys_line_level(struct vgic_irq *irq)
{
	bool line_level;

	BUG_ON(!irq->hw);

	if (irq->ops && irq->ops->get_input_level)
		return irq->ops->get_input_level(irq->intid);

	WARN_ON(irq_get_irqchip_state(irq->host_irq,
				      IRQCHIP_STATE_PENDING,
				      &line_level));
	return line_level;
}

/* Set/Clear the physical active state */
void vgic_irq_set_phys_active(struct vgic_irq *irq, bool active)
{

	BUG_ON(!irq->hw);
	WARN_ON(irq_set_irqchip_state(irq->host_irq,
				      IRQCHIP_STATE_ACTIVE,
				      active));
}

/**
 * kvm_vgic_target_oracle - compute the target vcpu for an irq
 *
 * @irq:	The irq to route. Must be already locked.
 *
 * Based on the current state of the interrupt (enabled, pending,
 * active, vcpu and target_vcpu), compute the next vcpu this should be
 * given to. Return NULL if this shouldn't be injected at all.
 *
 * Requires the IRQ lock to be held.
 */
static struct kvm_vcpu *vgic_target_oracle(struct vgic_irq *irq)
{
	lockdep_assert_held(&irq->irq_lock);

	/* If the interrupt is active, it must stay on the current vcpu */
	if (irq->active)
		return irq->vcpu ? : irq->target_vcpu;

	/*
	 * If the IRQ is not active but enabled and pending, we should direct
	 * it to its configured target VCPU.
	 * If the distributor is disabled, pending interrupts shouldn't be
	 * forwarded.
	 */
	if (irq->enabled && irq_is_pending(irq)) {
		if (unlikely(irq->target_vcpu &&
			     !irq->target_vcpu->kvm->arch.vgic.enabled))
			return NULL;

		return irq->target_vcpu;
	}

	/* If neither active nor pending and enabled, then this IRQ should not
	 * be queued to any VCPU.
	 */
	return NULL;
}

/*
 * The order of items in the ap_lists defines how we'll pack things in LRs as
 * well, the first items in the list being the first things populated in the
 * LRs.
 *
 * A hard rule is that active interrupts can never be pushed out of the LRs
 * (and therefore take priority) since we cannot reliably trap on deactivation
 * of IRQs and therefore they have to be present in the LRs.
 *
 * Otherwise things should be sorted by the priority field and the GIC
 * hardware support will take care of preemption of priority groups etc.
 *
 * Return negative if "a" sorts before "b", 0 to preserve order, and positive
 * to sort "b" before "a".
 */
static int vgic_irq_cmp(void *priv, const struct list_head *a,
			const struct list_head *b)
{
	struct vgic_irq *irqa = container_of(a, struct vgic_irq, ap_list);
	struct vgic_irq *irqb = container_of(b, struct vgic_irq, ap_list);
	bool penda, pendb;
	int ret;

	/*
	 * list_sort may call this function with the same element when
	 * the list is fairly long.
	 */
	if (unlikely(irqa == irqb))
		return 0;

	/* Take both irq locks; _nested tells lockdep this is intentional. */
	raw_spin_lock(&irqa->irq_lock);
	raw_spin_lock_nested(&irqb->irq_lock, SINGLE_DEPTH_NESTING);

	if (irqa->active || irqb->active) {
		/* Active IRQs sort first (the "hard rule" above). */
		ret = (int)irqb->active - (int)irqa->active;
		goto out;
	}

	penda = irqa->enabled && irq_is_pending(irqa);
	pendb = irqb->enabled && irq_is_pending(irqb);

	if (!penda || !pendb) {
		/* A pending+enabled IRQ sorts before one that isn't. */
		ret = (int)pendb - (int)penda;
		goto out;
	}

	/* Both pending and enabled, sort by priority */
	ret = irqa->priority - irqb->priority;
out:
	raw_spin_unlock(&irqb->irq_lock);
	raw_spin_unlock(&irqa->irq_lock);
	return ret;
}

/* Must be called with the ap_list_lock held */
static void vgic_sort_ap_list(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

	lockdep_assert_held(&vgic_cpu->ap_list_lock);

	list_sort(NULL, &vgic_cpu->ap_list_head, vgic_irq_cmp);
}

/*
 * Only valid injection if changing level for level-triggered IRQs or for a
 * 
rising edge, and in-kernel connected IRQ lines can only be controlled by 31162306a36Sopenharmony_ci * their owner. 31262306a36Sopenharmony_ci */ 31362306a36Sopenharmony_cistatic bool vgic_validate_injection(struct vgic_irq *irq, bool level, void *owner) 31462306a36Sopenharmony_ci{ 31562306a36Sopenharmony_ci if (irq->owner != owner) 31662306a36Sopenharmony_ci return false; 31762306a36Sopenharmony_ci 31862306a36Sopenharmony_ci switch (irq->config) { 31962306a36Sopenharmony_ci case VGIC_CONFIG_LEVEL: 32062306a36Sopenharmony_ci return irq->line_level != level; 32162306a36Sopenharmony_ci case VGIC_CONFIG_EDGE: 32262306a36Sopenharmony_ci return level; 32362306a36Sopenharmony_ci } 32462306a36Sopenharmony_ci 32562306a36Sopenharmony_ci return false; 32662306a36Sopenharmony_ci} 32762306a36Sopenharmony_ci 32862306a36Sopenharmony_ci/* 32962306a36Sopenharmony_ci * Check whether an IRQ needs to (and can) be queued to a VCPU's ap list. 33062306a36Sopenharmony_ci * Do the queuing if necessary, taking the right locks in the right order. 33162306a36Sopenharmony_ci * Returns true when the IRQ was queued, false otherwise. 33262306a36Sopenharmony_ci * 33362306a36Sopenharmony_ci * Needs to be entered with the IRQ lock already held, but will return 33462306a36Sopenharmony_ci * with all locks dropped. 
 */
bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq,
			   unsigned long flags)
{
	struct kvm_vcpu *vcpu;

	lockdep_assert_held(&irq->irq_lock);

retry:
	vcpu = vgic_target_oracle(irq);
	if (irq->vcpu || !vcpu) {
		/*
		 * If this IRQ is already on a VCPU's ap_list, then it
		 * cannot be moved or modified and there is no more work for
		 * us to do.
		 *
		 * Otherwise, if the irq is not pending and enabled, it does
		 * not need to be inserted into an ap_list and there is also
		 * no more work for us to do.
		 */
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

		/*
		 * We have to kick the VCPU here, because we could be
		 * queueing an edge-triggered interrupt for which we
		 * get no EOI maintenance interrupt. In that case,
		 * while the IRQ is already on the VCPU's AP list, the
		 * VCPU could have EOI'ed the original interrupt and
		 * won't see this one until it exits for some other
		 * reason.
		 */
		if (vcpu) {
			kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
			kvm_vcpu_kick(vcpu);
		}
		return false;
	}

	/*
	 * We must unlock the irq lock to take the ap_list_lock where
	 * we are going to insert this new pending interrupt.
	 */
	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

	/* someone can do stuff here, which we re-check below */

	raw_spin_lock_irqsave(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
	raw_spin_lock(&irq->irq_lock);

	/*
	 * Did something change behind our backs?
	 *
	 * There are two cases:
	 * 1) The irq lost its pending state or was disabled behind our
	 *    backs and/or it was queued to another VCPU's ap_list.
	 * 2) Someone changed the affinity on this irq behind our
	 *    backs and we are now holding the wrong ap_list_lock.
	 *
	 * In both cases, drop the locks and retry.
	 */

	if (unlikely(irq->vcpu || vcpu != vgic_target_oracle(irq))) {
		raw_spin_unlock(&irq->irq_lock);
		raw_spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock,
					   flags);

		/* Re-take only the irq_lock and recompute the target. */
		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		goto retry;
	}

	/*
	 * Grab a reference to the irq to reflect the fact that it is
	 * now in the ap_list (dropped again in vgic_prune_ap_list()).
	 */
	vgic_get_irq_kref(irq);
	list_add_tail(&irq->ap_list, &vcpu->arch.vgic_cpu.ap_list_head);
	irq->vcpu = vcpu;

	raw_spin_unlock(&irq->irq_lock);
	raw_spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags);

	/* Tell the target VCPU it has something new to look at. */
	kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
	kvm_vcpu_kick(vcpu);

	return true;
}

/**
 * kvm_vgic_inject_irq - Inject an IRQ from a device to the vgic
 * @kvm:	The VM structure pointer
 * @cpuid:	The CPU for PPIs
 * @intid:	The INTID to inject a new state to.
 * @level:	Edge-triggered:  true:  to trigger the interrupt
 *			          false: to ignore the call
 *		Level-sensitive  true:  raise the input signal
 *			          false: lower the input signal
 * @owner:	The opaque pointer to the owner of the IRQ being raised to verify
 *		that the caller is allowed to inject this IRQ.  Userspace
 *		injections will have owner == NULL.
 *
 * The VGIC is not concerned with devices being active-LOW or active-HIGH for
 * level-sensitive interrupts.  You can think of the level parameter as 1
 * being HIGH and 0 being LOW and all devices being active-HIGH.
 */
int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
			bool level, void *owner)
{
	struct kvm_vcpu *vcpu;
	struct vgic_irq *irq;
	unsigned long flags;
	int ret;

	trace_vgic_update_irq_pending(cpuid, intid, level);

	/* Make sure the vgic is fully initialized before injecting. */
	ret = vgic_lazy_init(kvm);
	if (ret)
		return ret;

	/* Private interrupts (SGI/PPI) need a valid target vcpu. */
	vcpu = kvm_get_vcpu(kvm, cpuid);
	if (!vcpu && intid < VGIC_NR_PRIVATE_IRQS)
		return -EINVAL;

	irq = vgic_get_irq(kvm, vcpu, intid);
	if (!irq)
		return -EINVAL;

	raw_spin_lock_irqsave(&irq->irq_lock, flags);

	if (!vgic_validate_injection(irq, level, owner)) {
		/* Nothing to see here, move along... */
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(kvm, irq);
		return 0;
	}

	/* Record the new input state before (possibly) queueing the IRQ. */
	if (irq->config == VGIC_CONFIG_LEVEL)
		irq->line_level = level;
	else
		irq->pending_latch = true;

	/* Drops the irq_lock on all paths. */
	vgic_queue_irq_unlock(kvm, irq, flags);
	vgic_put_irq(kvm, irq);

	return 0;
}

/* @irq->irq_lock must be held */
static int kvm_vgic_map_irq(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
			    unsigned int host_irq,
			    struct irq_ops *ops)
{
	struct irq_desc *desc;
	struct irq_data *data;

	/*
	 * Find the physical IRQ number corresponding to @host_irq
	 */
	desc = irq_to_desc(host_irq);
	if (!desc) {
		kvm_err("%s: no interrupt descriptor\n", __func__);
		return -EINVAL;
	}
	data = irq_desc_get_irq_data(desc);
	/* Walk to the bottom of the irq domain hierarchy for the hwirq. */
	while (data->parent_data)
		data = data->parent_data;

	irq->hw = true;
	irq->host_irq = host_irq;
	irq->hwintid = data->hwirq;
	irq->ops = ops;
	return 0;
}

/* @irq->irq_lock must be held */
static inline void kvm_vgic_unmap_irq(struct vgic_irq *irq)
{
	irq->hw = false;
	irq->hwintid = 0;
	irq->ops = NULL;
}

/*
 * Associate the virtual interrupt @vintid of @vcpu with the physical
 * interrupt behind @host_irq, optionally attaching @ops callbacks.
 */
int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq,
			  u32 vintid, struct irq_ops *ops)
{
	struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
	unsigned long flags;
	int ret;

	BUG_ON(!irq);

	raw_spin_lock_irqsave(&irq->irq_lock, flags);
	ret = kvm_vgic_map_irq(vcpu, irq, host_irq, ops);
	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
	vgic_put_irq(vcpu->kvm, irq);

	return ret;
}

/**
 * kvm_vgic_reset_mapped_irq - Reset a mapped IRQ
 * @vcpu: The VCPU pointer
 * @vintid: The INTID of the interrupt
 *
 * Reset the active and pending states of a mapped interrupt. Kernel
 * subsystems injecting mapped interrupts should reset their interrupt lines
 * when we are doing a reset of the VM.
 */
void kvm_vgic_reset_mapped_irq(struct kvm_vcpu *vcpu, u32 vintid)
{
	struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
	unsigned long flags;

	/*
	 * NOTE(review): irq->hw is read outside the irq_lock here —
	 * presumably benign for the reset path; confirm against callers.
	 */
	if (!irq->hw)
		goto out;

	raw_spin_lock_irqsave(&irq->irq_lock, flags);
	irq->active = false;
	irq->pending_latch = false;
	irq->line_level = false;
	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
out:
	vgic_put_irq(vcpu->kvm, irq);
}

/* Break the association between @vintid and its physical interrupt. */
int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid)
{
	struct vgic_irq *irq;
	unsigned long flags;

	if (!vgic_initialized(vcpu->kvm))
		return -EAGAIN;

	irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
	BUG_ON(!irq);

	raw_spin_lock_irqsave(&irq->irq_lock, flags);
	kvm_vgic_unmap_irq(irq);
	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
	vgic_put_irq(vcpu->kvm, irq);

	return 0;
}

/*
 * Return the host hwintid mapped behind @vintid, or -1 if the virtual
 * interrupt is not HW-mapped.
 */
int kvm_vgic_get_map(struct kvm_vcpu *vcpu, unsigned int vintid)
{
	struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
	unsigned long flags;
	int ret = -1;

	raw_spin_lock_irqsave(&irq->irq_lock, flags);
	if (irq->hw)
		ret = irq->hwintid;
	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

	vgic_put_irq(vcpu->kvm, irq);
	return ret;
}

/**
 * kvm_vgic_set_owner - Set the owner of an interrupt for a VM
 *
 * @vcpu:	Pointer to the VCPU (used for PPIs)
 * @intid:	The virtual INTID identifying the interrupt (PPI or SPI)
 * @owner:	Opaque pointer to the owner
 *
 * Returns 0 if intid is not already used by another in-kernel device and the
 * owner is set, otherwise returns an error code.
60262306a36Sopenharmony_ci */ 60362306a36Sopenharmony_ciint kvm_vgic_set_owner(struct kvm_vcpu *vcpu, unsigned int intid, void *owner) 60462306a36Sopenharmony_ci{ 60562306a36Sopenharmony_ci struct vgic_irq *irq; 60662306a36Sopenharmony_ci unsigned long flags; 60762306a36Sopenharmony_ci int ret = 0; 60862306a36Sopenharmony_ci 60962306a36Sopenharmony_ci if (!vgic_initialized(vcpu->kvm)) 61062306a36Sopenharmony_ci return -EAGAIN; 61162306a36Sopenharmony_ci 61262306a36Sopenharmony_ci /* SGIs and LPIs cannot be wired up to any device */ 61362306a36Sopenharmony_ci if (!irq_is_ppi(intid) && !vgic_valid_spi(vcpu->kvm, intid)) 61462306a36Sopenharmony_ci return -EINVAL; 61562306a36Sopenharmony_ci 61662306a36Sopenharmony_ci irq = vgic_get_irq(vcpu->kvm, vcpu, intid); 61762306a36Sopenharmony_ci raw_spin_lock_irqsave(&irq->irq_lock, flags); 61862306a36Sopenharmony_ci if (irq->owner && irq->owner != owner) 61962306a36Sopenharmony_ci ret = -EEXIST; 62062306a36Sopenharmony_ci else 62162306a36Sopenharmony_ci irq->owner = owner; 62262306a36Sopenharmony_ci raw_spin_unlock_irqrestore(&irq->irq_lock, flags); 62362306a36Sopenharmony_ci 62462306a36Sopenharmony_ci return ret; 62562306a36Sopenharmony_ci} 62662306a36Sopenharmony_ci 62762306a36Sopenharmony_ci/** 62862306a36Sopenharmony_ci * vgic_prune_ap_list - Remove non-relevant interrupts from the list 62962306a36Sopenharmony_ci * 63062306a36Sopenharmony_ci * @vcpu: The VCPU pointer 63162306a36Sopenharmony_ci * 63262306a36Sopenharmony_ci * Go over the list of "interesting" interrupts, and prune those that we 63362306a36Sopenharmony_ci * won't have to consider in the near future. 
/**
 * vgic_prune_ap_list - Remove non-relevant interrupts from the list
 *
 * @vcpu: The VCPU pointer
 *
 * Go over the list of "interesting" interrupts, and prune those that we
 * won't have to consider in the near future.
 *
 * Must be called with IRQs disabled; may migrate interrupts to another
 * VCPU's ap_list, in which case that VCPU is kicked.
 */
static void vgic_prune_ap_list(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_irq *irq, *tmp;

	DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());

retry:
	raw_spin_lock(&vgic_cpu->ap_list_lock);

	list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) {
		struct kvm_vcpu *target_vcpu, *vcpuA, *vcpuB;
		bool target_vcpu_needs_kick = false;

		raw_spin_lock(&irq->irq_lock);

		BUG_ON(vcpu != irq->vcpu);

		target_vcpu = vgic_target_oracle(irq);

		if (!target_vcpu) {
			/*
			 * We don't need to process this interrupt any
			 * further, move it off the list.
			 */
			list_del(&irq->ap_list);
			irq->vcpu = NULL;
			raw_spin_unlock(&irq->irq_lock);

			/*
			 * This vgic_put_irq call matches the
			 * vgic_get_irq_kref in vgic_queue_irq_unlock,
			 * where we added the LPI to the ap_list. As
			 * we remove the irq from the list, we also
			 * drop the refcount.
			 */
			vgic_put_irq(vcpu->kvm, irq);
			continue;
		}

		if (target_vcpu == vcpu) {
			/* We're on the right CPU */
			raw_spin_unlock(&irq->irq_lock);
			continue;
		}

		/* This interrupt looks like it has to be migrated. */

		raw_spin_unlock(&irq->irq_lock);
		raw_spin_unlock(&vgic_cpu->ap_list_lock);

		/*
		 * Ensure locking order by always locking the smallest
		 * ID first.
		 */
		if (vcpu->vcpu_id < target_vcpu->vcpu_id) {
			vcpuA = vcpu;
			vcpuB = target_vcpu;
		} else {
			vcpuA = target_vcpu;
			vcpuB = vcpu;
		}

		raw_spin_lock(&vcpuA->arch.vgic_cpu.ap_list_lock);
		raw_spin_lock_nested(&vcpuB->arch.vgic_cpu.ap_list_lock,
				     SINGLE_DEPTH_NESTING);
		raw_spin_lock(&irq->irq_lock);

		/*
		 * If the affinity has been preserved, move the
		 * interrupt around. Otherwise, it means things have
		 * changed while the interrupt was unlocked, and we
		 * need to replay this.
		 *
		 * In all cases, we cannot trust the list not to have
		 * changed, so we restart from the beginning.
		 */
		if (target_vcpu == vgic_target_oracle(irq)) {
			struct vgic_cpu *new_cpu = &target_vcpu->arch.vgic_cpu;

			list_del(&irq->ap_list);
			irq->vcpu = target_vcpu;
			list_add_tail(&irq->ap_list, &new_cpu->ap_list_head);
			target_vcpu_needs_kick = true;
		}

		raw_spin_unlock(&irq->irq_lock);
		raw_spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock);
		raw_spin_unlock(&vcpuA->arch.vgic_cpu.ap_list_lock);

		if (target_vcpu_needs_kick) {
			kvm_make_request(KVM_REQ_IRQ_PENDING, target_vcpu);
			kvm_vcpu_kick(target_vcpu);
		}

		goto retry;
	}

	raw_spin_unlock(&vgic_cpu->ap_list_lock);
}

/* Dispatch folding of the LR state to the GICv2 or GICv3 backend. */
static inline void vgic_fold_lr_state(struct kvm_vcpu *vcpu)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_fold_lr_state(vcpu);
	else
		vgic_v3_fold_lr_state(vcpu);
}
*/ 74562306a36Sopenharmony_cistatic inline void vgic_populate_lr(struct kvm_vcpu *vcpu, 74662306a36Sopenharmony_ci struct vgic_irq *irq, int lr) 74762306a36Sopenharmony_ci{ 74862306a36Sopenharmony_ci lockdep_assert_held(&irq->irq_lock); 74962306a36Sopenharmony_ci 75062306a36Sopenharmony_ci if (kvm_vgic_global_state.type == VGIC_V2) 75162306a36Sopenharmony_ci vgic_v2_populate_lr(vcpu, irq, lr); 75262306a36Sopenharmony_ci else 75362306a36Sopenharmony_ci vgic_v3_populate_lr(vcpu, irq, lr); 75462306a36Sopenharmony_ci} 75562306a36Sopenharmony_ci 75662306a36Sopenharmony_cistatic inline void vgic_clear_lr(struct kvm_vcpu *vcpu, int lr) 75762306a36Sopenharmony_ci{ 75862306a36Sopenharmony_ci if (kvm_vgic_global_state.type == VGIC_V2) 75962306a36Sopenharmony_ci vgic_v2_clear_lr(vcpu, lr); 76062306a36Sopenharmony_ci else 76162306a36Sopenharmony_ci vgic_v3_clear_lr(vcpu, lr); 76262306a36Sopenharmony_ci} 76362306a36Sopenharmony_ci 76462306a36Sopenharmony_cistatic inline void vgic_set_underflow(struct kvm_vcpu *vcpu) 76562306a36Sopenharmony_ci{ 76662306a36Sopenharmony_ci if (kvm_vgic_global_state.type == VGIC_V2) 76762306a36Sopenharmony_ci vgic_v2_set_underflow(vcpu); 76862306a36Sopenharmony_ci else 76962306a36Sopenharmony_ci vgic_v3_set_underflow(vcpu); 77062306a36Sopenharmony_ci} 77162306a36Sopenharmony_ci 77262306a36Sopenharmony_ci/* Requires the ap_list_lock to be held. 
*/ 77362306a36Sopenharmony_cistatic int compute_ap_list_depth(struct kvm_vcpu *vcpu, 77462306a36Sopenharmony_ci bool *multi_sgi) 77562306a36Sopenharmony_ci{ 77662306a36Sopenharmony_ci struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; 77762306a36Sopenharmony_ci struct vgic_irq *irq; 77862306a36Sopenharmony_ci int count = 0; 77962306a36Sopenharmony_ci 78062306a36Sopenharmony_ci *multi_sgi = false; 78162306a36Sopenharmony_ci 78262306a36Sopenharmony_ci lockdep_assert_held(&vgic_cpu->ap_list_lock); 78362306a36Sopenharmony_ci 78462306a36Sopenharmony_ci list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) { 78562306a36Sopenharmony_ci int w; 78662306a36Sopenharmony_ci 78762306a36Sopenharmony_ci raw_spin_lock(&irq->irq_lock); 78862306a36Sopenharmony_ci /* GICv2 SGIs can count for more than one... */ 78962306a36Sopenharmony_ci w = vgic_irq_get_lr_count(irq); 79062306a36Sopenharmony_ci raw_spin_unlock(&irq->irq_lock); 79162306a36Sopenharmony_ci 79262306a36Sopenharmony_ci count += w; 79362306a36Sopenharmony_ci *multi_sgi |= (w > 1); 79462306a36Sopenharmony_ci } 79562306a36Sopenharmony_ci return count; 79662306a36Sopenharmony_ci} 79762306a36Sopenharmony_ci 79862306a36Sopenharmony_ci/* Requires the VCPU's ap_list_lock to be held. 
*/ 79962306a36Sopenharmony_cistatic void vgic_flush_lr_state(struct kvm_vcpu *vcpu) 80062306a36Sopenharmony_ci{ 80162306a36Sopenharmony_ci struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; 80262306a36Sopenharmony_ci struct vgic_irq *irq; 80362306a36Sopenharmony_ci int count; 80462306a36Sopenharmony_ci bool multi_sgi; 80562306a36Sopenharmony_ci u8 prio = 0xff; 80662306a36Sopenharmony_ci int i = 0; 80762306a36Sopenharmony_ci 80862306a36Sopenharmony_ci lockdep_assert_held(&vgic_cpu->ap_list_lock); 80962306a36Sopenharmony_ci 81062306a36Sopenharmony_ci count = compute_ap_list_depth(vcpu, &multi_sgi); 81162306a36Sopenharmony_ci if (count > kvm_vgic_global_state.nr_lr || multi_sgi) 81262306a36Sopenharmony_ci vgic_sort_ap_list(vcpu); 81362306a36Sopenharmony_ci 81462306a36Sopenharmony_ci count = 0; 81562306a36Sopenharmony_ci 81662306a36Sopenharmony_ci list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) { 81762306a36Sopenharmony_ci raw_spin_lock(&irq->irq_lock); 81862306a36Sopenharmony_ci 81962306a36Sopenharmony_ci /* 82062306a36Sopenharmony_ci * If we have multi-SGIs in the pipeline, we need to 82162306a36Sopenharmony_ci * guarantee that they are all seen before any IRQ of 82262306a36Sopenharmony_ci * lower priority. In that case, we need to filter out 82362306a36Sopenharmony_ci * these interrupts by exiting early. This is easy as 82462306a36Sopenharmony_ci * the AP list has been sorted already. 
82562306a36Sopenharmony_ci */ 82662306a36Sopenharmony_ci if (multi_sgi && irq->priority > prio) { 82762306a36Sopenharmony_ci _raw_spin_unlock(&irq->irq_lock); 82862306a36Sopenharmony_ci break; 82962306a36Sopenharmony_ci } 83062306a36Sopenharmony_ci 83162306a36Sopenharmony_ci if (likely(vgic_target_oracle(irq) == vcpu)) { 83262306a36Sopenharmony_ci vgic_populate_lr(vcpu, irq, count++); 83362306a36Sopenharmony_ci 83462306a36Sopenharmony_ci if (irq->source) 83562306a36Sopenharmony_ci prio = irq->priority; 83662306a36Sopenharmony_ci } 83762306a36Sopenharmony_ci 83862306a36Sopenharmony_ci raw_spin_unlock(&irq->irq_lock); 83962306a36Sopenharmony_ci 84062306a36Sopenharmony_ci if (count == kvm_vgic_global_state.nr_lr) { 84162306a36Sopenharmony_ci if (!list_is_last(&irq->ap_list, 84262306a36Sopenharmony_ci &vgic_cpu->ap_list_head)) 84362306a36Sopenharmony_ci vgic_set_underflow(vcpu); 84462306a36Sopenharmony_ci break; 84562306a36Sopenharmony_ci } 84662306a36Sopenharmony_ci } 84762306a36Sopenharmony_ci 84862306a36Sopenharmony_ci /* Nuke remaining LRs */ 84962306a36Sopenharmony_ci for (i = count ; i < kvm_vgic_global_state.nr_lr; i++) 85062306a36Sopenharmony_ci vgic_clear_lr(vcpu, i); 85162306a36Sopenharmony_ci 85262306a36Sopenharmony_ci if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) 85362306a36Sopenharmony_ci vcpu->arch.vgic_cpu.vgic_v2.used_lrs = count; 85462306a36Sopenharmony_ci else 85562306a36Sopenharmony_ci vcpu->arch.vgic_cpu.vgic_v3.used_lrs = count; 85662306a36Sopenharmony_ci} 85762306a36Sopenharmony_ci 85862306a36Sopenharmony_cistatic inline bool can_access_vgic_from_kernel(void) 85962306a36Sopenharmony_ci{ 86062306a36Sopenharmony_ci /* 86162306a36Sopenharmony_ci * GICv2 can always be accessed from the kernel because it is 86262306a36Sopenharmony_ci * memory-mapped, and VHE systems can access GICv3 EL2 system 86362306a36Sopenharmony_ci * registers. 
86462306a36Sopenharmony_ci */ 86562306a36Sopenharmony_ci return !static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif) || has_vhe(); 86662306a36Sopenharmony_ci} 86762306a36Sopenharmony_ci 86862306a36Sopenharmony_cistatic inline void vgic_save_state(struct kvm_vcpu *vcpu) 86962306a36Sopenharmony_ci{ 87062306a36Sopenharmony_ci if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) 87162306a36Sopenharmony_ci vgic_v2_save_state(vcpu); 87262306a36Sopenharmony_ci else 87362306a36Sopenharmony_ci __vgic_v3_save_state(&vcpu->arch.vgic_cpu.vgic_v3); 87462306a36Sopenharmony_ci} 87562306a36Sopenharmony_ci 87662306a36Sopenharmony_ci/* Sync back the hardware VGIC state into our emulation after a guest's run. */ 87762306a36Sopenharmony_civoid kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu) 87862306a36Sopenharmony_ci{ 87962306a36Sopenharmony_ci int used_lrs; 88062306a36Sopenharmony_ci 88162306a36Sopenharmony_ci /* An empty ap_list_head implies used_lrs == 0 */ 88262306a36Sopenharmony_ci if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head)) 88362306a36Sopenharmony_ci return; 88462306a36Sopenharmony_ci 88562306a36Sopenharmony_ci if (can_access_vgic_from_kernel()) 88662306a36Sopenharmony_ci vgic_save_state(vcpu); 88762306a36Sopenharmony_ci 88862306a36Sopenharmony_ci if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) 88962306a36Sopenharmony_ci used_lrs = vcpu->arch.vgic_cpu.vgic_v2.used_lrs; 89062306a36Sopenharmony_ci else 89162306a36Sopenharmony_ci used_lrs = vcpu->arch.vgic_cpu.vgic_v3.used_lrs; 89262306a36Sopenharmony_ci 89362306a36Sopenharmony_ci if (used_lrs) 89462306a36Sopenharmony_ci vgic_fold_lr_state(vcpu); 89562306a36Sopenharmony_ci vgic_prune_ap_list(vcpu); 89662306a36Sopenharmony_ci} 89762306a36Sopenharmony_ci 89862306a36Sopenharmony_cistatic inline void vgic_restore_state(struct kvm_vcpu *vcpu) 89962306a36Sopenharmony_ci{ 90062306a36Sopenharmony_ci if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) 90162306a36Sopenharmony_ci 
vgic_v2_restore_state(vcpu); 90262306a36Sopenharmony_ci else 90362306a36Sopenharmony_ci __vgic_v3_restore_state(&vcpu->arch.vgic_cpu.vgic_v3); 90462306a36Sopenharmony_ci} 90562306a36Sopenharmony_ci 90662306a36Sopenharmony_ci/* Flush our emulation state into the GIC hardware before entering the guest. */ 90762306a36Sopenharmony_civoid kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu) 90862306a36Sopenharmony_ci{ 90962306a36Sopenharmony_ci /* 91062306a36Sopenharmony_ci * If there are no virtual interrupts active or pending for this 91162306a36Sopenharmony_ci * VCPU, then there is no work to do and we can bail out without 91262306a36Sopenharmony_ci * taking any lock. There is a potential race with someone injecting 91362306a36Sopenharmony_ci * interrupts to the VCPU, but it is a benign race as the VCPU will 91462306a36Sopenharmony_ci * either observe the new interrupt before or after doing this check, 91562306a36Sopenharmony_ci * and introducing additional synchronization mechanism doesn't change 91662306a36Sopenharmony_ci * this. 91762306a36Sopenharmony_ci * 91862306a36Sopenharmony_ci * Note that we still need to go through the whole thing if anything 91962306a36Sopenharmony_ci * can be directly injected (GICv4). 
92062306a36Sopenharmony_ci */ 92162306a36Sopenharmony_ci if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head) && 92262306a36Sopenharmony_ci !vgic_supports_direct_msis(vcpu->kvm)) 92362306a36Sopenharmony_ci return; 92462306a36Sopenharmony_ci 92562306a36Sopenharmony_ci DEBUG_SPINLOCK_BUG_ON(!irqs_disabled()); 92662306a36Sopenharmony_ci 92762306a36Sopenharmony_ci if (!list_empty(&vcpu->arch.vgic_cpu.ap_list_head)) { 92862306a36Sopenharmony_ci raw_spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock); 92962306a36Sopenharmony_ci vgic_flush_lr_state(vcpu); 93062306a36Sopenharmony_ci raw_spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock); 93162306a36Sopenharmony_ci } 93262306a36Sopenharmony_ci 93362306a36Sopenharmony_ci if (can_access_vgic_from_kernel()) 93462306a36Sopenharmony_ci vgic_restore_state(vcpu); 93562306a36Sopenharmony_ci 93662306a36Sopenharmony_ci if (vgic_supports_direct_msis(vcpu->kvm)) 93762306a36Sopenharmony_ci vgic_v4_commit(vcpu); 93862306a36Sopenharmony_ci} 93962306a36Sopenharmony_ci 94062306a36Sopenharmony_civoid kvm_vgic_load(struct kvm_vcpu *vcpu) 94162306a36Sopenharmony_ci{ 94262306a36Sopenharmony_ci if (unlikely(!vgic_initialized(vcpu->kvm))) 94362306a36Sopenharmony_ci return; 94462306a36Sopenharmony_ci 94562306a36Sopenharmony_ci if (kvm_vgic_global_state.type == VGIC_V2) 94662306a36Sopenharmony_ci vgic_v2_load(vcpu); 94762306a36Sopenharmony_ci else 94862306a36Sopenharmony_ci vgic_v3_load(vcpu); 94962306a36Sopenharmony_ci} 95062306a36Sopenharmony_ci 95162306a36Sopenharmony_civoid kvm_vgic_put(struct kvm_vcpu *vcpu) 95262306a36Sopenharmony_ci{ 95362306a36Sopenharmony_ci if (unlikely(!vgic_initialized(vcpu->kvm))) 95462306a36Sopenharmony_ci return; 95562306a36Sopenharmony_ci 95662306a36Sopenharmony_ci if (kvm_vgic_global_state.type == VGIC_V2) 95762306a36Sopenharmony_ci vgic_v2_put(vcpu); 95862306a36Sopenharmony_ci else 95962306a36Sopenharmony_ci vgic_v3_put(vcpu); 96062306a36Sopenharmony_ci} 96162306a36Sopenharmony_ci 96262306a36Sopenharmony_civoid 
kvm_vgic_vmcr_sync(struct kvm_vcpu *vcpu) 96362306a36Sopenharmony_ci{ 96462306a36Sopenharmony_ci if (unlikely(!irqchip_in_kernel(vcpu->kvm))) 96562306a36Sopenharmony_ci return; 96662306a36Sopenharmony_ci 96762306a36Sopenharmony_ci if (kvm_vgic_global_state.type == VGIC_V2) 96862306a36Sopenharmony_ci vgic_v2_vmcr_sync(vcpu); 96962306a36Sopenharmony_ci else 97062306a36Sopenharmony_ci vgic_v3_vmcr_sync(vcpu); 97162306a36Sopenharmony_ci} 97262306a36Sopenharmony_ci 97362306a36Sopenharmony_ciint kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu) 97462306a36Sopenharmony_ci{ 97562306a36Sopenharmony_ci struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; 97662306a36Sopenharmony_ci struct vgic_irq *irq; 97762306a36Sopenharmony_ci bool pending = false; 97862306a36Sopenharmony_ci unsigned long flags; 97962306a36Sopenharmony_ci struct vgic_vmcr vmcr; 98062306a36Sopenharmony_ci 98162306a36Sopenharmony_ci if (!vcpu->kvm->arch.vgic.enabled) 98262306a36Sopenharmony_ci return false; 98362306a36Sopenharmony_ci 98462306a36Sopenharmony_ci if (vcpu->arch.vgic_cpu.vgic_v3.its_vpe.pending_last) 98562306a36Sopenharmony_ci return true; 98662306a36Sopenharmony_ci 98762306a36Sopenharmony_ci vgic_get_vmcr(vcpu, &vmcr); 98862306a36Sopenharmony_ci 98962306a36Sopenharmony_ci raw_spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags); 99062306a36Sopenharmony_ci 99162306a36Sopenharmony_ci list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) { 99262306a36Sopenharmony_ci raw_spin_lock(&irq->irq_lock); 99362306a36Sopenharmony_ci pending = irq_is_pending(irq) && irq->enabled && 99462306a36Sopenharmony_ci !irq->active && 99562306a36Sopenharmony_ci irq->priority < vmcr.pmr; 99662306a36Sopenharmony_ci raw_spin_unlock(&irq->irq_lock); 99762306a36Sopenharmony_ci 99862306a36Sopenharmony_ci if (pending) 99962306a36Sopenharmony_ci break; 100062306a36Sopenharmony_ci } 100162306a36Sopenharmony_ci 100262306a36Sopenharmony_ci raw_spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags); 100362306a36Sopenharmony_ci 
100462306a36Sopenharmony_ci return pending; 100562306a36Sopenharmony_ci} 100662306a36Sopenharmony_ci 100762306a36Sopenharmony_civoid vgic_kick_vcpus(struct kvm *kvm) 100862306a36Sopenharmony_ci{ 100962306a36Sopenharmony_ci struct kvm_vcpu *vcpu; 101062306a36Sopenharmony_ci unsigned long c; 101162306a36Sopenharmony_ci 101262306a36Sopenharmony_ci /* 101362306a36Sopenharmony_ci * We've injected an interrupt, time to find out who deserves 101462306a36Sopenharmony_ci * a good kick... 101562306a36Sopenharmony_ci */ 101662306a36Sopenharmony_ci kvm_for_each_vcpu(c, vcpu, kvm) { 101762306a36Sopenharmony_ci if (kvm_vgic_vcpu_pending_irq(vcpu)) { 101862306a36Sopenharmony_ci kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu); 101962306a36Sopenharmony_ci kvm_vcpu_kick(vcpu); 102062306a36Sopenharmony_ci } 102162306a36Sopenharmony_ci } 102262306a36Sopenharmony_ci} 102362306a36Sopenharmony_ci 102462306a36Sopenharmony_cibool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int vintid) 102562306a36Sopenharmony_ci{ 102662306a36Sopenharmony_ci struct vgic_irq *irq; 102762306a36Sopenharmony_ci bool map_is_active; 102862306a36Sopenharmony_ci unsigned long flags; 102962306a36Sopenharmony_ci 103062306a36Sopenharmony_ci if (!vgic_initialized(vcpu->kvm)) 103162306a36Sopenharmony_ci return false; 103262306a36Sopenharmony_ci 103362306a36Sopenharmony_ci irq = vgic_get_irq(vcpu->kvm, vcpu, vintid); 103462306a36Sopenharmony_ci raw_spin_lock_irqsave(&irq->irq_lock, flags); 103562306a36Sopenharmony_ci map_is_active = irq->hw && irq->active; 103662306a36Sopenharmony_ci raw_spin_unlock_irqrestore(&irq->irq_lock, flags); 103762306a36Sopenharmony_ci vgic_put_irq(vcpu->kvm, irq); 103862306a36Sopenharmony_ci 103962306a36Sopenharmony_ci return map_is_active; 104062306a36Sopenharmony_ci} 104162306a36Sopenharmony_ci 104262306a36Sopenharmony_ci/* 104362306a36Sopenharmony_ci * Level-triggered mapped IRQs are special because we only observe rising 104462306a36Sopenharmony_ci * edges as input to the VGIC. 
104562306a36Sopenharmony_ci * 104662306a36Sopenharmony_ci * If the guest never acked the interrupt we have to sample the physical 104762306a36Sopenharmony_ci * line and set the line level, because the device state could have changed 104862306a36Sopenharmony_ci * or we simply need to process the still pending interrupt later. 104962306a36Sopenharmony_ci * 105062306a36Sopenharmony_ci * We could also have entered the guest with the interrupt active+pending. 105162306a36Sopenharmony_ci * On the next exit, we need to re-evaluate the pending state, as it could 105262306a36Sopenharmony_ci * otherwise result in a spurious interrupt by injecting a now potentially 105362306a36Sopenharmony_ci * stale pending state. 105462306a36Sopenharmony_ci * 105562306a36Sopenharmony_ci * If this causes us to lower the level, we have to also clear the physical 105662306a36Sopenharmony_ci * active state, since we will otherwise never be told when the interrupt 105762306a36Sopenharmony_ci * becomes asserted again. 105862306a36Sopenharmony_ci * 105962306a36Sopenharmony_ci * Another case is when the interrupt requires a helping hand on 106062306a36Sopenharmony_ci * deactivation (no HW deactivation, for example). 
106162306a36Sopenharmony_ci */ 106262306a36Sopenharmony_civoid vgic_irq_handle_resampling(struct vgic_irq *irq, 106362306a36Sopenharmony_ci bool lr_deactivated, bool lr_pending) 106462306a36Sopenharmony_ci{ 106562306a36Sopenharmony_ci if (vgic_irq_is_mapped_level(irq)) { 106662306a36Sopenharmony_ci bool resample = false; 106762306a36Sopenharmony_ci 106862306a36Sopenharmony_ci if (unlikely(vgic_irq_needs_resampling(irq))) { 106962306a36Sopenharmony_ci resample = !(irq->active || irq->pending_latch); 107062306a36Sopenharmony_ci } else if (lr_pending || (lr_deactivated && irq->line_level)) { 107162306a36Sopenharmony_ci irq->line_level = vgic_get_phys_line_level(irq); 107262306a36Sopenharmony_ci resample = !irq->line_level; 107362306a36Sopenharmony_ci } 107462306a36Sopenharmony_ci 107562306a36Sopenharmony_ci if (resample) 107662306a36Sopenharmony_ci vgic_irq_set_phys_active(irq, false); 107762306a36Sopenharmony_ci } 107862306a36Sopenharmony_ci} 1079