// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kvm_host.h>
#include <linux/irqchip/arm-gic-v3.h>

#include "vgic.h"

/*
 * How KVM uses GICv4 (insert rude comments here):
 *
 * The vgic-v4 layer acts as a bridge between several entities:
 * - The GICv4 ITS representation offered by the ITS driver
 * - VFIO, which is in charge of the PCI endpoint
 * - The virtual ITS, which is the only thing the guest sees
 *
 * The configuration of VLPIs is triggered by a callback from VFIO,
 * instructing KVM that a PCI device has been configured to deliver
 * MSIs to a vITS.
 *
 * kvm_vgic_v4_set_forwarding() is thus called with the routing entry,
 * and this is used to find the corresponding vITS data structures
 * (ITS instance, device, event and irq) using a process that is
 * extremely similar to the injection of an MSI.
 *
 * At this stage, we can link the guest's view of an LPI (uniquely
 * identified by the routing entry) and the host irq, using the GICv4
 * driver mapping operation. Should the mapping succeed, we've then
 * successfully upgraded the guest's LPI to a VLPI. We can then start
 * with updating GICv4's view of the property table and generating an
 * INValidation in order to kickstart the delivery of this VLPI to the
 * guest directly, without software intervention. Well, almost.
 *
 * When the PCI endpoint is deconfigured, this operation is reversed
 * with VFIO calling kvm_vgic_v4_unset_forwarding().
 *
 * Once the VLPI has been mapped, it needs to follow any change the
 * guest performs on its LPI through the vITS. For that, a number of
 * command handlers have hooks to communicate these changes to the HW:
 * - Any invalidation triggers a call to its_prop_update_vlpi()
 * - The INT command results in a irq_set_irqchip_state(), which
 *   generates an INT on the corresponding VLPI.
 * - The CLEAR command results in a irq_set_irqchip_state(), which
 *   generates an CLEAR on the corresponding VLPI.
 * - DISCARD translates into an unmap, similar to a call to
 *   kvm_vgic_v4_unset_forwarding().
 * - MOVI is translated by an update of the existing mapping, changing
 *   the target vcpu, resulting in a VMOVI being generated.
 * - MOVALL is translated by a string of mapping updates (similar to
 *   the handling of MOVI). MOVALL is horrible.
 *
 * Note that a DISCARD/MAPTI sequence emitted from the guest without
 * reprogramming the PCI endpoint after MAPTI does not result in a
 * VLPI being mapped, as there is no callback from VFIO (the guest
 * will get the interrupt via the normal SW injection). Fixing this is
 * not trivial, and requires some horrible messing with the VFIO
 * internals. Not fun. Don't do that.
 *
 * Then there is the scheduling. Each time a vcpu is about to run on a
 * physical CPU, KVM must tell the corresponding redistributor about
 * it. And if we've migrated our vcpu from one CPU to another, we must
 * tell the ITS (so that the messages reach the right redistributor).
 * This is done in two steps: first issue a irq_set_affinity() on the
 * irq corresponding to the vcpu, then call its_make_vpe_resident().
 * You must be in a non-preemptible context. On exit, a call to
 * its_make_vpe_non_resident() tells the redistributor that we're done
 * with the vcpu.
 *
 * Finally, the doorbell handling: Each vcpu is allocated an interrupt
 * which will fire each time a VLPI is made pending whilst the vcpu is
 * not running. Each time the vcpu gets blocked, the doorbell
 * interrupt gets enabled.
When the vcpu is unblocked (for whatever 798c2ecf20Sopenharmony_ci * reason), the doorbell interrupt is disabled. 808c2ecf20Sopenharmony_ci */ 818c2ecf20Sopenharmony_ci 828c2ecf20Sopenharmony_ci#define DB_IRQ_FLAGS (IRQ_NOAUTOEN | IRQ_DISABLE_UNLAZY | IRQ_NO_BALANCING) 838c2ecf20Sopenharmony_ci 848c2ecf20Sopenharmony_cistatic irqreturn_t vgic_v4_doorbell_handler(int irq, void *info) 858c2ecf20Sopenharmony_ci{ 868c2ecf20Sopenharmony_ci struct kvm_vcpu *vcpu = info; 878c2ecf20Sopenharmony_ci 888c2ecf20Sopenharmony_ci /* We got the message, no need to fire again */ 898c2ecf20Sopenharmony_ci if (!kvm_vgic_global_state.has_gicv4_1 && 908c2ecf20Sopenharmony_ci !irqd_irq_disabled(&irq_to_desc(irq)->irq_data)) 918c2ecf20Sopenharmony_ci disable_irq_nosync(irq); 928c2ecf20Sopenharmony_ci 938c2ecf20Sopenharmony_ci /* 948c2ecf20Sopenharmony_ci * The v4.1 doorbell can fire concurrently with the vPE being 958c2ecf20Sopenharmony_ci * made non-resident. Ensure we only update pending_last 968c2ecf20Sopenharmony_ci * *after* the non-residency sequence has completed. 
978c2ecf20Sopenharmony_ci */ 988c2ecf20Sopenharmony_ci raw_spin_lock(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vpe_lock); 998c2ecf20Sopenharmony_ci vcpu->arch.vgic_cpu.vgic_v3.its_vpe.pending_last = true; 1008c2ecf20Sopenharmony_ci raw_spin_unlock(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vpe_lock); 1018c2ecf20Sopenharmony_ci 1028c2ecf20Sopenharmony_ci kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu); 1038c2ecf20Sopenharmony_ci kvm_vcpu_kick(vcpu); 1048c2ecf20Sopenharmony_ci 1058c2ecf20Sopenharmony_ci return IRQ_HANDLED; 1068c2ecf20Sopenharmony_ci} 1078c2ecf20Sopenharmony_ci 1088c2ecf20Sopenharmony_cistatic void vgic_v4_sync_sgi_config(struct its_vpe *vpe, struct vgic_irq *irq) 1098c2ecf20Sopenharmony_ci{ 1108c2ecf20Sopenharmony_ci vpe->sgi_config[irq->intid].enabled = irq->enabled; 1118c2ecf20Sopenharmony_ci vpe->sgi_config[irq->intid].group = irq->group; 1128c2ecf20Sopenharmony_ci vpe->sgi_config[irq->intid].priority = irq->priority; 1138c2ecf20Sopenharmony_ci} 1148c2ecf20Sopenharmony_ci 1158c2ecf20Sopenharmony_cistatic void vgic_v4_enable_vsgis(struct kvm_vcpu *vcpu) 1168c2ecf20Sopenharmony_ci{ 1178c2ecf20Sopenharmony_ci struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe; 1188c2ecf20Sopenharmony_ci int i; 1198c2ecf20Sopenharmony_ci 1208c2ecf20Sopenharmony_ci /* 1218c2ecf20Sopenharmony_ci * With GICv4.1, every virtual SGI can be directly injected. So 1228c2ecf20Sopenharmony_ci * let's pretend that they are HW interrupts, tied to a host 1238c2ecf20Sopenharmony_ci * IRQ. The SGI code will do its magic. 
1248c2ecf20Sopenharmony_ci */ 1258c2ecf20Sopenharmony_ci for (i = 0; i < VGIC_NR_SGIS; i++) { 1268c2ecf20Sopenharmony_ci struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, i); 1278c2ecf20Sopenharmony_ci struct irq_desc *desc; 1288c2ecf20Sopenharmony_ci unsigned long flags; 1298c2ecf20Sopenharmony_ci int ret; 1308c2ecf20Sopenharmony_ci 1318c2ecf20Sopenharmony_ci raw_spin_lock_irqsave(&irq->irq_lock, flags); 1328c2ecf20Sopenharmony_ci 1338c2ecf20Sopenharmony_ci if (irq->hw) 1348c2ecf20Sopenharmony_ci goto unlock; 1358c2ecf20Sopenharmony_ci 1368c2ecf20Sopenharmony_ci irq->hw = true; 1378c2ecf20Sopenharmony_ci irq->host_irq = irq_find_mapping(vpe->sgi_domain, i); 1388c2ecf20Sopenharmony_ci 1398c2ecf20Sopenharmony_ci /* Transfer the full irq state to the vPE */ 1408c2ecf20Sopenharmony_ci vgic_v4_sync_sgi_config(vpe, irq); 1418c2ecf20Sopenharmony_ci desc = irq_to_desc(irq->host_irq); 1428c2ecf20Sopenharmony_ci ret = irq_domain_activate_irq(irq_desc_get_irq_data(desc), 1438c2ecf20Sopenharmony_ci false); 1448c2ecf20Sopenharmony_ci if (!WARN_ON(ret)) { 1458c2ecf20Sopenharmony_ci /* Transfer pending state */ 1468c2ecf20Sopenharmony_ci ret = irq_set_irqchip_state(irq->host_irq, 1478c2ecf20Sopenharmony_ci IRQCHIP_STATE_PENDING, 1488c2ecf20Sopenharmony_ci irq->pending_latch); 1498c2ecf20Sopenharmony_ci WARN_ON(ret); 1508c2ecf20Sopenharmony_ci irq->pending_latch = false; 1518c2ecf20Sopenharmony_ci } 1528c2ecf20Sopenharmony_ci unlock: 1538c2ecf20Sopenharmony_ci raw_spin_unlock_irqrestore(&irq->irq_lock, flags); 1548c2ecf20Sopenharmony_ci vgic_put_irq(vcpu->kvm, irq); 1558c2ecf20Sopenharmony_ci } 1568c2ecf20Sopenharmony_ci} 1578c2ecf20Sopenharmony_ci 1588c2ecf20Sopenharmony_cistatic void vgic_v4_disable_vsgis(struct kvm_vcpu *vcpu) 1598c2ecf20Sopenharmony_ci{ 1608c2ecf20Sopenharmony_ci int i; 1618c2ecf20Sopenharmony_ci 1628c2ecf20Sopenharmony_ci for (i = 0; i < VGIC_NR_SGIS; i++) { 1638c2ecf20Sopenharmony_ci struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, i); 
1648c2ecf20Sopenharmony_ci struct irq_desc *desc; 1658c2ecf20Sopenharmony_ci unsigned long flags; 1668c2ecf20Sopenharmony_ci int ret; 1678c2ecf20Sopenharmony_ci 1688c2ecf20Sopenharmony_ci raw_spin_lock_irqsave(&irq->irq_lock, flags); 1698c2ecf20Sopenharmony_ci 1708c2ecf20Sopenharmony_ci if (!irq->hw) 1718c2ecf20Sopenharmony_ci goto unlock; 1728c2ecf20Sopenharmony_ci 1738c2ecf20Sopenharmony_ci irq->hw = false; 1748c2ecf20Sopenharmony_ci ret = irq_get_irqchip_state(irq->host_irq, 1758c2ecf20Sopenharmony_ci IRQCHIP_STATE_PENDING, 1768c2ecf20Sopenharmony_ci &irq->pending_latch); 1778c2ecf20Sopenharmony_ci WARN_ON(ret); 1788c2ecf20Sopenharmony_ci 1798c2ecf20Sopenharmony_ci desc = irq_to_desc(irq->host_irq); 1808c2ecf20Sopenharmony_ci irq_domain_deactivate_irq(irq_desc_get_irq_data(desc)); 1818c2ecf20Sopenharmony_ci unlock: 1828c2ecf20Sopenharmony_ci raw_spin_unlock_irqrestore(&irq->irq_lock, flags); 1838c2ecf20Sopenharmony_ci vgic_put_irq(vcpu->kvm, irq); 1848c2ecf20Sopenharmony_ci } 1858c2ecf20Sopenharmony_ci} 1868c2ecf20Sopenharmony_ci 1878c2ecf20Sopenharmony_ci/* Must be called with the kvm lock held */ 1888c2ecf20Sopenharmony_civoid vgic_v4_configure_vsgis(struct kvm *kvm) 1898c2ecf20Sopenharmony_ci{ 1908c2ecf20Sopenharmony_ci struct vgic_dist *dist = &kvm->arch.vgic; 1918c2ecf20Sopenharmony_ci struct kvm_vcpu *vcpu; 1928c2ecf20Sopenharmony_ci int i; 1938c2ecf20Sopenharmony_ci 1948c2ecf20Sopenharmony_ci kvm_arm_halt_guest(kvm); 1958c2ecf20Sopenharmony_ci 1968c2ecf20Sopenharmony_ci kvm_for_each_vcpu(i, vcpu, kvm) { 1978c2ecf20Sopenharmony_ci if (dist->nassgireq) 1988c2ecf20Sopenharmony_ci vgic_v4_enable_vsgis(vcpu); 1998c2ecf20Sopenharmony_ci else 2008c2ecf20Sopenharmony_ci vgic_v4_disable_vsgis(vcpu); 2018c2ecf20Sopenharmony_ci } 2028c2ecf20Sopenharmony_ci 2038c2ecf20Sopenharmony_ci kvm_arm_resume_guest(kvm); 2048c2ecf20Sopenharmony_ci} 2058c2ecf20Sopenharmony_ci 2068c2ecf20Sopenharmony_ci/** 2078c2ecf20Sopenharmony_ci * vgic_v4_init - Initialize the GICv4 data 
structures 2088c2ecf20Sopenharmony_ci * @kvm: Pointer to the VM being initialized 2098c2ecf20Sopenharmony_ci * 2108c2ecf20Sopenharmony_ci * We may be called each time a vITS is created, or when the 2118c2ecf20Sopenharmony_ci * vgic is initialized. This relies on kvm->lock to be 2128c2ecf20Sopenharmony_ci * held. In both cases, the number of vcpus should now be 2138c2ecf20Sopenharmony_ci * fixed. 2148c2ecf20Sopenharmony_ci */ 2158c2ecf20Sopenharmony_ciint vgic_v4_init(struct kvm *kvm) 2168c2ecf20Sopenharmony_ci{ 2178c2ecf20Sopenharmony_ci struct vgic_dist *dist = &kvm->arch.vgic; 2188c2ecf20Sopenharmony_ci struct kvm_vcpu *vcpu; 2198c2ecf20Sopenharmony_ci int i, nr_vcpus, ret; 2208c2ecf20Sopenharmony_ci 2218c2ecf20Sopenharmony_ci if (!kvm_vgic_global_state.has_gicv4) 2228c2ecf20Sopenharmony_ci return 0; /* Nothing to see here... move along. */ 2238c2ecf20Sopenharmony_ci 2248c2ecf20Sopenharmony_ci if (dist->its_vm.vpes) 2258c2ecf20Sopenharmony_ci return 0; 2268c2ecf20Sopenharmony_ci 2278c2ecf20Sopenharmony_ci nr_vcpus = atomic_read(&kvm->online_vcpus); 2288c2ecf20Sopenharmony_ci 2298c2ecf20Sopenharmony_ci dist->its_vm.vpes = kcalloc(nr_vcpus, sizeof(*dist->its_vm.vpes), 2308c2ecf20Sopenharmony_ci GFP_KERNEL); 2318c2ecf20Sopenharmony_ci if (!dist->its_vm.vpes) 2328c2ecf20Sopenharmony_ci return -ENOMEM; 2338c2ecf20Sopenharmony_ci 2348c2ecf20Sopenharmony_ci dist->its_vm.nr_vpes = nr_vcpus; 2358c2ecf20Sopenharmony_ci 2368c2ecf20Sopenharmony_ci kvm_for_each_vcpu(i, vcpu, kvm) 2378c2ecf20Sopenharmony_ci dist->its_vm.vpes[i] = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe; 2388c2ecf20Sopenharmony_ci 2398c2ecf20Sopenharmony_ci ret = its_alloc_vcpu_irqs(&dist->its_vm); 2408c2ecf20Sopenharmony_ci if (ret < 0) { 2418c2ecf20Sopenharmony_ci kvm_err("VPE IRQ allocation failure\n"); 2428c2ecf20Sopenharmony_ci kfree(dist->its_vm.vpes); 2438c2ecf20Sopenharmony_ci dist->its_vm.nr_vpes = 0; 2448c2ecf20Sopenharmony_ci dist->its_vm.vpes = NULL; 2458c2ecf20Sopenharmony_ci return ret; 
2468c2ecf20Sopenharmony_ci } 2478c2ecf20Sopenharmony_ci 2488c2ecf20Sopenharmony_ci kvm_for_each_vcpu(i, vcpu, kvm) { 2498c2ecf20Sopenharmony_ci int irq = dist->its_vm.vpes[i]->irq; 2508c2ecf20Sopenharmony_ci unsigned long irq_flags = DB_IRQ_FLAGS; 2518c2ecf20Sopenharmony_ci 2528c2ecf20Sopenharmony_ci /* 2538c2ecf20Sopenharmony_ci * Don't automatically enable the doorbell, as we're 2548c2ecf20Sopenharmony_ci * flipping it back and forth when the vcpu gets 2558c2ecf20Sopenharmony_ci * blocked. Also disable the lazy disabling, as the 2568c2ecf20Sopenharmony_ci * doorbell could kick us out of the guest too 2578c2ecf20Sopenharmony_ci * early... 2588c2ecf20Sopenharmony_ci * 2598c2ecf20Sopenharmony_ci * On GICv4.1, the doorbell is managed in HW and must 2608c2ecf20Sopenharmony_ci * be left enabled. 2618c2ecf20Sopenharmony_ci */ 2628c2ecf20Sopenharmony_ci if (kvm_vgic_global_state.has_gicv4_1) 2638c2ecf20Sopenharmony_ci irq_flags &= ~IRQ_NOAUTOEN; 2648c2ecf20Sopenharmony_ci irq_set_status_flags(irq, irq_flags); 2658c2ecf20Sopenharmony_ci 2668c2ecf20Sopenharmony_ci ret = request_irq(irq, vgic_v4_doorbell_handler, 2678c2ecf20Sopenharmony_ci 0, "vcpu", vcpu); 2688c2ecf20Sopenharmony_ci if (ret) { 2698c2ecf20Sopenharmony_ci kvm_err("failed to allocate vcpu IRQ%d\n", irq); 2708c2ecf20Sopenharmony_ci /* 2718c2ecf20Sopenharmony_ci * Trick: adjust the number of vpes so we know 2728c2ecf20Sopenharmony_ci * how many to nuke on teardown... 
2738c2ecf20Sopenharmony_ci */ 2748c2ecf20Sopenharmony_ci dist->its_vm.nr_vpes = i; 2758c2ecf20Sopenharmony_ci break; 2768c2ecf20Sopenharmony_ci } 2778c2ecf20Sopenharmony_ci } 2788c2ecf20Sopenharmony_ci 2798c2ecf20Sopenharmony_ci if (ret) 2808c2ecf20Sopenharmony_ci vgic_v4_teardown(kvm); 2818c2ecf20Sopenharmony_ci 2828c2ecf20Sopenharmony_ci return ret; 2838c2ecf20Sopenharmony_ci} 2848c2ecf20Sopenharmony_ci 2858c2ecf20Sopenharmony_ci/** 2868c2ecf20Sopenharmony_ci * vgic_v4_teardown - Free the GICv4 data structures 2878c2ecf20Sopenharmony_ci * @kvm: Pointer to the VM being destroyed 2888c2ecf20Sopenharmony_ci * 2898c2ecf20Sopenharmony_ci * Relies on kvm->lock to be held. 2908c2ecf20Sopenharmony_ci */ 2918c2ecf20Sopenharmony_civoid vgic_v4_teardown(struct kvm *kvm) 2928c2ecf20Sopenharmony_ci{ 2938c2ecf20Sopenharmony_ci struct its_vm *its_vm = &kvm->arch.vgic.its_vm; 2948c2ecf20Sopenharmony_ci int i; 2958c2ecf20Sopenharmony_ci 2968c2ecf20Sopenharmony_ci if (!its_vm->vpes) 2978c2ecf20Sopenharmony_ci return; 2988c2ecf20Sopenharmony_ci 2998c2ecf20Sopenharmony_ci for (i = 0; i < its_vm->nr_vpes; i++) { 3008c2ecf20Sopenharmony_ci struct kvm_vcpu *vcpu = kvm_get_vcpu(kvm, i); 3018c2ecf20Sopenharmony_ci int irq = its_vm->vpes[i]->irq; 3028c2ecf20Sopenharmony_ci 3038c2ecf20Sopenharmony_ci irq_clear_status_flags(irq, DB_IRQ_FLAGS); 3048c2ecf20Sopenharmony_ci free_irq(irq, vcpu); 3058c2ecf20Sopenharmony_ci } 3068c2ecf20Sopenharmony_ci 3078c2ecf20Sopenharmony_ci its_free_vcpu_irqs(its_vm); 3088c2ecf20Sopenharmony_ci kfree(its_vm->vpes); 3098c2ecf20Sopenharmony_ci its_vm->nr_vpes = 0; 3108c2ecf20Sopenharmony_ci its_vm->vpes = NULL; 3118c2ecf20Sopenharmony_ci} 3128c2ecf20Sopenharmony_ci 3138c2ecf20Sopenharmony_ciint vgic_v4_put(struct kvm_vcpu *vcpu, bool need_db) 3148c2ecf20Sopenharmony_ci{ 3158c2ecf20Sopenharmony_ci struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe; 3168c2ecf20Sopenharmony_ci 3178c2ecf20Sopenharmony_ci if (!vgic_supports_direct_msis(vcpu->kvm) || 
!vpe->resident) 3188c2ecf20Sopenharmony_ci return 0; 3198c2ecf20Sopenharmony_ci 3208c2ecf20Sopenharmony_ci return its_make_vpe_non_resident(vpe, need_db); 3218c2ecf20Sopenharmony_ci} 3228c2ecf20Sopenharmony_ci 3238c2ecf20Sopenharmony_ciint vgic_v4_load(struct kvm_vcpu *vcpu) 3248c2ecf20Sopenharmony_ci{ 3258c2ecf20Sopenharmony_ci struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe; 3268c2ecf20Sopenharmony_ci int err; 3278c2ecf20Sopenharmony_ci 3288c2ecf20Sopenharmony_ci if (!vgic_supports_direct_msis(vcpu->kvm) || vpe->resident) 3298c2ecf20Sopenharmony_ci return 0; 3308c2ecf20Sopenharmony_ci 3318c2ecf20Sopenharmony_ci /* 3328c2ecf20Sopenharmony_ci * Before making the VPE resident, make sure the redistributor 3338c2ecf20Sopenharmony_ci * corresponding to our current CPU expects us here. See the 3348c2ecf20Sopenharmony_ci * doc in drivers/irqchip/irq-gic-v4.c to understand how this 3358c2ecf20Sopenharmony_ci * turns into a VMOVP command at the ITS level. 3368c2ecf20Sopenharmony_ci */ 3378c2ecf20Sopenharmony_ci err = irq_set_affinity(vpe->irq, cpumask_of(smp_processor_id())); 3388c2ecf20Sopenharmony_ci if (err) 3398c2ecf20Sopenharmony_ci return err; 3408c2ecf20Sopenharmony_ci 3418c2ecf20Sopenharmony_ci err = its_make_vpe_resident(vpe, false, vcpu->kvm->arch.vgic.enabled); 3428c2ecf20Sopenharmony_ci if (err) 3438c2ecf20Sopenharmony_ci return err; 3448c2ecf20Sopenharmony_ci 3458c2ecf20Sopenharmony_ci /* 3468c2ecf20Sopenharmony_ci * Now that the VPE is resident, let's get rid of a potential 3478c2ecf20Sopenharmony_ci * doorbell interrupt that would still be pending. This is a 3488c2ecf20Sopenharmony_ci * GICv4.0 only "feature"... 
3498c2ecf20Sopenharmony_ci */ 3508c2ecf20Sopenharmony_ci if (!kvm_vgic_global_state.has_gicv4_1) 3518c2ecf20Sopenharmony_ci err = irq_set_irqchip_state(vpe->irq, IRQCHIP_STATE_PENDING, false); 3528c2ecf20Sopenharmony_ci 3538c2ecf20Sopenharmony_ci return err; 3548c2ecf20Sopenharmony_ci} 3558c2ecf20Sopenharmony_ci 3568c2ecf20Sopenharmony_cistatic struct vgic_its *vgic_get_its(struct kvm *kvm, 3578c2ecf20Sopenharmony_ci struct kvm_kernel_irq_routing_entry *irq_entry) 3588c2ecf20Sopenharmony_ci{ 3598c2ecf20Sopenharmony_ci struct kvm_msi msi = (struct kvm_msi) { 3608c2ecf20Sopenharmony_ci .address_lo = irq_entry->msi.address_lo, 3618c2ecf20Sopenharmony_ci .address_hi = irq_entry->msi.address_hi, 3628c2ecf20Sopenharmony_ci .data = irq_entry->msi.data, 3638c2ecf20Sopenharmony_ci .flags = irq_entry->msi.flags, 3648c2ecf20Sopenharmony_ci .devid = irq_entry->msi.devid, 3658c2ecf20Sopenharmony_ci }; 3668c2ecf20Sopenharmony_ci 3678c2ecf20Sopenharmony_ci return vgic_msi_to_its(kvm, &msi); 3688c2ecf20Sopenharmony_ci} 3698c2ecf20Sopenharmony_ci 3708c2ecf20Sopenharmony_ciint kvm_vgic_v4_set_forwarding(struct kvm *kvm, int virq, 3718c2ecf20Sopenharmony_ci struct kvm_kernel_irq_routing_entry *irq_entry) 3728c2ecf20Sopenharmony_ci{ 3738c2ecf20Sopenharmony_ci struct vgic_its *its; 3748c2ecf20Sopenharmony_ci struct vgic_irq *irq; 3758c2ecf20Sopenharmony_ci struct its_vlpi_map map; 3768c2ecf20Sopenharmony_ci int ret; 3778c2ecf20Sopenharmony_ci 3788c2ecf20Sopenharmony_ci if (!vgic_supports_direct_msis(kvm)) 3798c2ecf20Sopenharmony_ci return 0; 3808c2ecf20Sopenharmony_ci 3818c2ecf20Sopenharmony_ci /* 3828c2ecf20Sopenharmony_ci * Get the ITS, and escape early on error (not a valid 3838c2ecf20Sopenharmony_ci * doorbell for any of our vITSs). 
3848c2ecf20Sopenharmony_ci */ 3858c2ecf20Sopenharmony_ci its = vgic_get_its(kvm, irq_entry); 3868c2ecf20Sopenharmony_ci if (IS_ERR(its)) 3878c2ecf20Sopenharmony_ci return 0; 3888c2ecf20Sopenharmony_ci 3898c2ecf20Sopenharmony_ci mutex_lock(&its->its_lock); 3908c2ecf20Sopenharmony_ci 3918c2ecf20Sopenharmony_ci /* Perform the actual DevID/EventID -> LPI translation. */ 3928c2ecf20Sopenharmony_ci ret = vgic_its_resolve_lpi(kvm, its, irq_entry->msi.devid, 3938c2ecf20Sopenharmony_ci irq_entry->msi.data, &irq); 3948c2ecf20Sopenharmony_ci if (ret) 3958c2ecf20Sopenharmony_ci goto out; 3968c2ecf20Sopenharmony_ci 3978c2ecf20Sopenharmony_ci /* 3988c2ecf20Sopenharmony_ci * Emit the mapping request. If it fails, the ITS probably 3998c2ecf20Sopenharmony_ci * isn't v4 compatible, so let's silently bail out. Holding 4008c2ecf20Sopenharmony_ci * the ITS lock should ensure that nothing can modify the 4018c2ecf20Sopenharmony_ci * target vcpu. 4028c2ecf20Sopenharmony_ci */ 4038c2ecf20Sopenharmony_ci map = (struct its_vlpi_map) { 4048c2ecf20Sopenharmony_ci .vm = &kvm->arch.vgic.its_vm, 4058c2ecf20Sopenharmony_ci .vpe = &irq->target_vcpu->arch.vgic_cpu.vgic_v3.its_vpe, 4068c2ecf20Sopenharmony_ci .vintid = irq->intid, 4078c2ecf20Sopenharmony_ci .properties = ((irq->priority & 0xfc) | 4088c2ecf20Sopenharmony_ci (irq->enabled ? 
LPI_PROP_ENABLED : 0) | 4098c2ecf20Sopenharmony_ci LPI_PROP_GROUP1), 4108c2ecf20Sopenharmony_ci .db_enabled = true, 4118c2ecf20Sopenharmony_ci }; 4128c2ecf20Sopenharmony_ci 4138c2ecf20Sopenharmony_ci ret = its_map_vlpi(virq, &map); 4148c2ecf20Sopenharmony_ci if (ret) 4158c2ecf20Sopenharmony_ci goto out; 4168c2ecf20Sopenharmony_ci 4178c2ecf20Sopenharmony_ci irq->hw = true; 4188c2ecf20Sopenharmony_ci irq->host_irq = virq; 4198c2ecf20Sopenharmony_ci atomic_inc(&map.vpe->vlpi_count); 4208c2ecf20Sopenharmony_ci 4218c2ecf20Sopenharmony_ciout: 4228c2ecf20Sopenharmony_ci mutex_unlock(&its->its_lock); 4238c2ecf20Sopenharmony_ci return ret; 4248c2ecf20Sopenharmony_ci} 4258c2ecf20Sopenharmony_ci 4268c2ecf20Sopenharmony_ciint kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int virq, 4278c2ecf20Sopenharmony_ci struct kvm_kernel_irq_routing_entry *irq_entry) 4288c2ecf20Sopenharmony_ci{ 4298c2ecf20Sopenharmony_ci struct vgic_its *its; 4308c2ecf20Sopenharmony_ci struct vgic_irq *irq; 4318c2ecf20Sopenharmony_ci int ret; 4328c2ecf20Sopenharmony_ci 4338c2ecf20Sopenharmony_ci if (!vgic_supports_direct_msis(kvm)) 4348c2ecf20Sopenharmony_ci return 0; 4358c2ecf20Sopenharmony_ci 4368c2ecf20Sopenharmony_ci /* 4378c2ecf20Sopenharmony_ci * Get the ITS, and escape early on error (not a valid 4388c2ecf20Sopenharmony_ci * doorbell for any of our vITSs). 
4398c2ecf20Sopenharmony_ci */ 4408c2ecf20Sopenharmony_ci its = vgic_get_its(kvm, irq_entry); 4418c2ecf20Sopenharmony_ci if (IS_ERR(its)) 4428c2ecf20Sopenharmony_ci return 0; 4438c2ecf20Sopenharmony_ci 4448c2ecf20Sopenharmony_ci mutex_lock(&its->its_lock); 4458c2ecf20Sopenharmony_ci 4468c2ecf20Sopenharmony_ci ret = vgic_its_resolve_lpi(kvm, its, irq_entry->msi.devid, 4478c2ecf20Sopenharmony_ci irq_entry->msi.data, &irq); 4488c2ecf20Sopenharmony_ci if (ret) 4498c2ecf20Sopenharmony_ci goto out; 4508c2ecf20Sopenharmony_ci 4518c2ecf20Sopenharmony_ci WARN_ON(!(irq->hw && irq->host_irq == virq)); 4528c2ecf20Sopenharmony_ci if (irq->hw) { 4538c2ecf20Sopenharmony_ci atomic_dec(&irq->target_vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count); 4548c2ecf20Sopenharmony_ci irq->hw = false; 4558c2ecf20Sopenharmony_ci ret = its_unmap_vlpi(virq); 4568c2ecf20Sopenharmony_ci } 4578c2ecf20Sopenharmony_ci 4588c2ecf20Sopenharmony_ciout: 4598c2ecf20Sopenharmony_ci mutex_unlock(&its->its_lock); 4608c2ecf20Sopenharmony_ci return ret; 4618c2ecf20Sopenharmony_ci} 462