Lines Matching defs:kvm in arch/arm64/kvm/arm.c (Linux ~v5.10)

18 #include <linux/kvm.h>
22 #include <trace/events/kvm.h>
41 #include <kvm/arm_hypercalls.h>
42 #include <kvm/arm_pmu.h>
43 #include <kvm/arm_psci.h>
79 int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
90 kvm->arch.return_nisv_io_abort_to_user = true;
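Lines 79 and 90 are the per-VM capability handler. As a sketch of how it reads in kernels of this era (~v5.10; exact bodies shift between versions), KVM_CAP_ARM_NISV_TO_USER sets return_nisv_io_abort_to_user so data aborts without valid syndrome decoding are forwarded to userspace:

int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
                            struct kvm_enable_cap *cap)
{
        int r;

        if (cap->flags)
                return -EINVAL;

        switch (cap->cap) {
        case KVM_CAP_ARM_NISV_TO_USER:
                /* Forward non-ISV data aborts to userspace */
                r = 0;
                kvm->arch.return_nisv_io_abort_to_user = true;
                break;
        default:
                r = -EINVAL;
                break;
        }

        return r;
}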
105 static void set_default_csv2(struct kvm *kvm)
116 kvm->arch.pfr0_csv2 = 1;
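Lines 105 and 116 belong to set_default_csv2(). A sketch of the v5.10-era body; arm64_get_spectre_v2_state() and SPECTRE_UNAFFECTED are the names used around that version and are not in the matches above:

static void set_default_csv2(struct kvm *kvm)
{
        /*
         * Expose CSV2 == 1 (not vulnerable to branch target
         * injection) only if the host itself is unaffected. The
         * feature is per-CPU in hardware but treated as global here,
         * since asymmetric systems would otherwise be a nuisance.
         */
        if (arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED)
                kvm->arch.pfr0_csv2 = 1;
}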
121 * @kvm: pointer to the KVM struct
123 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
127 ret = kvm_arm_setup_stage2(kvm, type);
131 ret = kvm_init_stage2_mmu(kvm, &kvm->arch.mmu);
135 ret = create_hyp_mappings(kvm, kvm + 1, PAGE_HYP);
139 kvm_vgic_early_init(kvm);
142 kvm->arch.max_vcpus = kvm_arm_default_max_vcpus();
144 set_default_csv2(kvm);
148 kvm_free_stage2_pgd(&kvm->arch.mmu);
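Lines 123-148 together form kvm_arch_init_vm(). Reassembled from the matches plus the v5.10-era source (the goto label and comments are reconstructions), the flow is: set up stage-2 parameters, init the MMU, map struct kvm into hyp, early-init the vgic, cap max_vcpus by the host GIC model, then pick the default CSV2 value:

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
        int ret;

        ret = kvm_arm_setup_stage2(kvm, type);
        if (ret)
                return ret;

        ret = kvm_init_stage2_mmu(kvm, &kvm->arch.mmu);
        if (ret)
                return ret;

        /* Map the VM structure into the hyp (EL2) address space */
        ret = create_hyp_mappings(kvm, kvm + 1, PAGE_HYP);
        if (ret)
                goto out_free_stage2_pgd;

        kvm_vgic_early_init(kvm);

        /* The maximum number of VCPUs is limited by the host's GIC model */
        kvm->arch.max_vcpus = kvm_arm_default_max_vcpus();

        set_default_csv2(kvm);

        return ret;

out_free_stage2_pgd:
        kvm_free_stage2_pgd(&kvm->arch.mmu);
        return ret;
}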
160 * @kvm: pointer to the KVM struct
162 void kvm_arch_destroy_vm(struct kvm *kvm)
166 bitmap_free(kvm->arch.pmu_filter);
168 kvm_vgic_destroy(kvm);
171 if (kvm->vcpus[i]) {
172 kvm_vcpu_destroy(kvm->vcpus[i]);
173 kvm->vcpus[i] = NULL;
176 atomic_set(&kvm->online_vcpus, 0);
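Lines 162-176 are teardown, the mirror image: free the PMU filter bitmap, destroy the vgic, then destroy and clear each vcpu before zeroing online_vcpus. Roughly, per the v5.10-era source:

void kvm_arch_destroy_vm(struct kvm *kvm)
{
        int i;

        bitmap_free(kvm->arch.pmu_filter);

        kvm_vgic_destroy(kvm);

        for (i = 0; i < KVM_MAX_VCPUS; ++i) {
                if (kvm->vcpus[i]) {
                        kvm_vcpu_destroy(kvm->vcpus[i]);
                        kvm->vcpus[i] = NULL;
                }
        }
        atomic_set(&kvm->online_vcpus, 0);
}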
179 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
211 if (kvm)
212 r = kvm->arch.max_vcpus;
217 if (!kvm)
220 r = kvm->arch.vgic.msis_require_devid;
233 r = kvm_arch_vm_ioctl_check_extension(kvm, ext);
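Lines 179-233 are the KVM_CHECK_EXTENSION backend. The matches correspond to the capability cases below; other cases are elided, and the else branches are reconstructed from the v5.10-era source:

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
        int r;

        switch (ext) {
        /* ... */
        case KVM_CAP_MAX_VCPUS:
        case KVM_CAP_MAX_VCPU_ID:
                if (kvm)
                        r = kvm->arch.max_vcpus;
                else
                        r = kvm_arm_default_max_vcpus();
                break;
        case KVM_CAP_MSI_DEVID:
                if (!kvm)
                        r = -EINVAL;
                else
                        r = kvm->arch.vgic.msis_require_devid;
                break;
        /* ... */
        default:
                r = kvm_arch_vm_ioctl_check_extension(kvm, ext);
                break;
        }

        return r;
}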
245 struct kvm *kvm_arch_alloc_vm(void)
248 return kzalloc(sizeof(struct kvm), GFP_KERNEL);
250 return vzalloc(sizeof(struct kvm));
253 void kvm_arch_free_vm(struct kvm *kvm)
256 kfree(kvm);
258 vfree(kvm);
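Lines 245-258 pair allocation with freeing. The split hinges on has_vhe(), which is not in the matches above: without VHE the struct must come from the linear map (kzalloc) so create_hyp_mappings() can map it at EL2, while with VHE no hyp mapping is needed and vzalloc suffices. Sketch:

struct kvm *kvm_arch_alloc_vm(void)
{
        /* nVHE: linear-map memory, so it can be mapped into hyp */
        if (!has_vhe())
                return kzalloc(sizeof(struct kvm), GFP_KERNEL);

        /* VHE: no hyp mapping needed, vmalloc is fine */
        return vzalloc(sizeof(struct kvm));
}

void kvm_arch_free_vm(struct kvm *kvm)
{
        if (!has_vhe())
                kfree(kvm);
        else
                vfree(kvm);
}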
261 int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
263 if (irqchip_in_kernel(kvm) && vgic_initialized(kvm))
266 if (id >= kvm->arch.max_vcpus)
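Lines 261-266 are the precreate gate: no new vcpus once the vgic has been initialized, and the id must fit under the GIC-imposed max_vcpus. Approximately:

int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
{
        if (irqchip_in_kernel(kvm) && vgic_initialized(kvm))
                return -EBUSY;

        if (id >= kvm->arch.max_vcpus)
                return -EINVAL;

        return 0;
}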
291 vcpu->arch.hw_mmu = &vcpu->kvm->arch.mmu;
306 if (vcpu->arch.has_run_once && unlikely(!irqchip_in_kernel(vcpu->kvm)))
483 smp_rmb(); /* Orders read of kvm_vmid_gen and kvm->arch.vmid */
539 struct kvm *kvm = vcpu->kvm;
552 if (likely(irqchip_in_kernel(kvm))) {
557 if (unlikely(!vgic_ready(kvm))) {
558 ret = kvm_vgic_map_resources(kvm);
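Lines 539-558 sit in the lazy first-run initialization done on the first KVM_RUN of any vcpu. An abridged sketch of the v5.10-era surroundings; the finalization check, the userspace-irqchip static branch, and the timer/PMU enablement are elided:

static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
{
        struct kvm *kvm = vcpu->kvm;
        int ret = 0;

        if (likely(vcpu->arch.has_run_once))
                return 0;

        vcpu->arch.has_run_once = true;

        if (likely(irqchip_in_kernel(kvm))) {
                /*
                 * Map the VGIC hardware resources before running a
                 * vcpu the first time on this VM.
                 */
                if (unlikely(!vgic_ready(kvm))) {
                        ret = kvm_vgic_map_resources(kvm);
                        if (ret)
                                return ret;
                }
        }

        /* ... timer and PMU enablement elided ... */

        return ret;
}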
579 bool kvm_arch_intc_initialized(struct kvm *kvm)
581 return vgic_initialized(kvm);
584 void kvm_arm_halt_guest(struct kvm *kvm)
589 kvm_for_each_vcpu(i, vcpu, kvm)
591 kvm_make_all_cpus_request(kvm, KVM_REQ_SLEEP);
594 void kvm_arm_resume_guest(struct kvm *kvm)
599 kvm_for_each_vcpu(i, vcpu, kvm) {
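Lines 584-599 are the halt/resume pair used around operations that must quiesce every vcpu: halt sets pause on each vcpu and kicks them all with KVM_REQ_SLEEP; resume clears pause and wakes each vcpu's rcuwait (the v5.8+ wait mechanism, not in the matches above). Roughly:

void kvm_arm_halt_guest(struct kvm *kvm)
{
        int i;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm)
                vcpu->arch.pause = true;
        kvm_make_all_cpus_request(kvm, KVM_REQ_SLEEP);
}

void kvm_arm_resume_guest(struct kvm *kvm)
{
        int i;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm) {
                vcpu->arch.pause = false;
                rcuwait_wake_up(kvm_arch_vcpu_get_wait(vcpu));
        }
}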
747 * Documentation/virt/kvm/vcpu-requests.rst
857 if (unlikely(!irqchip_in_kernel(vcpu->kvm))) {
902 int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
907 int nrcpus = atomic_read(&kvm->online_vcpus);
920 if (irqchip_in_kernel(kvm))
926 vcpu = kvm_get_vcpu(kvm, vcpu_idx);
935 if (!irqchip_in_kernel(kvm))
941 vcpu = kvm_get_vcpu(kvm, vcpu_idx);
948 return kvm_vgic_inject_irq(kvm, vcpu->vcpu_id, irq_num, level, NULL);
950 if (!irqchip_in_kernel(kvm))
956 return kvm_vgic_inject_irq(kvm, 0, irq_num, level, NULL);
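Lines 902-956 are the KVM_IRQ_LINE backend. The irq word encodes type, vcpu index, and line number fields; CPU-type lines are only legal with a userspace irqchip, while PPI/SPI require the in-kernel vgic. A condensed sketch: the irq_num range checks, the VCPU2 index bits, and the tracepoint of the real function are elided, and vcpu_interrupt_line() is the file-local helper for userspace-irqchip lines:

int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
                          bool line_status)
{
        u32 irq = irq_level->irq;
        unsigned int irq_type, vcpu_idx, irq_num;
        int nrcpus = atomic_read(&kvm->online_vcpus);
        struct kvm_vcpu *vcpu = NULL;
        bool level = irq_level->level;

        irq_type = (irq >> KVM_ARM_IRQ_TYPE_SHIFT) & KVM_ARM_IRQ_TYPE_MASK;
        vcpu_idx = (irq >> KVM_ARM_IRQ_VCPU_SHIFT) & KVM_ARM_IRQ_VCPU_MASK;
        irq_num = (irq >> KVM_ARM_IRQ_NUM_SHIFT) & KVM_ARM_IRQ_NUM_MASK;

        switch (irq_type) {
        case KVM_ARM_IRQ_TYPE_CPU:
                /* Raw IRQ/FIQ lines only make sense without an in-kernel GIC */
                if (irqchip_in_kernel(kvm))
                        return -ENXIO;
                if (vcpu_idx >= nrcpus)
                        return -EINVAL;
                vcpu = kvm_get_vcpu(kvm, vcpu_idx);
                if (!vcpu)
                        return -EINVAL;
                return vcpu_interrupt_line(vcpu, irq_num, level);
        case KVM_ARM_IRQ_TYPE_PPI:
                if (!irqchip_in_kernel(kvm))
                        return -ENXIO;
                if (vcpu_idx >= nrcpus)
                        return -EINVAL;
                vcpu = kvm_get_vcpu(kvm, vcpu_idx);
                if (!vcpu)
                        return -EINVAL;
                /* PPIs are private: inject to the targeted vcpu */
                return kvm_vgic_inject_irq(kvm, vcpu->vcpu_id, irq_num, level, NULL);
        case KVM_ARM_IRQ_TYPE_SPI:
                if (!irqchip_in_kernel(kvm))
                        return -ENXIO;
                /* SPIs are shared: the vcpu id 0 here is a don't-care */
                return kvm_vgic_inject_irq(kvm, 0, irq_num, level, NULL);
        }

        return -EINVAL;
}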
1029 stage2_unmap_vm(vcpu->kvm);
1244 void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
1249 void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
1252 kvm_flush_remote_tlbs(kvm);
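Lines 1244-1252 show that arm64 needs no extra work to sync the dirty log and simply flushes the whole VM's stage-2 TLB when a memslot flush is requested. Per the v5.10-era source:

void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
        /* Nothing to do: the dirty bitmap is updated at fault time */
}

void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
                                        struct kvm_memory_slot *memslot)
{
        /* No per-memslot invalidation: flush the whole VMID */
        kvm_flush_remote_tlbs(kvm);
}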
1255 static int kvm_vm_ioctl_set_device_addr(struct kvm *kvm,
1269 return kvm_vgic_addr(kvm, type, &dev_addr->addr, true);
1278 struct kvm *kvm = filp->private_data;
1286 mutex_lock(&kvm->lock);
1287 ret = kvm_vgic_create(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
1288 mutex_unlock(&kvm->lock);
1296 return kvm_vm_ioctl_set_device_addr(kvm, &dev_addr);
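Lines 1255-1296 are VM ioctl plumbing: KVM_CREATE_IRQCHIP instantiates a v2 vgic under kvm->lock, and KVM_ARM_SET_DEVICE_ADDR decodes a device id/type pair and, for the v2 vgic, hands the address to kvm_vgic_addr(). A condensed sketch; vgic_present is a file-local flag, other ioctl cases are elided, and the mask/shift decoding follows the v5.10-era source:

static int kvm_vm_ioctl_set_device_addr(struct kvm *kvm,
                                        struct kvm_arm_device_addr *dev_addr)
{
        unsigned long dev_id, type;

        dev_id = (dev_addr->id & KVM_ARM_DEVICE_ID_MASK) >>
                 KVM_ARM_DEVICE_ID_SHIFT;
        type = (dev_addr->id & KVM_ARM_DEVICE_TYPE_MASK) >>
               KVM_ARM_DEVICE_TYPE_SHIFT;

        switch (dev_id) {
        case KVM_ARM_DEVICE_VGIC_V2:
                if (!vgic_present)
                        return -ENXIO;
                return kvm_vgic_addr(kvm, type, &dev_addr->addr, true);
        default:
                return -ENODEV;
        }
}

long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
        struct kvm *kvm = filp->private_data;
        void __user *argp = (void __user *)arg;

        switch (ioctl) {
        case KVM_CREATE_IRQCHIP: {
                int ret;
                if (!vgic_present)
                        return -ENXIO;
                mutex_lock(&kvm->lock);
                ret = kvm_vgic_create(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
                mutex_unlock(&kvm->lock);
                return ret;
        }
        case KVM_ARM_SET_DEVICE_ADDR: {
                struct kvm_arm_device_addr dev_addr;

                if (copy_from_user(&dev_addr, argp, sizeof(dev_addr)))
                        return -EFAULT;
                return kvm_vm_ioctl_set_device_addr(kvm, &dev_addr);
        }
        /* ... */
        default:
                return -EINVAL;
        }
}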
1699 struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr)
1705 kvm_for_each_vcpu(i, vcpu, kvm) {
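Lines 1699-1705 map an MPIDR affinity value back to its vcpu, as used by PSCI and the vgic. Approximately (MPIDR_HWID_BITMASK and kvm_vcpu_get_mpidr_aff() are not in the matches above):

struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr)
{
        struct kvm_vcpu *vcpu;
        int i;

        mpidr &= MPIDR_HWID_BITMASK;
        kvm_for_each_vcpu(i, vcpu, kvm) {
                if (mpidr == kvm_vcpu_get_mpidr_aff(vcpu))
                        return vcpu;
        }
        return NULL;
}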
1723 return kvm_vgic_v4_set_forwarding(irqfd->kvm, prod->irq,
1732 kvm_vgic_v4_unset_forwarding(irqfd->kvm, prod->irq,
1741 kvm_arm_halt_guest(irqfd->kvm);
1749 kvm_arm_resume_guest(irqfd->kvm);
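The final matches (lines 1723-1749) are the irqbypass callbacks wiring VFIO/irqfd producers to GICv4 direct injection: add/del toggle vgic-v4 forwarding for the producer's host IRQ, and stop/start bracket updates by halting and resuming the whole guest. A sketch from the v5.10-era source:

int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
                                     struct irq_bypass_producer *prod)
{
        struct kvm_kernel_irqfd *irqfd =
                container_of(cons, struct kvm_kernel_irqfd, consumer);

        return kvm_vgic_v4_set_forwarding(irqfd->kvm, prod->irq,
                                          &irqfd->irq_entry);
}

void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
                                      struct irq_bypass_producer *prod)
{
        struct kvm_kernel_irqfd *irqfd =
                container_of(cons, struct kvm_kernel_irqfd, consumer);

        kvm_vgic_v4_unset_forwarding(irqfd->kvm, prod->irq,
                                     &irqfd->irq_entry);
}

void kvm_arch_irq_bypass_stop(struct irq_bypass_consumer *cons)
{
        struct kvm_kernel_irqfd *irqfd =
                container_of(cons, struct kvm_kernel_irqfd, consumer);

        /* Quiesce all vcpus while the forwarding state changes */
        kvm_arm_halt_guest(irqfd->kvm);
}

void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *cons)
{
        struct kvm_kernel_irqfd *irqfd =
                container_of(cons, struct kvm_kernel_irqfd, consumer);

        kvm_arm_resume_guest(irqfd->kvm);
}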