Lines Matching defs:kvm
5 * derived from drivers/kvm/kvm_main.c
39 #include <linux/kvm.h>
62 #include <linux/entry-kvm.h>
67 #include <trace/events/kvm.h>
143 #include <asm/kvm-x86-ops.h>
502 kvm_recalculate_apic_map(vcpu->kvm);
902 kvm_mmu_free_roots(vcpu->kvm, mmu, KVM_MMU_ROOT_CURRENT);
965 kvm_arch_has_noncoherent_dma(vcpu->kvm) &&
966 !kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
967 kvm_zap_gfn_range(vcpu->kvm, 0, ~0ULL);
1263 kvm_mmu_free_roots(vcpu->kvm, mmu, roots_to_free);
1798 struct kvm *kvm = vcpu->kvm;
1807 idx = srcu_read_lock(&kvm->srcu);
1809 msr_filter = srcu_dereference(kvm->arch.msr_filter, &kvm->srcu);
1831 srcu_read_unlock(&kvm->srcu, idx);
2041 if (!(vcpu->kvm->arch.user_space_msr_mask & msr_reason))
2127 if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MWAIT_NEVER_UD_FAULTS) &&
2314 static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock, int sec_hi_ofs)
2325 r = kvm_read_guest(kvm, wall_clock, &version, sizeof(version));
2334 if (kvm_write_guest(kvm, wall_clock, &version, sizeof(version)))
2342 wall_nsec = ktime_get_real_ns() - get_kvmclock_ns(kvm);
2348 kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc));
2352 kvm_write_guest(kvm, wall_clock + sec_hi_ofs,
2357 kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
2363 struct kvm_arch *ka = &vcpu->kvm->arch;
2525 struct kvm_arch *ka = &vcpu->kvm->arch;
2529 atomic_read(&vcpu->kvm->online_vcpus));
2544 atomic_read(&vcpu->kvm->online_vcpus),
2676 struct kvm *kvm = vcpu->kvm;
2678 lockdep_assert_held(&kvm->arch.tsc_write_lock);
2684 kvm->arch.last_tsc_nsec = ns;
2685 kvm->arch.last_tsc_write = tsc;
2686 kvm->arch.last_tsc_khz = vcpu->arch.virtual_tsc_khz;
2687 kvm->arch.last_tsc_offset = offset;
2701 * These values are tracked in kvm->arch.cur_xxx variables.
2703 kvm->arch.cur_tsc_generation++;
2704 kvm->arch.cur_tsc_nsec = ns;
2705 kvm->arch.cur_tsc_write = tsc;
2706 kvm->arch.cur_tsc_offset = offset;
2707 kvm->arch.nr_vcpus_matched_tsc = 0;
2708 } else if (vcpu->arch.this_tsc_generation != kvm->arch.cur_tsc_generation) {
2709 kvm->arch.nr_vcpus_matched_tsc++;
2713 vcpu->arch.this_tsc_generation = kvm->arch.cur_tsc_generation;
2714 vcpu->arch.this_tsc_nsec = kvm->arch.cur_tsc_nsec;
2715 vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write;
2722 struct kvm *kvm = vcpu->kvm;
2728 raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
2731 elapsed = ns - kvm->arch.last_tsc_nsec;
2742 u64 tsc_exp = kvm->arch.last_tsc_write +
2762 vcpu->arch.virtual_tsc_khz == kvm->arch.last_tsc_khz) {
2764 offset = kvm->arch.cur_tsc_offset;
2774 raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
2954 static void pvclock_update_vm_gtod_copy(struct kvm *kvm)
2957 struct kvm_arch *ka = &kvm->arch;
2961 lockdep_assert_held(&kvm->arch.tsc_write_lock);
2963 atomic_read(&kvm->online_vcpus));
2986 static void kvm_make_mclock_inprogress_request(struct kvm *kvm)
2988 kvm_make_all_cpus_request(kvm, KVM_REQ_MCLOCK_INPROGRESS);
2991 static void __kvm_start_pvclock_update(struct kvm *kvm)
2993 raw_spin_lock_irq(&kvm->arch.tsc_write_lock);
2994 write_seqcount_begin(&kvm->arch.pvclock_sc);
2997 static void kvm_start_pvclock_update(struct kvm *kvm)
2999 kvm_make_mclock_inprogress_request(kvm);
3002 __kvm_start_pvclock_update(kvm);
3005 static void kvm_end_pvclock_update(struct kvm *kvm)
3007 struct kvm_arch *ka = &kvm->arch;
3013 kvm_for_each_vcpu(i, vcpu, kvm)
3017 kvm_for_each_vcpu(i, vcpu, kvm)
3021 static void kvm_update_masterclock(struct kvm *kvm)
3023 kvm_hv_request_tsc_page_update(kvm);
3024 kvm_start_pvclock_update(kvm);
3025 pvclock_update_vm_gtod_copy(kvm);
3026 kvm_end_pvclock_update(kvm);
3045 /* Called within read_seqcount_begin/retry for kvm->pvclock_sc. */
3046 static void __get_kvmclock(struct kvm *kvm, struct kvm_clock_data *data)
3048 struct kvm_arch *ka = &kvm->arch;
3081 static void get_kvmclock(struct kvm *kvm, struct kvm_clock_data *data)
3083 struct kvm_arch *ka = &kvm->arch;
3088 __get_kvmclock(kvm, data);
3092 u64 get_kvmclock_ns(struct kvm *kvm)
3096 get_kvmclock(kvm, &data);
3143 mark_page_dirty_in_slot(v->kvm, gpc->memslot, gpc->gpa >> PAGE_SHIFT);
3154 struct kvm_arch *ka = &v->kvm->arch;
3226 vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset;
3243 kvm_hv_setup_tsc_page(v->kvm, &vcpu->hv_clock);
3269 struct kvm *kvm = container_of(ka, struct kvm, arch);
3272 kvm_for_each_vcpu(i, vcpu, kvm) {
3280 struct kvm *kvm = v->kvm;
3283 schedule_delayed_work(&kvm->arch.kvmclock_update_work,
3294 struct kvm *kvm = container_of(ka, struct kvm, arch);
3299 schedule_delayed_work(&kvm->arch.kvmclock_update_work, 0);
3300 schedule_delayed_work(&kvm->arch.kvmclock_sync_work,
3432 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa,
3531 if (kvm_xen_msr_enabled(vcpu->kvm)) {
3539 if (WARN_ON_ONCE(current->mm != vcpu->kvm->mm))
3542 slots = kvm_memslots(vcpu->kvm);
3550 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, gpa, sizeof(*st)) ||
3617 mark_page_dirty_in_slot(vcpu->kvm, ghc->memslot, gpa_to_gfn(ghc->gpa));
3637 if (msr && msr == vcpu->kvm->arch.xen_hvm_config.msr)
3763 if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT) &&
3814 vcpu->kvm->arch.wall_clock = data;
3815 kvm_write_wall_clock(vcpu->kvm, data, 0);
3821 vcpu->kvm->arch.wall_clock = data;
3822 kvm_write_wall_clock(vcpu->kvm, data, 0);
4195 msr_info->data = vcpu->kvm->arch.wall_clock;
4201 msr_info->data = vcpu->kvm->arch.wall_clock;
4316 !vcpu->kvm->arch.guest_can_read_msr_platform_info)
4449 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
4574 r = static_call(kvm_x86_has_emulated_msr)(kvm, MSR_IA32_SMBASE);
4806 return kvm_arch_has_noncoherent_dma(vcpu->kvm);
4852 if (!vcpu->kvm->arch.use_master_clock || vcpu->cpu == -1)
4890 if (unlikely(current->mm != vcpu->kvm->mm))
4893 slots = kvm_memslots(vcpu->kvm);
4906 mark_page_dirty_in_slot(vcpu->kvm, ghc->memslot, gpa_to_gfn(ghc->gpa));
4921 idx = srcu_read_lock(&vcpu->kvm->srcu);
4922 if (kvm_xen_msr_enabled(vcpu->kvm))
4926 srcu_read_unlock(&vcpu->kvm->srcu, idx);
4991 if (!irqchip_in_kernel(vcpu->kvm)) {
5001 if (pic_in_kernel(vcpu->kvm))
5181 if (!vcpu->kvm->arch.exception_payload_enabled &&
5201 if (!vcpu->kvm->arch.exception_payload_enabled)
5232 if (vcpu->kvm->arch.exception_payload_enabled)
5234 if (vcpu->kvm->arch.triple_fault_event) {
5252 if (!vcpu->kvm->arch.exception_payload_enabled)
5346 if (!vcpu->kvm->arch.triple_fault_event)
5531 struct kvm *kvm = vcpu->kvm;
5547 raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
5550 kvm->arch.last_tsc_khz == vcpu->arch.virtual_tsc_khz &&
5551 kvm->arch.last_tsc_offset == offset);
5557 raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
5614 if (!irqchip_in_kernel(vcpu->kvm))
5755 int idx = srcu_read_lock(&vcpu->kvm->srcu);
5757 srcu_read_unlock(&vcpu->kvm->srcu, idx);
5761 int idx = srcu_read_lock(&vcpu->kvm->srcu);
5763 srcu_read_unlock(&vcpu->kvm->srcu, idx);
5791 idx = srcu_read_lock(&vcpu->kvm->srcu);
5793 srcu_read_unlock(&vcpu->kvm->srcu, idx);
6024 idx = srcu_read_lock(&vcpu->kvm->srcu);
6026 srcu_read_unlock(&vcpu->kvm->srcu, idx);
6096 static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr)
6102 ret = static_call(kvm_x86_set_tss_addr)(kvm, addr);
6106 static int kvm_vm_ioctl_set_identity_map_addr(struct kvm *kvm,
6109 return static_call(kvm_x86_set_identity_map_addr)(kvm, ident_addr);
6112 static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
6118 mutex_lock(&kvm->slots_lock);
6120 kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
6121 kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;
6123 mutex_unlock(&kvm->slots_lock);
6127 static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
6129 struct kvm_pic *pic = kvm->arch.vpic;
6143 kvm_get_ioapic(kvm, &chip->chip.ioapic);
6152 static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
6154 struct kvm_pic *pic = kvm->arch.vpic;
6172 kvm_set_ioapic(kvm, &chip->chip.ioapic);
6182 static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps)
6184 struct kvm_kpit_state *kps = &kvm->arch.vpit->pit_state;
6194 static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps)
6197 struct kvm_pit *pit = kvm->arch.vpit;
6207 static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
6209 mutex_lock(&kvm->arch.vpit->pit_state.lock);
6210 memcpy(ps->channels, &kvm->arch.vpit->pit_state.channels,
6212 ps->flags = kvm->arch.vpit->pit_state.flags;
6213 mutex_unlock(&kvm->arch.vpit->pit_state.lock);
6218 static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
6223 struct kvm_pit *pit = kvm->arch.vpit;
6240 static int kvm_vm_ioctl_reinject(struct kvm *kvm,
6243 struct kvm_pit *pit = kvm->arch.vpit;
6256 void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
6268 kvm_for_each_vcpu(i, vcpu, kvm)
6272 int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
6275 if (!irqchip_in_kernel(kvm))
6278 irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
6284 int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
6299 kvm->arch.disabled_quirks = cap->args[0];
6303 mutex_lock(&kvm->lock);
6308 if (irqchip_in_kernel(kvm))
6310 if (kvm->created_vcpus)
6312 r = kvm_setup_empty_irq_routing(kvm);
6317 kvm->arch.irqchip_mode = KVM_IRQCHIP_SPLIT;
6318 kvm->arch.nr_reserved_ioapic_pins = cap->args[0];
6319 kvm_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_ABSENT);
6322 mutex_unlock(&kvm->lock);
6331 kvm->arch.x2apic_format = true;
6333 kvm->arch.x2apic_broadcast_quirk_disabled = true;
6343 kvm->arch.pause_in_guest = true;
6355 kvm->arch.mwait_in_guest = true;
6357 kvm->arch.hlt_in_guest = true;
6359 kvm->arch.cstate_in_guest = true;
6365 kvm->arch.guest_can_read_msr_platform_info = cap->args[0];
6369 kvm->arch.exception_payload_enabled = cap->args[0];
6373 kvm->arch.triple_fault_event = cap->args[0];
6380 kvm->arch.user_space_msr_mask = cap->args[0];
6394 kvm->arch.bus_lock_detection_enabled = true;
6408 kvm->arch.sgx_provisioning_allowed = true;
6419 r = static_call(kvm_x86_vm_copy_enc_context_from)(kvm, cap->args[0]);
6426 r = static_call(kvm_x86_vm_move_enc_context_from)(kvm, cap->args[0]);
6433 kvm->arch.hypercall_exit_enabled = cap->args[0];
6440 kvm->arch.exit_on_emulation_error = cap->args[0];
6448 mutex_lock(&kvm->lock);
6449 if (!kvm->created_vcpus) {
6450 kvm->arch.enable_pmu = !(cap->args[0] & KVM_PMU_CAP_DISABLE);
6453 mutex_unlock(&kvm->lock);
6460 mutex_lock(&kvm->lock);
6461 if (kvm->arch.max_vcpu_ids == cap->args[0]) {
6463 } else if (!kvm->arch.max_vcpu_ids) {
6464 kvm->arch.max_vcpu_ids = cap->args[0];
6467 mutex_unlock(&kvm->lock);
6477 mutex_lock(&kvm->lock);
6478 if (!kvm->created_vcpus) {
6479 kvm->arch.notify_window = cap->args[0] >> 32;
6480 kvm->arch.notify_vmexit_flags = (u32)cap->args[0];
6483 mutex_unlock(&kvm->lock);
6495 * /dev/kvm into a container does not limit the scope of the
6507 mutex_lock(&kvm->lock);
6508 if (!kvm->created_vcpus) {
6509 kvm->arch.disable_nx_huge_pages = true;
6512 mutex_unlock(&kvm->lock);
6580 static int kvm_vm_ioctl_set_msr_filter(struct kvm *kvm,
6611 mutex_lock(&kvm->lock);
6612 old_filter = rcu_replace_pointer(kvm->arch.msr_filter, new_filter,
6613 mutex_is_locked(&kvm->lock));
6614 mutex_unlock(&kvm->lock);
6615 synchronize_srcu(&kvm->srcu);
6619 kvm_make_all_cpus_request(kvm, KVM_REQ_MSR_FILTER_CHANGED);
6644 struct kvm *kvm = filp->private_data;
6671 r = kvm_vm_ioctl_set_msr_filter(kvm, &filter);
6681 static int kvm_arch_suspend_notifier(struct kvm *kvm)
6687 mutex_lock(&kvm->lock);
6688 kvm_for_each_vcpu(i, vcpu, kvm) {
6699 mutex_unlock(&kvm->lock);
6704 int kvm_arch_pm_notifier(struct kvm *kvm, unsigned long state)
6709 return kvm_arch_suspend_notifier(kvm);
6716 static int kvm_vm_ioctl_get_clock(struct kvm *kvm, void __user *argp)
6720 get_kvmclock(kvm, &data);
6727 static int kvm_vm_ioctl_set_clock(struct kvm *kvm, void __user *argp)
6729 struct kvm_arch *ka = &kvm->arch;
6743 kvm_hv_request_tsc_page_update(kvm);
6744 kvm_start_pvclock_update(kvm);
6745 pvclock_update_vm_gtod_copy(kvm);
6769 kvm_end_pvclock_update(kvm);
6775 struct kvm *kvm = filp->private_data;
6791 r = kvm_vm_ioctl_set_tss_addr(kvm, arg);
6796 mutex_lock(&kvm->lock);
6798 if (kvm->created_vcpus)
6803 r = kvm_vm_ioctl_set_identity_map_addr(kvm, ident_addr);
6805 mutex_unlock(&kvm->lock);
6809 r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg);
6812 mutex_lock(&kvm->lock);
6815 if (irqchip_in_kernel(kvm))
6819 if (kvm->created_vcpus)
6822 r = kvm_pic_init(kvm);
6826 r = kvm_ioapic_init(kvm);
6828 kvm_pic_destroy(kvm);
6832 r = kvm_setup_default_irq_routing(kvm);
6834 kvm_ioapic_destroy(kvm);
6835 kvm_pic_destroy(kvm);
6838 /* Write kvm->irq_routing before enabling irqchip_in_kernel. */
6840 kvm->arch.irqchip_mode = KVM_IRQCHIP_KERNEL;
6841 kvm_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_ABSENT);
6843 mutex_unlock(&kvm->lock);
6855 mutex_lock(&kvm->lock);
6857 if (kvm->arch.vpit)
6860 kvm->arch.vpit = kvm_create_pit(kvm, u.pit_config.flags);
6861 if (kvm->arch.vpit)
6864 mutex_unlock(&kvm->lock);
6877 if (!irqchip_kernel(kvm))
6879 r = kvm_vm_ioctl_get_irqchip(kvm, chip);
6901 if (!irqchip_kernel(kvm))
6903 r = kvm_vm_ioctl_set_irqchip(kvm, chip);
6913 if (!kvm->arch.vpit)
6915 r = kvm_vm_ioctl_get_pit(kvm, &u.ps);
6928 mutex_lock(&kvm->lock);
6930 if (!kvm->arch.vpit)
6932 r = kvm_vm_ioctl_set_pit(kvm, &u.ps);
6934 mutex_unlock(&kvm->lock);
6939 if (!kvm->arch.vpit)
6941 r = kvm_vm_ioctl_get_pit2(kvm, &u.ps2);
6954 mutex_lock(&kvm->lock);
6956 if (!kvm->arch.vpit)
6958 r = kvm_vm_ioctl_set_pit2(kvm, &u.ps2);
6960 mutex_unlock(&kvm->lock);
6969 if (!kvm->arch.vpit)
6971 r = kvm_vm_ioctl_reinject(kvm, &control);
6976 mutex_lock(&kvm->lock);
6977 if (kvm->created_vcpus)
6980 kvm->arch.bsp_vcpu_id = arg;
6981 mutex_unlock(&kvm->lock);
6989 r = kvm_xen_hvm_config(kvm, &xhc);
6998 r = kvm_xen_hvm_get_attr(kvm, &xha);
7009 r = kvm_xen_hvm_set_attr(kvm, &xha);
7018 r = kvm_xen_hvm_evtchn_send(kvm, &uxe);
7023 r = kvm_vm_ioctl_set_clock(kvm, argp);
7026 r = kvm_vm_ioctl_get_clock(kvm, argp);
7041 WRITE_ONCE(kvm->arch.default_tsc_khz, user_tsc_khz);
7047 r = READ_ONCE(kvm->arch.default_tsc_khz);
7055 r = static_call(kvm_x86_mem_enc_ioctl)(kvm, argp);
7069 r = static_call(kvm_x86_mem_enc_register_region)(kvm, &region);
7083 r = static_call(kvm_x86_mem_enc_unregister_region)(kvm, &region);
7092 r = kvm_vm_ioctl_hv_eventfd(kvm, &hvevfd);
7096 r = kvm_vm_ioctl_set_pmu_event_filter(kvm, argp);
7105 r = kvm_vm_ioctl_set_msr_filter(kvm, &filter);
7506 char sig[5]; /* ud2; .ascii "kvm" */
8322 struct kvm *kvm = emul_to_vcpu(ctxt)->kvm;
8324 if (!kvm->vm_bugged)
8325 kvm_vm_bugged(kvm);
8545 struct kvm *kvm = vcpu->kvm;
8555 if (kvm->arch.exit_on_emulation_error ||
8605 pfn = gfn_to_pfn(vcpu->kvm, gpa_to_gfn(gpa));
8620 write_lock(&vcpu->kvm->mmu_lock);
8621 indirect_shadow_pages = vcpu->kvm->arch.indirect_shadow_pages;
8622 write_unlock(&vcpu->kvm->mmu_lock);
8625 kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
8635 kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
8688 kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
9109 kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_OUT_7E_INC_RIP)) {
9199 struct kvm *kvm;
9203 list_for_each_entry(kvm, &vm_list, vm_list)
9204 kvm_make_mclock_inprogress_request(kvm);
9216 list_for_each_entry(kvm, &vm_list, vm_list) {
9217 __kvm_start_pvclock_update(kvm);
9218 pvclock_update_vm_gtod_copy(kvm);
9219 kvm_end_pvclock_update(kvm);
9228 struct kvm *kvm;
9275 list_for_each_entry(kvm, &vm_list, vm_list) {
9276 kvm_for_each_vcpu(i, vcpu, kvm) {
9351 cpuhp_setup_state(CPUHP_AP_X86_KVM_CLK_ONLINE, "x86/kvm/clk:online",
9359 struct kvm *kvm;
9364 list_for_each_entry(kvm, &vm_list, vm_list)
9365 kvm_for_each_vcpu(i, vcpu, kvm)
9424 #include <asm/kvm-x86-ops.h>
9710 if (kvm_write_guest(vcpu->kvm, paddr, &clock_pairing,
9723 static void kvm_pv_kick_cpu_op(struct kvm *kvm, int apicid)
9736 kvm_irq_delivery_to_apic(kvm, NULL, &lapic_irq, NULL);
9739 bool kvm_apicv_activated(struct kvm *kvm)
9741 return (READ_ONCE(kvm->arch.apicv_inhibit_reasons) == 0);
9747 ulong vm_reasons = READ_ONCE(vcpu->kvm->arch.apicv_inhibit_reasons);
9765 static void kvm_apicv_init(struct kvm *kvm)
9767 unsigned long *inhibits = &kvm->arch.apicv_inhibit_reasons;
9769 init_rwsem(&kvm->arch.apicv_update_lock);
9789 map = rcu_dereference(vcpu->kvm->arch.apic_map);
9828 if (kvm_xen_hypercall_enabled(vcpu->kvm))
9866 kvm_pv_kick_cpu_op(vcpu->kvm, a1);
9879 ret = kvm_pv_send_ipi(vcpu->kvm, a0, a1, a2, a3, op_64_bit);
9892 if (!(vcpu->kvm->arch.hypercall_exit_enabled & (1 << KVM_HC_MAP_GPA_RANGE)))
9938 if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_FIX_HYPERCALL_INSN)) {
9954 likely(!pic_in_kernel(vcpu->kvm));
9957 /* Called within kvm->srcu read side. */
9967 pic_in_kernel(vcpu->kvm) ||
10318 void kvm_make_scan_ioapic_request_mask(struct kvm *kvm,
10321 kvm_make_vcpus_request_mask(kvm, KVM_REQ_SCAN_IOAPIC, vcpu_bitmap);
10324 void kvm_make_scan_ioapic_request(struct kvm *kvm)
10326 kvm_make_all_cpus_request(kvm, KVM_REQ_SCAN_IOAPIC);
10337 down_read(&vcpu->kvm->arch.apicv_update_lock);
10362 up_read(&vcpu->kvm->arch.apicv_update_lock);
10389 void __kvm_set_or_clear_apicv_inhibit(struct kvm *kvm,
10394 lockdep_assert_held_write(&kvm->arch.apicv_update_lock);
10399 old = new = kvm->arch.apicv_inhibit_reasons;
10416 kvm_make_all_cpus_request(kvm, KVM_REQ_APICV_UPDATE);
10417 kvm->arch.apicv_inhibit_reasons = new;
10420 int idx = srcu_read_lock(&kvm->srcu);
10422 kvm_zap_gfn_range(kvm, gfn, gfn+1);
10423 srcu_read_unlock(&kvm->srcu, idx);
10426 kvm->arch.apicv_inhibit_reasons = new;
10430 void kvm_set_or_clear_apicv_inhibit(struct kvm *kvm,
10436 down_write(&kvm->arch.apicv_update_lock);
10437 __kvm_set_or_clear_apicv_inhibit(kvm, reason, set);
10438 up_write(&kvm->arch.apicv_update_lock);
10449 if (irqchip_split(vcpu->kvm))
10453 if (ioapic_in_kernel(vcpu->kvm))
10482 void kvm_arch_guest_memory_reclaimed(struct kvm *kvm)
10484 static_call_cond(kvm_x86_guest_memory_reclaimed)(kvm);
10502 * Called within kvm->srcu read side.
10539 kvm_update_masterclock(vcpu->kvm);
10892 /* Called within kvm->srcu read side. */
10966 /* Called within kvm->srcu read side. */
11547 idx = srcu_read_lock(&vcpu->kvm->srcu);
11552 srcu_read_unlock(&vcpu->kvm->srcu, idx);
11643 static void kvm_arch_vcpu_guestdbg_update_apicv_inhibit(struct kvm *kvm)
11652 down_write(&kvm->arch.apicv_update_lock);
11654 kvm_for_each_vcpu(i, vcpu, kvm) {
11660 __kvm_set_or_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_BLOCKIRQ, set);
11661 up_write(&kvm->arch.apicv_update_lock);
11716 kvm_arch_vcpu_guestdbg_update_apicv_inhibit(vcpu->kvm);
11737 idx = srcu_read_lock(&vcpu->kvm->srcu);
11739 srcu_read_unlock(&vcpu->kvm->srcu, idx);
11839 int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
11841 if (kvm_check_tsc_unstable() && kvm->created_vcpus)
11845 if (!kvm->arch.max_vcpu_ids)
11846 kvm->arch.max_vcpu_ids = KVM_MAX_VCPU_IDS;
11848 if (id >= kvm->arch.max_vcpu_ids)
11851 return static_call(kvm_x86_vcpu_precreate)(kvm);
11863 kvm_gpc_init(&vcpu->arch.pv_time, vcpu->kvm, vcpu, KVM_HOST_USES_PFN);
11865 if (!irqchip_in_kernel(vcpu->kvm) || kvm_vcpu_is_reset_bsp(vcpu))
11874 if (irqchip_in_kernel(vcpu->kvm)) {
11949 kvm_set_tsc_khz(vcpu, vcpu->kvm->arch.default_tsc_khz);
11974 struct kvm *kvm = vcpu->kvm;
11988 schedule_delayed_work(&kvm->arch.kvmclock_sync_work,
12010 idx = srcu_read_lock(&vcpu->kvm->srcu);
12012 srcu_read_unlock(&vcpu->kvm->srcu, idx);
12184 struct kvm *kvm;
12204 list_for_each_entry(kvm, &vm_list, vm_list) {
12205 kvm_for_each_vcpu(i, vcpu, kvm) {
12256 list_for_each_entry(kvm, &vm_list, vm_list) {
12257 kvm->arch.backwards_tsc_observed = true;
12258 kvm_for_each_vcpu(i, vcpu, kvm) {
12270 kvm->arch.last_tsc_nsec = 0;
12271 kvm->arch.last_tsc_write = 0;
12286 return vcpu->kvm->arch.bsp_vcpu_id == vcpu->vcpu_id;
12309 void kvm_arch_free_vm(struct kvm *kvm)
12311 kfree(to_kvm_hv(kvm)->hv_pa_pg);
12312 __kvm_arch_free_vm(kvm);
12316 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
12324 ret = kvm_page_track_init(kvm);
12328 kvm_mmu_init_vm(kvm);
12330 ret = static_call(kvm_x86_vm_init)(kvm);
12334 INIT_HLIST_HEAD(&kvm->arch.mask_notifier_list);
12335 INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
12336 atomic_set(&kvm->arch.noncoherent_dma_count, 0);
12339 set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);
12342 &kvm->arch.irq_sources_bitmap);
12344 raw_spin_lock_init(&kvm->arch.tsc_write_lock);
12345 mutex_init(&kvm->arch.apic_map_lock);
12346 seqcount_raw_spinlock_init(&kvm->arch.pvclock_sc, &kvm->arch.tsc_write_lock);
12347 kvm->arch.kvmclock_offset = -get_kvmclock_base_ns();
12349 raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
12350 pvclock_update_vm_gtod_copy(kvm);
12351 raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
12353 kvm->arch.default_tsc_khz = max_tsc_khz ? : tsc_khz;
12354 kvm->arch.guest_can_read_msr_platform_info = true;
12355 kvm->arch.enable_pmu = enable_pmu;
12358 spin_lock_init(&kvm->arch.hv_root_tdp_lock);
12359 kvm->arch.hv_root_tdp = INVALID_PAGE;
12362 INIT_DELAYED_WORK(&kvm->arch.kvmclock_update_work, kvmclock_update_fn);
12363 INIT_DELAYED_WORK(&kvm->arch.kvmclock_sync_work, kvmclock_sync_fn);
12365 kvm_apicv_init(kvm);
12366 kvm_hv_init_vm(kvm);
12367 kvm_xen_init_vm(kvm);
12372 kvm_mmu_uninit_vm(kvm);
12373 kvm_page_track_cleanup(kvm);
12378 int kvm_arch_post_init_vm(struct kvm *kvm)
12380 return kvm_mmu_post_init_vm(kvm);
12390 static void kvm_unload_vcpu_mmus(struct kvm *kvm)
12395 kvm_for_each_vcpu(i, vcpu, kvm) {
12401 void kvm_arch_sync_events(struct kvm *kvm)
12403 cancel_delayed_work_sync(&kvm->arch.kvmclock_sync_work);
12404 cancel_delayed_work_sync(&kvm->arch.kvmclock_update_work);
12405 kvm_free_pit(kvm);
12411 * @kvm: the kvm pointer to the VM.
12430 void __user * __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa,
12435 struct kvm_memslots *slots = kvm_memslots(kvm);
12438 /* Called with kvm->slots_lock held. */
12471 r = __kvm_set_memory_region(kvm, &m);
12483 void kvm_arch_pre_destroy_vm(struct kvm *kvm)
12485 kvm_mmu_pre_destroy_vm(kvm);
12488 void kvm_arch_destroy_vm(struct kvm *kvm)
12490 if (current->mm == kvm->mm) {
12496 mutex_lock(&kvm->slots_lock);
12497 __x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT,
12499 __x86_set_memory_region(kvm, IDENTITY_PAGETABLE_PRIVATE_MEMSLOT,
12501 __x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, 0, 0);
12502 mutex_unlock(&kvm->slots_lock);
12504 kvm_unload_vcpu_mmus(kvm);
12505 static_call_cond(kvm_x86_vm_destroy)(kvm);
12506 kvm_free_msr_filter(srcu_dereference_check(kvm->arch.msr_filter, &kvm->srcu, 1));
12507 kvm_pic_destroy(kvm);
12508 kvm_ioapic_destroy(kvm);
12509 kvm_destroy_vcpus(kvm);
12510 kvfree(rcu_dereference_check(kvm->arch.apic_map, 1));
12511 kfree(srcu_dereference_check(kvm->arch.pmu_event_filter, &kvm->srcu, 1));
12512 kvm_mmu_uninit_vm(kvm);
12513 kvm_page_track_cleanup(kvm);
12514 kvm_xen_destroy_vm(kvm);
12515 kvm_hv_destroy_vm(kvm);
12528 void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
12564 static int kvm_alloc_memslot_metadata(struct kvm *kvm,
12577 if (kvm_memslots_have_rmaps(kvm)) {
12614 if (kvm_page_track_create_memslot(kvm, slot, npages))
12629 void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
12638 kvm_mmu_invalidate_mmio_sptes(kvm, gen);
12641 kvm_for_each_vcpu(i, vcpu, kvm)
12645 int kvm_arch_prepare_memory_region(struct kvm *kvm,
12654 if (change == KVM_MR_MOVE && kvm_page_track_has_external_user(kvm))
12661 return kvm_alloc_memslot_metadata(kvm, new);
12673 static void kvm_mmu_update_cpu_dirty_logging(struct kvm *kvm, bool enable)
12680 nr_slots = atomic_read(&kvm->nr_memslots_dirty_logging);
12682 kvm_make_all_cpus_request(kvm, KVM_REQ_UPDATE_CPU_DIRTY_LOGGING);
12685 static void kvm_mmu_slot_apply_flags(struct kvm *kvm,
12699 kvm_mmu_update_cpu_dirty_logging(kvm, log_dirty_pages);
12741 kvm_mmu_zap_collapsible_sptes(kvm, new);
12747 if (kvm_dirty_log_manual_protect_and_init_set(kvm))
12751 kvm_mmu_slot_try_split_huge_pages(kvm, new, PG_LEVEL_4K);
12754 kvm_mmu_slot_leaf_clear_dirty(kvm, new);
12755 kvm_mmu_slot_remove_write_access(kvm, new, PG_LEVEL_2M);
12757 kvm_mmu_slot_remove_write_access(kvm, new, PG_LEVEL_4K);
12802 kvm_flush_remote_tlbs_memslot(kvm, new);
12806 void kvm_arch_commit_memory_region(struct kvm *kvm,
12812 kvm_page_track_delete_slot(kvm, old);
12814 if (!kvm->arch.n_requested_mmu_pages &&
12818 nr_mmu_pages = kvm->nr_memslot_pages / KVM_MEMSLOT_PAGES_TO_MMU_PAGES_RATIO;
12820 kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
12823 kvm_mmu_slot_apply_flags(kvm, old, new, change);
12827 kvm_arch_free_memslot(kvm, old);
13053 return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &reason,
13061 return kvm_write_guest_offset_cached(vcpu->kvm, &vcpu->arch.apf.data,
13070 if (kvm_read_guest_offset_cached(vcpu->kvm, &vcpu->arch.apf.data,
13110 if (kvm_hlt_in_guest(vcpu->kvm) && !kvm_can_deliver_async_pf(vcpu))
13192 void kvm_arch_start_assignment(struct kvm *kvm)
13194 if (atomic_inc_return(&kvm->arch.assigned_device_count) == 1)
13195 static_call_cond(kvm_x86_pi_start_assignment)(kvm);
13199 void kvm_arch_end_assignment(struct kvm *kvm)
13201 atomic_dec(&kvm->arch.assigned_device_count);
13205 bool noinstr kvm_arch_has_assigned_device(struct kvm *kvm)
13207 return raw_atomic_read(&kvm->arch.assigned_device_count);
13211 void kvm_arch_register_noncoherent_dma(struct kvm *kvm)
13213 atomic_inc(&kvm->arch.noncoherent_dma_count);
13217 void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm)
13219 atomic_dec(&kvm->arch.noncoherent_dma_count);
13223 bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
13225 return atomic_read(&kvm->arch.noncoherent_dma_count);
13242 kvm_arch_start_assignment(irqfd->kvm);
13243 ret = static_call(kvm_x86_pi_update_irte)(irqfd->kvm,
13247 kvm_arch_end_assignment(irqfd->kvm);
13268 ret = static_call(kvm_x86_pi_update_irte)(irqfd->kvm, prod->irq, irqfd->gsi, 0);
13273 kvm_arch_end_assignment(irqfd->kvm);
13276 int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq,
13279 return static_call(kvm_x86_pi_update_irte)(kvm, host_irq, guest_irq, set);
13363 if (KVM_BUG_ON(!e, vcpu->kvm))