Lines matching refs:vmx (each entry: line number in the source file, then the matching line)

50 #include <asm/vmx.h>
64 #include "vmx.h"
375 static __always_inline void vmx_disable_fb_clear(struct vcpu_vmx *vmx)
379 if (!vmx->disable_fb_clear)
386 vmx->msr_ia32_mcu_opt_ctrl = msr;
389 static __always_inline void vmx_enable_fb_clear(struct vcpu_vmx *vmx)
391 if (!vmx->disable_fb_clear)
394 vmx->msr_ia32_mcu_opt_ctrl &= ~FB_CLEAR_DIS;
395 native_wrmsrl(MSR_IA32_MCU_OPT_CTRL, vmx->msr_ia32_mcu_opt_ctrl);
398 static void vmx_update_fb_clear_dis(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx)
400 vmx->disable_fb_clear = vmx_fb_clear_ctrl_available;
413 vmx->disable_fb_clear = false;
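
The matches at lines 375-413 above are the FB_CLEAR handling: vmx_disable_fb_clear() sets the FB_CLEAR_DIS bit in MSR_IA32_MCU_OPT_CTRL before entering the guest and vmx_enable_fb_clear() clears it again after VM-exit, but only when vmx->disable_fb_clear says the mitigation can be skipped for this vCPU. A minimal, self-contained sketch of that toggle is below; the bit position and the fake_* names are illustrative assumptions, and the MSR is modelled as a plain field instead of rdmsrl()/wrmsrl().

/* Sketch of the vmx_disable_fb_clear()/vmx_enable_fb_clear() pattern:
 * set FB_CLEAR_DIS around guest entry, restore it on exit, and do nothing
 * when the per-vCPU policy says fill-buffer clearing must stay enabled.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FB_CLEAR_DIS (1ULL << 3)        /* assumed bit position, illustration only */

struct fake_vcpu_vmx {
        bool disable_fb_clear;          /* policy, decided when the guest writes MSRs */
        uint64_t msr_ia32_mcu_opt_ctrl; /* cached copy of the control MSR             */
};

static void fake_disable_fb_clear(struct fake_vcpu_vmx *vmx)
{
        if (!vmx->disable_fb_clear)
                return;
        vmx->msr_ia32_mcu_opt_ctrl |= FB_CLEAR_DIS;   /* real code: rdmsrl/wrmsrl */
}

static void fake_enable_fb_clear(struct fake_vcpu_vmx *vmx)
{
        if (!vmx->disable_fb_clear)
                return;
        vmx->msr_ia32_mcu_opt_ctrl &= ~FB_CLEAR_DIS;
}

int main(void)
{
        struct fake_vcpu_vmx vmx = { .disable_fb_clear = true };

        fake_disable_fb_clear(&vmx);    /* just before entering the guest */
        printf("in guest:   %#llx\n", (unsigned long long)vmx.msr_ia32_mcu_opt_ctrl);
        fake_enable_fb_clear(&vmx);     /* just after VM-exit */
        printf("after exit: %#llx\n", (unsigned long long)vmx.msr_ia32_mcu_opt_ctrl);
        return 0;
}
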
508 static inline void vmx_segment_cache_clear(struct vcpu_vmx *vmx)
510 vmx->segment_cache.bitmask = 0;
731 static inline int __vmx_find_uret_msr(struct vcpu_vmx *vmx, u32 msr)
735 for (i = 0; i < vmx->nr_uret_msrs; ++i)
736 if (vmx_uret_msrs_list[vmx->guest_uret_msrs[i].slot] == msr)
741 struct vmx_uret_msr *vmx_find_uret_msr(struct vcpu_vmx *vmx, u32 msr)
745 i = __vmx_find_uret_msr(vmx, msr);
747 return &vmx->guest_uret_msrs[i];
751 static int vmx_set_guest_uret_msr(struct vcpu_vmx *vmx,
758 if (msr - vmx->guest_uret_msrs < vmx->nr_active_uret_msrs) {
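
__vmx_find_uret_msr() (line 731) and vmx_find_uret_msr() (line 741) above are a linear lookup: each per-vCPU guest_uret_msrs entry stores a slot index into the global vmx_uret_msrs_list, and the scan matches on the MSR number found through that slot. The sketch below reproduces that lookup in isolation; the list contents and the stand-in struct names are made up for illustration.

/* Sketch of the user-return MSR lookup in __vmx_find_uret_msr() and
 * vmx_find_uret_msr(): each per-vCPU entry stores a 'slot' index into a
 * global list of supported MSR numbers, and lookup is a linear scan.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static const uint32_t uret_msrs_list[] = {
        0xc0000081, 0xc0000082, 0xc0000084,     /* e.g. STAR, LSTAR, SYSCALL_MASK */
};

struct uret_msr {
        unsigned int slot;      /* index into uret_msrs_list */
        uint64_t data;
        uint64_t mask;
};

struct fake_vcpu {
        struct uret_msr guest_uret_msrs[3];
        unsigned int nr_uret_msrs;
};

static int find_uret_msr_idx(const struct fake_vcpu *v, uint32_t msr)
{
        for (unsigned int i = 0; i < v->nr_uret_msrs; i++)
                if (uret_msrs_list[v->guest_uret_msrs[i].slot] == msr)
                        return (int)i;
        return -1;
}

static struct uret_msr *find_uret_msr(struct fake_vcpu *v, uint32_t msr)
{
        int i = find_uret_msr_idx(v, msr);

        return i < 0 ? NULL : &v->guest_uret_msrs[i];
}

int main(void)
{
        struct fake_vcpu v = {
                .guest_uret_msrs = { { .slot = 1 }, { .slot = 0 } },
                .nr_uret_msrs = 2,
        };

        printf("LSTAR entry: %p\n", (void *)find_uret_msr(&v, 0xc0000082));
        printf("unknown MSR: %p\n", (void *)find_uret_msr(&v, 0xdeadbeef));
        return 0;
}
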
818 static bool vmx_segment_cache_test_set(struct vcpu_vmx *vmx, unsigned seg,
824 if (!kvm_register_is_available(&vmx->vcpu, VCPU_EXREG_SEGMENTS)) {
825 kvm_register_mark_available(&vmx->vcpu, VCPU_EXREG_SEGMENTS);
826 vmx->segment_cache.bitmask = 0;
828 ret = vmx->segment_cache.bitmask & mask;
829 vmx->segment_cache.bitmask |= mask;
833 static u16 vmx_read_guest_seg_selector(struct vcpu_vmx *vmx, unsigned seg)
835 u16 *p = &vmx->segment_cache.seg[seg].selector;
837 if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_SEL))
842 static ulong vmx_read_guest_seg_base(struct vcpu_vmx *vmx, unsigned seg)
844 ulong *p = &vmx->segment_cache.seg[seg].base;
846 if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_BASE))
851 static u32 vmx_read_guest_seg_limit(struct vcpu_vmx *vmx, unsigned seg)
853 u32 *p = &vmx->segment_cache.seg[seg].limit;
855 if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_LIMIT))
860 static u32 vmx_read_guest_seg_ar(struct vcpu_vmx *vmx, unsigned seg)
862 u32 *p = &vmx->segment_cache.seg[seg].ar;
864 if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_AR))
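
The segment-cache matches above (lines 818-864) all funnel through one helper: vmx_segment_cache_test_set() checks a per-field bit in segment_cache.bitmask, sets it, and returns whether the cached value was already valid, so each VMCS segment field is read at most once until the cache is cleared (lines 508-510). A reduced sketch of that test-and-set caching, with the VMCS read replaced by a counter so it can run anywhere:

/* Sketch of the per-field segment cache: a bitmask records which fields have
 * been read since the cache was last cleared; test_set() reports a hit and
 * marks the field so the expensive read happens at most once per field.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum { SEG_FIELD_SEL = 1u << 0, SEG_FIELD_BASE = 1u << 1 };

struct seg_cache {
        uint32_t bitmask;       /* which fields are currently valid */
        uint16_t selector;
        uint64_t base;
};

static unsigned int vmcs_reads; /* stands in for vmcs_read*() */

static bool cache_test_set(struct seg_cache *c, uint32_t mask)
{
        bool hit = c->bitmask & mask;

        c->bitmask |= mask;
        return hit;
}

static uint16_t read_seg_selector(struct seg_cache *c)
{
        if (!cache_test_set(c, SEG_FIELD_SEL)) {
                vmcs_reads++;           /* would be a vmcs_read16() */
                c->selector = 0x10;     /* pretend hardware value   */
        }
        return c->selector;
}

int main(void)
{
        struct seg_cache c = { 0 };

        read_seg_selector(&c);
        read_seg_selector(&c);                   /* second call hits the cache */
        printf("VMCS reads: %u\n", vmcs_reads);  /* prints 1 */

        c.bitmask = 0;                           /* vmx_segment_cache_clear() */
        read_seg_selector(&c);
        printf("VMCS reads: %u\n", vmcs_reads);  /* prints 2 */
        return 0;
}
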
918 static bool msr_write_intercepted(struct vcpu_vmx *vmx, u32 msr)
923 if (!(exec_controls_get(vmx) & CPU_BASED_USE_MSR_BITMAPS))
926 msr_bitmap = vmx->loaded_vmcs->msr_bitmap;
938 unsigned int __vmx_vcpu_run_flags(struct vcpu_vmx *vmx)
942 if (vmx->loaded_vmcs->launched)
948 * it after vmexit and store it in vmx->spec_ctrl.
950 if (unlikely(!msr_write_intercepted(vmx, MSR_IA32_SPEC_CTRL)))
956 static void clear_atomic_switch_msr_special(struct vcpu_vmx *vmx,
959 vm_entry_controls_clearbit(vmx, entry);
960 vm_exit_controls_clearbit(vmx, exit);
974 static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
977 struct msr_autoload *m = &vmx->msr_autoload;
982 clear_atomic_switch_msr_special(vmx,
990 clear_atomic_switch_msr_special(vmx,
1014 static void add_atomic_switch_msr_special(struct vcpu_vmx *vmx,
1022 vm_entry_controls_setbit(vmx, entry);
1023 vm_exit_controls_setbit(vmx, exit);
1026 static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
1030 struct msr_autoload *m = &vmx->msr_autoload;
1035 add_atomic_switch_msr_special(vmx,
1046 add_atomic_switch_msr_special(vmx,
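
clear_atomic_switch_msr() (line 974) and add_atomic_switch_msr() (line 1026) above maintain vmx->msr_autoload, the VMCS MSR-autoload lists the CPU applies on VM-entry and VM-exit; the *_special variants instead flip dedicated VM-entry/VM-exit control bits so no list slot is consumed. The sketch below shows only the generic list maintenance as I understand it (update-in-place or append on add, swap-with-last on remove); the array size, MSR numbers and helper names are assumptions.

/* Sketch of msr_autoload-style list maintenance: add installs or updates a
 * {msr, value} pair, remove swaps the last entry into the freed slot so the
 * array stays dense.  The real code also mirrors the count into the
 * VM_ENTRY/VM_EXIT MSR-count VMCS fields; that part is omitted here.
 */
#include <stdint.h>
#include <stdio.h>

#define NR_AUTOLOAD_MSRS 8

struct autoload_entry { uint32_t index; uint64_t value; };

struct autoload_list {
        unsigned int nr;
        struct autoload_entry val[NR_AUTOLOAD_MSRS];
};

static int autoload_find(const struct autoload_list *l, uint32_t msr)
{
        for (unsigned int i = 0; i < l->nr; i++)
                if (l->val[i].index == msr)
                        return (int)i;
        return -1;
}

static int autoload_add(struct autoload_list *l, uint32_t msr, uint64_t value)
{
        int i = autoload_find(l, msr);

        if (i < 0) {
                if (l->nr == NR_AUTOLOAD_MSRS)
                        return -1;              /* list full */
                i = (int)l->nr++;
                l->val[i].index = msr;
        }
        l->val[i].value = value;                /* update in place if present */
        return 0;
}

static void autoload_clear(struct autoload_list *l, uint32_t msr)
{
        int i = autoload_find(l, msr);

        if (i < 0)
                return;
        l->val[i] = l->val[--l->nr];            /* swap last entry into the hole */
}

int main(void)
{
        struct autoload_list guest = { 0 };

        autoload_add(&guest, 0xc0000080 /* EFER */, 0x500);
        autoload_add(&guest, 0x38f /* PERF_GLOBAL_CTRL */, 0x7);
        autoload_clear(&guest, 0xc0000080);
        printf("entries: %u, first index: %#x\n", guest.nr, guest.val[0].index);
        return 0;
}
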
1092 static bool update_transition_efer(struct vcpu_vmx *vmx)
1094 u64 guest_efer = vmx->vcpu.arch.efer;
1119 (enable_ept && ((vmx->vcpu.arch.efer ^ host_efer) & EFER_NX))) {
1123 add_atomic_switch_msr(vmx, MSR_EFER,
1126 clear_atomic_switch_msr(vmx, MSR_EFER);
1130 i = __vmx_find_uret_msr(vmx, MSR_EFER);
1134 clear_atomic_switch_msr(vmx, MSR_EFER);
1139 vmx->guest_uret_msrs[i].data = guest_efer;
1140 vmx->guest_uret_msrs[i].mask = ~ignore_bits;
1174 static inline bool pt_can_write_msr(struct vcpu_vmx *vmx)
1177 !(vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN);
1214 static void pt_guest_enter(struct vcpu_vmx *vmx)
1223 rdmsrl(MSR_IA32_RTIT_CTL, vmx->pt_desc.host.ctl);
1224 if (vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) {
1226 pt_save_msr(&vmx->pt_desc.host, vmx->pt_desc.addr_range);
1227 pt_load_msr(&vmx->pt_desc.guest, vmx->pt_desc.addr_range);
1231 static void pt_guest_exit(struct vcpu_vmx *vmx)
1236 if (vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) {
1237 pt_save_msr(&vmx->pt_desc.guest, vmx->pt_desc.addr_range);
1238 pt_load_msr(&vmx->pt_desc.host, vmx->pt_desc.addr_range);
1242 wrmsrl(MSR_IA32_RTIT_CTL, vmx->pt_desc.host.ctl);
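
pt_guest_enter() (line 1214) and pt_guest_exit() (line 1231) swap the Intel PT configuration between a host context and a guest context, but only when the guest actually has tracing enabled (RTIT_CTL_TRACEEN in its ctl shadow); the host RTIT_CTL is always restored on exit (line 1242). A minimal sketch with the RTIT MSR accesses modelled as struct copies; hw_pt and the pt_ctx layout are stand-ins, not the real definitions.

/* Sketch of the pt_guest_enter()/pt_guest_exit() pattern: save host PT state
 * and load the guest's only when guest tracing is enabled, reverse it on exit,
 * and always put the host RTIT_CTL value back.
 */
#include <stdint.h>
#include <stdio.h>

#define RTIT_CTL_TRACEEN (1ULL << 0)

struct pt_ctx { uint64_t ctl; uint64_t status; /* ... more RTIT state ... */ };

struct pt_desc { struct pt_ctx host, guest; };

static struct pt_ctx hw_pt;     /* stands in for the real RTIT MSRs */

static void fake_pt_guest_enter(struct pt_desc *pt)
{
        pt->host.ctl = hw_pt.ctl;               /* rdmsrl(MSR_IA32_RTIT_CTL, ...) */
        if (pt->guest.ctl & RTIT_CTL_TRACEEN) {
                pt->host = hw_pt;               /* pt_save_msr(&pt->host, ...)    */
                hw_pt = pt->guest;              /* pt_load_msr(&pt->guest, ...)   */
        }
}

static void fake_pt_guest_exit(struct pt_desc *pt)
{
        if (pt->guest.ctl & RTIT_CTL_TRACEEN) {
                pt->guest = hw_pt;              /* pt_save_msr(&pt->guest, ...)   */
                hw_pt = pt->host;               /* pt_load_msr(&pt->host, ...)    */
        }
        hw_pt.ctl = pt->host.ctl;               /* wrmsrl(MSR_IA32_RTIT_CTL, ...) */
}

int main(void)
{
        struct pt_desc pt = { .guest.ctl = RTIT_CTL_TRACEEN | 0x10 };

        hw_pt.ctl = 0x1;                        /* host tracing configuration */
        fake_pt_guest_enter(&pt);
        printf("in guest, hw ctl = %#llx\n", (unsigned long long)hw_pt.ctl);
        fake_pt_guest_exit(&pt);
        printf("back in host, hw ctl = %#llx\n", (unsigned long long)hw_pt.ctl);
        return 0;
}
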
1274 struct vcpu_vmx *vmx = to_vmx(vcpu);
1283 vmx->req_immediate_exit = false;
1290 if (!vmx->guest_uret_msrs_loaded) {
1291 vmx->guest_uret_msrs_loaded = true;
1292 for (i = 0; i < vmx->nr_active_uret_msrs; ++i)
1293 kvm_set_user_return_msr(vmx->guest_uret_msrs[i].slot,
1294 vmx->guest_uret_msrs[i].data,
1295 vmx->guest_uret_msrs[i].mask);
1299 if (vmx->nested.need_vmcs12_to_shadow_sync)
1302 if (vmx->guest_state_loaded)
1305 host_state = &vmx->loaded_vmcs->host_state;
1323 vmx->msr_host_kernel_gs_base = current->thread.gsbase;
1328 vmx->msr_host_kernel_gs_base = read_msr(MSR_KERNEL_GS_BASE);
1331 wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
1340 vmx->guest_state_loaded = true;
1343 static void vmx_prepare_switch_to_host(struct vcpu_vmx *vmx)
1347 if (!vmx->guest_state_loaded)
1350 host_state = &vmx->loaded_vmcs->host_state;
1352 ++vmx->vcpu.stat.host_state_reload;
1355 rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
1375 wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
1378 vmx->guest_state_loaded = false;
1379 vmx->guest_uret_msrs_loaded = false;
1383 static u64 vmx_read_guest_kernel_gs_base(struct vcpu_vmx *vmx)
1386 if (vmx->guest_state_loaded)
1387 rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
1389 return vmx->msr_guest_kernel_gs_base;
1392 static void vmx_write_guest_kernel_gs_base(struct vcpu_vmx *vmx, u64 data)
1395 if (vmx->guest_state_loaded)
1398 vmx->msr_guest_kernel_gs_base = data;
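
vmx_read_guest_kernel_gs_base() (line 1383) and vmx_write_guest_kernel_gs_base() (line 1392) show the lazy-state rule used throughout this file: while vmx->guest_state_loaded is set, MSR_KERNEL_GS_BASE holds the guest value, so reads refresh the cached copy from the MSR and writes also hit the MSR; otherwise only the cached field is touched. A small sketch of that rule, with the MSR modelled by a global variable:

/* Sketch of lazy guest-MSR caching: while guest state is loaded the hardware
 * register is authoritative, otherwise the cached field is.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t hw_kernel_gs_base;       /* stands in for MSR_KERNEL_GS_BASE */

struct fake_vmx {
        bool guest_state_loaded;
        uint64_t msr_guest_kernel_gs_base;   /* cached guest value */
};

static uint64_t read_guest_kernel_gs_base(struct fake_vmx *vmx)
{
        if (vmx->guest_state_loaded)
                vmx->msr_guest_kernel_gs_base = hw_kernel_gs_base;  /* rdmsrl */
        return vmx->msr_guest_kernel_gs_base;
}

static void write_guest_kernel_gs_base(struct fake_vmx *vmx, uint64_t data)
{
        if (vmx->guest_state_loaded)
                hw_kernel_gs_base = data;                           /* wrmsrl */
        vmx->msr_guest_kernel_gs_base = data;
}

int main(void)
{
        struct fake_vmx vmx = { .guest_state_loaded = false };

        write_guest_kernel_gs_base(&vmx, 0x1000);   /* cached only, MSR untouched */
        printf("cached: %#llx, hw: %#llx\n",
               (unsigned long long)read_guest_kernel_gs_base(&vmx),
               (unsigned long long)hw_kernel_gs_base);
        return 0;
}
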
1405 struct vcpu_vmx *vmx = to_vmx(vcpu);
1406 bool already_loaded = vmx->loaded_vmcs->cpu == cpu;
1410 loaded_vmcs_clear(vmx->loaded_vmcs);
1421 list_add(&vmx->loaded_vmcs->loaded_vmcss_on_cpu_link,
1427 if (prev != vmx->loaded_vmcs->vmcs) {
1428 per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs;
1429 vmcs_load(vmx->loaded_vmcs->vmcs);
1463 vmx->loaded_vmcs->cpu = cpu;
1468 vmx->current_tsc_ratio != vcpu->arch.tsc_scaling_ratio)
1469 decache_tsc_multiplier(vmx);
1478 struct vcpu_vmx *vmx = to_vmx(vcpu);
1484 vmx->host_debugctlmsr = get_debugctlmsr();
1501 struct vcpu_vmx *vmx = to_vmx(vcpu);
1507 if (vmx->rmode.vm86_active) {
1509 save_rflags = vmx->rmode.save_rflags;
1512 vmx->rflags = rflags;
1514 return vmx->rflags;
1519 struct vcpu_vmx *vmx = to_vmx(vcpu);
1529 vmx->rflags = rflags;
1535 vmx->rflags = rflags;
1536 if (vmx->rmode.vm86_active) {
1537 vmx->rmode.save_rflags = rflags;
1542 if ((old_rflags ^ vmx->rflags) & X86_EFLAGS_VM)
1543 vmx->emulation_required = emulation_required(vcpu);
1577 struct vcpu_vmx *vmx = to_vmx(vcpu);
1584 if (data & vmx->pt_desc.ctl_bitmask)
1591 if ((vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) &&
1592 ((vmx->pt_desc.guest.ctl ^ data) & ~RTIT_CTL_TRACEEN))
1602 !intel_pt_validate_cap(vmx->pt_desc.caps,
1610 value = intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_mtc_periods);
1611 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_mtc) &&
1615 value = intel_pt_validate_cap(vmx->pt_desc.caps,
1617 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_psb_cyc) &&
1621 value = intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_psb_periods);
1622 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_psb_cyc) &&
1632 if ((value && (vmx->pt_desc.addr_range < 1)) || (value > 2))
1635 if ((value && (vmx->pt_desc.addr_range < 2)) || (value > 2))
1638 if ((value && (vmx->pt_desc.addr_range < 3)) || (value > 2))
1641 if ((value && (vmx->pt_desc.addr_range < 4)) || (value > 2))
1696 struct vcpu_vmx *vmx = to_vmx(vcpu);
1711 vmx->nested.mtf_pending = true;
1713 vmx->nested.mtf_pending = false;
1737 struct vcpu_vmx *vmx = to_vmx(vcpu);
1760 if (vmx->rmode.vm86_active) {
1768 WARN_ON_ONCE(vmx->emulation_required);
1772 vmx->vcpu.arch.event_exit_inst_len);
1782 static void vmx_setup_uret_msr(struct vcpu_vmx *vmx, unsigned int msr)
1787 from = __vmx_find_uret_msr(vmx, msr);
1790 to = vmx->nr_active_uret_msrs++;
1792 tmp = vmx->guest_uret_msrs[to];
1793 vmx->guest_uret_msrs[to] = vmx->guest_uret_msrs[from];
1794 vmx->guest_uret_msrs[from] = tmp;
1802 static void setup_msrs(struct vcpu_vmx *vmx)
1804 vmx->guest_uret_msrs_loaded = false;
1805 vmx->nr_active_uret_msrs = 0;
1811 if (is_long_mode(&vmx->vcpu) && (vmx->vcpu.arch.efer & EFER_SCE)) {
1812 vmx_setup_uret_msr(vmx, MSR_STAR);
1813 vmx_setup_uret_msr(vmx, MSR_LSTAR);
1814 vmx_setup_uret_msr(vmx, MSR_SYSCALL_MASK);
1817 if (update_transition_efer(vmx))
1818 vmx_setup_uret_msr(vmx, MSR_EFER);
1820 if (guest_cpuid_has(&vmx->vcpu, X86_FEATURE_RDTSCP))
1821 vmx_setup_uret_msr(vmx, MSR_TSC_AUX);
1823 vmx_setup_uret_msr(vmx, MSR_IA32_TSX_CTRL);
1826 vmx_update_msr_bitmap(&vmx->vcpu);
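
vmx_setup_uret_msr() (line 1782) activates one user-return MSR by swapping its entry into the active prefix of guest_uret_msrs (the first nr_active_uret_msrs entries), and setup_msrs() (line 1802) rebuilds that prefix from zero whenever long mode, EFER.SCE, RDTSCP support or the EFER-transition decision changes. A compact sketch of the swap-into-active-prefix idea; the MSR numbers in main() are only examples.

/* Sketch of setup_msrs()/vmx_setup_uret_msr(): the first nr_active entries of
 * the array are the MSRs that must be restored on return to userspace; an MSR
 * is activated by swapping it into that prefix.
 */
#include <stdint.h>
#include <stdio.h>

struct uret_msr { uint32_t index; uint64_t data; };

struct fake_vmx {
        struct uret_msr msrs[4];
        unsigned int nr_msrs;       /* all supported entries          */
        unsigned int nr_active;     /* active prefix that is switched */
};

static void setup_uret_msr(struct fake_vmx *v, uint32_t index)
{
        for (unsigned int from = 0; from < v->nr_msrs; from++) {
                if (v->msrs[from].index != index)
                        continue;
                unsigned int to = v->nr_active++;
                struct uret_msr tmp = v->msrs[to];

                v->msrs[to] = v->msrs[from];
                v->msrs[from] = tmp;
                return;
        }
}

int main(void)
{
        struct fake_vmx v = {
                /* e.g. STAR, LSTAR, SYSCALL_MASK, TSC_AUX */
                .msrs = { { 0xc0000081 }, { 0xc0000082 }, { 0xc0000084 }, { 0xc0000103 } },
                .nr_msrs = 4,
        };

        /* guest in long mode with EFER.SCE: activate the syscall MSRs */
        setup_uret_msr(&v, 0xc0000081);
        setup_uret_msr(&v, 0xc0000082);
        setup_uret_msr(&v, 0xc0000084);

        printf("active MSRs: %u (first %#x)\n", v.nr_active, v.msrs[0].index);
        return 0;
}
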
1892 struct vcpu_vmx *vmx = to_vmx(vcpu);
1905 msr_info->data = vmx_read_guest_kernel_gs_base(vmx);
1916 if (!msr_info->host_initiated && !vmx_has_waitpkg(vmx))
1919 msr_info->data = vmx->msr_ia32_umwait_control;
1946 !(vmx->msr_ia32_feature_control &
1952 msr_info->data = vmx->msr_ia32_feature_control;
1957 if (vmx_get_vmx_msr(&vmx->nested.msrs, msr_info->index,
1968 vmx->nested.enlightened_vmcs_enabled)
1975 msr_info->data = vmx->pt_desc.guest.ctl;
1980 msr_info->data = vmx->pt_desc.guest.status;
1984 !intel_pt_validate_cap(vmx->pt_desc.caps,
1987 msr_info->data = vmx->pt_desc.guest.cr3_match;
1991 (!intel_pt_validate_cap(vmx->pt_desc.caps,
1993 !intel_pt_validate_cap(vmx->pt_desc.caps,
1996 msr_info->data = vmx->pt_desc.guest.output_base;
2000 (!intel_pt_validate_cap(vmx->pt_desc.caps,
2002 !intel_pt_validate_cap(vmx->pt_desc.caps,
2005 msr_info->data = vmx->pt_desc.guest.output_mask;
2010 (index >= 2 * intel_pt_validate_cap(vmx->pt_desc.caps,
2014 msr_info->data = vmx->pt_desc.guest.addr_b[index / 2];
2016 msr_info->data = vmx->pt_desc.guest.addr_a[index / 2];
2025 msr = vmx_find_uret_msr(vmx, msr_info->index);
2053 struct vcpu_vmx *vmx = to_vmx(vcpu);
2066 vmx_segment_cache_clear(vmx);
2070 vmx_segment_cache_clear(vmx);
2074 vmx_write_guest_kernel_gs_base(vmx, data);
2115 if (!msr_info->host_initiated && !vmx_has_waitpkg(vmx))
2122 vmx->msr_ia32_umwait_control = data;
2132 vmx->spec_ctrl = data;
2217 vmx->msr_ia32_feature_control = data;
2230 vmx->nested.vmxon)
2233 vmx->pt_desc.guest.ctl = data;
2237 if (!pt_can_write_msr(vmx))
2241 vmx->pt_desc.guest.status = data;
2244 if (!pt_can_write_msr(vmx))
2246 if (!intel_pt_validate_cap(vmx->pt_desc.caps,
2249 vmx->pt_desc.guest.cr3_match = data;
2252 if (!pt_can_write_msr(vmx))
2254 if (!intel_pt_validate_cap(vmx->pt_desc.caps,
2256 !intel_pt_validate_cap(vmx->pt_desc.caps,
2261 vmx->pt_desc.guest.output_base = data;
2264 if (!pt_can_write_msr(vmx))
2266 if (!intel_pt_validate_cap(vmx->pt_desc.caps,
2268 !intel_pt_validate_cap(vmx->pt_desc.caps,
2271 vmx->pt_desc.guest.output_mask = data;
2274 if (!pt_can_write_msr(vmx))
2277 if (index >= 2 * intel_pt_validate_cap(vmx->pt_desc.caps,
2283 vmx->pt_desc.guest.addr_b[index / 2] = data;
2285 vmx->pt_desc.guest.addr_a[index / 2] = data;
2298 msr = vmx_find_uret_msr(vmx, msr_index);
2300 ret = vmx_set_guest_uret_msr(vmx, msr, data);
2307 vmx_update_fb_clear_dis(vcpu, vmx);
2808 struct vcpu_vmx *vmx = to_vmx(vcpu);
2814 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES);
2815 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS);
2816 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS);
2817 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS);
2818 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS);
2819 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS);
2821 vmx->rmode.vm86_active = 0;
2823 vmx_set_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR);
2827 flags |= vmx->rmode.save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
2835 fix_pmode_seg(vcpu, VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]);
2836 fix_pmode_seg(vcpu, VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]);
2837 fix_pmode_seg(vcpu, VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]);
2838 fix_pmode_seg(vcpu, VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]);
2839 fix_pmode_seg(vcpu, VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]);
2840 fix_pmode_seg(vcpu, VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]);
2879 struct vcpu_vmx *vmx = to_vmx(vcpu);
2882 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR);
2883 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES);
2884 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS);
2885 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS);
2886 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS);
2887 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS);
2888 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS);
2890 vmx->rmode.vm86_active = 1;
2900 vmx_segment_cache_clear(vmx);
2907 vmx->rmode.save_rflags = flags;
2915 fix_rmode_seg(VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]);
2916 fix_rmode_seg(VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]);
2917 fix_rmode_seg(VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]);
2918 fix_rmode_seg(VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]);
2919 fix_rmode_seg(VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]);
2920 fix_rmode_seg(VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]);
2927 struct vcpu_vmx *vmx = to_vmx(vcpu);
2928 struct vmx_uret_msr *msr = vmx_find_uret_msr(vmx, MSR_EFER);
2943 setup_msrs(vmx);
2976 struct vcpu_vmx *vmx = to_vmx(vcpu);
2991 vpid_sync_vcpu_single(vmx->vpid);
2992 vpid_sync_vcpu_single(vmx->nested.vpid02);
3076 struct vcpu_vmx *vmx = to_vmx(vcpu);
3088 if (vmx->rmode.vm86_active && (cr0 & X86_CR0_PE))
3091 if (!vmx->rmode.vm86_active && !(cr0 & X86_CR0_PE))
3130 exec_controls_setbit(vmx, CR3_EXITING_BITS);
3132 exec_controls_clearbit(vmx, CR3_EXITING_BITS);
3134 tmp = exec_controls_get(vmx);
3137 exec_controls_set(vmx, tmp);
3152 vmx->emulation_required = emulation_required(vcpu);
3230 struct vcpu_vmx *vmx = to_vmx(vcpu);
3241 else if (vmx->rmode.vm86_active)
3248 secondary_exec_controls_setbit(vmx, SECONDARY_EXEC_DESC);
3252 secondary_exec_controls_clearbit(vmx, SECONDARY_EXEC_DESC);
3290 struct vcpu_vmx *vmx = to_vmx(vcpu);
3293 if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) {
3294 *var = vmx->rmode.segs[seg];
3296 || var->selector == vmx_read_guest_seg_selector(vmx, seg))
3298 var->base = vmx_read_guest_seg_base(vmx, seg);
3299 var->selector = vmx_read_guest_seg_selector(vmx, seg);
3302 var->base = vmx_read_guest_seg_base(vmx, seg);
3303 var->limit = vmx_read_guest_seg_limit(vmx, seg);
3304 var->selector = vmx_read_guest_seg_selector(vmx, seg);
3305 ar = vmx_read_guest_seg_ar(vmx, seg);
3337 struct vcpu_vmx *vmx = to_vmx(vcpu);
3339 if (unlikely(vmx->rmode.vm86_active))
3342 int ar = vmx_read_guest_seg_ar(vmx, VCPU_SREG_SS);
3366 struct vcpu_vmx *vmx = to_vmx(vcpu);
3369 vmx_segment_cache_clear(vmx);
3371 if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) {
3372 vmx->rmode.segs[seg] = *var;
3376 fix_rmode_seg(seg, &vmx->rmode.segs[seg]);
3401 vmx->emulation_required = emulation_required(vcpu);
3811 static void vmx_msr_bitmap_l01_changed(struct vcpu_vmx *vmx)
3819 struct hv_enlightened_vmcs *evmcs = (void *)vmx->vmcs01.vmcs;
3830 struct vcpu_vmx *vmx = to_vmx(vcpu);
3831 unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap;
3836 vmx_msr_bitmap_l01_changed(vmx);
3847 clear_bit(idx, vmx->shadow_msr_intercept.read);
3849 clear_bit(idx, vmx->shadow_msr_intercept.write);
3875 struct vcpu_vmx *vmx = to_vmx(vcpu);
3876 unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap;
3881 vmx_msr_bitmap_l01_changed(vmx);
3892 set_bit(idx, vmx->shadow_msr_intercept.read);
3894 set_bit(idx, vmx->shadow_msr_intercept.write);
3969 struct vcpu_vmx *vmx = to_vmx(vcpu);
3971 u8 changed = mode ^ vmx->msr_bitmap_mode;
3979 vmx->msr_bitmap_mode = mode;
3984 struct vcpu_vmx *vmx = to_vmx(vcpu);
3985 bool flag = !(vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN);
3992 for (i = 0; i < vmx->pt_desc.addr_range; i++) {
4000 struct vcpu_vmx *vmx = to_vmx(vcpu);
4007 WARN_ON_ONCE(!vmx->nested.virtual_apic_map.gfn))
4012 vapic_page = vmx->nested.virtual_apic_map.hva;
4020 struct vcpu_vmx *vmx = to_vmx(vcpu);
4030 bool read = test_bit(i, vmx->shadow_msr_intercept.read);
4031 bool write = test_bit(i, vmx->shadow_msr_intercept.write);
4083 struct vcpu_vmx *vmx = to_vmx(vcpu);
4086 vector == vmx->nested.posted_intr_nv) {
4091 vmx->nested.pi_pending = true;
4109 struct vcpu_vmx *vmx = to_vmx(vcpu);
4119 if (pi_test_and_set_pir(vector, &vmx->pi_desc))
4123 if (pi_test_and_set_on(&vmx->pi_desc))
4138 void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
4154 vmx->loaded_vmcs->host_state.cr3 = cr3;
4159 vmx->loaded_vmcs->host_state.cr4 = cr4;
4195 void set_cr4_guest_host_mask(struct vcpu_vmx *vmx)
4197 struct kvm_vcpu *vcpu = &vmx->vcpu;
4203 if (is_guest_mode(&vmx->vcpu))
4209 u32 vmx_pin_based_exec_ctrl(struct vcpu_vmx *vmx)
4213 if (!kvm_vcpu_apicv_active(&vmx->vcpu))
4227 struct vcpu_vmx *vmx = to_vmx(vcpu);
4229 pin_controls_set(vmx, vmx_pin_based_exec_ctrl(vmx));
4232 secondary_exec_controls_setbit(vmx,
4236 secondary_exec_controls_clearbit(vmx,
4245 u32 vmx_exec_control(struct vcpu_vmx *vmx)
4249 if (vmx->vcpu.arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)
4252 if (!cpu_need_tpr_shadow(&vmx->vcpu)) {
4263 if (kvm_mwait_in_guest(vmx->vcpu.kvm))
4266 if (kvm_hlt_in_guest(vmx->vcpu.kvm))
4277 vmx_adjust_secondary_exec_control(struct vcpu_vmx *vmx, u32 *exec_control,
4297 vmx->nested.msrs.secondary_ctls_high |= control;
4299 vmx->nested.msrs.secondary_ctls_high &= ~control;
4308 #define vmx_adjust_sec_exec_control(vmx, exec_control, name, feat_name, ctrl_name, exiting) \
4313 __enabled = guest_cpuid_has(&(vmx)->vcpu, \
4315 vmx_adjust_secondary_exec_control(vmx, exec_control, \
4321 #define vmx_adjust_sec_exec_feature(vmx, exec_control, lname, uname) \
4322 vmx_adjust_sec_exec_control(vmx, exec_control, lname, uname, ENABLE_##uname, false)
4324 #define vmx_adjust_sec_exec_exiting(vmx, exec_control, lname, uname) \
4325 vmx_adjust_sec_exec_control(vmx, exec_control, lname, uname, uname##_EXITING, true)
4327 static void vmx_compute_secondary_exec_control(struct vcpu_vmx *vmx)
4329 struct kvm_vcpu *vcpu = &vmx->vcpu;
4337 if (vmx->vpid == 0)
4345 if (kvm_pause_in_guest(vmx->vcpu.kvm))
4375 vmx_adjust_secondary_exec_control(vmx, &exec_control,
4380 vmx_adjust_sec_exec_feature(vmx, &exec_control, rdtscp, RDTSCP);
4390 vmx_adjust_sec_exec_feature(vmx, &exec_control, invpcid, INVPCID);
4393 vmx_adjust_sec_exec_exiting(vmx, &exec_control, rdrand, RDRAND);
4394 vmx_adjust_sec_exec_exiting(vmx, &exec_control, rdseed, RDSEED);
4396 vmx_adjust_sec_exec_control(vmx, &exec_control, waitpkg, WAITPKG,
4399 vmx->secondary_exec_control = exec_control;
4417 static void init_vmcs(struct vcpu_vmx *vmx)
4423 vmcs_write64(MSR_BITMAP, __pa(vmx->vmcs01.msr_bitmap));
4428 pin_controls_set(vmx, vmx_pin_based_exec_ctrl(vmx));
4430 exec_controls_set(vmx, vmx_exec_control(vmx));
4433 vmx_compute_secondary_exec_control(vmx);
4434 secondary_exec_controls_set(vmx, vmx->secondary_exec_control);
4437 if (kvm_vcpu_apicv_active(&vmx->vcpu)) {
4446 vmcs_write64(POSTED_INTR_DESC_ADDR, __pa((&vmx->pi_desc)));
4449 if (!kvm_pause_in_guest(vmx->vcpu.kvm)) {
4451 vmx->ple_window = ple_window;
4452 vmx->ple_window_dirty = true;
4461 vmx_set_constant_host_state(vmx);
4470 vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val));
4472 vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val));
4475 vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
4477 vm_exit_controls_set(vmx, vmx_vmexit_ctrl());
4480 vm_entry_controls_set(vmx, vmx_vmentry_ctrl());
4482 vmx->vcpu.arch.cr0_guest_owned_bits = KVM_POSSIBLE_CR0_GUEST_BITS;
4483 vmcs_writel(CR0_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr0_guest_owned_bits);
4485 set_cr4_guest_host_mask(vmx);
4487 if (vmx->vpid != 0)
4488 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
4494 vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg));
4502 memset(&vmx->pt_desc, 0, sizeof(vmx->pt_desc));
4504 vmx->pt_desc.guest.output_mask = 0x7F;
4511 struct vcpu_vmx *vmx = to_vmx(vcpu);
4515 vmx->rmode.vm86_active = 0;
4516 vmx->spec_ctrl = 0;
4518 vmx->msr_ia32_umwait_control = 0;
4520 vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val();
4521 vmx->hv_deadline_tsc = -1;
4533 vmx_segment_cache_clear(vmx);
4577 setup_msrs(vmx);
4592 vmx->vcpu.arch.cr0 = cr0;
4599 vpid_sync_context(vmx->vpid);
4603 vmx_update_fb_clear_dis(vcpu, vmx);
4624 struct vcpu_vmx *vmx = to_vmx(vcpu);
4631 if (vmx->rmode.vm86_active) {
4642 vmx->vcpu.arch.event_exit_inst_len);
4652 struct vcpu_vmx *vmx = to_vmx(vcpu);
4663 vmx->loaded_vmcs->soft_vnmi_blocked = 1;
4664 vmx->loaded_vmcs->vnmi_blocked_time = 0;
4668 vmx->loaded_vmcs->nmi_known_unmasked = false;
4670 if (vmx->rmode.vm86_active) {
4683 struct vcpu_vmx *vmx = to_vmx(vcpu);
4687 return vmx->loaded_vmcs->soft_vnmi_blocked;
4688 if (vmx->loaded_vmcs->nmi_known_unmasked)
4691 vmx->loaded_vmcs->nmi_known_unmasked = !masked;
4697 struct vcpu_vmx *vmx = to_vmx(vcpu);
4700 if (vmx->loaded_vmcs->soft_vnmi_blocked != masked) {
4701 vmx->loaded_vmcs->soft_vnmi_blocked = masked;
4702 vmx->loaded_vmcs->vnmi_blocked_time = 0;
4705 vmx->loaded_vmcs->nmi_known_unmasked = !masked;
4892 struct vcpu_vmx *vmx = to_vmx(vcpu);
4898 vect_info = vmx->idt_vectoring_info;
4911 if (!vmx->rmode.vm86_active && is_gp_fault(intr_info)) {
4959 if (vmx->rmode.vm86_active && rmode_exception(vcpu, ex_no))
5007 vmx->vcpu.arch.event_exit_inst_len =
5400 struct vcpu_vmx *vmx = to_vmx(vcpu);
5407 idt_v = (vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK);
5408 idt_index = (vmx->idt_vectoring_info & VECTORING_INFO_VECTOR_MASK);
5409 type = (vmx->idt_vectoring_info & VECTORING_INFO_TYPE_MASK);
5425 if (vmx->idt_vectoring_info &
5541 struct vcpu_vmx *vmx = to_vmx(vcpu);
5545 intr_window_requested = exec_controls_get(vmx) &
5548 while (vmx->emulation_required && count-- != 0) {
5550 return handle_interrupt_window(&vmx->vcpu);
5558 if (vmx->emulation_required && !vmx->rmode.vm86_active &&
5586 struct vcpu_vmx *vmx = to_vmx(vcpu);
5587 unsigned int old = vmx->ple_window;
5589 vmx->ple_window = __grow_ple_window(old, ple_window,
5593 if (vmx->ple_window != old) {
5594 vmx->ple_window_dirty = true;
5596 vmx->ple_window, old);
5602 struct vcpu_vmx *vmx = to_vmx(vcpu);
5603 unsigned int old = vmx->ple_window;
5605 vmx->ple_window = __shrink_ple_window(old, ple_window,
5609 if (vmx->ple_window != old) {
5610 vmx->ple_window_dirty = true;
5612 vmx->ple_window, old);
5736 struct vcpu_vmx *vmx = to_vmx(vcpu);
5738 if (!vmx->req_immediate_exit &&
5739 !unlikely(vmx->loaded_vmcs->hv_timer_soft_disabled)) {
5838 struct vcpu_vmx *vmx = to_vmx(vcpu);
5841 if (!(vmx->exit_reason.failed_vmentry)) {
5842 *info2 = vmx->idt_vectoring_info;
5855 static void vmx_destroy_pml_buffer(struct vcpu_vmx *vmx)
5857 if (vmx->pml_pg) {
5858 __free_page(vmx->pml_pg);
5859 vmx->pml_pg = NULL;
5865 struct vcpu_vmx *vmx = to_vmx(vcpu);
5881 pml_buf = page_address(vmx->pml_pg);
6081 struct vcpu_vmx *vmx = to_vmx(vcpu);
6082 union vmx_exit_reason exit_reason = vmx->exit_reason;
6083 u32 vectoring_info = vmx->idt_vectoring_info;
6102 WARN_ON_ONCE(vmx->nested.nested_run_pending);
6105 if (vmx->emulation_required)
6135 if (unlikely(vmx->fail)) {
6174 vmx->loaded_vmcs->soft_vnmi_blocked)) {
6176 vmx->loaded_vmcs->soft_vnmi_blocked = 0;
6177 } else if (vmx->loaded_vmcs->vnmi_blocked_time > 1000000000LL &&
6188 vmx->loaded_vmcs->soft_vnmi_blocked = 0;
6220 vcpu_unimpl(vcpu, "vmx: unexpected exit reason 0x%x\n",
6320 struct vcpu_vmx *vmx = to_vmx(vcpu);
6332 vmx->nested.change_vmcs01_virtual_apic_mode = true;
6336 sec_exec_control = secondary_exec_controls_get(vmx);
6366 secondary_exec_controls_set(vmx, sec_exec_control);
6449 struct vcpu_vmx *vmx = to_vmx(vcpu);
6454 if (pi_test_on(&vmx->pi_desc)) {
6455 pi_clear_on(&vmx->pi_desc);
6462 kvm_apic_update_irr(vcpu, vmx->pi_desc.pir, &max_irr);
6493 struct vcpu_vmx *vmx = to_vmx(vcpu);
6495 pi_clear_on(&vmx->pi_desc);
6496 memset(vmx->pi_desc.pir, 0, sizeof(vmx->pi_desc.pir));
6509 static void handle_exception_nmi_irqoff(struct vcpu_vmx *vmx)
6512 u32 intr_info = vmx_get_intr_info(&vmx->vcpu);
6516 vmx->vcpu.arch.apf.host_apf_flags = kvm_read_and_reset_apf_flags();
6522 handle_interrupt_nmi_irqoff(&vmx->vcpu, nmi_entry);
6541 struct vcpu_vmx *vmx = to_vmx(vcpu);
6543 if (vmx->emulation_required)
6546 if (vmx->exit_reason.basic == EXIT_REASON_EXTERNAL_INTERRUPT)
6548 else if (vmx->exit_reason.basic == EXIT_REASON_EXCEPTION_NMI)
6549 handle_exception_nmi_irqoff(vmx);
6571 static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx)
6578 idtv_info_valid = vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK;
6581 if (vmx->loaded_vmcs->nmi_known_unmasked)
6584 exit_intr_info = vmx_get_intr_info(&vmx->vcpu);
6602 vmx->loaded_vmcs->nmi_known_unmasked =
6605 } else if (unlikely(vmx->loaded_vmcs->soft_vnmi_blocked))
6606 vmx->loaded_vmcs->vnmi_blocked_time +=
6608 vmx->loaded_vmcs->entry_time));
6665 static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
6667 __vmx_complete_interrupts(&vmx->vcpu, vmx->idt_vectoring_info,
6682 static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
6694 clear_atomic_switch_msr(vmx, msrs[i].msr);
6696 add_atomic_switch_msr(vmx, msrs[i].msr, msrs[i].guest,
6702 struct vcpu_vmx *vmx = to_vmx(vcpu);
6706 if (vmx->req_immediate_exit) {
6708 vmx->loaded_vmcs->hv_timer_soft_disabled = false;
6709 } else if (vmx->hv_deadline_tsc != -1) {
6711 if (vmx->hv_deadline_tsc > tscl)
6713 delta_tsc = (u32)((vmx->hv_deadline_tsc - tscl) >>
6719 vmx->loaded_vmcs->hv_timer_soft_disabled = false;
6720 } else if (!vmx->loaded_vmcs->hv_timer_soft_disabled) {
6722 vmx->loaded_vmcs->hv_timer_soft_disabled = true;
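
The vmx_update_hv_timer() matches above (lines 6702-6722) turn the absolute TSC deadline in vmx->hv_deadline_tsc into the relative value programmed into the VMX preemption timer: zero when an immediate exit was requested or the deadline has already passed, otherwise (deadline - now) scaled down by the preemption-timer rate. The sketch below reproduces only that saturating conversion; the rate shift, the 32-bit clamp and the function name are assumptions layered on top of what the listing shows.

/* Sketch of the deadline-to-preemption-timer conversion in
 * vmx_update_hv_timer(): saturate at zero if the deadline has passed and
 * scale by the preemption-timer rate (a power-of-two divider of the TSC).
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t hv_timer_value(uint64_t deadline_tsc, uint64_t now_tsc,
                               unsigned int rate_shift, bool immediate_exit)
{
        uint64_t delta = 0;

        if (!immediate_exit && deadline_tsc > now_tsc)
                delta = (deadline_tsc - now_tsc) >> rate_shift;

        /* the programmed value is 32 bits wide in this sketch; clamp it */
        return delta > UINT32_MAX ? UINT32_MAX : (uint32_t)delta;
}

int main(void)
{
        printf("%u\n", hv_timer_value(2000000, 1000000, 5, false)); /* 31250          */
        printf("%u\n", hv_timer_value(1000, 2000, 5, false));       /* 0: passed      */
        printf("%u\n", hv_timer_value(2000000, 1000000, 5, true));  /* 0: immediate   */
        return 0;
}
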
6726 void noinstr vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp)
6728 if (unlikely(host_rsp != vmx->loaded_vmcs->host_state.rsp)) {
6729 vmx->loaded_vmcs->host_state.rsp = host_rsp;
6734 void noinstr vmx_spec_ctrl_restore_host(struct vcpu_vmx *vmx,
6743 vmx->spec_ctrl = __rdmsr(MSR_IA32_SPEC_CTRL);
6753 vmx->spec_ctrl != hostval)
6772 struct vcpu_vmx *vmx,
6804 vmx_disable_fb_clear(vmx);
6809 vmx->fail = __vmx_vcpu_run(vmx, (unsigned long *)&vcpu->arch.regs,
6814 vmx_enable_fb_clear(vmx);
6839 struct vcpu_vmx *vmx = to_vmx(vcpu);
6845 vmx->loaded_vmcs->soft_vnmi_blocked))
6846 vmx->loaded_vmcs->entry_time = ktime_get();
6850 if (vmx->emulation_required)
6853 if (vmx->ple_window_dirty) {
6854 vmx->ple_window_dirty = false;
6855 vmcs_write32(PLE_WINDOW, vmx->ple_window);
6862 WARN_ON_ONCE(vmx->nested.need_vmcs12_to_shadow_sync);
6870 if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) {
6872 vmx->loaded_vmcs->host_state.cr3 = cr3;
6876 if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) {
6878 vmx->loaded_vmcs->host_state.cr4 = cr4;
6891 pt_guest_enter(vmx);
6893 atomic_switch_perf_msrs(vmx);
6906 x86_spec_ctrl_set_guest(vmx->spec_ctrl, 0);
6909 vmx_vcpu_enter_exit(vcpu, vmx, __vmx_vcpu_run_flags(vmx));
6920 if (vmx->host_debugctlmsr)
6921 update_debugctlmsr(vmx->host_debugctlmsr);
6938 pt_guest_exit(vmx);
6942 vmx->nested.nested_run_pending = 0;
6943 vmx->idt_vectoring_info = 0;
6945 if (unlikely(vmx->fail)) {
6946 vmx->exit_reason.full = 0xdead;
6950 vmx->exit_reason.full = vmcs_read32(VM_EXIT_REASON);
6951 if (unlikely((u16)vmx->exit_reason.basic == EXIT_REASON_MCE_DURING_VMENTRY))
6954 trace_kvm_exit(vmx->exit_reason.full, vcpu, KVM_ISA_VMX);
6956 if (unlikely(vmx->exit_reason.failed_vmentry))
6959 vmx->loaded_vmcs->launched = 1;
6960 vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
6962 vmx_recover_nmi_blocking(vmx);
6963 vmx_complete_interrupts(vmx);
6988 struct vcpu_vmx *vmx = to_vmx(vcpu);
6991 vmx_destroy_pml_buffer(vmx);
6992 free_vpid(vmx->vpid);
6994 free_loaded_vmcs(vmx->loaded_vmcs);
6999 struct vcpu_vmx *vmx;
7003 vmx = to_vmx(vcpu);
7007 vmx->vpid = allocate_vpid();
7016 vmx->pml_pg = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
7017 if (!vmx->pml_pg)
7025 int j = vmx->nr_uret_msrs;
7030 vmx->guest_uret_msrs[j].slot = i;
7031 vmx->guest_uret_msrs[j].data = 0;
7046 vmx->guest_uret_msrs[j].mask = ~(u64)TSX_CTRL_CPUID_CLEAR;
7048 vmx->guest_uret_msrs[j].mask = 0;
7051 vmx->guest_uret_msrs[j].mask = -1ull;
7054 ++vmx->nr_uret_msrs;
7057 err = alloc_loaded_vmcs(&vmx->vmcs01);
7069 struct hv_enlightened_vmcs *evmcs = (void *)vmx->vmcs01.vmcs;
7075 bitmap_fill(vmx->shadow_msr_intercept.read, MAX_POSSIBLE_PASSTHROUGH_MSRS);
7076 bitmap_fill(vmx->shadow_msr_intercept.write, MAX_POSSIBLE_PASSTHROUGH_MSRS);
7093 vmx->msr_bitmap_mode = 0;
7095 vmx->loaded_vmcs = &vmx->vmcs01;
7099 init_vmcs(vmx);
7115 memcpy(&vmx->nested.msrs, &vmcs_config.nested, sizeof(vmx->nested.msrs));
7117 memset(&vmx->nested.msrs, 0, sizeof(vmx->nested.msrs));
7119 vmx->nested.posted_intr_nv = -1;
7120 vmx->nested.current_vmptr = -1ull;
7123 vmx->msr_ia32_feature_control_valid_bits = FEAT_CTL_LOCKED;
7129 vmx->pi_desc.nv = POSTED_INTR_VECTOR;
7130 vmx->pi_desc.sn = 1;
7132 vmx->ept_pointer = INVALID_PAGE;
7137 free_loaded_vmcs(vmx->loaded_vmcs);
7139 vmx_destroy_pml_buffer(vmx);
7141 free_vpid(vmx->vpid);
7254 static void vmcs_set_secondary_exec_control(struct vcpu_vmx *vmx)
7268 u32 new_ctl = vmx->secondary_exec_control;
7269 u32 cur_ctl = secondary_exec_controls_get(vmx);
7271 secondary_exec_controls_set(vmx, (new_ctl & ~mask) | (cur_ctl & mask));
7280 struct vcpu_vmx *vmx = to_vmx(vcpu);
7283 vmx->nested.msrs.cr0_fixed1 = 0xffffffff;
7284 vmx->nested.msrs.cr4_fixed1 = X86_CR4_PCE;
7288 vmx->nested.msrs.cr4_fixed1 |= (_cr4_mask); \
7320 struct vcpu_vmx *vmx = to_vmx(vcpu);
7326 vmx->nested.msrs.entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS;
7327 vmx->nested.msrs.exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS;
7329 vmx->nested.msrs.entry_ctls_high &= ~VM_ENTRY_LOAD_BNDCFGS;
7330 vmx->nested.msrs.exit_ctls_high &= ~VM_EXIT_CLEAR_BNDCFGS;
7337 struct vcpu_vmx *vmx = to_vmx(vcpu);
7345 vmx->pt_desc.caps[CPUID_EAX + i*PT_CPUID_REGS_NUM] = best->eax;
7346 vmx->pt_desc.caps[CPUID_EBX + i*PT_CPUID_REGS_NUM] = best->ebx;
7347 vmx->pt_desc.caps[CPUID_ECX + i*PT_CPUID_REGS_NUM] = best->ecx;
7348 vmx->pt_desc.caps[CPUID_EDX + i*PT_CPUID_REGS_NUM] = best->edx;
7352 vmx->pt_desc.addr_range = intel_pt_validate_cap(vmx->pt_desc.caps,
7356 vmx->pt_desc.ctl_bitmask = ~(RTIT_CTL_TRACEEN | RTIT_CTL_OS |
7363 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_cr3_filtering))
7364 vmx->pt_desc.ctl_bitmask &= ~RTIT_CTL_CR3EN;
7370 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_psb_cyc))
7371 vmx->pt_desc.ctl_bitmask &= ~(RTIT_CTL_CYCLEACC |
7378 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_mtc))
7379 vmx->pt_desc.ctl_bitmask &= ~(RTIT_CTL_MTC_EN |
7383 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_ptwrite))
7384 vmx->pt_desc.ctl_bitmask &= ~(RTIT_CTL_FUP_ON_PTW |
7388 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_power_event_trace))
7389 vmx->pt_desc.ctl_bitmask &= ~RTIT_CTL_PWR_EVT_EN;
7392 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_topa_output))
7393 vmx->pt_desc.ctl_bitmask &= ~RTIT_CTL_TOPA;
7396 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_output_subsys))
7397 vmx->pt_desc.ctl_bitmask &= ~RTIT_CTL_FABRIC_EN;
7400 for (i = 0; i < vmx->pt_desc.addr_range; i++)
7401 vmx->pt_desc.ctl_bitmask &= ~(0xfULL << (32 + i * 4));
7406 struct vcpu_vmx *vmx = to_vmx(vcpu);
7412 vmx_compute_secondary_exec_control(vmx);
7413 vmcs_set_secondary_exec_control(vmx);
7436 msr = vmx_find_uret_msr(vmx, MSR_IA32_TSX_CTRL);
7439 vmx_set_guest_uret_msr(vmx, msr, enabled ? 0 : TSX_CTRL_RTM_DISABLE);
7443 set_cr4_guest_host_mask(vmx);
7608 struct vcpu_vmx *vmx;
7612 vmx = to_vmx(vcpu);
7640 vmx->hv_deadline_tsc = tscl + delta_tsc;
7722 struct vcpu_vmx *vmx = to_vmx(vcpu);
7724 vmx->nested.smm.guest_mode = is_guest_mode(vcpu);
7725 if (vmx->nested.smm.guest_mode)
7728 vmx->nested.smm.vmxon = vmx->nested.vmxon;
7729 vmx->nested.vmxon = false;
7736 struct vcpu_vmx *vmx = to_vmx(vcpu);
7739 if (vmx->nested.smm.vmxon) {
7740 vmx->nested.vmxon = true;
7741 vmx->nested.smm.vmxon = false;
7744 if (vmx->nested.smm.guest_mode) {
7749 vmx->nested.smm.guest_mode = false;
8178 pr_info("KVM: vmx: using Hyper-V Enlightened VMCS\n");