Lines matching refs:vmx in arch/x86/kvm/vmx/vmx.c (the leading number on each line is the line number in that file):
50 #include <asm/vmx.h>
66 #include "vmx.h"
365 static __always_inline void vmx_disable_fb_clear(struct vcpu_vmx *vmx)
369 if (!vmx->disable_fb_clear)
376 vmx->msr_ia32_mcu_opt_ctrl = msr;
379 static __always_inline void vmx_enable_fb_clear(struct vcpu_vmx *vmx)
381 if (!vmx->disable_fb_clear)
384 vmx->msr_ia32_mcu_opt_ctrl &= ~FB_CLEAR_DIS;
385 native_wrmsrl(MSR_IA32_MCU_OPT_CTRL, vmx->msr_ia32_mcu_opt_ctrl);
388 static void vmx_update_fb_clear_dis(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx)
398 vmx->disable_fb_clear = !cpu_feature_enabled(X86_FEATURE_CLEAR_CPU_BUF) &&
414 vmx->disable_fb_clear = false;
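
The fragments above show the fill-buffer mitigation toggle: when clearing is not needed for this vCPU (vmx->disable_fb_clear), FB_CLEAR_DIS is set in MSR_IA32_MCU_OPT_CTRL before guest entry and cleared again after exit, with the written value cached in the vcpu structure. A stand-alone sketch of that save/set/restore pattern follows; rd_msr()/wr_msr(), the struct layout and the bit value are placeholders, not the kernel's definitions.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FB_CLEAR_DIS (1ull << 3)        /* illustrative bit position */

/* Hypothetical stand-ins for the kernel's native_rdmsrl()/native_wrmsrl(). */
static uint64_t fake_msr;
static uint64_t rd_msr(void)         { return fake_msr; }
static void     wr_msr(uint64_t val) { fake_msr = val; }

struct vcpu_sketch {
        bool     disable_fb_clear;      /* decided once when MSRs change */
        uint64_t msr_ia32_mcu_opt_ctrl; /* cached MSR value across entry/exit */
};

/* Before guest entry: suppress the fill-buffer clear if it is not needed. */
static void disable_fb_clear(struct vcpu_sketch *v)
{
        if (!v->disable_fb_clear)
                return;
        uint64_t msr = rd_msr() | FB_CLEAR_DIS;
        wr_msr(msr);
        v->msr_ia32_mcu_opt_ctrl = msr;
}

/* After guest exit: re-enable the clear using the cached value. */
static void enable_fb_clear(struct vcpu_sketch *v)
{
        if (!v->disable_fb_clear)
                return;
        v->msr_ia32_mcu_opt_ctrl &= ~FB_CLEAR_DIS;
        wr_msr(v->msr_ia32_mcu_opt_ctrl);
}

int main(void)
{
        struct vcpu_sketch v = { .disable_fb_clear = true };
        disable_fb_clear(&v);           /* ...run the guest here... */
        enable_fb_clear(&v);
        printf("MSR after exit: %#llx\n", (unsigned long long)fake_msr);
        return 0;
}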
519 static inline void vmx_segment_cache_clear(struct vcpu_vmx *vmx)
521 vmx->segment_cache.bitmask = 0;
720 struct vmx_uret_msr *vmx_find_uret_msr(struct vcpu_vmx *vmx, u32 msr)
726 return &vmx->guest_uret_msrs[i];
730 static int vmx_set_guest_uret_msr(struct vcpu_vmx *vmx,
733 unsigned int slot = msr - vmx->guest_uret_msrs;
830 static bool vmx_segment_cache_test_set(struct vcpu_vmx *vmx, unsigned seg,
836 if (!kvm_register_is_available(&vmx->vcpu, VCPU_EXREG_SEGMENTS)) {
837 kvm_register_mark_available(&vmx->vcpu, VCPU_EXREG_SEGMENTS);
838 vmx->segment_cache.bitmask = 0;
840 ret = vmx->segment_cache.bitmask & mask;
841 vmx->segment_cache.bitmask |= mask;
845 static u16 vmx_read_guest_seg_selector(struct vcpu_vmx *vmx, unsigned seg)
847 u16 *p = &vmx->segment_cache.seg[seg].selector;
849 if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_SEL))
854 static ulong vmx_read_guest_seg_base(struct vcpu_vmx *vmx, unsigned seg)
856 ulong *p = &vmx->segment_cache.seg[seg].base;
858 if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_BASE))
863 static u32 vmx_read_guest_seg_limit(struct vcpu_vmx *vmx, unsigned seg)
865 u32 *p = &vmx->segment_cache.seg[seg].limit;
867 if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_LIMIT))
872 static u32 vmx_read_guest_seg_ar(struct vcpu_vmx *vmx, unsigned seg)
874 u32 *p = &vmx->segment_cache.seg[seg].ar;
876 if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_AR))
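
These segment-cache helpers avoid redundant VMREADs: segment_cache.bitmask records which (segment, field) pairs have already been fetched, vmx_segment_cache_test_set() marks a field cached and reports whether it already was, and each read helper only touches the VMCS on a miss. A compact stand-alone sketch of that test-and-set caching idiom follows; the field IDs, array sizes and the vmcs_read() stub are assumptions rather than the kernel's definitions.

#include <stdint.h>
#include <stdio.h>

enum { SEG_FIELD_SEL, SEG_FIELD_BASE, SEG_FIELD_LIMIT, SEG_FIELD_AR, SEG_FIELD_NR };

struct seg_cache {
        uint32_t bitmask;               /* one bit per (segment, field) pair */
        struct {
                uint16_t selector;
                unsigned long base;
        } seg[6];
};

/* Pretend VMCS read; in the kernel this is a VMREAD of the relevant field. */
static unsigned long vmcs_read(unsigned seg, unsigned field)
{
        return 0x1000 * seg + field;    /* dummy data for the sketch */
}

/* Returns nonzero if the field was already cached; marks it cached either way. */
static uint32_t cache_test_set(struct seg_cache *c, unsigned seg, unsigned field)
{
        uint32_t mask = 1u << (seg * SEG_FIELD_NR + field);
        uint32_t ret = c->bitmask & mask;
        c->bitmask |= mask;
        return ret;
}

static unsigned long read_seg_base(struct seg_cache *c, unsigned seg)
{
        unsigned long *p = &c->seg[seg].base;
        if (!cache_test_set(c, seg, SEG_FIELD_BASE))
                *p = vmcs_read(seg, SEG_FIELD_BASE);  /* miss: fill the cache */
        return *p;                                    /* hit: cached value */
}

int main(void)
{
        struct seg_cache c = { 0 };
        printf("%lx %lx\n", read_seg_base(&c, 2), read_seg_base(&c, 2));
        return 0;
}

As in the listed lines 521 and 838, the real cache is invalidated wholesale by zeroing the bitmask whenever the underlying register state may have changed.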
943 static bool msr_write_intercepted(struct vcpu_vmx *vmx, u32 msr)
945 if (!(exec_controls_get(vmx) & CPU_BASED_USE_MSR_BITMAPS))
948 return vmx_test_msr_bitmap_write(vmx->loaded_vmcs->msr_bitmap, msr);
951 unsigned int __vmx_vcpu_run_flags(struct vcpu_vmx *vmx)
955 if (vmx->loaded_vmcs->launched)
961 * it after vmexit and store it in vmx->spec_ctrl.
963 if (!msr_write_intercepted(vmx, MSR_IA32_SPEC_CTRL))
969 static __always_inline void clear_atomic_switch_msr_special(struct vcpu_vmx *vmx,
972 vm_entry_controls_clearbit(vmx, entry);
973 vm_exit_controls_clearbit(vmx, exit);
987 static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
990 struct msr_autoload *m = &vmx->msr_autoload;
995 clear_atomic_switch_msr_special(vmx,
1003 clear_atomic_switch_msr_special(vmx,
1027 static __always_inline void add_atomic_switch_msr_special(struct vcpu_vmx *vmx,
1035 vm_entry_controls_setbit(vmx, entry);
1036 vm_exit_controls_setbit(vmx, exit);
1039 static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
1043 struct msr_autoload *m = &vmx->msr_autoload;
1048 add_atomic_switch_msr_special(vmx,
1059 add_atomic_switch_msr_special(vmx,
1105 static bool update_transition_efer(struct vcpu_vmx *vmx)
1107 u64 guest_efer = vmx->vcpu.arch.efer;
1132 (enable_ept && ((vmx->vcpu.arch.efer ^ host_efer) & EFER_NX))) {
1136 add_atomic_switch_msr(vmx, MSR_EFER,
1139 clear_atomic_switch_msr(vmx, MSR_EFER);
1147 clear_atomic_switch_msr(vmx, MSR_EFER);
1152 vmx->guest_uret_msrs[i].data = guest_efer;
1153 vmx->guest_uret_msrs[i].mask = ~ignore_bits;
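
add_atomic_switch_msr() and clear_atomic_switch_msr() above maintain the VM-entry/VM-exit MSR autoload lists, which update_transition_efer() uses for MSR_EFER in some configurations. The bookkeeping is a small fixed-size array with add-or-update and remove-by-swap operations, roughly as in this sketch; the combined guest/host entry layout and the size limit are simplifications (the kernel keeps separate guest and host arrays).

#include <stdint.h>
#include <stdio.h>

#define MAX_AUTOLOAD 8                  /* placeholder for the fixed list size */

struct autoload_entry { uint32_t msr; uint64_t guest_val, host_val; };
struct autoload_list  { unsigned nr; struct autoload_entry val[MAX_AUTOLOAD]; };

static int find_slot(struct autoload_list *l, uint32_t msr)
{
        for (unsigned i = 0; i < l->nr; i++)
                if (l->val[i].msr == msr)
                        return (int)i;
        return -1;
}

/* Add a guest/host pair, or update it in place if the MSR is already listed. */
static void add_switch_msr(struct autoload_list *l, uint32_t msr,
                           uint64_t guest, uint64_t host)
{
        int i = find_slot(l, msr);
        if (i < 0) {
                if (l->nr == MAX_AUTOLOAD) {
                        fprintf(stderr, "autoload list full\n");
                        return;
                }
                i = (int)l->nr++;
                l->val[i].msr = msr;
        }
        l->val[i].guest_val = guest;
        l->val[i].host_val  = host;
}

/* Remove an entry by overwriting it with the last one (order does not matter). */
static void clear_switch_msr(struct autoload_list *l, uint32_t msr)
{
        int i = find_slot(l, msr);
        if (i < 0)
                return;
        l->val[i] = l->val[--l->nr];
}

int main(void)
{
        struct autoload_list l = { 0 };
        add_switch_msr(&l, 0xc0000080 /* EFER */, 0x500, 0xd01);
        clear_switch_msr(&l, 0xc0000080);
        printf("entries: %u\n", l.nr);
        return 0;
}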
1187 static inline bool pt_can_write_msr(struct vcpu_vmx *vmx)
1190 !(vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN);
1227 static void pt_guest_enter(struct vcpu_vmx *vmx)
1236 rdmsrl(MSR_IA32_RTIT_CTL, vmx->pt_desc.host.ctl);
1237 if (vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) {
1239 pt_save_msr(&vmx->pt_desc.host, vmx->pt_desc.num_address_ranges);
1240 pt_load_msr(&vmx->pt_desc.guest, vmx->pt_desc.num_address_ranges);
1244 static void pt_guest_exit(struct vcpu_vmx *vmx)
1249 if (vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) {
1250 pt_save_msr(&vmx->pt_desc.guest, vmx->pt_desc.num_address_ranges);
1251 pt_load_msr(&vmx->pt_desc.host, vmx->pt_desc.num_address_ranges);
1258 if (vmx->pt_desc.host.ctl)
1259 wrmsrl(MSR_IA32_RTIT_CTL, vmx->pt_desc.host.ctl);
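
pt_guest_enter() and pt_guest_exit() above swap host and guest Intel PT (RTIT) MSR state around the VM run, but only when the guest has tracing enabled (RTIT_CTL_TRACEEN), and restore the host's RTIT_CTL last. A hedged sketch of that symmetric save/load follows; the pt_state layout, the range count and the MSR I/O stubs are assumptions.

#include <stdint.h>
#include <stdio.h>

#define PT_NUM_RANGES 2                 /* placeholder address-range count */
#define RTIT_CTL_TRACEEN (1ull << 0)    /* trace-enable bit of RTIT_CTL */

struct pt_state {
        uint64_t ctl;
        uint64_t addr_a[PT_NUM_RANGES];
        uint64_t addr_b[PT_NUM_RANGES];
};

/* Stand-ins for rdmsrl()/wrmsrl() on the trace MSRs. */
static struct pt_state hw;              /* what the "hardware" currently holds */
static void pt_save(struct pt_state *dst)       { *dst = hw; }
static void pt_load(const struct pt_state *src) { hw = *src; }

static void pt_guest_enter(struct pt_state *host, const struct pt_state *guest)
{
        pt_save(host);                          /* always capture host state */
        if (guest->ctl & RTIT_CTL_TRACEEN)      /* only swap if the guest traces */
                pt_load(guest);
}

static void pt_guest_exit(struct pt_state *host, struct pt_state *guest)
{
        if (guest->ctl & RTIT_CTL_TRACEEN) {
                pt_save(guest);                 /* guest may have changed its MSRs */
                pt_load(host);                  /* restore host configuration */
        }
        if (host->ctl)
                hw.ctl = host->ctl;             /* re-enable host tracing last */
}

int main(void)
{
        struct pt_state host = { 0 }, guest = { .ctl = RTIT_CTL_TRACEEN };
        pt_guest_enter(&host, &guest);
        pt_guest_exit(&host, &guest);
        printf("hw.ctl = %#llx\n", (unsigned long long)hw.ctl);
        return 0;
}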
1291 struct vcpu_vmx *vmx = to_vmx(vcpu);
1300 vmx->req_immediate_exit = false;
1307 if (!vmx->guest_uret_msrs_loaded) {
1308 vmx->guest_uret_msrs_loaded = true;
1310 if (!vmx->guest_uret_msrs[i].load_into_hardware)
1314 vmx->guest_uret_msrs[i].data,
1315 vmx->guest_uret_msrs[i].mask);
1319 if (vmx->nested.need_vmcs12_to_shadow_sync)
1322 if (vmx->guest_state_loaded)
1325 host_state = &vmx->loaded_vmcs->host_state;
1343 vmx->msr_host_kernel_gs_base = current->thread.gsbase;
1348 vmx->msr_host_kernel_gs_base = read_msr(MSR_KERNEL_GS_BASE);
1351 wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
1360 vmx->guest_state_loaded = true;
1363 static void vmx_prepare_switch_to_host(struct vcpu_vmx *vmx)
1367 if (!vmx->guest_state_loaded)
1370 host_state = &vmx->loaded_vmcs->host_state;
1372 ++vmx->vcpu.stat.host_state_reload;
1375 rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
1395 wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
1398 vmx->guest_state_loaded = false;
1399 vmx->guest_uret_msrs_loaded = false;
1403 static u64 vmx_read_guest_kernel_gs_base(struct vcpu_vmx *vmx)
1406 if (vmx->guest_state_loaded)
1407 rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
1409 return vmx->msr_guest_kernel_gs_base;
1412 static void vmx_write_guest_kernel_gs_base(struct vcpu_vmx *vmx, u64 data)
1415 if (vmx->guest_state_loaded)
1418 vmx->msr_guest_kernel_gs_base = data;
1425 struct vcpu_vmx *vmx = to_vmx(vcpu);
1426 bool already_loaded = vmx->loaded_vmcs->cpu == cpu;
1430 loaded_vmcs_clear(vmx->loaded_vmcs);
1441 list_add(&vmx->loaded_vmcs->loaded_vmcss_on_cpu_link,
1447 if (prev != vmx->loaded_vmcs->vmcs) {
1448 per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs;
1449 vmcs_load(vmx->loaded_vmcs->vmcs);
1485 vmx->loaded_vmcs->cpu = cpu;
1495 struct vcpu_vmx *vmx = to_vmx(vcpu);
1501 vmx->host_debugctlmsr = get_debugctlmsr();
1518 struct vcpu_vmx *vmx = to_vmx(vcpu);
1524 if (vmx->rmode.vm86_active) {
1526 save_rflags = vmx->rmode.save_rflags;
1529 vmx->rflags = rflags;
1531 return vmx->rflags;
1536 struct vcpu_vmx *vmx = to_vmx(vcpu);
1546 vmx->rflags = rflags;
1552 vmx->rflags = rflags;
1553 if (vmx->rmode.vm86_active) {
1554 vmx->rmode.save_rflags = rflags;
1559 if ((old_rflags ^ vmx->rflags) & X86_EFLAGS_VM)
1560 vmx->emulation_required = vmx_emulation_required(vcpu);
1599 struct vcpu_vmx *vmx = to_vmx(vcpu);
1606 if (data & vmx->pt_desc.ctl_bitmask)
1613 if ((vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) &&
1614 ((vmx->pt_desc.guest.ctl ^ data) & ~RTIT_CTL_TRACEEN))
1624 !intel_pt_validate_cap(vmx->pt_desc.caps,
1632 value = intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_mtc_periods);
1633 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_mtc) &&
1637 value = intel_pt_validate_cap(vmx->pt_desc.caps,
1639 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_psb_cyc) &&
1643 value = intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_psb_periods);
1644 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_psb_cyc) &&
1654 if ((value && (vmx->pt_desc.num_address_ranges < 1)) || (value > 2))
1657 if ((value && (vmx->pt_desc.num_address_ranges < 2)) || (value > 2))
1660 if ((value && (vmx->pt_desc.num_address_ranges < 3)) || (value > 2))
1663 if ((value && (vmx->pt_desc.num_address_ranges < 4)) || (value > 2))
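
The checks above validate a guest write to MSR_IA32_RTIT_CTL: reserved or unsupported bits (pt_desc.ctl_bitmask) must be clear, nothing except TRACEEN may change while a trace is in progress, and sub-fields such as the MTC period must match what the CPUID trace capabilities advertise. A minimal sketch of that validation shape follows; the bit positions, the single mtc_periods bitmap and the example values are placeholders.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RTIT_CTL_TRACEEN   (1ull << 0)
#define RTIT_CTL_MTC_RANGE (0xfull << 14)       /* illustrative field placement */

struct pt_ctl_state {
        uint64_t ctl;           /* currently programmed guest RTIT_CTL */
        uint64_t ctl_bitmask;   /* reserved/unsupported bits, derived from caps */
        uint32_t mtc_periods;   /* bitmap of supported MTC periods (from CPUID) */
};

static bool rtit_ctl_write_ok(const struct pt_ctl_state *s, uint64_t data)
{
        /* 1. No reserved or unsupported bits may be set. */
        if (data & s->ctl_bitmask)
                return false;

        /* 2. While tracing is on, only TRACEEN itself may change. */
        if ((s->ctl & RTIT_CTL_TRACEEN) &&
            ((s->ctl ^ data) & ~RTIT_CTL_TRACEEN))
                return false;

        /* 3. Sub-fields must pick a value the hardware reports as supported. */
        unsigned mtc_period = (data & RTIT_CTL_MTC_RANGE) >> 14;
        if (!(s->mtc_periods & (1u << mtc_period)))
                return false;

        return true;
}

int main(void)
{
        struct pt_ctl_state s = { .ctl_bitmask = ~0x3ffffull, .mtc_periods = 0x1 };
        printf("%d %d\n", rtit_ctl_write_ok(&s, RTIT_CTL_TRACEEN),
               rtit_ctl_write_ok(&s, 1ull << 60));
        return 0;
}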
1757 struct vcpu_vmx *vmx = to_vmx(vcpu);
1778 vmx->nested.mtf_pending = true;
1781 vmx->nested.mtf_pending = false;
1808 struct vcpu_vmx *vmx = to_vmx(vcpu);
1827 if (vmx->rmode.vm86_active) {
1835 WARN_ON_ONCE(vmx->emulation_required);
1839 vmx->vcpu.arch.event_exit_inst_len);
1849 static void vmx_setup_uret_msr(struct vcpu_vmx *vmx, unsigned int msr,
1854 uret_msr = vmx_find_uret_msr(vmx, msr);
1867 static void vmx_setup_uret_msrs(struct vcpu_vmx *vmx)
1876 load_syscall_msrs = is_long_mode(&vmx->vcpu) &&
1877 (vmx->vcpu.arch.efer & EFER_SCE);
1879 vmx_setup_uret_msr(vmx, MSR_STAR, load_syscall_msrs);
1880 vmx_setup_uret_msr(vmx, MSR_LSTAR, load_syscall_msrs);
1881 vmx_setup_uret_msr(vmx, MSR_SYSCALL_MASK, load_syscall_msrs);
1883 vmx_setup_uret_msr(vmx, MSR_EFER, update_transition_efer(vmx));
1885 vmx_setup_uret_msr(vmx, MSR_TSC_AUX,
1886 guest_cpuid_has(&vmx->vcpu, X86_FEATURE_RDTSCP) ||
1887 guest_cpuid_has(&vmx->vcpu, X86_FEATURE_RDPID));
1895 vmx_setup_uret_msr(vmx, MSR_IA32_TSX_CTRL, boot_cpu_has(X86_FEATURE_RTM));
1901 vmx->guest_uret_msrs_loaded = false;
1949 static inline bool is_vmx_feature_control_msr_valid(struct vcpu_vmx *vmx,
1958 WARN_ON_ONCE(vmx->msr_ia32_feature_control_valid_bits &
1962 (vmx->msr_ia32_feature_control & FEAT_CTL_LOCKED))
1968 valid_bits = vmx->msr_ia32_feature_control_valid_bits;
1992 struct vcpu_vmx *vmx = to_vmx(vcpu);
2005 msr_info->data = vmx_read_guest_kernel_gs_base(vmx);
2016 if (!msr_info->host_initiated && !vmx_has_waitpkg(vmx))
2019 msr_info->data = vmx->msr_ia32_umwait_control;
2046 !(vmx->msr_ia32_feature_control &
2052 msr_info->data = vmx->msr_ia32_feature_control;
2064 if (vmx_get_vmx_msr(&vmx->nested.msrs, msr_info->index,
2081 msr_info->data = vmx->pt_desc.guest.ctl;
2086 msr_info->data = vmx->pt_desc.guest.status;
2090 !intel_pt_validate_cap(vmx->pt_desc.caps,
2093 msr_info->data = vmx->pt_desc.guest.cr3_match;
2097 (!intel_pt_validate_cap(vmx->pt_desc.caps,
2099 !intel_pt_validate_cap(vmx->pt_desc.caps,
2102 msr_info->data = vmx->pt_desc.guest.output_base;
2106 (!intel_pt_validate_cap(vmx->pt_desc.caps,
2108 !intel_pt_validate_cap(vmx->pt_desc.caps,
2111 msr_info->data = vmx->pt_desc.guest.output_mask;
2116 (index >= 2 * vmx->pt_desc.num_address_ranges))
2119 msr_info->data = vmx->pt_desc.guest.addr_b[index / 2];
2121 msr_info->data = vmx->pt_desc.guest.addr_a[index / 2];
2128 msr = vmx_find_uret_msr(vmx, msr_info->index);
2171 struct vcpu_vmx *vmx = to_vmx(vcpu);
2184 vmx_segment_cache_clear(vmx);
2188 vmx_segment_cache_clear(vmx);
2192 vmx_write_guest_kernel_gs_base(vmx, data);
2265 ((vmx->nested.msrs.entry_ctls_high & VM_ENTRY_LOAD_BNDCFGS) ||
2266 (vmx->nested.msrs.exit_ctls_high & VM_EXIT_CLEAR_BNDCFGS)))
2272 if (!msr_info->host_initiated && !vmx_has_waitpkg(vmx))
2279 vmx->msr_ia32_umwait_control = data;
2289 vmx->spec_ctrl = data;
2337 if (!is_vmx_feature_control_msr_valid(vmx, msr_info))
2340 vmx->msr_ia32_feature_control = data;
2361 ((vmx->msr_ia32_feature_control & FEAT_CTL_LOCKED) &&
2362 !(vmx->msr_ia32_feature_control & FEAT_CTL_SGX_LC_ENABLED))))
2364 vmx->msr_ia32_sgxlepubkeyhash
2376 vmx->nested.vmxon)
2379 vmx->pt_desc.guest.ctl = data;
2383 if (!pt_can_write_msr(vmx))
2387 vmx->pt_desc.guest.status = data;
2390 if (!pt_can_write_msr(vmx))
2392 if (!intel_pt_validate_cap(vmx->pt_desc.caps,
2395 vmx->pt_desc.guest.cr3_match = data;
2398 if (!pt_can_write_msr(vmx))
2400 if (!intel_pt_validate_cap(vmx->pt_desc.caps,
2402 !intel_pt_validate_cap(vmx->pt_desc.caps,
2407 vmx->pt_desc.guest.output_base = data;
2410 if (!pt_can_write_msr(vmx))
2412 if (!intel_pt_validate_cap(vmx->pt_desc.caps,
2414 !intel_pt_validate_cap(vmx->pt_desc.caps,
2417 vmx->pt_desc.guest.output_mask = data;
2420 if (!pt_can_write_msr(vmx))
2423 if (index >= 2 * vmx->pt_desc.num_address_ranges)
2428 vmx->pt_desc.guest.addr_b[index / 2] = data;
2430 vmx->pt_desc.guest.addr_a[index / 2] = data;
2458 msr = vmx_find_uret_msr(vmx, msr_index);
2460 ret = vmx_set_guest_uret_msr(vmx, msr, data);
2467 vmx_update_fb_clear_dis(vcpu, vmx);
3005 struct vcpu_vmx *vmx = to_vmx(vcpu);
3011 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES);
3012 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS);
3013 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS);
3014 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS);
3015 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS);
3016 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS);
3018 vmx->rmode.vm86_active = 0;
3020 __vmx_set_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR);
3024 flags |= vmx->rmode.save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
3032 fix_pmode_seg(vcpu, VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]);
3033 fix_pmode_seg(vcpu, VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]);
3034 fix_pmode_seg(vcpu, VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]);
3035 fix_pmode_seg(vcpu, VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]);
3036 fix_pmode_seg(vcpu, VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]);
3037 fix_pmode_seg(vcpu, VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]);
3075 struct vcpu_vmx *vmx = to_vmx(vcpu);
3087 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR);
3088 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES);
3089 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS);
3090 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS);
3091 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS);
3092 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS);
3093 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS);
3095 vmx->rmode.vm86_active = 1;
3097 vmx_segment_cache_clear(vmx);
3104 vmx->rmode.save_rflags = flags;
3112 fix_rmode_seg(VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]);
3113 fix_rmode_seg(VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]);
3114 fix_rmode_seg(VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]);
3115 fix_rmode_seg(VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]);
3116 fix_rmode_seg(VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]);
3117 fix_rmode_seg(VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]);
3122 struct vcpu_vmx *vmx = to_vmx(vcpu);
3125 if (!vmx_find_uret_msr(vmx, MSR_EFER))
3131 vm_entry_controls_setbit(vmx, VM_ENTRY_IA32E_MODE);
3133 vm_entry_controls_clearbit(vmx, VM_ENTRY_IA32E_MODE);
3139 vmx_setup_uret_msrs(vmx);
3171 struct vcpu_vmx *vmx = to_vmx(vcpu);
3186 vpid_sync_vcpu_single(vmx->vpid);
3187 vpid_sync_vcpu_single(vmx->nested.vpid02);
3282 struct vcpu_vmx *vmx = to_vmx(vcpu);
3296 if (vmx->rmode.vm86_active && (cr0 & X86_CR0_PE))
3299 if (!vmx->rmode.vm86_active && !(cr0 & X86_CR0_PE))
3343 exec_controls_setbit(vmx, CR3_EXITING_BITS);
3345 exec_controls_clearbit(vmx, CR3_EXITING_BITS);
3347 tmp = exec_controls_get(vmx);
3350 exec_controls_set(vmx, tmp);
3359 * GUEST_CR3 is still vmx->ept_identity_map_addr if EPT + !URG.
3366 vmx->emulation_required = vmx_emulation_required(vcpu);
3439 struct vcpu_vmx *vmx = to_vmx(vcpu);
3450 else if (vmx->rmode.vm86_active)
3457 secondary_exec_controls_setbit(vmx, SECONDARY_EXEC_DESC);
3461 secondary_exec_controls_clearbit(vmx, SECONDARY_EXEC_DESC);
3502 struct vcpu_vmx *vmx = to_vmx(vcpu);
3505 if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) {
3506 *var = vmx->rmode.segs[seg];
3508 || var->selector == vmx_read_guest_seg_selector(vmx, seg))
3510 var->base = vmx_read_guest_seg_base(vmx, seg);
3511 var->selector = vmx_read_guest_seg_selector(vmx, seg);
3514 var->base = vmx_read_guest_seg_base(vmx, seg);
3515 var->limit = vmx_read_guest_seg_limit(vmx, seg);
3516 var->selector = vmx_read_guest_seg_selector(vmx, seg);
3517 ar = vmx_read_guest_seg_ar(vmx, seg);
3549 struct vcpu_vmx *vmx = to_vmx(vcpu);
3551 if (unlikely(vmx->rmode.vm86_active))
3554 int ar = vmx_read_guest_seg_ar(vmx, VCPU_SREG_SS);
3578 struct vcpu_vmx *vmx = to_vmx(vcpu);
3581 vmx_segment_cache_clear(vmx);
3583 if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) {
3584 vmx->rmode.segs[seg] = *var;
3588 fix_rmode_seg(seg, &vmx->rmode.segs[seg]);
3948 static void vmx_msr_bitmap_l01_changed(struct vcpu_vmx *vmx)
3956 struct hv_enlightened_vmcs *evmcs = (void *)vmx->vmcs01.vmcs;
3963 vmx->nested.force_msr_bitmap_recalc = true;
3968 struct vcpu_vmx *vmx = to_vmx(vcpu);
3969 unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap;
3974 vmx_msr_bitmap_l01_changed(vmx);
3985 clear_bit(idx, vmx->shadow_msr_intercept.read);
3987 clear_bit(idx, vmx->shadow_msr_intercept.write);
4012 struct vcpu_vmx *vmx = to_vmx(vcpu);
4013 unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap;
4018 vmx_msr_bitmap_l01_changed(vmx);
4029 set_bit(idx, vmx->shadow_msr_intercept.read);
4031 set_bit(idx, vmx->shadow_msr_intercept.write);
4051 struct vcpu_vmx *vmx = to_vmx(vcpu);
4052 u64 *msr_bitmap = (u64 *)vmx->vmcs01.msr_bitmap;
4059 (secondary_exec_controls_get(vmx) &
4068 if (mode == vmx->x2apic_msr_bitmap_mode)
4071 vmx->x2apic_msr_bitmap_mode = mode;
4104 struct vcpu_vmx *vmx = to_vmx(vcpu);
4105 bool flag = !(vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN);
4112 for (i = 0; i < vmx->pt_desc.num_address_ranges; i++) {
4120 struct vcpu_vmx *vmx = to_vmx(vcpu);
4127 WARN_ON_ONCE(!vmx->nested.virtual_apic_map.gfn))
4132 vapic_page = vmx->nested.virtual_apic_map.hva;
4140 struct vcpu_vmx *vmx = to_vmx(vcpu);
4154 if (!test_bit(i, vmx->shadow_msr_intercept.read))
4157 if (!test_bit(i, vmx->shadow_msr_intercept.write))
4213 struct vcpu_vmx *vmx = to_vmx(vcpu);
4216 vector == vmx->nested.posted_intr_nv) {
4221 vmx->nested.pi_pending = true;
4251 struct vcpu_vmx *vmx = to_vmx(vcpu);
4262 if (pi_test_and_set_pir(vector, &vmx->pi_desc))
4266 if (pi_test_and_set_on(&vmx->pi_desc))
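
The posted-interrupt delivery fragments above (pi_test_and_set_pir / pi_test_and_set_on) show the lock-free publish pattern: set the vector's bit in the PIR, and only if it was newly set, try to set the descriptor's ON bit and send the notification; if either bit was already set, an earlier sender's notification covers this interrupt. A stand-alone sketch using C11 atomics follows; the four-word PIR model, the control-bit layout and the helper names are assumptions.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct pi_desc_sketch {
        _Atomic uint64_t pir[4];        /* 256 posted-interrupt request bits */
        _Atomic uint64_t control;       /* bit 0 models the ON (notify) bit */
};

/* Returns true if the bit was already set (i.e. nothing new to publish). */
static bool test_and_set_bit64(_Atomic uint64_t *word, unsigned bit)
{
        uint64_t mask = 1ull << bit;
        return atomic_fetch_or(word, mask) & mask;
}

static void send_notification_ipi(void)
{
        puts("notify target CPU");      /* the kernel sends an IPI here */
}

static void deliver_posted_interrupt(struct pi_desc_sketch *pi, unsigned vector)
{
        /* Already pending: the earlier sender owns the notification. */
        if (test_and_set_bit64(&pi->pir[vector / 64], vector % 64))
                return;

        /* ON already set: the target will scan the PIR anyway. */
        if (test_and_set_bit64(&pi->control, 0))
                return;

        send_notification_ipi();
}

int main(void)
{
        struct pi_desc_sketch pi = { 0 };
        deliver_posted_interrupt(&pi, 0x20);    /* notifies */
        deliver_posted_interrupt(&pi, 0x21);    /* ON already set: no second IPI */
        return 0;
}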
4300 void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
4316 vmx->loaded_vmcs->host_state.cr3 = cr3;
4321 vmx->loaded_vmcs->host_state.cr4 = cr4;
4367 void set_cr4_guest_host_mask(struct vcpu_vmx *vmx)
4369 struct kvm_vcpu *vcpu = &vmx->vcpu;
4377 if (is_guest_mode(&vmx->vcpu))
4383 static u32 vmx_pin_based_exec_ctrl(struct vcpu_vmx *vmx)
4387 if (!kvm_vcpu_apicv_active(&vmx->vcpu))
4444 struct vcpu_vmx *vmx = to_vmx(vcpu);
4447 vmx->nested.update_vmcs01_apicv_status = true;
4451 pin_controls_set(vmx, vmx_pin_based_exec_ctrl(vmx));
4454 secondary_exec_controls_setbit(vmx,
4458 tertiary_exec_controls_setbit(vmx, TERTIARY_EXEC_IPI_VIRT);
4460 secondary_exec_controls_clearbit(vmx,
4464 tertiary_exec_controls_clearbit(vmx, TERTIARY_EXEC_IPI_VIRT);
4470 static u32 vmx_exec_control(struct vcpu_vmx *vmx)
4487 if (vmx->vcpu.arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)
4490 if (!cpu_need_tpr_shadow(&vmx->vcpu))
4506 if (kvm_mwait_in_guest(vmx->vcpu.kvm))
4509 if (kvm_hlt_in_guest(vmx->vcpu.kvm))
4514 static u64 vmx_tertiary_exec_control(struct vcpu_vmx *vmx)
4522 if (!enable_ipiv || !kvm_vcpu_apicv_active(&vmx->vcpu))
4534 vmx_adjust_secondary_exec_control(struct vcpu_vmx *vmx, u32 *exec_control,
4561 vmx->nested.msrs.secondary_ctls_high |= control;
4563 vmx->nested.msrs.secondary_ctls_high &= ~control;
4572 #define vmx_adjust_sec_exec_control(vmx, exec_control, name, feat_name, ctrl_name, exiting) \
4574 struct kvm_vcpu *__vcpu = &(vmx)->vcpu; \
4582 vmx_adjust_secondary_exec_control(vmx, exec_control, SECONDARY_EXEC_##ctrl_name,\
4588 #define vmx_adjust_sec_exec_feature(vmx, exec_control, lname, uname) \
4589 vmx_adjust_sec_exec_control(vmx, exec_control, lname, uname, ENABLE_##uname, false)
4591 #define vmx_adjust_sec_exec_exiting(vmx, exec_control, lname, uname) \
4592 vmx_adjust_sec_exec_control(vmx, exec_control, lname, uname, uname##_EXITING, true)
4594 static u32 vmx_secondary_exec_control(struct vcpu_vmx *vmx)
4596 struct kvm_vcpu *vcpu = &vmx->vcpu;
4604 if (vmx->vpid == 0)
4612 if (kvm_pause_in_guest(vmx->vcpu.kvm))
4644 vmx_adjust_sec_exec_feature(vmx, &exec_control, xsaves, XSAVES);
4659 vmx_adjust_secondary_exec_control(vmx, &exec_control,
4664 vmx_adjust_sec_exec_feature(vmx, &exec_control, invpcid, INVPCID);
4666 vmx_adjust_sec_exec_exiting(vmx, &exec_control, rdrand, RDRAND);
4667 vmx_adjust_sec_exec_exiting(vmx, &exec_control, rdseed, RDSEED);
4669 vmx_adjust_sec_exec_control(vmx, &exec_control, waitpkg, WAITPKG,
4713 static void init_vmcs(struct vcpu_vmx *vmx)
4715 struct kvm *kvm = vmx->vcpu.kvm;
4722 vmcs_write64(MSR_BITMAP, __pa(vmx->vmcs01.msr_bitmap));
4727 pin_controls_set(vmx, vmx_pin_based_exec_ctrl(vmx));
4729 exec_controls_set(vmx, vmx_exec_control(vmx));
4732 secondary_exec_controls_set(vmx, vmx_secondary_exec_control(vmx));
4735 tertiary_exec_controls_set(vmx, vmx_tertiary_exec_control(vmx));
4737 if (enable_apicv && lapic_in_kernel(&vmx->vcpu)) {
4746 vmcs_write64(POSTED_INTR_DESC_ADDR, __pa((&vmx->pi_desc)));
4749 if (vmx_can_use_ipiv(&vmx->vcpu)) {
4756 vmx->ple_window = ple_window;
4757 vmx->ple_window_dirty = true;
4769 vmx_set_constant_host_state(vmx);
4778 vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val));
4780 vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val));
4783 vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
4785 vm_exit_controls_set(vmx, vmx_vmexit_ctrl());
4788 vm_entry_controls_set(vmx, vmx_vmentry_ctrl());
4790 vmx->vcpu.arch.cr0_guest_owned_bits = vmx_l1_guest_owned_cr0_bits();
4791 vmcs_writel(CR0_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr0_guest_owned_bits);
4793 set_cr4_guest_host_mask(vmx);
4795 if (vmx->vpid != 0)
4796 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
4802 vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg));
4806 vmx_write_encls_bitmap(&vmx->vcpu, NULL);
4809 memset(&vmx->pt_desc, 0, sizeof(vmx->pt_desc));
4811 vmx->pt_desc.guest.output_mask = 0x7F;
4822 if (cpu_need_tpr_shadow(&vmx->vcpu))
4824 __pa(vmx->vcpu.arch.apic->regs));
4828 vmx_setup_uret_msrs(vmx);
4833 struct vcpu_vmx *vmx = to_vmx(vcpu);
4835 init_vmcs(vmx);
4838 memcpy(&vmx->nested.msrs, &vmcs_config.nested, sizeof(vmx->nested.msrs));
4842 vmx->nested.posted_intr_nv = -1;
4843 vmx->nested.vmxon_ptr = INVALID_GPA;
4844 vmx->nested.current_vmptr = INVALID_GPA;
4845 vmx->nested.hv_evmcs_vmptr = EVMPTR_INVALID;
4848 vmx->msr_ia32_feature_control_valid_bits = FEAT_CTL_LOCKED;
4854 vmx->pi_desc.nv = POSTED_INTR_VECTOR;
4855 vmx->pi_desc.sn = 1;
4860 struct vcpu_vmx *vmx = to_vmx(vcpu);
4865 vmx->rmode.vm86_active = 0;
4866 vmx->spec_ctrl = 0;
4868 vmx->msr_ia32_umwait_control = 0;
4870 vmx->hv_deadline_tsc = -1;
4873 vmx_segment_cache_clear(vmx);
4912 vpid_sync_context(vmx->vpid);
4914 vmx_update_fb_clear_dis(vcpu, vmx);
4935 struct vcpu_vmx *vmx = to_vmx(vcpu);
4942 if (vmx->rmode.vm86_active) {
4953 vmx->vcpu.arch.event_exit_inst_len);
4963 struct vcpu_vmx *vmx = to_vmx(vcpu);
4974 vmx->loaded_vmcs->soft_vnmi_blocked = 1;
4975 vmx->loaded_vmcs->vnmi_blocked_time = 0;
4979 vmx->loaded_vmcs->nmi_known_unmasked = false;
4981 if (vmx->rmode.vm86_active) {
4994 struct vcpu_vmx *vmx = to_vmx(vcpu);
4998 return vmx->loaded_vmcs->soft_vnmi_blocked;
4999 if (vmx->loaded_vmcs->nmi_known_unmasked)
5002 vmx->loaded_vmcs->nmi_known_unmasked = !masked;
5008 struct vcpu_vmx *vmx = to_vmx(vcpu);
5011 if (vmx->loaded_vmcs->soft_vnmi_blocked != masked) {
5012 vmx->loaded_vmcs->soft_vnmi_blocked = masked;
5013 vmx->loaded_vmcs->vnmi_blocked_time = 0;
5016 vmx->loaded_vmcs->nmi_known_unmasked = !masked;
5186 struct vcpu_vmx *vmx = to_vmx(vcpu);
5192 vect_info = vmx->idt_vectoring_info;
5221 if (!vmx->rmode.vm86_active && is_gp_fault(intr_info)) {
5269 if (vmx->rmode.vm86_active && rmode_exception(vcpu, ex_no))
5319 vmx->vcpu.arch.event_exit_inst_len =
5691 struct vcpu_vmx *vmx = to_vmx(vcpu);
5698 idt_v = (vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK);
5699 idt_index = (vmx->idt_vectoring_info & VECTORING_INFO_VECTOR_MASK);
5700 type = (vmx->idt_vectoring_info & VECTORING_INFO_TYPE_MASK);
5716 if (vmx->idt_vectoring_info &
5835 struct vcpu_vmx *vmx = to_vmx(vcpu);
5837 return vmx->emulation_required && !vmx->rmode.vm86_active &&
5843 struct vcpu_vmx *vmx = to_vmx(vcpu);
5847 intr_window_requested = exec_controls_get(vmx) &
5850 while (vmx->emulation_required && count-- != 0) {
5852 return handle_interrupt_window(&vmx->vcpu);
5894 struct vcpu_vmx *vmx = to_vmx(vcpu);
5895 unsigned int old = vmx->ple_window;
5897 vmx->ple_window = __grow_ple_window(old, ple_window,
5901 if (vmx->ple_window != old) {
5902 vmx->ple_window_dirty = true;
5904 vmx->ple_window, old);
5910 struct vcpu_vmx *vmx = to_vmx(vcpu);
5911 unsigned int old = vmx->ple_window;
5913 vmx->ple_window = __shrink_ple_window(old, ple_window,
5917 if (vmx->ple_window != old) {
5918 vmx->ple_window_dirty = true;
5920 vmx->ple_window, old);
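
The grow/shrink fragments above adapt the pause-loop-exiting window and set ple_window_dirty only when the value actually changes, so the VMCS field is rewritten at most once before the next entry. A small sketch of that clamp-and-dirty pattern follows; the base, growth and shrink factors are placeholders, not KVM's ple_window* module parameters.

#include <stdbool.h>
#include <stdio.h>

struct ple_state {
        unsigned int window;
        bool         dirty;     /* write PLE_WINDOW to the VMCS before next entry */
};

static unsigned int clamp_uint(unsigned int v, unsigned int lo, unsigned int hi)
{
        return v < lo ? lo : (v > hi ? hi : v);
}

/* Placeholder tunables; KVM exposes its own values as module parameters. */
#define PLE_WINDOW_BASE   4096u
#define PLE_WINDOW_GROW   2u
#define PLE_WINDOW_SHRINK 2u
#define PLE_WINDOW_MAX    (1u << 20)

static void update_window(struct ple_state *s, unsigned int new_window)
{
        if (new_window != s->window) {
                s->window = new_window;
                s->dirty = true;        /* flushed to the VMCS on the next run */
        }
}

static void grow_ple_window(struct ple_state *s)
{
        update_window(s, clamp_uint(s->window * PLE_WINDOW_GROW,
                                    PLE_WINDOW_BASE, PLE_WINDOW_MAX));
}

static void shrink_ple_window(struct ple_state *s)
{
        update_window(s, clamp_uint(s->window / PLE_WINDOW_SHRINK,
                                    PLE_WINDOW_BASE, PLE_WINDOW_MAX));
}

int main(void)
{
        struct ple_state s = { .window = PLE_WINDOW_BASE };
        grow_ple_window(&s);
        shrink_ple_window(&s);
        printf("window=%u dirty=%d\n", s.window, s.dirty);
        return 0;
}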
6006 struct vcpu_vmx *vmx = to_vmx(vcpu);
6008 if (!vmx->req_immediate_exit &&
6009 !unlikely(vmx->loaded_vmcs->hv_timer_soft_disabled)) {
6150 struct vcpu_vmx *vmx = to_vmx(vcpu);
6152 *reason = vmx->exit_reason.full;
6154 if (!(vmx->exit_reason.failed_vmentry)) {
6155 *info2 = vmx->idt_vectoring_info;
6168 static void vmx_destroy_pml_buffer(struct vcpu_vmx *vmx)
6170 if (vmx->pml_pg) {
6171 __free_page(vmx->pml_pg);
6172 vmx->pml_pg = NULL;
6178 struct vcpu_vmx *vmx = to_vmx(vcpu);
6194 pml_buf = page_address(vmx->pml_pg);
6235 struct vcpu_vmx *vmx = to_vmx(vcpu);
6264 vmx->loaded_vmcs->vmcs, vcpu->arch.last_vmentry_cpu);
6295 efer_slot = vmx_find_loadstore_msr_slot(&vmx->msr_autoload.guest, MSR_EFER);
6300 vmx->msr_autoload.guest.val[efer_slot].value);
6325 vmx_dump_msrs("guest autoload", &vmx->msr_autoload.guest);
6327 vmx_dump_msrs("guest autostore", &vmx->msr_autostore.guest);
6358 vmx_dump_msrs("host autoload", &vmx->msr_autoload.host);
6414 struct vcpu_vmx *vmx = to_vmx(vcpu);
6415 union vmx_exit_reason exit_reason = vmx->exit_reason;
6416 u32 vectoring_info = vmx->idt_vectoring_info;
6436 if (KVM_BUG_ON(vmx->nested.nested_run_pending, vcpu->kvm))
6471 if (vmx->emulation_required) {
6481 if (vmx->emulation_required)
6493 if (unlikely(vmx->fail)) {
6533 vmx->loaded_vmcs->soft_vnmi_blocked)) {
6535 vmx->loaded_vmcs->soft_vnmi_blocked = 0;
6536 } else if (vmx->loaded_vmcs->vnmi_blocked_time > 1000000000LL &&
6547 vmx->loaded_vmcs->soft_vnmi_blocked = 0;
6579 vcpu_unimpl(vcpu, "vmx: unexpected exit reason 0x%x\n",
6697 struct vcpu_vmx *vmx = to_vmx(vcpu);
6709 vmx->nested.change_vmcs01_virtual_apic_mode = true;
6713 sec_exec_control = secondary_exec_controls_get(vmx);
6744 secondary_exec_controls_set(vmx, sec_exec_control);
6869 struct vcpu_vmx *vmx = to_vmx(vcpu);
6876 if (pi_test_on(&vmx->pi_desc)) {
6877 pi_clear_on(&vmx->pi_desc);
6884 kvm_apic_update_irr(vcpu, vmx->pi_desc.pir, &max_irr);
6926 struct vcpu_vmx *vmx = to_vmx(vcpu);
6928 pi_clear_on(&vmx->pi_desc);
6929 memset(vmx->pi_desc.pir, 0, sizeof(vmx->pi_desc.pir));
6955 static void handle_exception_irqoff(struct vcpu_vmx *vmx)
6957 u32 intr_info = vmx_get_intr_info(&vmx->vcpu);
6961 vmx->vcpu.arch.apf.host_apf_flags = kvm_read_and_reset_apf_flags();
6964 handle_nm_fault_irqoff(&vmx->vcpu);
6989 struct vcpu_vmx *vmx = to_vmx(vcpu);
6991 if (vmx->emulation_required)
6994 if (vmx->exit_reason.basic == EXIT_REASON_EXTERNAL_INTERRUPT)
6996 else if (vmx->exit_reason.basic == EXIT_REASON_EXCEPTION_NMI)
6997 handle_exception_irqoff(vmx);
7026 static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx)
7033 idtv_info_valid = vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK;
7036 if (vmx->loaded_vmcs->nmi_known_unmasked)
7039 exit_intr_info = vmx_get_intr_info(&vmx->vcpu);
7057 vmx->loaded_vmcs->nmi_known_unmasked =
7060 } else if (unlikely(vmx->loaded_vmcs->soft_vnmi_blocked))
7061 vmx->loaded_vmcs->vnmi_blocked_time +=
7063 vmx->loaded_vmcs->entry_time));
7120 static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
7122 __vmx_complete_interrupts(&vmx->vcpu, vmx->idt_vectoring_info,
7137 static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
7141 struct kvm_pmu *pmu = vcpu_to_pmu(&vmx->vcpu);
7154 clear_atomic_switch_msr(vmx, msrs[i].msr);
7156 add_atomic_switch_msr(vmx, msrs[i].msr, msrs[i].guest,
7162 struct vcpu_vmx *vmx = to_vmx(vcpu);
7166 if (vmx->req_immediate_exit) {
7168 vmx->loaded_vmcs->hv_timer_soft_disabled = false;
7169 } else if (vmx->hv_deadline_tsc != -1) {
7171 if (vmx->hv_deadline_tsc > tscl)
7173 delta_tsc = (u32)((vmx->hv_deadline_tsc - tscl) >>
7179 vmx->loaded_vmcs->hv_timer_soft_disabled = false;
7180 } else if (!vmx->loaded_vmcs->hv_timer_soft_disabled) {
7182 vmx->loaded_vmcs->hv_timer_soft_disabled = true;
7186 void noinstr vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp)
7188 if (unlikely(host_rsp != vmx->loaded_vmcs->host_state.rsp)) {
7189 vmx->loaded_vmcs->host_state.rsp = host_rsp;
7194 void noinstr vmx_spec_ctrl_restore_host(struct vcpu_vmx *vmx,
7203 vmx->spec_ctrl = __rdmsr(MSR_IA32_SPEC_CTRL);
7213 vmx->spec_ctrl != hostval)
7234 struct vcpu_vmx *vmx = to_vmx(vcpu);
7250 vmx_disable_fb_clear(vmx);
7255 vmx->fail = __vmx_vcpu_run(vmx, (unsigned long *)&vcpu->arch.regs,
7261 vmx->idt_vectoring_info = 0;
7263 vmx_enable_fb_clear(vmx);
7265 if (unlikely(vmx->fail)) {
7266 vmx->exit_reason.full = 0xdead;
7270 vmx->exit_reason.full = vmcs_read32(VM_EXIT_REASON);
7271 if (likely(!vmx->exit_reason.failed_vmentry))
7272 vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
7274 if ((u16)vmx->exit_reason.basic == EXIT_REASON_EXCEPTION_NMI &&
7287 struct vcpu_vmx *vmx = to_vmx(vcpu);
7292 vmx->loaded_vmcs->soft_vnmi_blocked))
7293 vmx->loaded_vmcs->entry_time = ktime_get();
7300 if (unlikely(vmx->emulation_required)) {
7301 vmx->fail = 0;
7303 vmx->exit_reason.full = EXIT_REASON_INVALID_STATE;
7304 vmx->exit_reason.failed_vmentry = 1;
7306 vmx->exit_qualification = ENTRY_FAIL_DEFAULT;
7308 vmx->exit_intr_info = 0;
7314 if (vmx->ple_window_dirty) {
7315 vmx->ple_window_dirty = false;
7316 vmcs_write32(PLE_WINDOW, vmx->ple_window);
7323 WARN_ON_ONCE(vmx->nested.need_vmcs12_to_shadow_sync);
7339 if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) {
7341 vmx->loaded_vmcs->host_state.cr3 = cr3;
7345 if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) {
7347 vmx->loaded_vmcs->host_state.cr4 = cr4;
7364 pt_guest_enter(vmx);
7366 atomic_switch_perf_msrs(vmx);
7376 vmx_vcpu_enter_exit(vcpu, __vmx_vcpu_run_flags(vmx));
7387 if (vmx->host_debugctlmsr)
7388 update_debugctlmsr(vmx->host_debugctlmsr);
7403 pt_guest_exit(vmx);
7412 if (vmx->nested.nested_run_pending &&
7413 !vmx->exit_reason.failed_vmentry)
7416 vmx->nested.nested_run_pending = 0;
7419 if (unlikely(vmx->fail))
7422 if (unlikely((u16)vmx->exit_reason.basic == EXIT_REASON_MCE_DURING_VMENTRY))
7427 if (unlikely(vmx->exit_reason.failed_vmentry))
7430 vmx->loaded_vmcs->launched = 1;
7432 vmx_recover_nmi_blocking(vmx);
7433 vmx_complete_interrupts(vmx);
7443 struct vcpu_vmx *vmx = to_vmx(vcpu);
7446 vmx_destroy_pml_buffer(vmx);
7447 free_vpid(vmx->vpid);
7449 free_loaded_vmcs(vmx->loaded_vmcs);
7455 struct vcpu_vmx *vmx;
7459 vmx = to_vmx(vcpu);
7461 INIT_LIST_HEAD(&vmx->pi_wakeup_list);
7465 vmx->vpid = allocate_vpid();
7474 vmx->pml_pg = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
7475 if (!vmx->pml_pg)
7480 vmx->guest_uret_msrs[i].mask = -1ull;
7487 tsx_ctrl = vmx_find_uret_msr(vmx, MSR_IA32_TSX_CTRL);
7492 err = alloc_loaded_vmcs(&vmx->vmcs01);
7504 struct hv_enlightened_vmcs *evmcs = (void *)vmx->vmcs01.vmcs;
7510 bitmap_fill(vmx->shadow_msr_intercept.read, MAX_POSSIBLE_PASSTHROUGH_MSRS);
7511 bitmap_fill(vmx->shadow_msr_intercept.write, MAX_POSSIBLE_PASSTHROUGH_MSRS);
7529 vmx->loaded_vmcs = &vmx->vmcs01;
7545 __pa(&vmx->pi_desc) | PID_TABLE_ENTRY_VALID);
7550 free_loaded_vmcs(vmx->loaded_vmcs);
7552 vmx_destroy_pml_buffer(vmx);
7554 free_vpid(vmx->vpid);
7632 static void vmcs_set_secondary_exec_control(struct vcpu_vmx *vmx, u32 new_ctl)
7646 u32 cur_ctl = secondary_exec_controls_get(vmx);
7648 secondary_exec_controls_set(vmx, (new_ctl & ~mask) | (cur_ctl & mask));
7657 struct vcpu_vmx *vmx = to_vmx(vcpu);
7660 vmx->nested.msrs.cr0_fixed1 = 0xffffffff;
7661 vmx->nested.msrs.cr4_fixed1 = X86_CR4_PCE;
7665 vmx->nested.msrs.cr4_fixed1 |= (_cr4_mask); \
7697 struct vcpu_vmx *vmx = to_vmx(vcpu);
7705 vmx->pt_desc.caps[CPUID_EAX + i*PT_CPUID_REGS_NUM] = best->eax;
7706 vmx->pt_desc.caps[CPUID_EBX + i*PT_CPUID_REGS_NUM] = best->ebx;
7707 vmx->pt_desc.caps[CPUID_ECX + i*PT_CPUID_REGS_NUM] = best->ecx;
7708 vmx->pt_desc.caps[CPUID_EDX + i*PT_CPUID_REGS_NUM] = best->edx;
7712 vmx->pt_desc.num_address_ranges = intel_pt_validate_cap(vmx->pt_desc.caps,
7716 vmx->pt_desc.ctl_bitmask = ~(RTIT_CTL_TRACEEN | RTIT_CTL_OS |
7724 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_cr3_filtering))
7725 vmx->pt_desc.ctl_bitmask &= ~RTIT_CTL_CR3EN;
7731 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_psb_cyc))
7732 vmx->pt_desc.ctl_bitmask &= ~(RTIT_CTL_CYCLEACC |
7738 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_mtc))
7739 vmx->pt_desc.ctl_bitmask &= ~(RTIT_CTL_MTC_EN |
7743 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_ptwrite))
7744 vmx->pt_desc.ctl_bitmask &= ~(RTIT_CTL_FUP_ON_PTW |
7748 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_power_event_trace))
7749 vmx->pt_desc.ctl_bitmask &= ~RTIT_CTL_PWR_EVT_EN;
7752 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_topa_output))
7753 vmx->pt_desc.ctl_bitmask &= ~RTIT_CTL_TOPA;
7756 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_output_subsys))
7757 vmx->pt_desc.ctl_bitmask &= ~RTIT_CTL_FABRIC_EN;
7760 for (i = 0; i < vmx->pt_desc.num_address_ranges; i++)
7761 vmx->pt_desc.ctl_bitmask &= ~(0xfULL << (32 + i * 4));
7766 struct vcpu_vmx *vmx = to_vmx(vcpu);
7779 vmx_setup_uret_msrs(vmx);
7782 vmcs_set_secondary_exec_control(vmx,
7783 vmx_secondary_exec_control(vmx));
7786 vmx->msr_ia32_feature_control_valid_bits |=
7790 vmx->msr_ia32_feature_control_valid_bits &=
7803 msr = vmx_find_uret_msr(vmx, MSR_IA32_TSX_CTRL);
7806 vmx_set_guest_uret_msr(vmx, msr, enabled ? 0 : TSX_CTRL_RTM_DISABLE);
7822 set_cr4_guest_host_mask(vmx);
7826 vmx->msr_ia32_feature_control_valid_bits |= FEAT_CTL_SGX_ENABLED;
7828 vmx->msr_ia32_feature_control_valid_bits &= ~FEAT_CTL_SGX_ENABLED;
7831 vmx->msr_ia32_feature_control_valid_bits |=
7834 vmx->msr_ia32_feature_control_valid_bits &=
8042 struct vcpu_vmx *vmx;
8046 vmx = to_vmx(vcpu);
8074 vmx->hv_deadline_tsc = tscl + delta_tsc;
8093 struct vcpu_vmx *vmx = to_vmx(vcpu);
8099 vmx->nested.update_vmcs01_cpu_dirty_logging = true;
8109 secondary_exec_controls_setbit(vmx, SECONDARY_EXEC_ENABLE_PML);
8111 secondary_exec_controls_clearbit(vmx, SECONDARY_EXEC_ENABLE_PML);
8135 struct vcpu_vmx *vmx = to_vmx(vcpu);
8144 vmx->nested.smm.guest_mode = is_guest_mode(vcpu);
8145 if (vmx->nested.smm.guest_mode)
8148 vmx->nested.smm.vmxon = vmx->nested.vmxon;
8149 vmx->nested.vmxon = false;
8156 struct vcpu_vmx *vmx = to_vmx(vcpu);
8159 if (vmx->nested.smm.vmxon) {
8160 vmx->nested.vmxon = true;
8161 vmx->nested.smm.vmxon = false;
8164 if (vmx->nested.smm.guest_mode) {
8169 vmx->nested.nested_run_pending = 1;
8170 vmx->nested.smm.guest_mode = false;