Lines matching refs: vmx
17 #include "vmx.h"
190 struct vcpu_vmx *vmx = to_vmx(vcpu);
196 if (vmx->nested.current_vmptr == INVALID_GPA &&
197 !evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
207 pr_debug_ratelimited("nested vmx abort, indicator %d\n", indicator);
220 static void vmx_disable_shadow_vmcs(struct vcpu_vmx *vmx)
222 secondary_exec_controls_clearbit(vmx, SECONDARY_EXEC_SHADOW_VMCS);
224 vmx->nested.need_vmcs12_to_shadow_sync = false;
230 struct vcpu_vmx *vmx = to_vmx(vcpu);
232 if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) {
233 kvm_vcpu_unmap(vcpu, &vmx->nested.hv_evmcs_map, true);
234 vmx->nested.hv_evmcs = NULL;
237 vmx->nested.hv_evmcs_vmptr = EVMPTR_INVALID;
246 static void vmx_sync_vmcs_host_state(struct vcpu_vmx *vmx,
251 if (unlikely(!vmx->guest_state_loaded))
255 dest = &vmx->loaded_vmcs->host_state;
267 struct vcpu_vmx *vmx = to_vmx(vcpu);
271 if (WARN_ON_ONCE(vmx->loaded_vmcs == vmcs))
275 prev = vmx->loaded_vmcs;
276 vmx->loaded_vmcs = vmcs;
278 vmx_sync_vmcs_host_state(vmx, prev);
291 * Free whatever needs to be freed from vmx->nested when L1 goes down, or
296 struct vcpu_vmx *vmx = to_vmx(vcpu);
298 if (WARN_ON_ONCE(vmx->loaded_vmcs != &vmx->vmcs01))
299 vmx_switch_vmcs(vcpu, &vmx->vmcs01);
301 if (!vmx->nested.vmxon && !vmx->nested.smm.vmxon)
306 vmx->nested.vmxon = false;
307 vmx->nested.smm.vmxon = false;
308 vmx->nested.vmxon_ptr = INVALID_GPA;
309 free_vpid(vmx->nested.vpid02);
310 vmx->nested.posted_intr_nv = -1;
311 vmx->nested.current_vmptr = INVALID_GPA;
313 vmx_disable_shadow_vmcs(vmx);
314 vmcs_clear(vmx->vmcs01.shadow_vmcs);
315 free_vmcs(vmx->vmcs01.shadow_vmcs);
316 vmx->vmcs01.shadow_vmcs = NULL;
318 kfree(vmx->nested.cached_vmcs12);
319 vmx->nested.cached_vmcs12 = NULL;
320 kfree(vmx->nested.cached_shadow_vmcs12);
321 vmx->nested.cached_shadow_vmcs12 = NULL;
327 kvm_vcpu_unmap(vcpu, &vmx->nested.apic_access_page_map, false);
328 kvm_vcpu_unmap(vcpu, &vmx->nested.virtual_apic_map, true);
329 kvm_vcpu_unmap(vcpu, &vmx->nested.pi_desc_map, true);
330 vmx->nested.pi_desc = NULL;
336 free_loaded_vmcs(&vmx->nested.vmcs02);
382 struct vcpu_vmx *vmx = to_vmx(vcpu);
386 if (vmx->nested.pml_full) {
388 vmx->nested.pml_full = false;
413 struct vcpu_vmx *vmx = to_vmx(vcpu);
414 bool execonly = vmx->nested.msrs.ept_caps & VMX_EPT_EXECUTE_ONLY_BIT;
415 int ept_lpage_level = ept_caps_to_lpage_level(vmx->nested.msrs.ept_caps);
538 void nested_vmx_set_msr_##rw##_intercept(struct vcpu_vmx *vmx, \
542 if (vmx_test_msr_bitmap_##rw(vmx->vmcs01.msr_bitmap, msr) || \
551 static inline void nested_vmx_set_intercept_for_msr(struct vcpu_vmx *vmx,
557 nested_vmx_set_msr_read_intercept(vmx, msr_bitmap_l1,
560 nested_vmx_set_msr_write_intercept(vmx, msr_bitmap_l1,
571 struct vcpu_vmx *vmx = to_vmx(vcpu);
574 unsigned long *msr_bitmap_l0 = vmx->nested.vmcs02.msr_bitmap;
575 struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs;
576 struct kvm_host_map *map = &vmx->nested.msr_bitmap_map;
591 if (!vmx->nested.force_msr_bitmap_recalc && evmcs &&
645 nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0,
648 nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0,
651 nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0,
654 nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0,
657 nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0,
660 nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0,
663 kvm_vcpu_unmap(vcpu, &vmx->nested.msr_bitmap_map, false);
665 vmx->nested.force_msr_bitmap_recalc = false;
673 struct vcpu_vmx *vmx = to_vmx(vcpu);
674 struct gfn_to_hva_cache *ghc = &vmx->nested.shadow_vmcs12_cache;
685 kvm_read_guest_cached(vmx->vcpu.kvm, ghc, get_shadow_vmcs12(vcpu),
692 struct vcpu_vmx *vmx = to_vmx(vcpu);
693 struct gfn_to_hva_cache *ghc = &vmx->nested.shadow_vmcs12_cache;
704 kvm_write_guest_cached(vmx->vcpu.kvm, ghc, get_shadow_vmcs12(vcpu),
891 struct vcpu_vmx *vmx = to_vmx(vcpu);
892 u64 vmx_misc = vmx_control_msr(vmx->nested.msrs.misc_low,
893 vmx->nested.msrs.misc_high);
947 struct vcpu_vmx *vmx = to_vmx(vcpu);
955 int i = vmx_find_loadstore_msr_slot(&vmx->msr_autostore.guest,
959 u64 val = vmx->msr_autostore.guest.val[i].value;
1045 struct vcpu_vmx *vmx = to_vmx(vcpu);
1046 struct vmx_msrs *autostore = &vmx->msr_autostore.guest;
1125 * with different VPID (L1 entries are tagged with vmx->vpid
1126 * while L2 entries are tagged with vmx->nested.vpid02).
1140 struct vcpu_vmx *vmx = to_vmx(vcpu);
1180 if (is_vmenter && vmcs12->virtual_processor_id != vmx->nested.last_vpid) {
1181 vmx->nested.last_vpid = vmcs12->virtual_processor_id;
1204 static int vmx_restore_vmx_basic(struct vcpu_vmx *vmx, u64 data)
1230 vmx->nested.msrs.basic = data;
1264 vmx_restore_control_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
1281 vmx_get_control_msr(&vmx->nested.msrs, msr_index, &lowp, &highp);
1287 static int vmx_restore_vmx_misc(struct vcpu_vmx *vmx, u64 data)
1301 if ((vmx->nested.msrs.pinbased_ctls_high &
1316 vmx->nested.msrs.misc_low = data;
1317 vmx->nested.msrs.misc_high = data >> 32;
1322 static int vmx_restore_vmx_ept_vpid_cap(struct vcpu_vmx *vmx, u64 data)
1331 vmx->nested.msrs.ept_caps = data;
1332 vmx->nested.msrs.vpid_caps = data >> 32;
1348 static int vmx_restore_fixed0_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
1359 *vmx_get_fixed0_msr(&vmx->nested.msrs, msr_index) = data;
1370 struct vcpu_vmx *vmx = to_vmx(vcpu);
1376 if (vmx->nested.vmxon)
1381 return vmx_restore_vmx_basic(vmx, data);
1401 return vmx_restore_control_msr(vmx, msr_index, data);
1403 return vmx_restore_vmx_misc(vmx, data);
1406 return vmx_restore_fixed0_msr(vmx, msr_index, data);
1415 return vmx_restore_vmx_ept_vpid_cap(vmx, data);
1417 vmx->nested.msrs.vmcs_enum = data;
1422 vmx->nested.msrs.vmfunc_controls = data;
1518 static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx)
1520 struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs;
1521 struct vmcs12 *vmcs12 = get_vmcs12(&vmx->vcpu);
1540 vmcs_load(vmx->loaded_vmcs->vmcs);
1545 static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx)
1555 struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs;
1556 struct vmcs12 *vmcs12 = get_vmcs12(&vmx->vcpu);
1576 vmcs_load(vmx->loaded_vmcs->vmcs);
1579 static void copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx, u32 hv_clean_fields)
1581 struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12;
1582 struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs;
1583 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(&vmx->vcpu);
1823 static void copy_vmcs12_to_enlightened(struct vcpu_vmx *vmx)
1825 struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12;
1826 struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs;
2004 struct vcpu_vmx *vmx = to_vmx(vcpu);
2017 if (unlikely(evmcs_gpa != vmx->nested.hv_evmcs_vmptr)) {
2018 vmx->nested.current_vmptr = INVALID_GPA;
2023 &vmx->nested.hv_evmcs_map))
2026 vmx->nested.hv_evmcs = vmx->nested.hv_evmcs_map.hva;
2050 if ((vmx->nested.hv_evmcs->revision_id != KVM_EVMCS_VERSION) &&
2051 (vmx->nested.hv_evmcs->revision_id != VMCS12_REVISION)) {
2056 vmx->nested.hv_evmcs_vmptr = evmcs_gpa;
2078 vmx->nested.hv_evmcs->hv_clean_fields &=
2081 vmx->nested.force_msr_bitmap_recalc = true;
2089 struct vcpu_vmx *vmx = to_vmx(vcpu);
2091 if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
2092 copy_vmcs12_to_enlightened(vmx);
2094 copy_vmcs12_to_shadow(vmx);
2096 vmx->nested.need_vmcs12_to_shadow_sync = false;
2101 struct vcpu_vmx *vmx =
2104 vmx->nested.preemption_timer_expired = true;
2105 kvm_make_request(KVM_REQ_EVENT, &vmx->vcpu);
2106 kvm_vcpu_kick(&vmx->vcpu);
2113 struct vcpu_vmx *vmx = to_vmx(vcpu);
2119 if (!vmx->nested.has_preemption_timer_deadline) {
2120 vmx->nested.preemption_timer_deadline =
2122 vmx->nested.has_preemption_timer_deadline = true;
2124 return vmx->nested.preemption_timer_deadline - l1_scaled_tsc;
2130 struct vcpu_vmx *vmx = to_vmx(vcpu);
2137 vmx_preemption_timer_fn(&vmx->nested.preemption_timer);
2147 hrtimer_start(&vmx->nested.preemption_timer,
2152 static u64 nested_vmx_calc_efer(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
2154 if (vmx->nested.nested_run_pending &&
2158 return vmx->vcpu.arch.efer | (EFER_LMA | EFER_LME);
2160 return vmx->vcpu.arch.efer & ~(EFER_LMA | EFER_LME);
2163 static void prepare_vmcs02_constant_state(struct vcpu_vmx *vmx)
2165 struct kvm *kvm = vmx->vcpu.kvm;
2173 if (vmx->nested.vmcs02_initialized)
2175 vmx->nested.vmcs02_initialized = true;
2184 construct_eptp(&vmx->vcpu, 0, PT64_ROOT_4LEVEL));
2194 vmcs_write64(MSR_BITMAP, __pa(vmx->nested.vmcs02.msr_bitmap));
2217 vmcs_write64(VM_EXIT_MSR_STORE_ADDR, __pa(vmx->msr_autostore.guest.val));
2218 vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val));
2219 vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val));
2221 vmx_set_constant_host_state(vmx);
2224 static void prepare_vmcs02_early_rare(struct vcpu_vmx *vmx,
2227 prepare_vmcs02_constant_state(vmx);
2232 if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02)
2233 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->nested.vpid02);
2235 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
2239 static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct loaded_vmcs *vmcs01,
2243 u64 guest_efer = nested_vmx_calc_efer(vmx, vmcs12);
2245 if (vmx->nested.dirty_vmcs12 || evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
2246 prepare_vmcs02_early_rare(vmx, vmcs12);
2256 vmx->nested.pi_pending = false;
2258 vmx->nested.posted_intr_nv = vmcs12->posted_intr_nv;
2261 pin_controls_set(vmx, exec_control);
2272 vmx->nested.l1_tpr_threshold = -1;
2295 exec_control |= exec_controls_get(vmx) & CPU_BASED_USE_MSR_BITMAPS;
2297 exec_controls_set(vmx, exec_control);
2342 vmx_write_encls_bitmap(&vmx->vcpu, vmcs12);
2344 secondary_exec_controls_set(vmx, exec_control);
2369 vm_entry_controls_set(vmx, exec_control);
2383 vm_exit_controls_set(vmx, exec_control);
2388 if (vmx->nested.nested_run_pending) {
2397 vmx->loaded_vmcs->nmi_known_unmasked =
2404 static void prepare_vmcs02_rare(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
2406 struct hv_enlightened_vmcs *hv_evmcs = vmx->nested.hv_evmcs;
2447 vmx->segment_cache.bitmask = 0;
2469 if (kvm_mpx_supported() && vmx->nested.nested_run_pending &&
2490 if (vmx_need_pf_intercept(&vmx->vcpu)) {
2513 prepare_vmx_msr_autostore_list(&vmx->vcpu, MSR_IA32_TSC);
2515 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, vmx->msr_autostore.guest.nr);
2516 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
2517 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
2519 set_cr4_guest_host_mask(vmx);
2537 struct vcpu_vmx *vmx = to_vmx(vcpu);
2540 if (vmx->nested.dirty_vmcs12 || evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) {
2541 prepare_vmcs02_rare(vmx, vmcs12);
2542 vmx->nested.dirty_vmcs12 = false;
2544 load_guest_pdptrs_vmcs12 = !evmptr_is_valid(vmx->nested.hv_evmcs_vmptr) ||
2545 !(vmx->nested.hv_evmcs->hv_clean_fields &
2549 if (vmx->nested.nested_run_pending &&
2555 vmcs_write64(GUEST_IA32_DEBUGCTL, vmx->nested.pre_vmenter_debugctl);
2557 if (kvm_mpx_supported() && (!vmx->nested.nested_run_pending ||
2559 vmcs_write64(GUEST_BNDCFGS, vmx->nested.pre_vmenter_bndcfgs);
2570 if (vmx->nested.nested_run_pending &&
2575 vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
2607 vcpu->arch.efer = nested_vmx_calc_efer(vmx, vmcs12);
2667 if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
2668 vmx->nested.hv_evmcs->hv_clean_fields |=
2689 struct vcpu_vmx *vmx = to_vmx(vcpu);
2694 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPTP_UC_BIT)))
2698 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPTP_WB_BIT)))
2708 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_PAGE_WALK_5_BIT)))
2712 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_PAGE_WALK_4_BIT)))
2725 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_AD_BIT)))
2738 struct vcpu_vmx *vmx = to_vmx(vcpu);
2741 vmx->nested.msrs.pinbased_ctls_low,
2742 vmx->nested.msrs.pinbased_ctls_high)) ||
2744 vmx->nested.msrs.procbased_ctls_low,
2745 vmx->nested.msrs.procbased_ctls_high)))
2750 vmx->nested.msrs.secondary_ctls_low,
2751 vmx->nested.msrs.secondary_ctls_high)))
2778 ~vmx->nested.msrs.vmfunc_controls))
2797 struct vcpu_vmx *vmx = to_vmx(vcpu);
2800 vmx->nested.msrs.exit_ctls_low,
2801 vmx->nested.msrs.exit_ctls_high)) ||
2814 struct vcpu_vmx *vmx = to_vmx(vcpu);
2817 vmx->nested.msrs.entry_ctls_low,
2818 vmx->nested.msrs.entry_ctls_high)))
2980 struct vcpu_vmx *vmx = to_vmx(vcpu);
2981 struct gfn_to_hva_cache *ghc = &vmx->nested.shadow_vmcs12_cache;
3088 struct vcpu_vmx *vmx = to_vmx(vcpu);
3095 if (vmx->msr_autoload.host.nr)
3097 if (vmx->msr_autoload.guest.nr)
3113 if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) {
3115 vmx->loaded_vmcs->host_state.cr3 = cr3;
3119 if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) {
3121 vmx->loaded_vmcs->host_state.cr4 = cr4;
3124 vm_fail = __vmx_vcpu_run(vmx, (unsigned long *)&vcpu->arch.regs,
3125 __vmx_vcpu_run_flags(vmx));
3127 if (vmx->msr_autoload.host.nr)
3128 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
3129 if (vmx->msr_autoload.guest.nr)
3130 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
3166 struct vcpu_vmx *vmx = to_vmx(vcpu);
3174 vmx->nested.hv_evmcs_vmptr == EVMPTR_MAP_PENDING) {
3186 vmx->nested.need_vmcs12_to_shadow_sync = true;
3195 struct vcpu_vmx *vmx = to_vmx(vcpu);
3211 map = &vmx->nested.apic_access_page_map;
3227 map = &vmx->nested.virtual_apic_map;
3242 exec_controls_clearbit(vmx, CPU_BASED_TPR_SHADOW);
3253 map = &vmx->nested.pi_desc_map;
3256 vmx->nested.pi_desc =
3268 vmx->nested.pi_desc = NULL;
3269 pin_controls_clearbit(vmx, PIN_BASED_POSTED_INTR);
3273 exec_controls_setbit(vmx, CPU_BASED_USE_MSR_BITMAPS);
3275 exec_controls_clearbit(vmx, CPU_BASED_USE_MSR_BITMAPS);
3308 struct vcpu_vmx *vmx = to_vmx(vcpu);
3314 if (WARN_ON_ONCE(vmx->nested.pml_full))
3326 vmx->nested.pml_full = true;
3388 struct vcpu_vmx *vmx = to_vmx(vcpu);
3399 vmx->nested.current_vmptr,
3410 evaluate_pending_interrupts = exec_controls_get(vmx) &
3417 if (!vmx->nested.nested_run_pending ||
3419 vmx->nested.pre_vmenter_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
3421 (!vmx->nested.nested_run_pending ||
3423 vmx->nested.pre_vmenter_bndcfgs = vmcs_read64(GUEST_BNDCFGS);
3444 vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02);
3446 prepare_vmcs02_early(vmx, &vmx->vmcs01, vmcs12);
3450 vmx_switch_vmcs(vcpu, &vmx->vmcs01);
3455 vmx_switch_vmcs(vcpu, &vmx->vmcs01);
3509 vmx->nested.preemption_timer_expired = false;
3534 vmx_switch_vmcs(vcpu, &vmx->vmcs01);
3541 if (enable_shadow_vmcs || evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
3542 vmx->nested.need_vmcs12_to_shadow_sync = true;
3554 struct vcpu_vmx *vmx = to_vmx(vcpu);
3572 if (CC(!evmptr_is_valid(vmx->nested.hv_evmcs_vmptr) &&
3573 vmx->nested.current_vmptr == INVALID_GPA))
3587 if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) {
3588 copy_enlightened_to_vmcs12(vmx, vmx->nested.hv_evmcs->hv_clean_fields);
3592 copy_shadow_to_vmcs12(vmx);
3626 vmx->nested.nested_run_pending = 1;
3627 vmx->nested.has_preemption_timer_deadline = false;
3634 kvm_apic_has_interrupt(vcpu) == vmx->nested.posted_intr_nv) {
3635 vmx->nested.pi_pending = true;
3637 kvm_apic_clear_irr(vcpu, vmx->nested.posted_intr_nv);
3641 vmx->vcpu.arch.l1tf_flush_l1d = true;
3666 vmx->nested.nested_run_pending = 0;
3671 vmx->nested.nested_run_pending = 0;
3681 vmx->nested.nested_run_pending = 0;
3817 struct vcpu_vmx *vmx = to_vmx(vcpu);
3822 if (!vmx->nested.pi_pending)
3825 if (!vmx->nested.pi_desc)
3828 vmx->nested.pi_pending = false;
3830 if (!pi_test_and_clear_on(vmx->nested.pi_desc))
3833 max_irr = find_last_bit((unsigned long *)vmx->nested.pi_desc->pir, 256);
3835 vapic_page = vmx->nested.virtual_apic_map.hva;
3839 __kvm_apic_update_irr(vmx->nested.pi_desc->pir,
4057 struct vcpu_vmx *vmx = to_vmx(vcpu);
4063 bool block_nested_exceptions = vmx->nested.nested_run_pending;
4083 vmx->nested.mtf_pending = false;
4127 if (vmx->nested.mtf_pending) {
4257 struct vcpu_vmx *vmx = to_vmx(vcpu);
4296 vmx->nested.need_sync_vmcs02_to_vmcs12_rare = false;
4302 struct vcpu_vmx *vmx = to_vmx(vcpu);
4305 if (!vmx->nested.need_sync_vmcs02_to_vmcs12_rare)
4309 WARN_ON_ONCE(vmx->loaded_vmcs != &vmx->vmcs01);
4312 vmx->loaded_vmcs = &vmx->nested.vmcs02;
4313 vmx_vcpu_load_vmcs(vcpu, cpu, &vmx->vmcs01);
4317 vmx->loaded_vmcs = &vmx->vmcs01;
4318 vmx_vcpu_load_vmcs(vcpu, cpu, &vmx->nested.vmcs02);
4330 struct vcpu_vmx *vmx = to_vmx(vcpu);
4332 if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
4335 vmx->nested.need_sync_vmcs02_to_vmcs12_rare =
4336 !evmptr_is_valid(vmx->nested.hv_evmcs_vmptr);
4360 !vmx->nested.nested_run_pending)
4590 static inline u64 nested_vmx_get_vmcs01_guest_efer(struct vcpu_vmx *vmx)
4595 if (vm_entry_controls_get(vmx) & VM_ENTRY_LOAD_IA32_EFER)
4601 for (i = 0; i < vmx->msr_autoload.guest.nr; ++i) {
4602 if (vmx->msr_autoload.guest.val[i].index == MSR_EFER)
4603 return vmx->msr_autoload.guest.val[i].value;
4606 efer_msr = vmx_find_uret_msr(vmx, MSR_EFER);
4616 struct vcpu_vmx *vmx = to_vmx(vcpu);
4640 vmx_set_efer(vcpu, nested_vmx_get_vmcs01_guest_efer(vmx));
4726 struct vcpu_vmx *vmx = to_vmx(vcpu);
4730 vmx->nested.mtf_pending = false;
4733 WARN_ON_ONCE(vmx->nested.nested_run_pending);
4749 * VCPU_EXREG_PDPTR will be clobbered in arch/x86/kvm/vmx/vmx.h between
4767 if (likely(!vmx->fail)) {
4807 vmx_switch_vmcs(vcpu, &vmx->vmcs01);
4821 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
4822 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
4827 if (vmx->nested.l1_tpr_threshold != -1)
4828 vmcs_write32(TPR_THRESHOLD, vmx->nested.l1_tpr_threshold);
4830 if (vmx->nested.change_vmcs01_virtual_apic_mode) {
4831 vmx->nested.change_vmcs01_virtual_apic_mode = false;
4835 if (vmx->nested.update_vmcs01_cpu_dirty_logging) {
4836 vmx->nested.update_vmcs01_cpu_dirty_logging = false;
4841 kvm_vcpu_unmap(vcpu, &vmx->nested.apic_access_page_map, false);
4842 kvm_vcpu_unmap(vcpu, &vmx->nested.virtual_apic_map, true);
4843 kvm_vcpu_unmap(vcpu, &vmx->nested.pi_desc_map, true);
4844 vmx->nested.pi_desc = NULL;
4846 if (vmx->nested.reload_vmcs01_apic_access_page) {
4847 vmx->nested.reload_vmcs01_apic_access_page = false;
4851 if (vmx->nested.update_vmcs01_apicv_status) {
4852 vmx->nested.update_vmcs01_apicv_status = false;
4857 (enable_shadow_vmcs || evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)))
4858 vmx->nested.need_vmcs12_to_shadow_sync = true;
4863 if (likely(!vmx->fail)) {
4902 vmx->fail = 0;
4912 * Decode the memory-address operand of a vmx instruction, as recorded on an
5071 struct vcpu_vmx *vmx = to_vmx(vcpu);
5072 struct loaded_vmcs *loaded_vmcs = vmx->loaded_vmcs;
5082 if (WARN_ON(loaded_vmcs != &vmx->vmcs01 || loaded_vmcs->shadow_vmcs))
5094 struct vcpu_vmx *vmx = to_vmx(vcpu);
5097 r = alloc_loaded_vmcs(&vmx->nested.vmcs02);
5101 vmx->nested.cached_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL_ACCOUNT);
5102 if (!vmx->nested.cached_vmcs12)
5105 vmx->nested.shadow_vmcs12_cache.gpa = INVALID_GPA;
5106 vmx->nested.cached_shadow_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL_ACCOUNT);
5107 if (!vmx->nested.cached_shadow_vmcs12)
5113 hrtimer_init(&vmx->nested.preemption_timer, CLOCK_MONOTONIC,
5115 vmx->nested.preemption_timer.function = vmx_preemption_timer_fn;
5117 vmx->nested.vpid02 = allocate_vpid();
5119 vmx->nested.vmcs02_initialized = false;
5120 vmx->nested.vmxon = true;
5123 vmx->pt_desc.guest.ctl = 0;
5130 kfree(vmx->nested.cached_shadow_vmcs12);
5133 kfree(vmx->nested.cached_vmcs12);
5136 free_loaded_vmcs(&vmx->nested.vmcs02);
5148 struct vcpu_vmx *vmx = to_vmx(vcpu);
5188 if (vmx->nested.vmxon)
5202 if ((vmx->msr_ia32_feature_control & VMXON_NEEDED_FEATURES)
5226 vmx->nested.vmxon_ptr = vmptr;
5236 struct vcpu_vmx *vmx = to_vmx(vcpu);
5238 if (vmx->nested.current_vmptr == INVALID_GPA)
5246 copy_shadow_to_vmcs12(vmx);
5247 vmx_disable_shadow_vmcs(vmx);
5249 vmx->nested.posted_intr_nv = -1;
5253 vmx->nested.current_vmptr >> PAGE_SHIFT,
5254 vmx->nested.cached_vmcs12, 0, VMCS12_SIZE);
5258 vmx->nested.current_vmptr = INVALID_GPA;
5278 struct vcpu_vmx *vmx = to_vmx(vcpu);
5292 if (vmptr == vmx->nested.vmxon_ptr)
5303 * vmx->nested.hv_evmcs but this shouldn't be a problem.
5307 if (vmptr == vmx->nested.current_vmptr)
5323 } else if (vmx->nested.hv_evmcs && vmptr == vmx->nested.hv_evmcs_vmptr) {
5349 struct vcpu_vmx *vmx = to_vmx(vcpu);
5363 if (!evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) {
5368 if (vmx->nested.current_vmptr == INVALID_GPA ||
5401 value = evmcs_read_any(vmx->nested.hv_evmcs, field, offset);
5455 struct vcpu_vmx *vmx = to_vmx(vcpu);
5478 if (vmx->nested.current_vmptr == INVALID_GPA ||
5542 vmcs_load(vmx->vmcs01.shadow_vmcs);
5546 vmcs_clear(vmx->vmcs01.shadow_vmcs);
5547 vmcs_load(vmx->loaded_vmcs->vmcs);
5550 vmx->nested.dirty_vmcs12 = true;
5556 static void set_current_vmptr(struct vcpu_vmx *vmx, gpa_t vmptr)
5558 vmx->nested.current_vmptr = vmptr;
5560 secondary_exec_controls_setbit(vmx, SECONDARY_EXEC_SHADOW_VMCS);
5562 __pa(vmx->vmcs01.shadow_vmcs));
5563 vmx->nested.need_vmcs12_to_shadow_sync = true;
5565 vmx->nested.dirty_vmcs12 = true;
5566 vmx->nested.force_msr_bitmap_recalc = true;
5572 struct vcpu_vmx *vmx = to_vmx(vcpu);
5585 if (vmptr == vmx->nested.vmxon_ptr)
5589 if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
5592 if (vmx->nested.current_vmptr != vmptr) {
5593 struct gfn_to_hva_cache *ghc = &vmx->nested.vmcs12_cache;
5627 if (kvm_read_guest_cached(vcpu->kvm, ghc, vmx->nested.cached_vmcs12,
5633 set_current_vmptr(vmx, vmptr);
5670 struct vcpu_vmx *vmx = to_vmx(vcpu);
5681 if (!(vmx->nested.msrs.secondary_ctls_high &
5683 !(vmx->nested.msrs.ept_caps & VMX_EPT_INVEPT_BIT)) {
5695 types = (vmx->nested.msrs.ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6;
5750 struct vcpu_vmx *vmx = to_vmx(vcpu);
5762 if (!(vmx->nested.msrs.secondary_ctls_high &
5764 !(vmx->nested.msrs.vpid_caps & VMX_VPID_INVVPID_BIT)) {
5776 types = (vmx->nested.msrs.vpid_caps &
5783 /* according to the intel vmx instruction reference, the memory
5872 struct vcpu_vmx *vmx = to_vmx(vcpu);
5915 nested_vmx_vmexit(vcpu, vmx->exit_reason.full,
6355 struct vcpu_vmx *vmx = to_vmx(vcpu);
6356 union vmx_exit_reason exit_reason = vmx->exit_reason;
6360 WARN_ON_ONCE(vmx->nested.nested_run_pending);
6366 if (unlikely(vmx->fail)) {
6409 struct vcpu_vmx *vmx;
6415 .hdr.vmx.flags = 0,
6416 .hdr.vmx.vmxon_pa = INVALID_GPA,
6417 .hdr.vmx.vmcs12_pa = INVALID_GPA,
6418 .hdr.vmx.preemption_timer_deadline = 0,
6421 &user_kvm_nested_state->data.vmx[0];
6426 vmx = to_vmx(vcpu);
6430 (vmx->nested.vmxon || vmx->nested.smm.vmxon)) {
6431 kvm_state.hdr.vmx.vmxon_pa = vmx->nested.vmxon_ptr;
6432 kvm_state.hdr.vmx.vmcs12_pa = vmx->nested.current_vmptr;
6438 if (vmx->nested.hv_evmcs_vmptr != EVMPTR_INVALID)
6447 if (vmx->nested.smm.vmxon)
6448 kvm_state.hdr.vmx.smm.flags |= KVM_STATE_NESTED_SMM_VMXON;
6450 if (vmx->nested.smm.guest_mode)
6451 kvm_state.hdr.vmx.smm.flags |= KVM_STATE_NESTED_SMM_GUEST_MODE;
6456 if (vmx->nested.nested_run_pending)
6459 if (vmx->nested.mtf_pending)
6463 vmx->nested.has_preemption_timer_deadline) {
6464 kvm_state.hdr.vmx.flags |=
6466 kvm_state.hdr.vmx.preemption_timer_deadline =
6467 vmx->nested.preemption_timer_deadline;
6493 if (!vmx->nested.need_vmcs12_to_shadow_sync) {
6494 if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
6502 copy_enlightened_to_vmcs12(vmx, 0);
6504 copy_shadow_to_vmcs12(vmx);
6541 struct vcpu_vmx *vmx = to_vmx(vcpu);
6545 &user_kvm_nested_state->data.vmx[0];
6551 if (kvm_state->hdr.vmx.vmxon_pa == INVALID_GPA) {
6552 if (kvm_state->hdr.vmx.smm.flags)
6555 if (kvm_state->hdr.vmx.vmcs12_pa != INVALID_GPA)
6573 if (!page_address_valid(vcpu, kvm_state->hdr.vmx.vmxon_pa))
6577 if ((kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) &&
6581 if (kvm_state->hdr.vmx.smm.flags &
6585 if (kvm_state->hdr.vmx.flags & ~KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE)
6596 : kvm_state->hdr.vmx.smm.flags)
6599 if ((kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) &&
6600 !(kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON))
6605 !vmx->nested.enlightened_vmcs_enabled))
6610 if (kvm_state->hdr.vmx.vmxon_pa == INVALID_GPA)
6613 vmx->nested.vmxon_ptr = kvm_state->hdr.vmx.vmxon_pa;
6623 (kvm_state->hdr.vmx.vmcs12_pa != INVALID_GPA))
6629 if (kvm_state->hdr.vmx.vmcs12_pa != INVALID_GPA) {
6630 if (kvm_state->hdr.vmx.vmcs12_pa == kvm_state->hdr.vmx.vmxon_pa ||
6631 !page_address_valid(vcpu, kvm_state->hdr.vmx.vmcs12_pa))
6634 set_current_vmptr(vmx, kvm_state->hdr.vmx.vmcs12_pa);
6642 vmx->nested.hv_evmcs_vmptr = EVMPTR_MAP_PENDING;
6648 if (kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON) {
6649 vmx->nested.smm.vmxon = true;
6650 vmx->nested.vmxon = false;
6652 if (kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE)
6653 vmx->nested.smm.guest_mode = true;
6666 vmx->nested.nested_run_pending =
6669 vmx->nested.mtf_pending =
6694 vmx->nested.has_preemption_timer_deadline = false;
6695 if (kvm_state->hdr.vmx.flags & KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE) {
6696 vmx->nested.has_preemption_timer_deadline = true;
6697 vmx->nested.preemption_timer_deadline =
6698 kvm_state->hdr.vmx.preemption_timer_deadline;
6706 vmx->nested.dirty_vmcs12 = true;
6707 vmx->nested.force_msr_bitmap_recalc = true;
6712 if (vmx->nested.mtf_pending)
6718 vmx->nested.nested_run_pending = 0;
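
Nearly every matched function above begins with `struct vcpu_vmx *vmx = to_vmx(vcpu);`. In the kernel sources, `to_vmx()` is a `container_of()`-style accessor that recovers the VMX-specific vCPU structure from the generic `struct kvm_vcpu` embedded inside it. The snippet below is a minimal, self-contained userspace sketch of that same idiom; the struct layouts and the `nested_vmxon` field are illustrative stand-ins, not the real definitions from kvm_host.h and vmx.h.

#include <stddef.h>
#include <stdio.h>

/* Toy stand-ins; the real structures are far larger. */
struct kvm_vcpu {
	int vcpu_id;
};

struct vcpu_vmx {
	struct kvm_vcpu vcpu;   /* generic vCPU embedded as a member */
	int nested_vmxon;       /* hypothetical stand-in for vmx->nested state */
};

/* container_of(): recover the enclosing structure from a pointer to one of its members. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Same shape as the kernel's to_vmx() accessor used throughout the listing. */
static struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_vmx, vcpu);
}

int main(void)
{
	struct vcpu_vmx vmx = { .vcpu = { .vcpu_id = 0 }, .nested_vmxon = 1 };
	struct kvm_vcpu *vcpu = &vmx.vcpu;     /* what generic KVM code is handed */
	struct vcpu_vmx *back = to_vmx(vcpu);  /* downcast back to the VMX view */

	printf("nested_vmxon = %d\n", back->nested_vmxon);
	return 0;
}

Because the `struct kvm_vcpu` member sits inside `struct vcpu_vmx`, the downcast is pure pointer arithmetic, which is why the matched functions can call `to_vmx(vcpu)` unconditionally on every entry path.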