Lines Matching refs:vmcs01

298 if (WARN_ON_ONCE(vmx->loaded_vmcs != &vmx->vmcs01))
299 vmx_switch_vmcs(vcpu, &vmx->vmcs01);
314 vmcs_clear(vmx->vmcs01.shadow_vmcs);
315 free_vmcs(vmx->vmcs01.shadow_vmcs);
316 vmx->vmcs01.shadow_vmcs = NULL;
341 * vmcs01 of the vcpu before calling free_nested().
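Lines 298-299 and the fragment at 341 describe the same invariant from two sides: callers are expected to restore vmcs01 before calling free_nested(), and free_nested() itself warns and switches back if that didn't happen. A minimal standalone sketch of that guard; struct vcpu_model, switch_to_vmcs01() and free_nested_model() are invented stand-ins, not KVM's types or helpers.

/* Sketch only; the types and helpers below stand in for
 * vmx_switch_vmcs()/WARN_ON_ONCE() to show the shape of the guard. */
#include <stdio.h>

struct vmcs_model { const char *name; };

struct vcpu_model {
	struct vmcs_model vmcs01;
	struct vmcs_model *loaded_vmcs;
};

static void switch_to_vmcs01(struct vcpu_model *v)
{
	printf("switching back to %s\n", v->vmcs01.name);
	v->loaded_vmcs = &v->vmcs01;
}

static void free_nested_model(struct vcpu_model *v)
{
	/* Callers should already have restored vmcs01; warn once and fix it
	 * up rather than tearing down nested state under vmcs02. */
	if (v->loaded_vmcs != &v->vmcs01) {
		fprintf(stderr, "WARN: freeing nested state with vmcs02 loaded\n");
		switch_to_vmcs01(v);
	}

	/* ... release vmcs02, shadow/cached vmcs12 pages, etc. ... */
}

int main(void)
{
	struct vcpu_model v = { .vmcs01 = { "vmcs01" } };
	struct vmcs_model vmcs02 = { "vmcs02" };

	v.loaded_vmcs = &vmcs02;	/* simulate a buggy caller */
	free_nested_model(&v);
	return 0;
}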
509 * For x2APIC MSRs, ignore the vmcs01 bitmap. L1 can enable x2APIC without L1
542 if (vmx_test_msr_bitmap_##rw(vmx->vmcs01.msr_bitmap, msr) || \
641 * Always check vmcs01's bitmap to honor userspace MSR filters and any
642 * other runtime changes to vmcs01's bitmap, e.g. dynamic pass-through.
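The helper macro at 542 and the comment at 641-642 spell out the merge rule for the MSR bitmap L2 actually runs with: an access is passed through only if both vmcs01's bitmap (L0's own filtering, including userspace MSR filters and dynamic pass-through) and L1's bitmap allow it; if either one intercepts, the merged bitmap intercepts. A small standalone model of that rule; the struct and function names below are invented, and the real code operates on the 4 KiB hardware bitmaps via the vmx_{test,set,clear}_msr_bitmap_* helpers.

/* Illustrative model only; 'struct msr_policy' and vmcs02_intercepts()
 * are invented for the sketch, not KVM's representation. */
#include <stdbool.h>
#include <stdio.h>

struct msr_policy {
	bool l0_intercepts;	/* bit state in vmcs01's bitmap        */
	bool l1_intercepts;	/* bit state in L1's (vmcs12's) bitmap */
};

/* Mirror of the helper's logic: intercept in the merged (vmcs02) bitmap
 * unless both L0 and L1 pass the MSR through. */
static bool vmcs02_intercepts(struct msr_policy p)
{
	return p.l0_intercepts || p.l1_intercepts;
}

int main(void)
{
	struct msr_policy cases[] = {
		{ false, false },	/* both pass through -> pass through */
		{ true,  false },	/* L0 filters it     -> intercept    */
		{ false, true  },	/* L1 wants the exit -> intercept    */
	};

	for (unsigned i = 0; i < sizeof(cases) / sizeof(cases[0]); i++)
		printf("case %u: vmcs02 intercepts = %d\n", i,
		       vmcs02_intercepts(cases[i]));
	return 0;
}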
1520 struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs;
1555 struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs;
2239 static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct loaded_vmcs *vmcs01,
2251 exec_control = __pin_controls_get(vmcs01);
2266 exec_control = __exec_controls_get(vmcs01); /* L0's desires */
2303 exec_control = __secondary_exec_controls_get(vmcs01);
2355 * Similarly, take vmcs01's PERF_GLOBAL_CTRL in the hope that if KVM is
2359 exec_control = __vm_entry_controls_get(vmcs01);
2378 exec_control = __vm_exit_controls_get(vmcs01);
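Lines 2251-2378 all follow one shape: prepare_vmcs02_early() seeds each vmcs02 control field from vmcs01's value (L0's desires), strips bits that only make sense while running L1, then folds in what vmcs12 requests. A rough standalone sketch of that pattern; the control-bit names and merge_exec_controls() are invented for illustration, and the real code does this per field via the __*_controls_get() accessors with field-specific masking.

/* Sketch only; the constants and merge_exec_controls() illustrate the
 * "vmcs01 as the baseline" pattern, not the exact per-field rules. */
#include <stdint.h>
#include <stdio.h>

#define CTRL_INTR_WINDOW_EXITING  (1u << 2)	/* L1-run-only bit, dropped   */
#define CTRL_HLT_EXITING          (1u << 7)	/* may come from either level */
#define CTRL_USE_TPR_SHADOW       (1u << 21)	/* L1-run-only bit, dropped   */

static uint32_t merge_exec_controls(uint32_t vmcs01_ctl, uint32_t vmcs12_ctl)
{
	uint32_t exec_control = vmcs01_ctl;	/* L0's desires */

	/* Drop bits KVM only sets for its own L1 run; they are re-evaluated
	 * for L2 and must not leak into vmcs02 unconditionally. */
	exec_control &= ~(CTRL_INTR_WINDOW_EXITING | CTRL_USE_TPR_SHADOW);

	/* Fold in L1's requests so L2 exits whenever L1 asked for an exit. */
	exec_control |= vmcs12_ctl;

	return exec_control;
}

int main(void)
{
	uint32_t vmcs01 = CTRL_INTR_WINDOW_EXITING | CTRL_HLT_EXITING;
	uint32_t vmcs12 = CTRL_USE_TPR_SHADOW;

	printf("vmcs02 exec controls: %#x\n",
	       (unsigned)merge_exec_controls(vmcs01, vmcs12));
	return 0;
}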
2486 * "or"ing of the EB of vmcs01 and vmcs12, because when enable_ept,
2487 * vmcs01's EB.PF is 0 so the "or" will take vmcs12's value, and when
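The fragment at 2486-2487 leans on a simple identity: because vmcs02's exception bitmap is the OR of vmcs01's and vmcs12's, EB.PF needs no special casing; whichever level wants to see #PF forces the bit on, and when L0 itself intercepts #PF it also zeroes the PFEC mask/match so every L2 page fault exits. A standalone model of that merge; PF_BIT, struct pf_cfg and merge_pf_config() are invented names, while the real code writes EXCEPTION_BITMAP and the PAGE_FAULT_ERROR_CODE_{MASK,MATCH} fields of vmcs02.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PF_BIT (1u << 14)	/* #PF is vector 14 */

struct pf_cfg {
	uint32_t exception_bitmap;
	uint32_t pfec_mask;
	uint32_t pfec_match;
};

/* vmcs02's exception bitmap is the plain OR of both levels' bitmaps.  With
 * EPT, L0 leaves EB.PF clear, so L1's choice survives the OR; without EPT,
 * L0 keeps EB.PF set and additionally zeroes the PFEC mask/match so that
 * every L2 #PF exits (merging two PFEC filters isn't practical). */
static struct pf_cfg merge_pf_config(bool l0_intercepts_pf,
				     uint32_t eb01, uint32_t eb12,
				     uint32_t mask12, uint32_t match12)
{
	struct pf_cfg cfg;

	cfg.exception_bitmap = eb01 | eb12;
	cfg.pfec_mask  = l0_intercepts_pf ? 0 : mask12;
	cfg.pfec_match = l0_intercepts_pf ? 0 : match12;
	return cfg;
}

int main(void)
{
	/* EPT enabled: L0's EB.PF is 0, L1's filter is taken verbatim. */
	struct pf_cfg ept_on  = merge_pf_config(false, 0, PF_BIT, 0x1, 0x1);
	/* EPT disabled: L0's EB.PF is 1, every L2 #PF exits. */
	struct pf_cfg ept_off = merge_pf_config(true, PF_BIT, 0, 0x1, 0x1);

	printf("ept on : EB=%#x mask=%u match=%u\n", (unsigned)ept_on.exception_bitmap,
	       (unsigned)ept_on.pfec_mask, (unsigned)ept_on.pfec_match);
	printf("ept off: EB=%#x mask=%u match=%u\n", (unsigned)ept_off.exception_bitmap,
	       (unsigned)ept_off.pfec_mask, (unsigned)ept_off.pfec_match);
	return 0;
}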
2525 * with L0's requirements for its guest (a.k.a. vmcs01), so we can run the L2
3426 * Overwrite vmcs01.GUEST_CR3 with L1's CR3 if EPT is disabled *and*
3432 * vmcs01.GUEST_CR3 results in the unwind naturally setting arch.cr3 to
3433 * the correct value. Smashing vmcs01.GUEST_CR3 is safe because nested
3434 * VM-Exits, and the unwind, reset KVM's MMU, i.e. vmcs01.GUEST_CR3 is
3436 * L1. Don't stuff vmcs01.GUEST_CR3 when using nested early checks as
3439 * path would need to manually save/restore vmcs01.GUEST_CR3.
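The comment at 3426-3439 is about making a late (hardware-detected) VM-Fail unwind cleanly when EPT is off: L1's CR3 is stuffed into vmcs01.GUEST_CR3 before the emulated entry so the forced VM-Exit back to vmcs01 naturally restores arch.cr3, and the stuffing is skipped when nested early checks are in use since that path would have to save/restore the field by hand. A control-flow sketch with invented stand-ins; vmcs01_write_guest_cr3() is not a real helper, it models the kernel's vmcs_writel(GUEST_CR3, ...) while vmcs01 is loaded.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool enable_ept;
static bool nested_early_check;

static void vmcs01_write_guest_cr3(uint64_t cr3)
{
	printf("vmcs01.GUEST_CR3 <- %#llx\n", (unsigned long long)cr3);
}

/* Before the emulated VM-Enter to L2: if a late VM-Fail forces an unwind
 * back to vmcs01, the unwind reloads arch.cr3 from vmcs01.GUEST_CR3.
 * Without EPT that field normally holds KVM's shadow CR3, so stuff L1's
 * real CR3 there first; skip it when nested early checks run, since that
 * path would have to save/restore the field manually. */
static void maybe_stuff_guest_cr3(uint64_t l1_cr3)
{
	if (!enable_ept && !nested_early_check)
		vmcs01_write_guest_cr3(l1_cr3);
}

int main(void)
{
	enable_ept = false;
	nested_early_check = false;
	maybe_stuff_guest_cr3(0x12345000);	/* stuffed */

	enable_ept = true;
	maybe_stuff_guest_cr3(0x12345000);	/* left alone: GUEST_CR3 already holds L1's CR3 */
	return 0;
}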
3446 prepare_vmcs02_early(vmx, &vmx->vmcs01, vmcs12);
3450 vmx_switch_vmcs(vcpu, &vmx->vmcs01);
3455 vmx_switch_vmcs(vcpu, &vmx->vmcs01);
3534 vmx_switch_vmcs(vcpu, &vmx->vmcs01);
4309 WARN_ON_ONCE(vmx->loaded_vmcs != &vmx->vmcs01);
4313 vmx_vcpu_load_vmcs(vcpu, cpu, &vmx->vmcs01);
4317 vmx->loaded_vmcs = &vmx->vmcs01;
4463 * This function should be called when the active VMCS is L1's (vmcs01).
4488 * CR0_GUEST_HOST_MASK is already set in the original vmcs01
4626 * as vmcs01.GUEST_DR7 contains a userspace defined value
4654 * from vmcs01 (if necessary). The PDPTRs are not loaded on
4807 vmx_switch_vmcs(vcpu, &vmx->vmcs01);
5079 * doesn't support virtualization of VMCS shadowing, so vmcs01 should
5082 if (WARN_ON(loaded_vmcs != &vmx->vmcs01 || loaded_vmcs->shadow_vmcs))
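The check at 5079-5082 encodes the rule that only vmcs01 ever owns a shadow VMCS: KVM does not virtualize VMCS shadowing for L1, so vmcs02 never needs one, and hitting the allocator with a shadow VMCS already in place indicates a bug. A compact sketch of that guard; the structs, alloc_vmcs_page() and clear_vmcs_region() are invented stand-ins, not KVM's allocator.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct vmcs_model { int dummy; };

struct loaded_vmcs_model {
	struct vmcs_model *shadow_vmcs;
};

struct vcpu_model {
	struct loaded_vmcs_model vmcs01;
	struct loaded_vmcs_model *loaded_vmcs;
};

static struct vmcs_model *alloc_vmcs_page(void)
{
	return calloc(1, sizeof(struct vmcs_model));
}

static void clear_vmcs_region(struct vmcs_model *v) { (void)v; }	/* stand-in for VMCLEAR */

static int alloc_shadow_vmcs_model(struct vcpu_model *v)
{
	struct loaded_vmcs_model *lv = v->loaded_vmcs;

	/* Only vmcs01 ever carries a shadow VMCS (L1 is never offered VMCS
	 * shadowing), and it must not already have one. */
	if (lv != &v->vmcs01 || lv->shadow_vmcs) {
		fprintf(stderr, "WARN: unexpected shadow VMCS allocation\n");
		return -EIO;
	}

	lv->shadow_vmcs = alloc_vmcs_page();
	if (!lv->shadow_vmcs)
		return -ENOMEM;

	clear_vmcs_region(lv->shadow_vmcs);
	return 0;
}

int main(void)
{
	struct vcpu_model v = { 0 };

	v.loaded_vmcs = &v.vmcs01;
	printf("first alloc:  %d\n", alloc_shadow_vmcs_model(&v));
	printf("second alloc: %d\n", alloc_shadow_vmcs_model(&v));	/* trips the guard */
	free(v.vmcs01.shadow_vmcs);
	return 0;
}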
5542 vmcs_load(vmx->vmcs01.shadow_vmcs);
5546 vmcs_clear(vmx->vmcs01.shadow_vmcs);
5562 __pa(vmx->vmcs01.shadow_vmcs));
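Lines 5542-5546 briefly make vmcs01's shadow VMCS the current VMCS, update it, then VMCLEAR it and reload the previously loaded VMCS, while 5562 looks like the tail of the VMCS_LINK_POINTER write that points vmcs01 at that shadow region. A standalone sketch of the load/write/clear/reload ordering only; every function below is an invented stand-in that just prints, and no real VMX instruction is issued.

#include <stdint.h>
#include <stdio.h>

struct vmcs_region { const char *name; };

static void vmcs_load_region(struct vmcs_region *r)  { printf("VMPTRLD %s\n", r->name); }
static void vmcs_clear_region(struct vmcs_region *r) { printf("VMCLEAR %s\n", r->name); }

static void vmcs_write_field(uint64_t field, uint64_t value)
{
	printf("VMWRITE field %#llx = %#llx\n",
	       (unsigned long long)field, (unsigned long long)value);
}

/* Mirror the flow around 5542-5546: make the shadow VMCS current just long
 * enough to update the field L1 wrote, then clear it and reload whatever
 * VMCS was current before. */
static void propagate_to_shadow(struct vmcs_region *shadow,
				struct vmcs_region *current_vmcs,
				uint64_t field, uint64_t value)
{
	vmcs_load_region(shadow);
	vmcs_write_field(field, value);
	vmcs_clear_region(shadow);
	vmcs_load_region(current_vmcs);
}

int main(void)
{
	struct vmcs_region shadow = { "vmcs01.shadow_vmcs" };
	struct vmcs_region vmcs01 = { "vmcs01" };

	propagate_to_shadow(&shadow, &vmcs01, 0x6820 /* GUEST_RFLAGS */, 0x2);
	return 0;
}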
6484 * in the shadow or enlightened vmcs linked to vmcs01, unless
7016 * in vmcs01 and prepare_vmcs02, when it bitwise-or's the control
7017 * fields of vmcs01 and vmcs02, will turn these bits off - and