Lines Matching defs:vmcb01

89 	 * The NPT format depends on L1's CR4 and EFER, which are in vmcb01.  Note,
93 	kvm_init_shadow_npt_mmu(vcpu, X86_CR0_PG, svm->vmcb01.ptr->save.cr4,
94 				svm->vmcb01.ptr->save.efer,
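
The comment at line 89 states the key constraint: while L2 runs, L1's paging registers live only in vmcb01, so the shadow NPT MMU must be initialized from that copy rather than from current vCPU state. A minimal sketch of the dependency, with a hypothetical helper and enum (not KVM code), assuming only the architectural CR4.PAE and EFER.LMA bit positions:

#include <stdint.h>

#define X86_CR4_PAE	(1ull << 5)
#define EFER_LMA	(1ull << 10)

/* Hypothetical: the NPT walk format follows L1's paging mode. */
enum npt_format { NPT_2_LEVEL, NPT_3_LEVEL_PAE, NPT_4_LEVEL };

static enum npt_format npt_format_from_l1(uint64_t cr4, uint64_t efer)
{
	if (efer & EFER_LMA)
		return NPT_4_LEVEL;	/* long mode: 4-level walk */
	if (cr4 & X86_CR4_PAE)
		return NPT_3_LEVEL_PAE;	/* PAE: 3-level walk */
	return NPT_2_LEVEL;		/* legacy 32-bit: 2-level walk */
}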
134 h = &svm->vmcb01.ptr->control;
152 if (!(svm->vmcb01.ptr->save.rflags & X86_EFLAGS_IF))
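
Line 152 applies the same "L1 state lives in vmcb01" rule to interrupt windows: whether L1 can take an IRQ while L2 runs is judged from the RFLAGS image saved in vmcb01. A small model of that predicate (hypothetical helper name, standard EFLAGS.IF bit):

#include <stdint.h>

#define X86_EFLAGS_IF	(1ull << 9)

/* Sketch: with L2 active, L1's IF flag is only visible in vmcb01. */
static int l1_interrupts_blocked(uint64_t vmcb01_rflags)
{
	return !(vmcb01_rflags & X86_EFLAGS_IF);
}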
531 /* FIXME: merge g_pat from vmcb01 and vmcb12. */
532 svm->nested.vmcb02.ptr->save.g_pat = svm->vmcb01.ptr->save.g_pat;
538 struct vmcb *vmcb01 = svm->vmcb01.ptr;
601 } else if (unlikely(vmcb01->control.virt_ext & LBR_CTL_ENABLE_MASK)) {
602 svm_copy_lbrs(vmcb02, vmcb01);
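
Lines 601-602 handle the case where L1 itself enabled LBR virtualization: the LBR MSR shadow kept in vmcb01 is copied into vmcb02 so L2 starts from L1's view. A hedged model of an svm_copy_lbrs()-style helper; the struct below is an illustrative subset of the VMCB save area, not its real layout:

#include <stdint.h>

struct lbr_shadow {
	uint64_t dbgctl;
	uint64_t br_from, br_to;
	uint64_t last_excp_from, last_excp_to;
};

static void copy_lbrs(struct lbr_shadow *to, const struct lbr_shadow *from)
{
	*to = *from;	/* KVM also marks the LBR state dirty afterwards */
}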
638 struct vmcb *vmcb01 = svm->vmcb01.ptr;
655 if (vmcb01->control.int_ctl & V_NMI_PENDING_MASK) {
665 /* Copied from vmcb01. msrpm_base can be overwritten later. */
666 vmcb02->control.nested_ctl = vmcb01->control.nested_ctl;
667 vmcb02->control.iopm_base_pa = vmcb01->control.iopm_base_pa;
668 vmcb02->control.msrpm_base_pa = vmcb01->control.msrpm_base_pa;
692 (vmcb01->control.int_ctl & int_ctl_vmcb01_bits);
723 vmcb02->control.virt_ext = vmcb01->control.virt_ext &
747 vmcb02->control.pause_filter_count = vmcb01->control.pause_filter_count;
748 vmcb02->control.pause_filter_thresh = vmcb01->control.pause_filter_thresh;
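
Lines 665-748 build vmcb02's controls from two sources: fields L1 cannot influence (nested_ctl, the iopm/msrpm bases, the pause filter settings) are taken verbatim from vmcb01, while int_ctl is merged bit-set by bit-set at line 692. A self-contained sketch of that merge; which masks belong to the vmcb01 set versus the vmcb12 set depends on the features exposed to L1, so the values here are illustrative:

#include <stdint.h>

#define V_TPR_MASK		0x0full
#define V_IRQ_MASK		(1ull << 8)
#define V_GIF_MASK		(1ull << 9)
#define V_INTR_MASKING_MASK	(1ull << 24)

static uint64_t merge_int_ctl(uint64_t vmcb01_int_ctl,
			      uint64_t vmcb12_int_ctl,
			      uint64_t vmcb01_bits, uint64_t vmcb12_bits)
{
	return (vmcb12_int_ctl & vmcb12_bits) |
	       (vmcb01_int_ctl & vmcb01_bits);
}

In the kernel's scheme, V_INTR_MASKING stays with vmcb01 while V_TPR and the V_IRQ injection bits come from vmcb12; V_GIF switches sets depending on whether vGIF is exposed to L1.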
811 nested_svm_copy_common_state(svm->vmcb01.ptr, svm->nested.vmcb02.ptr);
842 struct vmcb *vmcb01 = svm->vmcb01.ptr;
890 * Since vmcb01 is not in use, we can use it to store some of the L1
893 vmcb01->save.efer = vcpu->arch.efer;
894 vmcb01->save.cr0 = kvm_read_cr0(vcpu);
895 vmcb01->save.cr4 = vcpu->arch.cr4;
896 vmcb01->save.rflags = kvm_get_rflags(vcpu);
897 vmcb01->save.rip = kvm_rip_read(vcpu);
900 vmcb01->save.cr3 = kvm_read_cr3(vcpu);
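
The comment at line 890 explains the trick used through lines 893-900: while L2 is running out of vmcb02, vmcb01 is idle, so its save area doubles as the stash for L1's registers; the values are read back at nested #VMEXIT (lines 1109-1115 below). A minimal sketch with a hypothetical, trimmed register struct:

#include <stdint.h>

struct l1_regs {
	uint64_t efer, cr0, cr3, cr4, rflags, rip;
};

/* Snapshot L1 state into the unused vmcb01 before entering L2. */
static void stash_l1_state(struct l1_regs *vmcb01_save,
			   const struct l1_regs *vcpu)
{
	*vmcb01_save = *vcpu;
}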
968 struct vmcb *vmcb01 = svm->vmcb01.ptr;
1031 vmcb01->control.pause_filter_count = vmcb02->control.pause_filter_count;
1032 vmcb_mark_dirty(vmcb01, VMCB_INTERCEPTS);
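
Lines 1031-1032 (and 1093-1095 below) pair every software edit of vmcb01 with vmcb_mark_dirty(): clearing a VMCB clean bit forces the CPU to re-read that field group on the next VMRUN. The TSC offset and the pause filter counts share the VMCB_INTERCEPTS clean bit, which is why both writes mark that same bit. A sketch of the mechanism (the clean-bit index matches kvm's svm.h; the helper shape is illustrative):

#include <stdint.h>

#define VMCB_INTERCEPTS	0	/* intercepts, TSC offset, pause filter */

static void mark_dirty(uint32_t *vmcb_clean, int bit)
{
	*vmcb_clean &= ~(1u << bit);	/* CPU reloads this group on VMRUN */
}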
1036 nested_svm_copy_common_state(svm->nested.vmcb02.ptr, svm->vmcb01.ptr);
1038 svm_switch_vmcb(svm, &svm->vmcb01);
1041 * Rules for synchronizing int_ctl bits from vmcb02 to vmcb01:
1048 	 * copies these bits from vmcb02 to vmcb01.
1052 * vmcb01 because it is copied from/to the virtual APIC's TPR register
1057 * there is no need to copy V_GIF from vmcb02 to vmcb01.
1066 } else if (unlikely(vmcb01->control.virt_ext & LBR_CTL_ENABLE_MASK)) {
1067 svm_copy_lbrs(vmcb01, vmcb02);
1073 vmcb01->control.int_ctl |= V_NMI_BLOCKING_MASK;
1075 vmcb01->control.int_ctl &= ~V_NMI_BLOCKING_MASK;
1079 vmcb01->control.int_ctl |= V_NMI_PENDING_MASK;
1081 vmcb01->control.int_ctl &= ~V_NMI_PENDING_MASK;
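
Lines 1066-1081 apply the int_ctl rules from the comment above in the vmcb02-to-vmcb01 direction: LBR state flows back if L1 had it enabled, and the vNMI pending/blocking flags observed while L2 ran are mirrored into vmcb01. A sketch of the vNMI hand-back; bit positions follow kvm's svm.h but should be treated as illustrative:

#include <stdint.h>

#define V_NMI_PENDING_MASK	(1ull << 11)
#define V_NMI_BLOCKING_MASK	(1ull << 12)

static void sync_v_nmi(uint64_t *vmcb01_int_ctl,
		       int nmi_blocked, int nmi_pending)
{
	if (nmi_blocked)
		*vmcb01_int_ctl |= V_NMI_BLOCKING_MASK;
	else
		*vmcb01_int_ctl &= ~V_NMI_BLOCKING_MASK;

	if (nmi_pending)
		*vmcb01_int_ctl |= V_NMI_PENDING_MASK;
	else
		*vmcb01_int_ctl &= ~V_NMI_PENDING_MASK;
}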
1090 vmcb01->control.exit_int_info = 0;
1093 if (vmcb01->control.tsc_offset != svm->vcpu.arch.tsc_offset) {
1094 vmcb01->control.tsc_offset = svm->vcpu.arch.tsc_offset;
1095 vmcb_mark_dirty(vmcb01, VMCB_INTERCEPTS);
1107 * Restore processor state that had been saved in vmcb01
1109 kvm_set_rflags(vcpu, vmcb01->save.rflags);
1110 svm_set_efer(vcpu, vmcb01->save.efer);
1111 svm_set_cr0(vcpu, vmcb01->save.cr0 | X86_CR0_PE);
1112 svm_set_cr4(vcpu, vmcb01->save.cr4);
1113 kvm_rax_write(vcpu, vmcb01->save.rax);
1114 kvm_rsp_write(vcpu, vmcb01->save.rsp);
1115 kvm_rip_write(vcpu, vmcb01->save.rip);
1133 rc = nested_svm_load_cr3(vcpu, vmcb01->save.cr3, false, true);
1151 if (unlikely(vmcb01->save.rflags & X86_EFLAGS_TF))
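
Lines 1109-1115 undo the stash made at VMRUN: L1's registers come straight back out of vmcb01. CR0.PE is OR-ed in at line 1111 because L1 must have been in protected mode to execute VMRUN in the first place, and the RFLAGS.TF check at line 1151 queues the single-step trap needed when VMRUN itself was being single-stepped. A matching sketch for the restore path (same hypothetical struct as above):

#include <stdint.h>

#define X86_CR0_PE	(1ull << 0)

struct l1_regs {
	uint64_t efer, cr0, cr3, cr4, rflags, rip;
};

static void restore_l1_state(struct l1_regs *vcpu,
			     const struct l1_regs *vmcb01_save)
{
	*vcpu = *vmcb01_save;
	vcpu->cr0 |= X86_CR0_PE;	/* VMRUN requires protected mode */
}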
1206 if (WARN_ON_ONCE(svm->vmcb != svm->vmcb01.ptr))
1207 svm_switch_vmcb(svm, &svm->vmcb01);
1237 svm_switch_vmcb(svm, &svm->vmcb01);
1507 if (svm->vmcb01.ptr->control.intercepts[INTERCEPT_EXCEPTION] &
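
The check at line 1507 reads vmcb01's exception intercept vector, i.e. the intercepts KVM itself installed for running L1, to decide whether an exception exit must be handled in the host (L0) rather than reflected to L1. A minimal model of that predicate (hypothetical helper name):

#include <stdint.h>

static int l0_wants_exception(uint32_t vmcb01_excp_intercepts, int vector)
{
	return (vmcb01_excp_intercepts >> vector) & 1;
}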
1636 if (copy_to_user(&user_vmcb->save, &svm->vmcb01.ptr->save,
1733 * vmcb02, and the L1 save state which we store in vmcb01.
1740 svm->nested.vmcb02.ptr->save = svm->vmcb01.ptr->save;
1749 svm_copy_vmrun_state(&svm->vmcb01.ptr->save, save);
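
Lines 1733-1749 document the save-area shuffle during KVM_SET_NESTED_STATE: the vCPU's current registers are really L2's, so they are moved into vmcb02 (line 1740), after which the L1 save state supplied by userspace is copied into vmcb01 (line 1749). A sketch of the two moves, reusing the hypothetical trimmed struct from above:

#include <stdint.h>

struct l1_regs {
	uint64_t efer, cr0, cr3, cr4, rflags, rip;
};

static void load_nested_save_state(struct l1_regs *vmcb01_save,
				   struct l1_regs *vmcb02_save,
				   const struct l1_regs *user_save)
{
	*vmcb02_save = *vmcb01_save;	/* current regs become L2 state */
	*vmcb01_save = *user_save;	/* userspace-provided L1 state */
}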