Lines Matching refs:save

93 	kvm_init_shadow_npt_mmu(vcpu, X86_CR0_PG, svm->vmcb01.ptr->save.cr4,
94 svm->vmcb01.ptr->save.efer,
152 if (!(svm->vmcb01.ptr->save.rflags & X86_EFLAGS_IF))
279 struct vmcb_save_area_cached *save)
281 if (CC(!(save->efer & EFER_SVME)))
284 if (CC((save->cr0 & X86_CR0_CD) == 0 && (save->cr0 & X86_CR0_NW)) ||
285 CC(save->cr0 & ~0xffffffffULL))
288 if (CC(!kvm_dr6_valid(save->dr6)) || CC(!kvm_dr7_valid(save->dr7)))
296 if ((save->efer & EFER_LME) && (save->cr0 & X86_CR0_PG)) {
297 if (CC(!(save->cr4 & X86_CR4_PAE)) ||
298 CC(!(save->cr0 & X86_CR0_PE)) ||
299 CC(kvm_vcpu_is_illegal_gpa(vcpu, save->cr3)))
304 if (CC(!__kvm_is_valid_cr4(vcpu, save->cr4)))
307 if (CC(!kvm_valid_efer(vcpu, save->efer)))
316 struct vmcb_save_area_cached *save = &svm->nested.save;
318 return __nested_vmcb_check_save(vcpu, save);
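
The cluster of matches around lines 279-318 is the save-area consistency check: the cached copy of the L1-provided save area (see __nested_copy_vmcb_save_to_cache() at line 399 and svm->nested.save at line 316) is validated by __nested_vmcb_check_save(), called at line 318, before a nested VMRUN is allowed to proceed. Below is a minimal user-space sketch of the same class of checks; the struct, helper names and the GPA test are simplified stand-ins rather than the kernel's types, and only the architectural bit positions are taken as given.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Architectural bit positions (same values as the kernel's X86_CR0_*, X86_CR4_* and EFER_* macros). */
    #define CR0_PE    (1ULL << 0)
    #define CR0_NW    (1ULL << 29)
    #define CR0_CD    (1ULL << 30)
    #define CR0_PG    (1ULL << 31)
    #define CR4_PAE   (1ULL << 5)
    #define EFER_LME  (1ULL << 8)
    #define EFER_SVME (1ULL << 12)

    /* Simplified stand-in for struct vmcb_save_area_cached. */
    struct save_cached {
        uint64_t efer, cr0, cr3, cr4, dr6, dr7;
    };

    /* Placeholder: the kernel's GPA check depends on the vCPU's MAXPHYADDR. */
    static bool gpa_is_legal(uint64_t gpa)
    {
        return !(gpa >> 52);
    }

    static bool check_save(const struct save_cached *s)
    {
        if (!(s->efer & EFER_SVME))                      /* EFER.SVME must remain set */
            return false;
        if ((s->cr0 & CR0_NW) && !(s->cr0 & CR0_CD))     /* CR0.NW=1 with CR0.CD=0 is illegal */
            return false;
        if (s->cr0 & ~0xffffffffULL)                     /* upper 32 bits of CR0 are reserved */
            return false;
        if ((s->dr6 >> 32) || (s->dr7 >> 32))            /* DR6/DR7 bits 63:32 are reserved */
            return false;
        if ((s->efer & EFER_LME) && (s->cr0 & CR0_PG)) { /* long-mode paging needs PAE and PE */
            if (!(s->cr4 & CR4_PAE) || !(s->cr0 & CR0_PE) || !gpa_is_legal(s->cr3))
                return false;
        }
        return true;    /* CR4/EFER reserved-bit validation (lines 304/307) omitted here */
    }

    int main(void)
    {
        struct save_cached ok  = { .efer = EFER_SVME, .cr0 = CR0_PE };
        struct save_cached bad = { 0 };    /* EFER.SVME clear -> rejected */

        printf("ok=%d bad=%d\n", check_save(&ok), check_save(&bad));
        return 0;
    }
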
397 struct vmcb_save_area *save)
399 __nested_copy_vmcb_save_to_cache(&svm->nested.save, save);
532 svm->nested.vmcb02.ptr->save.g_pat = svm->vmcb01.ptr->save.g_pat;
552 vmcb02->save.es = vmcb12->save.es;
553 vmcb02->save.cs = vmcb12->save.cs;
554 vmcb02->save.ss = vmcb12->save.ss;
555 vmcb02->save.ds = vmcb12->save.ds;
556 vmcb02->save.cpl = vmcb12->save.cpl;
561 vmcb02->save.gdtr = vmcb12->save.gdtr;
562 vmcb02->save.idtr = vmcb12->save.idtr;
566 kvm_set_rflags(vcpu, vmcb12->save.rflags | X86_EFLAGS_FIXED);
568 svm_set_efer(vcpu, svm->nested.save.efer);
570 svm_set_cr0(vcpu, svm->nested.save.cr0);
571 svm_set_cr4(vcpu, svm->nested.save.cr4);
573 svm->vcpu.arch.cr2 = vmcb12->save.cr2;
575 kvm_rax_write(vcpu, vmcb12->save.rax);
576 kvm_rsp_write(vcpu, vmcb12->save.rsp);
577 kvm_rip_write(vcpu, vmcb12->save.rip);
580 vmcb02->save.rax = vmcb12->save.rax;
581 vmcb02->save.rsp = vmcb12->save.rsp;
582 vmcb02->save.rip = vmcb12->save.rip;
586 vmcb02->save.dr7 = svm->nested.save.dr7 | DR7_FIXED_1;
587 svm->vcpu.arch.dr6 = svm->nested.save.dr6 | DR6_ACTIVE_LOW;
598 vmcb02->save.dbgctl &= ~DEBUGCTL_RESERVED_BITS;
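
The matches from lines 532-598 show how the L2 guest's save area (vmcb02) is assembled for a nested VMRUN: segment state, CPL, GPRs and debug registers are copied from the L1-provided vmcb12, while RFLAGS, EFER, CR0 and CR4 are routed through kvm_set_rflags()/svm_set_efer()/svm_set_cr0()/svm_set_cr4() using the cached values checked above, so KVM's common mode tracking observes the change. A condensed user-space sketch of that split follows; the types, helper stubs and field set are illustrative only, and in the kernel DR6 actually lands in vcpu->arch.dr6 rather than in vmcb02.

    #include <stdint.h>

    /* Heavily trimmed, illustrative stand-ins for the VMCB save area. */
    struct seg { uint16_t sel; uint32_t attrib, limit; uint64_t base; };
    struct save {
        struct seg es, cs, ss, ds;
        uint64_t cr2, rflags, rip, rsp, rax, dr6, dr7;
        uint8_t cpl;
    };
    struct cached { uint64_t efer, cr0, cr3, cr4, dr6, dr7; };

    #define DR7_FIXED_1    0x0400ULL
    #define DR6_ACTIVE_LOW 0xffff0ff0ULL
    #define RFLAGS_FIXED   0x0002ULL

    /* No-op stand-ins for the KVM setters, which also update the tracked CPU mode. */
    static void set_rflags(uint64_t v) { (void)v; }
    static void set_efer(uint64_t v)   { (void)v; }
    static void set_cr0(uint64_t v)    { (void)v; }
    static void set_cr4(uint64_t v)    { (void)v; }

    static void prepare_l2_save(struct save *vmcb02, const struct save *vmcb12,
                                const struct cached *cache)
    {
        /* Segment state and CPL come straight from the L1-provided VMCB. */
        vmcb02->es  = vmcb12->es;
        vmcb02->cs  = vmcb12->cs;
        vmcb02->ss  = vmcb12->ss;
        vmcb02->ds  = vmcb12->ds;
        vmcb02->cpl = vmcb12->cpl;

        /* Control state goes through the setters so mode tracking stays coherent. */
        set_rflags(vmcb12->rflags | RFLAGS_FIXED);
        set_efer(cache->efer);
        set_cr0(cache->cr0);
        set_cr4(cache->cr4);

        /* GPRs and debug state are loaded directly; DR7/DR6 get their fixed bits.
         * (In KVM, DR6 is parked in vcpu->arch.dr6 rather than written to vmcb02.) */
        vmcb02->cr2 = vmcb12->cr2;
        vmcb02->rax = vmcb12->rax;
        vmcb02->rsp = vmcb12->rsp;
        vmcb02->rip = vmcb12->rip;
        vmcb02->dr7 = cache->dr7 | DR7_FIXED_1;
        vmcb02->dr6 = cache->dr6 | DR6_ACTIVE_LOW;
    }

    int main(void)
    {
        struct save vmcb02 = { 0 };
        struct save vmcb12 = { .rip = 0x1000, .cpl = 3 };
        struct cached cache = { 0 };

        prepare_l2_save(&vmcb02, &vmcb12, &cache);
        return vmcb02.rip == 0x1000 ? 0 : 1;
    }
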
780 to_vmcb->save.spec_ctrl = from_vmcb->save.spec_ctrl;
789 trace_kvm_nested_vmenter(svm->vmcb->save.rip,
791 vmcb12->save.rip,
796 vmcb12->save.cr3,
814 nested_vmcb02_prepare_control(svm, vmcb12->save.rip, vmcb12->save.cs.base);
817 ret = nested_svm_load_cr3(&svm->vcpu, svm->nested.save.cr3,
861 vmcb12_gpa = svm->vmcb->save.rax;
878 nested_copy_vmcb_save_to_cache(svm, &vmcb12->save);
893 vmcb01->save.efer = vcpu->arch.efer;
894 vmcb01->save.cr0 = kvm_read_cr0(vcpu);
895 vmcb01->save.cr4 = vcpu->arch.cr4;
896 vmcb01->save.rflags = kvm_get_rflags(vcpu);
897 vmcb01->save.rip = kvm_rip_read(vcpu);
900 vmcb01->save.cr3 = kvm_read_cr3(vcpu);
928 /* Copy state save area fields which are handled by VMRUN */
951 to_vmcb->save.fs = from_vmcb->save.fs;
952 to_vmcb->save.gs = from_vmcb->save.gs;
953 to_vmcb->save.tr = from_vmcb->save.tr;
954 to_vmcb->save.ldtr = from_vmcb->save.ldtr;
955 to_vmcb->save.kernel_gs_base = from_vmcb->save.kernel_gs_base;
956 to_vmcb->save.star = from_vmcb->save.star;
957 to_vmcb->save.lstar = from_vmcb->save.lstar;
958 to_vmcb->save.cstar = from_vmcb->save.cstar;
959 to_vmcb->save.sfmask = from_vmcb->save.sfmask;
960 to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs;
961 to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp;
962 to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip;
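
Lines 951-962 copy the register set that the VMLOAD and VMSAVE instructions operate on (FS/GS/TR/LDTR plus KernelGSBase, the SYSCALL MSRs and the SYSENTER MSRs), presumably in svm_copy_vmloadsave_state(); because the helper takes both VMCBs as arguments, it serves both directions and emulating the two instructions reduces to choosing source and destination. A stand-alone sketch of that usage pattern, with stand-in types and names:

    #include <stdint.h>

    struct seg { uint16_t sel; uint32_t attrib, limit; uint64_t base; };

    /* Only the fields VMLOAD/VMSAVE operate on, grouped so one struct copy moves them all. */
    struct vmloadsave {
        struct seg fs, gs, tr, ldtr;
        uint64_t kernel_gs_base;
        uint64_t star, lstar, cstar, sfmask;
        uint64_t sysenter_cs, sysenter_esp, sysenter_eip;
    };

    struct vmcb { struct vmloadsave save; };

    /* Field-by-field in the kernel helper; a plain struct copy in this sketch. */
    static void copy_vmloadsave(struct vmcb *to, const struct vmcb *from)
    {
        to->save = from->save;
    }

    /* VMLOAD reads the set from a VMCB into processor state; VMSAVE is the reverse. */
    static void emulate_vmload(struct vmcb *active, const struct vmcb *guest_vmcb)
    {
        copy_vmloadsave(active, guest_vmcb);
    }

    static void emulate_vmsave(struct vmcb *guest_vmcb, const struct vmcb *active)
    {
        copy_vmloadsave(guest_vmcb, active);
    }

    int main(void)
    {
        struct vmcb active = { 0 }, guest = { 0 };

        guest.save.star = 0x23001000;          /* arbitrary value standing in for an MSR image */
        emulate_vmload(&active, &guest);       /* VMCB -> "processor" */
        emulate_vmsave(&guest, &active);       /* "processor" -> VMCB */
        return active.save.star == guest.save.star ? 0 : 1;
    }
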
995 vmcb12->save.es = vmcb02->save.es;
996 vmcb12->save.cs = vmcb02->save.cs;
997 vmcb12->save.ss = vmcb02->save.ss;
998 vmcb12->save.ds = vmcb02->save.ds;
999 vmcb12->save.gdtr = vmcb02->save.gdtr;
1000 vmcb12->save.idtr = vmcb02->save.idtr;
1001 vmcb12->save.efer = svm->vcpu.arch.efer;
1002 vmcb12->save.cr0 = kvm_read_cr0(vcpu);
1003 vmcb12->save.cr3 = kvm_read_cr3(vcpu);
1004 vmcb12->save.cr2 = vmcb02->save.cr2;
1005 vmcb12->save.cr4 = svm->vcpu.arch.cr4;
1006 vmcb12->save.rflags = kvm_get_rflags(vcpu);
1007 vmcb12->save.rip = kvm_rip_read(vcpu);
1008 vmcb12->save.rsp = kvm_rsp_read(vcpu);
1009 vmcb12->save.rax = kvm_rax_read(vcpu);
1010 vmcb12->save.dr7 = vmcb02->save.dr7;
1011 vmcb12->save.dr6 = svm->vcpu.arch.dr6;
1012 vmcb12->save.cpl = vmcb02->save.cpl;
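
The block at lines 995-1012 writes L2's state back into vmcb12 when L2 exits to L1. Note the split in where the data comes from: segments, GDTR/IDTR, CR2, DR7 and CPL are read from vmcb02, whereas EFER, CR0/CR3/CR4, RFLAGS, RIP/RSP/RAX and DR6 are read from the vCPU's architectural state (kvm_read_cr0() and friends), since KVM may hold a fresher copy there than the VMCB does. A reduced sketch of that split, with illustrative stand-in types:

    #include <stdint.h>

    struct seg { uint16_t sel; uint32_t attrib, limit; uint64_t base; };
    struct save {
        struct seg es, cs, ss, ds, gdtr, idtr;
        uint64_t efer, cr0, cr2, cr3, cr4;
        uint64_t rflags, rip, rsp, rax, dr6, dr7;
        uint8_t cpl;
    };

    /* Stand-in for the architectural state KVM tracks outside the VMCB. */
    struct vcpu_arch {
        uint64_t efer, cr0, cr3, cr4, rflags, rip, rsp, rax, dr6;
    };

    static void fill_vmcb12_on_exit(struct save *vmcb12, const struct save *vmcb02,
                                    const struct vcpu_arch *arch)
    {
        /* Taken directly from the VMCB L2 actually ran on. */
        vmcb12->es   = vmcb02->es;
        vmcb12->cs   = vmcb02->cs;
        vmcb12->ss   = vmcb02->ss;
        vmcb12->ds   = vmcb02->ds;
        vmcb12->gdtr = vmcb02->gdtr;
        vmcb12->idtr = vmcb02->idtr;
        vmcb12->cr2  = vmcb02->cr2;
        vmcb12->dr7  = vmcb02->dr7;
        vmcb12->cpl  = vmcb02->cpl;

        /* Taken from the vCPU, which may be more current than the VMCB copy. */
        vmcb12->efer   = arch->efer;
        vmcb12->cr0    = arch->cr0;
        vmcb12->cr3    = arch->cr3;
        vmcb12->cr4    = arch->cr4;
        vmcb12->rflags = arch->rflags;
        vmcb12->rip    = arch->rip;
        vmcb12->rsp    = arch->rsp;
        vmcb12->rax    = arch->rax;
        vmcb12->dr6    = arch->dr6;
    }

    int main(void)
    {
        struct save vmcb12 = { 0 };
        struct save vmcb02 = { .cpl = 3 };
        struct vcpu_arch arch = { .rip = 0x2000 };

        fill_vmcb12_on_exit(&vmcb12, &vmcb02, &arch);
        return (vmcb12.cpl == 3 && vmcb12.rip == 0x2000) ? 0 : 1;
    }
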
1109 kvm_set_rflags(vcpu, vmcb01->save.rflags);
1110 svm_set_efer(vcpu, vmcb01->save.efer);
1111 svm_set_cr0(vcpu, vmcb01->save.cr0 | X86_CR0_PE);
1112 svm_set_cr4(vcpu, vmcb01->save.cr4);
1113 kvm_rax_write(vcpu, vmcb01->save.rax);
1114 kvm_rsp_write(vcpu, vmcb01->save.rsp);
1115 kvm_rip_write(vcpu, vmcb01->save.rip);
1133 rc = nested_svm_load_cr3(vcpu, vmcb01->save.cr3, false, true);
1151 if (unlikely(vmcb01->save.rflags & X86_EFLAGS_TF))
1362 if (to_svm(vcpu)->vmcb->save.cpl) {
1486 trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip);
1636 if (copy_to_user(&user_vmcb->save, &svm->vmcb01.ptr->save,
1637 sizeof(user_vmcb->save)))
1651 struct vmcb_save_area *save;
1695 save = kzalloc(sizeof(*save), GFP_KERNEL_ACCOUNT);
1696 if (!ctl || !save)
1702 if (copy_from_user(save, &user_vmcb->save, sizeof(*save)))
1722 __nested_copy_vmcb_save_to_cache(&save_cached, save);
1723 if (!(save->cr0 & X86_CR0_PG) ||
1724 !(save->cr0 & X86_CR0_PE) ||
1725 (save->rflags & X86_EFLAGS_VM) ||
1733 * vmcb02, and the L1 save state which we store in vmcb01.
1740 svm->nested.vmcb02.ptr->save = svm->vmcb01.ptr->save;
1749 svm_copy_vmrun_state(&svm->vmcb01.ptr->save, save);
1753 nested_vmcb02_prepare_control(svm, svm->vmcb->save.rip, svm->vmcb->save.cs.base);
1772 kfree(save);
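
The matches from lines 1636-1772 come from what is evidently the nested-state save/restore path used for migration: the L1 save area is copied out to user space with copy_to_user() (line 1636), and on restore it is read back with copy_from_user() (line 1702) into a temporary allocation, cached, sanity-checked (lines 1723-1725: the saved L1 context must have paging and protected mode enabled and must not be in virtual-8086 mode), installed into vmcb01 via svm_copy_vmrun_state() (line 1749), and the buffer freed. A user-space-shaped sketch of that allocate/copy/validate/free pattern, using only the checks visible above; all names below are stand-ins.

    #include <errno.h>
    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    #define CR0_PE    (1ULL << 0)
    #define CR0_PG    (1ULL << 31)
    #define EFLAGS_VM (1ULL << 17)

    /* Trimmed stand-in for the save area carried in the nested-state blob. */
    struct save { uint64_t efer, cr0, cr3, cr4, dr6, dr7, rflags; };

    /* Stand-in for copy_from_user(); here the "user" buffer is ordinary memory. */
    static int copy_in(struct save *dst, const struct save *user_src)
    {
        memcpy(dst, user_src, sizeof(*dst));
        return 0;
    }

    static int restore_l1_save(struct save *vmcb01_save, const struct save *user_src)
    {
        struct save *tmp;
        int ret = -EINVAL;

        tmp = calloc(1, sizeof(*tmp));      /* kzalloc(..., GFP_KERNEL_ACCOUNT) in the kernel */
        if (!tmp)
            return -ENOMEM;

        if (copy_in(tmp, user_src)) {
            ret = -EFAULT;
            goto out_free;
        }

        /* Mirror of the checks at lines 1723-1725: the saved L1 context must have
         * paging and protected mode enabled and must not be in virtual-8086 mode. */
        if (!(tmp->cr0 & CR0_PG) || !(tmp->cr0 & CR0_PE) || (tmp->rflags & EFLAGS_VM))
            goto out_free;

        *vmcb01_save = *tmp;                /* install as the L1 (vmcb01) save area */
        ret = 0;

    out_free:
        free(tmp);                          /* kfree() in the kernel */
        return ret;
    }

    int main(void)
    {
        struct save user = { .cr0 = CR0_PG | CR0_PE };
        struct save vmcb01 = { 0 };

        return restore_l1_save(&vmcb01, &user);
    }
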