Lines matching refs:save (each entry is prefixed with its line number in the source file)

100 	kvm_init_shadow_npt_mmu(vcpu, X86_CR0_PG, hsave->save.cr4, hsave->save.efer,
259 * to avoid TOC/TOU races. For these save area checks
264 if ((vmcb12->save.efer & EFER_SVME) == 0)
267 if (((vmcb12->save.cr0 & X86_CR0_CD) == 0) && (vmcb12->save.cr0 & X86_CR0_NW))
270 if (!kvm_dr6_valid(vmcb12->save.dr6) || !kvm_dr7_valid(vmcb12->save.dr7))
273 vmcb12_lma = (vmcb12->save.efer & EFER_LME) && (vmcb12->save.cr0 & X86_CR0_PG);
276 if (!(vmcb12->save.cr4 & X86_CR4_PAE) ||
277 !(vmcb12->save.cr0 & X86_CR0_PE) ||
278 (vmcb12->save.cr3 & vcpu->arch.cr3_lm_rsvd_bits))
281 if (kvm_valid_cr4(&svm->vcpu, vmcb12->save.cr4))
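
The conditions at 264-281 are fragments of a single validity check applied to the guest's vmcb12 save area before the nested VMRUN is emulated; the interleaved rejection lines do not contain "save" and so are not listed (kvm_valid_cr4() at 281 returns a non-zero error code for a bad CR4, which likewise fails the check). A rough standalone sketch of the same logic, using simplified stand-in types and bit definitions rather than the kernel's own:

#include <stdbool.h>
#include <stdint.h>

/* Simplified stand-in bit definitions (architectural values). */
#define EFER_SVME       (1ULL << 12)
#define EFER_LME        (1ULL << 8)
#define X86_CR0_PE      (1ULL << 0)
#define X86_CR0_NW      (1ULL << 29)
#define X86_CR0_CD      (1ULL << 30)
#define X86_CR0_PG      (1ULL << 31)
#define X86_CR4_PAE     (1ULL << 5)

struct save_area {
        uint64_t efer, cr0, cr3, cr4, dr6, dr7;
};

/* Stand-in for kvm_dr6_valid()/kvm_dr7_valid(): upper 32 bits must be clear. */
static bool dr_valid(uint64_t dr)
{
        return (dr >> 32) == 0;
}

static bool check_vmcb12_save(const struct save_area *s, uint64_t cr3_rsvd)
{
        bool lma = (s->efer & EFER_LME) && (s->cr0 & X86_CR0_PG);

        if (!(s->efer & EFER_SVME))
                return false;           /* SVME must remain set */
        if (!(s->cr0 & X86_CR0_CD) && (s->cr0 & X86_CR0_NW))
                return false;           /* NW without CD is illegal */
        if (!dr_valid(s->dr6) || !dr_valid(s->dr7))
                return false;
        if (lma &&
            (!(s->cr4 & X86_CR4_PAE) || !(s->cr0 & X86_CR0_PE) ||
             (s->cr3 & cr3_rsvd)))
                return false;           /* inconsistent long-mode state */
        /* The real code additionally rejects a CR4 that kvm_valid_cr4() flags. */
        return true;
}

The real check consults per-vCPU reserved-bit masks (cr3_lm_rsvd_bits at 278), which is why the sketch takes cr3_rsvd as a parameter.
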
402 svm->vmcb->save.es = vmcb12->save.es;
403 svm->vmcb->save.cs = vmcb12->save.cs;
404 svm->vmcb->save.ss = vmcb12->save.ss;
405 svm->vmcb->save.ds = vmcb12->save.ds;
406 svm->vmcb->save.gdtr = vmcb12->save.gdtr;
407 svm->vmcb->save.idtr = vmcb12->save.idtr;
408 kvm_set_rflags(&svm->vcpu, vmcb12->save.rflags);
415 svm_set_efer(&svm->vcpu, vmcb12->save.efer | EFER_SVME);
417 svm_set_cr0(&svm->vcpu, vmcb12->save.cr0);
418 svm_set_cr4(&svm->vcpu, vmcb12->save.cr4);
419 svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = vmcb12->save.cr2;
420 kvm_rax_write(&svm->vcpu, vmcb12->save.rax);
421 kvm_rsp_write(&svm->vcpu, vmcb12->save.rsp);
422 kvm_rip_write(&svm->vcpu, vmcb12->save.rip);
425 svm->vmcb->save.rax = vmcb12->save.rax;
426 svm->vmcb->save.rsp = vmcb12->save.rsp;
427 svm->vmcb->save.rip = vmcb12->save.rip;
428 svm->vmcb->save.dr7 = vmcb12->save.dr7;
429 svm->vcpu.arch.dr6 = vmcb12->save.dr6;
430 svm->vmcb->save.cpl = vmcb12->save.cpl;
479 ret = nested_svm_load_cr3(&svm->vcpu, vmcb12->save.cr3,
506 vmcb12_gpa = svm->vmcb->save.rax;
533 trace_kvm_nested_vmrun(svm->vmcb->save.rip, vmcb12_gpa,
534 vmcb12->save.rip,
551 * Save the old vmcb, so we don't need to pick what we save, but can
554 hsave->save.es = vmcb->save.es;
555 hsave->save.cs = vmcb->save.cs;
556 hsave->save.ss = vmcb->save.ss;
557 hsave->save.ds = vmcb->save.ds;
558 hsave->save.gdtr = vmcb->save.gdtr;
559 hsave->save.idtr = vmcb->save.idtr;
560 hsave->save.efer = svm->vcpu.arch.efer;
561 hsave->save.cr0 = kvm_read_cr0(&svm->vcpu);
562 hsave->save.cr4 = svm->vcpu.arch.cr4;
563 hsave->save.rflags = kvm_get_rflags(&svm->vcpu);
564 hsave->save.rip = kvm_rip_read(&svm->vcpu);
565 hsave->save.rsp = vmcb->save.rsp;
566 hsave->save.rax = vmcb->save.rax;
568 hsave->save.cr3 = vmcb->save.cr3;
570 hsave->save.cr3 = kvm_read_cr3(&svm->vcpu);
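
The block at 554-570 snapshots the current L1 state into hsave, as the comment at 551 says, so that a later #VMEXIT can restore it wholesale; efer, cr0, cr4, rflags and rip are read from KVM's vCPU state rather than from the VMCB because those are the authoritative copies. Lines 568 and 570 both assign hsave->save.cr3 because they sit on the two arms of a conditional the listing hides: with nested paging the L1 CR3 to remember is the one in the VMCB, otherwise it is the value KVM tracks for the vCPU (kvm_read_cr3()). A minimal sketch of that hidden branch, with simplified types and vcpu_cr3 standing in for kvm_read_cr3():

#include <stdbool.h>
#include <stdint.h>

struct save_area { uint64_t cr3; /* ... */ };
struct vmcb      { struct save_area save; };

/* Remember the L1 ("host") CR3 so the later #VMEXIT can restore it. */
static void snapshot_host_cr3(struct vmcb *hsave, const struct vmcb *vmcb,
                              bool npt_enabled, uint64_t vcpu_cr3)
{
        if (npt_enabled)
                hsave->save.cr3 = vmcb->save.cr3;   /* CR3 lives in the VMCB */
        else
                hsave->save.cr3 = vcpu_cr3;         /* shadow paging: use KVM's copy */
}
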
600 to_vmcb->save.fs = from_vmcb->save.fs;
601 to_vmcb->save.gs = from_vmcb->save.gs;
602 to_vmcb->save.tr = from_vmcb->save.tr;
603 to_vmcb->save.ldtr = from_vmcb->save.ldtr;
604 to_vmcb->save.kernel_gs_base = from_vmcb->save.kernel_gs_base;
605 to_vmcb->save.star = from_vmcb->save.star;
606 to_vmcb->save.lstar = from_vmcb->save.lstar;
607 to_vmcb->save.cstar = from_vmcb->save.cstar;
608 to_vmcb->save.sfmask = from_vmcb->save.sfmask;
609 to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs;
610 to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp;
611 to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip;
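
Lines 600-611 copy exactly the register set that the VMLOAD and VMSAVE instructions transfer (FS, GS, TR, LDTR, KernelGSBase, STAR, LSTAR, CSTAR, SFMASK and the SYSENTER MSRs), so one helper can serve both intercepts with source and destination swapped. A self-contained sketch of that pattern, with simplified stand-in types:

#include <stdint.h>

struct seg { uint16_t selector, attrib; uint32_t limit; uint64_t base; };

struct save_area {
        struct seg fs, gs, tr, ldtr;
        uint64_t kernel_gs_base;
        uint64_t star, lstar, cstar, sfmask;
        uint64_t sysenter_cs, sysenter_esp, sysenter_eip;
};

struct vmcb { struct save_area save; };

/* Copy the VMLOAD/VMSAVE register set from one VMCB to another. */
static void vmloadsave(struct vmcb *to, const struct vmcb *from)
{
        to->save.fs   = from->save.fs;
        to->save.gs   = from->save.gs;
        to->save.tr   = from->save.tr;
        to->save.ldtr = from->save.ldtr;
        to->save.kernel_gs_base = from->save.kernel_gs_base;
        to->save.star   = from->save.star;
        to->save.lstar  = from->save.lstar;
        to->save.cstar  = from->save.cstar;
        to->save.sfmask = from->save.sfmask;
        to->save.sysenter_cs  = from->save.sysenter_cs;
        to->save.sysenter_esp = from->save.sysenter_esp;
        to->save.sysenter_eip = from->save.sysenter_eip;
}

/*
 * VMLOAD copies this set from the VMCB addressed by guest RAX into the
 * current processor state; VMSAVE goes the other way, so the intercept
 * handlers can call the same helper with the arguments swapped.
 */
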
643 vmcb12->save.es = vmcb->save.es;
644 vmcb12->save.cs = vmcb->save.cs;
645 vmcb12->save.ss = vmcb->save.ss;
646 vmcb12->save.ds = vmcb->save.ds;
647 vmcb12->save.gdtr = vmcb->save.gdtr;
648 vmcb12->save.idtr = vmcb->save.idtr;
649 vmcb12->save.efer = svm->vcpu.arch.efer;
650 vmcb12->save.cr0 = kvm_read_cr0(&svm->vcpu);
651 vmcb12->save.cr3 = kvm_read_cr3(&svm->vcpu);
652 vmcb12->save.cr2 = vmcb->save.cr2;
653 vmcb12->save.cr4 = svm->vcpu.arch.cr4;
654 vmcb12->save.rflags = kvm_get_rflags(&svm->vcpu);
655 vmcb12->save.rip = kvm_rip_read(&svm->vcpu);
656 vmcb12->save.rsp = kvm_rsp_read(&svm->vcpu);
657 vmcb12->save.rax = kvm_rax_read(&svm->vcpu);
658 vmcb12->save.dr7 = vmcb->save.dr7;
659 vmcb12->save.dr6 = svm->vcpu.arch.dr6;
660 vmcb12->save.cpl = vmcb->save.cpl;
695 /* Restore selected save entries */
696 svm->vmcb->save.es = hsave->save.es;
697 svm->vmcb->save.cs = hsave->save.cs;
698 svm->vmcb->save.ss = hsave->save.ss;
699 svm->vmcb->save.ds = hsave->save.ds;
700 svm->vmcb->save.gdtr = hsave->save.gdtr;
701 svm->vmcb->save.idtr = hsave->save.idtr;
702 kvm_set_rflags(&svm->vcpu, hsave->save.rflags);
703 svm_set_efer(&svm->vcpu, hsave->save.efer);
704 svm_set_cr0(&svm->vcpu, hsave->save.cr0 | X86_CR0_PE);
705 svm_set_cr4(&svm->vcpu, hsave->save.cr4);
706 kvm_rax_write(&svm->vcpu, hsave->save.rax);
707 kvm_rsp_write(&svm->vcpu, hsave->save.rsp);
708 kvm_rip_write(&svm->vcpu, hsave->save.rip);
709 svm->vmcb->save.dr7 = 0;
710 svm->vmcb->save.cpl = 0;
726 rc = nested_svm_load_cr3(&svm->vcpu, hsave->save.cr3, false);
731 svm->vmcb->save.cr3 = hsave->save.cr3;
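
Lines 695-710 restore the saved L1 state through the same setter helpers used on VMRUN, with CR0.PE forced on and DR7 and CPL cleared, and lines 726 and 731 bring back the remembered host CR3. A compressed sketch of that CR3 tail, with load_cr3() standing in for nested_svm_load_cr3(); the error handling between 726 and 731, and any conditional guarding 731, are not visible in the listing:

#include <stdbool.h>
#include <stdint.h>

struct save_area { uint64_t cr3; };
struct vmcb      { struct save_area save; };

/* Stand-in for nested_svm_load_cr3(): validate and switch to the CR3,
 * returning 0 on success.  nested_npt is false here because the host
 * CR3 is an ordinary one, not translated through nested paging. */
static int load_cr3(uint64_t cr3, bool nested_npt)
{
        (void)cr3;
        (void)nested_npt;
        return 0;
}

static int restore_host_cr3(struct vmcb *vmcb, uint64_t hsave_cr3)
{
        if (load_cr3(hsave_cr3, false))
                return 1;               /* surface the failure, as the kernel does */
        vmcb->save.cr3 = hsave_cr3;     /* keep the VMCB's copy in sync */
        return 0;
}
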
917 if (svm->vmcb->save.cpl) {
986 trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip);
1143 if (copy_to_user(&user_vmcb->save, &svm->nested.hsave->save,
1144 sizeof(user_vmcb->save)))
1160 struct vmcb_save_area *save;
1202 save = kzalloc(sizeof(*save), GFP_KERNEL_ACCOUNT);
1203 if (!ctl || !save)
1209 if (copy_from_user(save, &user_vmcb->save, sizeof(*save)))
1229 if (!(save->cr0 & X86_CR0_PG))
1231 if (!(save->efer & EFER_SVME))
1236 * come from the nested save state. Guest state is already
1237 * in the registers, the save area of the nested state instead
1241 hsave->save = *save;
1253 kfree(save);
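
Lines 1160-1253 are fragments of the nested-state restore path (KVM_SET_NESTED_STATE): a temporary vmcb_save_area is allocated, the user-supplied save area is copied in and sanity-checked (paging enabled, EFER.SVME set), and only then installed into hsave. As the comment at 1236-1237 notes, only the L1 state comes from this area; the L2 guest state is already held in the vCPU registers. A simplified, self-contained sketch of that flow, with copy_in() standing in for copy_from_user() and plain calloc/free for the kernel allocators:

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define X86_CR0_PG (1ULL << 31)
#define EFER_SVME  (1ULL << 12)

struct vmcb_save_area { uint64_t cr0, cr3, cr4, efer; /* ... */ };

/* Hypothetical stand-in for copy_from_user(): returns 0 on success. */
static int copy_in(void *dst, const void *user_src, size_t len)
{
        memcpy(dst, user_src, len);
        return 0;
}

static int set_nested_save_state(struct vmcb_save_area *hsave,
                                 const struct vmcb_save_area *user_save)
{
        struct vmcb_save_area *save;
        int ret = -ENOMEM;

        save = calloc(1, sizeof(*save));
        if (!save)
                return ret;

        ret = -EFAULT;
        if (copy_in(save, user_save, sizeof(*save)))
                goto out_free;

        /* The saved L1 state must describe a paged guest with SVME set. */
        ret = -EINVAL;
        if (!(save->cr0 & X86_CR0_PG) || !(save->efer & EFER_SVME))
                goto out_free;

        /*
         * Only the L1 ("host") state comes from the nested save area;
         * the L2 guest state is already held in the vCPU registers.
         */
        *hsave = *save;
        ret = 0;

out_free:
        free(save);
        return ret;
}

The real svm_set_nested_state() also validates the control area and re-enters guest mode; the sketch covers only the save-area handling visible in the lines above.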