Lines Matching defs:svm
30 #include "svm.h"
35 struct vcpu_svm *svm = to_svm(vcpu);
37 if (svm->vmcb->control.exit_code != SVM_EXIT_NPF) {
42 svm->vmcb->control.exit_code = SVM_EXIT_NPF;
43 svm->vmcb->control.exit_code_hi = 0;
44 svm->vmcb->control.exit_info_1 = (1ULL << 32);
45 svm->vmcb->control.exit_info_2 = fault->address;
48 svm->vmcb->control.exit_info_1 &= ~0xffffffffULL;
49 svm->vmcb->control.exit_info_1 |= fault->error_code;
51 nested_svm_vmexit(svm);
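The lines above synthesize an NPF exit for L1: the upper half of exit_info_1 carries flag bits while the low 32 bits carry the page-fault error code, so the code clears only the low dword before OR-ing in the new error code (lines 48-49). A minimal standalone sketch of that mask-and-merge; replace_low_dword and the demo values are illustrative, not from the source:

#include <stdint.h>
#include <stdio.h>

/* Keep the upper 32 bits of exit_info_1 and replace only the low 32 bits
 * with a new error code, mirroring the mask-and-merge above. */
static uint64_t replace_low_dword(uint64_t exit_info_1, uint32_t error_code)
{
        exit_info_1 &= ~0xffffffffULL;  /* drop the previous error code */
        exit_info_1 |= error_code;      /* install the new one */
        return exit_info_1;
}

int main(void)
{
        uint64_t info = 1ULL << 32;             /* upper-half flag, as above */

        info = replace_low_dword(info, 0x6);    /* illustrative error code */
        printf("exit_info_1 = %#llx\n", (unsigned long long)info);
        return 0;
}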
56 struct vcpu_svm *svm = to_svm(vcpu);
59 if (vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_EXCEPTION_OFFSET + PF_VECTOR) &&
60 !svm->nested.nested_run_pending) {
61 svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + PF_VECTOR;
62 svm->vmcb->control.exit_code_hi = 0;
63 svm->vmcb->control.exit_info_1 = fault->error_code;
64 svm->vmcb->control.exit_info_2 = fault->address;
65 nested_svm_vmexit(svm);
73 struct vcpu_svm *svm = to_svm(vcpu);
74 u64 cr3 = svm->nested.ctl.nested_cr3;
87 struct vcpu_svm *svm = to_svm(vcpu);
89 return svm->nested.ctl.nested_cr3;
94 struct vcpu_svm *svm = to_svm(vcpu);
95 struct vmcb *hsave = svm->nested.hsave;
101 svm->nested.ctl.nested_cr3);
115 void recalc_intercepts(struct vcpu_svm *svm)
120 vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
122 if (!is_guest_mode(&svm->vcpu))
125 c = &svm->vmcb->control;
126 h = &svm->nested.hsave->control;
127 g = &svm->nested.ctl;
166 /* asid not copied, it is handled manually for svm->vmcb. */
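recalc_intercepts above combines the host control area (h) and the nested control area (g) into the active one (c). The merge loop itself is not among the matched lines, but broadly the active set is the union of the two, with a few adjustments the full function makes, so an event causes a #VMEXIT whenever either level asked to intercept it. A self-contained sketch of that core union, with a hypothetical NR_INTERCEPT_WORDS standing in for the kernel's own vector size:

#include <stdint.h>
#include <stdio.h>

#define NR_INTERCEPT_WORDS 5    /* hypothetical size, not the kernel's enum */

/* Active intercepts = host intercepts OR nested-guest intercepts. */
static void merge_intercepts(uint32_t *c, const uint32_t *h, const uint32_t *g)
{
        for (int i = 0; i < NR_INTERCEPT_WORDS; i++)
                c[i] = h[i] | g[i];
}

int main(void)
{
        uint32_t h[NR_INTERCEPT_WORDS] = { 0x00000040 };        /* one host intercept */
        uint32_t g[NR_INTERCEPT_WORDS] = { 0x00010000 };        /* one guest intercept */
        uint32_t c[NR_INTERCEPT_WORDS];

        merge_intercepts(c, h, g);
        printf("word 0 = %#x\n", c[0]);         /* both bits set: 0x10040 */
        return 0;
}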
186 static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
195 if (!(vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT)))
206 offset = svm->nested.ctl.msrpm_base_pa + (p * 4);
208 if (kvm_vcpu_read_guest(&svm->vcpu, offset, &value, 4))
211 svm->nested.msrpm[p] = svm->msrpm[p] | value;
214 svm->vmcb->control.msrpm_base_pa = __sme_set(__pa(svm->nested.msrpm));
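nested_svm_vmrun_msrpm walks L1's MSR permission bitmap in 4-byte chunks (kvm_vcpu_read_guest at line 208) and ORs each chunk into L0's own bitmap (line 211), so an MSR access is intercepted if either hypervisor wants it; the merged bitmap is then installed through msrpm_base_pa. A sketch of that merge under stated assumptions: read_guest_u32 is a hypothetical stand-in for reading nested-guest memory.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Hypothetical stand-in for kvm_vcpu_read_guest(): copy 4 bytes from a
 * flat "guest memory" buffer, failing if the offset is out of range. */
static int read_guest_u32(const uint8_t *mem, size_t size, uint64_t off,
                          uint32_t *val)
{
        if (off + sizeof(*val) > size)
                return -1;
        memcpy(val, mem + off, sizeof(*val));
        return 0;
}

/* merged[p] = l0[p] | l1[p]: a set bit means "intercept this MSR access". */
static int merge_nested_msrpm(uint32_t *merged, const uint32_t *l0,
                              const uint8_t *gmem, size_t gsize,
                              uint64_t base, size_t nwords)
{
        for (size_t p = 0; p < nwords; p++) {
                uint32_t val;

                if (read_guest_u32(gmem, gsize, base + p * 4, &val))
                        return -1;
                merged[p] = l0[p] | val;
        }
        return 0;
}

int main(void)
{
        uint8_t gmem[16] = { 0x01 };            /* L1 intercepts one access */
        uint32_t l0[4] = { 0x02 }, merged[4];   /* L0 intercepts another */

        if (!merge_nested_msrpm(merged, l0, gmem, sizeof(gmem), 0, 4))
                printf("word 0 = %#x\n", merged[0]);    /* union: 0x3 */
        return 0;
}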
221 struct vcpu_svm *svm = to_svm(vcpu);
226 if (!nested_svm_vmrun_msrpm(svm)) {
252 static bool nested_vmcb_check_save(struct vcpu_svm *svm, struct vmcb *vmcb12)
254 struct kvm_vcpu *vcpu = &svm->vcpu;
281 if (kvm_valid_cr4(&svm->vcpu, vmcb12->save.cr4))
287 static void load_nested_vmcb_control(struct vcpu_svm *svm,
290 copy_vmcb_control_area(&svm->nested.ctl, control);
293 svm->nested.ctl.asid = control->asid;
294 svm->nested.ctl.msrpm_base_pa &= ~0x0fffULL;
295 svm->nested.ctl.iopm_base_pa &= ~0x0fffULL;
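load_nested_vmcb_control clears the low 12 bits of the MSRPM and IOPM base addresses taken from the untrusted vmcb12, forcing them onto a 4 KiB page boundary before they are used. A one-liner sketch of that sanitization; align_to_page is an illustrative name:

#include <stdint.h>
#include <stdio.h>

/* Force a guest-supplied physical address onto a 4 KiB boundary by
 * clearing its low 12 bits, as the two lines above do. */
static uint64_t align_to_page(uint64_t pa)
{
        return pa & ~0x0fffULL;
}

int main(void)
{
        printf("%#llx\n", (unsigned long long)align_to_page(0x12345678ULL));
        /* prints 0x12345000 */
        return 0;
}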
302 void sync_nested_vmcb_control(struct vcpu_svm *svm)
305 svm->nested.ctl.event_inj = svm->vmcb->control.event_inj;
306 svm->nested.ctl.event_inj_err = svm->vmcb->control.event_inj_err;
310 if (!(svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK) &&
311 svm_is_intercept(svm, INTERCEPT_VINTR)) {
314 * svm->vmcb->control.int_ctl and possibly setting V_IRQ
322 svm->nested.ctl.int_ctl &= ~mask;
323 svm->nested.ctl.int_ctl |= svm->vmcb->control.int_ctl & mask;
330 static void nested_vmcb_save_pending_event(struct vcpu_svm *svm,
333 struct kvm_vcpu *vcpu = &svm->vcpu;
363 static inline bool nested_npt_enabled(struct vcpu_svm *svm)
365 return svm->nested.ctl.nested_ctl & SVM_NESTED_CTL_NP_ENABLE;
399 static void nested_prepare_vmcb_save(struct vcpu_svm *svm, struct vmcb *vmcb12)
402 svm->vmcb->save.es = vmcb12->save.es;
403 svm->vmcb->save.cs = vmcb12->save.cs;
404 svm->vmcb->save.ss = vmcb12->save.ss;
405 svm->vmcb->save.ds = vmcb12->save.ds;
406 svm->vmcb->save.gdtr = vmcb12->save.gdtr;
407 svm->vmcb->save.idtr = vmcb12->save.idtr;
408 kvm_set_rflags(&svm->vcpu, vmcb12->save.rflags);
415 svm_set_efer(&svm->vcpu, vmcb12->save.efer | EFER_SVME);
417 svm_set_cr0(&svm->vcpu, vmcb12->save.cr0);
418 svm_set_cr4(&svm->vcpu, vmcb12->save.cr4);
419 svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = vmcb12->save.cr2;
420 kvm_rax_write(&svm->vcpu, vmcb12->save.rax);
421 kvm_rsp_write(&svm->vcpu, vmcb12->save.rsp);
422 kvm_rip_write(&svm->vcpu, vmcb12->save.rip);
425 svm->vmcb->save.rax = vmcb12->save.rax;
426 svm->vmcb->save.rsp = vmcb12->save.rsp;
427 svm->vmcb->save.rip = vmcb12->save.rip;
428 svm->vmcb->save.dr7 = vmcb12->save.dr7;
429 svm->vcpu.arch.dr6 = vmcb12->save.dr6;
430 svm->vmcb->save.cpl = vmcb12->save.cpl;
433 static void nested_prepare_vmcb_control(struct vcpu_svm *svm)
440 if (nested_npt_enabled(svm))
441 nested_svm_init_mmu_context(&svm->vcpu);
443 svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset =
444 svm->vcpu.arch.l1_tsc_offset + svm->nested.ctl.tsc_offset;
446 svm->vmcb->control.int_ctl =
447 (svm->nested.ctl.int_ctl & int_ctl_vmcb12_bits) |
448 (svm->nested.hsave->control.int_ctl & int_ctl_vmcb01_bits);
450 svm->vmcb->control.int_vector = svm->nested.ctl.int_vector;
451 svm->vmcb->control.int_state = svm->nested.ctl.int_state;
452 svm->vmcb->control.event_inj = svm->nested.ctl.event_inj;
453 svm->vmcb->control.event_inj_err = svm->nested.ctl.event_inj_err;
455 svm->vmcb->control.pause_filter_count = svm->nested.ctl.pause_filter_count;
456 svm->vmcb->control.pause_filter_thresh = svm->nested.ctl.pause_filter_thresh;
459 enter_guest_mode(&svm->vcpu);
465 recalc_intercepts(svm);
467 vmcb_mark_all_dirty(svm->vmcb);
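Lines 446-448 build the active int_ctl from two sources with complementary masks: some bits come from what L1 programmed in vmcb12, the rest from L1's own saved control area in hsave. A standalone sketch of that split-mask merge; the mask values here are illustrative and are not the kernel's int_ctl_vmcb12_bits/int_ctl_vmcb01_bits definitions:

#include <stdint.h>
#include <stdio.h>

/* Illustrative masks only; the kernel defines its own bit sets. */
#define DEMO_VMCB12_BITS 0x000000ffu    /* bits taken from the nested control */
#define DEMO_VMCB01_BITS 0xff000000u    /* bits taken from L1's saved control */

static uint32_t merge_int_ctl(uint32_t vmcb12_int_ctl, uint32_t vmcb01_int_ctl)
{
        return (vmcb12_int_ctl & DEMO_VMCB12_BITS) |
               (vmcb01_int_ctl & DEMO_VMCB01_BITS);
}

int main(void)
{
        printf("%#x\n", merge_int_ctl(0x12345678, 0xabcdef01));
        /* low byte from vmcb12, high byte from vmcb01: 0xab000078 */
        return 0;
}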
470 int enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb12_gpa,
475 svm->nested.vmcb12_gpa = vmcb12_gpa;
476 nested_prepare_vmcb_save(svm, vmcb12);
477 nested_prepare_vmcb_control(svm);
479 ret = nested_svm_load_cr3(&svm->vcpu, vmcb12->save.cr3,
480 nested_npt_enabled(svm));
485 svm->vcpu.arch.mmu->inject_page_fault = svm_inject_page_fault_nested;
487 svm_set_gif(svm, true);
492 int nested_svm_vmrun(struct vcpu_svm *svm)
496 struct vmcb *hsave = svm->nested.hsave;
497 struct vmcb *vmcb = svm->vmcb;
501 if (is_smm(&svm->vcpu)) {
502 kvm_queue_exception(&svm->vcpu, UD_VECTOR);
506 vmcb12_gpa = svm->vmcb->save.rax;
507 ret = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(vmcb12_gpa), &map);
509 kvm_inject_gp(&svm->vcpu, 0);
512 return kvm_skip_emulated_instruction(&svm->vcpu);
515 ret = kvm_skip_emulated_instruction(&svm->vcpu);
519 if (WARN_ON_ONCE(!svm->nested.initialized))
522 load_nested_vmcb_control(svm, &vmcb12->control);
524 if (!nested_vmcb_check_save(svm, vmcb12) ||
525 !nested_vmcb_check_controls(&svm->nested.ctl)) {
533 trace_kvm_nested_vmrun(svm->vmcb->save.rip, vmcb12_gpa,
547 kvm_clear_exception_queue(&svm->vcpu);
548 kvm_clear_interrupt_queue(&svm->vcpu);
560 hsave->save.efer = svm->vcpu.arch.efer;
561 hsave->save.cr0 = kvm_read_cr0(&svm->vcpu);
562 hsave->save.cr4 = svm->vcpu.arch.cr4;
563 hsave->save.rflags = kvm_get_rflags(&svm->vcpu);
564 hsave->save.rip = kvm_rip_read(&svm->vcpu);
570 hsave->save.cr3 = kvm_read_cr3(&svm->vcpu);
574 svm->nested.nested_run_pending = 1;
576 if (enter_svm_guest_mode(svm, vmcb12_gpa, vmcb12))
579 if (nested_svm_vmrun_msrpm(svm))
583 svm->nested.nested_run_pending = 0;
585 svm->vmcb->control.exit_code = SVM_EXIT_ERR;
586 svm->vmcb->control.exit_code_hi = 0;
587 svm->vmcb->control.exit_info_1 = 0;
588 svm->vmcb->control.exit_info_2 = 0;
590 nested_svm_vmexit(svm);
593 kvm_vcpu_unmap(&svm->vcpu, &map, true);
614 int nested_svm_vmexit(struct vcpu_svm *svm)
618 struct vmcb *hsave = svm->nested.hsave;
619 struct vmcb *vmcb = svm->vmcb;
622 rc = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(svm->nested.vmcb12_gpa), &map);
625 kvm_inject_gp(&svm->vcpu, 0);
632 leave_guest_mode(&svm->vcpu);
633 svm->nested.vmcb12_gpa = 0;
634 WARN_ON_ONCE(svm->nested.nested_run_pending);
636 kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, &svm->vcpu);
639 svm->vcpu.arch.mp_state = KVM_MP_STATE_RUNNABLE;
649 vmcb12->save.efer = svm->vcpu.arch.efer;
650 vmcb12->save.cr0 = kvm_read_cr0(&svm->vcpu);
651 vmcb12->save.cr3 = kvm_read_cr3(&svm->vcpu);
653 vmcb12->save.cr4 = svm->vcpu.arch.cr4;
654 vmcb12->save.rflags = kvm_get_rflags(&svm->vcpu);
655 vmcb12->save.rip = kvm_rip_read(&svm->vcpu);
656 vmcb12->save.rsp = kvm_rsp_read(&svm->vcpu);
657 vmcb12->save.rax = kvm_rax_read(&svm->vcpu);
659 vmcb12->save.dr6 = svm->vcpu.arch.dr6;
669 nested_vmcb_save_pending_event(svm, vmcb12);
671 if (svm->nrips_enabled)
674 vmcb12->control.int_ctl = svm->nested.ctl.int_ctl;
675 vmcb12->control.tlb_ctl = svm->nested.ctl.tlb_ctl;
676 vmcb12->control.event_inj = svm->nested.ctl.event_inj;
677 vmcb12->control.event_inj_err = svm->nested.ctl.event_inj_err;
680 svm->vmcb->control.pause_filter_count;
682 svm->vmcb->control.pause_filter_thresh;
688 svm_set_gif(svm, false);
690 svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset =
691 svm->vcpu.arch.l1_tsc_offset;
693 svm->nested.ctl.nested_cr3 = 0;
696 svm->vmcb->save.es = hsave->save.es;
697 svm->vmcb->save.cs = hsave->save.cs;
698 svm->vmcb->save.ss = hsave->save.ss;
699 svm->vmcb->save.ds = hsave->save.ds;
700 svm->vmcb->save.gdtr = hsave->save.gdtr;
701 svm->vmcb->save.idtr = hsave->save.idtr;
702 kvm_set_rflags(&svm->vcpu, hsave->save.rflags);
703 svm_set_efer(&svm->vcpu, hsave->save.efer);
704 svm_set_cr0(&svm->vcpu, hsave->save.cr0 | X86_CR0_PE);
705 svm_set_cr4(&svm->vcpu, hsave->save.cr4);
706 kvm_rax_write(&svm->vcpu, hsave->save.rax);
707 kvm_rsp_write(&svm->vcpu, hsave->save.rsp);
708 kvm_rip_write(&svm->vcpu, hsave->save.rip);
709 svm->vmcb->save.dr7 = 0;
710 svm->vmcb->save.cpl = 0;
711 svm->vmcb->control.exit_int_info = 0;
713 vmcb_mark_all_dirty(svm->vmcb);
722 kvm_vcpu_unmap(&svm->vcpu, &map, true);
724 nested_svm_uninit_mmu_context(&svm->vcpu);
726 rc = nested_svm_load_cr3(&svm->vcpu, hsave->save.cr3, false);
731 svm->vmcb->save.cr3 = hsave->save.cr3;
737 svm->vcpu.arch.nmi_injected = false;
738 kvm_clear_exception_queue(&svm->vcpu);
739 kvm_clear_interrupt_queue(&svm->vcpu);
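The TSC offset handling is symmetric: on nested entry (lines 443-444) the active offset is L1's offset plus the offset L1 programmed in vmcb12, and on nested #VMEXIT (lines 690-691) it falls back to L1's offset alone. A trivial sketch of that composition; l2_tsc_offset is an illustrative name:

#include <stdint.h>
#include <stdio.h>

/* While L2 runs, the hardware offset is the sum of both levels' offsets;
 * after a nested #VMEXIT only L1's offset remains in effect. */
static uint64_t l2_tsc_offset(uint64_t l1_offset, uint64_t vmcb12_offset)
{
        return l1_offset + vmcb12_offset;
}

int main(void)
{
        printf("%#llx\n",
               (unsigned long long)l2_tsc_offset(0x1000, 0x200));       /* 0x1200 */
        return 0;
}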
744 int svm_allocate_nested(struct vcpu_svm *svm)
748 if (svm->nested.initialized)
754 svm->nested.hsave = page_address(hsave_page);
756 svm->nested.msrpm = svm_vcpu_alloc_msrpm();
757 if (!svm->nested.msrpm)
759 svm_vcpu_init_msrpm(&svm->vcpu, svm->nested.msrpm);
761 svm->nested.initialized = true;
769 void svm_free_nested(struct vcpu_svm *svm)
771 if (!svm->nested.initialized)
774 svm_vcpu_free_msrpm(svm->nested.msrpm);
775 svm->nested.msrpm = NULL;
777 __free_page(virt_to_page(svm->nested.hsave));
778 svm->nested.hsave = NULL;
780 svm->nested.initialized = false;
788 struct vcpu_svm *svm = to_svm(vcpu);
790 if (is_guest_mode(&svm->vcpu)) {
791 struct vmcb *hsave = svm->nested.hsave;
792 struct vmcb *vmcb = svm->vmcb;
794 svm->nested.nested_run_pending = 0;
795 leave_guest_mode(&svm->vcpu);
797 nested_svm_uninit_mmu_context(&svm->vcpu);
800 kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, &svm->vcpu);
803 static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
808 if (!(vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT)))
811 msr = svm->vcpu.arch.regs[VCPU_REGS_RCX];
813 write = svm->vmcb->control.exit_info_1 & 1;
822 if (kvm_vcpu_read_guest(&svm->vcpu, svm->nested.ctl.msrpm_base_pa + offset, &value, 4))
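nested_svm_exit_handled_msr consults L1's MSR permission bitmap for the MSR in RCX; the AMD bitmap uses two bits per MSR (read bit, then write bit), and bit 0 of exit_info_1 (line 813) says whether the access was a write. A hedged sketch of the bit lookup for the first MSR range only (0x0-0x1fff); the kernel's own offset handling for the other ranges is out of scope here, and msrpm_bit_set is an illustrative name:

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* Two bits per MSR: even bit = intercept reads, odd bit = intercept
 * writes.  Only the direct-mapped range 0x0-0x1fff is handled here. */
static bool msrpm_bit_set(const uint8_t *msrpm, uint32_t msr, bool write)
{
        uint32_t bit;

        if (msr > 0x1fff)
                return true;            /* unknown range: be conservative */

        bit = msr * 2 + (write ? 1 : 0);
        return msrpm[bit / 8] & (1u << (bit % 8));
}

int main(void)
{
        uint8_t msrpm[0x800] = { 0 };   /* 2 KiB covers MSRs 0x0-0x1fff */

        msrpm[0] = 0x02;                /* intercept writes to MSR 0 */
        printf("write MSR 0 intercepted: %d\n", msrpm_bit_set(msrpm, 0, true));
        printf("read  MSR 0 intercepted: %d\n", msrpm_bit_set(msrpm, 0, false));
        return 0;
}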
828 static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
835 if (!(vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_IOIO_PROT)))
838 port = svm->vmcb->control.exit_info_1 >> 16;
839 size = (svm->vmcb->control.exit_info_1 & SVM_IOIO_SIZE_MASK) >>
841 gpa = svm->nested.ctl.iopm_base_pa + (port / 8);
847 if (kvm_vcpu_read_guest(&svm->vcpu, gpa, &val, iopm_len))
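nested_svm_intercept_ioio does the analogous lookup in L1's I/O permission bitmap: one bit per port, so an access of `size` bytes tests `size` consecutive bits starting at the port's bit and may straddle a byte boundary, which is why the fragment computes a guest physical address at iopm_base_pa + port/8 (line 841) and then reads up to iopm_len bytes from guest memory. A hedged standalone sketch of the offset and mask arithmetic; ioio_intercepted is an illustrative name:

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* One bit per I/O port.  An access of `size` bytes (1, 2 or 4) checks
 * `size` consecutive bits starting at `port`, possibly spanning two
 * bytes of the bitmap, so up to 16 bits are loaded at once. */
static bool ioio_intercepted(const uint8_t *iopm, uint16_t port, unsigned size)
{
        unsigned start_bit = port % 8;
        uint16_t chunk = iopm[port / 8];

        if (start_bit + size > 8)               /* straddles a byte boundary */
                chunk |= (uint16_t)iopm[port / 8 + 1] << 8;

        return chunk & (((1u << size) - 1) << start_bit);
}

int main(void)
{
        uint8_t iopm[3 * 0x1000] = { 0 };       /* the architectural IOPM is 12 KiB */

        iopm[0x60 / 8] = 1u << (0x60 % 8);      /* intercept port 0x60 */
        printf("outb 0x60: %d\n", ioio_intercepted(iopm, 0x60, 1));     /* 1 */
        printf("outw 0x5f: %d\n", ioio_intercepted(iopm, 0x5f, 2));     /* 1 */
        printf("outb 0x61: %d\n", ioio_intercepted(iopm, 0x61, 1));     /* 0 */
        return 0;
}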
853 static int nested_svm_intercept(struct vcpu_svm *svm)
855 u32 exit_code = svm->vmcb->control.exit_code;
860 vmexit = nested_svm_exit_handled_msr(svm);
863 vmexit = nested_svm_intercept_ioio(svm);
866 if (vmcb_is_intercept(&svm->nested.ctl, exit_code))
871 if (vmcb_is_intercept(&svm->nested.ctl, exit_code))
889 if (vmcb_is_intercept(&svm->nested.ctl, exit_code))
897 int nested_svm_exit_handled(struct vcpu_svm *svm)
901 vmexit = nested_svm_intercept(svm);
904 nested_svm_vmexit(svm);
909 int nested_svm_check_permissions(struct vcpu_svm *svm)
911 if (!(svm->vcpu.arch.efer & EFER_SVME) ||
912 !is_paging(&svm->vcpu)) {
913 kvm_queue_exception(&svm->vcpu, UD_VECTOR);
917 if (svm->vmcb->save.cpl) {
918 kvm_inject_gp(&svm->vcpu, 0);
925 static bool nested_exit_on_exception(struct vcpu_svm *svm)
927 unsigned int nr = svm->vcpu.arch.exception.nr;
929 return (svm->nested.ctl.intercepts[INTERCEPT_EXCEPTION] & BIT(nr));
932 static void nested_svm_inject_exception_vmexit(struct vcpu_svm *svm)
934 unsigned int nr = svm->vcpu.arch.exception.nr;
936 svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr;
937 svm->vmcb->control.exit_code_hi = 0;
939 if (svm->vcpu.arch.exception.has_error_code)
940 svm->vmcb->control.exit_info_1 = svm->vcpu.arch.exception.error_code;
947 if (svm->vcpu.arch.exception.nested_apf)
948 svm->vmcb->control.exit_info_2 = svm->vcpu.arch.apf.nested_apf_token;
949 else if (svm->vcpu.arch.exception.has_payload)
950 svm->vmcb->control.exit_info_2 = svm->vcpu.arch.exception.payload;
952 svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;
955 kvm_deliver_exception_payload(&svm->vcpu);
956 if (svm->vcpu.arch.dr7 & DR7_GD) {
957 svm->vcpu.arch.dr7 &= ~DR7_GD;
958 kvm_update_dr7(&svm->vcpu);
961 WARN_ON(svm->vcpu.arch.exception.has_payload);
963 nested_svm_vmexit(svm);
966 static void nested_svm_smi(struct vcpu_svm *svm)
968 svm->vmcb->control.exit_code = SVM_EXIT_SMI;
969 svm->vmcb->control.exit_info_1 = 0;
970 svm->vmcb->control.exit_info_2 = 0;
972 nested_svm_vmexit(svm);
975 static void nested_svm_nmi(struct vcpu_svm *svm)
977 svm->vmcb->control.exit_code = SVM_EXIT_NMI;
978 svm->vmcb->control.exit_info_1 = 0;
979 svm->vmcb->control.exit_info_2 = 0;
981 nested_svm_vmexit(svm);
984 static void nested_svm_intr(struct vcpu_svm *svm)
986 trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip);
988 svm->vmcb->control.exit_code = SVM_EXIT_INTR;
989 svm->vmcb->control.exit_info_1 = 0;
990 svm->vmcb->control.exit_info_2 = 0;
992 nested_svm_vmexit(svm);
995 static inline bool nested_exit_on_init(struct vcpu_svm *svm)
997 return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_INIT);
1000 static void nested_svm_init(struct vcpu_svm *svm)
1002 svm->vmcb->control.exit_code = SVM_EXIT_INIT;
1003 svm->vmcb->control.exit_info_1 = 0;
1004 svm->vmcb->control.exit_info_2 = 0;
1006 nested_svm_vmexit(svm);
1012 struct vcpu_svm *svm = to_svm(vcpu);
1014 kvm_event_needs_reinjection(vcpu) || svm->nested.nested_run_pending;
1021 if (!nested_exit_on_init(svm))
1023 nested_svm_init(svm);
1030 if (!nested_exit_on_exception(svm))
1032 nested_svm_inject_exception_vmexit(svm);
1039 if (!nested_exit_on_smi(svm))
1041 nested_svm_smi(svm);
1048 if (!nested_exit_on_nmi(svm))
1050 nested_svm_nmi(svm);
1057 if (!nested_exit_on_intr(svm))
1059 nested_svm_intr(svm);
1066 int nested_svm_exit_special(struct vcpu_svm *svm)
1068 u32 exit_code = svm->vmcb->control.exit_code;
1078 if (get_host_vmcb(svm)->control.intercepts[INTERCEPT_EXCEPTION] &
1082 svm->vcpu.arch.apf.host_apf_flags)
1098 struct vcpu_svm *svm;
1105 &user_kvm_nested_state->data.svm[0];
1110 svm = to_svm(vcpu);
1117 kvm_state.hdr.svm.vmcb_pa = svm->nested.vmcb12_gpa;
1121 if (svm->nested.nested_run_pending)
1125 if (gif_set(svm))
1140 if (copy_to_user(&user_vmcb->control, &svm->nested.ctl,
1143 if (copy_to_user(&user_vmcb->save, &svm->nested.hsave->save,
1155 struct vcpu_svm *svm = to_svm(vcpu);
1156 struct vmcb *hsave = svm->nested.hsave;
1158 &user_kvm_nested_state->data.svm[0];
1191 svm_set_gif(svm, !!(kvm_state->flags & KVM_STATE_NESTED_GIF_SET));
1195 if (!page_address_valid(vcpu, kvm_state->hdr.svm.vmcb_pa))
1240 copy_vmcb_control_area(&hsave->control, &svm->vmcb->control);
1246 svm->nested.vmcb12_gpa = kvm_state->hdr.svm.vmcb_pa;
1247 load_nested_vmcb_control(svm, ctl);
1248 nested_prepare_vmcb_control(svm);