Lines Matching refs:vmcb

303 svm->vmcb->save.efer = efer | EFER_SVME;
304 vmcb_mark_dirty(svm->vmcb, VMCB_CR);
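
Lines 303-304 show the idiom repeated throughout this file: write a VMCB field, then clear the matching clean bit so hardware reloads that state group on the next VMRUN (EFER lives in the CR group). A minimal userspace sketch of that bookkeeping; the struct and names below are illustrative, not the kernel's:

	#include <stdint.h>
	#include <stdio.h>

	/* Illustrative clean-bits groups; the real positions live in svm.h. */
	enum { DEMO_VMCB_CR, DEMO_VMCB_DT, DEMO_VMCB_NBITS };

	struct demo_vmcb {
		uint32_t clean;		/* bit set => hardware may reuse its cached copy */
		uint64_t efer;
	};

	static void demo_mark_dirty(struct demo_vmcb *vmcb, int bit)
	{
		vmcb->clean &= ~(1u << bit);	/* what vmcb_mark_dirty() does */
	}

	int main(void)
	{
		struct demo_vmcb vmcb = { .clean = (1u << DEMO_VMCB_NBITS) - 1 };

		vmcb.efer |= 1u << 12;			/* e.g. set EFER.SVME */
		demo_mark_dirty(&vmcb, DEMO_VMCB_CR);
		printf("clean = %#x\n", vmcb.clean);	/* 0x2 */
		return 0;
	}
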
313 if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK)
323 svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
325 svm->vmcb->control.int_state |= SVM_INTERRUPT_SHADOW_MASK;
333 if (nrips && svm->vmcb->control.next_rip != 0) {
335 svm->next_rip = svm->vmcb->control.next_rip;
370 svm->int3_rip = rip + svm->vmcb->save.cs.base;
374 svm->vmcb->control.event_inj = nr
378 svm->vmcb->control.event_inj_err = error_code;
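
Lines 333-378 resolve the next RIP and build the EVENTINJ word: the vector in the low byte, an event type, a valid bit, and an optional error code delivered through event_inj_err. A sketch of that encoding, with the bit layout taken from the AMD APM (constants redefined locally for illustration):

	#include <stdint.h>
	#include <stdio.h>

	/* EVENTINJ layout (AMD APM vol. 2): vector[7:0], type[10:8],
	 * error-code-valid bit 11, valid bit 31. */
	#define EVTINJ_TYPE_EXEPT	(3u << 8)
	#define EVTINJ_VALID_ERR	(1u << 11)
	#define EVTINJ_VALID		(1u << 31)

	static uint32_t build_event_inj(uint8_t vector, int has_error_code)
	{
		uint32_t ev = vector | EVTINJ_VALID | EVTINJ_TYPE_EXEPT;

		if (has_error_code)
			ev |= EVTINJ_VALID_ERR;	/* event_inj_err carries the code */
		return ev;
	}

	int main(void)
	{
		printf("#GP inject word: %#x\n", build_event_inj(13, 1));	/* 0x80000b0d */
		return 0;
	}
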
768 svm->vmcb->control.virt_ext |= LBR_CTL_ENABLE_MASK;
779 svm->vmcb->control.virt_ext &= ~LBR_CTL_ENABLE_MASK;
793 svm->vmcb->save.rflags &= ~X86_EFLAGS_TF;
795 svm->vmcb->save.rflags &= ~X86_EFLAGS_RF;
802 struct vmcb_control_area *control = &svm->vmcb->control;
811 vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
820 struct vmcb_control_area *control = &svm->vmcb->control;
829 vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
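
Lines 802-829 are the set/clear intercept helpers: the intercepts live in a bitmap of 32-bit words inside the control area, and any change must dirty the VMCB_INTERCEPTS group. A self-contained model of the bit plumbing (the bitmap size and intercept number are illustrative):

	#include <stdint.h>
	#include <stdio.h>

	#define DEMO_MAX_INTERCEPT 128

	struct demo_control {
		uint32_t intercepts[DEMO_MAX_INTERCEPT / 32];
	};

	static void demo_set_intercept(struct demo_control *c, unsigned int bit)
	{
		c->intercepts[bit / 32] |= 1u << (bit % 32);
		/* the real helpers then call vmcb_mark_dirty(..., VMCB_INTERCEPTS) */
	}

	static void demo_clr_intercept(struct demo_control *c, unsigned int bit)
	{
		c->intercepts[bit / 32] &= ~(1u << (bit % 32));
	}

	int main(void)
	{
		struct demo_control c = { { 0 } };

		demo_set_intercept(&c, 61);	/* made-up intercept number */
		printf("word1 = %#x\n", c.intercepts[1]);	/* 0x20000000 */
		demo_clr_intercept(&c, 61);
		return 0;
	}
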
1071 g_tsc_offset = svm->vmcb->control.tsc_offset -
1077 svm->vmcb->control.tsc_offset - g_tsc_offset,
1080 svm->vmcb->control.tsc_offset = offset + g_tsc_offset;
1082 vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
1083 return svm->vmcb->control.tsc_offset;
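
Lines 1071-1083 keep the nested TSC offset coherent: g_tsc_offset is the L2-minus-L1 delta already in effect, so a newly requested L1 offset is installed with that delta re-applied. The arithmetic in isolation, with illustrative numbers:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		int64_t hsave_offset   = 100;	/* L1's offset, saved at nested VMRUN */
		int64_t current_offset = 250;	/* offset in effect while L2 runs */
		int64_t new_l1_offset  = 300;	/* what L1 now asks for */

		int64_t g_tsc_offset = current_offset - hsave_offset;	/* L2 delta: 150 */
		int64_t installed    = new_l1_offset + g_tsc_offset;	/* 450 */

		printf("installed offset = %lld\n", (long long)installed);
		return 0;
	}
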
1103 struct vmcb_control_area *control = &svm->vmcb->control;
1104 struct vmcb_save_area *save = &svm->vmcb->save;
1243 svm->vmcb->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;
1249 svm->vmcb->control.int_ctl |= V_GIF_ENABLE_MASK;
1253 svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ENABLE;
1257 vmcb_mark_all_dirty(svm->vmcb);
1319 svm->vmcb = page_address(vmcb_page);
1335 static void svm_clear_current_vmcb(struct vmcb *vmcb)
1340 cmpxchg(&per_cpu(svm_data, i)->current_vmcb, vmcb, NULL);
1348 * The vmcb page can be recycled, causing a false negative in
1350 * vmcb page recorded as its current vmcb.
1352 svm_clear_current_vmcb(svm->vmcb);
1369 vmcb_mark_all_dirty(svm->vmcb);
1393 if (sd->current_vmcb != svm->vmcb) {
1394 sd->current_vmcb = svm->vmcb;
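
Lines 1335-1394 implement the per-CPU current_vmcb cache: a vcpu whose VMCB is already current on this CPU can skip part of the reload, and a VMCB being freed must be scrubbed from every CPU's slot because the page can be recycled and alias a new VMCB (the false negative noted at line 1348). A userspace model of the scrub, using GCC/Clang compiler atomics in place of the kernel's cmpxchg() and per-cpu machinery:

	#include <stddef.h>

	#define DEMO_NR_CPUS 4

	struct demo_vmcb { char page[16]; };

	/* Stand-in for per_cpu(svm_data, i)->current_vmcb. */
	static struct demo_vmcb *current_vmcb[DEMO_NR_CPUS];

	static void demo_clear_current_vmcb(struct demo_vmcb *vmcb)
	{
		for (int i = 0; i < DEMO_NR_CPUS; i++) {
			struct demo_vmcb *expected = vmcb;

			/* Clear the slot only if it still points at this
			 * vmcb, mirroring the kernel's cmpxchg(). */
			__atomic_compare_exchange_n(&current_vmcb[i], &expected,
						    NULL, 0, __ATOMIC_SEQ_CST,
						    __ATOMIC_SEQ_CST);
		}
	}

	int main(void)
	{
		struct demo_vmcb vmcb;

		current_vmcb[2] = &vmcb;
		demo_clear_current_vmcb(&vmcb);	/* slot 2 is now NULL */
		return 0;
	}
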
1427 unsigned long rflags = svm->vmcb->save.rflags;
1449 to_svm(vcpu)->vmcb->save.rflags = rflags;
1476 control = &svm->vmcb->control;
1481 vmcb_mark_dirty(svm->vmcb, VMCB_INTR);
1489 svm->vmcb->control.int_ctl &= ~V_IRQ_INJECTION_BITS_MASK;
1493 WARN_ON((svm->vmcb->control.int_ctl & V_TPR_MASK) !=
1495 svm->vmcb->control.int_ctl |= svm->nested.ctl.int_ctl &
1498 svm->vmcb->control.int_vector = svm->nested.ctl.int_vector;
1501 vmcb_mark_dirty(svm->vmcb, VMCB_INTR);
1506 struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;
1593 var->dpl = to_svm(vcpu)->vmcb->save.cpl;
1600 struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;
1609 dt->size = svm->vmcb->save.idtr.limit;
1610 dt->address = svm->vmcb->save.idtr.base;
1617 svm->vmcb->save.idtr.limit = dt->size;
1618 svm->vmcb->save.idtr.base = dt->address;
1619 vmcb_mark_dirty(svm->vmcb, VMCB_DT);
1626 dt->size = svm->vmcb->save.gdtr.limit;
1627 dt->address = svm->vmcb->save.gdtr.base;
1634 svm->vmcb->save.gdtr.limit = dt->size;
1635 svm->vmcb->save.gdtr.base = dt->address;
1636 vmcb_mark_dirty(svm->vmcb, VMCB_DT);
1642 u64 *hcr0 = &svm->vmcb->save.cr0;
1647 vmcb_mark_dirty(svm->vmcb, VMCB_CR);
1666 svm->vmcb->save.efer |= EFER_LMA | EFER_LME;
1671 svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME);
1687 svm->vmcb->save.cr0 = cr0;
1688 vmcb_mark_dirty(svm->vmcb, VMCB_CR);
1700 unsigned long old_cr4 = to_svm(vcpu)->vmcb->save.cr4;
1709 to_svm(vcpu)->vmcb->save.cr4 = cr4;
1710 vmcb_mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
1739 svm->vmcb->save.cpl = (var->dpl & 3);
1741 vmcb_mark_dirty(svm->vmcb, VMCB_SEG);
1761 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
1765 svm->vmcb->control.asid = sd->next_asid++;
1767 vmcb_mark_dirty(svm->vmcb, VMCB_ASID);
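
Lines 1761-1767 assign a fresh ASID from a per-CPU counter and, when the counter runs past max_asid, restart it and request TLB_CONTROL_FLUSH_ALL_ASID so stale translations cannot survive into the recycled numbers. A sketch of that allocation policy (generation counting and min_asid are omitted; field names are illustrative):

	#include <stdint.h>
	#include <stdio.h>

	struct demo_cpu_data {
		uint32_t max_asid;
		uint32_t next_asid;
	};

	/* Returns the ASID to run with; sets *flush_all when the space wrapped. */
	static uint32_t demo_new_asid(struct demo_cpu_data *sd, int *flush_all)
	{
		*flush_all = 0;
		if (sd->next_asid > sd->max_asid) {
			/* Out of fresh ASIDs: recycle from 1 and flush
			 * everything, as TLB_CONTROL_FLUSH_ALL_ASID does. */
			sd->next_asid = 1;
			*flush_all = 1;
		}
		return sd->next_asid++;
	}

	int main(void)
	{
		struct demo_cpu_data sd = { .max_asid = 2, .next_asid = 1 };
		int flush;

		for (int i = 0; i < 4; i++)
			printf("asid %u flush %d\n", demo_new_asid(&sd, &flush), flush);
		return 0;
	}
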
1772 struct vmcb *vmcb = svm->vmcb;
1774 if (unlikely(value != vmcb->save.dr6)) {
1775 vmcb->save.dr6 = value;
1776 vmcb_mark_dirty(vmcb, VMCB_DR);
1789 * We cannot reset svm->vmcb->save.dr6 to DR6_FIXED_1|DR6_RTM here,
1792 vcpu->arch.dr6 = svm->vmcb->save.dr6;
1793 vcpu->arch.dr7 = svm->vmcb->save.dr7;
1802 svm->vmcb->save.dr7 = value;
1803 vmcb_mark_dirty(svm->vmcb, VMCB_DR);
1808 u64 fault_address = svm->vmcb->control.exit_info_2;
1809 u64 error_code = svm->vmcb->control.exit_info_1;
1813 svm->vmcb->control.insn_bytes : NULL,
1814 svm->vmcb->control.insn_len);
1819 u64 fault_address = __sme_clr(svm->vmcb->control.exit_info_2);
1820 u64 error_code = svm->vmcb->control.exit_info_1;
1825 svm->vmcb->control.insn_bytes : NULL,
1826 svm->vmcb->control.insn_len);
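
Lines 1808-1826 are the two page-fault paths: exit_info_2 carries the faulting address, exit_info_1 the error code, and the SEV variant strips the memory-encryption C-bit from the address with __sme_clr() before use. A model of that masking (the real C-bit position comes from CPUID 0x8000001f; bit 47 here is purely illustrative):

	#include <stdint.h>
	#include <stdio.h>

	#define DEMO_SME_MASK (1ull << 47)	/* illustrative C-bit */

	static uint64_t demo_sme_clr(uint64_t addr)
	{
		return addr & ~DEMO_SME_MASK;	/* what __sme_clr() does */
	}

	int main(void)
	{
		uint64_t exit_info_2 = DEMO_SME_MASK | 0x7f8000;	/* encrypted GPA */

		printf("fault address = %#llx\n",
		       (unsigned long long)demo_sme_clr(exit_info_2));
		return 0;
	}
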
1837 u32 payload = (svm->vmcb->save.dr6 ^ DR6_RTM) & ~DR6_FIXED_1;
1851 kvm_run->debug.arch.dr6 = svm->vmcb->save.dr6;
1852 kvm_run->debug.arch.dr7 = svm->vmcb->save.dr7;
1854 svm->vmcb->save.cs.base + svm->vmcb->save.rip;
1867 kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip;
1886 u32 error_code = svm->vmcb->control.exit_info_1;
1993 clear_page(svm->vmcb);
2003 u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */
2015 svm->next_rip = svm->vmcb->control.exit_info_2;
2048 struct vmcb *nested_vmcb;
2055 ret = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(svm->vmcb->save.rax), &map);
2066 nested_svm_vmloadsave(nested_vmcb, svm->vmcb);
2074 struct vmcb *nested_vmcb;
2081 ret = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(svm->vmcb->save.rax), &map);
2092 nested_svm_vmloadsave(svm->vmcb, nested_vmcb);
2166 trace_kvm_invlpga(svm->vmcb->save.rip, kvm_rcx_read(&svm->vcpu),
2177 trace_kvm_skinit(svm->vmcb->save.rip, kvm_rax_read(&svm->vcpu));
2210 int int_type = svm->vmcb->control.exit_int_info &
2212 int int_vec = svm->vmcb->control.exit_int_info & SVM_EVTINJ_VEC_MASK;
2214 svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_TYPE_MASK;
2216 svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID;
2220 tss_selector = (u16)svm->vmcb->control.exit_info_1;
2222 if (svm->vmcb->control.exit_info_2 &
2225 else if (svm->vmcb->control.exit_info_2 &
2239 if (svm->vmcb->control.exit_info_2 &
2243 (u32)svm->vmcb->control.exit_info_2;
2296 kvm_mmu_invlpg(&svm->vcpu, svm->vmcb->control.exit_info_1);
2335 svm->vmcb->control.exit_code = SVM_EXIT_CR0_SEL_WRITE;
2353 if (unlikely((svm->vmcb->control.exit_info_1 & CR_VALID) == 0))
2356 reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
2357 if (svm->vmcb->control.exit_code == SVM_EXIT_CR0_SEL_WRITE)
2360 cr = svm->vmcb->control.exit_code - SVM_EXIT_READ_CR0;
2436 reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
2437 dr = svm->vmcb->control.exit_code - SVM_EXIT_READ_DR0;
2494 msr_info->data = svm->vmcb->save.star;
2498 msr_info->data = svm->vmcb->save.lstar;
2501 msr_info->data = svm->vmcb->save.cstar;
2504 msr_info->data = svm->vmcb->save.kernel_gs_base;
2507 msr_info->data = svm->vmcb->save.sfmask;
2511 msr_info->data = svm->vmcb->save.sysenter_cs;
2533 msr_info->data = svm->vmcb->save.dbgctl;
2536 msr_info->data = svm->vmcb->save.br_from;
2539 msr_info->data = svm->vmcb->save.br_to;
2542 msr_info->data = svm->vmcb->save.last_excp_from;
2545 msr_info->data = svm->vmcb->save.last_excp_to;
2634 svm->vmcb->save.g_pat = data;
2635 vmcb_mark_dirty(svm->vmcb, VMCB_NPT);
2688 svm->vmcb->save.star = data;
2692 svm->vmcb->save.lstar = data;
2695 svm->vmcb->save.cstar = data;
2698 svm->vmcb->save.kernel_gs_base = data;
2701 svm->vmcb->save.sfmask = data;
2705 svm->vmcb->save.sysenter_cs = data;
2709 svm->vmcb->save.sysenter_eip = data;
2713 svm->vmcb->save.sysenter_esp = data;
2740 svm->vmcb->save.dbgctl = data;
2741 vmcb_mark_dirty(svm->vmcb, VMCB_LBR);
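
Lines 2494-2741 route architectural MSRs to the save-area fields the hardware loads at VMRUN, one switch arm per MSR, with the LBR-related writes additionally dirtying VMCB_LBR. The routing pattern reduced to a sketch (the save struct is illustrative; the MSR numbers are the architectural ones):

	#include <stdint.h>
	#include <stdio.h>

	#define MSR_STAR	0xc0000081
	#define MSR_LSTAR	0xc0000082
	#define MSR_CSTAR	0xc0000083

	struct demo_save_area {
		uint64_t star, lstar, cstar;
	};

	/* The shape of svm_get_msr()/svm_set_msr(): map an MSR index to the
	 * field hardware consumes at VMRUN; NULL for unhandled MSRs. */
	static uint64_t *demo_msr_slot(struct demo_save_area *save, uint32_t msr)
	{
		switch (msr) {
		case MSR_STAR:	return &save->star;
		case MSR_LSTAR:	return &save->lstar;
		case MSR_CSTAR:	return &save->cstar;
		default:	return NULL;
		}
	}

	int main(void)
	{
		struct demo_save_area save = { 0 };
		uint64_t *slot = demo_msr_slot(&save, MSR_LSTAR);

		if (slot)
			*slot = 0xffffffff81000000ull;	/* illustrative handler address */
		printf("lstar = %#llx\n", (unsigned long long)save.lstar);
		return 0;
	}
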
2799 if (svm->vmcb->control.exit_info_1)
2866 type = svm->vmcb->control.exit_info_2;
2867 gva = svm->vmcb->control.exit_info_1;
2949 struct vmcb_control_area *control = &svm->vmcb->control;
2950 struct vmcb_save_area *save = &svm->vmcb->save;
3067 struct vmcb_control_area *control = &to_svm(vcpu)->vmcb->control;
3083 u32 exit_code = svm->vmcb->control.exit_code;
3088 vcpu->arch.cr0 = svm->vmcb->save.cr0;
3090 vcpu->arch.cr3 = svm->vmcb->save.cr3;
3106 if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
3109 = svm->vmcb->control.exit_code;
3170 svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
3183 svm->vmcb->control.event_inj = vcpu->arch.interrupt.nr |
3206 struct vmcb *vmcb = svm->vmcb;
3215 ret = (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) ||
3257 struct vmcb *vmcb = svm->vmcb;
3277 return (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK);
3340 svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
3365 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
3374 invlpga(gva, svm->vmcb->control.asid);
3390 int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK;
3405 svm->vmcb->control.int_ctl &= ~V_TPR_MASK;
3406 svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK;
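
Lines 3390-3406 mirror the APIC task priority through V_TPR, the low four bits of int_ctl: one direction reads it out after an exit, the other folds the current CR8 back in before entry. The masking in isolation (V_TPR_MASK per the AMD APM; values illustrative):

	#include <stdint.h>
	#include <stdio.h>

	#define V_TPR_MASK 0x0fu	/* low 4 bits of int_ctl */

	int main(void)
	{
		uint32_t int_ctl = 0xa5;

		/* sync_cr8_to_lapic direction: read TPR out of int_ctl */
		uint32_t cr8 = int_ctl & V_TPR_MASK;
		printf("guest TPR = %#x\n", cr8);	/* 0x5 */

		/* sync_lapic_to_cr8 direction: fold a new CR8 back in */
		cr8 = 0x7;
		int_ctl = (int_ctl & ~V_TPR_MASK) | (cr8 & V_TPR_MASK);
		printf("int_ctl = %#x\n", int_ctl);	/* 0xa7 */
		return 0;
	}
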
3413 u32 exitintinfo = svm->vmcb->control.exit_int_info;
3459 u32 err = svm->vmcb->control.exit_int_info_err;
3476 struct vmcb_control_area *control = &svm->vmcb->control;
3486 struct vmcb_control_area *control = &to_svm(vcpu)->vmcb->control;
3559 svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
3560 svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
3561 svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
3569 if (svm->nmi_singlestep && svm->vmcb->control.event_inj) {
3583 svm->vmcb->save.cr2 = vcpu->arch.cr2;
3631 vcpu->arch.cr2 = svm->vmcb->save.cr2;
3632 vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
3633 vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
3634 vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
3636 if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
3644 if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
3655 svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
3656 vmcb_mark_all_clean(svm->vmcb);
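
Lines 3559-3656 bracket the actual VMRUN: RAX, RSP and RIP are the only GPRs the hardware exchanges through the VMCB, so they are copied in beforehand and back out afterwards, and once the exit has been consumed every clean bit is re-armed via vmcb_mark_all_clean(). A schematic of that sequence (the types and the run step are stand-ins, not the kernel's):

	#include <stdint.h>

	struct demo_save { uint64_t rax, rsp, rip; };
	struct demo_regs { uint64_t rax, rsp, rip; };

	/* Stand-in for VMRUN; a real run would enter the guest here. */
	static void demo_vmrun(struct demo_save *save)
	{
		save->rip += 2;	/* pretend the guest executed one instruction */
	}

	static void demo_run(struct demo_save *save, struct demo_regs *regs,
			     uint32_t *clean)
	{
		/* copy in: only RAX/RSP/RIP live in the save area */
		save->rax = regs->rax;
		save->rsp = regs->rsp;
		save->rip = regs->rip;

		demo_vmrun(save);

		/* copy out, then mark every clean-bits group valid again */
		regs->rax = save->rax;
		regs->rsp = save->rsp;
		regs->rip = save->rip;
		*clean = ~0u;	/* vmcb_mark_all_clean() */
	}

	int main(void)
	{
		struct demo_save save = { 0 };
		struct demo_regs regs = { .rip = 0x1000 };
		uint32_t clean = 0;

		demo_run(&save, &regs, &clean);
		return 0;
	}
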
3659 if (svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR)
3672 if (unlikely(svm->vmcb->control.exit_code ==
3692 svm->vmcb->control.nested_cr3 = cr3;
3693 vmcb_mark_dirty(svm->vmcb, VMCB_NPT);
3701 svm->vmcb->save.cr3 = cr3;
3702 vmcb_mark_dirty(svm->vmcb, VMCB_CR);
3875 struct vmcb *vmcb = svm->vmcb;
3926 vmcb->control.exit_info_1 = 1;
3928 vmcb->control.exit_info_1 = 0;
3965 vmcb->control.exit_info_1 = exit_info;
3966 vmcb->control.exit_info_2 = info->next_rip;
3976 vmcb->control.next_rip = info->next_rip;
3977 vmcb->control.exit_code = icpt_info.exit_code;
3989 if (to_svm(vcpu)->vmcb->control.exit_code == SVM_EXIT_INTR)
4040 svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
4041 svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
4042 svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
4187 (vmcb_is_intercept(&svm->vmcb->control, INTERCEPT_INIT));