Lines matching refs: vmcb
342 svm->vmcb->save.efer = efer | EFER_SVME;
343 vmcb_mark_dirty(svm->vmcb, VMCB_CR);
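vmcb_mark_dirty() recurs throughout this listing, so a note up front: the VMCB carries a "clean" field of per-group bits, and clearing a bit forces hardware to re-read that guest-state group on the next VMRUN instead of trusting its cached copy. A minimal userspace model of the protocol follows; the struct and the VMCB_CR bit position are simplified stand-ins for the kernel's definitions, not the real layout.

    #include <stdint.h>

    enum { VMCB_CR = 5 };                    /* stand-in bit position */

    struct fake_control { uint32_t clean; }; /* stand-in for the control area */

    /* Writing guest state makes the CPU's cached copy stale: clear
     * the group's clean bit so it is reloaded on VMRUN. */
    static void vmcb_mark_dirty(struct fake_control *c, int bit)
    {
            c->clean &= ~(1u << bit);
    }

    /* Invalidate everything, e.g. after the VMCB last ran on a
     * different physical CPU (cf. lines 3557-3563 below). */
    static void vmcb_mark_all_dirty(struct fake_control *c)
    {
            c->clean = 0;
    }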
352 if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK)
362 svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
364 svm->vmcb->control.int_state |= SVM_INTERRUPT_SHADOW_MASK;
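Lines 352-364 come from the interrupt-shadow accessors. The shadow is the one-instruction window after STI or MOV SS during which interrupts must be held off, and SVM reports it as a bit in int_state. A sketch of both accessors, assuming a pared-down control area (SVM_INTERRUPT_SHADOW_MASK is bit 0 per the APM):

    #include <stdbool.h>
    #include <stdint.h>

    #define SVM_INTERRUPT_SHADOW_MASK 1u

    struct fake_control { uint32_t int_state; };

    /* Non-zero while the guest is in the STI/MOV-SS shadow. */
    static uint32_t get_interrupt_shadow(const struct fake_control *c)
    {
            return (c->int_state & SVM_INTERRUPT_SHADOW_MASK) ? 1 : 0;
    }

    static void set_interrupt_shadow(struct fake_control *c, bool shadow)
    {
            c->int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
            if (shadow)
                    c->int_state |= SVM_INTERRUPT_SHADOW_MASK;
    }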
383 if (nrips && svm->vmcb->control.next_rip != 0) {
385 svm->next_rip = svm->vmcb->control.next_rip;
398 old_rflags = svm->vmcb->save.rflags;
404 svm->vmcb->save.rflags = old_rflags;
453 svm->soft_int_csbase = svm->vmcb->save.cs.base;
461 svm->vmcb->control.next_rip = rip;
477 svm->vmcb->control.event_inj = ex->vector
481 svm->vmcb->control.event_inj_err = ex->error_code;
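Line 477 is cut off by the per-line match: the full assignment in the exception-injection path ORs the vector together with the type and validity flags, while the error code goes into the separate event_inj_err field (line 481). The EVENTINJ encoding, with the field values the kernel's svm.h defines (vector in bits 7:0, type in bits 10:8, error-code-valid in bit 11, valid in bit 31):

    #include <stdbool.h>
    #include <stdint.h>

    #define SVM_EVTINJ_TYPE_EXEPT (3u << 8)   /* hardware exception */
    #define SVM_EVTINJ_VALID_ERR  (1u << 11)  /* error code present */
    #define SVM_EVTINJ_VALID      (1u << 31)  /* injection requested */

    static uint32_t encode_exception(uint8_t vector, bool has_error_code)
    {
            return vector
                    | SVM_EVTINJ_VALID
                    | (has_error_code ? SVM_EVTINJ_VALID_ERR : 0)
                    | SVM_EVTINJ_TYPE_EXEPT;
    }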
743 struct vmcb *vmcb = svm->vmcb01.ptr;
745 vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_READ);
746 vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_READ);
747 vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_READ);
748 vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_READ);
749 vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_READ);
750 vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_READ);
751 vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_READ);
752 vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_WRITE);
753 vmcb_set_intercept(&vmcb->control, INTERCEPT_DR1_WRITE);
754 vmcb_set_intercept(&vmcb->control, INTERCEPT_DR2_WRITE);
755 vmcb_set_intercept(&vmcb->control, INTERCEPT_DR3_WRITE);
756 vmcb_set_intercept(&vmcb->control, INTERCEPT_DR4_WRITE);
757 vmcb_set_intercept(&vmcb->control, INTERCEPT_DR5_WRITE);
758 vmcb_set_intercept(&vmcb->control, INTERCEPT_DR6_WRITE);
759 vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_READ);
760 vmcb_set_intercept(&vmcb->control, INTERCEPT_DR7_WRITE);
767 struct vmcb *vmcb = svm->vmcb01.ptr;
769 vmcb->control.intercepts[INTERCEPT_DR] = 0;
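Lines 743-760 install a read and a write intercept for each of DR0-DR7 one call at a time, while lines 767-769 drop them all by zeroing the DR word of the intercept bitmap. A userspace model of the pair; the bitmap layout is simplified, though the READ and WRITE enum runs really are consecutive per register:

    #include <stdint.h>

    enum { INTERCEPT_DR = 1, MAX_INTERCEPT = 6 };      /* word indices */
    enum { INTERCEPT_DR0_READ = 32, INTERCEPT_DR0_WRITE = 48 };

    struct fake_control { uint32_t intercepts[MAX_INTERCEPT]; };

    static void vmcb_set_intercept(struct fake_control *c, int bit)
    {
            c->intercepts[bit / 32] |= 1u << (bit % 32);
    }

    /* Loop form of the sixteen calls above. */
    static void set_dr_intercepts(struct fake_control *c)
    {
            for (int i = 0; i < 8; i++) {
                    vmcb_set_intercept(c, INTERCEPT_DR0_READ + i);
                    vmcb_set_intercept(c, INTERCEPT_DR0_WRITE + i);
            }
    }

    /* Clearing is one store: wipe the whole DR word (line 769). */
    static void clr_dr_intercepts(struct fake_control *c)
    {
            c->intercepts[INTERCEPT_DR] = 0;
    }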
999 void svm_copy_lbrs(struct vmcb *to_vmcb, struct vmcb *from_vmcb)
1014 svm->vmcb->control.virt_ext |= LBR_CTL_ENABLE_MASK;
1022 svm_copy_lbrs(svm->vmcb, svm->vmcb01.ptr);
1029 svm->vmcb->control.virt_ext &= ~LBR_CTL_ENABLE_MASK;
1040 svm_copy_lbrs(svm->vmcb01.ptr, svm->vmcb);
1043 static struct vmcb *svm_get_lbr_vmcb(struct vcpu_svm *svm)
1050 return svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK ? svm->vmcb :
1057 bool current_enable_lbrv = svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK;
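Lines 999-1057 implement LBR virtualization: LBR_CTL_ENABLE_MASK in virt_ext turns the feature on per VMCB, and svm_copy_lbrs() moves the last-branch MSRs between vmcb01 and the nested VMCB whenever the setting flips. The selector split across lines 1043 and 1050 reads, reconstructed with a comment:

    static struct vmcb *svm_get_lbr_vmcb(struct vcpu_svm *svm)
    {
            /* If the active VMCB virtualizes LBRs, it holds the live
             * values; otherwise they were copied back into vmcb01. */
            return svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK ?
                   svm->vmcb : svm->vmcb01.ptr;
    }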
1078 svm->vmcb->save.rflags &= ~X86_EFLAGS_TF;
1080 svm->vmcb->save.rflags &= ~X86_EFLAGS_RF;
1087 struct vmcb_control_area *control = &svm->vmcb->control;
1099 vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
1108 struct vmcb_control_area *control = &svm->vmcb->control;
1120 vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
1176 svm->vmcb->control.tsc_offset = vcpu->arch.tsc_offset;
1177 vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
1224 svm->vmcb->control.virt_ext &= ~VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;
1236 svm->vmcb->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;
1247 struct vmcb *vmcb = svm->vmcb01.ptr;
1248 struct vmcb_control_area *control = &vmcb->control;
1249 struct vmcb_save_area *save = &vmcb->save;
1372 avic_init_vmcb(svm, vmcb);
1375 svm->vmcb->control.int_ctl |= V_NMI_ENABLE_MASK;
1380 svm->vmcb->control.int_ctl |= V_GIF_ENABLE_MASK;
1386 svm_hv_init_vmcb(vmcb);
1389 vmcb_mark_all_dirty(vmcb);
1427 svm->vmcb = target_vmcb->ptr;
1495 static void svm_clear_current_vmcb(struct vmcb *vmcb)
1500 cmpxchg(per_cpu_ptr(&svm_data.current_vmcb, i), vmcb, NULL);
1508 * The vmcb page can be recycled, causing a false negative in
1510 * vmcb page recorded as its current vmcb.
1512 svm_clear_current_vmcb(svm->vmcb);
1572 if (sd->current_vmcb != svm->vmcb) {
1573 sd->current_vmcb = svm->vmcb;
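Lines 1495-1573 maintain a per-CPU cache of the last VMCB run on each processor: svm_vcpu_load() skips vmcb_mark_all_dirty() when the cached pointer still matches, and svm_clear_current_vmcb() scrubs the pointer from every CPU when a vCPU is freed so a recycled VMCB page cannot produce a false match (the comment at lines 1508-1510). A userspace model, assuming a plain array in place of per-cpu data and C11 atomics in place of cmpxchg():

    #include <stdatomic.h>
    #include <stddef.h>

    #define NR_CPUS 8
    static _Atomic(void *) current_vmcb[NR_CPUS];

    static void clear_current_vmcb(void *vmcb)
    {
            for (size_t i = 0; i < NR_CPUS; i++) {
                    void *expected = vmcb;

                    /* Only clear slots that still point at this VMCB;
                     * leave other vCPUs' entries alone. */
                    atomic_compare_exchange_strong(&current_vmcb[i],
                                                   &expected, NULL);
            }
    }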
1595 unsigned long rflags = svm->vmcb->save.rflags;
1617 to_svm(vcpu)->vmcb->save.rflags = rflags;
1622 struct vmcb *vmcb = to_svm(vcpu)->vmcb;
1625 ? vmcb->control.int_state & SVM_GUEST_INTERRUPT_MASK
1672 control = &svm->vmcb->control;
1677 vmcb_mark_dirty(svm->vmcb, VMCB_INTR);
1685 svm->vmcb->control.int_ctl &= ~V_IRQ_INJECTION_BITS_MASK;
1689 WARN_ON((svm->vmcb->control.int_ctl & V_TPR_MASK) !=
1692 svm->vmcb->control.int_ctl |= svm->nested.ctl.int_ctl &
1695 svm->vmcb->control.int_vector = svm->nested.ctl.int_vector;
1698 vmcb_mark_dirty(svm->vmcb, VMCB_INTR);
1703 struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;
1791 var->dpl = to_svm(vcpu)->vmcb->save.cpl;
1798 struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;
1816 dt->size = svm->vmcb->save.idtr.limit;
1817 dt->address = svm->vmcb->save.idtr.base;
1824 svm->vmcb->save.idtr.limit = dt->size;
1825 svm->vmcb->save.idtr.base = dt->address;
1826 vmcb_mark_dirty(svm->vmcb, VMCB_DT);
1833 dt->size = svm->vmcb->save.gdtr.limit;
1834 dt->address = svm->vmcb->save.gdtr.base;
1841 svm->vmcb->save.gdtr.limit = dt->size;
1842 svm->vmcb->save.gdtr.base = dt->address;
1843 vmcb_mark_dirty(svm->vmcb, VMCB_DT);
1859 svm->vmcb->save.cr3 = cr3;
1860 vmcb_mark_dirty(svm->vmcb, VMCB_CR);
1880 svm->vmcb->save.efer |= EFER_LMA | EFER_LME;
1886 svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME);
1906 svm->vmcb->save.cr0 = hcr0;
1907 vmcb_mark_dirty(svm->vmcb, VMCB_CR);
1947 to_svm(vcpu)->vmcb->save.cr4 = cr4;
1948 vmcb_mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
1980 svm->vmcb->save.cpl = (var->dpl & 3);
1982 vmcb_mark_dirty(svm->vmcb, VMCB_SEG);
2002 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
2003 vmcb_mark_dirty(svm->vmcb, VMCB_ASID);
2012 struct vmcb *vmcb = svm->vmcb;
2017 if (unlikely(value != vmcb->save.dr6)) {
2018 vmcb->save.dr6 = value;
2019 vmcb_mark_dirty(vmcb, VMCB_DR);
2035 * We cannot reset svm->vmcb->save.dr6 to DR6_ACTIVE_LOW here,
2038 vcpu->arch.dr6 = svm->vmcb->save.dr6;
2039 vcpu->arch.dr7 = svm->vmcb->save.dr7;
2051 svm->vmcb->save.dr7 = value;
2052 vmcb_mark_dirty(svm->vmcb, VMCB_DR);
2059 u64 fault_address = svm->vmcb->control.exit_info_2;
2060 u64 error_code = svm->vmcb->control.exit_info_1;
2064 svm->vmcb->control.insn_bytes : NULL,
2065 svm->vmcb->control.insn_len);
2072 u64 fault_address = svm->vmcb->control.exit_info_2;
2073 u64 error_code = svm->vmcb->control.exit_info_1;
2078 svm->vmcb->control.insn_bytes : NULL,
2079 svm->vmcb->control.insn_len);
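Both page-fault handlers above (lines 2059-2079) take the faulting address from exit_info_2 and the page-fault error code from exit_info_1. The error code uses the standard x86 #PF bit assignments, which a small decoder makes concrete:

    #include <stdint.h>
    #include <stdio.h>

    #define PFERR_PRESENT (1u << 0)  /* page was present */
    #define PFERR_WRITE   (1u << 1)  /* access was a write */
    #define PFERR_USER    (1u << 2)  /* access from user mode */
    #define PFERR_RSVD    (1u << 3)  /* reserved bit set in a paging entry */
    #define PFERR_FETCH   (1u << 4)  /* instruction fetch */

    int main(void)
    {
            uint64_t error_code = PFERR_PRESENT | PFERR_WRITE; /* hypothetical */

            printf("%s %s fault on a %s page\n",
                   error_code & PFERR_USER ? "user" : "supervisor",
                   error_code & PFERR_WRITE ? "write" :
                   error_code & PFERR_FETCH ? "fetch" : "read",
                   error_code & PFERR_PRESENT ? "present" : "non-present");
            return 0;
    }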
2090 u32 payload = svm->vmcb->save.dr6 ^ DR6_ACTIVE_LOW;
2104 kvm_run->debug.arch.dr6 = svm->vmcb->save.dr6;
2105 kvm_run->debug.arch.dr7 = svm->vmcb->save.dr7;
2107 svm->vmcb->save.cs.base + svm->vmcb->save.rip;
2121 kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip;
2222 clear_page(svm->vmcb);
2232 u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */
2249 svm->next_rip = svm->vmcb->control.exit_info_2;
2273 struct vmcb *vmcb12;
2280 ret = kvm_vcpu_map(vcpu, gpa_to_gfn(svm->vmcb->save.rax), &map);
2292 svm_copy_vmloadsave_state(svm->vmcb, vmcb12);
2296 svm_copy_vmloadsave_state(vmcb12, svm->vmcb);
2387 u32 error_code = svm->vmcb->control.exit_info_1;
2413 if (svm->vmcb->save.rax & ~PAGE_MASK)
2490 trace_kvm_invlpga(to_svm(vcpu)->vmcb->save.rip, asid, gva);
2500 trace_kvm_skinit(to_svm(vcpu)->vmcb->save.rip, kvm_rax_read(vcpu));
2511 int int_type = svm->vmcb->control.exit_int_info &
2513 int int_vec = svm->vmcb->control.exit_int_info & SVM_EVTINJ_VEC_MASK;
2515 svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_TYPE_MASK;
2517 svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID;
2521 tss_selector = (u16)svm->vmcb->control.exit_info_1;
2523 if (svm->vmcb->control.exit_info_2 &
2526 else if (svm->vmcb->control.exit_info_2 &
2540 if (svm->vmcb->control.exit_info_2 &
2544 (u32)svm->vmcb->control.exit_info_2;
2605 kvm_mmu_invlpg(vcpu, to_svm(vcpu)->vmcb->control.exit_info_1);
2634 svm->vmcb->control.exit_code = SVM_EXIT_CR0_SEL_WRITE;
2653 if (unlikely((svm->vmcb->control.exit_info_1 & CR_VALID) == 0))
2656 reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
2657 if (svm->vmcb->control.exit_code == SVM_EXIT_CR0_SEL_WRITE)
2660 cr = svm->vmcb->control.exit_code - SVM_EXIT_READ_CR0;
2724 new_value = (unsigned long)svm->vmcb->control.exit_info_1;
2726 cr = svm->vmcb->control.exit_code - SVM_EXIT_CR0_WRITE_TRAP;
2780 reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
2781 dr = svm->vmcb->control.exit_code - SVM_EXIT_READ_DR0;
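The subtractions on lines 2660, 2726, and 2781 exploit the layout of the SVM exit codes: each control or debug register gets consecutive codes, so subtracting the base recovers the register number, and for the CR case on line 2660 also the direction, since writes start 16 codes above reads. A runnable model for that CR case (constants match the architectural exit codes; the DR path at line 2781 subtracts SVM_EXIT_READ_DR0 the same way):

    #include <stdio.h>

    #define SVM_EXIT_READ_CR0 0x000   /* reads are 0x000..0x00f,
                                       * writes are 0x010..0x01f */
    int main(void)
    {
            unsigned int exit_code = 0x014;  /* hypothetical: write to CR4 */
            unsigned int cr = exit_code - SVM_EXIT_READ_CR0;
            int write = cr >= 16;

            if (write)
                    cr -= 16;
            printf("CR%u %s intercept\n", cr, write ? "write" : "read");
            return 0;
    }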
2822 msr_info.data = to_svm(vcpu)->vmcb->control.exit_info_1 & ~EFER_SVME;
2915 msr_info->data = svm->vmcb->save.spec_ctrl;
3036 vmcb_mark_dirty(svm->vmcb, VMCB_NPT);
3047 svm->vmcb->save.spec_ctrl = data;
3189 if (to_svm(vcpu)->vmcb->control.exit_info_1)
3250 type = svm->vmcb->control.exit_info_2;
3251 gva = svm->vmcb->control.exit_info_1;
3333 struct vmcb_control_area *control = &svm->vmcb->control;
3334 struct vmcb_save_area *save = &svm->vmcb->save;
3495 struct vmcb_control_area *control = &to_svm(vcpu)->vmcb->control;
3512 u32 exit_code = svm->vmcb->control.exit_code;
3517 vcpu->arch.cr0 = svm->vmcb->save.cr0;
3519 vcpu->arch.cr3 = svm->vmcb->save.cr3;
3536 if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
3539 = svm->vmcb->control.exit_code;
3557 * If the previous vmrun of the vmcb occurred on a different physical
3558 * cpu, then mark the vmcb dirty and assign a new asid. Hardware's
3559 * vmcb clean bits are per logical CPU, as are KVM's asid assignments.
3563 vmcb_mark_all_dirty(svm->vmcb);
3579 svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
3596 return !!(svm->vmcb->control.int_ctl & V_NMI_PENDING_MASK);
3606 if (svm->vmcb->control.int_ctl & V_NMI_PENDING_MASK)
3609 svm->vmcb->control.int_ctl |= V_NMI_PENDING_MASK;
3610 vmcb_mark_dirty(svm->vmcb, VMCB_INTR);
3640 svm->vmcb->control.event_inj = vcpu->arch.interrupt.nr |
3722 return svm->vmcb->control.int_ctl & V_NMI_BLOCKING_MASK;
3733 svm->vmcb->control.int_ctl |= V_NMI_BLOCKING_MASK;
3735 svm->vmcb->control.int_ctl &= ~V_NMI_BLOCKING_MASK;
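Lines 3596-3735 are the vNMI bookkeeping: once V_NMI_ENABLE_MASK is set (line 1375), a pending NMI and NMI blocking live as int_ctl bits that hardware honors directly, instead of KVM tracking them in software. A sketch of the three helpers; the bit positions below are placeholders, the real masks live in the kernel's svm.h:

    #include <stdbool.h>
    #include <stdint.h>

    #define V_NMI_PENDING_MASK  (1u << 11)   /* placeholder position */
    #define V_NMI_BLOCKING_MASK (1u << 12)   /* placeholder position */

    struct fake_control { uint32_t int_ctl; };

    static bool is_vnmi_pending(const struct fake_control *c)
    {
            return c->int_ctl & V_NMI_PENDING_MASK;
    }

    /* Hardware latches a single virtual NMI; refuse a second one
     * while the first is still pending (cf. line 3606). */
    static bool set_vnmi_pending(struct fake_control *c)
    {
            if (c->int_ctl & V_NMI_PENDING_MASK)
                    return false;
            c->int_ctl |= V_NMI_PENDING_MASK;
            return true;
    }

    static void set_vnmi_blocked(struct fake_control *c, bool blocked)
    {
            if (blocked)
                    c->int_ctl |= V_NMI_BLOCKING_MASK;
            else
                    c->int_ctl &= ~V_NMI_BLOCKING_MASK;
    }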
3749 struct vmcb *vmcb = svm->vmcb;
3760 return vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK;
3781 struct vmcb *vmcb = svm->vmcb;
3801 return (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK);
3895 svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
3917 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
3957 invlpga(gva, svm->vmcb->control.asid);
3968 int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK;
3983 svm->vmcb->control.int_ctl &= ~V_TPR_MASK;
3984 svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK;
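Lines 3968-3984 mirror the guest's task priority between the local APIC's CR8 and the V_TPR field in int_ctl, whose low four bits hold the 4-bit TPR value (V_TPR_MASK is 0x0f). Both directions as pure helpers:

    #include <stdint.h>

    #define V_TPR_MASK 0x0fu

    /* After a VM-exit: recover the TPR the guest left in int_ctl. */
    static uint8_t tpr_from_int_ctl(uint32_t int_ctl)
    {
            return int_ctl & V_TPR_MASK;
    }

    /* Before VMRUN: fold the current CR8 value back in. */
    static uint32_t int_ctl_with_tpr(uint32_t int_ctl, uint8_t cr8)
    {
            return (int_ctl & ~V_TPR_MASK) | (cr8 & V_TPR_MASK);
    }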
4005 svm->vmcb->control.next_rip = svm->soft_int_next_rip;
4024 u32 exitintinfo = svm->vmcb->control.exit_int_info;
4070 u32 err = svm->vmcb->control.exit_int_info_err;
4091 struct vmcb_control_area *control = &svm->vmcb->control;
4106 if (to_svm(vcpu)->vmcb->control.exit_code == SVM_EXIT_MSR &&
4107 to_svm(vcpu)->vmcb->control.exit_info_1)
4136 svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
4137 svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
4138 svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
4146 if (svm->nmi_singlestep && svm->vmcb->control.event_inj) {
4160 if (unlikely(svm->asid != svm->vmcb->control.asid)) {
4161 svm->vmcb->control.asid = svm->asid;
4162 vmcb_mark_dirty(svm->vmcb, VMCB_ASID);
4164 svm->vmcb->save.cr2 = vcpu->arch.cr2;
4166 svm_hv_update_vp_id(svm->vmcb, vcpu);
4197 vcpu->arch.cr2 = svm->vmcb->save.cr2;
4198 vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
4199 vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
4200 vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
4204 if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
4212 if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
4223 svm->vmcb->control.exit_code != SVM_EXIT_ERR)
4229 svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
4230 vmcb_mark_all_clean(svm->vmcb);
4233 if (svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR)
4243 if (unlikely(svm->vmcb->control.exit_code ==
4264 svm->vmcb->control.nested_cr3 = __sme_set(root_hpa);
4265 vmcb_mark_dirty(svm->vmcb, VMCB_NPT);
4278 svm->vmcb->save.cr3 = cr3;
4279 vmcb_mark_dirty(svm->vmcb, VMCB_CR);
4446 struct vmcb *vmcb = svm->vmcb;
4497 vmcb->control.exit_info_1 = 1;
4499 vmcb->control.exit_info_1 = 0;
4536 vmcb->control.exit_info_1 = exit_info;
4537 vmcb->control.exit_info_2 = info->next_rip;
4547 vmcb->control.next_rip = info->next_rip;
4548 vmcb->control.exit_code = icpt_info.exit_code;
4560 if (to_svm(vcpu)->vmcb->control.exit_code == SVM_EXIT_INTR)
4624 svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
4625 svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
4626 svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
4639 * within 'struct vmcb'). Note: HSAVE area may also be used by
4647 BUILD_BUG_ON(offsetof(struct vmcb, save) != 0x400);
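The BUILD_BUG_ON at line 4647 pins the VMCB layout at compile time: the save area must start exactly 0x400 bytes into the VMCB page, since hardware (and, per the surrounding comment, the HSAVE usage) depends on that offset. A userspace analogue using C11 _Static_assert; the struct is a simplified stand-in, but the 0x400 offset is architectural:

    #include <stddef.h>

    struct fake_vmcb {
            unsigned char control[0x400]; /* control area fills the first 1 KiB */
            unsigned char save[0x400];    /* save area must begin at 0x400 */
    };

    _Static_assert(offsetof(struct fake_vmcb, save) == 0x400,
                   "VMCB save area must sit at offset 0x400");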
4660 struct vmcb *vmcb12;
4806 * read guest memory at CS:RIP and vmcb.GuestIntrBytes will incorrectly
4832 error_code = to_svm(vcpu)->vmcb->control.exit_info_1;