Lines matching refs:arch

281 vcpu->arch.regs_avail = ~VMX_REGS_LAZY_LOAD_SET;
287 vcpu->arch.regs_dirty = 0;
332 kvm_mmu_free_roots(vcpu->kvm, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);
368 cached_root = &vcpu->arch.mmu->prev_roots[i];
375 kvm_mmu_invalidate_addr(vcpu, vcpu->arch.mmu, addr, roots);
384 unsigned long exit_qualification = vcpu->arch.exit_qualification;
426 vcpu->arch.mmu = &vcpu->arch.guest_mmu;
428 vcpu->arch.mmu->get_guest_pgd = nested_ept_get_eptp;
429 vcpu->arch.mmu->inject_page_fault = nested_ept_inject_page_fault;
430 vcpu->arch.mmu->get_pdptr = kvm_pdptr_read;
432 vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
437 vcpu->arch.mmu = &vcpu->arch.root_mmu;
438 vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
859 if (CC(vcpu->arch.apic_base & X2APIC_ENABLE && e->index >> 8 == 0x8))
1103 vcpu->arch.cr3 = cr3;
2141 if (vcpu->arch.virtual_tsc_khz == 0)
2146 do_div(preemption_timeout, vcpu->arch.virtual_tsc_khz);
2158 return vmx->vcpu.arch.efer | (EFER_LMA | EFER_LME);
2160 return vmx->vcpu.arch.efer & ~(EFER_LMA | EFER_LME);
2210 vmcs_write32(NOTIFY_WINDOW, kvm->arch.notify_window);
2529 * vcpu->arch fields.
2554 kvm_set_dr(vcpu, 7, vcpu->arch.dr7);
2567 vcpu->arch.cr0_guest_owned_bits &= ~vmcs12->cr0_guest_host_mask;
2568 vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);
2573 vcpu->arch.pat = vmcs12->guest_ia32_pat;
2575 vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
2578 vcpu->arch.tsc_offset = kvm_calc_nested_tsc_offset(
2579 vcpu->arch.l1_tsc_offset,
2583 vcpu->arch.tsc_scaling_ratio = kvm_calc_nested_tsc_multiplier(
2584 vcpu->arch.l1_tsc_scaling_ratio,
2587 vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
2589 vmcs_write64(TSC_MULTIPLIER, vcpu->arch.tsc_scaling_ratio);
2607 vcpu->arch.efer = nested_vmx_calc_efer(vmx, vmcs12);
2609 vmx_set_efer(vcpu, vcpu->arch.efer);
2902 !!(vcpu->arch.efer & EFER_LMA)))
3124 vm_fail = __vmx_vcpu_run(vmx, (unsigned long *)&vcpu->arch.regs,
3198 if (!vcpu->arch.pdptrs_from_userspace &&
3205 if (CC(!load_pdptrs(vcpu, vcpu->arch.cr3)))
3367 u8 vppr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_PROCPRI);
3431 * nested_vmx_restore_host_state() to corrupt vcpu->arch.cr3. Stuffing
3432 * vmcs01.GUEST_CR3 results in the unwind naturally setting arch.cr3 to
3437 * KVM modifies vcpu->arch.cr3 if and only if the early hardware checks
3442 vmcs_writel(GUEST_CR3, vcpu->arch.cr3);
3530 vcpu->arch.tsc_offset -= vmcs12->tsc_offset;
3641 vmx->vcpu.arch.l1tf_flush_l1d = true;
3672 vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
3711 /*1*/ (vmcs_readl(GUEST_CR0) & vcpu->arch.cr0_guest_owned_bits) |
3714 vcpu->arch.cr0_guest_owned_bits));
3721 /*1*/ (vmcs_readl(GUEST_CR4) & vcpu->arch.cr4_guest_owned_bits) |
3724 vcpu->arch.cr4_guest_owned_bits));
3755 } else if (vcpu->arch.exception.injected) {
3756 nr = vcpu->arch.exception.vector;
3761 vcpu->arch.event_exit_inst_len;
3766 if (vcpu->arch.exception.has_error_code) {
3769 vcpu->arch.exception.error_code;
3773 } else if (vcpu->arch.nmi_injected) {
3776 } else if (vcpu->arch.interrupt.injected) {
3777 nr = vcpu->arch.interrupt.nr;
3780 if (vcpu->arch.interrupt.soft) {
3783 vcpu->arch.event_exit_inst_len;
3859 struct kvm_queued_exception *ex = &vcpu->arch.exception_vmexit;
3867 exit_qual = vcpu->arch.cr2;
3869 exit_qual = vcpu->arch.dr6;
3954 pending_dbg = vmx_get_pending_dbg_trap(&vcpu->arch.exception);
4056 struct kvm_lapic *apic = vcpu->arch.apic;
4079 if (vcpu->arch.mp_state != KVM_MP_STATE_INIT_RECEIVED)
4093 if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
4111 if (vcpu->arch.exception_vmexit.pending &&
4112 !vmx_is_low_priority_db_trap(&vcpu->arch.exception_vmexit)) {
4120 if (vcpu->arch.exception.pending &&
4121 !vmx_is_low_priority_db_trap(&vcpu->arch.exception)) {
4135 if (vcpu->arch.exception_vmexit.pending) {
4143 if (vcpu->arch.exception.pending) {
4156 if (vcpu->arch.smi_pending && !is_smm(vcpu)) {
4162 if (vcpu->arch.nmi_pending && !vmx_nmi_blocked(vcpu)) {
4175 vcpu->arch.nmi_pending = 0;
4202 value = ktime_to_ns(remaining) * vcpu->arch.virtual_tsc_khz;
4351 if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
4353 else if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED)
4395 vmcs12->guest_ia32_efer = vcpu->arch.efer;
4472 vcpu->arch.efer = vmcs12->host_ia32_efer;
4474 vcpu->arch.efer |= (EFER_LMA | EFER_LME);
4476 vcpu->arch.efer &= ~(EFER_LMA | EFER_LME);
4477 vmx_set_efer(vcpu, vcpu->arch.efer);
4491 vcpu->arch.cr0_guest_owned_bits = vmx_l1_guest_owned_cr0_bits();
4495 vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
4523 vcpu->arch.pat = vmcs12->host_ia32_pat;
4621 vcpu->arch.pat = vmcs_read64(GUEST_IA32_PAT);
4627 * and vcpu->arch.dr7 is not squirreled away before the
4642 vcpu->arch.cr0_guest_owned_bits = vmx_l1_guest_owned_cr0_bits();
4645 vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
4649 vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
4749 * VCPU_EXREG_PDPTR will be clobbered in arch/x86/kvm/vmx/vmx.h between
4762 vcpu->arch.tsc_offset = vcpu->arch.l1_tsc_offset;
4764 vcpu->arch.tsc_scaling_ratio = vcpu->arch.l1_tsc_scaling_ratio;
4803 vcpu->arch.nmi_injected = false;
4823 vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
4825 vmcs_write64(TSC_MULTIPLIER, vcpu->arch.tsc_scaling_ratio);
4861 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
5256 kvm_mmu_free_roots(vcpu->kvm, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);
5714 mmu = &vcpu->arch.guest_mmu;
5832 kvm_mmu_free_guest_mode_roots(vcpu->kvm, &vcpu->arch.root_mmu);
6161 return vcpu->arch.apf.host_apf_flags ||