Lines Matching refs:arch

408 	if ((vcpu->arch.arch_capabilities & ARCH_CAP_FB_CLEAR) ||
409 	    ((vcpu->arch.arch_capabilities & ARCH_CAP_MDS_NO) &&
410 	     (vcpu->arch.arch_capabilities & ARCH_CAP_TAA_NO) &&
411 	     (vcpu->arch.arch_capabilities & ARCH_CAP_PSDP_NO) &&
412 	     (vcpu->arch.arch_capabilities & ARCH_CAP_FBSDP_NO) &&
413 	     (vcpu->arch.arch_capabilities & ARCH_CAP_SBDR_SSDP_NO)))
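The hits at 408-413 form a single predicate over the guest's view of MSR_IA32_ARCH_CAPABILITIES. A minimal standalone sketch of that condition, with the ARCH_CAP_* bit positions copied from the kernel's msr-index.h; the helper name and its reading (guest either sees FB_CLEAR or is immune to all fill-buffer bugs, so it has no use for VERW) are my interpretation, not the kernel's code:

    #include <stdbool.h>
    #include <stdint.h>

    #define ARCH_CAP_MDS_NO        (1ULL << 5)
    #define ARCH_CAP_TAA_NO        (1ULL << 8)
    #define ARCH_CAP_SBDR_SSDP_NO  (1ULL << 13)
    #define ARCH_CAP_FBSDP_NO      (1ULL << 14)
    #define ARCH_CAP_PSDP_NO       (1ULL << 15)
    #define ARCH_CAP_FB_CLEAR      (1ULL << 17)

    /*
     * True when the guest either already sees FB_CLEAR, or reports itself
     * immune to every fill-buffer class bug; in either case it has no
     * reason to execute VERW. Helper name is mine, for illustration.
     */
    static bool guest_has_no_verw_use_case(uint64_t arch_capabilities)
    {
    	return (arch_capabilities & ARCH_CAP_FB_CLEAR) ||
    	       ((arch_capabilities & ARCH_CAP_MDS_NO) &&
    	        (arch_capabilities & ARCH_CAP_TAA_NO) &&
    	        (arch_capabilities & ARCH_CAP_PSDP_NO) &&
    	        (arch_capabilities & ARCH_CAP_FBSDP_NO) &&
    	        (arch_capabilities & ARCH_CAP_SBDR_SSDP_NO));
    }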
934 if (vcpu->arch.xfd_no_write_intercept)
1107 u64 guest_efer = vmx->vcpu.arch.efer;
1132 (enable_ept && ((vmx->vcpu.arch.efer ^ host_efer) & EFER_NX))) {
1774 (!vcpu->arch.exception.pending ||
1775 vcpu->arch.exception.vector == DB_VECTOR) &&
1776 (!vcpu->arch.exception_vmexit.pending ||
1777 vcpu->arch.exception_vmexit.vector == DB_VECTOR)) {
1806 struct kvm_queued_exception *ex = &vcpu->arch.exception;
1830 inc_eip = vcpu->arch.event_exit_inst_len;
1839 vmx->vcpu.arch.event_exit_inst_len);
1877 (vmx->vcpu.arch.efer & EFER_SCE);
1927 vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
1932 vmcs_write64(TSC_MULTIPLIER, vcpu->arch.tsc_scaling_ratio);
2012 !(vcpu->arch.arch_capabilities & ARCH_CAP_TSX_CTRL_MSR))
2049 msr_info->data = vcpu->arch.mcg_ext_ctl;
2208 vcpu->arch.xfd_no_write_intercept = true;
2311 !(vcpu->arch.arch_capabilities & ARCH_CAP_TSX_CTRL_MSR))
2334 vcpu->arch.mcg_ext_ctl = data;
2480 vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP);
2483 vcpu->arch.regs[VCPU_REGS_RIP] = vmcs_readl(GUEST_RIP);
2490 guest_owned_bits = vcpu->arch.cr0_guest_owned_bits;
2492 vcpu->arch.cr0 &= ~guest_owned_bits;
2493 vcpu->arch.cr0 |= vmcs_readl(GUEST_CR0) & guest_owned_bits;
2501 vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
2504 guest_owned_bits = vcpu->arch.cr4_guest_owned_bits;
2506 vcpu->arch.cr4 &= ~guest_owned_bits;
2507 vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & guest_owned_bits;
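Lines 2490-2507 repeat one pattern for CR0 and CR4: only the guest-owned bits are refreshed from the VMCS, everything else keeps KVM's cached value. A self-contained sketch of the merge; the function and parameter names are mine:

    #include <stdint.h>

    /*
     * Refresh a cached control register: bits the guest may write without
     * a VM-exit are taken from the VMCS copy, all other bits keep KVM's
     * cached (authoritative) value.
     */
    static uint64_t refresh_cr(uint64_t cached_cr, uint64_t vmcs_cr,
    			   uint64_t guest_owned_bits)
    {
    	cached_cr &= ~guest_owned_bits;           /* drop stale guest-owned bits */
    	cached_cr |= vmcs_cr & guest_owned_bits;  /* re-read them from the VMCS  */
    	return cached_cr;
    }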
3128 vcpu->arch.efer = efer;
3159 vmx_set_efer(vcpu, vcpu->arch.efer | EFER_LMA);
3164 vmx_set_efer(vcpu, vcpu->arch.efer & ~EFER_LMA);
3201 struct kvm_mmu *mmu = vcpu->arch.mmu;
3238 struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
3253 struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
3305 vcpu->arch.cr0 = cr0;
3309 if (vcpu->arch.efer & EFER_LME) {
3353 /* Note, vmx_set_cr4() consumes the new vcpu->arch.cr0. */
3358 * When !CR0_PG -> CR0_PG, vcpu->arch.cr3 becomes active, but
3365 /* depends on vcpu->arch.cr0 to be set to a new value */
3407 guest_cr3 = vcpu->arch.cr3;
3465 vcpu->arch.cr4 = cr4;
3599 * IA32 arch specifies that at the time of processor reset the
4081 msr_bitmap[read_idx] = ~kvm_lapic_readable_reg_mask(vcpu->arch.apic);
4259 if (!vcpu->arch.apic->apicv_active)
4371 vcpu->arch.cr4_guest_owned_bits = KVM_POSSIBLE_CR4_GUEST_BITS &
4372 ~vcpu->arch.cr4_guest_rsvd_bits;
4374 vcpu->arch.cr4_guest_owned_bits &= ~X86_CR4_TLBFLUSH_BITS;
4375 vcpu->arch.cr4_guest_owned_bits &= ~X86_CR4_PDPTR_BITS;
4378 vcpu->arch.cr4_guest_owned_bits &=
4380 vmcs_writel(CR4_GUEST_HOST_MASK, ~vcpu->arch.cr4_guest_owned_bits);
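Note the complement at 4380: the VMCS guest/host mask is inverted relative to the guest-owned bits. A set bit means the host owns it and a guest write causes a VM-exit; a clear bit lets the guest write it directly. A toy illustration (the example guest-owned bits are made up):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
    	/* Illustrative only: pretend CR4.TSD and CR4.DE are guest-owned. */
    	uint64_t cr4_guest_owned_bits = (1ULL << 2) | (1ULL << 3);

    	/* 1 = host-owned (guest write traps), 0 = guest-owned. */
    	uint64_t cr4_guest_host_mask = ~cr4_guest_owned_bits;

    	printf("CR4_GUEST_HOST_MASK = %#llx\n",
    	       (unsigned long long)cr4_guest_host_mask);
    	return 0;
    }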
4487 if (vmx->vcpu.arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)
4672 if (!vcpu->kvm->arch.bus_lock_detection_enabled)
4683 return get_order(kvm->arch.max_vcpu_ids * sizeof(*to_kvm_vmx(kvm)->pid_table));
4751 vmcs_write16(LAST_PID_POINTER_INDEX, kvm->arch.max_vcpu_ids - 1);
4761 vmcs_write32(NOTIFY_WINDOW, kvm->arch.notify_window);
4783 vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
4790 vmx->vcpu.arch.cr0_guest_owned_bits = vmx_l1_guest_owned_cr0_bits();
4791 vmcs_writel(CR0_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr0_guest_owned_bits);
4824 __pa(vmx->vcpu.arch.apic->regs));
4847 vcpu->arch.microcode_version = 0x100000000ULL;
4937 int irq = vcpu->arch.interrupt.nr;
4939 trace_kvm_inj_virq(irq, vcpu->arch.interrupt.soft, reinjected);
4944 if (vcpu->arch.interrupt.soft)
4945 inc_eip = vcpu->arch.event_exit_inst_len;
4950 if (vcpu->arch.interrupt.soft) {
4953 vmx->vcpu.arch.event_exit_inst_len);
5110 to_vmx(vcpu)->vcpu.arch.event_exit_inst_len =
5140 if (vcpu->arch.halt_request) {
5141 vcpu->arch.halt_request = 0;
5249 vcpu->run->internal.data[3] = vcpu->arch.last_vmentry_cpu;
5255 if (enable_ept && !vcpu->arch.apf.host_apf_flags) {
5310 kvm_run->debug.arch.dr6 = dr6 | DR6_ACTIVE_LOW;
5311 kvm_run->debug.arch.dr7 = vmcs_readl(GUEST_DR7);
5319 vmx->vcpu.arch.event_exit_inst_len =
5322 kvm_run->debug.arch.pc = kvm_get_linear_rip(vcpu);
5323 kvm_run->debug.arch.exception = ex_no;
5553 vcpu->run->debug.arch.dr6 = DR6_BD | DR6_ACTIVE_LOW;
5554 vcpu->run->debug.arch.dr7 = dr7;
5555 vcpu->run->debug.arch.pc = kvm_get_linear_rip(vcpu);
5556 vcpu->run->debug.arch.exception = DB_VECTOR;
5573 vcpu->arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT;
5594 get_debugreg(vcpu->arch.db[0], 0);
5595 get_debugreg(vcpu->arch.db[1], 1);
5596 get_debugreg(vcpu->arch.db[2], 2);
5597 get_debugreg(vcpu->arch.db[3], 3);
5598 get_debugreg(vcpu->arch.dr6, 6);
5599 vcpu->arch.dr7 = vmcs_readl(GUEST_DR7);
5601 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT;
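Lines 5594-5601 are the read-back half of the KVM_DEBUGREG_WONT_EXIT optimization set at 5573: once the guest has been allowed to touch the debug registers directly, the live hardware values are the source of truth and must be copied back into the vCPU state. A sketch of the sync, with read_dr() as a hypothetical stand-in for the kernel's get_debugreg():

    #include <stdint.h>

    struct vcpu_debug {
    	uint64_t db[4];	/* DR0-DR3 breakpoint addresses */
    	uint64_t dr6;	/* debug status */
    	uint64_t dr7;	/* debug control; virtualized via GUEST_DR7 */
    };

    /* Hypothetical accessor; stubbed so the sketch links. The kernel
     * reads the real DRn via the get_debugreg() macro. */
    static uint64_t read_dr(int n)
    {
    	(void)n;
    	return 0;
    }

    static void sync_dirty_debug_regs(struct vcpu_debug *d, uint64_t guest_dr7)
    {
    	for (int i = 0; i < 4; i++)
    		d->db[i] = read_dr(i);
    	d->dr6 = read_dr(6);
    	d->dr7 = guest_dr7;	/* DR7 comes from the VMCS, not hardware */
    }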
5708 vcpu->arch.nmi_injected = false;
5784 vcpu->arch.exit_qualification = exit_qualification;
5838 (kvm_is_exception_pending(vcpu) || vcpu->arch.exception.injected);
5865 if (vcpu->arch.halt_request) {
5866 vcpu->arch.halt_request = 0;
6072 if (vcpu->kvm->arch.notify_vmexit_flags & KVM_X86_NOTIFY_VMEXIT_USER ||
6264 vmx->loaded_vmcs->vmcs, vcpu->arch.last_vmentry_cpu);
6303 vcpu->arch.efer | (EFER_LMA | EFER_LME));
6306 vcpu->arch.efer & ~(EFER_LMA | EFER_LME));
6489 vcpu->run->fail_entry.cpu = vcpu->arch.last_vmentry_cpu;
6498 vcpu->run->fail_entry.cpu = vcpu->arch.last_vmentry_cpu;
6522 vcpu->run->internal.data[2] = vcpu->arch.exit_qualification;
6527 vcpu->run->internal.data[ndata++] = vcpu->arch.last_vmentry_cpu;
6537 vcpu->arch.nmi_pending) {
6587 vcpu->run->internal.data[1] = vcpu->arch.last_vmentry_cpu;
6635 flush_l1d = vcpu->arch.l1tf_flush_l1d;
6636 vcpu->arch.l1tf_flush_l1d = false;
6951 if (vcpu->arch.guest_fpu.fpstate->xfd)
6952 rdmsrl(MSR_IA32_XFD_ERR, vcpu->arch.guest_fpu.xfd_err);
6961 vmx->vcpu.arch.apf.host_apf_flags = kvm_read_and_reset_apf_flags();
6984 vcpu->arch.at_instruction_boundary = true;
7077 vcpu->arch.nmi_injected = false;
7091 vcpu->arch.nmi_injected = true;
7100 vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field);
7110 vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field);
7252 if (vcpu->arch.cr2 != native_read_cr2())
7253 native_write_cr2(vcpu->arch.cr2);
7255 vmx->fail = __vmx_vcpu_run(vmx, (unsigned long *)&vcpu->arch.regs,
7258 vcpu->arch.cr2 = native_read_cr2();
7259 vcpu->arch.regs_avail &= ~VMX_REGS_LAZY_LOAD_SET;
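Lines 7252-7258 bracket the actual VM-entry: CR2 is written only when it differs, presumably to skip a needless register write in the common unchanged case, and is read back afterwards because a guest page fault rewrites it. A sketch of that bracket, where hw_cr2 is a plain variable standing in for the physical %cr2:

    #include <stdint.h>

    static uint64_t hw_cr2;	/* stand-in for the physical %cr2 */

    static void run_guest(uint64_t *vcpu_cr2)
    {
    	if (*vcpu_cr2 != hw_cr2)	/* skip the write when unchanged */
    		hw_cr2 = *vcpu_cr2;

    	/* ... __vmx_vcpu_run() would enter the guest here ... */

    	*vcpu_cr2 = hw_cr2;	/* a guest #PF may have changed CR2 */
    }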
7326 vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);
7328 vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
7329 vcpu->arch.regs_dirty = 0;
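Lines 7326-7329 are the opposite direction of the caching seen at 2480-2483: software-cached RSP/RIP are flushed into the VMCS before entry (the guarding dirty-bit checks sit on the unmatched lines in between) and the dirty mask is then cleared. A small model of that handshake; the bit layout and names are invented for the sketch:

    #include <stdint.h>

    #define REG_DIRTY_RSP	(1U << 0)	/* invented bit layout */
    #define REG_DIRTY_RIP	(1U << 1)

    struct reg_cache {
    	uint64_t rsp, rip;
    	uint32_t dirty;		/* cached values the VMCS doesn't have yet */
    };

    /* Stand-in for vmcs_writel(GUEST_RSP/GUEST_RIP, ...). */
    static void vmcs_write(uint64_t *field, uint64_t val)
    {
    	*field = val;
    }

    static void flush_regs(struct reg_cache *c,
    		       uint64_t *vmcs_rsp, uint64_t *vmcs_rip)
    {
    	if (c->dirty & REG_DIRTY_RSP)
    		vmcs_write(vmcs_rsp, c->rsp);
    	if (c->dirty & REG_DIRTY_RIP)
    		vmcs_write(vmcs_rip, c->rip);
    	c->dirty = 0;	/* VMCS and cache now agree */
    }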
7351 if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT))
7352 set_debugreg(vcpu->arch.dr6, 6);
7564 kvm->arch.pause_in_guest = true;
8044 struct kvm_timer *ktimer = &vcpu->arch.apic->lapic_timer;
8059 if (vcpu->arch.l1_tsc_scaling_ratio != kvm_caps.default_tsc_scaling_ratio &&
8062 vcpu->arch.l1_tsc_scaling_ratio, &delta_tsc))
8116 if (vcpu->arch.mcg_cap & MCG_LMCE_P)
8376 (unsigned long *)&vcpu->arch.pmu.global_status);