Lines Matching refs:arch

407 	if ((vcpu->arch.arch_capabilities & ARCH_CAP_FB_CLEAR) ||
408 	    ((vcpu->arch.arch_capabilities & ARCH_CAP_MDS_NO) &&
409 	     (vcpu->arch.arch_capabilities & ARCH_CAP_TAA_NO) &&
410 	     (vcpu->arch.arch_capabilities & ARCH_CAP_PSDP_NO) &&
411 	     (vcpu->arch.arch_capabilities & ARCH_CAP_FBSDP_NO) &&
412 	     (vcpu->arch.arch_capabilities & ARCH_CAP_SBDR_SSDP_NO)))
615 &vcpu->kvm->arch.hyperv.hv_pa_pg;
1094 u64 guest_efer = vmx->vcpu.arch.efer;
1119 (enable_ept && ((vmx->vcpu.arch.efer ^ host_efer) & EFER_NX))) {
1468 vmx->current_tsc_ratio != vcpu->arch.tsc_scaling_ratio)
1709 (!vcpu->arch.exception.pending ||
1710 vcpu->arch.exception.nr == DB_VECTOR))
1738 unsigned nr = vcpu->arch.exception.nr;
1739 bool has_error_code = vcpu->arch.exception.has_error_code;
1740 u32 error_code = vcpu->arch.exception.error_code;
1763 inc_eip = vcpu->arch.event_exit_inst_len;
1772 vmx->vcpu.arch.event_exit_inst_len);
1811 if (is_long_mode(&vmx->vcpu) && (vmx->vcpu.arch.efer & EFER_SCE)) {
1845 vcpu->arch.tsc_offset - g_tsc_offset,
1912 !(vcpu->arch.arch_capabilities & ARCH_CAP_TSX_CTRL_MSR))
1949 msr_info->data = vcpu->arch.mcg_ext_ctl;
2154 !(vcpu->arch.arch_capabilities & ARCH_CAP_TSX_CTRL_MSR))
2196 vcpu->arch.pat = data;
2210 vcpu->arch.mcg_ext_ctl = data;
2320 vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP);
2323 vcpu->arch.regs[VCPU_REGS_RIP] = vmcs_readl(GUEST_RIP);
2330 guest_owned_bits = vcpu->arch.cr0_guest_owned_bits;
2332 vcpu->arch.cr0 &= ~guest_owned_bits;
2333 vcpu->arch.cr0 |= vmcs_readl(GUEST_CR0) & guest_owned_bits;
2338 vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
2341 guest_owned_bits = vcpu->arch.cr4_guest_owned_bits;
2343 vcpu->arch.cr4 &= ~guest_owned_bits;
2344 vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & guest_owned_bits;
2934 vcpu->arch.efer = efer;
2963 vmx_set_efer(vcpu, vcpu->arch.efer | EFER_LMA);
2969 vmx_set_efer(vcpu, vcpu->arch.efer & ~EFER_LMA);
3006 struct kvm_mmu *mmu = vcpu->arch.mmu;
3043 struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
3058 struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
3096 if (vcpu->arch.efer & EFER_LME) {
3141 vcpu->arch.cr0 = cr0;
3148 vcpu->arch.cr0 = cr0;
3151 /* depends on vcpu->arch.cr0 to be set to a new value */
3199 else if (test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail))
3200 guest_cr3 = vcpu->arch.cr3;
3256 vcpu->arch.cr4 = cr4;
3387 * IA32 arch specifies that at the time of processor reset the
3722 if (kvm->arch.apic_access_page_done)
3740 kvm->arch.apic_access_page_done = true;
4116 if (!vcpu->arch.apicv_active)
4199 vcpu->arch.cr4_guest_owned_bits = KVM_POSSIBLE_CR4_GUEST_BITS &
4200 ~vcpu->arch.cr4_guest_rsvd_bits;
4202 vcpu->arch.cr4_guest_owned_bits &= ~X86_CR4_PGE;
4204 vcpu->arch.cr4_guest_owned_bits &=
4206 vmcs_writel(CR4_GUEST_HOST_MASK, ~vcpu->arch.cr4_guest_owned_bits);
4249 if (vmx->vcpu.arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)
4373 vcpu->arch.xsaves_enabled = xsaves_enabled;
4475 vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
4482 vmx->vcpu.arch.cr0_guest_owned_bits = KVM_POSSIBLE_CR0_GUEST_BITS;
4483 vmcs_writel(CR0_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr0_guest_owned_bits);
4520 vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val();
4585 __pa(vcpu->arch.apic->regs));
4592 vmx->vcpu.arch.cr0 = cr0;
4626 int irq = vcpu->arch.interrupt.nr;
4633 if (vcpu->arch.interrupt.soft)
4634 inc_eip = vcpu->arch.event_exit_inst_len;
4639 if (vcpu->arch.interrupt.soft) {
4642 vmx->vcpu.arch.event_exit_inst_len);
4797 to_vmx(vcpu)->vcpu.arch.event_exit_inst_len =
4827 if (vcpu->arch.halt_request) {
4828 vcpu->arch.halt_request = 0;
4939 vcpu->run->internal.data[3] = vcpu->arch.last_vmentry_cpu;
4945 if (enable_ept && !vcpu->arch.apf.host_apf_flags) {
4998 kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1 | DR6_RTM;
4999 kvm_run->debug.arch.dr7 = vmcs_readl(GUEST_DR7);
5007 vmx->vcpu.arch.event_exit_inst_len =
5011 kvm_run->debug.arch.pc = vmcs_readl(GUEST_CS_BASE) + rip;
5012 kvm_run->debug.arch.exception = ex_no;
5135 WARN_ON(!(vcpu->arch.cr4 & X86_CR4_UMIP));
5242 vcpu->run->debug.arch.dr6 = DR6_BD | DR6_RTM | DR6_FIXED_1;
5243 vcpu->run->debug.arch.dr7 = dr7;
5244 vcpu->run->debug.arch.pc = kvm_get_linear_rip(vcpu);
5245 vcpu->run->debug.arch.exception = DB_VECTOR;
5262 vcpu->arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT;
5282 get_debugreg(vcpu->arch.db[0], 0);
5283 get_debugreg(vcpu->arch.db[1], 1);
5284 get_debugreg(vcpu->arch.db[2], 2);
5285 get_debugreg(vcpu->arch.db[3], 3);
5286 get_debugreg(vcpu->arch.dr6, 6);
5287 vcpu->arch.dr7 = vmcs_readl(GUEST_DR7);
5289 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT;
5417 vcpu->arch.nmi_injected = false;
5495 vcpu->arch.exit_qualification = exit_qualification;
5559 vcpu->arch.exception.pending) {
5567 if (vcpu->arch.halt_request) {
5568 vcpu->arch.halt_request = 0;
6131 vcpu->run->fail_entry.cpu = vcpu->arch.last_vmentry_cpu;
6140 vcpu->run->fail_entry.cpu = vcpu->arch.last_vmentry_cpu;
6163 vcpu->run->internal.data[2] = vcpu->arch.exit_qualification;
6168 vcpu->run->internal.data[ndata++] = vcpu->arch.last_vmentry_cpu;
6178 vcpu->arch.nmi_pending) {
6228 vcpu->run->internal.data[1] = vcpu->arch.last_vmentry_cpu;
6258 flush_l1d = vcpu->arch.l1tf_flush_l1d;
6259 vcpu->arch.l1tf_flush_l1d = false;
6453 WARN_ON(!vcpu->arch.apicv_active);
6516 vmx->vcpu.arch.apf.host_apf_flags = kvm_read_and_reset_apf_flags();
6536 vcpu->arch.at_instruction_boundary = true;
6622 vcpu->arch.nmi_injected = false;
6636 vcpu->arch.nmi_injected = true;
6645 vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field);
6655 vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field);
6806 if (vcpu->arch.cr2 != native_read_cr2())
6807 native_write_cr2(vcpu->arch.cr2);
6809 vmx->fail = __vmx_vcpu_run(vmx, (unsigned long *)&vcpu->arch.regs,
6812 vcpu->arch.cr2 = native_read_cr2();
6865 vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);
6867 vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
6917 current_evmcs->hv_vp_id = vcpu->arch.hyperv.vp_index;
6976 if (vcpu->arch.apicv_active)
7122 vcpu->arch.microcode_version = 0x100000000ULL;
7153 kvm->arch.pause_in_guest = true;
7409 vcpu->arch.xsaves_enabled = false;
7610 struct kvm_timer *ktimer = &vcpu->arch.apic->lapic_timer;
7625 if (vcpu->arch.tsc_scaling_ratio != kvm_default_tsc_scaling_ratio &&
7628 vcpu->arch.tsc_scaling_ratio, &delta_tsc))
7704 if (vcpu->arch.mcg_cap & MCG_LMCE_P)