Lines matching refs:arch — cross-reference hits on the arch field in KVM's x86 code; the number at the start of each entry is that line's number in the source file.

359 vcpu->arch.apf.gfns[i] = ~0;
476 return vcpu->arch.apic_base;
586 vcpu->arch.dr6 &= ~DR_TRAP_BITS;
603 vcpu->arch.dr6 |= DR6_ACTIVE_LOW;
604 vcpu->arch.dr6 |= ex->payload;
605 vcpu->arch.dr6 ^= ex->payload & DR6_ACTIVE_LOW;
613 vcpu->arch.dr6 &= ~BIT(12);
616 vcpu->arch.cr2 = ex->payload;
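
The dr6 manipulation at lines 603-605 is KVM's payload merge for #DB: several DR6 bits are active-low, so a payload bit that is asserted must end up clearing the corresponding register bit. A minimal user-space sketch of the same bit math, assuming DR6_ACTIVE_LOW carries the kernel's value of 0xffff0ff0:

#include <stdint.h>
#include <stdio.h>

#define DR6_ACTIVE_LOW 0xffff0ff0ULL	/* assumption: mirrors the kernel's define */

/* Merge a #DB payload into DR6. The first OR starts from the "nothing
 * asserted" state, the second sets every asserted payload bit, and the
 * XOR flips the asserted bits that are active-low back to 0. */
static uint64_t dr6_merge_payload(uint64_t dr6, uint64_t payload)
{
	dr6 |= DR6_ACTIVE_LOW;
	dr6 |= payload;
	dr6 ^= payload & DR6_ACTIVE_LOW;
	return dr6;
}

int main(void)
{
	/* payload asserts B0 (bit 0, active-high) and RTM (bit 16, active-low) */
	uint64_t dr6 = dr6_merge_payload(0, (1ULL << 0) | (1ULL << 16));

	printf("%#llx\n", (unsigned long long)dr6);	/* 0xfffe0ff1 */
	return 0;
}
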
629 struct kvm_queued_exception *ex = &vcpu->arch.exception_vmexit;
669 if (!vcpu->arch.exception.pending && !vcpu->arch.exception.injected) {
680 vcpu->arch.exception.injected = true;
690 vcpu->arch.exception.pending = true;
691 vcpu->arch.exception.injected = false;
693 vcpu->arch.exception.has_error_code = has_error;
694 vcpu->arch.exception.vector = nr;
695 vcpu->arch.exception.error_code = error_code;
696 vcpu->arch.exception.has_payload = has_payload;
697 vcpu->arch.exception.payload = payload;
700 &vcpu->arch.exception);
705 prev_nr = vcpu->arch.exception.vector;
719 vcpu->arch.exception.injected = false;
720 vcpu->arch.exception.pending = false;
802 fault_mmu = fault->nested_page_fault ? vcpu->arch.mmu :
803 vcpu->arch.walk_mmu;
820 atomic_inc(&vcpu->arch.nmi_queued);
860 return vcpu->arch.reserved_gpa_bits | rsvd_bits(5, 8) | rsvd_bits(1, 2);
868 struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
907 vcpu->arch.pdptrs_from_userspace = false;
984 if ((vcpu->arch.efer & EFER_LME) && !is_paging(vcpu) &&
995 if (!(vcpu->arch.efer & EFER_LME) && (cr0 & X86_CR0_PG) &&
1020 if (vcpu->arch.guest_state_protected)
1025 if (vcpu->arch.xcr0 != host_xcr0)
1026 xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);
1029 vcpu->arch.ia32_xss != host_xss)
1030 wrmsrl(MSR_IA32_XSS, vcpu->arch.ia32_xss);
1034 vcpu->arch.pkru != vcpu->arch.host_pkru &&
1035 ((vcpu->arch.xcr0 & XFEATURE_MASK_PKRU) ||
1037 write_pkru(vcpu->arch.pkru);
1043 if (vcpu->arch.guest_state_protected)
1047 ((vcpu->arch.xcr0 & XFEATURE_MASK_PKRU) ||
1049 vcpu->arch.pkru = rdpkru();
1050 if (vcpu->arch.pkru != vcpu->arch.host_pkru)
1051 write_pkru(vcpu->arch.host_pkru);
1056 if (vcpu->arch.xcr0 != host_xcr0)
1060 vcpu->arch.ia32_xss != host_xss)
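
Lines 1020-1060 are the lazy swap of guest and host xstate around VM entry and exit; the PKRU half (1034-1051) only touches the register when the values actually differ, since writing PKRU on every switch would be wasted work. A hedged user-space sketch of that pattern, with read_pkru()/write_pkru() stubbed by a plain variable standing in for the real register, and guest_uses_pku abstracting KVM's XCR0/CR4.PKE checks:

#include <stdint.h>
#include <stdio.h>

static uint32_t hw_pkru;			/* stand-in for the CPU register */
static uint32_t read_pkru(void)    { return hw_pkru; }
static void write_pkru(uint32_t v) { hw_pkru = v; }

struct vcpu { uint32_t pkru, host_pkru; int guest_uses_pku; };

static void load_guest_pkru(struct vcpu *v)	/* before entering the guest */
{
	if (v->guest_uses_pku && v->pkru != v->host_pkru)
		write_pkru(v->pkru);		/* only if it actually differs */
}

static void save_guest_pkru(struct vcpu *v)	/* after leaving the guest */
{
	if (!v->guest_uses_pku)
		return;
	v->pkru = read_pkru();			/* the guest may have changed it */
	if (v->pkru != v->host_pkru)
		write_pkru(v->host_pkru);
}

int main(void)
{
	struct vcpu v = { .pkru = 0x4, .host_pkru = 0x0, .guest_uses_pku = 1 };

	load_guest_pkru(&v);	/* guest value now live */
	hw_pkru = 0xc;		/* pretend the guest wrote PKRU */
	save_guest_pkru(&v);
	printf("guest=%#x hw=%#x\n", v.pkru, hw_pkru);	/* guest=0xc hw=0x0 */
	return 0;
}
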
1070 return vcpu->arch.guest_supported_xcr0 & XFEATURE_MASK_USER_DYNAMIC;
1077 u64 old_xcr0 = vcpu->arch.xcr0;
1093 valid_bits = vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FP;
1112 vcpu->arch.xcr0 = xcr0;
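
Lines 1070-1112 belong to __kvm_set_xcr: a guest XCR0 write is accepted only if x87 state stays enabled, every requested bit is one the guest may use, and dependent features come with their prerequisites. A simplified sketch of those checks (real KVM also validates the MPX and AVX-512 groups; the XFEATURE bit positions below are the architectural ones):

#include <stdint.h>
#include <stdio.h>

#define XFEATURE_MASK_FP	(1ULL << 0)
#define XFEATURE_MASK_SSE	(1ULL << 1)
#define XFEATURE_MASK_YMM	(1ULL << 2)

static int xcr0_valid(uint64_t xcr0, uint64_t guest_supported_xcr0)
{
	uint64_t valid_bits = guest_supported_xcr0 | XFEATURE_MASK_FP;

	if (!(xcr0 & XFEATURE_MASK_FP))		/* x87 can never be disabled */
		return 0;
	if (xcr0 & ~valid_bits)			/* bit the guest may not use */
		return 0;
	if ((xcr0 & XFEATURE_MASK_YMM) && !(xcr0 & XFEATURE_MASK_SSE))
		return 0;			/* AVX state depends on SSE state */
	return 1;
}

int main(void)
{
	uint64_t sup = XFEATURE_MASK_FP | XFEATURE_MASK_SSE | XFEATURE_MASK_YMM;

	printf("%d %d\n", xcr0_valid(0x7, sup), xcr0_valid(0x5, sup));	/* 1 0 */
	return 0;
}
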
1137 if (cr4 & vcpu->arch.cr4_guest_rsvd_bits)
1225 struct kvm_mmu *mmu = vcpu->arch.mmu;
1296 vcpu->arch.cr3 = cr3;
1322 vcpu->arch.cr8 = cr8;
1332 return vcpu->arch.cr8;
1342 vcpu->arch.eff_db[i] = vcpu->arch.db[i];
1351 dr7 = vcpu->arch.guest_debug_dr7;
1353 dr7 = vcpu->arch.dr7;
1355 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_BP_ENABLED;
1357 vcpu->arch.switch_db_regs |= KVM_DEBUGREG_BP_ENABLED;
1375 size_t size = ARRAY_SIZE(vcpu->arch.db);
1379 vcpu->arch.db[array_index_nospec(dr, size)] = val;
1381 vcpu->arch.eff_db[dr] = val;
1387 vcpu->arch.dr6 = (val & DR6_VOLATILE) | kvm_dr6_fixed(vcpu);
1393 vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1;
1404 size_t size = ARRAY_SIZE(vcpu->arch.db);
1408 *val = vcpu->arch.db[array_index_nospec(dr, size)];
1412 *val = vcpu->arch.dr6;
1416 *val = vcpu->arch.dr7;
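
The db[] accesses at lines 1379 and 1408 go through array_index_nospec() so that a mispredicted bounds check cannot speculatively index past the four debug registers. A user-space sketch of the generic mask trick behind it (mirroring the kernel's fallback array_index_mask_nospec(); it assumes index and size stay below LONG_MAX and that right-shifting a negative long is arithmetic, as the kernel does):

#include <stdio.h>

/* Branchlessly compute ~0UL when index < size, 0 otherwise, so the
 * masked index is in bounds even on a speculative wrong path. */
static unsigned long index_mask_nospec(unsigned long index, unsigned long size)
{
	return ~(long)(index | (size - 1UL - index)) >> (sizeof(long) * 8 - 1);
}

int main(void)
{
	unsigned long db[4] = { 1, 2, 3, 4 };
	unsigned long dr = 2;
	unsigned long safe = dr & index_mask_nospec(dr, 4);

	printf("%lu\n", db[safe]);	/* in-bounds index kept; 5 would clamp to 0 */
	return 0;
}
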
1757 u64 old_efer = vcpu->arch.efer;
1769 (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
1774 efer |= vcpu->arch.efer & EFER_LMA;
1809 msr_filter = srcu_dereference(kvm->arch.msr_filter, &kvm->srcu);
2041 if (!(vcpu->kvm->arch.user_space_msr_mask & msr_reason))
2050 vcpu->arch.complete_userspace_io = completion;
2162 if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(vcpu->arch.apic))
2169 return kvm_x2apic_icr_write(vcpu->arch.apic, data);
2363 struct kvm_arch *ka = &vcpu->kvm->arch;
2372 vcpu->arch.time = system_time;
2377 kvm_gpc_activate(&vcpu->arch.pv_time, system_time & ~1ULL,
2380 kvm_gpc_deactivate(&vcpu->arch.pv_time);
2448 vcpu->arch.tsc_catchup = 1;
2449 vcpu->arch.tsc_always_catchup = 1;
2485 &vcpu->arch.virtual_tsc_shift,
2486 &vcpu->arch.virtual_tsc_mult);
2487 vcpu->arch.virtual_tsc_khz = user_tsc_khz;
2507 u64 tsc = pvclock_scale_delta(kernel_ns-vcpu->arch.this_tsc_nsec,
2508 vcpu->arch.virtual_tsc_mult,
2509 vcpu->arch.virtual_tsc_shift);
2510 tsc += vcpu->arch.this_tsc_write;
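
Lines 2485-2510 convert elapsed nanoseconds into guest TSC ticks with a precomputed (mult, shift) pair: pvclock-style 32.32 fixed-point scaling. A sketch of the scaling step, using __uint128_t (GCC/Clang) in place of the kernel's 128-bit multiply helpers:

#include <stdint.h>
#include <stdio.h>

/* scaled = (delta << shift) * (mul_frac / 2^32); shift may be negative.
 * Mirrors the shape of pvclock_scale_delta(). */
static uint64_t scale_delta(uint64_t delta, uint32_t mul_frac, int shift)
{
	if (shift < 0)
		delta >>= -shift;
	else
		delta <<= shift;
	return (uint64_t)(((__uint128_t)delta * mul_frac) >> 32);
}

int main(void)
{
	/* scale by 1.5: shift = 1, fraction = 0.75 * 2^32 = 0xC0000000 */
	printf("%llu\n",
	       (unsigned long long)scale_delta(1000, 0xC0000000u, 1));	/* 1500 */
	return 0;
}
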
2525 struct kvm_arch *ka = &vcpu->kvm->arch;
2578 tsc = kvm_scale_tsc(rdtsc(), vcpu->arch.l1_tsc_scaling_ratio);
2585 return vcpu->arch.l1_tsc_offset +
2586 kvm_scale_tsc(host_tsc, vcpu->arch.l1_tsc_scaling_ratio);
2618 vcpu->arch.l1_tsc_offset,
2621 vcpu->arch.l1_tsc_offset = l1_offset;
2629 vcpu->arch.tsc_offset = kvm_calc_nested_tsc_offset(
2634 vcpu->arch.tsc_offset = l1_offset;
2641 vcpu->arch.l1_tsc_scaling_ratio = l1_multiplier;
2645 vcpu->arch.tsc_scaling_ratio = kvm_calc_nested_tsc_multiplier(
2649 vcpu->arch.tsc_scaling_ratio = l1_multiplier;
2678 lockdep_assert_held(&kvm->arch.tsc_write_lock);
2684 kvm->arch.last_tsc_nsec = ns;
2685 kvm->arch.last_tsc_write = tsc;
2686 kvm->arch.last_tsc_khz = vcpu->arch.virtual_tsc_khz;
2687 kvm->arch.last_tsc_offset = offset;
2689 vcpu->arch.last_guest_tsc = tsc;
2701 * These values are tracked in kvm->arch.cur_xxx variables.
2703 kvm->arch.cur_tsc_generation++;
2704 kvm->arch.cur_tsc_nsec = ns;
2705 kvm->arch.cur_tsc_write = tsc;
2706 kvm->arch.cur_tsc_offset = offset;
2707 kvm->arch.nr_vcpus_matched_tsc = 0;
2708 } else if (vcpu->arch.this_tsc_generation != kvm->arch.cur_tsc_generation) {
2709 kvm->arch.nr_vcpus_matched_tsc++;
2713 vcpu->arch.this_tsc_generation = kvm->arch.cur_tsc_generation;
2714 vcpu->arch.this_tsc_nsec = kvm->arch.cur_tsc_nsec;
2715 vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write;
2728 raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
2731 elapsed = ns - kvm->arch.last_tsc_nsec;
2733 if (vcpu->arch.virtual_tsc_khz) {
2742 u64 tsc_exp = kvm->arch.last_tsc_write +
2744 u64 tsc_hz = vcpu->arch.virtual_tsc_khz * 1000LL;
2762 vcpu->arch.virtual_tsc_khz == kvm->arch.last_tsc_khz) {
2764 offset = kvm->arch.cur_tsc_offset;
2774 raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
2780 u64 tsc_offset = vcpu->arch.l1_tsc_offset;
2786 if (vcpu->arch.l1_tsc_scaling_ratio != kvm_caps.default_tsc_scaling_ratio)
2789 vcpu->arch.l1_tsc_scaling_ratio);
2957 struct kvm_arch *ka = &kvm->arch;
2961 lockdep_assert_held(&kvm->arch.tsc_write_lock);
2993 raw_spin_lock_irq(&kvm->arch.tsc_write_lock);
2994 write_seqcount_begin(&kvm->arch.pvclock_sc);
3007 struct kvm_arch *ka = &kvm->arch;
3048 struct kvm_arch *ka = &kvm->arch;
3083 struct kvm_arch *ka = &kvm->arch;
3104 struct kvm_vcpu_arch *vcpu = &v->arch;
3153 struct kvm_vcpu_arch *vcpu = &v->arch;
3154 struct kvm_arch *ka = &v->kvm->arch;
3215 v->arch.l1_tsc_scaling_ratio);
3226 vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset;
3269 struct kvm *kvm = container_of(ka, struct kvm, arch);
3283 schedule_delayed_work(&kvm->arch.kvmclock_update_work,
3294 struct kvm *kvm = container_of(ka, struct kvm, arch);
3299 schedule_delayed_work(&kvm->arch.kvmclock_update_work, 0);
3300 schedule_delayed_work(&kvm->arch.kvmclock_sync_work,
3321 return !!(vcpu->arch.msr_hwcr & BIT_ULL(18));
3328 u64 mcg_cap = vcpu->arch.mcg_cap;
3336 vcpu->arch.mcg_status = data;
3344 vcpu->arch.mcg_ctl = data;
3358 vcpu->arch.mci_ctl2_banks[offset] = data;
3390 vcpu->arch.mce_banks[offset] = data;
3402 return (vcpu->arch.apf.msr_en_val & mask) == mask;
3424 vcpu->arch.apf.msr_en_val = data;
3432 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa,
3436 vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS);
3437 vcpu->arch.apf.delivery_as_pf_vmexit = data & KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT;
3453 vcpu->arch.apf.msr_int_val = data;
3455 vcpu->arch.apf.vec = data & KVM_ASYNC_PF_VEC_MASK;
3462 kvm_gpc_deactivate(&vcpu->arch.pv_time);
3463 vcpu->arch.time = 0;
3524 struct gfn_to_hva_cache *ghc = &vcpu->arch.st.cache;
3527 gpa_t gpa = vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS;
3536 if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
3579 vcpu->arch.st.preempted = 0;
3593 vcpu->arch.st.preempted = 0;
3607 vcpu->arch.st.last_steal;
3608 vcpu->arch.st.last_steal = current->sched_info.run_delay;
3637 if (msr && msr == vcpu->kvm->arch.xen_hvm_config.msr)
3653 vcpu->arch.microcode_version = data;
3658 vcpu->arch.arch_capabilities = data;
3671 if (vcpu->arch.perf_capabilities == data)
3674 vcpu->arch.perf_capabilities = data;
3709 vcpu->arch.msr_hwcr = data;
3725 vcpu->arch.pat = data;
3740 s64 adj = data - vcpu->arch.ia32_tsc_adjust_msr;
3747 vcpu->arch.ia32_tsc_adjust_msr = data;
3751 u64 old_val = vcpu->arch.ia32_misc_enable_msr;
3767 vcpu->arch.ia32_misc_enable_msr = data;
3770 vcpu->arch.ia32_misc_enable_msr = data;
3777 vcpu->arch.smbase = data;
3780 vcpu->arch.msr_ia32_power_ctl = data;
3786 u64 adj = kvm_compute_l1_tsc_offset(vcpu, data) - vcpu->arch.l1_tsc_offset;
3788 vcpu->arch.ia32_tsc_adjust_msr += adj;
3802 vcpu->arch.ia32_xss = data;
3808 vcpu->arch.smi_count = data;
3814 vcpu->kvm->arch.wall_clock = data;
3821 vcpu->kvm->arch.wall_clock = data;
3854 vcpu->arch.apf.pageready_pending = false;
3868 vcpu->arch.st.msr_val = data;
3892 vcpu->arch.msr_kvm_poll_control = data;
3942 vcpu->arch.osvw.length = data;
3947 vcpu->arch.osvw.status = data;
3954 vcpu->arch.msr_platform_info = data;
3961 vcpu->arch.msr_misc_features_enables = data;
3972 fpu_update_guest_xfd(&vcpu->arch.guest_fpu, data);
3982 vcpu->arch.guest_fpu.xfd_err = data;
4006 u64 mcg_cap = vcpu->arch.mcg_cap;
4016 data = vcpu->arch.mcg_cap;
4021 data = vcpu->arch.mcg_ctl;
4024 data = vcpu->arch.mcg_status;
4035 data = vcpu->arch.mci_ctl2_banks[offset];
4044 data = vcpu->arch.mce_banks[offset];
4096 msr_info->data = vcpu->arch.microcode_version;
4102 msr_info->data = vcpu->arch.arch_capabilities;
4108 msr_info->data = vcpu->arch.perf_capabilities;
4111 msr_info->data = vcpu->arch.msr_ia32_power_ctl;
4126 offset = vcpu->arch.l1_tsc_offset;
4127 ratio = vcpu->arch.l1_tsc_scaling_ratio;
4129 offset = vcpu->arch.tsc_offset;
4130 ratio = vcpu->arch.tsc_scaling_ratio;
4137 msr_info->data = vcpu->arch.pat;
4169 msr_info->data = (u64)vcpu->arch.ia32_tsc_adjust_msr;
4172 msr_info->data = vcpu->arch.ia32_misc_enable_msr;
4177 msr_info->data = vcpu->arch.smbase;
4180 msr_info->data = vcpu->arch.smi_count;
4189 msr_info->data = vcpu->arch.efer;
4195 msr_info->data = vcpu->kvm->arch.wall_clock;
4201 msr_info->data = vcpu->kvm->arch.wall_clock;
4207 msr_info->data = vcpu->arch.time;
4213 msr_info->data = vcpu->arch.time;
4219 msr_info->data = vcpu->arch.apf.msr_en_val;
4225 msr_info->data = vcpu->arch.apf.msr_int_val;
4237 msr_info->data = vcpu->arch.st.msr_val;
4243 msr_info->data = vcpu->arch.pv_eoi.msr_val;
4249 msr_info->data = vcpu->arch.msr_kvm_poll_control;
4264 msr_info->data = vcpu->arch.ia32_xss;
4307 msr_info->data = vcpu->arch.osvw.length;
4312 msr_info->data = vcpu->arch.osvw.status;
4316 !vcpu->kvm->arch.guest_can_read_msr_platform_info)
4318 msr_info->data = vcpu->arch.msr_platform_info;
4321 msr_info->data = vcpu->arch.msr_misc_features_enables;
4324 msr_info->data = vcpu->arch.msr_hwcr;
4332 msr_info->data = vcpu->arch.guest_fpu.fpstate->xfd;
4339 msr_info->data = vcpu->arch.guest_fpu.xfd_err;
4814 cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
4823 vcpu->arch.host_pkru = read_pkru();
4826 if (unlikely(vcpu->arch.tsc_offset_adjustment)) {
4827 adjust_tsc_offset_host(vcpu, vcpu->arch.tsc_offset_adjustment);
4828 vcpu->arch.tsc_offset_adjustment = 0;
4833 s64 tsc_delta = !vcpu->arch.last_host_tsc ? 0 :
4834 rdtsc() - vcpu->arch.last_host_tsc;
4840 vcpu->arch.last_guest_tsc);
4842 vcpu->arch.tsc_catchup = 1;
4852 if (!vcpu->kvm->arch.use_master_clock || vcpu->cpu == -1)
4864 struct gfn_to_hva_cache *ghc = &vcpu->arch.st.cache;
4868 gpa_t gpa = vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS;
4877 if (!vcpu->arch.at_instruction_boundary) {
4883 if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
4886 if (vcpu->arch.st.preempted)
4904 vcpu->arch.st.preempted = KVM_VCPU_PREEMPTED;
4914 if (!vcpu->arch.guest_state_protected)
4915 vcpu->arch.preempted_in_kernel = !static_call(kvm_x86_get_cpl)(vcpu);
4930 vcpu->arch.last_host_tsc = rdtsc();
5004 if (vcpu->arch.pending_external_vector != -1)
5007 vcpu->arch.pending_external_vector = irq->irq;
5024 vcpu->arch.tpr_access_reporting = !!tac->enabled;
5040 vcpu->arch.mcg_cap = mcg_cap;
5043 vcpu->arch.mcg_ctl = ~(u64)0;
5046 vcpu->arch.mce_banks[bank*4] = ~(u64)0;
5048 vcpu->arch.mci_ctl2_banks[bank] = 0;
5077 u64 mcg_cap = vcpu->arch.mcg_cap;
5082 vcpu->arch.mcg_status = mce->mcg_status;
5085 !(vcpu->arch.mci_ctl2_banks[mce->bank] & MCI_CTL2_CMCI_EN))
5089 kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTCMCI);
5097 u64 mcg_cap = vcpu->arch.mcg_cap;
5099 u64 *banks = vcpu->arch.mce_banks;
5114 vcpu->arch.mcg_ctl != ~(u64)0)
5123 if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) ||
5132 vcpu->arch.mcg_status = mce->mcg_status;
5166 if (vcpu->arch.exception_vmexit.pending &&
5167 !vcpu->arch.exception.pending &&
5168 !vcpu->arch.exception.injected)
5169 ex = &vcpu->arch.exception_vmexit;
5171 ex = &vcpu->arch.exception;
5181 if (!vcpu->kvm->arch.exception_payload_enabled &&
5201 if (!vcpu->kvm->arch.exception_payload_enabled)
5211 vcpu->arch.interrupt.injected && !vcpu->arch.interrupt.soft;
5212 events->interrupt.nr = vcpu->arch.interrupt.nr;
5215 events->nmi.injected = vcpu->arch.nmi_injected;
5223 events->smi.pending = vcpu->arch.smi_pending;
5225 !!(vcpu->arch.hflags & HF_SMM_INSIDE_NMI_MASK);
5232 if (vcpu->kvm->arch.exception_payload_enabled)
5234 if (vcpu->kvm->arch.triple_fault_event) {
5252 if (!vcpu->kvm->arch.exception_payload_enabled)
5270 vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED)
5283 vcpu->arch.exception_from_userspace = events->exception.pending;
5285 vcpu->arch.exception_vmexit.pending = false;
5287 vcpu->arch.exception.injected = events->exception.injected;
5288 vcpu->arch.exception.pending = events->exception.pending;
5289 vcpu->arch.exception.vector = events->exception.nr;
5290 vcpu->arch.exception.has_error_code = events->exception.has_error_code;
5291 vcpu->arch.exception.error_code = events->exception.error_code;
5292 vcpu->arch.exception.has_payload = events->exception_has_payload;
5293 vcpu->arch.exception.payload = events->exception_payload;
5295 vcpu->arch.interrupt.injected = events->interrupt.injected;
5296 vcpu->arch.interrupt.nr = events->interrupt.nr;
5297 vcpu->arch.interrupt.soft = events->interrupt.soft;
5302 vcpu->arch.nmi_injected = events->nmi.injected;
5304 vcpu->arch.nmi_pending = 0;
5305 atomic_set(&vcpu->arch.nmi_queued, events->nmi.pending);
5313 vcpu->arch.apic->sipi_vector = events->sipi_vector;
5317 if (!!(vcpu->arch.hflags & HF_SMM_MASK) != events->smi.smm) {
5322 vcpu->arch.smi_pending = events->smi.pending;
5326 vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK;
5328 vcpu->arch.hflags &= ~HF_SMM_INSIDE_NMI_MASK;
5339 set_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events);
5341 clear_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events);
5346 if (!vcpu->kvm->arch.triple_fault_event)
5365 memcpy(dbgregs->db, vcpu->arch.db, sizeof(vcpu->arch.db));
5368 dbgregs->dr7 = vcpu->arch.dr7;
5382 memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db));
5384 vcpu->arch.dr6 = dbgregs->dr6;
5385 vcpu->arch.dr7 = dbgregs->dr7;
5407 u64 supported_xcr0 = vcpu->arch.guest_supported_xcr0 |
5410 if (fpstate_is_confidential(&vcpu->arch.guest_fpu))
5413 fpu_copy_guest_fpstate_to_uabi(&vcpu->arch.guest_fpu, state, size,
5414 supported_xcr0, vcpu->arch.pkru);
5427 if (fpstate_is_confidential(&vcpu->arch.guest_fpu))
5430 return fpu_copy_uabi_to_guest_fpstate(&vcpu->arch.guest_fpu,
5433 &vcpu->arch.pkru);
5447 guest_xcrs->xcrs[0].value = vcpu->arch.xcr0;
5481 if (!vcpu->arch.pv_time.active)
5483 vcpu->arch.pvclock_set_guest_stopped_request = true;
5516 if (put_user(vcpu->arch.l1_tsc_offset, uaddr))
5547 raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
5549 matched = (vcpu->arch.virtual_tsc_khz &&
5550 kvm->arch.last_tsc_khz == vcpu->arch.virtual_tsc_khz &&
5551 kvm->arch.last_tsc_offset == offset);
5553 tsc = kvm_scale_tsc(rdtsc(), vcpu->arch.l1_tsc_scaling_ratio) + offset;
5557 raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
5639 vcpu->arch.pv_cpuid.enforce = cap->args[0];
5640 if (vcpu->arch.pv_cpuid.enforce)
5860 if (vcpu->arch.guest_fpu.uabi_size > sizeof(struct kvm_xsave))
5877 int size = vcpu->arch.guest_fpu.uabi_size;
5890 int size = vcpu->arch.guest_fpu.uabi_size;
5951 r = vcpu->arch.virtual_tsc_khz;
6121 kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;
6129 struct kvm_pic *pic = kvm->arch.vpic;
6154 struct kvm_pic *pic = kvm->arch.vpic;
6184 struct kvm_kpit_state *kps = &kvm->arch.vpit->pit_state;
6197 struct kvm_pit *pit = kvm->arch.vpit;
6209 mutex_lock(&kvm->arch.vpit->pit_state.lock);
6210 memcpy(ps->channels, &kvm->arch.vpit->pit_state.channels,
6212 ps->flags = kvm->arch.vpit->pit_state.flags;
6213 mutex_unlock(&kvm->arch.vpit->pit_state.lock);
6223 struct kvm_pit *pit = kvm->arch.vpit;
6243 struct kvm_pit *pit = kvm->arch.vpit;
6299 kvm->arch.disabled_quirks = cap->args[0];
6317 kvm->arch.irqchip_mode = KVM_IRQCHIP_SPLIT;
6318 kvm->arch.nr_reserved_ioapic_pins = cap->args[0];
6331 kvm->arch.x2apic_format = true;
6333 kvm->arch.x2apic_broadcast_quirk_disabled = true;
6343 kvm->arch.pause_in_guest = true;
6355 kvm->arch.mwait_in_guest = true;
6357 kvm->arch.hlt_in_guest = true;
6359 kvm->arch.cstate_in_guest = true;
6365 kvm->arch.guest_can_read_msr_platform_info = cap->args[0];
6369 kvm->arch.exception_payload_enabled = cap->args[0];
6373 kvm->arch.triple_fault_event = cap->args[0];
6380 kvm->arch.user_space_msr_mask = cap->args[0];
6394 kvm->arch.bus_lock_detection_enabled = true;
6408 kvm->arch.sgx_provisioning_allowed = true;
6433 kvm->arch.hypercall_exit_enabled = cap->args[0];
6440 kvm->arch.exit_on_emulation_error = cap->args[0];
6450 kvm->arch.enable_pmu = !(cap->args[0] & KVM_PMU_CAP_DISABLE);
6461 if (kvm->arch.max_vcpu_ids == cap->args[0]) {
6463 } else if (!kvm->arch.max_vcpu_ids) {
6464 kvm->arch.max_vcpu_ids = cap->args[0];
6479 kvm->arch.notify_window = cap->args[0] >> 32;
6480 kvm->arch.notify_vmexit_flags = (u32)cap->args[0];
6509 kvm->arch.disable_nx_huge_pages = true;
6612 old_filter = rcu_replace_pointer(kvm->arch.msr_filter, new_filter,
6689 if (!vcpu->arch.pv_time.active)
6729 struct kvm_arch *ka = &kvm->arch;
6840 kvm->arch.irqchip_mode = KVM_IRQCHIP_KERNEL;
6857 if (kvm->arch.vpit)
6860 kvm->arch.vpit = kvm_create_pit(kvm, u.pit_config.flags);
6861 if (kvm->arch.vpit)
6913 if (!kvm->arch.vpit)
6930 if (!kvm->arch.vpit)
6939 if (!kvm->arch.vpit)
6956 if (!kvm->arch.vpit)
6969 if (!kvm->arch.vpit)
6980 kvm->arch.bsp_vcpu_id = arg;
7041 WRITE_ONCE(kvm->arch.default_tsc_khz, user_tsc_khz);
7047 r = READ_ONCE(kvm->arch.default_tsc_khz);
7254 !kvm_iodevice_write(vcpu, &vcpu->arch.apic->dev, addr, n, v))
7274 !kvm_iodevice_read(vcpu, &vcpu->arch.apic->dev,
7303 struct kvm_mmu *mmu = vcpu->arch.mmu;
7318 struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
7328 struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
7340 struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
7349 struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
7382 struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
7441 struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
7487 vcpu->arch.l1tf_flush_l1d = true;
7545 struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
7555 !permission_fault(vcpu, vcpu->arch.walk_mmu,
7556 vcpu->arch.mmio_access, 0, access))) {
7557 *gpa = vcpu->arch.mmio_gfn << PAGE_SHIFT |
7665 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
7863 WARN_ON_ONCE(vcpu->arch.pio.count);
7888 vcpu->arch.pio.port = port;
7889 vcpu->arch.pio.in = in;
7890 vcpu->arch.pio.count = count;
7891 vcpu->arch.pio.size = size;
7894 memset(vcpu->arch.pio_data, 0, size * count);
7896 memcpy(vcpu->arch.pio_data, data, size * count);
7919 int size = vcpu->arch.pio.size;
7920 unsigned int count = vcpu->arch.pio.count;
7921 memcpy(val, vcpu->arch.pio_data, size * count);
7922 trace_kvm_pio(KVM_PIO_IN, vcpu->arch.pio.port, size, count, vcpu->arch.pio_data);
7923 vcpu->arch.pio.count = 0;
7931 if (vcpu->arch.pio.count) {
7979 cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
7980 on_each_cpu_mask(vcpu->arch.wbinvd_dirty_mask,
7983 cpumask_clear(vcpu->arch.wbinvd_dirty_mask);
8031 value = vcpu->arch.cr2;
8060 vcpu->arch.cr2 = val;
8244 emul_to_vcpu(ctxt)->arch.halt_request = 1;
8395 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
8418 vcpu->arch.emulate_ctxt = ctxt;
8425 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
8446 vcpu->arch.emulate_regs_need_sync_from_vcpu = false;
8451 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
8524 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
8555 if (kvm->arch.exit_on_emulation_error ||
8584 if (!vcpu->arch.mmu->root_role.direct) {
8617 if (vcpu->arch.mmu->root_role.direct) {
8621 indirect_shadow_pages = vcpu->kvm->arch.indirect_shadow_pages;
8651 last_retry_eip = vcpu->arch.last_retry_eip;
8652 last_retry_addr = vcpu->arch.last_retry_addr;
8667 vcpu->arch.last_retry_eip = vcpu->arch.last_retry_addr = 0;
8682 vcpu->arch.last_retry_eip = ctxt->eip;
8683 vcpu->arch.last_retry_addr = cr2_or_gpa;
8685 if (!vcpu->arch.mmu->root_role.direct)
8716 kvm_run->debug.arch.dr6 = DR6_BS | DR6_ACTIVE_LOW;
8717 kvm_run->debug.arch.pc = kvm_get_linear_rip(vcpu);
8718 kvm_run->debug.arch.exception = DB_VECTOR;
8792 (vcpu->arch.guest_debug_dr7 & DR7_BP_EN_MASK)) {
8796 vcpu->arch.guest_debug_dr7,
8797 vcpu->arch.eff_db);
8800 kvm_run->debug.arch.dr6 = dr6 | DR6_ACTIVE_LOW;
8801 kvm_run->debug.arch.pc = eip;
8802 kvm_run->debug.arch.exception = DB_VECTOR;
8809 if (unlikely(vcpu->arch.dr7 & DR7_BP_EN_MASK) &&
8813 vcpu->arch.dr7,
8814 vcpu->arch.db);
8870 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
8888 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
8894 vcpu->arch.l1tf_flush_l1d = true;
8968 if (vcpu->arch.emulate_regs_need_sync_from_vcpu) {
8969 vcpu->arch.emulate_regs_need_sync_from_vcpu = false;
8979 if (vcpu->arch.mmu->root_role.direct) {
9005 } else if (vcpu->arch.pio.count) {
9006 if (!vcpu->arch.pio.in) {
9008 vcpu->arch.pio.count = 0;
9011 vcpu->arch.complete_userspace_io = complete_emulated_pio;
9020 vcpu->arch.complete_userspace_io = complete_emulated_mmio;
9021 } else if (vcpu->arch.complete_userspace_io) {
9033 vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
9061 vcpu->arch.emulate_regs_need_sync_to_vcpu = true;
9081 vcpu->arch.pio.count = 0;
9087 vcpu->arch.pio.count = 0;
9089 if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.pio.linear_rip)))
9110 vcpu->arch.complete_userspace_io =
9114 vcpu->arch.pio.linear_rip = kvm_get_linear_rip(vcpu);
9115 vcpu->arch.complete_userspace_io = complete_fast_pio_out;
9124 /* We should only ever be called with arch.pio.count equal to 1 */
9125 BUG_ON(vcpu->arch.pio.count != 1);
9127 if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.pio.linear_rip))) {
9128 vcpu->arch.pio.count = 0;
9133 val = (vcpu->arch.pio.size < 4) ? kvm_rax_read(vcpu) : 0;
9156 vcpu->arch.pio.linear_rip = kvm_get_linear_rip(vcpu);
9157 vcpu->arch.complete_userspace_io = complete_fast_pio_in;
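
The fast PIO paths at lines 9110-9157 record a one-shot callback in complete_userspace_io before exiting to userspace; lines 11188-11190 later consume it, clearing the pointer before calling so the completion runs exactly once. A minimal sketch of that callback handshake (names are illustrative, not the kernel's types):

#include <stdio.h>

struct vcpu;
typedef int (*cui_fn)(struct vcpu *);

struct vcpu { cui_fn complete_userspace_io; int pio_count; };

static int complete_fast_pio_in(struct vcpu *v)
{
	v->pio_count = 0;	/* consume the pending PIO state */
	return 1;		/* 1: keep running the guest */
}

static int vcpu_run_entry(struct vcpu *v)
{
	if (v->complete_userspace_io) {
		cui_fn cui = v->complete_userspace_io;

		v->complete_userspace_io = NULL;	/* one-shot: clear first */
		return cui(v);
	}
	return 1;
}

int main(void)
{
	struct vcpu v = { .complete_userspace_io = complete_fast_pio_in,
			  .pio_count = 1 };

	printf("%d %d\n", vcpu_run_entry(&v), v.pio_count);	/* 1 0 */
	return 0;
}
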
9647 vcpu->arch.mp_state = state;
9697 if (vcpu->arch.tsc_always_catchup)
9741 return (READ_ONCE(kvm->arch.apicv_inhibit_reasons) == 0);
9747 ulong vm_reasons = READ_ONCE(vcpu->kvm->arch.apicv_inhibit_reasons);
9767 unsigned long *inhibits = &kvm->arch.apicv_inhibit_reasons;
9769 init_rwsem(&kvm->arch.apicv_update_lock);
9789 map = rcu_dereference(vcpu->kvm->arch.apic_map);
9892 if (!(vcpu->kvm->arch.hypercall_exit_enabled & (1 << KVM_HC_MAP_GPA_RANGE)))
9911 vcpu->arch.complete_userspace_io = complete_hypercall_exit;
9984 if (vcpu->arch.apic->apicv_active)
9987 if (!vcpu->arch.apic->vapic_addr)
10020 vcpu->arch.exception.has_error_code &= is_protmode(vcpu);
10022 trace_kvm_inj_exception(vcpu->arch.exception.vector,
10023 vcpu->arch.exception.has_error_code,
10024 vcpu->arch.exception.error_code,
10025 vcpu->arch.exception.injected);
10108 if (vcpu->arch.exception.injected)
10112 else if (vcpu->arch.nmi_injected)
10114 else if (vcpu->arch.interrupt.injected)
10122 WARN_ON_ONCE(vcpu->arch.exception.injected &&
10123 vcpu->arch.exception.pending);
10142 WARN_ON_ONCE(vcpu->arch.exception_vmexit.injected ||
10143 vcpu->arch.exception_vmexit.pending);
10152 if (vcpu->arch.exception.pending) {
10163 if (exception_type(vcpu->arch.exception.vector) == EXCPT_FAULT)
10167 if (vcpu->arch.exception.vector == DB_VECTOR) {
10168 kvm_deliver_exception_payload(vcpu, &vcpu->arch.exception);
10169 if (vcpu->arch.dr7 & DR7_GD) {
10170 vcpu->arch.dr7 &= ~DR7_GD;
10177 vcpu->arch.exception.pending = false;
10178 vcpu->arch.exception.injected = true;
10199 if (vcpu->arch.smi_pending) {
10204 vcpu->arch.smi_pending = false;
10205 ++vcpu->arch.smi_count;
10213 if (vcpu->arch.nmi_pending) {
10218 --vcpu->arch.nmi_pending;
10219 vcpu->arch.nmi_injected = true;
10224 if (vcpu->arch.nmi_pending)
10262 WARN_ON_ONCE(vcpu->arch.exception.pending ||
10263 vcpu->arch.exception_vmexit.pending);
10288 if (static_call(kvm_x86_get_nmi_mask)(vcpu) || vcpu->arch.nmi_injected)
10295 * tracked in vcpu->arch.nmi_pending.
10300 vcpu->arch.nmi_pending += atomic_xchg(&vcpu->arch.nmi_queued, 0);
10301 vcpu->arch.nmi_pending = min(vcpu->arch.nmi_pending, limit);
10303 if (vcpu->arch.nmi_pending &&
10305 vcpu->arch.nmi_pending--;
10307 if (vcpu->arch.nmi_pending)
10314 return vcpu->arch.nmi_pending +
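
Lines 10288-10307 drain NMIs: producers bump nmi_queued atomically from any context, and the vCPU thread moves the queued count into nmi_pending with an exchange, capping it because real hardware can latch at most one NMI while another is in service. A C11-atomics sketch (the cap of 2 matches KVM's usual limit, though KVM lowers it in some modes):

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint nmi_queued;

static unsigned process_nmi(unsigned nmi_pending)
{
	const unsigned limit = 2;	/* assumed: one in service + one latched */

	nmi_pending += atomic_exchange(&nmi_queued, 0);	/* drain the queue */
	if (nmi_pending > limit)
		nmi_pending = limit;
	return nmi_pending;
}

int main(void)
{
	atomic_fetch_add(&nmi_queued, 5);	/* five NMIs raced in */
	printf("%u\n", process_nmi(0));		/* only 2 survive */
	return 0;
}
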
10331 struct kvm_lapic *apic = vcpu->arch.apic;
10337 down_read(&vcpu->kvm->arch.apicv_update_lock);
10362 up_read(&vcpu->kvm->arch.apicv_update_lock);
10382 if (apic_x2apic_mode(vcpu->arch.apic) &&
10394 lockdep_assert_held_write(&kvm->arch.apicv_update_lock);
10399 old = new = kvm->arch.apicv_inhibit_reasons;
10417 kvm->arch.apicv_inhibit_reasons = new;
10426 kvm->arch.apicv_inhibit_reasons = new;
10436 down_write(&kvm->arch.apicv_update_lock);
10438 up_write(&kvm->arch.apicv_update_lock);
10447 bitmap_zero(vcpu->arch.ioapic_handled_vectors, 256);
10450 kvm_scan_ioapic_routes(vcpu, vcpu->arch.ioapic_handled_vectors);
10454 kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors);
10458 vcpu->arch.load_eoi_exitmap_pending = true;
10467 if (!kvm_apic_hw_enabled(vcpu->arch.apic))
10472 vcpu->arch.ioapic_handled_vectors,
10479 vcpu, (u64 *)vcpu->arch.ioapic_handled_vectors);
10590 vcpu->arch.apf.halted = true;
10607 BUG_ON(vcpu->arch.pending_ioapic_eoi > 255);
10608 if (test_bit(vcpu->arch.pending_ioapic_eoi,
10609 vcpu->arch.ioapic_handled_vectors)) {
10612 vcpu->arch.pending_ioapic_eoi;
10672 if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
10755 if (vcpu->arch.guest_fpu.xfd_err)
10756 wrmsrl(MSR_IA32_XFD_ERR, vcpu->arch.guest_fpu.xfd_err);
10758 if (unlikely(vcpu->arch.switch_db_regs)) {
10760 set_debugreg(vcpu->arch.eff_db[0], 0);
10761 set_debugreg(vcpu->arch.eff_db[1], 1);
10762 set_debugreg(vcpu->arch.eff_db[2], 2);
10763 set_debugreg(vcpu->arch.eff_db[3], 3);
10802 if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) {
10819 vcpu->arch.last_vmentry_cpu = vcpu->cpu;
10820 vcpu->arch.last_guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
10830 if (vcpu->arch.xfd_no_write_intercept)
10835 if (vcpu->arch.guest_fpu.xfd_err)
10873 if (unlikely(vcpu->arch.tsc_always_catchup))
10876 if (vcpu->arch.apic_attention)
10886 if (unlikely(vcpu->arch.apic_attention))
10910 if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
10941 switch(vcpu->arch.mp_state) {
10944 vcpu->arch.pv.pv_unhalted = false;
10945 vcpu->arch.mp_state =
10949 vcpu->arch.apf.halted = false;
10962 return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
10963 !vcpu->arch.apf.halted);
10971 vcpu->arch.l1tf_flush_l1d = true;
10980 vcpu->arch.at_instruction_boundary = false;
11024 BUG_ON(!vcpu->arch.pio.count);
11088 vcpu->arch.complete_userspace_io = complete_emulated_mmio;
11096 fpu_swap_kvm_fpstate(&vcpu->arch.guest_fpu, true);
11103 fpu_swap_kvm_fpstate(&vcpu->arch.guest_fpu, false);
11110 struct kvm_queued_exception *ex = &vcpu->arch.exception;
11120 if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
11177 if (vcpu->arch.exception_from_userspace && is_guest_mode(vcpu) &&
11186 vcpu->arch.exception_from_userspace = false;
11188 if (unlikely(vcpu->arch.complete_userspace_io)) {
11189 int (*cui)(struct kvm_vcpu *) = vcpu->arch.complete_userspace_io;
11190 vcpu->arch.complete_userspace_io = NULL;
11195 WARN_ON_ONCE(vcpu->arch.pio.count);
11224 if (vcpu->arch.emulate_regs_need_sync_to_vcpu) {
11232 emulator_writeback_register_cache(vcpu->arch.emulate_ctxt);
11233 vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
11268 vcpu->arch.emulate_regs_need_sync_from_vcpu = true;
11269 vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
11293 vcpu->arch.exception.pending = false;
11294 vcpu->arch.exception_vmexit.pending = false;
11311 if (vcpu->arch.guest_state_protected)
11331 sregs->cr2 = vcpu->arch.cr2;
11338 sregs->efer = vcpu->arch.efer;
11346 if (vcpu->arch.guest_state_protected)
11349 if (vcpu->arch.interrupt.injected && !vcpu->arch.interrupt.soft)
11350 set_bit(vcpu->arch.interrupt.nr,
11360 if (vcpu->arch.guest_state_protected)
11393 if ((vcpu->arch.mp_state == KVM_MP_STATE_HALTED ||
11394 vcpu->arch.mp_state == KVM_MP_STATE_AP_RESET_HOLD) &&
11395 vcpu->arch.pv.pv_unhalted)
11398 mp_state->mp_state = vcpu->arch.mp_state;
11437 if ((!kvm_apic_init_sipi_allowed(vcpu) || vcpu->arch.smi_pending) &&
11443 vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
11444 set_bit(KVM_APIC_SIPI, &vcpu->arch.apic->pending_events);
11446 vcpu->arch.mp_state = mp_state->mp_state;
11458 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
11518 if (vcpu->arch.guest_state_protected)
11528 vcpu->arch.cr2 = sregs->cr2;
11530 vcpu->arch.cr3 = sregs->cr3;
11536 *mmu_reset_needed |= vcpu->arch.efer != sregs->efer;
11541 vcpu->arch.cr0 = sregs->cr0;
11571 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
11611 if (valid_pdptrs && (!pae || vcpu->arch.guest_state_protected))
11625 vcpu->arch.pdptrs_from_userspace = true;
11652 down_write(&kvm->arch.apicv_update_lock);
11661 up_write(&kvm->arch.apicv_update_lock);
11670 if (vcpu->arch.guest_state_protected)
11697 vcpu->arch.eff_db[i] = dbg->arch.debugreg[i];
11698 vcpu->arch.guest_debug_dr7 = dbg->arch.debugreg[7];
11701 vcpu->arch.eff_db[i] = vcpu->arch.db[i];
11706 vcpu->arch.singlestep_rip = kvm_get_linear_rip(vcpu);
11753 if (fpstate_is_confidential(&vcpu->arch.guest_fpu))
11758 fxsave = &vcpu->arch.guest_fpu.fpstate->regs.fxsave;
11776 if (fpstate_is_confidential(&vcpu->arch.guest_fpu))
11781 fxsave = &vcpu->arch.guest_fpu.fpstate->regs.fxsave;
11845 if (!kvm->arch.max_vcpu_ids)
11846 kvm->arch.max_vcpu_ids = KVM_MAX_VCPU_IDS;
11848 if (id >= kvm->arch.max_vcpu_ids)
11859 vcpu->arch.last_vmentry_cpu = -1;
11860 vcpu->arch.regs_avail = ~0;
11861 vcpu->arch.regs_dirty = ~0;
11863 kvm_gpc_init(&vcpu->arch.pv_time, vcpu->kvm, vcpu, KVM_HOST_USES_PFN);
11866 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
11868 vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;
11890 vcpu->arch.apic->apicv_active = true;
11901 vcpu->arch.pio_data = page_address(page);
11903 vcpu->arch.mce_banks = kcalloc(KVM_MAX_MCE_BANKS * 4, sizeof(u64),
11905 vcpu->arch.mci_ctl2_banks = kcalloc(KVM_MAX_MCE_BANKS, sizeof(u64),
11907 if (!vcpu->arch.mce_banks || !vcpu->arch.mci_ctl2_banks)
11909 vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS;
11911 if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask,
11918 if (!fpu_alloc_guest_fpstate(&vcpu->arch.guest_fpu)) {
11923 vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
11924 vcpu->arch.reserved_gpa_bits = kvm_vcpu_reserved_gpa_bits_raw(vcpu);
11926 vcpu->arch.pat = MSR_IA32_CR_PAT_DEFAULT;
11930 vcpu->arch.perf_capabilities = kvm_caps.supported_perf_cap;
11933 vcpu->arch.pending_external_vector = -1;
11934 vcpu->arch.preempted_in_kernel = false;
11937 vcpu->arch.hv_root_tdp = INVALID_PAGE;
11944 vcpu->arch.arch_capabilities = kvm_get_arch_capabilities();
11945 vcpu->arch.msr_platform_info = MSR_PLATFORM_INFO_CPUID_FAULT;
11949 kvm_set_tsc_khz(vcpu, vcpu->kvm->arch.default_tsc_khz);
11956 fpu_free_guest_fpstate(&vcpu->arch.guest_fpu);
11958 kmem_cache_free(x86_emulator_cache, vcpu->arch.emulate_ctxt);
11960 free_cpumask_var(vcpu->arch.wbinvd_dirty_mask);
11962 kfree(vcpu->arch.mce_banks);
11963 kfree(vcpu->arch.mci_ctl2_banks);
11964 free_page((unsigned long)vcpu->arch.pio_data);
11983 vcpu->arch.msr_kvm_poll_control = 1;
11988 schedule_delayed_work(&kvm->arch.kvmclock_sync_work,
12000 kmem_cache_free(x86_emulator_cache, vcpu->arch.emulate_ctxt);
12001 free_cpumask_var(vcpu->arch.wbinvd_dirty_mask);
12002 fpu_free_guest_fpstate(&vcpu->arch.guest_fpu);
12007 kfree(vcpu->arch.mce_banks);
12008 kfree(vcpu->arch.mci_ctl2_banks);
12013 free_page((unsigned long)vcpu->arch.pio_data);
12014 kvfree(vcpu->arch.cpuid_entries);
12047 vcpu->arch.hflags = 0;
12049 vcpu->arch.smi_pending = 0;
12050 vcpu->arch.smi_count = 0;
12051 atomic_set(&vcpu->arch.nmi_queued, 0);
12052 vcpu->arch.nmi_pending = 0;
12053 vcpu->arch.nmi_injected = false;
12057 memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db));
12059 vcpu->arch.dr6 = DR6_ACTIVE_LOW;
12060 vcpu->arch.dr7 = DR7_FIXED_1;
12063 vcpu->arch.cr2 = 0;
12066 vcpu->arch.apf.msr_en_val = 0;
12067 vcpu->arch.apf.msr_int_val = 0;
12068 vcpu->arch.st.msr_val = 0;
12074 vcpu->arch.apf.halted = false;
12076 if (vcpu->arch.guest_fpu.fpstate && kvm_mpx_supported()) {
12077 struct fpstate *fpstate = vcpu->arch.guest_fpu.fpstate;
12095 vcpu->arch.smbase = 0x30000;
12097 vcpu->arch.msr_misc_features_enables = 0;
12098 vcpu->arch.ia32_misc_enable_msr = MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL |
12106 memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs));
12124 vcpu->arch.cr3 = 0;
12208 if (stable && vcpu->arch.last_host_tsc > local_tsc) {
12210 if (vcpu->arch.last_host_tsc > max_tsc)
12211 max_tsc = vcpu->arch.last_host_tsc;
12257 kvm->arch.backwards_tsc_observed = true;
12259 vcpu->arch.tsc_offset_adjustment += delta_cyc;
12260 vcpu->arch.last_host_tsc = local_tsc;
12270 kvm->arch.last_tsc_nsec = 0;
12271 kvm->arch.last_tsc_write = 0;
12286 return vcpu->kvm->arch.bsp_vcpu_id == vcpu->vcpu_id;
12291 return (vcpu->arch.apic_base & MSR_IA32_APICBASE_BSP) != 0;
12301 vcpu->arch.l1tf_flush_l1d = true;
12334 INIT_HLIST_HEAD(&kvm->arch.mask_notifier_list);
12335 INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
12336 atomic_set(&kvm->arch.noncoherent_dma_count, 0);
12339 set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);
12342 &kvm->arch.irq_sources_bitmap);
12344 raw_spin_lock_init(&kvm->arch.tsc_write_lock);
12345 mutex_init(&kvm->arch.apic_map_lock);
12346 seqcount_raw_spinlock_init(&kvm->arch.pvclock_sc, &kvm->arch.tsc_write_lock);
12347 kvm->arch.kvmclock_offset = -get_kvmclock_base_ns();
12349 raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
12351 raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
12353 kvm->arch.default_tsc_khz = max_tsc_khz ? : tsc_khz;
12354 kvm->arch.guest_can_read_msr_platform_info = true;
12355 kvm->arch.enable_pmu = enable_pmu;
12358 spin_lock_init(&kvm->arch.hv_root_tdp_lock);
12359 kvm->arch.hv_root_tdp = INVALID_PAGE;
12362 INIT_DELAYED_WORK(&kvm->arch.kvmclock_update_work, kvmclock_update_fn);
12363 INIT_DELAYED_WORK(&kvm->arch.kvmclock_sync_work, kvmclock_sync_fn);
12403 cancel_delayed_work_sync(&kvm->arch.kvmclock_sync_work);
12404 cancel_delayed_work_sync(&kvm->arch.kvmclock_update_work);
12506 kvm_free_msr_filter(srcu_dereference_check(kvm->arch.msr_filter, &kvm->srcu, 1));
12510 kvfree(rcu_dereference_check(kvm->arch.apic_map, 1));
12511 kfree(srcu_dereference_check(kvm->arch.pmu_event_filter, &kvm->srcu, 1));
12523 kvfree(slot->arch.rmap[i]);
12524 slot->arch.rmap[i] = NULL;
12535 kvfree(slot->arch.lpage_info[i - 1]);
12536 slot->arch.lpage_info[i - 1] = NULL;
12544 const int sz = sizeof(*slot->arch.rmap[0]);
12551 if (slot->arch.rmap[i])
12554 slot->arch.rmap[i] = __vcalloc(lpages, sz, GFP_KERNEL_ACCOUNT);
12555 if (!slot->arch.rmap[i]) {
12575 memset(&slot->arch, 0, sizeof(slot->arch));
12595 slot->arch.lpage_info[i - 1] = linfo;
12623 kvfree(slot->arch.lpage_info[i - 1]);
12624 slot->arch.lpage_info[i - 1] = NULL;
12665 memcpy(&new->arch, &old->arch, sizeof(old->arch));
12814 if (!kvm->arch.n_requested_mmu_pages &&
12845 if (vcpu->arch.pv.pv_unhalted)
12852 (vcpu->arch.nmi_pending &&
12858 (vcpu->arch.smi_pending &&
12901 if (READ_ONCE(vcpu->arch.pv.pv_unhalted))
12916 if (vcpu->arch.guest_state_protected)
12919 return vcpu->arch.preempted_in_kernel;
12940 if (vcpu->arch.guest_state_protected)
12970 kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip))
12998 while (vcpu->arch.apf.gfns[key] != ~0)
13001 vcpu->arch.apf.gfns[key] = gfn;
13010 (vcpu->arch.apf.gfns[key] != gfn &&
13011 vcpu->arch.apf.gfns[key] != ~0); i++)
13019 return vcpu->arch.apf.gfns[kvm_async_pf_gfn_slot(vcpu, gfn)] == gfn;
13028 if (WARN_ON_ONCE(vcpu->arch.apf.gfns[i] != gfn))
13032 vcpu->arch.apf.gfns[i] = ~0;
13035 if (vcpu->arch.apf.gfns[j] == ~0)
13037 k = kvm_async_pf_hash_fn(vcpu->arch.apf.gfns[j]);
13044 vcpu->arch.apf.gfns[i] = vcpu->arch.apf.gfns[j];
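
Lines 12998-13044 are the async page fault gfn table: open addressing with linear probing, ~0 as the empty marker, and a deletion (13028-13044) that re-slots later entries instead of leaving tombstones — the classic Knuth scheme behind the kernel's cyclic-interval test. A runnable sketch with a stand-in hash (the kernel hashes the low 32 bits of the gfn; the 64-entry size matches ASYNC_PF_PER_VCPU):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NSLOTS 64U
#define EMPTY  (~0ULL)

static uint64_t gfns[NSLOTS];

static unsigned hash_fn(uint64_t gfn)
{
	return (unsigned)(gfn * 0x9E3779B97F4A7C15ULL >> 58);	/* 6 bits -> 64 slots */
}

static void add_gfn(uint64_t gfn)
{
	unsigned key = hash_fn(gfn);

	while (gfns[key] != EMPTY)		/* assumes the table is never full */
		key = (key + 1) & (NSLOTS - 1);
	gfns[key] = gfn;
}

static unsigned slot_of(uint64_t gfn)
{
	unsigned key = hash_fn(gfn), i;

	for (i = 0; i < NSLOTS && gfns[key] != gfn && gfns[key] != EMPTY; i++)
		key = (key + 1) & (NSLOTS - 1);
	return key;
}

static void del_gfn(uint64_t gfn)
{
	unsigned i, j, k;

	i = j = slot_of(gfn);
	if (gfns[i] != gfn)
		return;

	for (;;) {
		gfns[i] = EMPTY;
		do {
			j = (j + 1) & (NSLOTS - 1);
			if (gfns[j] == EMPTY)
				return;
			k = hash_fn(gfns[j]);
			/* keep scanning while entry j's home slot k lies
			 * cyclically in ]i, j]; such entries stay reachable */
		} while ((i <= j) ? (i < k && k <= j) : (i < k || k <= j));
		gfns[i] = gfns[j];	/* re-slot j into the hole at i */
		i = j;
	}
}

int main(void)
{
	memset(gfns, 0xff, sizeof(gfns));	/* every slot starts at ~0 */
	add_gfn(0x100); add_gfn(0x200); add_gfn(0x300);
	del_gfn(0x200);
	printf("0x300 %s\n",
	       gfns[slot_of(0x300)] == 0x300 ? "found" : "lost");
	return 0;
}
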
13053 return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &reason,
13061 return kvm_write_guest_offset_cached(vcpu->kvm, &vcpu->arch.apf.data,
13070 if (kvm_read_guest_offset_cached(vcpu->kvm, &vcpu->arch.apf.data,
13083 if (vcpu->arch.apf.send_user_only &&
13092 return vcpu->arch.apf.delivery_as_pf_vmexit;
13125 trace_kvm_async_pf_not_present(work->arch.token, work->cr2_or_gpa);
13126 kvm_add_async_pf_gfn(vcpu, work->arch.gfn);
13134 fault.address = work->arch.token;
13157 .vector = vcpu->arch.apf.vec
13161 work->arch.token = ~0; /* broadcast wakeup */
13163 kvm_del_async_pf_gfn(vcpu, work->arch.gfn);
13164 trace_kvm_async_pf_ready(work->arch.token, work->cr2_or_gpa);
13168 !apf_put_user_ready(vcpu, work->arch.token)) {
13169 vcpu->arch.apf.pageready_pending = true;
13173 vcpu->arch.apf.halted = false;
13174 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
13180 if (!vcpu->arch.apf.pageready_pending)
13194 if (atomic_inc_return(&kvm->arch.assigned_device_count) == 1)
13201 atomic_dec(&kvm->arch.assigned_device_count);
13207 return raw_atomic_read(&kvm->arch.assigned_device_count);
13213 atomic_inc(&kvm->arch.noncoherent_dma_count);
13219 atomic_dec(&kvm->arch.noncoherent_dma_count);
13225 return atomic_read(&kvm->arch.noncoherent_dma_count);
13298 return (vcpu->arch.msr_kvm_poll_control & 1) == 0;
13331 struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
13339 * If vcpu->arch.walk_mmu->gva_to_gpa succeeded, the page
13350 vcpu->arch.walk_mmu->inject_page_fault(vcpu, &fault);
13484 vcpu->arch.complete_userspace_io = complete_sev_es_emulated_mmio;
13522 vcpu->arch.complete_userspace_io = complete_sev_es_emulated_mmio;
13560 vcpu->arch.complete_userspace_io = complete_sev_es_emulated_mmio;
13568 vcpu->arch.sev_pio_count -= count;
13569 vcpu->arch.sev_pio_data += count * size;
13577 int size = vcpu->arch.pio.size;
13578 int port = vcpu->arch.pio.port;
13580 vcpu->arch.pio.count = 0;
13581 if (vcpu->arch.sev_pio_count)
13591 min_t(unsigned int, PAGE_SIZE / size, vcpu->arch.sev_pio_count);
13592 int ret = emulator_pio_out(vcpu, size, port, vcpu->arch.sev_pio_data, count);
13600 if (!vcpu->arch.sev_pio_count)
13604 vcpu->arch.complete_userspace_io = complete_sev_es_emulated_outs;
13613 unsigned count = vcpu->arch.pio.count;
13614 int size = vcpu->arch.pio.size;
13615 int port = vcpu->arch.pio.port;
13617 complete_emulator_pio_in(vcpu, vcpu->arch.sev_pio_data);
13619 if (vcpu->arch.sev_pio_count)
13629 min_t(unsigned int, PAGE_SIZE / size, vcpu->arch.sev_pio_count);
13630 if (!emulator_pio_in(vcpu, size, port, vcpu->arch.sev_pio_data, count))
13635 if (!vcpu->arch.sev_pio_count)
13639 vcpu->arch.complete_userspace_io = complete_sev_es_emulated_ins;
13647 vcpu->arch.sev_pio_data = data;
13648 vcpu->arch.sev_pio_count = count;
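
Lines 13568-13648 emulate large SEV-ES string I/O in chunks: each pass handles at most PAGE_SIZE / size units because pio_data is a single shared page, then the cursor advances and the remainder continues (in real KVM the next chunk resumes via the completion callback; this sketch simply loops). PAGE_SIZE and the emit function are stand-ins:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096U

struct pio { const uint8_t *data; unsigned count, size; };

static unsigned min_u(unsigned a, unsigned b) { return a < b ? a : b; }

static void emit_chunk(const uint8_t *data, unsigned size, unsigned count)
{
	(void)data;	/* a real implementation would copy into the shared page */
	printf("chunk: %u units of %u bytes\n", count, size);
}

static void sev_pio_out(struct pio *p)
{
	while (p->count) {
		unsigned count = min_u(PAGE_SIZE / p->size, p->count);

		emit_chunk(p->data, p->size, count);
		p->count -= count;			/* advance the cursor */
		p->data  += (size_t)count * p->size;	/* past what was emitted */
	}
}

int main(void)
{
	static uint8_t buf[3 * PAGE_SIZE];
	struct pio p = { buf, sizeof(buf) / 4, 4 };	/* 3072 dword writes */

	sev_pio_out(&p);	/* three 1024-dword chunks */
	return 0;
}
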