Lines matching refs: "arch" (KVM x86 local APIC emulation; the snippets below are from lapic.c)

112 struct kvm_lapic *apic = vcpu->arch.apic;
222 struct kvm_lapic *apic = vcpu->arch.apic;
267 if (vcpu->kvm->arch.x2apic_format) {
297 struct kvm_lapic *apic = vcpu->arch.apic;
383 /* Read kvm->arch.apic_map_dirty before kvm->arch.apic_map. */
384 if (atomic_read_acquire(&kvm->arch.apic_map_dirty) == CLEAN)
390 mutex_lock(&kvm->arch.apic_map_lock);
394 * Read kvm->arch.apic_map_dirty before kvm->arch.apic_map (if clean)
400 if (atomic_cmpxchg_acquire(&kvm->arch.apic_map_dirty,
403 mutex_unlock(&kvm->arch.apic_map_lock);
418 max_id = max(max_id, kvm_x2apic_id(vcpu->arch.apic));
469 old = rcu_dereference_protected(kvm->arch.apic_map,
470 lockdep_is_held(&kvm->arch.apic_map_lock));
471 rcu_assign_pointer(kvm->arch.apic_map, new);
473 * Write kvm->arch.apic_map before clearing apic->apic_map_dirty.
476 atomic_cmpxchg_release(&kvm->arch.apic_map_dirty,
478 mutex_unlock(&kvm->arch.apic_map_lock);
499 atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
512 atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
518 atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
524 atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
535 atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
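Lines 383-535 above trace a lazy-rebuild protocol: writers (APIC ID/LDR updates, vCPU creation) mark kvm->arch.apic_map_dirty with release semantics, and the next consumer rebuilds the map under apic_map_lock, publishing the new map with rcu_assign_pointer(). A minimal userspace sketch of the CLEAN/UPDATE_IN_PROGRESS/DIRTY state machine, using C11 atomics and a pthread mutex in place of the kernel primitives (all names here are hypothetical):

/* Models the dirty-flag protocol seen at lines 383-478. */
#include <stdatomic.h>
#include <pthread.h>

enum { CLEAN, UPDATE_IN_PROGRESS, DIRTY };

static _Atomic int map_dirty = CLEAN;
static pthread_mutex_t map_lock = PTHREAD_MUTEX_INITIALIZER;

/* Writers: release ordering makes the change that staled the map
 * visible before the flag (mirrors atomic_set_release(..., DIRTY)). */
static void mark_map_dirty(void)
{
    atomic_store_explicit(&map_dirty, DIRTY, memory_order_release);
}

static void recalculate_map(void)
{
    int expected = DIRTY;

    /* Fast path: acquire pairs with the release store above. */
    if (atomic_load_explicit(&map_dirty, memory_order_acquire) == CLEAN)
        return;

    pthread_mutex_lock(&map_lock);
    /* Under the lock the state is CLEAN or DIRTY; a failed cmpxchg
     * means another thread already finished the rebuild. */
    if (!atomic_compare_exchange_strong_explicit(&map_dirty, &expected,
            UPDATE_IN_PROGRESS,
            memory_order_acquire, memory_order_acquire)) {
        pthread_mutex_unlock(&map_lock);
        return;
    }

    /* ... rebuild the map and publish it (RCU in the kernel) ... */

    expected = UPDATE_IN_PROGRESS;
    atomic_compare_exchange_strong_explicit(&map_dirty, &expected, CLEAN,
            memory_order_release, memory_order_relaxed);
    pthread_mutex_unlock(&map_lock);
}

The closing cmpxchg only moves UPDATE_IN_PROGRESS to CLEAN: if a writer set DIRTY again mid-rebuild, the flag stays DIRTY and the next caller rebuilds once more.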
570 return KVM_APIC_MAX_NR_LVT_ENTRIES - !(vcpu->arch.mcg_cap & MCG_CMCI_P);
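The expression at line 570 is a counting idiom: !(mcg_cap & MCG_CMCI_P) is 1 exactly when the vCPU's MCG_CAP lacks CMCI support, so one LVT entry (the CMCI LVT) is dropped from the count. A small illustration with stand-in constants (the real values come from the kernel headers):

#include <assert.h>

#define MAX_LVT 7                 /* placeholder for KVM_APIC_MAX_NR_LVT_ENTRIES */
#define CMCI_P  (1ULL << 10)      /* stand-in mirroring MCG_CMCI_P */

static int nr_lvt_entries(unsigned long long mcg_cap)
{
    return MAX_LVT - !(mcg_cap & CMCI_P);
}

int main(void)
{
    assert(nr_lvt_entries(CMCI_P) == MAX_LVT);      /* CMCI supported */
    assert(nr_lvt_entries(0) == MAX_LVT - 1);       /* CMCI LVT dropped */
    return 0;
}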
575 struct kvm_lapic *apic = vcpu->arch.apic;
599 struct kvm_lapic *apic = vcpu->arch.apic;
692 struct kvm_lapic *apic = vcpu->arch.apic;
740 apic_clear_irr(vec, vcpu->arch.apic);
815 return apic_find_highest_irr(vcpu->arch.apic);
826 struct kvm_lapic *apic = vcpu->arch.apic;
870 map = rcu_dereference(kvm->arch.apic_map);
886 return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, &val,
893 return kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, val,
899 return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED;
907 __set_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);
927 __clear_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);
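Lines 886-927 show the paravirtual EOI plumbing: an enable bit in the guest-programmed MSR gates the feature, a byte in guest memory (reached through the vcpu->arch.pv_eoi.data cache) carries the pending flag, and a host-side apic_attention bit remembers that the byte must be reconciled on the next exit. A hedged userspace reduction with direct memory standing in for kvm_read/write_guest_cached (names hypothetical):

#include <stdbool.h>
#include <stdint.h>

#define MSR_ENABLED    1ULL   /* stand-in for KVM_MSR_ENABLED (bit 0) */
#define PV_EOI_PENDING 0      /* stand-in for KVM_APIC_PV_EOI_PENDING */

struct vcpu_sketch {
    uint64_t pv_eoi_msr_val;  /* guest-programmed MSR value */
    unsigned long attention;  /* host-side bookkeeping bits */
    uint8_t *guest_byte;      /* stands in for the pv_eoi.data cache */
};

static bool pv_eoi_enabled(struct vcpu_sketch *v)
{
    return v->pv_eoi_msr_val & MSR_ENABLED;
}

static void pv_eoi_set_pending(struct vcpu_sketch *v)
{
    *v->guest_byte = 1;                      /* guest owes an EOI */
    v->attention |= 1UL << PV_EOI_PENDING;   /* reconcile on next exit */
}

static void pv_eoi_clr_pending(struct vcpu_sketch *v)
{
    *v->guest_byte = 0;
    v->attention &= ~(1UL << PV_EOI_PENDING);
}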
977 apic_update_ppr(vcpu->arch.apic);
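apic_update_ppr() at line 977 recomputes the processor priority. Per the SDM, PPR is the TPR when TPR's priority class (high nibble) is at least that of the highest in-service vector, else the in-service vector's class; a minimal sketch of that arithmetic:

#include <stdint.h>

/* tpr: APIC_TASKPRI value; isrv: highest in-service vector, 0 if none. */
static uint8_t compute_ppr(uint8_t tpr, uint8_t isrv)
{
    return (tpr & 0xf0) >= (isrv & 0xf0) ? tpr : (isrv & 0xf0);
}

For example, compute_ppr(0x20, 0x31) yields 0x30: the in-service class outranks the TPR, so only the class bits mask lower-priority interrupts.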
1060 if (!vcpu->kvm->arch.x2apic_broadcast_quirk_disabled &&
1070 struct kvm_lapic *target = vcpu->arch.apic;
1110 if (!kvm->arch.disabled_lapic_found) {
1111 kvm->arch.disabled_lapic_found = true;
1119 if (kvm->arch.x2apic_broadcast_quirk_disabled) {
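Lines 1060-1119 concern the x2APIC broadcast quirk: xAPIC broadcasts to destination 0xff, x2APIC to 0xffffffff, and unless the quirk is disabled KVM also treats an 0xff destination aimed at an x2APIC-mode vCPU as a broadcast for compatibility. A hedged reduction of that check (the kernel spreads the logic across kvm_apic_mda() and kvm_apic_broadcast(); the helper name here is hypothetical):

#include <stdbool.h>
#include <stdint.h>

#define APIC_BROADCAST   0xffu        /* xAPIC broadcast destination */
#define X2APIC_BROADCAST 0xffffffffu  /* x2APIC broadcast destination */

/* Note: the kernel further restricts the quirk to non-IPI (I/O APIC)
 * sources; that distinction is dropped in this sketch. */
static bool dest_is_broadcast(uint32_t dest, bool x2apic_mode,
                              bool quirk_disabled)
{
    if (x2apic_mode)
        return dest == X2APIC_BROADCAST ||
               (!quirk_disabled && dest == APIC_BROADCAST);
    return dest == APIC_BROADCAST;
}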
1229 map = rcu_dereference(kvm->arch.apic_map);
1271 map = rcu_dereference(kvm->arch.apic_map);
1302 vcpu->arch.apic_arb_prio++;
1334 vcpu->arch.pv.pv_unhalted = 1;
1406 map = rcu_dereference(kvm->arch.apic_map);
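The rcu_dereference() calls at lines 1229-1406 are the read side of the publish in the rebuild path: interrupt delivery walks the map locklessly inside an RCU critical section. A userspace analogue with liburcu (build with -lurcu; struct and function names are hypothetical):

#include <urcu.h>
#include <stdio.h>

struct map_sketch { int nr_entries; };

static struct map_sketch *live_map;

static int map_nr_entries(void)
{
    struct map_sketch *map;
    int n = 0;

    rcu_read_lock();
    map = rcu_dereference(live_map);  /* pairs with rcu_assign_pointer() */
    if (map)
        n = map->nr_entries;
    rcu_read_unlock();
    return n;
}

int main(void)
{
    rcu_register_thread();            /* required by this urcu flavor */
    printf("%d\n", map_nr_entries());
    rcu_unregister_thread();
    return 0;
}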
1434 return vcpu1->arch.apic_arb_prio - vcpu2->arch.apic_arb_prio;
1439 return test_bit(vector, apic->vcpu->arch.ioapic_handled_vectors);
1452 apic->vcpu->arch.pending_ioapic_eoi = vector;
1496 struct kvm_lapic *apic = vcpu->arch.apic;
1563 if (apic->vcpu->arch.tpr_access_reporting)
1780 struct kvm_lapic *apic = vcpu->arch.apic;
1798 u64 timer_advance_ns = vcpu->arch.apic->lapic_timer.timer_advance_ns;
1806 if (vcpu->arch.tsc_scaling_ratio == kvm_caps.default_tsc_scaling_ratio) {
1811 do_div(delay_ns, vcpu->arch.virtual_tsc_khz);
1819 struct kvm_lapic *apic = vcpu->arch.apic;
1831 do_div(ns, vcpu->arch.virtual_tsc_khz);
1836 do_div(ns, vcpu->arch.virtual_tsc_khz);
1847 struct kvm_lapic *apic = vcpu->arch.apic;
1873 vcpu->arch.apic->lapic_timer.expired_tscdeadline &&
1874 vcpu->arch.apic->lapic_timer.timer_advance_ns &&
1918 if (vcpu->arch.apic->lapic_timer.expired_tscdeadline &&
1919 vcpu->arch.apic->lapic_timer.timer_advance_ns)
1938 unsigned long this_tsc_khz = vcpu->arch.virtual_tsc_khz;
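The do_div() calls on virtual_tsc_khz at lines 1806-1938 all perform one conversion: with the guest TSC frequency in kHz, cycles and nanoseconds relate as ns = cycles * 1,000,000 / khz (do_div exists because 64-by-32 division needs a helper on 32-bit x86). Lines 1873-1919 use it for timer advancing, where the timer fires timer_advance_ns early and the host waits out the remainder in guest-TSC terms. The conversion, sketched:

#include <stdint.h>
#include <stdio.h>

/* ns = cycles * 1e6 / khz and its inverse; mind the multiply-first
 * overflow for very large cycle counts. */
static uint64_t cycles_to_ns(uint64_t cycles, uint32_t tsc_khz)
{
    return cycles * 1000000ULL / tsc_khz;
}

static uint64_t ns_to_cycles(uint64_t ns, uint32_t tsc_khz)
{
    return ns * tsc_khz / 1000000ULL;
}

int main(void)
{
    /* 2800 cycles of a 2.8 GHz guest TSC are exactly 1000 ns. */
    printf("%llu\n", (unsigned long long)cycles_to_ns(2800, 2800000));
    return 0;
}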
2089 return vcpu->arch.apic->lapic_timer.hv_timer_in_use;
2174 struct kvm_lapic *apic = vcpu->arch.apic;
2195 restart_apic_timer(vcpu->arch.apic);
2200 struct kvm_lapic *apic = vcpu->arch.apic;
2211 struct kvm_lapic *apic = vcpu->arch.apic;
2240 atomic_inc(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
2242 atomic_dec(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
2442 kvm_lapic_reg_write(vcpu->arch.apic, APIC_EOI, 0);
2449 struct kvm_lapic *apic = vcpu->arch.apic;
2471 struct kvm_lapic *apic = vcpu->arch.apic;
2473 if (!vcpu->arch.apic)
2478 if (!(vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE))
2497 struct kvm_lapic *apic = vcpu->arch.apic;
2507 struct kvm_lapic *apic = vcpu->arch.apic;
2519 apic_set_tpr(vcpu->arch.apic, (cr8 & 0x0f) << 4);
2526 tpr = (u64) kvm_lapic_get_reg(vcpu->arch.apic, APIC_TASKPRI);
2533 u64 old_value = vcpu->arch.apic_base;
2534 struct kvm_lapic *apic = vcpu->arch.apic;
2536 vcpu->arch.apic_base = value;
2553 atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
2569 apic->base_address = apic->vcpu->arch.apic_base &
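Line 2569 (truncated above mid-expression) derives the MMIO base from the IA32_APICBASE MSR; the continuation on the next source line masks off everything but the page-aligned base field, while bits such as BSP (8) and the global enable (11) are control flags rather than address bits. A small illustration using the architectural layout (constants mirror the MSR_IA32_APICBASE_* defines):

#include <stdint.h>

#define APICBASE_BSP    (1ULL << 8)
#define APICBASE_ENABLE (1ULL << 11)
#define APICBASE_BASE   (0xfffffULL << 12)  /* page-aligned base field */

static uint64_t apic_mmio_base(uint64_t apicbase_msr)
{
    return apicbase_msr & APICBASE_BASE;    /* 0xfee00000 by default */
}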
2581 struct kvm_lapic *apic = vcpu->arch.apic;
2606 if (kvm->arch.apic_access_memslot_enabled ||
2607 kvm->arch.apic_access_memslot_inhibited)
2628 kvm->arch.apic_access_memslot_enabled = true;
2639 if (!kvm->arch.apic_access_memslot_enabled)
2646 if (kvm->arch.apic_access_memslot_enabled) {
2656 kvm->arch.apic_access_memslot_enabled = false;
2662 kvm->arch.apic_access_memslot_inhibited = true;
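Lines 2606-2662 implement a one-way latch for the APIC-access page memslot: the slot can be created once ("enabled"), and once inhibited it is deleted and never recreated. A hedged reduction of that state machine (function names hypothetical):

#include <stdbool.h>

struct apic_slot_state {
    bool enabled;
    bool inhibited;
};

static bool slot_alloc(struct apic_slot_state *s)
{
    if (s->enabled || s->inhibited)
        return false;        /* already set up, or permanently blocked */
    /* ... create the memslot ... */
    s->enabled = true;
    return true;
}

static void slot_inhibit(struct apic_slot_state *s)
{
    if (s->enabled) {
        /* ... delete the memslot ... */
        s->enabled = false;
    }
    s->inhibited = true;     /* one-way latch */
}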
2672 struct kvm_lapic *apic = vcpu->arch.apic;
2728 vcpu->arch.pv_eoi.msr_val = 0;
2736 vcpu->arch.apic_arb_prio = 0;
2737 vcpu->arch.apic_attention = 0;
2755 struct kvm_lapic *apic = vcpu->arch.apic;
2784 struct kvm_lapic *apic = vcpu->arch.apic;
2820 vcpu->arch.apic = apic;
2847 vcpu->arch.apic_base = MSR_IA32_APICBASE_ENABLE;
2854 vcpu->arch.apic = NULL;
2861 struct kvm_lapic *apic = vcpu->arch.apic;
2874 u32 lvt0 = kvm_lapic_get_reg(vcpu->arch.apic, APIC_LVT0);
2876 if (!kvm_apic_hw_enabled(vcpu->arch.apic))
2886 struct kvm_lapic *apic = vcpu->arch.apic;
2897 struct kvm_lapic *apic = vcpu->arch.apic;
2935 if (apic_x2apic_mode(vcpu->arch.apic)) {
2940 if (vcpu->kvm->arch.x2apic_format) {
2972 memcpy(s->regs, vcpu->arch.apic->regs, sizeof(*s));
2979 __apic_read(vcpu->arch.apic, APIC_TMCCT));
2986 struct kvm_lapic *apic = vcpu->arch.apic;
2991 kvm_lapic_set_base(vcpu, vcpu->arch.apic_base);
3000 memcpy(vcpu->arch.apic->regs, s->regs, sizeof(*s));
3002 atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
3024 vcpu->arch.apic_arb_prio = 0;
3037 timer = &vcpu->arch.apic->lapic_timer.timer;
3076 if (test_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention))
3077 apic_sync_pv_eoi_from_guest(vcpu, vcpu->arch.apic);
3079 if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
3082 if (kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
3086 apic_set_tpr(vcpu->arch.apic, data & 0xff);
3119 struct kvm_lapic *apic = vcpu->arch.apic;
3123 if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
3135 kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
3143 &vcpu->arch.apic->vapic_cache,
3146 __set_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
3148 __clear_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
3151 vcpu->arch.apic->vapic_addr = vapic_addr;
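Lines 3076-3151 are the legacy vAPIC path: registering a vapic_addr sets the KVM_APIC_CHECK_VAPIC attention bit, which turns on mirroring of TPR between the emulated APIC and a guest-shared byte around each entry/exit (the kernel reads a u32 through a gfn_to_hva_cache and keeps the low byte). A hypothetical reduction with direct memory:

#include <stdint.h>

struct vapic_sketch {
    uint8_t *shared;   /* byte at the registered vapic address */
    uint8_t tpr;       /* emulated APIC_TASKPRI value */
};

static void sync_from_guest(struct vapic_sketch *v)
{
    v->tpr = *v->shared;   /* pick up a TPR the guest wrote directly */
}

static void sync_to_guest(struct vapic_sketch *v)
{
    *v->shared = v->tpr;   /* publish host-side TPR changes */
}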
3201 struct kvm_lapic *apic = vcpu->arch.apic;
3212 struct kvm_lapic *apic = vcpu->arch.apic;
3226 return kvm_lapic_msr_write(vcpu->arch.apic, reg, data);
3234 return kvm_lapic_msr_read(vcpu->arch.apic, reg, data);
3240 struct gfn_to_hva_cache *ghc = &vcpu->arch.pv_eoi.data;
3258 vcpu->arch.pv_eoi.msr_val = data;
3265 struct kvm_lapic *apic = vcpu->arch.apic;
3289 WARN_ON_ONCE(vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED);
3297 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
3299 vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
3302 if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
3307 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
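Lines 3289-3307 encode the INIT/SIPI handshake: an INIT resets the vCPU and, unless it is the BSP, parks it in KVM_MP_STATE_INIT_RECEIVED (wait-for-SIPI); a later SIPI moves it to RUNNABLE. A hypothetical reduction of the transitions (the kernel's version lives in kvm_apic_accept_events()):

#include <assert.h>

enum mp_state { MP_RUNNABLE, MP_INIT_RECEIVED };
enum apic_event { EV_INIT, EV_SIPI };

static enum mp_state accept_event(enum mp_state s, enum apic_event ev,
                                  int is_bsp)
{
    switch (ev) {
    case EV_INIT:
        /* INIT resets the vCPU; the BSP resumes, APs wait for SIPI. */
        return is_bsp ? MP_RUNNABLE : MP_INIT_RECEIVED;
    case EV_SIPI:
        /* SIPI is only honored in wait-for-SIPI state. */
        return s == MP_INIT_RECEIVED ? MP_RUNNABLE : s;
    }
    return s;
}

int main(void)
{
    enum mp_state s = accept_event(MP_RUNNABLE, EV_INIT, 0); /* AP gets INIT */
    assert(s == MP_INIT_RECEIVED);
    assert(accept_event(s, EV_SIPI, 0) == MP_RUNNABLE);      /* then SIPI */
    return 0;
}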