Lines matching refs:apic in arch/x86/kvm/lapic.c (KVM's in-kernel local APIC emulation). The leading number on each match is its line in lapic.c.

69 static int kvm_lapic_msr_read(struct kvm_lapic *apic, u32 reg, u64 *data);
70 static int kvm_lapic_msr_write(struct kvm_lapic *apic, u32 reg, u64 data);
77 static inline void kvm_lapic_set_reg(struct kvm_lapic *apic, int reg_off, u32 val)
79 __kvm_lapic_set_reg(apic->regs, reg_off, val);
88 static __always_inline u64 kvm_lapic_get_reg64(struct kvm_lapic *apic, int reg)
90 return __kvm_lapic_get_reg64(apic->regs, reg);
99 static __always_inline void kvm_lapic_set_reg64(struct kvm_lapic *apic,
102 __kvm_lapic_set_reg64(apic->regs, reg, val);
112 struct kvm_lapic *apic = vcpu->arch.apic;
114 return apic_test_vector(vector, apic->regs + APIC_ISR) ||
115 apic_test_vector(vector, apic->regs + APIC_IRR);
131 static inline int apic_enabled(struct kvm_lapic *apic)
133 return kvm_apic_sw_enabled(apic) && kvm_apic_hw_enabled(apic);
143 static inline u32 kvm_x2apic_id(struct kvm_lapic *apic)
145 return apic->vcpu->vcpu_id;
222 struct kvm_lapic *apic = vcpu->arch.apic;
223 u32 x2apic_id = kvm_x2apic_id(apic);
224 u32 xapic_id = kvm_xapic_id(apic);
250 if (!apic_x2apic_mode(apic) && xapic_id != (u8)vcpu->vcpu_id)
269 if (apic_x2apic_mode(apic) || x2apic_id > 0xff)
270 new->phys_map[x2apic_id] = apic;
272 if (!apic_x2apic_mode(apic) && !new->phys_map[xapic_id])
273 new->phys_map[xapic_id] = apic;
280 if (apic_x2apic_mode(apic))
288 new->phys_map[physical_id] = apic;
297 struct kvm_lapic *apic = vcpu->arch.apic;
306 if (!kvm_apic_sw_enabled(apic))
309 ldr = kvm_lapic_get_reg(apic, APIC_LDR);
313 if (apic_x2apic_mode(apic)) {
317 if (kvm_lapic_get_reg(apic, APIC_DFR) == APIC_DFR_FLAT)
341 if (apic_x2apic_mode(apic)) {
342 WARN_ON_ONCE(ldr != kvm_apic_calc_x2apic_ldr(kvm_x2apic_id(apic)));
359 cluster[ldr] = apic;
418 max_id = max(max_id, kvm_x2apic_id(vcpu->arch.apic));
473 * Write kvm->arch.apic_map before clearing apic->apic_map_dirty.
486 static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val)
490 kvm_lapic_set_reg(apic, APIC_SPIV, val);
492 if (enabled != apic->sw_enabled) {
493 apic->sw_enabled = enabled;
499 atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
504 kvm_make_request(KVM_REQ_APF_READY, apic->vcpu);
505 kvm_xen_sw_enable_lapic(apic->vcpu);
509 static inline void kvm_apic_set_xapic_id(struct kvm_lapic *apic, u8 id)
511 kvm_lapic_set_reg(apic, APIC_ID, id << 24);
512 atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
515 static inline void kvm_apic_set_ldr(struct kvm_lapic *apic, u32 id)
517 kvm_lapic_set_reg(apic, APIC_LDR, id);
518 atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
521 static inline void kvm_apic_set_dfr(struct kvm_lapic *apic, u32 val)
523 kvm_lapic_set_reg(apic, APIC_DFR, val);
524 atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
527 static inline void kvm_apic_set_x2apic_id(struct kvm_lapic *apic, u32 id)
531 WARN_ON_ONCE(id != apic->vcpu->vcpu_id);
533 kvm_lapic_set_reg(apic, APIC_ID, id);
534 kvm_lapic_set_reg(apic, APIC_LDR, ldr);
535 atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
538 static inline int apic_lvt_enabled(struct kvm_lapic *apic, int lvt_type)
540 return !(kvm_lapic_get_reg(apic, lvt_type) & APIC_LVT_MASKED);
543 static inline int apic_lvtt_oneshot(struct kvm_lapic *apic)
545 return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_ONESHOT;
548 static inline int apic_lvtt_period(struct kvm_lapic *apic)
550 return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_PERIODIC;
553 static inline int apic_lvtt_tscdeadline(struct kvm_lapic *apic)
555 return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_TSCDEADLINE;
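The three predicates above compare lapic_timer.timer_mode against the LVT Timer mode field. A minimal sketch of what they test, assuming the architectural LVT layout (mode in bits 18:17, matching the APIC_LVT_TIMER_* constants in apicdef.h); the EXAMPLE_* names are hypothetical:

    /* Sketch only: the mode values behind the predicates above. */
    #define EXAMPLE_LVTT_ONESHOT        (0U << 17)    /* 00b */
    #define EXAMPLE_LVTT_PERIODIC       (1U << 17)    /* 01b */
    #define EXAMPLE_LVTT_TSCDEADLINE    (2U << 17)    /* 10b */

    static inline u32 example_lvtt_mode(u32 lvtt, u32 mode_mask)
    {
            /* mode_mask (cf. timer_mode_mask elsewhere in this listing) is
             * 3 << 17 when TSC-deadline is advertised, 1 << 17 otherwise. */
            return lvtt & mode_mask;
    }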
563 static inline bool kvm_lapic_lvt_supported(struct kvm_lapic *apic, int lvt_index)
565 return apic->nr_lvt_entries > lvt_index;
575 struct kvm_lapic *apic = vcpu->arch.apic;
581 v = APIC_VERSION | ((apic->nr_lvt_entries - 1) << 16);
593 kvm_lapic_set_reg(apic, APIC_LVR, v);
599 struct kvm_lapic *apic = vcpu->arch.apic;
602 if (!lapic_in_kernel(vcpu) || nr_lvt_entries == apic->nr_lvt_entries)
606 for (i = apic->nr_lvt_entries; i < nr_lvt_entries; i++)
607 kvm_lapic_set_reg(apic, APIC_LVTx(i), APIC_LVT_MASKED);
609 apic->nr_lvt_entries = nr_lvt_entries;
692 struct kvm_lapic *apic = vcpu->arch.apic;
693 bool irr_updated = __kvm_apic_update_irr(pir, apic->regs, max_irr);
695 if (unlikely(!apic->apicv_active && irr_updated))
696 apic->irr_pending = true;
701 static inline int apic_search_irr(struct kvm_lapic *apic)
703 return find_highest_vector(apic->regs + APIC_IRR);
706 static inline int apic_find_highest_irr(struct kvm_lapic *apic)
714 if (!apic->irr_pending)
717 result = apic_search_irr(apic);
723 static inline void apic_clear_irr(int vec, struct kvm_lapic *apic)
725 if (unlikely(apic->apicv_active)) {
727 kvm_lapic_clear_vector(vec, apic->regs + APIC_IRR);
728 static_call_cond(kvm_x86_hwapic_irr_update)(apic->vcpu,
729 apic_find_highest_irr(apic));
731 apic->irr_pending = false;
732 kvm_lapic_clear_vector(vec, apic->regs + APIC_IRR);
733 if (apic_search_irr(apic) != -1)
734 apic->irr_pending = true;
740 apic_clear_irr(vec, vcpu->arch.apic);
744 static inline void apic_set_isr(int vec, struct kvm_lapic *apic)
746 if (__apic_test_and_set_vector(vec, apic->regs + APIC_ISR))
754 if (unlikely(apic->apicv_active))
757 ++apic->isr_count;
758 BUG_ON(apic->isr_count > MAX_APIC_VECTOR);
764 apic->highest_isr_cache = vec;
768 static inline int apic_find_highest_isr(struct kvm_lapic *apic)
776 if (!apic->isr_count)
778 if (likely(apic->highest_isr_cache != -1))
779 return apic->highest_isr_cache;
781 result = find_highest_vector(apic->regs + APIC_ISR);
787 static inline void apic_clear_isr(int vec, struct kvm_lapic *apic)
789 if (!__apic_test_and_clear_vector(vec, apic->regs + APIC_ISR))
799 if (unlikely(apic->apicv_active))
800 static_call_cond(kvm_x86_hwapic_isr_update)(apic_find_highest_isr(apic));
802 --apic->isr_count;
803 BUG_ON(apic->isr_count < 0);
804 apic->highest_isr_cache = -1;
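apic_search_irr() and apic_find_highest_isr() above both lean on find_highest_vector(). A minimal sketch of that scan, assuming the layout KVM mirrors from hardware (a 256-bit register stored as eight 32-bit words, one per 16-byte stride); example_find_highest_vector() is hypothetical:

    /* Sketch: highest pending vector in an IRR/ISR-style register, or -1. */
    static int example_find_highest_vector(void *bitmap)
    {
            int word;
            u32 *reg;

            for (word = 7; word >= 0; word--) {
                    reg = bitmap + (word << 4);    /* 0x10 bytes per 32 vectors */
                    if (*reg)
                            return fls(*reg) - 1 + (word << 5);
            }
            return -1;    /* nothing pending */
    }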
815 return apic_find_highest_irr(vcpu->arch.apic);
819 static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
826 struct kvm_lapic *apic = vcpu->arch.apic;
828 return __apic_accept_irq(apic, irq->delivery_mode, irq->vector,
932 static int apic_has_interrupt_for_ppr(struct kvm_lapic *apic, u32 ppr)
936 highest_irr = static_call(kvm_x86_sync_pir_to_irr)(apic->vcpu);
938 highest_irr = apic_find_highest_irr(apic);
944 static bool __apic_update_ppr(struct kvm_lapic *apic, u32 *new_ppr)
949 old_ppr = kvm_lapic_get_reg(apic, APIC_PROCPRI);
950 tpr = kvm_lapic_get_reg(apic, APIC_TASKPRI);
951 isr = apic_find_highest_isr(apic);
961 kvm_lapic_set_reg(apic, APIC_PROCPRI, ppr);
966 static void apic_update_ppr(struct kvm_lapic *apic)
970 if (__apic_update_ppr(apic, &ppr) &&
971 apic_has_interrupt_for_ppr(apic, ppr) != -1)
972 kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
977 apic_update_ppr(vcpu->arch.apic);
981 static void apic_set_tpr(struct kvm_lapic *apic, u32 tpr)
983 kvm_lapic_set_reg(apic, APIC_TASKPRI, tpr);
984 apic_update_ppr(apic);
987 static bool kvm_apic_broadcast(struct kvm_lapic *apic, u32 mda)
989 return mda == (apic_x2apic_mode(apic) ?
993 static bool kvm_apic_match_physical_addr(struct kvm_lapic *apic, u32 mda)
995 if (kvm_apic_broadcast(apic, mda))
1007 if (apic_x2apic_mode(apic) || mda > 0xff)
1008 return mda == kvm_x2apic_id(apic);
1010 return mda == kvm_xapic_id(apic);
1013 static bool kvm_apic_match_logical_addr(struct kvm_lapic *apic, u32 mda)
1017 if (kvm_apic_broadcast(apic, mda))
1020 logical_id = kvm_lapic_get_reg(apic, APIC_LDR);
1022 if (apic_x2apic_mode(apic))
1028 switch (kvm_lapic_get_reg(apic, APIC_DFR)) {
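The DFR switch above implements the architectural logical-destination encodings. A hedged sketch of the decode (example_decode_logical_id() is hypothetical; layouts per the SDM: x2APIC LDR = 16-bit cluster / 16-bit mask, xAPIC flat = 8-bit mask in bits 31:24, xAPIC cluster = cluster id in bits 31:28 with a 4-bit mask in bits 27:24):

    /* Sketch: split a logical ID into (cluster, in-cluster bitmask). */
    static void example_decode_logical_id(bool x2apic, u32 dfr, u32 ldr,
                                          u32 *cluster, u16 *mask)
    {
            if (x2apic) {
                    *cluster = ldr >> 16;
                    *mask = ldr & 0xffff;
            } else if (dfr == APIC_DFR_FLAT) {
                    *cluster = 0;
                    *mask = (ldr >> 24) & 0xff;
            } else {    /* APIC_DFR_CLUSTER */
                    *cluster = ldr >> 28;
                    *mask = (ldr >> 24) & 0xf;
            }
    }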
1070 struct kvm_lapic *target = vcpu->arch.apic;
1291 static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
1296 struct kvm_vcpu *vcpu = apic->vcpu;
1309 if (unlikely(!apic_enabled(apic)))
1319 if (apic_test_vector(vector, apic->regs + APIC_TMR) != !!trig_mode) {
1322 apic->regs + APIC_TMR);
1325 apic->regs + APIC_TMR);
1328 static_call(kvm_x86_deliver_interrupt)(apic, delivery_mode,
1356 apic->pending_events = (1UL << KVM_APIC_INIT);
1364 apic->sipi_vector = vector;
1367 set_bit(KVM_APIC_SIPI, &apic->pending_events);
1437 static bool kvm_ioapic_handles_vector(struct kvm_lapic *apic, int vector)
1439 return test_bit(vector, apic->vcpu->arch.ioapic_handled_vectors);
1442 static void kvm_ioapic_send_eoi(struct kvm_lapic *apic, int vector)
1447 if (!kvm_ioapic_handles_vector(apic, vector))
1451 if (irqchip_split(apic->vcpu->kvm)) {
1452 apic->vcpu->arch.pending_ioapic_eoi = vector;
1453 kvm_make_request(KVM_REQ_IOAPIC_EOI_EXIT, apic->vcpu);
1457 if (apic_test_vector(vector, apic->regs + APIC_TMR))
1462 kvm_ioapic_update_eoi(apic->vcpu, vector, trigger_mode);
1465 static int apic_set_eoi(struct kvm_lapic *apic)
1467 int vector = apic_find_highest_isr(apic);
1469 trace_kvm_eoi(apic, vector);
1478 apic_clear_isr(vector, apic);
1479 apic_update_ppr(apic);
1481 if (to_hv_vcpu(apic->vcpu) &&
1482 test_bit(vector, to_hv_synic(apic->vcpu)->vec_bitmap))
1483 kvm_hv_synic_send_eoi(apic->vcpu, vector);
1485 kvm_ioapic_send_eoi(apic, vector);
1486 kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
1496 struct kvm_lapic *apic = vcpu->arch.apic;
1498 trace_kvm_eoi(apic, vector);
1500 kvm_ioapic_send_eoi(apic, vector);
1501 kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
1505 void kvm_apic_send_ipi(struct kvm_lapic *apic, u32 icr_low, u32 icr_high)
1519 if (apic_x2apic_mode(apic))
1526 kvm_irq_delivery_to_apic(apic->vcpu->kvm, apic, &irq, NULL);
1530 static u32 apic_get_tmcct(struct kvm_lapic *apic)
1535 ASSERT(apic != NULL);
1538 if (kvm_lapic_get_reg(apic, APIC_TMICT) == 0 ||
1539 apic->lapic_timer.period == 0)
1543 remaining = ktime_sub(apic->lapic_timer.target_expiration, now);
1547 ns = mod_64(ktime_to_ns(remaining), apic->lapic_timer.period);
1548 return div64_u64(ns, (APIC_BUS_CYCLE_NS * apic->divide_count));
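apic_get_tmcct() above is "time left until expiry, folded into the period, divided by one timer tick". The same arithmetic as a standalone sketch (example_tmcct() is hypothetical; the kernel uses mod_64()/div64_u64() as the matches show):

    /* Sketch: TMCCT readback from nanoseconds-to-expiry. */
    static u32 example_tmcct(s64 remaining_ns, u64 period_ns, u32 divide_count)
    {
            if (!period_ns)
                    return 0;
            if (remaining_ns < 0)           /* already expired */
                    remaining_ns = 0;
            remaining_ns %= period_ns;      /* mod_64() in the kernel */
            /* one count lasts APIC_BUS_CYCLE_NS * divide_count ns */
            return remaining_ns / (APIC_BUS_CYCLE_NS * divide_count);
    }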
1551 static void __report_tpr_access(struct kvm_lapic *apic, bool write)
1553 struct kvm_vcpu *vcpu = apic->vcpu;
1561 static inline void report_tpr_access(struct kvm_lapic *apic, bool write)
1563 if (apic->vcpu->arch.tpr_access_reporting)
1564 __report_tpr_access(apic, write);
1567 static u32 __apic_read(struct kvm_lapic *apic, unsigned int offset)
1579 if (apic_lvtt_tscdeadline(apic))
1582 val = apic_get_tmcct(apic);
1585 apic_update_ppr(apic);
1586 val = kvm_lapic_get_reg(apic, offset);
1589 report_tpr_access(apic, false);
1592 val = kvm_lapic_get_reg(apic, offset);
1608 u64 kvm_lapic_readable_reg_mask(struct kvm_lapic *apic)
1633 if (kvm_lapic_lvt_supported(apic, LVT_CMCI))
1637 if (!apic_x2apic_mode(apic))
1646 static int kvm_lapic_reg_read(struct kvm_lapic *apic, u32 offset, int len,
1656 WARN_ON_ONCE(apic_x2apic_mode(apic) && offset == APIC_ICR);
1662 !(kvm_lapic_readable_reg_mask(apic) & APIC_REG_MASK(offset)))
1665 result = __apic_read(apic, offset & ~0xf);
1683 static int apic_mmio_in_range(struct kvm_lapic *apic, gpa_t addr)
1685 return addr >= apic->base_address &&
1686 addr < apic->base_address + LAPIC_MMIO_LENGTH;
1692 struct kvm_lapic *apic = to_lapic(this);
1693 u32 offset = address - apic->base_address;
1695 if (!apic_mmio_in_range(apic, address))
1698 if (!kvm_apic_hw_enabled(apic) || apic_x2apic_mode(apic)) {
1707 kvm_lapic_reg_read(apic, offset, len, data);
1712 static void update_divide_count(struct kvm_lapic *apic)
1716 tdcr = kvm_lapic_get_reg(apic, APIC_TDCR);
1719 apic->divide_count = 0x1 << (tmp2 & 0x7);
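update_divide_count() above decodes the architectural Divide Configuration Register: bits 0, 1 and 3 of TDCR select a power-of-two divider. The same decode as a sketch (example_divide_count() is hypothetical):

    /* Sketch: TDCR -> divide_count (1, 2, 4, ... 128; 111b means 1). */
    static u32 example_divide_count(u32 tdcr)
    {
            u32 bits = ((tdcr & 0x3) | ((tdcr & 0x8) >> 1)) + 1;

            return 1U << (bits & 0x7);
    }

For example, TDCR = 0xb (all meaningful bits set; cf. the "val & 0xb" match later in this listing) decodes to divide-by-1, while TDCR = 0 yields divide-by-2.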
1722 static void limit_periodic_timer_frequency(struct kvm_lapic *apic)
1729 if (apic_lvtt_period(apic) && apic->lapic_timer.period) {
1732 if (apic->lapic_timer.period < min_period) {
1736 apic->vcpu->vcpu_id,
1737 apic->lapic_timer.period, min_period);
1738 apic->lapic_timer.period = min_period;
1743 static void cancel_hv_timer(struct kvm_lapic *apic);
1745 static void cancel_apic_timer(struct kvm_lapic *apic)
1747 hrtimer_cancel(&apic->lapic_timer.timer);
1749 if (apic->lapic_timer.hv_timer_in_use)
1750 cancel_hv_timer(apic);
1752 atomic_set(&apic->lapic_timer.pending, 0);
1755 static void apic_update_lvtt(struct kvm_lapic *apic)
1757 u32 timer_mode = kvm_lapic_get_reg(apic, APIC_LVTT) &
1758 apic->lapic_timer.timer_mode_mask;
1760 if (apic->lapic_timer.timer_mode != timer_mode) {
1761 if (apic_lvtt_tscdeadline(apic) != (timer_mode ==
1763 cancel_apic_timer(apic);
1764 kvm_lapic_set_reg(apic, APIC_TMICT, 0);
1765 apic->lapic_timer.period = 0;
1766 apic->lapic_timer.tscdeadline = 0;
1768 apic->lapic_timer.timer_mode = timer_mode;
1769 limit_periodic_timer_frequency(apic);
1780 struct kvm_lapic *apic = vcpu->arch.apic;
1781 u32 reg = kvm_lapic_get_reg(apic, APIC_LVTT);
1783 if (kvm_apic_hw_enabled(apic)) {
1785 void *bitmap = apic->regs + APIC_ISR;
1787 if (apic->apicv_active)
1788 bitmap = apic->regs + APIC_IRR;
1798 u64 timer_advance_ns = vcpu->arch.apic->lapic_timer.timer_advance_ns;
1819 struct kvm_lapic *apic = vcpu->arch.apic;
1820 u32 timer_advance_ns = apic->lapic_timer.timer_advance_ns;
1842 apic->lapic_timer.timer_advance_ns = timer_advance_ns;
1847 struct kvm_lapic *apic = vcpu->arch.apic;
1850 tsc_deadline = apic->lapic_timer.expired_tscdeadline;
1851 apic->lapic_timer.expired_tscdeadline = 0;
1873 vcpu->arch.apic->lapic_timer.expired_tscdeadline &&
1874 vcpu->arch.apic->lapic_timer.timer_advance_ns &&
1880 static void kvm_apic_inject_pending_timer_irqs(struct kvm_lapic *apic)
1882 struct kvm_timer *ktimer = &apic->lapic_timer;
1884 kvm_apic_local_deliver(apic, APIC_LVTT);
1885 if (apic_lvtt_tscdeadline(apic)) {
1887 } else if (apic_lvtt_oneshot(apic)) {
1893 static void apic_timer_expired(struct kvm_lapic *apic, bool from_timer_fn)
1895 struct kvm_vcpu *vcpu = apic->vcpu;
1896 struct kvm_timer *ktimer = &apic->lapic_timer;
1898 if (atomic_read(&apic->lapic_timer.pending))
1901 if (apic_lvtt_tscdeadline(apic) || ktimer->hv_timer_in_use)
1904 if (!from_timer_fn && apic->apicv_active) {
1906 kvm_apic_inject_pending_timer_irqs(apic);
1910 if (kvm_use_posted_timer_interrupt(apic->vcpu)) {
1918 if (vcpu->arch.apic->lapic_timer.expired_tscdeadline &&
1919 vcpu->arch.apic->lapic_timer.timer_advance_ns)
1921 kvm_apic_inject_pending_timer_irqs(apic);
1925 atomic_inc(&apic->lapic_timer.pending);
1931 static void start_sw_tscdeadline(struct kvm_lapic *apic)
1933 struct kvm_timer *ktimer = &apic->lapic_timer;
1937 struct kvm_vcpu *vcpu = apic->vcpu;
1954 likely(ns > apic->lapic_timer.timer_advance_ns)) {
1959 apic_timer_expired(apic, false);
1964 static inline u64 tmict_to_ns(struct kvm_lapic *apic, u32 tmict)
1966 return (u64)tmict * APIC_BUS_CYCLE_NS * (u64)apic->divide_count;
1969 static void update_target_expiration(struct kvm_lapic *apic, uint32_t old_divisor)
1974 apic->lapic_timer.period =
1975 tmict_to_ns(apic, kvm_lapic_get_reg(apic, APIC_TMICT));
1976 limit_periodic_timer_frequency(apic);
1979 remaining = ktime_sub(apic->lapic_timer.target_expiration, now);
1985 apic->divide_count, old_divisor);
1987 apic->lapic_timer.tscdeadline +=
1988 nsec_to_cycles(apic->vcpu, ns_remaining_new) -
1989 nsec_to_cycles(apic->vcpu, ns_remaining_old);
1990 apic->lapic_timer.target_expiration = ktime_add_ns(now, ns_remaining_new);
1993 static bool set_target_expiration(struct kvm_lapic *apic, u32 count_reg)
2000 apic->lapic_timer.period =
2001 tmict_to_ns(apic, kvm_lapic_get_reg(apic, APIC_TMICT));
2003 if (!apic->lapic_timer.period) {
2004 apic->lapic_timer.tscdeadline = 0;
2008 limit_periodic_timer_frequency(apic);
2009 deadline = apic->lapic_timer.period;
2011 if (apic_lvtt_period(apic) || apic_lvtt_oneshot(apic)) {
2013 deadline = tmict_to_ns(apic,
2014 kvm_lapic_get_reg(apic, count_reg));
2016 if (apic_lvtt_period(apic))
2017 deadline = apic->lapic_timer.period;
2021 else if (unlikely(deadline > apic->lapic_timer.period)) {
2026 apic->vcpu->vcpu_id,
2028 kvm_lapic_get_reg(apic, count_reg),
2029 deadline, apic->lapic_timer.period);
2030 kvm_lapic_set_reg(apic, count_reg, 0);
2031 deadline = apic->lapic_timer.period;
2036 apic->lapic_timer.tscdeadline = kvm_read_l1_tsc(apic->vcpu, tscl) +
2037 nsec_to_cycles(apic->vcpu, deadline);
2038 apic->lapic_timer.target_expiration = ktime_add_ns(now, deadline);
2043 static void advance_periodic_target_expiration(struct kvm_lapic *apic)
2056 apic->lapic_timer.target_expiration =
2057 ktime_add_ns(apic->lapic_timer.target_expiration,
2058 apic->lapic_timer.period);
2059 delta = ktime_sub(apic->lapic_timer.target_expiration, now);
2060 apic->lapic_timer.tscdeadline = kvm_read_l1_tsc(apic->vcpu, tscl) +
2061 nsec_to_cycles(apic->vcpu, delta);
2064 static void start_sw_period(struct kvm_lapic *apic)
2066 if (!apic->lapic_timer.period)
2070 apic->lapic_timer.target_expiration)) {
2071 apic_timer_expired(apic, false);
2073 if (apic_lvtt_oneshot(apic))
2076 advance_periodic_target_expiration(apic);
2079 hrtimer_start(&apic->lapic_timer.timer,
2080 apic->lapic_timer.target_expiration,
2089 return vcpu->arch.apic->lapic_timer.hv_timer_in_use;
2092 static void cancel_hv_timer(struct kvm_lapic *apic)
2095 WARN_ON(!apic->lapic_timer.hv_timer_in_use);
2096 static_call(kvm_x86_cancel_hv_timer)(apic->vcpu);
2097 apic->lapic_timer.hv_timer_in_use = false;
2100 static bool start_hv_timer(struct kvm_lapic *apic)
2102 struct kvm_timer *ktimer = &apic->lapic_timer;
2103 struct kvm_vcpu *vcpu = apic->vcpu;
2124 if (!apic_lvtt_period(apic)) {
2130 cancel_hv_timer(apic);
2132 apic_timer_expired(apic, false);
2133 cancel_hv_timer(apic);
2142 static void start_sw_timer(struct kvm_lapic *apic)
2144 struct kvm_timer *ktimer = &apic->lapic_timer;
2147 if (apic->lapic_timer.hv_timer_in_use)
2148 cancel_hv_timer(apic);
2149 if (!apic_lvtt_period(apic) && atomic_read(&ktimer->pending))
2152 if (apic_lvtt_period(apic) || apic_lvtt_oneshot(apic))
2153 start_sw_period(apic);
2154 else if (apic_lvtt_tscdeadline(apic))
2155 start_sw_tscdeadline(apic);
2156 trace_kvm_hv_timer_state(apic->vcpu->vcpu_id, false);
2159 static void restart_apic_timer(struct kvm_lapic *apic)
2163 if (!apic_lvtt_period(apic) && atomic_read(&apic->lapic_timer.pending))
2166 if (!start_hv_timer(apic))
2167 start_sw_timer(apic);
2174 struct kvm_lapic *apic = vcpu->arch.apic;
2178 if (!apic->lapic_timer.hv_timer_in_use)
2181 apic_timer_expired(apic, false);
2182 cancel_hv_timer(apic);
2184 if (apic_lvtt_period(apic) && apic->lapic_timer.period) {
2185 advance_periodic_target_expiration(apic);
2186 restart_apic_timer(apic);
2195 restart_apic_timer(vcpu->arch.apic);
2200 struct kvm_lapic *apic = vcpu->arch.apic;
2204 if (apic->lapic_timer.hv_timer_in_use)
2205 start_sw_timer(apic);
2211 struct kvm_lapic *apic = vcpu->arch.apic;
2213 WARN_ON(!apic->lapic_timer.hv_timer_in_use);
2214 restart_apic_timer(apic);
2217 static void __start_apic_timer(struct kvm_lapic *apic, u32 count_reg)
2219 atomic_set(&apic->lapic_timer.pending, 0);
2221 if ((apic_lvtt_period(apic) || apic_lvtt_oneshot(apic))
2222 && !set_target_expiration(apic, count_reg))
2225 restart_apic_timer(apic);
2228 static void start_apic_timer(struct kvm_lapic *apic)
2230 __start_apic_timer(apic, APIC_TMICT);
2233 static void apic_manage_nmi_watchdog(struct kvm_lapic *apic, u32 lvt0_val)
2237 if (apic->lvt0_in_nmi_mode != lvt0_in_nmi_mode) {
2238 apic->lvt0_in_nmi_mode = lvt0_in_nmi_mode;
2240 atomic_inc(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
2242 atomic_dec(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
2256 static int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
2264 if (!apic_x2apic_mode(apic)) {
2265 kvm_apic_set_xapic_id(apic, val >> 24);
2272 report_tpr_access(apic, true);
2273 apic_set_tpr(apic, val & 0xff);
2277 apic_set_eoi(apic);
2281 if (!apic_x2apic_mode(apic))
2282 kvm_apic_set_ldr(apic, val & APIC_LDR_MASK);
2288 if (!apic_x2apic_mode(apic))
2289 kvm_apic_set_dfr(apic, val | 0x0FFFFFFF);
2296 if (kvm_lapic_get_reg(apic, APIC_LVR) & APIC_LVR_DIRECTED_EOI)
2298 apic_set_spiv(apic, val & mask);
2302 for (i = 0; i < apic->nr_lvt_entries; i++) {
2303 kvm_lapic_set_reg(apic, APIC_LVTx(i),
2304 kvm_lapic_get_reg(apic, APIC_LVTx(i)) | APIC_LVT_MASKED);
2306 apic_update_lvtt(apic);
2307 atomic_set(&apic->lapic_timer.pending, 0);
2313 WARN_ON_ONCE(apic_x2apic_mode(apic));
2317 kvm_apic_send_ipi(apic, val, kvm_lapic_get_reg(apic, APIC_ICR2));
2318 kvm_lapic_set_reg(apic, APIC_ICR, val);
2321 if (apic_x2apic_mode(apic))
2324 kvm_lapic_set_reg(apic, APIC_ICR2, val & 0xff000000);
2328 apic_manage_nmi_watchdog(apic, val);
2336 if (!kvm_lapic_lvt_supported(apic, index)) {
2340 if (!kvm_apic_sw_enabled(apic))
2343 kvm_lapic_set_reg(apic, reg, val);
2348 if (!kvm_apic_sw_enabled(apic))
2350 val &= (apic_lvt_mask[0] | apic->lapic_timer.timer_mode_mask);
2351 kvm_lapic_set_reg(apic, APIC_LVTT, val);
2352 apic_update_lvtt(apic);
2356 if (apic_lvtt_tscdeadline(apic))
2359 cancel_apic_timer(apic);
2360 kvm_lapic_set_reg(apic, APIC_TMICT, val);
2361 start_apic_timer(apic);
2365 uint32_t old_divisor = apic->divide_count;
2367 kvm_lapic_set_reg(apic, APIC_TDCR, val & 0xb);
2368 update_divide_count(apic);
2369 if (apic->divide_count != old_divisor &&
2370 apic->lapic_timer.period) {
2371 hrtimer_cancel(&apic->lapic_timer.timer);
2372 update_target_expiration(apic, old_divisor);
2373 restart_apic_timer(apic);
2378 if (apic_x2apic_mode(apic) && val != 0)
2387 if (!apic_x2apic_mode(apic) || (val & ~APIC_VECTOR_MASK))
2390 kvm_apic_send_ipi(apic, APIC_DEST_SELF | val, 0);
2402 kvm_recalculate_apic_map(apic->vcpu->kvm);
2410 struct kvm_lapic *apic = to_lapic(this);
2411 unsigned int offset = address - apic->base_address;
2414 if (!apic_mmio_in_range(apic, address))
2417 if (!kvm_apic_hw_enabled(apic) || apic_x2apic_mode(apic)) {
2435 kvm_lapic_reg_write(apic, offset & 0xff0, val);
2442 kvm_lapic_reg_write(vcpu->arch.apic, APIC_EOI, 0);
2449 struct kvm_lapic *apic = vcpu->arch.apic;
2462 if (apic_x2apic_mode(apic) && offset == APIC_ICR)
2463 kvm_x2apic_icr_write(apic, kvm_lapic_get_reg64(apic, APIC_ICR));
2465 kvm_lapic_reg_write(apic, offset, kvm_lapic_get_reg(apic, offset));
2471 struct kvm_lapic *apic = vcpu->arch.apic;
2473 if (!vcpu->arch.apic)
2476 hrtimer_cancel(&apic->lapic_timer.timer);
2481 if (!apic->sw_enabled)
2484 if (apic->regs)
2485 free_page((unsigned long)apic->regs);
2487 kfree(apic);
2497 struct kvm_lapic *apic = vcpu->arch.apic;
2499 if (!kvm_apic_present(vcpu) || !apic_lvtt_tscdeadline(apic))
2502 return apic->lapic_timer.tscdeadline;
2507 struct kvm_lapic *apic = vcpu->arch.apic;
2509 if (!kvm_apic_present(vcpu) || !apic_lvtt_tscdeadline(apic))
2512 hrtimer_cancel(&apic->lapic_timer.timer);
2513 apic->lapic_timer.tscdeadline = data;
2514 start_apic_timer(apic);
2519 apic_set_tpr(vcpu->arch.apic, (cr8 & 0x0f) << 4);
2526 tpr = (u64) kvm_lapic_get_reg(vcpu->arch.apic, APIC_TASKPRI);
2534 struct kvm_lapic *apic = vcpu->arch.apic;
2541 if (!apic)
2547 kvm_apic_set_xapic_id(apic, vcpu->vcpu_id);
2553 atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
2559 kvm_apic_set_x2apic_id(apic, vcpu->vcpu_id);
2561 kvm_apic_set_xapic_id(apic, vcpu->vcpu_id);
2569 apic->base_address = apic->vcpu->arch.apic_base &
2573 apic->base_address != APIC_DEFAULT_PHYS_BASE) {
2574 kvm_set_apicv_inhibit(apic->vcpu->kvm,
2581 struct kvm_lapic *apic = vcpu->arch.apic;
2583 if (apic->apicv_active) {
2585 apic->irr_pending = true;
2586 apic->isr_count = 1;
2594 apic->isr_count = count_vectors(apic->regs + APIC_ISR);
2596 apic->highest_isr_cache = -1;
2672 struct kvm_lapic *apic = vcpu->arch.apic;
2685 if (!apic)
2688 /* Stop the timer in case it's a reset to an active apic */
2689 hrtimer_cancel(&apic->lapic_timer.timer);
2693 kvm_apic_set_xapic_id(apic, vcpu->vcpu_id);
2694 kvm_apic_set_version(apic->vcpu);
2696 for (i = 0; i < apic->nr_lvt_entries; i++)
2697 kvm_lapic_set_reg(apic, APIC_LVTx(i), APIC_LVT_MASKED);
2698 apic_update_lvtt(apic);
2701 kvm_lapic_set_reg(apic, APIC_LVT0,
2703 apic_manage_nmi_watchdog(apic, kvm_lapic_get_reg(apic, APIC_LVT0));
2705 kvm_apic_set_dfr(apic, 0xffffffffU);
2706 apic_set_spiv(apic, 0xff);
2707 kvm_lapic_set_reg(apic, APIC_TASKPRI, 0);
2708 if (!apic_x2apic_mode(apic))
2709 kvm_apic_set_ldr(apic, 0);
2710 kvm_lapic_set_reg(apic, APIC_ESR, 0);
2711 if (!apic_x2apic_mode(apic)) {
2712 kvm_lapic_set_reg(apic, APIC_ICR, 0);
2713 kvm_lapic_set_reg(apic, APIC_ICR2, 0);
2715 kvm_lapic_set_reg64(apic, APIC_ICR, 0);
2717 kvm_lapic_set_reg(apic, APIC_TDCR, 0);
2718 kvm_lapic_set_reg(apic, APIC_TMICT, 0);
2720 kvm_lapic_set_reg(apic, APIC_IRR + 0x10 * i, 0);
2721 kvm_lapic_set_reg(apic, APIC_ISR + 0x10 * i, 0);
2722 kvm_lapic_set_reg(apic, APIC_TMR + 0x10 * i, 0);
2725 update_divide_count(apic);
2726 atomic_set(&apic->lapic_timer.pending, 0);
2729 apic_update_ppr(apic);
2730 if (apic->apicv_active) {
2748 static bool lapic_is_periodic(struct kvm_lapic *apic)
2750 return apic_lvtt_period(apic);
2755 struct kvm_lapic *apic = vcpu->arch.apic;
2757 if (apic_enabled(apic) && apic_lvt_enabled(apic, APIC_LVTT))
2758 return atomic_read(&apic->lapic_timer.pending);
2763 int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type)
2765 u32 reg = kvm_lapic_get_reg(apic, lvt_type);
2769 if (kvm_apic_hw_enabled(apic) && !(reg & APIC_LVT_MASKED)) {
2774 r = __apic_accept_irq(apic, mode, vector, 1, trig_mode, NULL);
2776 kvm_lapic_set_reg(apic, APIC_LVTPC, reg | APIC_LVT_MASKED);
2784 struct kvm_lapic *apic = vcpu->arch.apic;
2786 if (apic)
2787 kvm_apic_local_deliver(apic, APIC_LVT0);
2798 struct kvm_lapic *apic = container_of(ktimer, struct kvm_lapic, lapic_timer);
2800 apic_timer_expired(apic, true);
2802 if (lapic_is_periodic(apic)) {
2803 advance_periodic_target_expiration(apic);
2812 struct kvm_lapic *apic;
2816 apic = kzalloc(sizeof(*apic), GFP_KERNEL_ACCOUNT);
2817 if (!apic)
2820 vcpu->arch.apic = apic;
2822 apic->regs = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
2823 if (!apic->regs) {
2824 printk(KERN_ERR "malloc apic regs error for vcpu %x\n",
2828 apic->vcpu = vcpu;
2830 apic->nr_lvt_entries = kvm_apic_calc_nr_lvt_entries(vcpu);
2832 hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
2834 apic->lapic_timer.timer.function = apic_timer_fn;
2836 apic->lapic_timer.timer_advance_ns = LAPIC_TIMER_ADVANCE_NS_INIT;
2839 apic->lapic_timer.timer_advance_ns = timer_advance_ns;
2849 kvm_iodevice_init(&apic->dev, &apic_mmio_ops);
2853 kfree(apic);
2854 vcpu->arch.apic = NULL;
2861 struct kvm_lapic *apic = vcpu->arch.apic;
2867 __apic_update_ppr(apic, &ppr);
2868 return apic_has_interrupt_for_ppr(apic, ppr);
2874 u32 lvt0 = kvm_lapic_get_reg(vcpu->arch.apic, APIC_LVT0);
2876 if (!kvm_apic_hw_enabled(vcpu->arch.apic))
2886 struct kvm_lapic *apic = vcpu->arch.apic;
2888 if (atomic_read(&apic->lapic_timer.pending) > 0) {
2889 kvm_apic_inject_pending_timer_irqs(apic);
2890 atomic_set(&apic->lapic_timer.pending, 0);
2897 struct kvm_lapic *apic = vcpu->arch.apic;
2910 apic_clear_irr(vector, apic);
2917 apic_update_ppr(apic);
2925 apic_set_isr(vector, apic);
2926 __apic_update_ppr(apic, &ppr);
2935 if (apic_x2apic_mode(vcpu->arch.apic)) {
2972 memcpy(s->regs, vcpu->arch.apic->regs, sizeof(*s));
2979 __apic_read(vcpu->arch.apic, APIC_TMCCT));
2986 struct kvm_lapic *apic = vcpu->arch.apic;
2993 apic_set_spiv(apic, *((u32 *)(s->regs + APIC_SPIV)));
3000 memcpy(vcpu->arch.apic->regs, s->regs, sizeof(*s));
3002 atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
3006 apic_update_ppr(apic);
3007 cancel_apic_timer(apic);
3008 apic->lapic_timer.expired_tscdeadline = 0;
3009 apic_update_lvtt(apic);
3010 apic_manage_nmi_watchdog(apic, kvm_lapic_get_reg(apic, APIC_LVT0));
3011 update_divide_count(apic);
3012 __start_apic_timer(apic, APIC_TMCCT);
3013 kvm_lapic_set_reg(apic, APIC_TMCCT, 0);
3015 if (apic->apicv_active) {
3017 static_call_cond(kvm_x86_hwapic_irr_update)(vcpu, apic_find_highest_irr(apic));
3018 static_call_cond(kvm_x86_hwapic_isr_update)(apic_find_highest_isr(apic));
3037 timer = &vcpu->arch.apic->lapic_timer.timer;
3050 struct kvm_lapic *apic)
3068 vector = apic_set_eoi(apic);
3069 trace_kvm_pv_eoi(apic, vector);
3077 apic_sync_pv_eoi_from_guest(vcpu, vcpu->arch.apic);
3082 if (kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
3086 apic_set_tpr(vcpu->arch.apic, data & 0xff);
3096 struct kvm_lapic *apic)
3100 apic->irr_pending ||
3102 apic->highest_isr_cache == -1 ||
3104 kvm_ioapic_handles_vector(apic, apic->highest_isr_cache)) {
3112 pv_eoi_set_pending(apic->vcpu);
3119 struct kvm_lapic *apic = vcpu->arch.apic;
3121 apic_sync_pv_eoi_to_guest(vcpu, apic);
3126 tpr = kvm_lapic_get_reg(apic, APIC_TASKPRI) & 0xff;
3127 max_irr = apic_find_highest_irr(apic);
3130 max_isr = apic_find_highest_isr(apic);
3135 kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
3143 &vcpu->arch.apic->vapic_cache,
3151 vcpu->arch.apic->vapic_addr = vapic_addr;
3155 int kvm_x2apic_icr_write(struct kvm_lapic *apic, u64 data)
3159 kvm_apic_send_ipi(apic, (u32)data, (u32)(data >> 32));
3160 kvm_lapic_set_reg64(apic, APIC_ICR, data);
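kvm_x2apic_icr_write() above reflects that in x2APIC mode the ICR is a single 64-bit MSR rather than the xAPIC ICR/ICR2 pair. The split it feeds into kvm_apic_send_ipi(), as a trivial sketch (example_split_x2apic_icr() is hypothetical):

    /* Sketch: x2APIC ICR = command in bits 31:0, destination in 63:32. */
    static void example_split_x2apic_icr(u64 icr, u32 *icr_low, u32 *dest_id)
    {
            *icr_low = (u32)icr;            /* vector, delivery/dest mode, ... */
            *dest_id = (u32)(icr >> 32);    /* destination x2APIC ID */
    }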
3165 static int kvm_lapic_msr_read(struct kvm_lapic *apic, u32 reg, u64 *data)
3170 *data = kvm_lapic_get_reg64(apic, APIC_ICR);
3174 if (kvm_lapic_reg_read(apic, reg, 4, &low))
3182 static int kvm_lapic_msr_write(struct kvm_lapic *apic, u32 reg, u64 data)
3190 return kvm_x2apic_icr_write(apic, data);
3196 return kvm_lapic_reg_write(apic, reg, (u32)data);
3201 struct kvm_lapic *apic = vcpu->arch.apic;
3204 if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(apic))
3207 return kvm_lapic_msr_write(apic, reg, data);
3212 struct kvm_lapic *apic = vcpu->arch.apic;
3215 if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(apic))
3218 return kvm_lapic_msr_read(apic, reg, data);
3226 return kvm_lapic_msr_write(vcpu->arch.apic, reg, data);
3234 return kvm_lapic_msr_read(vcpu->arch.apic, reg, data);
3265 struct kvm_lapic *apic = vcpu->arch.apic;
3290 clear_bit(KVM_APIC_SIPI, &apic->pending_events);
3294 if (test_and_clear_bit(KVM_APIC_INIT, &apic->pending_events)) {
3296 if (kvm_vcpu_is_bsp(apic->vcpu))
3301 if (test_and_clear_bit(KVM_APIC_SIPI, &apic->pending_events)) {
3305 sipi_vector = apic->sipi_vector;
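The pending_events matches above use bit KVM_APIC_INIT and bit KVM_APIC_SIPI plus sipi_vector to defer INIT/SIPI until the vCPU can act on them. A hedged sketch of the consumer side (example_accept_events() is hypothetical; the comments describe the effects, and the real logic lives in kvm_apic_accept_events()):

    /* Sketch: how the pended INIT/SIPI bits are consumed. */
    static void example_accept_events(struct kvm_lapic *apic)
    {
            if (test_and_clear_bit(KVM_APIC_INIT, &apic->pending_events)) {
                    /* reset the vCPU; a non-BSP then waits for SIPI */
            }
            if (test_and_clear_bit(KVM_APIC_SIPI, &apic->pending_events)) {
                    /* if waiting for SIPI: start in real mode with
                     * CS = sipi_vector << 8 (base sipi_vector << 12), IP = 0 */
            }
    }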