/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_PMU_H
#define __KVM_X86_PMU_H

#include <linux/nospec.h>

#define vcpu_to_pmu(vcpu) (&(vcpu)->arch.pmu)
#define pmu_to_vcpu(pmu)  (container_of((pmu), struct kvm_vcpu, arch.pmu))
#define pmc_to_pmu(pmc)   (&(pmc)->vcpu->arch.pmu)

#define MSR_IA32_MISC_ENABLE_PMU_RO_MASK (MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL |	\
					  MSR_IA32_MISC_ENABLE_BTS_UNAVAIL)

/* retrieve the 4 bits for EN and PMI out of IA32_FIXED_CTR_CTRL */
#define fixed_ctrl_field(ctrl_reg, idx) (((ctrl_reg) >> ((idx)*4)) & 0xf)
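
/*
 * Example (illustrative): for fixed counter 1, fixed_ctrl_field(ctrl, 1)
 * evaluates to (ctrl >> 4) & 0xf, i.e. bits 7:4 of IA32_FIXED_CTR_CTRL:
 * the kernel/user enable bits, the AnyThread bit and the PMI bit for that
 * counter.
 */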

#define VMWARE_BACKDOOR_PMC_HOST_TSC		0x10000
#define VMWARE_BACKDOOR_PMC_REAL_TIME		0x10001
#define VMWARE_BACKDOOR_PMC_APPARENT_TIME	0x10002

struct kvm_pmu_ops {
	bool (*hw_event_available)(struct kvm_pmc *pmc);
	struct kvm_pmc *(*pmc_idx_to_pmc)(struct kvm_pmu *pmu, int pmc_idx);
	struct kvm_pmc *(*rdpmc_ecx_to_pmc)(struct kvm_vcpu *vcpu,
		unsigned int idx, u64 *mask);
	struct kvm_pmc *(*msr_idx_to_pmc)(struct kvm_vcpu *vcpu, u32 msr);
	bool (*is_valid_rdpmc_ecx)(struct kvm_vcpu *vcpu, unsigned int idx);
	bool (*is_valid_msr)(struct kvm_vcpu *vcpu, u32 msr);
	int (*get_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
	int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
	void (*refresh)(struct kvm_vcpu *vcpu);
	void (*init)(struct kvm_vcpu *vcpu);
	void (*reset)(struct kvm_vcpu *vcpu);
	void (*deliver_pmi)(struct kvm_vcpu *vcpu);
	void (*cleanup)(struct kvm_vcpu *vcpu);

	const u64 EVENTSEL_EVENT;
	const int MAX_NR_GP_COUNTERS;
	const int MIN_NR_GP_COUNTERS;
};
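
/*
 * Example (minimal sketch, not one of the actual vendor tables): each vendor
 * module fills in a kvm_pmu_ops instance plus the constant limits above; the
 * handler names below are purely illustrative.
 *
 *	static struct kvm_pmu_ops example_pmu_ops = {
 *		.hw_event_available	= example_hw_event_available,
 *		.pmc_idx_to_pmc		= example_pmc_idx_to_pmc,
 *		.msr_idx_to_pmc		= example_msr_idx_to_pmc,
 *		.is_valid_msr		= example_is_valid_msr,
 *		.get_msr		= example_get_msr,
 *		.set_msr		= example_set_msr,
 *		.refresh		= example_refresh,
 *		.init			= example_init,
 *		.reset			= example_reset,
 *		.EVENTSEL_EVENT		= ARCH_PERFMON_EVENTSEL_EVENT,
 *		.MAX_NR_GP_COUNTERS	= 8,
 *		.MIN_NR_GP_COUNTERS	= 1,
 *	};
 */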

void kvm_pmu_ops_update(const struct kvm_pmu_ops *pmu_ops);

static inline bool kvm_pmu_has_perf_global_ctrl(struct kvm_pmu *pmu)
{
	/*
	 * Architecturally, Intel's SDM states that IA32_PERF_GLOBAL_CTRL is
	 * supported if "CPUID.0AH: EAX[7:0] > 0", i.e. if the PMU version is
	 * greater than zero.  However, KVM only exposes and emulates the MSR
	 * to/for the guest if the guest PMU supports at least "Architectural
	 * Performance Monitoring Version 2".
	 *
	 * AMD's version of PERF_GLOBAL_CTRL conveniently shows up with v2.
	 */
	return pmu->version > 1;
}

static inline u64 pmc_bitmask(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	return pmu->counter_bitmask[pmc->type];
}

static inline u64 pmc_read_counter(struct kvm_pmc *pmc)
{
	u64 counter, enabled, running;

	counter = pmc->counter;
	if (pmc->perf_event && !pmc->is_paused)
		counter += perf_event_read_value(pmc->perf_event,
						 &enabled, &running);
	/* FIXME: Scaling needed? */
	return counter & pmc_bitmask(pmc);
}

static inline void pmc_write_counter(struct kvm_pmc *pmc, u64 val)
{
	pmc->counter += val - pmc_read_counter(pmc);
	pmc->counter &= pmc_bitmask(pmc);
}
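
/*
 * Worked example (illustrative): the guest-visible value is pmc->counter plus
 * whatever the backing perf event has accumulated.  If pmc->counter is 100,
 * the live perf delta is 20 (visible value 120) and the guest writes 50, then
 * pmc->counter becomes 100 + (50 - 120) = 30, so the visible value is again
 * 30 + 20 = 50 without having to pause or reprogram the perf event.
 */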

static inline bool pmc_is_gp(struct kvm_pmc *pmc)
{
	return pmc->type == KVM_PMC_GP;
}

static inline bool pmc_is_fixed(struct kvm_pmc *pmc)
{
	return pmc->type == KVM_PMC_FIXED;
}

static inline bool kvm_valid_perf_global_ctrl(struct kvm_pmu *pmu,
						 u64 data)
{
	return !(pmu->global_ctrl_mask & data);
}

/* returns general purpose PMC with the specified MSR. Note that it can be
 * used for both PERFCTRn and EVNTSELn; that is why it accepts base as a
 * parameter to tell them apart.
 */
static inline struct kvm_pmc *get_gp_pmc(struct kvm_pmu *pmu, u32 msr,
					 u32 base)
{
	if (msr >= base && msr < base + pmu->nr_arch_gp_counters) {
		u32 index = array_index_nospec(msr - base,
					       pmu->nr_arch_gp_counters);

		return &pmu->gp_counters[index];
	}

	return NULL;
}
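
/*
 * Example (sketch of typical usage; the base values come from vendor code):
 * the same helper resolves both flavors of GP counter MSRs, e.g. on Intel
 *
 *	pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0);	(IA32_PMCx)
 *	pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0);	(IA32_PERFEVTSELx)
 *
 * so MSR 0xc1 + N and MSR 0x186 + N both map to pmu->gp_counters[N].
 */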

/* returns fixed PMC with the specified MSR */
static inline struct kvm_pmc *get_fixed_pmc(struct kvm_pmu *pmu, u32 msr)
{
	int base = MSR_CORE_PERF_FIXED_CTR0;

	if (msr >= base && msr < base + pmu->nr_arch_fixed_counters) {
		u32 index = array_index_nospec(msr - base,
					       pmu->nr_arch_fixed_counters);

		return &pmu->fixed_counters[index];
	}

	return NULL;
}

static inline u64 get_sample_period(struct kvm_pmc *pmc, u64 counter_value)
{
	u64 sample_period = (-counter_value) & pmc_bitmask(pmc);

	if (!sample_period)
		sample_period = pmc_bitmask(pmc) + 1;
	return sample_period;
}
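
/*
 * Worked example (illustrative): the sample period is the number of events
 * until the counter overflows.  For a 48-bit counter (pmc_bitmask() ==
 * 0xffffffffffff), a counter value of 0xfffffffffff0 yields a period of 0x10,
 * while a counter value of 0 yields a full period of 2^48.
 */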

static inline void pmc_update_sample_period(struct kvm_pmc *pmc)
{
	if (!pmc->perf_event || pmc->is_paused ||
	    !is_sampling_event(pmc->perf_event))
		return;

	perf_event_period(pmc->perf_event,
			  get_sample_period(pmc, pmc->counter));
}

static inline bool pmc_speculative_in_use(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	if (pmc_is_fixed(pmc))
		return fixed_ctrl_field(pmu->fixed_ctr_ctrl,
					pmc->idx - INTEL_PMC_IDX_FIXED) & 0x3;

	return pmc->eventsel & ARCH_PERFMON_EVENTSEL_ENABLE;
}

extern struct x86_pmu_capability kvm_pmu_cap;

static inline void kvm_init_pmu_capability(const struct kvm_pmu_ops *pmu_ops)
{
	bool is_intel = boot_cpu_data.x86_vendor == X86_VENDOR_INTEL;
	int min_nr_gp_ctrs = pmu_ops->MIN_NR_GP_COUNTERS;

	/*
	 * Hybrid PMUs don't play nice with virtualization without careful
	 * configuration by userspace, and KVM's APIs for reporting supported
	 * vPMU features do not account for hybrid PMUs.  Disable vPMU support
	 * for hybrid PMUs until KVM gains a way to let userspace opt-in.
	 */
	if (cpu_feature_enabled(X86_FEATURE_HYBRID_CPU))
		enable_pmu = false;

	if (enable_pmu) {
		perf_get_x86_pmu_capability(&kvm_pmu_cap);

		/*
		 * WARN if perf did NOT disable the hardware PMU even though
		 * the architecturally required number of GP counters isn't
		 * present, i.e. if there is a non-zero number of counters,
		 * but fewer than what is architecturally required.
		 */
		if (!kvm_pmu_cap.num_counters_gp ||
		    WARN_ON_ONCE(kvm_pmu_cap.num_counters_gp < min_nr_gp_ctrs))
			enable_pmu = false;
		else if (is_intel && !kvm_pmu_cap.version)
			enable_pmu = false;
	}

	if (!enable_pmu) {
		memset(&kvm_pmu_cap, 0, sizeof(kvm_pmu_cap));
		return;
	}

	kvm_pmu_cap.version = min(kvm_pmu_cap.version, 2);
	kvm_pmu_cap.num_counters_gp = min(kvm_pmu_cap.num_counters_gp,
					  pmu_ops->MAX_NR_GP_COUNTERS);
	kvm_pmu_cap.num_counters_fixed = min(kvm_pmu_cap.num_counters_fixed,
					     KVM_PMC_MAX_FIXED);
}
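
/*
 * Example (illustrative, assuming the common vendor-init flow): this is meant
 * to run once during vendor module initialization with that vendor's ops,
 * roughly
 *
 *	kvm_init_pmu_capability(ops->pmu_ops);
 *
 * On a host whose PMU reports version 5 with 8 GP counters, kvm_pmu_cap ends
 * up clamped to version 2, min(8, MAX_NR_GP_COUNTERS) GP counters and at most
 * KVM_PMC_MAX_FIXED fixed counters.
 */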

static inline void kvm_pmu_request_counter_reprogram(struct kvm_pmc *pmc)
{
	set_bit(pmc->idx, pmc_to_pmu(pmc)->reprogram_pmi);
	kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
}

static inline void reprogram_counters(struct kvm_pmu *pmu, u64 diff)
{
	int bit;

	if (!diff)
		return;

	for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX)
		set_bit(bit, pmu->reprogram_pmi);
	kvm_make_request(KVM_REQ_PMU, pmu_to_vcpu(pmu));
}
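
/*
 * Example (sketch of a typical caller): when the guest rewrites a control MSR
 * such as PERF_GLOBAL_CTRL, only the counters whose enable bit actually
 * changed need to be reprogrammed, e.g.
 *
 *	diff = pmu->global_ctrl ^ data;
 *	pmu->global_ctrl = data;
 *	reprogram_counters(pmu, diff);
 */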

/*
 * Check if a PMC is enabled by comparing it against global_ctrl bits.
 *
 * If the vPMU doesn't have global_ctrl MSR, all vPMCs are enabled.
 */
static inline bool pmc_is_globally_enabled(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	if (!kvm_pmu_has_perf_global_ctrl(pmu))
		return true;

	return test_bit(pmc->idx, (unsigned long *)&pmu->global_ctrl);
}

void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu);
void kvm_pmu_handle_event(struct kvm_vcpu *vcpu);
int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data);
bool kvm_pmu_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx);
bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr);
int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
void kvm_pmu_refresh(struct kvm_vcpu *vcpu);
void kvm_pmu_reset(struct kvm_vcpu *vcpu);
void kvm_pmu_init(struct kvm_vcpu *vcpu);
void kvm_pmu_cleanup(struct kvm_vcpu *vcpu);
void kvm_pmu_destroy(struct kvm_vcpu *vcpu);
int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp);
void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 perf_hw_id);

bool is_vmware_backdoor_pmc(u32 pmc_idx);

extern struct kvm_pmu_ops intel_pmu_ops;
extern struct kvm_pmu_ops amd_pmu_ops;
#endif /* __KVM_X86_PMU_H */