xref: /kernel/linux/linux-5.10/arch/x86/kvm/svm/pmu.c (revision 8c2ecf20)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * KVM PMU support for AMD
 *
 * Copyright 2015, Red Hat, Inc. and/or its affiliates.
 *
 * Author:
 *   Wei Huang <wei@redhat.com>
 *
 * Implementation is based on pmu_intel.c file
 */
#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "pmu.h"

enum pmu_type {
	PMU_TYPE_COUNTER = 0,
	PMU_TYPE_EVNTSEL,
};

enum index {
	INDEX_ZERO = 0,
	INDEX_ONE,
	INDEX_TWO,
	INDEX_THREE,
	INDEX_FOUR,
	INDEX_FIVE,
	INDEX_ERROR,
};

/* duplicated from amd_perfmon_event_map, K7 and above should work. */
static struct kvm_event_hw_type_mapping amd_event_mapping[] = {
	[0] = { 0x76, 0x00, PERF_COUNT_HW_CPU_CYCLES },
	[1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
	[2] = { 0x7d, 0x07, PERF_COUNT_HW_CACHE_REFERENCES },
	[3] = { 0x7e, 0x07, PERF_COUNT_HW_CACHE_MISSES },
	[4] = { 0xc2, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
	[5] = { 0xc3, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
	[6] = { 0xd0, 0x00, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND },
	[7] = { 0xd1, 0x00, PERF_COUNT_HW_STALLED_CYCLES_BACKEND },
};

/* duplicated from amd_f17h_perfmon_event_map. */
static struct kvm_event_hw_type_mapping amd_f17h_event_mapping[] = {
	[0] = { 0x76, 0x00, PERF_COUNT_HW_CPU_CYCLES },
	[1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
	[2] = { 0x60, 0xff, PERF_COUNT_HW_CACHE_REFERENCES },
	[3] = { 0x64, 0x09, PERF_COUNT_HW_CACHE_MISSES },
	[4] = { 0xc2, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
	[5] = { 0xc3, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
	[6] = { 0x87, 0x02, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND },
	[7] = { 0x87, 0x01, PERF_COUNT_HW_STALLED_CYCLES_BACKEND },
};

/* amd_pmc_perf_hw_id depends on these being the same size */
static_assert(ARRAY_SIZE(amd_event_mapping) ==
	     ARRAY_SIZE(amd_f17h_event_mapping));

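/*
 * Return the base MSR for the requested register type: the F15h
 * PERF_CTL/PERF_CTR MSRs when the guest can see PERFCTR_CORE,
 * otherwise the legacy K7 EVNTSEL0/PERFCTR0 MSRs.
 */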
static unsigned int get_msr_base(struct kvm_pmu *pmu, enum pmu_type type)
{
	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);

	if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE)) {
		if (type == PMU_TYPE_COUNTER)
			return MSR_F15H_PERF_CTR;
		else
			return MSR_F15H_PERF_CTL;
	} else {
		if (type == PMU_TYPE_COUNTER)
			return MSR_K7_PERFCTR0;
		else
			return MSR_K7_EVNTSEL0;
	}
}

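/*
 * Map a counter or event select MSR to its counter index.  The F15h and
 * legacy K7 MSRs alias the same counters, so e.g. MSR_K7_PERFCTR0 and
 * MSR_F15H_PERF_CTR0 both resolve to INDEX_ZERO.
 */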
static enum index msr_to_index(u32 msr)
{
	switch (msr) {
	case MSR_F15H_PERF_CTL0:
	case MSR_F15H_PERF_CTR0:
	case MSR_K7_EVNTSEL0:
	case MSR_K7_PERFCTR0:
		return INDEX_ZERO;
	case MSR_F15H_PERF_CTL1:
	case MSR_F15H_PERF_CTR1:
	case MSR_K7_EVNTSEL1:
	case MSR_K7_PERFCTR1:
		return INDEX_ONE;
	case MSR_F15H_PERF_CTL2:
	case MSR_F15H_PERF_CTR2:
	case MSR_K7_EVNTSEL2:
	case MSR_K7_PERFCTR2:
		return INDEX_TWO;
	case MSR_F15H_PERF_CTL3:
	case MSR_F15H_PERF_CTR3:
	case MSR_K7_EVNTSEL3:
	case MSR_K7_PERFCTR3:
		return INDEX_THREE;
	case MSR_F15H_PERF_CTL4:
	case MSR_F15H_PERF_CTR4:
		return INDEX_FOUR;
	case MSR_F15H_PERF_CTL5:
	case MSR_F15H_PERF_CTR5:
		return INDEX_FIVE;
	default:
		return INDEX_ERROR;
	}
}

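/*
 * Translate an MSR into its general purpose PMC, or NULL if the MSR's
 * register class (counter vs. event select) does not match @type or the
 * MSR does not belong to the vPMU at all.
 */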
static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,
					     enum pmu_type type)
{
	switch (msr) {
	case MSR_F15H_PERF_CTL0:
	case MSR_F15H_PERF_CTL1:
	case MSR_F15H_PERF_CTL2:
	case MSR_F15H_PERF_CTL3:
	case MSR_F15H_PERF_CTL4:
	case MSR_F15H_PERF_CTL5:
	case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
		if (type != PMU_TYPE_EVNTSEL)
			return NULL;
		break;
	case MSR_F15H_PERF_CTR0:
	case MSR_F15H_PERF_CTR1:
	case MSR_F15H_PERF_CTR2:
	case MSR_F15H_PERF_CTR3:
	case MSR_F15H_PERF_CTR4:
	case MSR_F15H_PERF_CTR5:
	case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3:
		if (type != PMU_TYPE_COUNTER)
			return NULL;
		break;
	default:
		return NULL;
	}

	return &pmu->gp_counters[msr_to_index(msr)];
}

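/*
 * Translate the PMC's event select and unit mask into a generic perf
 * hardware event id, using the Family 17h table for Zen-based guests and
 * the legacy table otherwise.  Returns PERF_COUNT_HW_MAX on no match.
 */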
static unsigned int amd_pmc_perf_hw_id(struct kvm_pmc *pmc)
{
	struct kvm_event_hw_type_mapping *event_mapping;
	u8 event_select = pmc->eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
	u8 unit_mask = (pmc->eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
	int i;

	if (guest_cpuid_family(pmc->vcpu) >= 0x17)
		event_mapping = amd_f17h_event_mapping;
	else
		event_mapping = amd_event_mapping;

	for (i = 0; i < ARRAY_SIZE(amd_event_mapping); i++)
		if (event_mapping[i].eventsel == event_select
		    && event_mapping[i].unit_mask == unit_mask)
			break;

	if (i == ARRAY_SIZE(amd_event_mapping))
		return PERF_COUNT_HW_MAX;

	return event_mapping[i].event_type;
}

/* return PERF_COUNT_HW_MAX as AMD doesn't have fixed events */
static unsigned amd_find_fixed_event(int idx)
{
	return PERF_COUNT_HW_MAX;
}

/*
 * Check if a PMC is enabled by comparing it against global_ctrl bits.
 * Because AMD CPUs don't have a global_ctrl MSR, all PMCs are enabled
 * (return true).
 */
static bool amd_pmc_is_enabled(struct kvm_pmc *pmc)
{
	return true;
}

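/*
 * Map a contiguous counter index to its PMC.  With PERFCTR_CORE the
 * counter MSRs are interleaved with the event select MSRs, so the index
 * is doubled before it is added to the counter base.
 */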
static struct kvm_pmc *amd_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
{
	unsigned int base = get_msr_base(pmu, PMU_TYPE_COUNTER);
	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);

	if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE)) {
		/*
		 * The idx is contiguous. The MSRs are not. The counter MSRs
		 * are interleaved with the event select MSRs.
		 */
		pmc_idx *= 2;
	}

	return get_gp_pmc_amd(pmu, base + pmc_idx, PMU_TYPE_COUNTER);
}

/* returns 0 if idx's corresponding MSR exists; otherwise returns 1. */
static int amd_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	idx &= ~(3u << 30);

	return (idx >= pmu->nr_arch_gp_counters);
}

/* idx is the ECX register of the RDPMC instruction */
static struct kvm_pmc *amd_rdpmc_ecx_to_pmc(struct kvm_vcpu *vcpu,
	unsigned int idx, u64 *mask)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *counters;

	idx &= ~(3u << 30);
	if (idx >= pmu->nr_arch_gp_counters)
		return NULL;
	counters = pmu->gp_counters;

	return &counters[idx];
}

static bool amd_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
	/* All MSRs refer to exactly one PMC, so msr_idx_to_pmc is enough.  */
	return false;
}

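/*
 * Resolve an MSR to its PMC regardless of whether it names a counter or
 * an event select register.
 */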
static struct kvm_pmc *amd_msr_idx_to_pmc(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;

	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
	pmc = pmc ? pmc : get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);

	return pmc;
}

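/*
 * Read a counter (MSR_*_PERF_CTRn/PERFCTRn) or an event select
 * (MSR_*_PERF_CTLn/EVNTSELn).  Returns 1 if the MSR is not handled by
 * the vPMU.
 */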
static int amd_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u32 msr = msr_info->index;

	/* MSR_PERFCTRn */
	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
	if (pmc) {
		msr_info->data = pmc_read_counter(pmc);
		return 0;
	}
	/* MSR_EVNTSELn */
	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
	if (pmc) {
		msr_info->data = pmc->eventsel;
		return 0;
	}

	return 1;
}

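/*
 * Write a counter or an event select.  Counter writes adjust the running
 * count; event select writes strip reserved bits and reprogram the
 * backing perf event when the value changes.
 */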
static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u32 msr = msr_info->index;
	u64 data = msr_info->data;

	/* MSR_PERFCTRn */
	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
	if (pmc) {
		pmc->counter += data - pmc_read_counter(pmc);
		return 0;
	}
	/* MSR_EVNTSELn */
	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
	if (pmc) {
		data &= ~pmu->reserved_bits;
		if (data != pmc->eventsel)
			reprogram_gp_counter(pmc, data);
		return 0;
	}

	return 1;
}

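/*
 * Refresh the vPMU model after a guest CPUID update: six counters with
 * PERFCTR_CORE, four legacy counters otherwise, 48-bit counter width,
 * and no fixed counters or global control/status state.
 */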
static void amd_pmu_refresh(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE))
		pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS_CORE;
	else
		pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS;

	pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << 48) - 1;
	pmu->reserved_bits = 0xfffffff000280000ull;
	pmu->raw_event_mask = AMD64_RAW_EVENT_MASK;
	pmu->version = 1;
	/* not applicable to AMD; but clear them to prevent any fallout */
	pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
	pmu->nr_arch_fixed_counters = 0;
	pmu->global_status = 0;
	bitmap_set(pmu->all_valid_pmc_idx, 0, pmu->nr_arch_gp_counters);
}

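/*
 * Initialize every general purpose PMC that could ever be exposed;
 * amd_pmu_refresh() decides how many the guest actually sees.
 */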
static void amd_pmu_init(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	int i;

	BUILD_BUG_ON(AMD64_NUM_COUNTERS_CORE > INTEL_PMC_MAX_GENERIC);

	for (i = 0; i < AMD64_NUM_COUNTERS_CORE; i++) {
		pmu->gp_counters[i].type = KVM_PMC_GP;
		pmu->gp_counters[i].vcpu = vcpu;
		pmu->gp_counters[i].idx = i;
		pmu->gp_counters[i].current_config = 0;
	}
}

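/* Stop every counter and clear its count and event select. */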
static void amd_pmu_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	int i;

	for (i = 0; i < AMD64_NUM_COUNTERS_CORE; i++) {
		struct kvm_pmc *pmc = &pmu->gp_counters[i];

		pmc_stop_counter(pmc);
		pmc->counter = pmc->eventsel = 0;
	}
}

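/* AMD callbacks for the common x86 vPMU code. */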
struct kvm_pmu_ops amd_pmu_ops = {
	.pmc_perf_hw_id = amd_pmc_perf_hw_id,
	.find_fixed_event = amd_find_fixed_event,
	.pmc_is_enabled = amd_pmc_is_enabled,
	.pmc_idx_to_pmc = amd_pmc_idx_to_pmc,
	.rdpmc_ecx_to_pmc = amd_rdpmc_ecx_to_pmc,
	.msr_idx_to_pmc = amd_msr_idx_to_pmc,
	.is_valid_rdpmc_ecx = amd_is_valid_rdpmc_ecx,
	.is_valid_msr = amd_is_valid_msr,
	.get_msr = amd_pmu_get_msr,
	.set_msr = amd_pmu_set_msr,
	.refresh = amd_pmu_refresh,
	.init = amd_pmu_init,
	.reset = amd_pmu_reset,
};