xref: /kernel/linux/linux-5.10/arch/arm64/kvm/pmu.c (revision 8c2ecf20)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2019 Arm Limited
 * Author: Andrew Murray <Andrew.Murray@arm.com>
 */
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include <asm/kvm_hyp.h>

/*
 * Given the perf event attributes and system type, determine
 * whether we will need to switch counters at guest entry/exit.
 */
static bool kvm_pmu_switch_needed(struct perf_event_attr *attr)
{
	/*
	 * With VHE the guest kernel runs at EL1 and the host at EL2;
	 * if user (EL0) events are excluded, we have no reason to
	 * switch counters.
	 */
	if (has_vhe() && attr->exclude_user)
		return false;

	/* Only switch if the attributes differ */
	return (attr->exclude_host != attr->exclude_guest);
}
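
/*
 * For example (an illustrative sketch of perf_event_attr flags, not
 * code from this file): an event opened with perf's :H modifier
 * arrives with exclude_guest set and exclude_host clear, so it needs
 * switching and is tracked as a host event below; with :G the flags
 * are reversed. An event that excludes neither world has both flags
 * clear and needs no switching at all.
 */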

/*
 * Add events to track that we may want to switch at guest entry/exit
 * time.
 */
void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr)
{
	struct kvm_host_data *ctx = this_cpu_ptr_hyp_sym(kvm_host_data);

	if (!ctx || !kvm_pmu_switch_needed(attr))
		return;

	if (!attr->exclude_host)
		ctx->pmu_events.events_host |= set;
	if (!attr->exclude_guest)
		ctx->pmu_events.events_guest |= set;
}

/*
 * Stop tracking events
 */
void kvm_clr_pmu_events(u32 clr)
{
	struct kvm_host_data *ctx = this_cpu_ptr_hyp_sym(kvm_host_data);

	if (!ctx)
		return;

	ctx->pmu_events.events_host &= ~clr;
	ctx->pmu_events.events_guest &= ~clr;
}
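
/*
 * Illustrative callers (a sketch based on the arm64 perf driver, not
 * code from this file): when the host PMU driver enables or disables
 * an event counter, it mirrors that into the sets above, roughly:
 *
 *	kvm_set_pmu_events(BIT(idx), attr);	// on counter enable
 *	kvm_clr_pmu_events(BIT(idx));		// on counter disable
 */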
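/*
 * read_sysreg()/write_sysreg() encode the system register name into
 * the mrs/msr instruction at compile time, so PMEVTYPER<n>_EL0 cannot
 * be indexed with a runtime value; instead we spell out one switch
 * case per counter below.
 */
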
#define PMEVTYPER_READ_CASE(idx)				\
	case idx:						\
		return read_sysreg(pmevtyper##idx##_el0)

#define PMEVTYPER_WRITE_CASE(idx)				\
	case idx:						\
		write_sysreg(val, pmevtyper##idx##_el0);	\
		break

#define PMEVTYPER_CASES(readwrite)				\
	PMEVTYPER_##readwrite##_CASE(0);			\
	PMEVTYPER_##readwrite##_CASE(1);			\
	PMEVTYPER_##readwrite##_CASE(2);			\
	PMEVTYPER_##readwrite##_CASE(3);			\
	PMEVTYPER_##readwrite##_CASE(4);			\
	PMEVTYPER_##readwrite##_CASE(5);			\
	PMEVTYPER_##readwrite##_CASE(6);			\
	PMEVTYPER_##readwrite##_CASE(7);			\
	PMEVTYPER_##readwrite##_CASE(8);			\
	PMEVTYPER_##readwrite##_CASE(9);			\
	PMEVTYPER_##readwrite##_CASE(10);			\
	PMEVTYPER_##readwrite##_CASE(11);			\
	PMEVTYPER_##readwrite##_CASE(12);			\
	PMEVTYPER_##readwrite##_CASE(13);			\
	PMEVTYPER_##readwrite##_CASE(14);			\
	PMEVTYPER_##readwrite##_CASE(15);			\
	PMEVTYPER_##readwrite##_CASE(16);			\
	PMEVTYPER_##readwrite##_CASE(17);			\
	PMEVTYPER_##readwrite##_CASE(18);			\
	PMEVTYPER_##readwrite##_CASE(19);			\
	PMEVTYPER_##readwrite##_CASE(20);			\
	PMEVTYPER_##readwrite##_CASE(21);			\
	PMEVTYPER_##readwrite##_CASE(22);			\
	PMEVTYPER_##readwrite##_CASE(23);			\
	PMEVTYPER_##readwrite##_CASE(24);			\
	PMEVTYPER_##readwrite##_CASE(25);			\
	PMEVTYPER_##readwrite##_CASE(26);			\
	PMEVTYPER_##readwrite##_CASE(27);			\
	PMEVTYPER_##readwrite##_CASE(28);			\
	PMEVTYPER_##readwrite##_CASE(29);			\
	PMEVTYPER_##readwrite##_CASE(30)
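
/*
 * For example, PMEVTYPER_READ_CASE(3) expands to:
 *
 *	case 3:
 *		return read_sysreg(pmevtyper3_el0)
 */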

/*
 * Read a value directly from PMEVTYPER<idx>, where idx is 0-30,
 * or from PMCCFILTR_EL0 when idx is ARMV8_PMU_CYCLE_IDX (31).
 */
static u64 kvm_vcpu_pmu_read_evtype_direct(int idx)
{
	switch (idx) {
	PMEVTYPER_CASES(READ);
	case ARMV8_PMU_CYCLE_IDX:
		return read_sysreg(pmccfiltr_el0);
	default:
		WARN_ON(1);
	}

	return 0;
}

/*
 * Write a value directly to PMEVTYPER<idx>, where idx is 0-30,
 * or to PMCCFILTR_EL0 when idx is ARMV8_PMU_CYCLE_IDX (31).
 */
static void kvm_vcpu_pmu_write_evtype_direct(int idx, u32 val)
{
	switch (idx) {
	PMEVTYPER_CASES(WRITE);
	case ARMV8_PMU_CYCLE_IDX:
		write_sysreg(val, pmccfiltr_el0);
		break;
	default:
		WARN_ON(1);
	}
}
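/*
 * ARMV8_PMU_EXCLUDE_EL0 is the PMEVTYPER<n>_EL0.U filter bit (bit 30);
 * when set, the counter does not count at EL0. The helpers below flip
 * only that bit and leave the rest of the event type unchanged.
 */
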
/*
 * Modify ARMv8 PMU events to include EL0 counting
 */
static void kvm_vcpu_pmu_enable_el0(unsigned long events)
{
	u64 typer;
	u32 counter;

	for_each_set_bit(counter, &events, 32) {
		typer = kvm_vcpu_pmu_read_evtype_direct(counter);
		typer &= ~ARMV8_PMU_EXCLUDE_EL0;
		kvm_vcpu_pmu_write_evtype_direct(counter, typer);
	}
}

/*
 * Modify ARMv8 PMU events to exclude EL0 counting
 */
static void kvm_vcpu_pmu_disable_el0(unsigned long events)
{
	u64 typer;
	u32 counter;

	for_each_set_bit(counter, &events, 32) {
		typer = kvm_vcpu_pmu_read_evtype_direct(counter);
		typer |= ARMV8_PMU_EXCLUDE_EL0;
		kvm_vcpu_pmu_write_evtype_direct(counter, typer);
	}
}

/*
 * On VHE ensure that only guest events have EL0 counting enabled.
 * This is called from both vcpu_{load,put} and the sysreg handling.
 * Since the latter is preemptible, special care must be taken to
 * disable preemption.
 */
void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu)
{
	struct kvm_host_data *host;
	u32 events_guest, events_host;

	if (!has_vhe())
		return;

	preempt_disable();
	host = this_cpu_ptr_hyp_sym(kvm_host_data);
	events_guest = host->pmu_events.events_guest;
	events_host = host->pmu_events.events_host;

	kvm_vcpu_pmu_enable_el0(events_guest);
	kvm_vcpu_pmu_disable_el0(events_host);
	preempt_enable();
}
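
/*
 * Note that events_guest and events_host are disjoint by construction:
 * kvm_pmu_switch_needed() only accepts events whose exclude_host and
 * exclude_guest flags differ, so each tracked event lands in exactly
 * one set and the enable/disable pairs here and below never fight
 * over the same counter.
 */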

/*
 * On VHE ensure that only host events have EL0 counting enabled
 */
void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu)
{
	struct kvm_host_data *host;
	u32 events_guest, events_host;

	if (!has_vhe())
		return;

	host = this_cpu_ptr_hyp_sym(kvm_host_data);
	events_guest = host->pmu_events.events_guest;
	events_host = host->pmu_events.events_host;

	kvm_vcpu_pmu_enable_el0(events_host);
	kvm_vcpu_pmu_disable_el0(events_guest);
}
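
/*
 * Illustrative flow (a sketch of the callers, not code from this
 * file): on a VHE host, kvm_arch_vcpu_load() calls
 * kvm_vcpu_pmu_restore_guest() and kvm_arch_vcpu_put() calls
 * kvm_vcpu_pmu_restore_host(), so host-only events stop counting at
 * EL0 while a vcpu is resident and resume once it is put.
 */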