// SPDX-License-Identifier: GPL-2.0-only
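/*
 * PMU (Performance Monitor Unit) switching for the KVM-HV POWER9/POWER10
 * guest entry/exit path: freeze the counters, swap host and guest PMU
 * register state, and keep lppaca->pmcregs_in_use up to date.
 */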

#include <asm/kvm_ppc.h>
#include <asm/pmc.h>

#include "book3s_hv.h"

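/*
 * Fully freeze the PMU unless it is already in the state the entry/exit
 * path expects: counters frozen (MMCR0[FC]), sampling disabled
 * (MMCRA[SAMPLE_ENABLE] clear) and, on ISA v3.1 CPUs, MMCR0[PMCCEXT] set
 * and BHRB updates disabled (MMCRA[BHRB_DISABLE]).
 */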
static void freeze_pmu(unsigned long mmcr0, unsigned long mmcra)
{
	if (!(mmcr0 & MMCR0_FC))
		goto do_freeze;
	if (mmcra & MMCRA_SAMPLE_ENABLE)
		goto do_freeze;
	if (cpu_has_feature(CPU_FTR_ARCH_31)) {
		if (!(mmcr0 & MMCR0_PMCCEXT))
			goto do_freeze;
		if (!(mmcra & MMCRA_BHRB_DISABLE))
			goto do_freeze;
	}
	return;

do_freeze:
	mmcr0 = MMCR0_FC;
	mmcra = 0;
	if (cpu_has_feature(CPU_FTR_ARCH_31)) {
		mmcr0 |= MMCR0_PMCCEXT;
		mmcra = MMCRA_BHRB_DISABLE;
	}

	mtspr(SPRN_MMCR0, mmcr0);
	mtspr(SPRN_MMCRA, mmcra);
	isync();
}

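/*
 * Save the host PMU state and load the guest's on guest entry. The guest
 * advertises whether it is using the PMU via pmcregs_in_use in its VPA.
 * If it is not, and it has not demand-faulted a PMU access (HFSCR[PM]
 * clear), the guest register load is skipped and the PMU stays frozen.
 */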
void switch_pmu_to_guest(struct kvm_vcpu *vcpu,
			 struct p9_host_os_sprs *host_os_sprs)
{
	struct lppaca *lp;
	int load_pmu = 1;

	lp = vcpu->arch.vpa.pinned_addr;
	if (lp)
		load_pmu = lp->pmcregs_in_use;

	/* Save host */
	if (ppc_get_pmu_inuse()) {
		/* POWER9, POWER10 do not implement HPMC or SPMC */

		host_os_sprs->mmcr0 = mfspr(SPRN_MMCR0);
		host_os_sprs->mmcra = mfspr(SPRN_MMCRA);

		freeze_pmu(host_os_sprs->mmcr0, host_os_sprs->mmcra);

		host_os_sprs->pmc1 = mfspr(SPRN_PMC1);
		host_os_sprs->pmc2 = mfspr(SPRN_PMC2);
		host_os_sprs->pmc3 = mfspr(SPRN_PMC3);
		host_os_sprs->pmc4 = mfspr(SPRN_PMC4);
		host_os_sprs->pmc5 = mfspr(SPRN_PMC5);
		host_os_sprs->pmc6 = mfspr(SPRN_PMC6);
		host_os_sprs->mmcr1 = mfspr(SPRN_MMCR1);
		host_os_sprs->mmcr2 = mfspr(SPRN_MMCR2);
		host_os_sprs->sdar = mfspr(SPRN_SDAR);
		host_os_sprs->siar = mfspr(SPRN_SIAR);
		host_os_sprs->sier1 = mfspr(SPRN_SIER);

		if (cpu_has_feature(CPU_FTR_ARCH_31)) {
			host_os_sprs->mmcr3 = mfspr(SPRN_MMCR3);
			host_os_sprs->sier2 = mfspr(SPRN_SIER2);
			host_os_sprs->sier3 = mfspr(SPRN_SIER3);
		}
	}

#ifdef CONFIG_PPC_PSERIES
	/* After saving PMU, before loading guest PMU, flip pmcregs_in_use */
	if (kvmhv_on_pseries()) {
		barrier();
		get_lppaca()->pmcregs_in_use = load_pmu;
		barrier();
	}
#endif

	/*
	 * Load guest state. If the VPA said the PMCs are not in use but the
	 * guest tried to access them anyway, HFSCR[PM] will have been set by
	 * the hfac interrupt handler, so load them anyway to let the guest
	 * make forward progress.
	 */
	if (load_pmu || (vcpu->arch.hfscr & HFSCR_PM)) {
		mtspr(SPRN_PMC1, vcpu->arch.pmc[0]);
		mtspr(SPRN_PMC2, vcpu->arch.pmc[1]);
		mtspr(SPRN_PMC3, vcpu->arch.pmc[2]);
		mtspr(SPRN_PMC4, vcpu->arch.pmc[3]);
		mtspr(SPRN_PMC5, vcpu->arch.pmc[4]);
		mtspr(SPRN_PMC6, vcpu->arch.pmc[5]);
		mtspr(SPRN_MMCR1, vcpu->arch.mmcr[1]);
		mtspr(SPRN_MMCR2, vcpu->arch.mmcr[2]);
		mtspr(SPRN_SDAR, vcpu->arch.sdar);
		mtspr(SPRN_SIAR, vcpu->arch.siar);
		mtspr(SPRN_SIER, vcpu->arch.sier[0]);

		if (cpu_has_feature(CPU_FTR_ARCH_31)) {
			mtspr(SPRN_MMCR3, vcpu->arch.mmcr[3]);
			mtspr(SPRN_SIER2, vcpu->arch.sier[1]);
			mtspr(SPRN_SIER3, vcpu->arch.sier[2]);
		}

		/* Set MMCRA then MMCR0 last */
		mtspr(SPRN_MMCRA, vcpu->arch.mmcra);
		mtspr(SPRN_MMCR0, vcpu->arch.mmcr[0]);
		/* No isync necessary because we're starting counters */

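		/*
		 * The guest PMU state has been loaded, so let the guest
		 * access the PMU registers directly without faulting,
		 * provided PM is a permitted facility and this is not a
		 * nested guest.
		 */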
		if (!vcpu->arch.nested &&
		    (vcpu->arch.hfscr_permitted & HFSCR_PM))
			vcpu->arch.hfscr |= HFSCR_PM;
	}
}
EXPORT_SYMBOL_GPL(switch_pmu_to_guest);

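/*
 * Save the guest PMU state and restore the host's on guest exit. The guest
 * state is only saved if the guest reports the PMU in use (or the nested-PMU
 * workaround forces a save); otherwise the PMU is left frozen, freezing it
 * here first if the guest had gained access through HFSCR[PM].
 */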
void switch_pmu_to_host(struct kvm_vcpu *vcpu,
			struct p9_host_os_sprs *host_os_sprs)
{
	struct lppaca *lp;
	int save_pmu = 1;

	lp = vcpu->arch.vpa.pinned_addr;
	if (lp)
		save_pmu = lp->pmcregs_in_use;
	if (IS_ENABLED(CONFIG_KVM_BOOK3S_HV_NESTED_PMU_WORKAROUND)) {
		/*
		 * Save the PMU if this guest is capable of running nested
		 * guests. This option is for old L1s that do not set their
		 * lppaca->pmcregs_in_use properly when entering their L2.
		 */
		save_pmu |= nesting_enabled(vcpu->kvm);
	}

	if (save_pmu) {
		vcpu->arch.mmcr[0] = mfspr(SPRN_MMCR0);
		vcpu->arch.mmcra = mfspr(SPRN_MMCRA);

		freeze_pmu(vcpu->arch.mmcr[0], vcpu->arch.mmcra);

		vcpu->arch.pmc[0] = mfspr(SPRN_PMC1);
		vcpu->arch.pmc[1] = mfspr(SPRN_PMC2);
		vcpu->arch.pmc[2] = mfspr(SPRN_PMC3);
		vcpu->arch.pmc[3] = mfspr(SPRN_PMC4);
		vcpu->arch.pmc[4] = mfspr(SPRN_PMC5);
		vcpu->arch.pmc[5] = mfspr(SPRN_PMC6);
		vcpu->arch.mmcr[1] = mfspr(SPRN_MMCR1);
		vcpu->arch.mmcr[2] = mfspr(SPRN_MMCR2);
		vcpu->arch.sdar = mfspr(SPRN_SDAR);
		vcpu->arch.siar = mfspr(SPRN_SIAR);
		vcpu->arch.sier[0] = mfspr(SPRN_SIER);

		if (cpu_has_feature(CPU_FTR_ARCH_31)) {
			vcpu->arch.mmcr[3] = mfspr(SPRN_MMCR3);
			vcpu->arch.sier[1] = mfspr(SPRN_SIER2);
			vcpu->arch.sier[2] = mfspr(SPRN_SIER3);
		}

	} else if (vcpu->arch.hfscr & HFSCR_PM) {
		/*
		 * The guest accessed PMC SPRs without specifying they should
		 * be preserved, or it cleared pmcregs_in_use after the last
		 * access. Just ensure they are frozen.
		 */
		freeze_pmu(mfspr(SPRN_MMCR0), mfspr(SPRN_MMCRA));

		/*
		 * Demand-fault PMU register access in the guest.
		 *
		 * This is used to grab the guest's VPA pmcregs_in_use value
		 * and reflect it into the host's VPA in the case of a nested
		 * hypervisor.
		 *
		 * It also avoids having to zero-out SPRs after each guest
		 * exit to avoid side-channels.
		 *
		 * HFSCR[PM] is cleared here when we exit the guest, so later
		 * HFSCR interrupt handling can add it back to run the guest
		 * with PM enabled next time.
		 */
		if (!vcpu->arch.nested)
			vcpu->arch.hfscr &= ~HFSCR_PM;
	} /* otherwise the PMU should still be frozen */

#ifdef CONFIG_PPC_PSERIES
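	/*
	 * When running under a hypervisor ourselves, update our own VPA's
	 * pmcregs_in_use now that the guest state is saved, so it reflects
	 * whether the host has the PMU in use.
	 */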
	if (kvmhv_on_pseries()) {
		barrier();
		get_lppaca()->pmcregs_in_use = ppc_get_pmu_inuse();
		barrier();
	}
#endif

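	/* Reload the host PMU state saved by switch_pmu_to_guest */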
	if (ppc_get_pmu_inuse()) {
		mtspr(SPRN_PMC1, host_os_sprs->pmc1);
		mtspr(SPRN_PMC2, host_os_sprs->pmc2);
		mtspr(SPRN_PMC3, host_os_sprs->pmc3);
		mtspr(SPRN_PMC4, host_os_sprs->pmc4);
		mtspr(SPRN_PMC5, host_os_sprs->pmc5);
		mtspr(SPRN_PMC6, host_os_sprs->pmc6);
		mtspr(SPRN_MMCR1, host_os_sprs->mmcr1);
		mtspr(SPRN_MMCR2, host_os_sprs->mmcr2);
		mtspr(SPRN_SDAR, host_os_sprs->sdar);
		mtspr(SPRN_SIAR, host_os_sprs->siar);
		mtspr(SPRN_SIER, host_os_sprs->sier1);

		if (cpu_has_feature(CPU_FTR_ARCH_31)) {
			mtspr(SPRN_MMCR3, host_os_sprs->mmcr3);
			mtspr(SPRN_SIER2, host_os_sprs->sier2);
			mtspr(SPRN_SIER3, host_os_sprs->sier3);
		}

		/* Set MMCRA then MMCR0 last */
		mtspr(SPRN_MMCRA, host_os_sprs->mmcra);
		mtspr(SPRN_MMCR0, host_os_sprs->mmcr0);
		isync();
	}
}
EXPORT_SYMBOL_GPL(switch_pmu_to_host);