xref: /kernel/linux/linux-5.10/arch/arm64/kvm/hyp/vhe/tlb.c (revision 8c2ecf20)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/irqflags.h>

#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/tlbflush.h>

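/*
 * Host state saved across a guest TLB operation: the interrupt flags,
 * plus TCR_EL1/SCTLR_EL1 when the ARM64_WORKAROUND_SPECULATIVE_AT
 * workaround has to fiddle with them.
 */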
struct tlb_inv_context {
	unsigned long	flags;
	u64		tcr;
	u64		sctlr;
};

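/*
 * Switch the TLB maintenance context to the guest: with IRQs masked,
 * load the guest's stage-2 configuration (VTTBR/VTCR) and clear
 * HCR_EL2.TGE so that the TLB instructions that follow operate on the
 * guest's EL1&0 translation regime instead of the host's.
 */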
static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu,
				  struct tlb_inv_context *cxt)
{
	u64 val;

	local_irq_save(cxt->flags);

	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
		/*
		 * For CPUs affected by ARM errata 1165522 or 1530923, we
		 * cannot trust stage-1 to be in a correct state at this
		 * point. Since we do not want to force a full load of the
		 * vcpu state, we prevent the EL1 page-table walker from
		 * allocating new TLB entries by setting the EPD bits in
		 * TCR_EL1. We also need to prevent it from allocating
		 * IPA->PA walks, so we enable the S1 MMU...
		 */
		val = cxt->tcr = read_sysreg_el1(SYS_TCR);
		val |= TCR_EPD1_MASK | TCR_EPD0_MASK;
		write_sysreg_el1(val, SYS_TCR);
		val = cxt->sctlr = read_sysreg_el1(SYS_SCTLR);
		val |= SCTLR_ELx_M;
		write_sysreg_el1(val, SYS_SCTLR);
	}

	/*
	 * With VHE enabled, we have HCR_EL2.{E2H,TGE} = {1,1}, and
	 * most TLB operations target EL2/EL0. In order to affect the
	 * guest TLBs (EL1/EL0), we need to change one of these two
	 * bits. Changing E2H is impossible (goodbye TTBR1_EL2), so
	 * let's flip TGE before executing the TLB operation.
	 *
	 * ARM erratum 1165522 requires some special handling (again),
	 * as we need to make sure both stages of translation are in
	 * place before clearing TGE. __load_guest_stage2() already
	 * has an ISB in order to deal with this.
	 */
	__load_guest_stage2(mmu);
	val = read_sysreg(hcr_el2);
	val &= ~HCR_TGE;
	write_sysreg(val, hcr_el2);
	isb();
}

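/*
 * Undo __tlb_switch_to_guest(): drop the guest's stage-2 context, put
 * the host's HCR_EL2 (TGE included) back, undo the SPECULATIVE_AT
 * workaround if it was applied, and unmask interrupts.
 */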
static void __tlb_switch_to_host(struct tlb_inv_context *cxt)
{
	/*
	 * We're done with the TLB operation, let's restore the host's
	 * view of HCR_EL2.
	 */
	write_sysreg(0, vttbr_el2);
	write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
	isb();

	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
		/* Restore the registers to what they were */
		write_sysreg_el1(cxt->tcr, SYS_TCR);
		write_sysreg_el1(cxt->sctlr, SYS_SCTLR);
	}

	local_irq_restore(cxt->flags);
}

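/*
 * Invalidate the TLB entries translating @ipa for the VMID described
 * by @mmu, broadcast to the Inner Shareable domain. @level is a hint
 * describing the level of the leaf entry, allowing a level-scoped
 * (TTL) invalidation where supported.
 */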
void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,
			      phys_addr_t ipa, int level)
{
	struct tlb_inv_context cxt;

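	/* Ensure prior page-table updates are visible before invalidating */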
	dsb(ishst);

	/* Switch to requested VMID */
	__tlb_switch_to_guest(mmu, &cxt);

	/*
	 * We could do so much better if we had the VA as well.
	 * Instead, we invalidate Stage-2 for this IPA, and the
	 * whole of Stage-1. Weep...
	 */
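	/* TLBI IPAS2E1IS takes the IPA as a page number, hence the shift */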
	ipa >>= 12;
	__tlbi_level(ipas2e1is, ipa, level);

	/*
	 * We have to ensure completion of the invalidation at Stage-2,
	 * since a table walk on another CPU could refill a TLB with a
	 * complete (S1 + S2) walk based on the old Stage-2 mapping if
	 * the Stage-1 invalidation happened first.
	 */
	dsb(ish);
	__tlbi(vmalle1is);
	dsb(ish);
	isb();

	__tlb_switch_to_host(&cxt);
}

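/*
 * Invalidate all stage-1 and stage-2 TLB entries for the VMID described
 * by @mmu, broadcast to the Inner Shareable domain.
 */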
void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
{
	struct tlb_inv_context cxt;

	dsb(ishst);

	/* Switch to requested VMID */
	__tlb_switch_to_guest(mmu, &cxt);

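	/* All stage-1 and stage-2 entries for the current VMID, Inner Shareable */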
	__tlbi(vmalls12e1is);
	dsb(ish);
	isb();

	__tlb_switch_to_host(&cxt);
}

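/*
 * Flush this CPU's view of the context described by @mmu: a local,
 * non-broadcast invalidation of the stage-1 TLB entries for its VMID
 * plus the local instruction cache.
 */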
void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu)
{
	struct tlb_inv_context cxt;

	/* Switch to requested VMID */
	__tlb_switch_to_guest(mmu, &cxt);

	__tlbi(vmalle1);
	asm volatile("ic iallu");
	dsb(nsh);
	isb();

	__tlb_switch_to_host(&cxt);
}

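/*
 * Invalidate guest TLB entries for all VMIDs, across all CPUs in the
 * Inner Shareable domain, e.g. across a VMID rollover.
 */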
void __kvm_flush_vm_context(void)
{
	dsb(ishst);
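	/* All EL1&0 TLB entries, for all VMIDs, Inner Shareable */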
	__tlbi(alle1is);

	/*
	 * VIPT and PIPT caches are not affected by VMID, so no maintenance
	 * is necessary across a VMID rollover.
	 *
	 * VPIPT caches constrain lookup and maintenance to the active VMID,
	 * so we need to invalidate lines with a stale VMID to avoid an ABA
	 * race after multiple rollovers.
	 */
	if (icache_is_vpipt())
		asm volatile("ic ialluis");

	dsb(ish);
}