// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/tlbflush.h>

#include <nvhe/mem_protect.h>

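/*
 * Host context saved across a stage-2 TLB invalidation: only TCR_EL1
 * needs stashing, and only so that the ARM64_WORKAROUND_SPECULATIVE_AT
 * path can set and later clear the EPD bits around the VMID switch.
 */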
struct tlb_inv_context {
	u64		tcr;
};

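/*
 * Point the hardware at the guest's stage-2 context: order prior page
 * table updates with a dsb, apply the speculative AT workaround if
 * required, then load the guest's VTTBR/VTCR on this CPU. Must be
 * paired with __tlb_switch_to_host().
 */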
static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu,
				  struct tlb_inv_context *cxt,
				  bool nsh)
{
	/*
	 * We have two requirements:
	 *
	 * - ensure that the page table updates are visible to all
	 *   CPUs, for which a dsb(DOMAIN-st) is what we need, DOMAIN
	 *   being either ish or nsh, depending on the invalidation
	 *   type.
	 *
	 * - complete any speculative page table walk started before
	 *   we trapped to EL2 so that we can mess with the MM
	 *   registers out of context, for which dsb(nsh) is enough
	 *
	 * The composition of these two barriers is a dsb(DOMAIN), and
	 * the 'nsh' parameter tracks the distinction between
	 * Inner-Shareable and Non-Shareable, as specified by the
	 * callers.
	 */
	if (nsh)
		dsb(nsh);
	else
		dsb(ish);

	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
		u64 val;

		/*
		 * For CPUs that are affected by ARM erratum 1319367, we
		 * need to avoid a host Stage-1 walk while we have the
		 * guest's VMID set in the VTTBR in order to invalidate
		 * TLBs. We're guaranteed that the S1 MMU is enabled, so
		 * we can simply set the EPD bits to avoid any further
		 * TLB fill.
		 */
		val = cxt->tcr = read_sysreg_el1(SYS_TCR);
		val |= TCR_EPD1_MASK | TCR_EPD0_MASK;
		write_sysreg_el1(val, SYS_TCR);
		isb();
	}

	/*
	 * __load_stage2() includes an ISB only when the AT
	 * workaround is applied. Take care of the opposite condition,
	 * ensuring that we always have an ISB, but not two ISBs back
	 * to back.
	 */
	__load_stage2(mmu, kern_hyp_va(mmu->arch));
	asm(ALTERNATIVE("isb", "nop", ARM64_WORKAROUND_SPECULATIVE_AT));
}

static void __tlb_switch_to_host(struct tlb_inv_context *cxt)
{
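	/* Put the host's stage-2 context back in place on this CPU */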
	__load_host_stage2();

	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
		/* Ensure write of the host VMID */
		isb();
		/* Restore the host's TCR_EL1 */
		write_sysreg_el1(cxt->tcr, SYS_TCR);
	}
}

void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,
			      phys_addr_t ipa, int level)
{
	struct tlb_inv_context cxt;

	/* Switch to requested VMID */
	__tlb_switch_to_guest(mmu, &cxt, false);

	/*
	 * We could do so much better if we had the VA as well.
	 * Instead, we invalidate Stage-2 for this IPA, and the
	 * whole of Stage-1. Weep...
	 */
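	/*
	 * The TLBI IPAS2* instructions take the IPA right-shifted by
	 * 12 in Xt, and __tlbi_level() adds the TTL hint so that CPUs
	 * implementing FEAT_TTL can restrict the invalidation to
	 * entries from the given level.
	 */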
	ipa >>= 12;
	__tlbi_level(ipas2e1is, ipa, level);

	/*
	 * We have to ensure completion of the invalidation at Stage-2,
	 * since a table walk on another CPU could refill a TLB with a
	 * complete (S1 + S2) walk based on the old Stage-2 mapping if
	 * the Stage-1 invalidation happened first.
	 */
	dsb(ish);
	__tlbi(vmalle1is);
	dsb(ish);
	isb();

	/*
	 * If the host is running at EL1 and we have a VPIPT I-cache,
	 * then we must perform I-cache maintenance at EL2 in order for
	 * it to have an effect on the guest. Since the guest cannot hit
	 * I-cache lines allocated with a different VMID, we don't need
	 * to worry about junk out of guest reset (we nuke the I-cache on
	 * VMID rollover), but we do need to be careful when remapping
	 * executable pages for the same guest. This can happen when KSM
	 * takes a CoW fault on an executable page, copies the page into
	 * a page that was previously mapped in the guest and then needs
	 * to invalidate the guest view of the I-cache for that page
	 * from EL1. To solve this, we invalidate the entire I-cache when
	 * unmapping a page from a guest if we have a VPIPT I-cache but
	 * the host is running at EL1. As above, we could do better if
	 * we had the VA.
	 *
	 * The moral of this story is: if you have a VPIPT I-cache, then
	 * you should be running with VHE enabled.
	 */
	if (icache_is_vpipt())
		icache_inval_all_pou();

	__tlb_switch_to_host(&cxt);
}

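/*
 * Non-shareable variant: the caller guarantees that stale entries can
 * only exist on the local CPU, so neither the barriers nor the TLBIs
 * need to be broadcast beyond it.
 */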
void __kvm_tlb_flush_vmid_ipa_nsh(struct kvm_s2_mmu *mmu,
				  phys_addr_t ipa, int level)
{
	struct tlb_inv_context cxt;

	/* Switch to requested VMID */
	__tlb_switch_to_guest(mmu, &cxt, true);

	/*
	 * We could do so much better if we had the VA as well.
	 * Instead, we invalidate Stage-2 for this IPA, and the
	 * whole of Stage-1. Weep...
	 */
	ipa >>= 12;
	__tlbi_level(ipas2e1, ipa, level);

	/*
	 * We have to ensure completion of the invalidation at Stage-2,
	 * since a table walk on another CPU could refill a TLB with a
	 * complete (S1 + S2) walk based on the old Stage-2 mapping if
	 * the Stage-1 invalidation happened first.
	 */
	dsb(nsh);
	__tlbi(vmalle1);
	dsb(nsh);
	isb();

	/* See the comment in __kvm_tlb_flush_vmid_ipa() */
	if (icache_is_vpipt())
		icache_inval_all_pou();

	__tlb_switch_to_host(&cxt);
}

void __kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
				phys_addr_t start, unsigned long pages)
{
	struct tlb_inv_context cxt;
	unsigned long stride;

	/*
	 * Since the range of addresses may not be mapped at the same
	 * level, assume the worst-case stride of PAGE_SIZE.
	 */
	stride = PAGE_SIZE;
	start = round_down(start, stride);

	/* Switch to requested VMID */
	__tlb_switch_to_guest(mmu, &cxt, false);

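	/*
	 * This issues TLBI range operations when FEAT_TLBIRANGE is
	 * available, and falls back to one TLBI IPAS2E1IS per stride
	 * otherwise.
	 */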
	__flush_s2_tlb_range_op(ipas2e1is, start, pages, stride, 0);

	dsb(ish);
	__tlbi(vmalle1is);
	dsb(ish);
	isb();

	/* See the comment in __kvm_tlb_flush_vmid_ipa() */
	if (icache_is_vpipt())
		icache_inval_all_pou();

	__tlb_switch_to_host(&cxt);
}

void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
{
	struct tlb_inv_context cxt;

	/* Switch to requested VMID */
	__tlb_switch_to_guest(mmu, &cxt, false);

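	/*
	 * Invalidate all combined Stage-1+2 entries for this VMID,
	 * broadcast to the Inner Shareable domain.
	 */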
	__tlbi(vmalls12e1is);
	dsb(ish);
	isb();

	__tlb_switch_to_host(&cxt);
}

void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu)
{
	struct tlb_inv_context cxt;

	/* Switch to requested VMID */
	__tlb_switch_to_guest(mmu, &cxt, false);

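	/*
	 * Stale entries for this VMID can only live on this CPU (e.g.
	 * the vcpu last ran here under a different context), so a
	 * local, non-broadcast TLB and I-cache nuke is sufficient.
	 */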
	__tlbi(vmalle1);
	asm volatile("ic iallu");
	dsb(nsh);
	isb();

	__tlb_switch_to_host(&cxt);
}

void __kvm_flush_vm_context(void)
{
	/* Same remark as in __tlb_switch_to_guest() */
	dsb(ish);
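	/*
	 * Invalidate Stage-1+2 entries for *all* VMIDs across the
	 * Inner Shareable domain.
	 */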
	__tlbi(alle1is);

	/*
	 * VIPT and PIPT caches are not affected by VMID, so no maintenance
	 * is necessary across a VMID rollover.
	 *
	 * VPIPT caches constrain lookup and maintenance to the active VMID,
	 * so we need to invalidate lines with a stale VMID to avoid an ABA
	 * race after multiple rollovers.
	 */
	if (icache_is_vpipt())
		asm volatile("ic ialluis");

	dsb(ish);
}