// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/tlbflush.h>

struct tlb_inv_context {
	u64 tcr;
};

static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu,
				  struct tlb_inv_context *cxt)
{
	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
		u64 val;

		/*
		 * For CPUs that are affected by ARM 1319367, we need to
		 * avoid a host Stage-1 walk while we have the guest's
		 * VMID set in the VTTBR in order to invalidate TLBs.
		 * We're guaranteed that the S1 MMU is enabled, so we can
		 * simply set the EPD bits to avoid any further TLB fill.
		 */
		val = cxt->tcr = read_sysreg_el1(SYS_TCR);
		val |= TCR_EPD1_MASK | TCR_EPD0_MASK;
		write_sysreg_el1(val, SYS_TCR);
		isb();
	}

	/*
	 * __load_guest_stage2() includes an ISB only when the AT
	 * workaround is applied. Take care of the opposite condition,
	 * ensuring that we always have an ISB, but not two ISBs back
	 * to back.
	 */
	__load_guest_stage2(mmu);
	asm(ALTERNATIVE("isb", "nop", ARM64_WORKAROUND_SPECULATIVE_AT));
}

static void __tlb_switch_to_host(struct tlb_inv_context *cxt)
{
	write_sysreg(0, vttbr_el2);

	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
		/* Ensure write of the host VMID */
		isb();
		/* Restore the host's TCR_EL1 */
		write_sysreg_el1(cxt->tcr, SYS_TCR);
	}
}

void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,
			      phys_addr_t ipa, int level)
{
	struct tlb_inv_context cxt;

	dsb(ishst);

	/* Switch to requested VMID */
	__tlb_switch_to_guest(mmu, &cxt);

	/*
	 * We could do so much better if we had the VA as well.
	 * Instead, we invalidate Stage-2 for this IPA, and the
	 * whole of Stage-1. Weep...
	 */
	/* TLBI takes the IPA in units of 4kB pages */
	ipa >>= 12;
	__tlbi_level(ipas2e1is, ipa, level);

	/*
	 * We have to ensure completion of the invalidation at Stage-2,
	 * since a table walk on another CPU could refill a TLB with a
	 * complete (S1 + S2) walk based on the old Stage-2 mapping if
	 * the Stage-1 invalidation happened first.
	 */
	dsb(ish);
	__tlbi(vmalle1is);
	dsb(ish);
	isb();

	/*
	 * If the host is running at EL1 and we have a VPIPT I-cache,
	 * then we must perform I-cache maintenance at EL2 in order for
	 * it to have an effect on the guest. Since the guest cannot hit
	 * I-cache lines allocated with a different VMID, we don't need
	 * to worry about junk out of guest reset (we nuke the I-cache on
	 * VMID rollover), but we do need to be careful when remapping
	 * executable pages for the same guest. This can happen when KSM
	 * takes a CoW fault on an executable page, copies the page into
	 * a page that was previously mapped in the guest and then needs
	 * to invalidate the guest view of the I-cache for that page
	 * from EL1. To solve this, we invalidate the entire I-cache when
	 * unmapping a page from a guest if we have a VPIPT I-cache but
	 * the host is running at EL1. As above, we could do better if
	 * we had the VA.
	 *
	 * The moral of this story is: if you have a VPIPT I-cache, then
	 * you should be running with VHE enabled.
	 */
	if (icache_is_vpipt())
		__flush_icache_all();

	__tlb_switch_to_host(&cxt);
}

void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
{
	struct tlb_inv_context cxt;

	dsb(ishst);

	/* Switch to requested VMID */
	__tlb_switch_to_guest(mmu, &cxt);

	/* Invalidate all combined Stage-1 + Stage-2 entries for this VMID */
	__tlbi(vmalls12e1is);
	dsb(ish);
	isb();

	__tlb_switch_to_host(&cxt);
}

void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu)
{
	struct tlb_inv_context cxt;

	/* Switch to requested VMID */
	__tlb_switch_to_guest(mmu, &cxt);

	/*
	 * This flush is strictly local to the current CPU, so
	 * non-broadcast TLB/I-cache ops and a non-shareable
	 * barrier are sufficient.
	 */
	__tlbi(vmalle1);
	asm volatile("ic iallu");
	dsb(nsh);
	isb();

	__tlb_switch_to_host(&cxt);
}

void __kvm_flush_vm_context(void)
{
	dsb(ishst);
	/* Invalidate all EL1&0 TLB entries, for all VMIDs */
	__tlbi(alle1is);

	/*
	 * VIPT and PIPT caches are not affected by VMID, so no maintenance
	 * is necessary across a VMID rollover.
	 *
	 * VPIPT caches constrain lookup and maintenance to the active VMID,
	 * so we need to invalidate lines with a stale VMID to avoid an ABA
	 * race after multiple rollovers.
	 */
	if (icache_is_vpipt())
		asm volatile("ic ialluis");

	dsb(ish);
}