// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 */

#include "irq.h"
#include "ioapic.h"
#include "mmu.h"
#include "mmu_internal.h"
#include "tdp_mmu.h"
#include "x86.h"
#include "kvm_cache_regs.h"
#include "kvm_emulate.h"
#include "cpuid.h"
#include "spte.h"

#include <linux/kvm_host.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/moduleparam.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/hugetlb.h>
#include <linux/compiler.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/uaccess.h>
#include <linux/hash.h>
#include <linux/kern_levels.h>
#include <linux/kthread.h>

#include <asm/page.h>
#include <asm/memtype.h>
#include <asm/cmpxchg.h>
#include <asm/io.h>
#include <asm/vmx.h>
#include <asm/kvm_page_track.h>
#include "trace.h"

#include "paging.h"

extern bool itlb_multihit_kvm_mitigation;

static int __read_mostly nx_huge_pages = -1;
#ifdef CONFIG_PREEMPT_RT
/* Recovery can cause latency spikes, disable it for PREEMPT_RT */
static uint __read_mostly nx_huge_pages_recovery_ratio = 0;
#else
static uint __read_mostly nx_huge_pages_recovery_ratio = 60;
#endif

static int set_nx_huge_pages(const char *val, const struct kernel_param *kp);
static int set_nx_huge_pages_recovery_ratio(const char *val, const struct kernel_param *kp);

static const struct kernel_param_ops nx_huge_pages_ops = {
        .set = set_nx_huge_pages,
        .get = param_get_bool,
};

static const struct kernel_param_ops nx_huge_pages_recovery_ratio_ops = {
        .set = set_nx_huge_pages_recovery_ratio,
        .get = param_get_uint,
};

module_param_cb(nx_huge_pages, &nx_huge_pages_ops, &nx_huge_pages, 0644);
__MODULE_PARM_TYPE(nx_huge_pages, "bool");
module_param_cb(nx_huge_pages_recovery_ratio, &nx_huge_pages_recovery_ratio_ops,
                &nx_huge_pages_recovery_ratio, 0644);
__MODULE_PARM_TYPE(nx_huge_pages_recovery_ratio, "uint");

static bool __read_mostly force_flush_and_sync_on_reuse;
module_param_named(flush_on_reuse, force_flush_and_sync_on_reuse, bool, 0644);

/*
 * When set to true, this enables Two-Dimensional-Paging (TDP), where the
 * hardware walks 2 page tables:
 * 1. the guest-virtual to guest-physical
 * 2. while doing 1., it walks guest-physical to host-physical
 * If the hardware supports TDP, we don't need to do shadow paging.
 */
bool tdp_enabled = false;

static int max_huge_page_level __read_mostly;
static int max_tdp_level __read_mostly;

enum {
        AUDIT_PRE_PAGE_FAULT,
        AUDIT_POST_PAGE_FAULT,
        AUDIT_PRE_PTE_WRITE,
        AUDIT_POST_PTE_WRITE,
        AUDIT_PRE_SYNC,
        AUDIT_POST_SYNC
};

#ifdef MMU_DEBUG
bool dbg = 0;
module_param(dbg, bool, 0644);
#endif

#define PTE_PREFETCH_NUM                8

#define PT32_LEVEL_BITS 10

#define PT32_LEVEL_SHIFT(level) \
                (PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS)

#define PT32_LVL_OFFSET_MASK(level) \
        (PT32_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \
                                                * PT32_LEVEL_BITS))) - 1))

#define PT32_INDEX(address, level)\
        (((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))


#define PT32_BASE_ADDR_MASK PAGE_MASK
#define PT32_DIR_BASE_ADDR_MASK \
        (PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))
#define PT32_LVL_ADDR_MASK(level) \
        (PAGE_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \
                                            * PT32_LEVEL_BITS))) - 1))
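
/*
 * For example, with PAGE_SHIFT == 12 and 10 index bits per level:
 * PT32_LEVEL_SHIFT(1) == 12 and PT32_LEVEL_SHIFT(2) == 22, so for a
 * 32-bit address 0x12345678, PT32_INDEX(addr, 2) == (0x12345678 >> 22)
 * & 0x3ff == 0x48 and PT32_INDEX(addr, 1) == (0x12345678 >> 12) & 0x3ff
 * == 0x345.
 */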

#include <trace/events/kvm.h>

/* make pte_list_desc fit well in cache line */
#define PTE_LIST_EXT 3

struct pte_list_desc {
        u64 *sptes[PTE_LIST_EXT];
        struct pte_list_desc *more;
};

struct kvm_shadow_walk_iterator {
        u64 addr;
        hpa_t shadow_addr;
        u64 *sptep;
        int level;
        unsigned index;
};

#define for_each_shadow_entry_using_root(_vcpu, _root, _addr, _walker)     \
        for (shadow_walk_init_using_root(&(_walker), (_vcpu),              \
                                         (_root), (_addr));                \
             shadow_walk_okay(&(_walker));                                 \
             shadow_walk_next(&(_walker)))

#define for_each_shadow_entry(_vcpu, _addr, _walker)            \
        for (shadow_walk_init(&(_walker), _vcpu, _addr);        \
             shadow_walk_okay(&(_walker));                      \
             shadow_walk_next(&(_walker)))

#define for_each_shadow_entry_lockless(_vcpu, _addr, _walker, spte)     \
        for (shadow_walk_init(&(_walker), _vcpu, _addr);                \
             shadow_walk_okay(&(_walker)) &&                            \
                ({ spte = mmu_spte_get_lockless(_walker.sptep); 1; });  \
             __shadow_walk_next(&(_walker), spte))

static struct kmem_cache *pte_list_desc_cache;
struct kmem_cache *mmu_page_header_cache;
static struct percpu_counter kvm_total_used_mmu_pages;

static void mmu_spte_set(u64 *sptep, u64 spte);
static union kvm_mmu_page_role
kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu);

#define CREATE_TRACE_POINTS
#include "mmutrace.h"


static inline bool kvm_available_flush_tlb_with_range(void)
{
        return kvm_x86_ops.tlb_remote_flush_with_range;
}

static void kvm_flush_remote_tlbs_with_range(struct kvm *kvm,
                struct kvm_tlb_range *range)
{
        int ret = -ENOTSUPP;

        if (range && kvm_x86_ops.tlb_remote_flush_with_range)
                ret = kvm_x86_ops.tlb_remote_flush_with_range(kvm, range);

        if (ret)
                kvm_flush_remote_tlbs(kvm);
}

void kvm_flush_remote_tlbs_with_address(struct kvm *kvm,
                u64 start_gfn, u64 pages)
{
        struct kvm_tlb_range range;

        range.start_gfn = start_gfn;
        range.pages = pages;

        kvm_flush_remote_tlbs_with_range(kvm, &range);
}

bool is_nx_huge_page_enabled(void)
{
        return READ_ONCE(nx_huge_pages);
}

static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
                           unsigned int access)
{
        u64 mask = make_mmio_spte(vcpu, gfn, access);

        trace_mark_mmio_spte(sptep, gfn, mask);
        mmu_spte_set(sptep, mask);
}

static gfn_t get_mmio_spte_gfn(u64 spte)
{
        u64 gpa = spte & shadow_nonpresent_or_rsvd_lower_gfn_mask;

        gpa |= (spte >> SHADOW_NONPRESENT_OR_RSVD_MASK_LEN)
               & shadow_nonpresent_or_rsvd_mask;

        return gpa >> PAGE_SHIFT;
}

static unsigned
get_mmio_spte_access(u64 spte)
{
        return spte & shadow_mmio_access_mask;
}

static bool set_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
                          kvm_pfn_t pfn, unsigned int access)
{
        if (unlikely(is_noslot_pfn(pfn))) {
                mark_mmio_spte(vcpu, sptep, gfn, access);
                return true;
        }

        return false;
}

static bool check_mmio_spte(struct kvm_vcpu *vcpu, u64 spte)
{
        u64 kvm_gen, spte_gen, gen;

        gen = kvm_vcpu_memslots(vcpu)->generation;
        if (unlikely(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS))
                return false;

        kvm_gen = gen & MMIO_SPTE_GEN_MASK;
        spte_gen = get_mmio_spte_generation(spte);

        trace_check_mmio_spte(spte, kvm_gen, spte_gen);
        return likely(kvm_gen == spte_gen);
}
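
/*
 * For example: if userspace updates the memslots after an MMIO spte has
 * been created, kvm_vcpu_memslots(vcpu)->generation no longer matches
 * the generation stored in the spte, so check_mmio_spte() returns false
 * and the stale MMIO spte is not reused.
 */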

static gpa_t translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
                           struct x86_exception *exception)
{
        return gpa;
}

static int is_cpuid_PSE36(void)
{
        return 1;
}

static int is_nx(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.efer & EFER_NX;
}

static gfn_t pse36_gfn_delta(u32 gpte)
{
        int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT;

        return (gpte & PT32_DIR_PSE36_MASK) << shift;
}

#ifdef CONFIG_X86_64
static void __set_spte(u64 *sptep, u64 spte)
{
        WRITE_ONCE(*sptep, spte);
}

static void __update_clear_spte_fast(u64 *sptep, u64 spte)
{
        WRITE_ONCE(*sptep, spte);
}

static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
{
        return xchg(sptep, spte);
}

static u64 __get_spte_lockless(u64 *sptep)
{
        return READ_ONCE(*sptep);
}
#else
union split_spte {
        struct {
                u32 spte_low;
                u32 spte_high;
        };
        u64 spte;
};

static void count_spte_clear(u64 *sptep, u64 spte)
{
        struct kvm_mmu_page *sp = sptep_to_sp(sptep);

        if (is_shadow_present_pte(spte))
                return;

        /* Ensure the spte is completely set before we increase the count */
        smp_wmb();
        sp->clear_spte_count++;
}

static void __set_spte(u64 *sptep, u64 spte)
{
        union split_spte *ssptep, sspte;

        ssptep = (union split_spte *)sptep;
        sspte = (union split_spte)spte;

        ssptep->spte_high = sspte.spte_high;

        /*
         * If we are mapping the spte from nonpresent to present, we should
         * store the high bits first and only then set the present bit, so
         * that the CPU cannot fetch this spte while we are still setting it.
         */
        smp_wmb();

        WRITE_ONCE(ssptep->spte_low, sspte.spte_low);
}

static void __update_clear_spte_fast(u64 *sptep, u64 spte)
{
        union split_spte *ssptep, sspte;

        ssptep = (union split_spte *)sptep;
        sspte = (union split_spte)spte;

        WRITE_ONCE(ssptep->spte_low, sspte.spte_low);

        /*
         * If we are mapping the spte from present to nonpresent, we should
         * clear the present bit first so that a vcpu cannot fetch the stale
         * high bits.
         */
        smp_wmb();

        ssptep->spte_high = sspte.spte_high;
        count_spte_clear(sptep, spte);
}

static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
{
        union split_spte *ssptep, sspte, orig;

        ssptep = (union split_spte *)sptep;
        sspte = (union split_spte)spte;

        /* xchg acts as a barrier before the setting of the high bits */
        orig.spte_low = xchg(&ssptep->spte_low, sspte.spte_low);
        orig.spte_high = ssptep->spte_high;
        ssptep->spte_high = sspte.spte_high;
        count_spte_clear(sptep, spte);

        return orig.spte;
}

/*
 * The idea of this lightweight way to get the spte on x86_32 guests
 * comes from gup_get_pte (mm/gup.c).
 *
 * An spte tlb flush may be pending, because kvm_set_pte_rmapp
 * coalesces them and we are running outside of the MMU lock.  Therefore
 * we need to protect against in-progress updates of the spte.
 *
 * Reading the spte while an update is in progress may get the old value
 * for the high part of the spte.  The race is fine for a present->non-present
 * change (because the high part of the spte is ignored for non-present spte),
 * but for a present->present change we must reread the spte.
 *
 * All such changes are done in two steps (present->non-present and
 * non-present->present), hence it is enough to count the number of
 * present->non-present updates: if it changed while reading the spte,
 * we might have hit the race.  This is done using clear_spte_count.
 */
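
/*
 * For example: a present->present update is performed as
 * present->non-present (clear spte_low, then spte_high) followed by
 * non-present->present (set spte_high, then spte_low).  If the reader
 * below observes the intermediate state, either spte_low has changed
 * or clear_spte_count has been bumped by count_spte_clear(), and the
 * read is retried.
 */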
static u64 __get_spte_lockless(u64 *sptep)
{
        struct kvm_mmu_page *sp = sptep_to_sp(sptep);
        union split_spte spte, *orig = (union split_spte *)sptep;
        int count;

retry:
        count = sp->clear_spte_count;
        smp_rmb();

        spte.spte_low = orig->spte_low;
        smp_rmb();

        spte.spte_high = orig->spte_high;
        smp_rmb();

        if (unlikely(spte.spte_low != orig->spte_low ||
              count != sp->clear_spte_count))
                goto retry;

        return spte.spte;
}
#endif

static bool spte_has_volatile_bits(u64 spte)
{
        if (!is_shadow_present_pte(spte))
                return false;

        /*
         * Always update the spte atomically if it can be updated outside
         * of the mmu-lock: this ensures the dirty bit is not lost, and it
         * also gives us a stable is_writable_pte(), so that no TLB flush
         * is missed.
         */
        if (spte_can_locklessly_be_made_writable(spte) ||
            is_access_track_spte(spte))
                return true;

        if (spte_ad_enabled(spte)) {
                if ((spte & shadow_accessed_mask) == 0 ||
                    (is_writable_pte(spte) && (spte & shadow_dirty_mask) == 0))
                        return true;
        }

        return false;
}

/* Rules for using mmu_spte_set:
 * Set the sptep from nonpresent to present.
 * Note: the sptep being assigned *must* be either not present
 * or in a state where the hardware will not attempt to update
 * the spte.
 */
static void mmu_spte_set(u64 *sptep, u64 new_spte)
{
        WARN_ON(is_shadow_present_pte(*sptep));
        __set_spte(sptep, new_spte);
}

/*
 * Update the SPTE (excluding the PFN), but do not track changes in its
 * accessed/dirty status.
 */
static u64 mmu_spte_update_no_track(u64 *sptep, u64 new_spte)
{
        u64 old_spte = *sptep;

        WARN_ON(!is_shadow_present_pte(new_spte));

        if (!is_shadow_present_pte(old_spte)) {
                mmu_spte_set(sptep, new_spte);
                return old_spte;
        }

        if (!spte_has_volatile_bits(old_spte))
                __update_clear_spte_fast(sptep, new_spte);
        else
                old_spte = __update_clear_spte_slow(sptep, new_spte);

        WARN_ON(spte_to_pfn(old_spte) != spte_to_pfn(new_spte));

        return old_spte;
}

/* Rules for using mmu_spte_update:
 * Update the state bits; the mapped pfn is not changed.
 *
 * Whenever we overwrite a writable spte with a read-only one, we
 * should flush remote TLBs.  Otherwise rmap_write_protect will find
 * a read-only spte, even though the writable spte might still be
 * cached in a CPU's TLB; the return value indicates this case.
 *
 * Returns true if the TLB needs to be flushed
 */
static bool mmu_spte_update(u64 *sptep, u64 new_spte)
{
        bool flush = false;
        u64 old_spte = mmu_spte_update_no_track(sptep, new_spte);

        if (!is_shadow_present_pte(old_spte))
                return false;

        /*
         * Updating the spte outside of the mmu-lock is safe, since we
         * always update it atomically; see the comments in
         * spte_has_volatile_bits().
         */
        if (spte_can_locklessly_be_made_writable(old_spte) &&
              !is_writable_pte(new_spte))
                flush = true;

        /*
         * Flush TLB when accessed/dirty states are changed in the page tables,
         * to guarantee consistency between TLB and page tables.
         */

        if (is_accessed_spte(old_spte) && !is_accessed_spte(new_spte)) {
                flush = true;
                kvm_set_pfn_accessed(spte_to_pfn(old_spte));
        }

        if (is_dirty_spte(old_spte) && !is_dirty_spte(new_spte)) {
                flush = true;
                kvm_set_pfn_dirty(spte_to_pfn(old_spte));
        }

        return flush;
}

/*
 * Rules for using mmu_spte_clear_track_bits:
 * It sets the sptep from present to nonpresent and tracks the
 * state bits; it is used to clear a last-level sptep.
 * Returns non-zero if the PTE was previously valid.
 */
static int mmu_spte_clear_track_bits(u64 *sptep)
{
        kvm_pfn_t pfn;
        u64 old_spte = *sptep;

        if (!spte_has_volatile_bits(old_spte))
                __update_clear_spte_fast(sptep, 0ull);
        else
                old_spte = __update_clear_spte_slow(sptep, 0ull);

        if (!is_shadow_present_pte(old_spte))
                return 0;

        pfn = spte_to_pfn(old_spte);

        /*
         * KVM does not hold a refcount on the pages used by the kvm mmu;
         * before a page is reclaimed, it must be unmapped from the mmu
         * first.
         */
        WARN_ON(!kvm_is_reserved_pfn(pfn) && !page_count(pfn_to_page(pfn)));

        if (is_accessed_spte(old_spte))
                kvm_set_pfn_accessed(pfn);

        if (is_dirty_spte(old_spte))
                kvm_set_pfn_dirty(pfn);

        return 1;
}

/*
 * Rules for using mmu_spte_clear_no_track:
 * Directly clear the spte without tracking its state bits;
 * it is used to clear an upper-level spte.
 */
static void mmu_spte_clear_no_track(u64 *sptep)
{
        __update_clear_spte_fast(sptep, 0ull);
}

static u64 mmu_spte_get_lockless(u64 *sptep)
{
        return __get_spte_lockless(sptep);
}

/* Restore an acc-track PTE back to a regular PTE */
static u64 restore_acc_track_spte(u64 spte)
{
        u64 new_spte = spte;
        u64 saved_bits = (spte >> SHADOW_ACC_TRACK_SAVED_BITS_SHIFT)
                         & SHADOW_ACC_TRACK_SAVED_BITS_MASK;

        WARN_ON_ONCE(spte_ad_enabled(spte));
        WARN_ON_ONCE(!is_access_track_spte(spte));

        new_spte &= ~shadow_acc_track_mask;
        new_spte &= ~(SHADOW_ACC_TRACK_SAVED_BITS_MASK <<
                      SHADOW_ACC_TRACK_SAVED_BITS_SHIFT);
        new_spte |= saved_bits;

        return new_spte;
}

/* Returns the Accessed status of the PTE and resets it at the same time. */
static bool mmu_spte_age(u64 *sptep)
{
        u64 spte = mmu_spte_get_lockless(sptep);

        if (!is_accessed_spte(spte))
                return false;

        if (spte_ad_enabled(spte)) {
                clear_bit((ffs(shadow_accessed_mask) - 1),
                          (unsigned long *)sptep);
        } else {
                /*
                 * Capture the dirty status of the page, so that it doesn't get
                 * lost when the SPTE is marked for access tracking.
                 */
                if (is_writable_pte(spte))
                        kvm_set_pfn_dirty(spte_to_pfn(spte));

                spte = mark_spte_for_access_track(spte);
                mmu_spte_update_no_track(sptep, spte);
        }

        return true;
}

static void walk_shadow_page_lockless_begin(struct kvm_vcpu *vcpu)
{
        /*
         * Prevent page table teardown by making any freeing thread wait
         * during the kvm_flush_remote_tlbs() IPI to all active vcpus.
         */
        local_irq_disable();

        /*
         * Make sure a following spte read is not reordered ahead of the write
         * to vcpu->mode.
         */
        smp_store_mb(vcpu->mode, READING_SHADOW_PAGE_TABLES);
}

static void walk_shadow_page_lockless_end(struct kvm_vcpu *vcpu)
{
        /*
         * Make sure the write to vcpu->mode is not reordered in front of
         * reads to sptes.  If it does, kvm_mmu_commit_zap_page() can see us
         * OUTSIDE_GUEST_MODE and proceed to free the shadow page table.
         */
        smp_store_release(&vcpu->mode, OUTSIDE_GUEST_MODE);
        local_irq_enable();
}
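
/*
 * A sketch of the intended usage pattern of the pair above (mirroring the
 * for_each_shadow_entry_lockless() walkers elsewhere in this file):
 *
 *      walk_shadow_page_lockless_begin(vcpu);
 *      for_each_shadow_entry_lockless(vcpu, addr, iterator, spte)
 *              (inspect spte);
 *      walk_shadow_page_lockless_end(vcpu);
 */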

static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu, bool maybe_indirect)
{
        int r;

        /* 1 rmap, 1 parent PTE per level, and the prefetched rmaps. */
        r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache,
                                       1 + PT64_ROOT_MAX_LEVEL + PTE_PREFETCH_NUM);
        if (r)
                return r;
        r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_shadow_page_cache,
                                       PT64_ROOT_MAX_LEVEL);
        if (r)
                return r;
        if (maybe_indirect) {
                r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_gfn_array_cache,
                                               PT64_ROOT_MAX_LEVEL);
                if (r)
                        return r;
        }
        return kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
                                          PT64_ROOT_MAX_LEVEL);
}

static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
        kvm_mmu_free_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache);
        kvm_mmu_free_memory_cache(&vcpu->arch.mmu_shadow_page_cache);
        kvm_mmu_free_memory_cache(&vcpu->arch.mmu_gfn_array_cache);
        kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache);
}

static struct pte_list_desc *mmu_alloc_pte_list_desc(struct kvm_vcpu *vcpu)
{
        return kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_list_desc_cache);
}

static void mmu_free_pte_list_desc(struct pte_list_desc *pte_list_desc)
{
        kmem_cache_free(pte_list_desc_cache, pte_list_desc);
}

static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index)
{
        if (!sp->role.direct)
                return sp->gfns[index];

        return sp->gfn + (index << ((sp->role.level - 1) * PT64_LEVEL_BITS));
}
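
/*
 * For the direct case above, with PT64_LEVEL_BITS == 9: a direct sp with
 * role.level == 1 maps 512 consecutive 4K pages, so entry @index maps
 * gfn sp->gfn + index; with role.level == 2 each entry covers 512 pages,
 * so entry @index maps gfn sp->gfn + (index << 9).
 */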
static void
kvm_mmu_page_set_gfn(struct kvm_mmu_page *sp, int index, gfn_t gfn)
{
        if (!sp->role.direct) {
                sp->gfns[index] = gfn;
                return;
        }

        if (WARN_ON(gfn != kvm_mmu_page_get_gfn(sp, index)))
                pr_err_ratelimited("gfn mismatch under direct page %llx "
                                   "(expected %llx, got %llx)\n",
                                   sp->gfn,
                                   kvm_mmu_page_get_gfn(sp, index), gfn);
}

/*
 * Return the pointer to the large page information for a given gfn,
 * handling slots that are not large page aligned.
 */
static struct kvm_lpage_info *lpage_info_slot(gfn_t gfn,
                                              struct kvm_memory_slot *slot,
                                              int level)
{
        unsigned long idx;

        idx = gfn_to_index(gfn, slot->base_gfn, level);
        return &slot->arch.lpage_info[level - 2][idx];
}

static void update_gfn_disallow_lpage_count(struct kvm_memory_slot *slot,
                                            gfn_t gfn, int count)
{
        struct kvm_lpage_info *linfo;
        int i;

        for (i = PG_LEVEL_2M; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) {
                linfo = lpage_info_slot(gfn, slot, i);
                linfo->disallow_lpage += count;
                WARN_ON(linfo->disallow_lpage < 0);
        }
}

void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn)
{
        update_gfn_disallow_lpage_count(slot, gfn, 1);
}

void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn)
{
        update_gfn_disallow_lpage_count(slot, gfn, -1);
}

static void account_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
{
        struct kvm_memslots *slots;
        struct kvm_memory_slot *slot;
        gfn_t gfn;

        kvm->arch.indirect_shadow_pages++;
        gfn = sp->gfn;
        slots = kvm_memslots_for_spte_role(kvm, sp->role);
        slot = __gfn_to_memslot(slots, gfn);

        /* Non-leaf shadow pages are kept read-only. */
        if (sp->role.level > PG_LEVEL_4K)
                return kvm_slot_page_track_add_page(kvm, slot, gfn,
                                                    KVM_PAGE_TRACK_WRITE);

        kvm_mmu_gfn_disallow_lpage(slot, gfn);
}

void account_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
        if (sp->lpage_disallowed)
                return;

        ++kvm->stat.nx_lpage_splits;
        list_add_tail(&sp->lpage_disallowed_link,
                      &kvm->arch.lpage_disallowed_mmu_pages);
        sp->lpage_disallowed = true;
}

static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
{
        struct kvm_memslots *slots;
        struct kvm_memory_slot *slot;
        gfn_t gfn;

        kvm->arch.indirect_shadow_pages--;
        gfn = sp->gfn;
        slots = kvm_memslots_for_spte_role(kvm, sp->role);
        slot = __gfn_to_memslot(slots, gfn);
        if (sp->role.level > PG_LEVEL_4K)
                return kvm_slot_page_track_remove_page(kvm, slot, gfn,
                                                       KVM_PAGE_TRACK_WRITE);

        kvm_mmu_gfn_allow_lpage(slot, gfn);
}

void unaccount_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
        --kvm->stat.nx_lpage_splits;
        sp->lpage_disallowed = false;
        list_del(&sp->lpage_disallowed_link);
}

static struct kvm_memory_slot *
gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn,
                            bool no_dirty_log)
{
        struct kvm_memory_slot *slot;

        slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
        if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
                return NULL;
        if (no_dirty_log && slot->dirty_bitmap)
                return NULL;

        return slot;
}

/*
 * About rmap_head encoding:
 *
 * If the bit zero of rmap_head->val is clear, then it points to the only spte
 * in this rmap chain. Otherwise, (rmap_head->val & ~1) points to a struct
 * pte_list_desc containing more mappings.
 */
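
/*
 * Illustrative states of the encoding above:
 *
 *   one spte:   rmap_head->val == (unsigned long)sptep        (bit 0 clear)
 *   many sptes: rmap_head->val == (unsigned long)desc | 1     (bit 0 set),
 *               where desc->sptes[] holds up to PTE_LIST_EXT entries and
 *               desc->more chains further descriptors.
 */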

/*
 * Returns the number of pointers in the rmap chain, not counting the new one.
 */
static int pte_list_add(struct kvm_vcpu *vcpu, u64 *spte,
                        struct kvm_rmap_head *rmap_head)
{
        struct pte_list_desc *desc;
        int i, count = 0;

        if (!rmap_head->val) {
                rmap_printk("pte_list_add: %p %llx 0->1\n", spte, *spte);
                rmap_head->val = (unsigned long)spte;
        } else if (!(rmap_head->val & 1)) {
                rmap_printk("pte_list_add: %p %llx 1->many\n", spte, *spte);
                desc = mmu_alloc_pte_list_desc(vcpu);
                desc->sptes[0] = (u64 *)rmap_head->val;
                desc->sptes[1] = spte;
                rmap_head->val = (unsigned long)desc | 1;
                ++count;
        } else {
                rmap_printk("pte_list_add: %p %llx many->many\n", spte, *spte);
                desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
                while (desc->sptes[PTE_LIST_EXT-1]) {
                        count += PTE_LIST_EXT;

                        if (!desc->more) {
                                desc->more = mmu_alloc_pte_list_desc(vcpu);
                                desc = desc->more;
                                break;
                        }
                        desc = desc->more;
                }
                for (i = 0; desc->sptes[i]; ++i)
                        ++count;
                desc->sptes[i] = spte;
        }
        return count;
}

static void
pte_list_desc_remove_entry(struct kvm_rmap_head *rmap_head,
                           struct pte_list_desc *desc, int i,
                           struct pte_list_desc *prev_desc)
{
        int j;

        for (j = PTE_LIST_EXT - 1; !desc->sptes[j] && j > i; --j)
                ;
        desc->sptes[i] = desc->sptes[j];
        desc->sptes[j] = NULL;
        if (j != 0)
                return;
        if (!prev_desc && !desc->more)
                rmap_head->val = 0;
        else
                if (prev_desc)
                        prev_desc->more = desc->more;
                else
                        rmap_head->val = (unsigned long)desc->more | 1;
        mmu_free_pte_list_desc(desc);
}

static void __pte_list_remove(u64 *spte, struct kvm_rmap_head *rmap_head)
{
        struct pte_list_desc *desc;
        struct pte_list_desc *prev_desc;
        int i;

        if (!rmap_head->val) {
                pr_err("%s: %p 0->BUG\n", __func__, spte);
                BUG();
        } else if (!(rmap_head->val & 1)) {
                rmap_printk("%s: %p 1->0\n", __func__, spte);
                if ((u64 *)rmap_head->val != spte) {
                        pr_err("%s: %p 1->BUG\n", __func__, spte);
                        BUG();
                }
                rmap_head->val = 0;
        } else {
                rmap_printk("%s: %p many->many\n", __func__, spte);
                desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
                prev_desc = NULL;
                while (desc) {
                        for (i = 0; i < PTE_LIST_EXT && desc->sptes[i]; ++i) {
                                if (desc->sptes[i] == spte) {
                                        pte_list_desc_remove_entry(rmap_head,
                                                        desc, i, prev_desc);
                                        return;
                                }
                        }
                        prev_desc = desc;
                        desc = desc->more;
                }
                pr_err("%s: %p many->many\n", __func__, spte);
                BUG();
        }
}

static void pte_list_remove(struct kvm_rmap_head *rmap_head, u64 *sptep)
{
        mmu_spte_clear_track_bits(sptep);
        __pte_list_remove(sptep, rmap_head);
}

static struct kvm_rmap_head *__gfn_to_rmap(gfn_t gfn, int level,
                                           struct kvm_memory_slot *slot)
{
        unsigned long idx;

        idx = gfn_to_index(gfn, slot->base_gfn, level);
        return &slot->arch.rmap[level - PG_LEVEL_4K][idx];
}

static struct kvm_rmap_head *gfn_to_rmap(struct kvm *kvm, gfn_t gfn,
                                         struct kvm_mmu_page *sp)
{
        struct kvm_memslots *slots;
        struct kvm_memory_slot *slot;

        slots = kvm_memslots_for_spte_role(kvm, sp->role);
        slot = __gfn_to_memslot(slots, gfn);
        return __gfn_to_rmap(gfn, sp->role.level, slot);
}

static bool rmap_can_add(struct kvm_vcpu *vcpu)
{
        struct kvm_mmu_memory_cache *mc;

        mc = &vcpu->arch.mmu_pte_list_desc_cache;
        return kvm_mmu_memory_cache_nr_free_objects(mc);
}

static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
{
        struct kvm_mmu_page *sp;
        struct kvm_rmap_head *rmap_head;

        sp = sptep_to_sp(spte);
        kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn);
        rmap_head = gfn_to_rmap(vcpu->kvm, gfn, sp);
        return pte_list_add(vcpu, spte, rmap_head);
}

static void rmap_remove(struct kvm *kvm, u64 *spte)
{
        struct kvm_mmu_page *sp;
        gfn_t gfn;
        struct kvm_rmap_head *rmap_head;

        sp = sptep_to_sp(spte);
        gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt);
        rmap_head = gfn_to_rmap(kvm, gfn, sp);
        __pte_list_remove(spte, rmap_head);
}

/*
 * Used by the following functions to iterate through the sptes linked by an
 * rmap.  All fields are private and not assumed to be used outside.
 */
struct rmap_iterator {
        /* private fields */
        struct pte_list_desc *desc;     /* holds the sptep if not NULL */
        int pos;                        /* index of the sptep */
};

/*
 * Iteration must be started by this function.  This should also be used after
 * removing/dropping sptes from the rmap link because in such cases the
 * information in the iterator may not be valid.
 *
 * Returns sptep if found, NULL otherwise.
 */
static u64 *rmap_get_first(struct kvm_rmap_head *rmap_head,
                           struct rmap_iterator *iter)
{
        u64 *sptep;

        if (!rmap_head->val)
                return NULL;

        if (!(rmap_head->val & 1)) {
                iter->desc = NULL;
                sptep = (u64 *)rmap_head->val;
                goto out;
        }

        iter->desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
        iter->pos = 0;
        sptep = iter->desc->sptes[iter->pos];
out:
        BUG_ON(!is_shadow_present_pte(*sptep));
        return sptep;
}

/*
 * Must be used with a valid iterator: e.g. after rmap_get_first().
 *
 * Returns sptep if found, NULL otherwise.
 */
static u64 *rmap_get_next(struct rmap_iterator *iter)
{
        u64 *sptep;

        if (iter->desc) {
                if (iter->pos < PTE_LIST_EXT - 1) {
                        ++iter->pos;
                        sptep = iter->desc->sptes[iter->pos];
                        if (sptep)
                                goto out;
                }

                iter->desc = iter->desc->more;

                if (iter->desc) {
                        iter->pos = 0;
                        /* desc->sptes[0] cannot be NULL */
                        sptep = iter->desc->sptes[iter->pos];
                        goto out;
                }
        }

        return NULL;
out:
        BUG_ON(!is_shadow_present_pte(*sptep));
        return sptep;
}

#define for_each_rmap_spte(_rmap_head_, _iter_, _spte_)                 \
        for (_spte_ = rmap_get_first(_rmap_head_, _iter_);              \
             _spte_; _spte_ = rmap_get_next(_iter_))

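/*
 * Typical usage, mirroring the rmap walkers below: visit every spte that
 * maps a gfn and accumulate whether a TLB flush is needed, e.g.
 *
 *      for_each_rmap_spte(rmap_head, &iter, sptep)
 *              flush |= spte_write_protect(sptep, pt_protect);
 */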

static void drop_spte(struct kvm *kvm, u64 *sptep)
{
        if (mmu_spte_clear_track_bits(sptep))
                rmap_remove(kvm, sptep);
}


static bool __drop_large_spte(struct kvm *kvm, u64 *sptep)
{
        if (is_large_pte(*sptep)) {
                WARN_ON(sptep_to_sp(sptep)->role.level == PG_LEVEL_4K);
                drop_spte(kvm, sptep);
                --kvm->stat.lpages;
                return true;
        }

        return false;
}

static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep)
{
        if (__drop_large_spte(vcpu->kvm, sptep)) {
                struct kvm_mmu_page *sp = sptep_to_sp(sptep);

                kvm_flush_remote_tlbs_with_address(vcpu->kvm, sp->gfn,
                        KVM_PAGES_PER_HPAGE(sp->role.level));
        }
}

/*
 * Write-protect the given @sptep; @pt_protect indicates whether the spte
 * write-protection is caused by protecting the shadow page table.
 *
 * Note: write protection means different things for dirty logging and for
 * spte protection:
 * - for dirty logging, the spte can be made writable at any time if
 *   its dirty bitmap is properly set.
 * - for spte protection, the spte can be made writable only after the
 *   shadow page is unsynced.
 *
 * Return true if the TLB needs to be flushed.
 */
static bool spte_write_protect(u64 *sptep, bool pt_protect)
{
        u64 spte = *sptep;

        if (!is_writable_pte(spte) &&
              !(pt_protect && spte_can_locklessly_be_made_writable(spte)))
                return false;

        rmap_printk("rmap_write_protect: spte %p %llx\n", sptep, *sptep);

        if (pt_protect)
                spte &= ~SPTE_MMU_WRITEABLE;
        spte = spte & ~PT_WRITABLE_MASK;

        return mmu_spte_update(sptep, spte);
}

static bool __rmap_write_protect(struct kvm *kvm,
                                 struct kvm_rmap_head *rmap_head,
                                 bool pt_protect)
{
        u64 *sptep;
        struct rmap_iterator iter;
        bool flush = false;

        for_each_rmap_spte(rmap_head, &iter, sptep)
                flush |= spte_write_protect(sptep, pt_protect);

        return flush;
}

static bool spte_clear_dirty(u64 *sptep)
{
        u64 spte = *sptep;

        rmap_printk("rmap_clear_dirty: spte %p %llx\n", sptep, *sptep);

        MMU_WARN_ON(!spte_ad_enabled(spte));
        spte &= ~shadow_dirty_mask;
        return mmu_spte_update(sptep, spte);
}

static bool spte_wrprot_for_clear_dirty(u64 *sptep)
{
        bool was_writable = test_and_clear_bit(PT_WRITABLE_SHIFT,
                                               (unsigned long *)sptep);
        if (was_writable && !spte_ad_enabled(*sptep))
                kvm_set_pfn_dirty(spte_to_pfn(*sptep));

        return was_writable;
}

/*
 * Gets the GFN ready for another round of dirty logging by clearing the
 * - D bit on ad-enabled SPTEs, and
 * - W bit on ad-disabled SPTEs.
 * Returns true iff any D or W bits were cleared.
/*
 * Gets the GFN ready for another round of dirty logging by clearing the
 * - D bit on ad-enabled SPTEs, and
 * - W bit on ad-disabled SPTEs.
 * Returns true iff any D or W bits were cleared.
 */
static bool __rmap_clear_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head)
{
	u64 *sptep;
	struct rmap_iterator iter;
	bool flush = false;

	for_each_rmap_spte(rmap_head, &iter, sptep)
		if (spte_ad_need_write_protect(*sptep))
			flush |= spte_wrprot_for_clear_dirty(sptep);
		else
			flush |= spte_clear_dirty(sptep);

	return flush;
}

static bool spte_set_dirty(u64 *sptep)
{
	u64 spte = *sptep;

	rmap_printk("rmap_set_dirty: spte %p %llx\n", sptep, *sptep);

	/*
	 * Similar to the !kvm_x86_ops.slot_disable_log_dirty case,
	 * do not bother adding back write access to pages marked
	 * SPTE_AD_WRPROT_ONLY_MASK.
	 */
	spte |= shadow_dirty_mask;

	return mmu_spte_update(sptep, spte);
}

static bool __rmap_set_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head)
{
	u64 *sptep;
	struct rmap_iterator iter;
	bool flush = false;

	for_each_rmap_spte(rmap_head, &iter, sptep)
		if (spte_ad_enabled(*sptep))
			flush |= spte_set_dirty(sptep);

	return flush;
}
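/*
 * Worked example (added for illustration): the pt_masked helpers below
 * consume @mask one set bit at a time.  With gfn_offset == 0x40 and
 * mask == 0b1001, the loop visits slot->base_gfn + 0x40 (__ffs == 0)
 * and then slot->base_gfn + 0x43 (__ffs == 3); "mask &= mask - 1"
 * clears the lowest set bit each round:
 *
 *	while (mask) {
 *		gfn_t gfn = slot->base_gfn + gfn_offset + __ffs(mask);
 *		...write-protect or clear the D bit on the 4K rmap of gfn...
 *		mask &= mask - 1;
 *	}
 */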
/**
 * kvm_mmu_write_protect_pt_masked - write protect selected PT level pages
 * @kvm: kvm instance
 * @slot: slot to protect
 * @gfn_offset: start of the BITS_PER_LONG pages we care about
 * @mask: indicates which pages we should protect
 *
 * Used when we do not need to care about huge page mappings: e.g. during dirty
 * logging we do not have any such mappings.
 */
static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
					    struct kvm_memory_slot *slot,
					    gfn_t gfn_offset, unsigned long mask)
{
	struct kvm_rmap_head *rmap_head;

	if (kvm->arch.tdp_mmu_enabled)
		kvm_tdp_mmu_clear_dirty_pt_masked(kvm, slot,
				slot->base_gfn + gfn_offset, mask, true);
	while (mask) {
		rmap_head = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
					  PG_LEVEL_4K, slot);
		__rmap_write_protect(kvm, rmap_head, false);

		/* clear the first set bit */
		mask &= mask - 1;
	}
}

/**
 * kvm_mmu_clear_dirty_pt_masked - clear MMU D-bit for PT level pages, or write
 * protect the page if the D-bit isn't supported.
 * @kvm: kvm instance
 * @slot: slot to clear D-bit
 * @gfn_offset: start of the BITS_PER_LONG pages we care about
 * @mask: indicates which pages we should clear the D-bit for
 *
 * Used for PML to re-log the dirty GPAs after userspace querying dirty_bitmap.
 */
void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
				   struct kvm_memory_slot *slot,
				   gfn_t gfn_offset, unsigned long mask)
{
	struct kvm_rmap_head *rmap_head;

	if (kvm->arch.tdp_mmu_enabled)
		kvm_tdp_mmu_clear_dirty_pt_masked(kvm, slot,
				slot->base_gfn + gfn_offset, mask, false);
	while (mask) {
		rmap_head = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
					  PG_LEVEL_4K, slot);
		__rmap_clear_dirty(kvm, rmap_head);

		/* clear the first set bit */
		mask &= mask - 1;
	}
}
EXPORT_SYMBOL_GPL(kvm_mmu_clear_dirty_pt_masked);
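/*
 * Context note (an assumption about the callers, added for illustration):
 * in the dirty-log path each 64-bit chunk of the dirty bitmap harvested
 * by userspace is fed back through the helpers above, so @gfn_offset is
 * BITS_PER_LONG-aligned and @mask covers at most 64 pages per call.
 */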
/**
 * kvm_arch_mmu_enable_log_dirty_pt_masked - enable dirty logging for selected
 * PT level pages.
 *
 * It calls kvm_mmu_write_protect_pt_masked to write protect selected pages to
 * enable dirty logging for them.
 *
 * Used when we do not need to care about huge page mappings: e.g. during dirty
 * logging we do not have any such mappings.
 */
void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
				struct kvm_memory_slot *slot,
				gfn_t gfn_offset, unsigned long mask)
{
	if (kvm_x86_ops.enable_log_dirty_pt_masked)
		kvm_x86_ops.enable_log_dirty_pt_masked(kvm, slot, gfn_offset,
				mask);
	else
		kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
}

bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
				    struct kvm_memory_slot *slot, u64 gfn)
{
	struct kvm_rmap_head *rmap_head;
	int i;
	bool write_protected = false;

	for (i = PG_LEVEL_4K; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) {
		rmap_head = __gfn_to_rmap(gfn, i, slot);
		write_protected |= __rmap_write_protect(kvm, rmap_head, true);
	}

	if (kvm->arch.tdp_mmu_enabled)
		write_protected |=
			kvm_tdp_mmu_write_protect_gfn(kvm, slot, gfn);

	return write_protected;
}

static bool rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn)
{
	struct kvm_memory_slot *slot;

	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
	return kvm_mmu_slot_gfn_write_protect(vcpu->kvm, slot, gfn);
}

static bool kvm_zap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head)
{
	u64 *sptep;
	struct rmap_iterator iter;
	bool flush = false;

	while ((sptep = rmap_get_first(rmap_head, &iter))) {
		rmap_printk("%s: spte %p %llx.\n", __func__, sptep, *sptep);

		pte_list_remove(rmap_head, sptep);
		flush = true;
	}

	return flush;
}

static int kvm_unmap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
			   struct kvm_memory_slot *slot, gfn_t gfn, int level,
			   unsigned long data)
{
	return kvm_zap_rmapp(kvm, rmap_head);
}
static int kvm_set_pte_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
			     struct kvm_memory_slot *slot, gfn_t gfn, int level,
			     unsigned long data)
{
	u64 *sptep;
	struct rmap_iterator iter;
	int need_flush = 0;
	u64 new_spte;
	pte_t *ptep = (pte_t *)data;
	kvm_pfn_t new_pfn;

	WARN_ON(pte_huge(*ptep));
	new_pfn = pte_pfn(*ptep);

restart:
	for_each_rmap_spte(rmap_head, &iter, sptep) {
		rmap_printk("kvm_set_pte_rmapp: spte %p %llx gfn %llx (%d)\n",
			    sptep, *sptep, gfn, level);

		need_flush = 1;

		if (pte_write(*ptep)) {
			pte_list_remove(rmap_head, sptep);
			goto restart;
		} else {
			new_spte = kvm_mmu_changed_pte_notifier_make_spte(
					*sptep, new_pfn);

			mmu_spte_clear_track_bits(sptep);
			mmu_spte_set(sptep, new_spte);
		}
	}

	if (need_flush && kvm_available_flush_tlb_with_range()) {
		kvm_flush_remote_tlbs_with_address(kvm, gfn, 1);
		return 0;
	}

	return need_flush;
}
struct slot_rmap_walk_iterator {
	/* input fields. */
	struct kvm_memory_slot *slot;
	gfn_t start_gfn;
	gfn_t end_gfn;
	int start_level;
	int end_level;

	/* output fields. */
	gfn_t gfn;
	struct kvm_rmap_head *rmap;
	int level;

	/* private field. */
	struct kvm_rmap_head *end_rmap;
};

static void
rmap_walk_init_level(struct slot_rmap_walk_iterator *iterator, int level)
{
	iterator->level = level;
	iterator->gfn = iterator->start_gfn;
	iterator->rmap = __gfn_to_rmap(iterator->gfn, level, iterator->slot);
	iterator->end_rmap = __gfn_to_rmap(iterator->end_gfn, level,
					   iterator->slot);
}

static void
slot_rmap_walk_init(struct slot_rmap_walk_iterator *iterator,
		    struct kvm_memory_slot *slot, int start_level,
		    int end_level, gfn_t start_gfn, gfn_t end_gfn)
{
	iterator->slot = slot;
	iterator->start_level = start_level;
	iterator->end_level = end_level;
	iterator->start_gfn = start_gfn;
	iterator->end_gfn = end_gfn;

	rmap_walk_init_level(iterator, iterator->start_level);
}

static bool slot_rmap_walk_okay(struct slot_rmap_walk_iterator *iterator)
{
	return !!iterator->rmap;
}

static void slot_rmap_walk_next(struct slot_rmap_walk_iterator *iterator)
{
	if (++iterator->rmap <= iterator->end_rmap) {
		iterator->gfn += (1UL << KVM_HPAGE_GFN_SHIFT(iterator->level));
		return;
	}

	if (++iterator->level > iterator->end_level) {
		iterator->rmap = NULL;
		return;
	}

	rmap_walk_init_level(iterator, iterator->level);
}

#define for_each_slot_rmap_range(_slot_, _start_level_, _end_level_,	\
				 _start_gfn, _end_gfn, _iter_)		\
	for (slot_rmap_walk_init(_iter_, _slot_, _start_level_,		\
				 _end_level_, _start_gfn, _end_gfn);	\
	     slot_rmap_walk_okay(_iter_);				\
	     slot_rmap_walk_next(_iter_))
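/*
 * Usage sketch (added for illustration): visiting every rmap bucket for
 * a gfn range at every page size, exactly as kvm_handle_hva_range()
 * below does:
 *
 *	struct slot_rmap_walk_iterator iter;
 *
 *	for_each_slot_rmap_range(slot, PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL,
 *				 start_gfn, last_gfn, &iter)
 *		flush |= handler(kvm, iter.rmap, slot, iter.gfn,
 *				 iter.level, data);
 *
 * The walk is level-major: all buckets of one level are visited before
 * moving to the next larger page size.
 */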
static int kvm_handle_hva_range(struct kvm *kvm,
				unsigned long start,
				unsigned long end,
				unsigned long data,
				int (*handler)(struct kvm *kvm,
					       struct kvm_rmap_head *rmap_head,
					       struct kvm_memory_slot *slot,
					       gfn_t gfn,
					       int level,
					       unsigned long data))
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	struct slot_rmap_walk_iterator iterator;
	int ret = 0;
	int i;

	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
		slots = __kvm_memslots(kvm, i);
		kvm_for_each_memslot(memslot, slots) {
			unsigned long hva_start, hva_end;
			gfn_t gfn_start, gfn_end;

			hva_start = max(start, memslot->userspace_addr);
			hva_end = min(end, memslot->userspace_addr +
				      (memslot->npages << PAGE_SHIFT));
			if (hva_start >= hva_end)
				continue;
			/*
			 * {gfn(page) | page intersects with [hva_start, hva_end)} =
			 * {gfn_start, gfn_start+1, ..., gfn_end-1}.
			 */
			gfn_start = hva_to_gfn_memslot(hva_start, memslot);
			gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);

			for_each_slot_rmap_range(memslot, PG_LEVEL_4K,
						 KVM_MAX_HUGEPAGE_LEVEL,
						 gfn_start, gfn_end - 1,
						 &iterator)
				ret |= handler(kvm, iterator.rmap, memslot,
					       iterator.gfn, iterator.level, data);
		}
	}

	return ret;
}

static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
			  unsigned long data,
			  int (*handler)(struct kvm *kvm,
					 struct kvm_rmap_head *rmap_head,
					 struct kvm_memory_slot *slot,
					 gfn_t gfn, int level,
					 unsigned long data))
{
	return kvm_handle_hva_range(kvm, hva, hva + 1, data, handler);
}

int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
			unsigned flags)
{
	int r;

	r = kvm_handle_hva_range(kvm, start, end, 0, kvm_unmap_rmapp);

	if (kvm->arch.tdp_mmu_enabled)
		r |= kvm_tdp_mmu_zap_hva_range(kvm, start, end);

	return r;
}

int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	int r;

	r = kvm_handle_hva(kvm, hva, (unsigned long)&pte, kvm_set_pte_rmapp);

	if (kvm->arch.tdp_mmu_enabled)
		r |= kvm_tdp_mmu_set_spte_hva(kvm, hva, &pte);

	return r;
}
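/*
 * Worked example (illustrative numbers) for the hva->gfn clamping in
 * kvm_handle_hva_range() above: for a memslot with
 * userspace_addr = 0x7f0000000000, npages = 512 and base_gfn = 0x100,
 * a notifier range [0x7f0000001000, 0x7f0000003000) yields
 * gfn_start = 0x101 and gfn_end = 0x103, so the handler runs on the
 * rmaps of gfns 0x101 and 0x102 at every supported page level.
 */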
static int kvm_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
			 struct kvm_memory_slot *slot, gfn_t gfn, int level,
			 unsigned long data)
{
	u64 *sptep;
	struct rmap_iterator iter;
	int young = 0;

	for_each_rmap_spte(rmap_head, &iter, sptep)
		young |= mmu_spte_age(sptep);

	trace_kvm_age_page(gfn, level, slot, young);
	return young;
}

static int kvm_test_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
			      struct kvm_memory_slot *slot, gfn_t gfn,
			      int level, unsigned long data)
{
	u64 *sptep;
	struct rmap_iterator iter;

	for_each_rmap_spte(rmap_head, &iter, sptep)
		if (is_accessed_spte(*sptep))
			return 1;
	return 0;
}

#define RMAP_RECYCLE_THRESHOLD 1000

static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
{
	struct kvm_rmap_head *rmap_head;
	struct kvm_mmu_page *sp;

	sp = sptep_to_sp(spte);

	rmap_head = gfn_to_rmap(vcpu->kvm, gfn, sp);

	kvm_unmap_rmapp(vcpu->kvm, rmap_head, NULL, gfn, sp->role.level, 0);
	kvm_flush_remote_tlbs_with_address(vcpu->kvm, sp->gfn,
			KVM_PAGES_PER_HPAGE(sp->role.level));
}

int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
{
	int young = false;

	young = kvm_handle_hva_range(kvm, start, end, 0, kvm_age_rmapp);
	if (kvm->arch.tdp_mmu_enabled)
		young |= kvm_tdp_mmu_age_hva_range(kvm, start, end);

	return young;
}

int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
	int young = false;

	young = kvm_handle_hva(kvm, hva, 0, kvm_test_age_rmapp);
	if (kvm->arch.tdp_mmu_enabled)
		young |= kvm_tdp_mmu_test_age_hva(kvm, hva);

	return young;
}
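/*
 * Context note (added for illustration): kvm_age_hva() and
 * kvm_test_age_hva() above are the arch back-ends of the MMU-notifier
 * aging callbacks, so "young" reflects the accessed state of any SPTE
 * (legacy rmaps or TDP MMU) covering the hva range.
 */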
#ifdef MMU_DEBUG
static int is_empty_shadow_page(u64 *spt)
{
	u64 *pos;
	u64 *end;

	for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
		if (is_shadow_present_pte(*pos)) {
			printk(KERN_ERR "%s: %p %llx\n", __func__,
			       pos, *pos);
			return 0;
		}
	return 1;
}
#endif

/*
 * This value is the sum of all of the kvm instances'
 * kvm->arch.n_used_mmu_pages values.  We need a global,
 * aggregate version in order to make the slab shrinker
 * faster.
 */
static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, long nr)
{
	kvm->arch.n_used_mmu_pages += nr;
	percpu_counter_add(&kvm_total_used_mmu_pages, nr);
}

static void kvm_mmu_free_page(struct kvm_mmu_page *sp)
{
	MMU_WARN_ON(!is_empty_shadow_page(sp->spt));
	hlist_del(&sp->hash_link);
	list_del(&sp->link);
	free_page((unsigned long)sp->spt);
	if (!sp->role.direct)
		free_page((unsigned long)sp->gfns);
	kmem_cache_free(mmu_page_header_cache, sp);
}

static unsigned kvm_page_table_hashfn(gfn_t gfn)
{
	return hash_64(gfn, KVM_MMU_HASH_SHIFT);
}

static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
				    struct kvm_mmu_page *sp, u64 *parent_pte)
{
	if (!parent_pte)
		return;

	pte_list_add(vcpu, parent_pte, &sp->parent_ptes);
}

static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp,
				       u64 *parent_pte)
{
	__pte_list_remove(parent_pte, &sp->parent_ptes);
}

static void drop_parent_pte(struct kvm_mmu_page *sp,
			    u64 *parent_pte)
{
	mmu_page_remove_parent_pte(sp, parent_pte);
	mmu_spte_clear_no_track(parent_pte);
}
static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu, int direct)
{
	struct kvm_mmu_page *sp;

	sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
	sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);
	if (!direct)
		sp->gfns = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_gfn_array_cache);
	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);

	/*
	 * active_mmu_pages must be a FIFO list, as kvm_zap_obsolete_pages()
	 * depends on valid pages being added to the head of the list.  See
	 * comments in kvm_zap_obsolete_pages().
	 */
	sp->mmu_valid_gen = vcpu->kvm->arch.mmu_valid_gen;
	list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
	kvm_mod_used_mmu_pages(vcpu->kvm, +1);
	return sp;
}

static void mark_unsync(u64 *spte);
static void kvm_mmu_mark_parents_unsync(struct kvm_mmu_page *sp)
{
	u64 *sptep;
	struct rmap_iterator iter;

	for_each_rmap_spte(&sp->parent_ptes, &iter, sptep) {
		mark_unsync(sptep);
	}
}

static void mark_unsync(u64 *spte)
{
	struct kvm_mmu_page *sp;
	unsigned int index;

	sp = sptep_to_sp(spte);
	index = spte - sp->spt;
	if (__test_and_set_bit(index, sp->unsync_child_bitmap))
		return;
	if (sp->unsync_children++)
		return;
	kvm_mmu_mark_parents_unsync(sp);
}

static int nonpaging_sync_page(struct kvm_vcpu *vcpu,
			       struct kvm_mmu_page *sp)
{
	return 0;
}

#define KVM_PAGE_ARRAY_NR 16

struct kvm_mmu_pages {
	struct mmu_page_and_offset {
		struct kvm_mmu_page *sp;
		unsigned int idx;
	} page[KVM_PAGE_ARRAY_NR];
	unsigned int nr;
};

static int mmu_pages_add(struct kvm_mmu_pages *pvec, struct kvm_mmu_page *sp,
			 int idx)
{
	int i;

	if (sp->unsync)
		for (i = 0; i < pvec->nr; i++)
			if (pvec->page[i].sp == sp)
				return 0;

	pvec->page[pvec->nr].sp = sp;
	pvec->page[pvec->nr].idx = idx;
	pvec->nr++;
	return (pvec->nr == KVM_PAGE_ARRAY_NR);
}
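/*
 * Illustrative note (not in the original source): the vector above holds
 * at most KVM_PAGE_ARRAY_NR entries, so a full unsync walk is driven by
 * a retry loop; mmu_sync_children(), further below, follows this shape:
 *
 *	while (mmu_unsync_walk(parent, &pages)) {
 *		for_each_sp(pages, sp, parents, i)
 *			...sync or zap sp...
 *	}
 *
 * Each processed batch clears bits in unsync_child_bitmap, so the loop
 * terminates.
 */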
static inline void clear_unsync_child_bit(struct kvm_mmu_page *sp, int idx)
{
	--sp->unsync_children;
	WARN_ON((int)sp->unsync_children < 0);
	__clear_bit(idx, sp->unsync_child_bitmap);
}

static int __mmu_unsync_walk(struct kvm_mmu_page *sp,
			     struct kvm_mmu_pages *pvec)
{
	int i, ret, nr_unsync_leaf = 0;

	for_each_set_bit(i, sp->unsync_child_bitmap, 512) {
		struct kvm_mmu_page *child;
		u64 ent = sp->spt[i];

		if (!is_shadow_present_pte(ent) || is_large_pte(ent)) {
			clear_unsync_child_bit(sp, i);
			continue;
		}

		child = to_shadow_page(ent & PT64_BASE_ADDR_MASK);

		if (child->unsync_children) {
			if (mmu_pages_add(pvec, child, i))
				return -ENOSPC;

			ret = __mmu_unsync_walk(child, pvec);
			if (!ret) {
				clear_unsync_child_bit(sp, i);
				continue;
			} else if (ret > 0) {
				nr_unsync_leaf += ret;
			} else
				return ret;
		} else if (child->unsync) {
			nr_unsync_leaf++;
			if (mmu_pages_add(pvec, child, i))
				return -ENOSPC;
		} else
			clear_unsync_child_bit(sp, i);
	}

	return nr_unsync_leaf;
}

#define INVALID_INDEX (-1)

static int mmu_unsync_walk(struct kvm_mmu_page *sp,
			   struct kvm_mmu_pages *pvec)
{
	pvec->nr = 0;
	if (!sp->unsync_children)
		return 0;

	mmu_pages_add(pvec, sp, INVALID_INDEX);
	return __mmu_unsync_walk(sp, pvec);
}

static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	WARN_ON(!sp->unsync);
	trace_kvm_mmu_sync_page(sp);
	sp->unsync = 0;
	--kvm->stat.mmu_unsync;
}

static bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
				     struct list_head *invalid_list);
static void kvm_mmu_commit_zap_page(struct kvm *kvm,
				    struct list_head *invalid_list);
#define for_each_valid_sp(_kvm, _sp, _list)				\
	hlist_for_each_entry(_sp, _list, hash_link)			\
		if (is_obsolete_sp((_kvm), (_sp))) {			\
		} else

#define for_each_gfn_indirect_valid_sp(_kvm, _sp, _gfn)			\
	for_each_valid_sp(_kvm, _sp,					\
	  &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)])	\
		if ((_sp)->gfn != (_gfn) || (_sp)->role.direct) {} else

static inline bool is_ept_sp(struct kvm_mmu_page *sp)
{
	return sp->role.cr0_wp && sp->role.smap_andnot_wp;
}

/* @sp->gfn should be write-protected at the call site */
static bool __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
			    struct list_head *invalid_list)
{
	if ((!is_ept_sp(sp) && sp->role.gpte_is_8_bytes != !!is_pae(vcpu)) ||
	    vcpu->arch.mmu->sync_page(vcpu, sp) == 0) {
		kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
		return false;
	}

	return true;
}

static bool kvm_mmu_remote_flush_or_zap(struct kvm *kvm,
					struct list_head *invalid_list,
					bool remote_flush)
{
	if (!remote_flush && list_empty(invalid_list))
		return false;

	if (!list_empty(invalid_list))
		kvm_mmu_commit_zap_page(kvm, invalid_list);
	else
		kvm_flush_remote_tlbs(kvm);
	return true;
}

static void kvm_mmu_flush_or_zap(struct kvm_vcpu *vcpu,
				 struct list_head *invalid_list,
				 bool remote_flush, bool local_flush)
{
	if (kvm_mmu_remote_flush_or_zap(vcpu->kvm, invalid_list, remote_flush))
		return;

	if (local_flush)
		kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
}

#ifdef CONFIG_KVM_MMU_AUDIT
#include "mmu_audit.c"
#else
static void kvm_mmu_audit(struct kvm_vcpu *vcpu, int point) { }
static void mmu_audit_disable(void) { }
#endif

static bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	return sp->role.invalid ||
	       unlikely(sp->mmu_valid_gen != kvm->arch.mmu_valid_gen);
}
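/*
 * Usage sketch (added for illustration): looking up the indirect shadow
 * pages for a gfn goes through the page hash plus the filtering macros
 * above, e.g.:
 *
 *	struct kvm_mmu_page *sp;
 *
 *	for_each_gfn_indirect_valid_sp(kvm, sp, gfn) {
 *		if (sp->unsync)
 *			...sync or zap sp...
 *	}
 *
 * Obsolete pages (stale mmu_valid_gen or role.invalid, per
 * is_obsolete_sp() above) and direct pages are skipped by construction.
 */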
static bool kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
			  struct list_head *invalid_list)
{
	kvm_unlink_unsync_page(vcpu->kvm, sp);
	return __kvm_sync_page(vcpu, sp, invalid_list);
}

/* @gfn should be write-protected at the call site */
static bool kvm_sync_pages(struct kvm_vcpu *vcpu, gfn_t gfn,
			   struct list_head *invalid_list)
{
	struct kvm_mmu_page *s;
	bool ret = false;

	for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) {
		if (!s->unsync)
			continue;

		WARN_ON(s->role.level != PG_LEVEL_4K);
		ret |= kvm_sync_page(vcpu, s, invalid_list);
	}

	return ret;
}

struct mmu_page_path {
	struct kvm_mmu_page *parent[PT64_ROOT_MAX_LEVEL];
	unsigned int idx[PT64_ROOT_MAX_LEVEL];
};

#define for_each_sp(pvec, sp, parents, i)				\
	for (i = mmu_pages_first(&pvec, &parents);			\
	     i < pvec.nr && ({ sp = pvec.page[i].sp; 1;});		\
	     i = mmu_pages_next(&pvec, &parents, i))

static int mmu_pages_next(struct kvm_mmu_pages *pvec,
			  struct mmu_page_path *parents,
			  int i)
{
	int n;

	for (n = i+1; n < pvec->nr; n++) {
		struct kvm_mmu_page *sp = pvec->page[n].sp;
		unsigned idx = pvec->page[n].idx;
		int level = sp->role.level;

		parents->idx[level-1] = idx;
		if (level == PG_LEVEL_4K)
			break;

		parents->parent[level-2] = sp;
	}

	return n;
}

static int mmu_pages_first(struct kvm_mmu_pages *pvec,
			   struct mmu_page_path *parents)
{
	struct kvm_mmu_page *sp;
	int level;

	if (pvec->nr == 0)
		return 0;

	WARN_ON(pvec->page[0].idx != INVALID_INDEX);

	sp = pvec->page[0].sp;
	level = sp->role.level;
	WARN_ON(level == PG_LEVEL_4K);

	parents->parent[level-2] = sp;

	/* Also set up a sentinel.  Further entries in pvec are all
	 * children of sp, so this element is never overwritten.
	 */
	parents->parent[level-1] = NULL;
	return mmu_pages_next(pvec, parents, 0);
}

static void mmu_pages_clear_parents(struct mmu_page_path *parents)
{
	struct kvm_mmu_page *sp;
	unsigned int level = 0;

	do {
		unsigned int idx = parents->idx[level];

		sp = parents->parent[level];
		if (!sp)
			return;

		WARN_ON(idx == INVALID_INDEX);
		clear_unsync_child_bit(sp, idx);
		level++;
	} while (!sp->unsync_children);
}

static void mmu_sync_children(struct kvm_vcpu *vcpu,
			      struct kvm_mmu_page *parent)
{
	int i;
	struct kvm_mmu_page *sp;
	struct mmu_page_path parents;
	struct kvm_mmu_pages pages;
	LIST_HEAD(invalid_list);
	bool flush = false;

	while (mmu_unsync_walk(parent, &pages)) {
		bool protected = false;

		for_each_sp(pages, sp, parents, i)
			protected |= rmap_write_protect(vcpu, sp->gfn);

		if (protected) {
			kvm_flush_remote_tlbs(vcpu->kvm);
			flush = false;
		}

		for_each_sp(pages, sp, parents, i) {
			flush |= kvm_sync_page(vcpu, sp, &invalid_list);
			mmu_pages_clear_parents(&parents);
		}
		if (need_resched() || spin_needbreak(&vcpu->kvm->mmu_lock)) {
			kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush);
			cond_resched_lock(&vcpu->kvm->mmu_lock);
			flush = false;
		}
	}

	kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush);
}

static void __clear_sp_write_flooding_count(struct kvm_mmu_page *sp)
{
	atomic_set(&sp->write_flooding_count, 0);
}

static void clear_sp_write_flooding_count(u64 *spte)
{
	__clear_sp_write_flooding_count(sptep_to_sp(spte));
}
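/*
 * Worked example (added for illustration) for the quadrant computation
 * in kvm_mmu_get_page() below: when a 32-bit guest is shadowed with
 * 64-bit PTEs, one guest table spans more VA than one shadow table, so
 * each guest table needs two shadow copies at level 1 and four at
 * level 2.  role.quadrant records which slice a copy covers; e.g. at
 * level 1, gaddr 0x200000 yields (0x200000 >> 21) & 1 == 1, i.e. the
 * second 2MB half of the guest's 4MB page table.
 */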
static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
					     gfn_t gfn,
					     gva_t gaddr,
					     unsigned level,
					     int direct,
					     unsigned int access)
{
	bool direct_mmu = vcpu->arch.mmu->direct_map;
	union kvm_mmu_page_role role;
	struct hlist_head *sp_list;
	unsigned quadrant;
	struct kvm_mmu_page *sp;
	bool need_sync = false;
	bool flush = false;
	int collisions = 0;
	LIST_HEAD(invalid_list);

	role = vcpu->arch.mmu->mmu_role.base;
	role.level = level;
	role.direct = direct;
	if (role.direct)
		role.gpte_is_8_bytes = true;
	role.access = access;
	if (!direct_mmu && vcpu->arch.mmu->root_level <= PT32_ROOT_LEVEL) {
		quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
		quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
		role.quadrant = quadrant;
	}

	sp_list = &vcpu->kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)];
	for_each_valid_sp(vcpu->kvm, sp, sp_list) {
		if (sp->gfn != gfn) {
			collisions++;
			continue;
		}

		if (!need_sync && sp->unsync)
			need_sync = true;

		if (sp->role.word != role.word)
			continue;

		if (direct_mmu)
			goto trace_get_page;

		if (sp->unsync) {
			/*
			 * The page is good, but __kvm_sync_page might still
			 * end up zapping it.  If so, break in order to
			 * rebuild it.
			 */
			if (!__kvm_sync_page(vcpu, sp, &invalid_list))
				break;

			WARN_ON(!list_empty(&invalid_list));
			kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
		}

		if (sp->unsync_children)
			kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);

		__clear_sp_write_flooding_count(sp);

trace_get_page:
		trace_kvm_mmu_get_page(sp, false);
		goto out;
	}

	++vcpu->kvm->stat.mmu_cache_miss;

	sp = kvm_mmu_alloc_page(vcpu, direct);

	sp->gfn = gfn;
	sp->role = role;
	hlist_add_head(&sp->hash_link, sp_list);
	if (!direct) {
		/*
		 * We should do write protection before syncing pages,
		 * otherwise the content of the synced shadow page may
		 * be inconsistent with the guest page table.
		 */
		account_shadowed(vcpu->kvm, sp);
		if (level == PG_LEVEL_4K && rmap_write_protect(vcpu, gfn))
			kvm_flush_remote_tlbs_with_address(vcpu->kvm, gfn, 1);

		if (level > PG_LEVEL_4K && need_sync)
			flush |= kvm_sync_pages(vcpu, gfn, &invalid_list);
	}
	trace_kvm_mmu_get_page(sp, true);

	kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush);
out:
	if (collisions > vcpu->kvm->stat.max_mmu_page_hash_collisions)
		vcpu->kvm->stat.max_mmu_page_hash_collisions = collisions;
	return sp;
}

static void shadow_walk_init_using_root(struct kvm_shadow_walk_iterator *iterator,
					struct kvm_vcpu *vcpu, hpa_t root,
					u64 addr)
{
	iterator->addr = addr;
	iterator->shadow_addr = root;
	iterator->level = vcpu->arch.mmu->shadow_root_level;

	if (iterator->level == PT64_ROOT_4LEVEL &&
	    vcpu->arch.mmu->root_level < PT64_ROOT_4LEVEL &&
	    !vcpu->arch.mmu->direct_map)
		--iterator->level;

	if (iterator->level == PT32E_ROOT_LEVEL) {
		/*
		 * prev_root is currently only used for 64-bit hosts. So only
		 * the active root_hpa is valid here.
		 */
		BUG_ON(root != vcpu->arch.mmu->root_hpa);

		iterator->shadow_addr
			= vcpu->arch.mmu->pae_root[(addr >> 30) & 3];
		iterator->shadow_addr &= PT64_BASE_ADDR_MASK;
		--iterator->level;
		if (!iterator->shadow_addr)
			iterator->level = 0;
	}
}
static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator,
			     struct kvm_vcpu *vcpu, u64 addr)
{
	shadow_walk_init_using_root(iterator, vcpu, vcpu->arch.mmu->root_hpa,
				    addr);
}

static bool shadow_walk_okay(struct kvm_shadow_walk_iterator *iterator)
{
	if (iterator->level < PG_LEVEL_4K)
		return false;

	iterator->index = SHADOW_PT_INDEX(iterator->addr, iterator->level);
	iterator->sptep = ((u64 *)__va(iterator->shadow_addr)) + iterator->index;
	return true;
}

static void __shadow_walk_next(struct kvm_shadow_walk_iterator *iterator,
			       u64 spte)
{
	if (is_last_spte(spte, iterator->level)) {
		iterator->level = 0;
		return;
	}

	iterator->shadow_addr = spte & PT64_BASE_ADDR_MASK;
	--iterator->level;
}

static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator)
{
	__shadow_walk_next(iterator, *iterator->sptep);
}

static void link_shadow_page(struct kvm_vcpu *vcpu, u64 *sptep,
			     struct kvm_mmu_page *sp)
{
	u64 spte;

	BUILD_BUG_ON(VMX_EPT_WRITABLE_MASK != PT_WRITABLE_MASK);

	spte = make_nonleaf_spte(sp->spt, sp_ad_disabled(sp));

	mmu_spte_set(sptep, spte);

	mmu_page_add_parent_pte(vcpu, sp, sptep);

	if (sp->unsync_children || sp->unsync)
		mark_unsync(sptep);
}
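/*
 * Usage sketch (added for illustration): walking the shadow page tables
 * for a guest address with the iterator above, mmu_lock held:
 *
 *	struct kvm_shadow_walk_iterator it;
 *
 *	for (shadow_walk_init(&it, vcpu, addr);
 *	     shadow_walk_okay(&it); shadow_walk_next(&it)) {
 *		if (!is_shadow_present_pte(*it.sptep))
 *			break;
 *		...inspect *it.sptep at it.level...
 *	}
 *
 * shadow_walk_okay() ends the loop once the level drops below
 * PG_LEVEL_4K; callers break out early on a non-present SPTE before
 * stepping down.
 */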
static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep,
				 unsigned direct_access)
{
	if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep)) {
		struct kvm_mmu_page *child;

		/*
		 * For the direct sp, if the guest pte's dirty bit
		 * changed from clean to dirty, it would corrupt the
		 * sp's access: i.e. allow writes through a read-only
		 * sp.  So we should update the spte at this point to
		 * get a new sp with the correct access.
		 */
		child = to_shadow_page(*sptep & PT64_BASE_ADDR_MASK);
		if (child->role.access == direct_access)
			return;

		drop_parent_pte(child, sptep);
		kvm_flush_remote_tlbs_with_address(vcpu->kvm, child->gfn, 1);
	}
}
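/*
 * Pattern note (illustrative, not from the original source): zapping in
 * this file is two-phase.  Pages are first unlinked and queued on a
 * local invalid_list by kvm_mmu_prepare_zap_page(), then freed in one
 * batch after a single remote TLB flush:
 *
 *	LIST_HEAD(invalid_list);
 *
 *	kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
 *	kvm_mmu_commit_zap_page(kvm, &invalid_list);
 *
 * Batching the flush is what keeps zapping many pages affordable; both
 * functions are defined below.
 */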
/* Returns the number of zapped non-leaf child shadow pages. */
static int mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
			    u64 *spte, struct list_head *invalid_list)
{
	u64 pte;
	struct kvm_mmu_page *child;

	pte = *spte;
	if (is_shadow_present_pte(pte)) {
		if (is_last_spte(pte, sp->role.level)) {
			drop_spte(kvm, spte);
			if (is_large_pte(pte))
				--kvm->stat.lpages;
		} else {
			child = to_shadow_page(pte & PT64_BASE_ADDR_MASK);
			drop_parent_pte(child, spte);

			/*
			 * Recursively zap nested TDP SPs, as parentless SPs
			 * are unlikely to be used again in the near future.
			 * This avoids retaining a large number of stale
			 * nested SPs.
			 */
			if (tdp_enabled && invalid_list &&
			    child->role.guest_mode && !child->parent_ptes.val)
				return kvm_mmu_prepare_zap_page(kvm, child,
								invalid_list);
		}
	} else if (is_mmio_spte(pte)) {
		mmu_spte_clear_no_track(spte);
	}
	return 0;
}

static int kvm_mmu_page_unlink_children(struct kvm *kvm,
					struct kvm_mmu_page *sp,
					struct list_head *invalid_list)
{
	int zapped = 0;
	unsigned i;

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
		zapped += mmu_page_zap_pte(kvm, sp, sp->spt + i, invalid_list);

	return zapped;
}

static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	u64 *sptep;
	struct rmap_iterator iter;

	while ((sptep = rmap_get_first(&sp->parent_ptes, &iter)))
		drop_parent_pte(sp, sptep);
}

static int mmu_zap_unsync_children(struct kvm *kvm,
				   struct kvm_mmu_page *parent,
				   struct list_head *invalid_list)
{
	int i, zapped = 0;
	struct mmu_page_path parents;
	struct kvm_mmu_pages pages;

	if (parent->role.level == PG_LEVEL_4K)
		return 0;

	while (mmu_unsync_walk(parent, &pages)) {
		struct kvm_mmu_page *sp;

		for_each_sp(pages, sp, parents, i) {
			kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
			mmu_pages_clear_parents(&parents);
			zapped++;
		}
	}

	return zapped;
}

static bool __kvm_mmu_prepare_zap_page(struct kvm *kvm,
				       struct kvm_mmu_page *sp,
				       struct list_head *invalid_list,
				       int *nr_zapped)
{
	bool list_unstable;

	trace_kvm_mmu_prepare_zap_page(sp);
	++kvm->stat.mmu_shadow_zapped;
	*nr_zapped = mmu_zap_unsync_children(kvm, sp, invalid_list);
	*nr_zapped += kvm_mmu_page_unlink_children(kvm, sp, invalid_list);
	kvm_mmu_unlink_parents(kvm, sp);
static bool __kvm_mmu_prepare_zap_page(struct kvm *kvm,
				       struct kvm_mmu_page *sp,
				       struct list_head *invalid_list,
				       int *nr_zapped)
{
	bool list_unstable;

	trace_kvm_mmu_prepare_zap_page(sp);
	++kvm->stat.mmu_shadow_zapped;
	*nr_zapped = mmu_zap_unsync_children(kvm, sp, invalid_list);
	*nr_zapped += kvm_mmu_page_unlink_children(kvm, sp, invalid_list);
	kvm_mmu_unlink_parents(kvm, sp);

	/* Zapping children means active_mmu_pages has become unstable. */
	list_unstable = *nr_zapped;

	if (!sp->role.invalid && !sp->role.direct)
		unaccount_shadowed(kvm, sp);

	if (sp->unsync)
		kvm_unlink_unsync_page(kvm, sp);
	if (!sp->root_count) {
		/* Count self */
		(*nr_zapped)++;

		/*
		 * Already invalid pages (previously active roots) are not on
		 * the active page list.  See list_del() in the "else" case of
		 * !sp->root_count.
		 */
		if (sp->role.invalid)
			list_add(&sp->link, invalid_list);
		else
			list_move(&sp->link, invalid_list);
		kvm_mod_used_mmu_pages(kvm, -1);
	} else {
		/*
		 * Remove the active root from the active page list; the root
		 * will be explicitly freed when the root_count hits zero.
		 */
		list_del(&sp->link);

		/*
		 * Obsolete pages cannot be used on any vCPUs, see the comment
		 * in kvm_mmu_zap_all_fast().  Note, is_obsolete_sp() also
		 * treats invalid shadow pages as being obsolete.
		 */
		if (!is_obsolete_sp(kvm, sp))
			kvm_reload_remote_mmus(kvm);
	}

	if (sp->lpage_disallowed)
		unaccount_huge_nx_page(kvm, sp);

	sp->role.invalid = 1;
	return list_unstable;
}

static bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
				     struct list_head *invalid_list)
{
	int nr_zapped;

	__kvm_mmu_prepare_zap_page(kvm, sp, invalid_list, &nr_zapped);
	return nr_zapped;
}

static void kvm_mmu_commit_zap_page(struct kvm *kvm,
				    struct list_head *invalid_list)
{
	struct kvm_mmu_page *sp, *nsp;

	if (list_empty(invalid_list))
		return;

	/*
	 * We need to make sure everyone sees our modifications to
	 * the page tables and that changes to vcpu->mode are seen here.
	 * The barrier in kvm_flush_remote_tlbs() achieves this; it pairs
	 * with vcpu_enter_guest() and walk_shadow_page_lockless_begin/end().
	 *
	 * In addition, kvm_flush_remote_tlbs() waits for all vcpus to exit
	 * guest mode and/or lockless shadow page table walks.
	 */
	kvm_flush_remote_tlbs(kvm);

	list_for_each_entry_safe(sp, nsp, invalid_list, link) {
		WARN_ON(!sp->role.invalid || sp->root_count);
		kvm_mmu_free_page(sp);
	}
}

static unsigned long kvm_mmu_zap_oldest_mmu_pages(struct kvm *kvm,
						  unsigned long nr_to_zap)
{
	unsigned long total_zapped = 0;
	struct kvm_mmu_page *sp, *tmp;
	LIST_HEAD(invalid_list);
	bool unstable;
	int nr_zapped;

	if (list_empty(&kvm->arch.active_mmu_pages))
		return 0;

restart:
	list_for_each_entry_safe_reverse(sp, tmp, &kvm->arch.active_mmu_pages, link) {
		/*
		 * Don't zap active root pages: the page itself can't be freed
		 * and zapping it will just force vCPUs to realloc and reload.
		 */
		if (sp->root_count)
			continue;

		unstable = __kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list,
						      &nr_zapped);
		total_zapped += nr_zapped;
		if (total_zapped >= nr_to_zap)
			break;

		if (unstable)
			goto restart;
	}

	kvm_mmu_commit_zap_page(kvm, &invalid_list);

	kvm->stat.mmu_recycled += total_zapped;
	return total_zapped;
}

static inline unsigned long kvm_mmu_available_pages(struct kvm *kvm)
{
	if (kvm->arch.n_max_mmu_pages > kvm->arch.n_used_mmu_pages)
		return kvm->arch.n_max_mmu_pages -
			kvm->arch.n_used_mmu_pages;

	return 0;
}

static int make_mmu_pages_available(struct kvm_vcpu *vcpu)
{
	unsigned long avail = kvm_mmu_available_pages(vcpu->kvm);

	if (likely(avail >= KVM_MIN_FREE_MMU_PAGES))
		return 0;

	kvm_mmu_zap_oldest_mmu_pages(vcpu->kvm, KVM_REFILL_PAGES - avail);

	if (!kvm_mmu_available_pages(vcpu->kvm))
		return -ENOSPC;
	return 0;
}
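
/*
 * Example, using the values at the time of writing
 * (KVM_MIN_FREE_MMU_PAGES == 5, KVM_REFILL_PAGES == 25): a vCPU that
 * finds only 3 pages available zaps up to 25 - 3 = 22 of the oldest
 * shadow pages, so that a burst of page faults can then be serviced
 * without having to zap on every fault.
 */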

/*
 * Change the number of mmu pages allocated to the vm.
 * Note: if goal_nr_mmu_pages is too small, you will get a deadlock.
 */
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long goal_nr_mmu_pages)
{
	spin_lock(&kvm->mmu_lock);

	if (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) {
		kvm_mmu_zap_oldest_mmu_pages(kvm, kvm->arch.n_used_mmu_pages -
						  goal_nr_mmu_pages);

		goal_nr_mmu_pages = kvm->arch.n_used_mmu_pages;
	}

	kvm->arch.n_max_mmu_pages = goal_nr_mmu_pages;

	spin_unlock(&kvm->mmu_lock);
}

int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_mmu_page *sp;
	LIST_HEAD(invalid_list);
	int r;

	pgprintk("%s: looking for gfn %llx\n", __func__, gfn);
	r = 0;
	spin_lock(&kvm->mmu_lock);
	for_each_gfn_indirect_valid_sp(kvm, sp, gfn) {
		pgprintk("%s: gfn %llx role %x\n", __func__, gfn,
			 sp->role.word);
		r = 1;
		kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
	}
	kvm_mmu_commit_zap_page(kvm, &invalid_list);
	spin_unlock(&kvm->mmu_lock);

	return r;
}
EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page);

static void kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
{
	trace_kvm_mmu_unsync_page(sp);
	++vcpu->kvm->stat.mmu_unsync;
	sp->unsync = 1;

	kvm_mmu_mark_parents_unsync(sp);
}

bool mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
			    bool can_unsync)
{
	struct kvm_mmu_page *sp;

	if (kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_WRITE))
		return true;

	for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
		if (!can_unsync)
			return true;

		if (sp->unsync)
			continue;

		WARN_ON(sp->role.level != PG_LEVEL_4K);
		kvm_unsync_page(vcpu, sp);
	}

	/*
	 * We need to ensure that the marking of unsync pages is visible
	 * before the SPTE is updated to allow writes because
	 * kvm_mmu_sync_roots() checks the unsync flags without holding
	 * the MMU lock and so can race with this. If the SPTE was updated
	 * before the page had been marked as unsync-ed, something like the
	 * following could happen:
	 *
	 * CPU 1                    CPU 2
	 * ---------------------------------------------------------------------
	 * 1.2 Host updates SPTE
	 *     to be writable
	 *                          2.1 Guest writes a GPTE for GVA X.
	 *                              (GPTE being in the guest page table
	 *                              shadowed by the SP from CPU 1.)
	 *                              This reads SPTE during the page table
	 *                              walk. Since SPTE.W is read as 1, there
	 *                              is no fault.
	 *
	 *                          2.2 Guest issues TLB flush.
	 *                              That causes a VM Exit.
	 *
	 *                          2.3 kvm_mmu_sync_roots() reads sp->unsync.
	 *                              Since it is false, it just returns.
	 *
	 *                          2.4 Guest accesses GVA X.
	 *                              Since the mapping in the SP was not
	 *                              updated, the old mapping for GVA X is
	 *                              incorrectly used.
	 * 1.1 Host marks SP
	 *     as unsync
	 *     (sp->unsync = true)
	 *
	 * The write barrier below ensures that 1.1 happens before 1.2 and thus
	 * the situation in 2.4 does not arise. The implicit barrier in 2.2
	 * pairs with this write barrier.
	 */
	smp_wmb();

	return false;
}

static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
		    unsigned int pte_access, int level,
		    gfn_t gfn, kvm_pfn_t pfn, bool speculative,
		    bool can_unsync, bool host_writable)
{
	u64 spte;
	struct kvm_mmu_page *sp;
	int ret;

	if (set_mmio_spte(vcpu, sptep, gfn, pfn, pte_access))
		return 0;

	sp = sptep_to_sp(sptep);

	ret = make_spte(vcpu, pte_access, level, gfn, pfn, *sptep, speculative,
			can_unsync, host_writable, sp_ad_disabled(sp), &spte);

	if (spte & PT_WRITABLE_MASK)
		kvm_vcpu_mark_page_dirty(vcpu, gfn);

	if (*sptep == spte)
		ret |= SET_SPTE_SPURIOUS;
	else if (mmu_spte_update(sptep, spte))
		ret |= SET_SPTE_NEED_REMOTE_TLB_FLUSH;
	return ret;
}
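
/*
 * set_spte() returns a bitmask: SET_SPTE_WRITE_PROTECTED_PT if make_spte()
 * had to leave the gfn write-protected, SET_SPTE_NEED_REMOTE_TLB_FLUSH if
 * updating the old SPTE requires a remote TLB flush, and SET_SPTE_SPURIOUS
 * if the new SPTE is identical to the old one.  mmu_set_spte() below maps
 * these onto the RET_PF_* return codes.
 */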
static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
			unsigned int pte_access, bool write_fault, int level,
			gfn_t gfn, kvm_pfn_t pfn, bool speculative,
			bool host_writable)
{
	int was_rmapped = 0;
	int rmap_count;
	int set_spte_ret;
	int ret = RET_PF_FIXED;
	bool flush = false;

	pgprintk("%s: spte %llx write_fault %d gfn %llx\n", __func__,
		 *sptep, write_fault, gfn);

	if (is_shadow_present_pte(*sptep)) {
		/*
		 * If we overwrite a PTE page pointer with a 2MB PMD, unlink
		 * the parent of the now unreachable PTE.
		 */
		if (level > PG_LEVEL_4K && !is_large_pte(*sptep)) {
			struct kvm_mmu_page *child;
			u64 pte = *sptep;

			child = to_shadow_page(pte & PT64_BASE_ADDR_MASK);
			drop_parent_pte(child, sptep);
			flush = true;
		} else if (pfn != spte_to_pfn(*sptep)) {
			pgprintk("hfn old %llx new %llx\n",
				 spte_to_pfn(*sptep), pfn);
			drop_spte(vcpu->kvm, sptep);
			flush = true;
		} else
			was_rmapped = 1;
	}

	set_spte_ret = set_spte(vcpu, sptep, pte_access, level, gfn, pfn,
				speculative, true, host_writable);
	if (set_spte_ret & SET_SPTE_WRITE_PROTECTED_PT) {
		if (write_fault)
			ret = RET_PF_EMULATE;
		kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
	}

	if (set_spte_ret & SET_SPTE_NEED_REMOTE_TLB_FLUSH || flush)
		kvm_flush_remote_tlbs_with_address(vcpu->kvm, gfn,
						   KVM_PAGES_PER_HPAGE(level));

	if (unlikely(is_mmio_spte(*sptep)))
		ret = RET_PF_EMULATE;

	/*
	 * The fault is fully spurious if and only if the new SPTE and old
	 * SPTE are identical, and emulation is not required.
	 */
	if ((set_spte_ret & SET_SPTE_SPURIOUS) && ret == RET_PF_FIXED) {
		WARN_ON_ONCE(!was_rmapped);
		return RET_PF_SPURIOUS;
	}

	pgprintk("%s: setting spte %llx\n", __func__, *sptep);
	trace_kvm_mmu_set_spte(level, gfn, sptep);
	if (!was_rmapped && is_large_pte(*sptep))
		++vcpu->kvm->stat.lpages;

	if (is_shadow_present_pte(*sptep)) {
		if (!was_rmapped) {
			rmap_count = rmap_add(vcpu, sptep, gfn);
			if (rmap_count > RMAP_RECYCLE_THRESHOLD)
				rmap_recycle(vcpu, sptep, gfn);
		}
	}

	return ret;
}

static kvm_pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn,
					 bool no_dirty_log)
{
	struct kvm_memory_slot *slot;

	slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, no_dirty_log);
	if (!slot)
		return KVM_PFN_ERR_FAULT;

	return gfn_to_pfn_memslot_atomic(slot, gfn);
}

static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
				    struct kvm_mmu_page *sp,
				    u64 *start, u64 *end)
{
	struct page *pages[PTE_PREFETCH_NUM];
	struct kvm_memory_slot *slot;
	unsigned int access = sp->role.access;
	int i, ret;
	gfn_t gfn;

	gfn = kvm_mmu_page_get_gfn(sp, start - sp->spt);
	slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, access & ACC_WRITE_MASK);
	if (!slot)
		return -1;

	ret = gfn_to_page_many_atomic(slot, gfn, pages, end - start);
	if (ret <= 0)
		return -1;

	for (i = 0; i < ret; i++, gfn++, start++) {
		mmu_set_spte(vcpu, start, access, false, sp->role.level, gfn,
			     page_to_pfn(pages[i]), true, true);
		put_page(pages[i]);
	}

	return 0;
}
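
/*
 * The prefetch window is PTE_PREFETCH_NUM entries wide (8 at the time of
 * writing) and is aligned down to a multiple of that size: a fault on the
 * SPTE at index 13 of an sp scans indices 8..15, batching the neighbouring
 * not-yet-present SPTEs into a single gfn_to_page_many_atomic() call.
 */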
static void __direct_pte_prefetch(struct kvm_vcpu *vcpu,
				  struct kvm_mmu_page *sp, u64 *sptep)
{
	u64 *spte, *start = NULL;
	int i;

	WARN_ON(!sp->role.direct);

	i = (sptep - sp->spt) & ~(PTE_PREFETCH_NUM - 1);
	spte = sp->spt + i;

	for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) {
		if (is_shadow_present_pte(*spte) || spte == sptep) {
			if (!start)
				continue;
			if (direct_pte_prefetch_many(vcpu, sp, start, spte) < 0)
				break;
			start = NULL;
		} else if (!start)
			start = spte;
	}
}

static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep)
{
	struct kvm_mmu_page *sp;

	sp = sptep_to_sp(sptep);

	/*
	 * Without accessed bits, there's no way to distinguish between
	 * actually accessed translations and prefetched ones, so disable
	 * PTE prefetch if accessed bits aren't available.
	 */
	if (sp_ad_disabled(sp))
		return;

	if (sp->role.level > PG_LEVEL_4K)
		return;

	__direct_pte_prefetch(vcpu, sp, sptep);
}

static int host_pfn_mapping_level(struct kvm_vcpu *vcpu, gfn_t gfn,
				  kvm_pfn_t pfn, struct kvm_memory_slot *slot)
{
	unsigned long hva;
	pte_t *pte;
	int level;

	if (!PageCompound(pfn_to_page(pfn)) && !kvm_is_zone_device_pfn(pfn))
		return PG_LEVEL_4K;

	/*
	 * Note, using the already-retrieved memslot and __gfn_to_hva_memslot()
	 * is not solely for performance: it's also necessary to avoid the
	 * "writable" check in __gfn_to_hva_many(), which will always fail on
	 * read-only memslots due to gfn_to_hva() assuming writes. Earlier
	 * page fault steps have already verified the guest isn't writing a
	 * read-only memslot.
	 */
	hva = __gfn_to_hva_memslot(slot, gfn);

	pte = lookup_address_in_mm(vcpu->kvm->mm, hva, &level);
	if (unlikely(!pte))
		return PG_LEVEL_4K;

	return level;
}
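
/*
 * host_pfn_mapping_level() consults the host page tables: if the hva is
 * backed by a huge mapping (e.g. a transparent hugepage),
 * lookup_address_in_mm() reports PG_LEVEL_2M or PG_LEVEL_1G, and KVM may
 * then install an equally large guest mapping.
 */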

int kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, gfn_t gfn,
			    int max_level, kvm_pfn_t *pfnp,
			    bool huge_page_disallowed, int *req_level)
{
	struct kvm_memory_slot *slot;
	struct kvm_lpage_info *linfo;
	kvm_pfn_t pfn = *pfnp;
	kvm_pfn_t mask;
	int level;

	*req_level = PG_LEVEL_4K;

	if (unlikely(max_level == PG_LEVEL_4K))
		return PG_LEVEL_4K;

	if (is_error_noslot_pfn(pfn) || kvm_is_reserved_pfn(pfn))
		return PG_LEVEL_4K;

	slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, true);
	if (!slot)
		return PG_LEVEL_4K;

	max_level = min(max_level, max_huge_page_level);
	for ( ; max_level > PG_LEVEL_4K; max_level--) {
		linfo = lpage_info_slot(gfn, slot, max_level);
		if (!linfo->disallow_lpage)
			break;
	}

	if (max_level == PG_LEVEL_4K)
		return PG_LEVEL_4K;

	level = host_pfn_mapping_level(vcpu, gfn, pfn, slot);
	if (level == PG_LEVEL_4K)
		return level;

	*req_level = level = min(level, max_level);

	/*
	 * Enforce the iTLB multihit workaround after capturing the requested
	 * level, which will be used to do precise, accurate accounting.
	 */
	if (huge_page_disallowed)
		return PG_LEVEL_4K;

	/*
	 * mmu_notifier_retry() was successful and mmu_lock is held, so
	 * the pmd can't be split from under us.
	 */
	mask = KVM_PAGES_PER_HPAGE(level) - 1;
	VM_BUG_ON((gfn & mask) != (pfn & mask));
	*pfnp = pfn & ~mask;

	return level;
}

void disallowed_hugepage_adjust(u64 spte, gfn_t gfn, int cur_level,
				kvm_pfn_t *pfnp, int *goal_levelp)
{
	int level = *goal_levelp;

	if (cur_level == level && level > PG_LEVEL_4K &&
	    is_shadow_present_pte(spte) &&
	    !is_large_pte(spte)) {
		/*
		 * A small SPTE exists for this pfn, but FNAME(fetch)
		 * and __direct_map would like to create a large PTE
		 * instead: just force them to go down another level,
		 * patching the next 9 bits of the address back into
		 * the pfn for them.
		 */
		u64 page_mask = KVM_PAGES_PER_HPAGE(level) -
				KVM_PAGES_PER_HPAGE(level - 1);
		*pfnp |= gfn & page_mask;
		(*goal_levelp)--;
	}
}
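
/*
 * Worked example: with cur_level == level == PG_LEVEL_2M, page_mask is
 * KVM_PAGES_PER_HPAGE(2) - KVM_PAGES_PER_HPAGE(1) == 512 - 1 == 0x1ff,
 * so the low 9 bits of the gfn are copied into *pfnp and *goal_levelp
 * drops to PG_LEVEL_4K: the walk continues one level down with the pfn
 * of the exact 4K page.
 */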

static int __direct_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
			int map_writable, int max_level, kvm_pfn_t pfn,
			bool prefault, bool is_tdp)
{
	bool nx_huge_page_workaround_enabled = is_nx_huge_page_enabled();
	bool write = error_code & PFERR_WRITE_MASK;
	bool exec = error_code & PFERR_FETCH_MASK;
	bool huge_page_disallowed = exec && nx_huge_page_workaround_enabled;
	struct kvm_shadow_walk_iterator it;
	struct kvm_mmu_page *sp;
	int level, req_level, ret;
	gfn_t gfn = gpa >> PAGE_SHIFT;
	gfn_t base_gfn = gfn;

	if (WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa)))
		return RET_PF_RETRY;

	level = kvm_mmu_hugepage_adjust(vcpu, gfn, max_level, &pfn,
					huge_page_disallowed, &req_level);

	trace_kvm_mmu_spte_requested(gpa, level, pfn);
	for_each_shadow_entry(vcpu, gpa, it) {
		/*
		 * We cannot overwrite existing page tables with an NX
		 * large page, as the leaf could be executable.
		 */
		if (nx_huge_page_workaround_enabled)
			disallowed_hugepage_adjust(*it.sptep, gfn, it.level,
						   &pfn, &level);

		base_gfn = gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);
		if (it.level == level)
			break;

		drop_large_spte(vcpu, it.sptep);
		if (!is_shadow_present_pte(*it.sptep)) {
			sp = kvm_mmu_get_page(vcpu, base_gfn, it.addr,
					      it.level - 1, true, ACC_ALL);

			link_shadow_page(vcpu, it.sptep, sp);
			if (is_tdp && huge_page_disallowed &&
			    req_level >= it.level)
				account_huge_nx_page(vcpu->kvm, sp);
		}
	}

	ret = mmu_set_spte(vcpu, it.sptep, ACC_ALL,
			   write, level, base_gfn, pfn, prefault,
			   map_writable);
	if (ret == RET_PF_SPURIOUS)
		return ret;

	direct_pte_prefetch(vcpu, it.sptep);
	++vcpu->stat.pf_fixed;
	return ret;
}

static void kvm_send_hwpoison_signal(unsigned long address, struct task_struct *tsk)
{
	send_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, PAGE_SHIFT, tsk);
}

static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn)
{
	/*
	 * Do not cache the mmio info caused by writing the readonly gfn
	 * into the spte; otherwise a read access on the readonly gfn would
	 * also cause an mmio page fault and be treated as an mmio access.
	 */
	if (pfn == KVM_PFN_ERR_RO_FAULT)
		return RET_PF_EMULATE;

	if (pfn == KVM_PFN_ERR_HWPOISON) {
		kvm_send_hwpoison_signal(kvm_vcpu_gfn_to_hva(vcpu, gfn), current);
		return RET_PF_RETRY;
	}

	return -EFAULT;
}
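
/*
 * For a hardware-poisoned page, kvm_handle_bad_page() queues a
 * BUS_MCEERR_AR SIGBUS for the faulting task and returns RET_PF_RETRY,
 * so the vCPU does not make progress past the poisoned gfn.
 */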

static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
				kvm_pfn_t pfn, unsigned int access,
				int *ret_val)
{
	/* The pfn is invalid, report the error! */
	if (unlikely(is_error_pfn(pfn))) {
		*ret_val = kvm_handle_bad_page(vcpu, gfn, pfn);
		return true;
	}

	if (unlikely(is_noslot_pfn(pfn)))
		vcpu_cache_mmio_info(vcpu, gva, gfn,
				     access & shadow_mmio_access_mask);

	return false;
}

static bool page_fault_can_be_fast(u32 error_code)
{
	/*
	 * Do not fix an mmio spte with an invalid generation number; it
	 * needs to be updated by the slow page fault path.
	 */
	if (unlikely(error_code & PFERR_RSVD_MASK))
		return false;

	/* See if the page fault is due to an NX violation */
	if (unlikely(((error_code & (PFERR_FETCH_MASK | PFERR_PRESENT_MASK))
		      == (PFERR_FETCH_MASK | PFERR_PRESENT_MASK))))
		return false;

	/*
	 * #PF can be fast if:
	 * 1. The shadow page table entry is not present, which could mean that
	 *    the fault is potentially caused by access tracking (if enabled).
	 * 2. The shadow page table entry is present and the fault is caused
	 *    by write-protection; then we just need to change the W bit of
	 *    the spte, which can be done outside of mmu_lock.
	 *
	 * However, if access tracking is disabled we know that a non-present
	 * page must be a genuine page fault where we have to create a new SPTE.
	 * So, if access tracking is disabled, we return true only for write
	 * accesses to a present page.
	 */

	return shadow_acc_track_mask != 0 ||
	       ((error_code & (PFERR_WRITE_MASK | PFERR_PRESENT_MASK))
		== (PFERR_WRITE_MASK | PFERR_PRESENT_MASK));
}
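
/*
 * Summarizing page_fault_can_be_fast():
 *   - reserved-bit (MMIO) faults are never fast;
 *   - NX violations (fetches from a present PTE) are never fast;
 *   - writes to a present PTE are always fast-path candidates;
 *   - everything else is a candidate only when access tracking is in use.
 */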

/*
 * Returns true if the SPTE was fixed successfully. Otherwise,
 * someone else modified the SPTE from its original value.
 */
static bool
fast_pf_fix_direct_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
			u64 *sptep, u64 old_spte, u64 new_spte)
{
	gfn_t gfn;

	WARN_ON(!sp->role.direct);

	/*
	 * Theoretically we could also set the dirty bit (and flush the TLB)
	 * here in order to eliminate unnecessary PML logging. See the
	 * comments in set_spte. But fast_page_fault is very unlikely to
	 * happen with PML enabled, so we do not do this. This might result
	 * in the same GPA being logged in the PML buffer again when the
	 * write really happens, and eventually in mark_page_dirty being
	 * called twice, but that is harmless. Skipping it also avoids the
	 * TLB flush needed after setting the dirty bit, so non-PML cases
	 * won't be impacted.
	 *
	 * Compare with set_spte where instead shadow_dirty_mask is set.
	 */
	if (cmpxchg64(sptep, old_spte, new_spte) != old_spte)
		return false;

	if (is_writable_pte(new_spte) && !is_writable_pte(old_spte)) {
		/*
		 * The gfn of a direct spte is stable since it is
		 * calculated from sp->gfn.
		 */
		gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->spt);
		kvm_vcpu_mark_page_dirty(vcpu, gfn);
	}

	return true;
}
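
/*
 * Note the ordering in fast_pf_fix_direct_spte(): the gfn is marked dirty
 * only by the vCPU whose cmpxchg64() actually installed the writable SPTE;
 * a vCPU that loses the race bails out and retries the fast path instead.
 */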

static bool is_access_allowed(u32 fault_err_code, u64 spte)
{
	if (fault_err_code & PFERR_FETCH_MASK)
		return is_executable_pte(spte);

	if (fault_err_code & PFERR_WRITE_MASK)
		return is_writable_pte(spte);

	/* Fault was on Read access */
	return spte & PT_PRESENT_MASK;
}

/*
 * Returns one of RET_PF_INVALID, RET_PF_FIXED or RET_PF_SPURIOUS.
 */
static int fast_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
			   u32 error_code)
{
	struct kvm_shadow_walk_iterator iterator;
	struct kvm_mmu_page *sp;
	int ret = RET_PF_INVALID;
	u64 spte = 0ull;
	uint retry_count = 0;

	if (!page_fault_can_be_fast(error_code))
		return ret;

	walk_shadow_page_lockless_begin(vcpu);

	do {
		u64 new_spte;

		for_each_shadow_entry_lockless(vcpu, cr2_or_gpa, iterator, spte)
			if (!is_shadow_present_pte(spte))
				break;

		sp = sptep_to_sp(iterator.sptep);
		if (!is_last_spte(spte, sp->role.level))
			break;

		/*
		 * Check whether the memory access that caused the fault would
		 * still cause it if it were to be performed right now. If not,
		 * then this is a spurious fault caused by a lazily flushed
		 * TLB, or some other CPU has already fixed the PTE after the
		 * current CPU took the fault.
		 *
		 * There is no need to check the access of upper level table
		 * entries since they are always ACC_ALL.
		 */
		if (is_access_allowed(error_code, spte)) {
			ret = RET_PF_SPURIOUS;
			break;
		}

		new_spte = spte;

		if (is_access_track_spte(spte))
			new_spte = restore_acc_track_spte(new_spte);

		/*
		 * Currently, to simplify the code, write-protection can
		 * be removed in the fast path only if the SPTE was
		 * write-protected for dirty-logging or access tracking.
		 */
		if ((error_code & PFERR_WRITE_MASK) &&
		    spte_can_locklessly_be_made_writable(spte)) {
			new_spte |= PT_WRITABLE_MASK;

			/*
			 * Do not fix write permission on a large spte: since
			 * fast_pf_fix_direct_spte() only dirties the first
			 * page in the dirty bitmap, the other pages covered
			 * by the spte would be missed if its slot has dirty
			 * logging enabled.
			 *
			 * Instead, let the slow page fault path create a
			 * normal spte to fix the access.
			 *
			 * See the comments in kvm_arch_commit_memory_region().
			 */
			if (sp->role.level > PG_LEVEL_4K)
				break;
		}

		/* Verify that the fault can be handled in the fast path */
		if (new_spte == spte ||
		    !is_access_allowed(error_code, new_spte))
			break;

		/*
		 * Currently, fast page fault only works for direct mappings,
		 * since the gfn is not stable for indirect shadow pages. See
		 * Documentation/virt/kvm/locking.rst for more detail.
		 */
		if (fast_pf_fix_direct_spte(vcpu, sp, iterator.sptep, spte,
					    new_spte)) {
			ret = RET_PF_FIXED;
			break;
		}

		if (++retry_count > 4) {
			printk_once(KERN_WARNING
				"kvm: Fast #PF retrying more than 4 times.\n");
			break;
		}

	} while (true);

	trace_fast_page_fault(vcpu, cr2_or_gpa, error_code, iterator.sptep,
			      spte, ret);
	walk_shadow_page_lockless_end(vcpu);

	return ret;
}
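
/*
 * Root pages are reference counted: mmu_alloc_root() pins a root via
 * ++sp->root_count, and mmu_free_root_page() below drops that reference
 * through kvm_mmu_put_root(), only zapping or freeing the page once the
 * last reference is gone.
 */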
static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa,
			       struct list_head *invalid_list)
{
	struct kvm_mmu_page *sp;

	if (!VALID_PAGE(*root_hpa))
		return;

	sp = to_shadow_page(*root_hpa & PT64_BASE_ADDR_MASK);
	if (WARN_ON(!sp))
		return;

	if (kvm_mmu_put_root(kvm, sp)) {
		if (sp->tdp_mmu_page)
			kvm_tdp_mmu_free_root(kvm, sp);
		else if (sp->role.invalid)
			kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
	}

	*root_hpa = INVALID_PAGE;
}

/* roots_to_free must be some combination of the KVM_MMU_ROOT_* flags */
void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
			ulong roots_to_free)
{
	struct kvm *kvm = vcpu->kvm;
	int i;
	LIST_HEAD(invalid_list);
	bool free_active_root = roots_to_free & KVM_MMU_ROOT_CURRENT;

	BUILD_BUG_ON(KVM_MMU_NUM_PREV_ROOTS >= BITS_PER_LONG);

	/* Before acquiring the MMU lock, see if we need to do any real work. */
	if (!(free_active_root && VALID_PAGE(mmu->root_hpa))) {
		for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
			if ((roots_to_free & KVM_MMU_ROOT_PREVIOUS(i)) &&
			    VALID_PAGE(mmu->prev_roots[i].hpa))
				break;

		if (i == KVM_MMU_NUM_PREV_ROOTS)
			return;
	}

	spin_lock(&kvm->mmu_lock);

	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
		if (roots_to_free & KVM_MMU_ROOT_PREVIOUS(i))
			mmu_free_root_page(kvm, &mmu->prev_roots[i].hpa,
					   &invalid_list);

	if (free_active_root) {
		if (mmu->shadow_root_level >= PT64_ROOT_4LEVEL &&
		    (mmu->root_level >= PT64_ROOT_4LEVEL || mmu->direct_map)) {
			mmu_free_root_page(kvm, &mmu->root_hpa, &invalid_list);
		} else if (mmu->pae_root) {
			for (i = 0; i < 4; ++i)
				if (mmu->pae_root[i] != 0)
					mmu_free_root_page(kvm,
							   &mmu->pae_root[i],
							   &invalid_list);
		}
		mmu->root_hpa = INVALID_PAGE;
		mmu->root_pgd = 0;
	}

	kvm_mmu_commit_zap_page(kvm, &invalid_list);
	spin_unlock(&kvm->mmu_lock);
}
EXPORT_SYMBOL_GPL(kvm_mmu_free_roots);

static int mmu_check_root(struct kvm_vcpu *vcpu, gfn_t root_gfn)
{
	int ret = 0;

	if (!kvm_vcpu_is_visible_gfn(vcpu, root_gfn)) {
		kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
		ret = 1;
	}

	return ret;
}

static hpa_t mmu_alloc_root(struct kvm_vcpu *vcpu, gfn_t gfn, gva_t gva,
			    u8 level, bool direct)
{
	struct kvm_mmu_page *sp;

	spin_lock(&vcpu->kvm->mmu_lock);

	if (make_mmu_pages_available(vcpu)) {
		spin_unlock(&vcpu->kvm->mmu_lock);
		return INVALID_PAGE;
	}
	sp = kvm_mmu_get_page(vcpu, gfn, gva, level, direct, ACC_ALL);
	++sp->root_count;

	spin_unlock(&vcpu->kvm->mmu_lock);
	return __pa(sp->spt);
}
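
/*
 * Because mmu_alloc_root() returns with sp->root_count elevated, the new
 * root cannot be reclaimed by kvm_mmu_zap_oldest_mmu_pages(), which skips
 * pages with a non-zero root_count, until the root is freed again.
 */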

static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
{
	u8 shadow_root_level = vcpu->arch.mmu->shadow_root_level;
	hpa_t root;
	unsigned i;

	if (vcpu->kvm->arch.tdp_mmu_enabled) {
		root = kvm_tdp_mmu_get_vcpu_root_hpa(vcpu);

		if (!VALID_PAGE(root))
			return -ENOSPC;
		vcpu->arch.mmu->root_hpa = root;
	} else if (shadow_root_level >= PT64_ROOT_4LEVEL) {
		root = mmu_alloc_root(vcpu, 0, 0, shadow_root_level,
				      true);

		if (!VALID_PAGE(root))
			return -ENOSPC;
		vcpu->arch.mmu->root_hpa = root;
	} else if (shadow_root_level == PT32E_ROOT_LEVEL) {
		for (i = 0; i < 4; ++i) {
			MMU_WARN_ON(VALID_PAGE(vcpu->arch.mmu->pae_root[i]));

			root = mmu_alloc_root(vcpu, i << (30 - PAGE_SHIFT),
					      i << 30, PT32_ROOT_LEVEL, true);
			if (!VALID_PAGE(root))
				return -ENOSPC;
			vcpu->arch.mmu->pae_root[i] = root | PT_PRESENT_MASK;
		}
		vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->pae_root);
	} else
		BUG();

	/* root_pgd is ignored for direct MMUs. */
	vcpu->arch.mmu->root_pgd = 0;

	return 0;
}

static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
{
	u64 pdptr, pm_mask;
	gfn_t root_gfn, root_pgd;
	hpa_t root;
	int i;

	root_pgd = vcpu->arch.mmu->get_guest_pgd(vcpu);
	root_gfn = root_pgd >> PAGE_SHIFT;

	if (mmu_check_root(vcpu, root_gfn))
		return 1;

	/*
	 * Do we shadow a long mode page table? If so we need to
	 * write-protect the guest's page table root.
	 */
	if (vcpu->arch.mmu->root_level >= PT64_ROOT_4LEVEL) {
		MMU_WARN_ON(VALID_PAGE(vcpu->arch.mmu->root_hpa));

		root = mmu_alloc_root(vcpu, root_gfn, 0,
				      vcpu->arch.mmu->shadow_root_level, false);
		if (!VALID_PAGE(root))
			return -ENOSPC;
		vcpu->arch.mmu->root_hpa = root;
		goto set_root_pgd;
	}

	/*
	 * We shadow a 32 bit page table. This may be a legacy 2-level or a
	 * PAE 3-level page table. In either case we need to be aware that
	 * the shadow page table may be a PAE or a long mode page table.
	 */
	pm_mask = PT_PRESENT_MASK;
	if (vcpu->arch.mmu->shadow_root_level == PT64_ROOT_4LEVEL) {
		pm_mask |= PT_ACCESSED_MASK | PT_WRITABLE_MASK | PT_USER_MASK;

		/*
		 * When shadowing 32-bit NPT with 64-bit NPT, allocate the
		 * page for the PDPTEs only when needed. Unlike with 32-bit
		 * NPT, it doesn't need to be in low mem. See also lm_root
		 * below.
		 */
		if (!vcpu->arch.mmu->pae_root) {
			WARN_ON_ONCE(!tdp_enabled);

			vcpu->arch.mmu->pae_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
			if (!vcpu->arch.mmu->pae_root)
				return -ENOMEM;
		}
	}

	for (i = 0; i < 4; ++i) {
		MMU_WARN_ON(VALID_PAGE(vcpu->arch.mmu->pae_root[i]));
		if (vcpu->arch.mmu->root_level == PT32E_ROOT_LEVEL) {
			pdptr = vcpu->arch.mmu->get_pdptr(vcpu, i);
			if (!(pdptr & PT_PRESENT_MASK)) {
				vcpu->arch.mmu->pae_root[i] = 0;
				continue;
			}
			root_gfn = pdptr >> PAGE_SHIFT;
			if (mmu_check_root(vcpu, root_gfn))
				return 1;
		}

		root = mmu_alloc_root(vcpu, root_gfn, i << 30,
				      PT32_ROOT_LEVEL, false);
		if (!VALID_PAGE(root))
			return -ENOSPC;
		vcpu->arch.mmu->pae_root[i] = root | pm_mask;
	}
	vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->pae_root);

	/*
	 * When shadowing 32-bit or PAE NPT with 64-bit NPT, the PML4 and PDP
	 * tables are allocated and initialized at MMU creation as there is no
	 * equivalent level in the guest's NPT to shadow. Allocate the tables
	 * on demand, as running a 32-bit L1 VMM is very rare. The PDP is
	 * handled above (to share logic with PAE); deal with the PML4 here.
	 */
	if (vcpu->arch.mmu->shadow_root_level == PT64_ROOT_4LEVEL) {
		if (vcpu->arch.mmu->lm_root == NULL) {
			u64 *lm_root;

			lm_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
			if (!lm_root)
				return -ENOMEM;

			lm_root[0] = __pa(vcpu->arch.mmu->pae_root) | pm_mask;

			vcpu->arch.mmu->lm_root = lm_root;
		}

		vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->lm_root);
	}

set_root_pgd:
	vcpu->arch.mmu->root_pgd = root_pgd;

	return 0;
}

static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.mmu->direct_map)
		return mmu_alloc_direct_roots(vcpu);
	else
		return mmu_alloc_shadow_roots(vcpu);
}
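
/*
 * The smp_load_acquire()s on sp->unsync and sp->unsync_children in
 * kvm_mmu_sync_roots() pair with the smp_wmb() in mmu_need_write_protect();
 * see the ordering discussion there.
 */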
34098c2ecf20Sopenharmony_ci */ 34108c2ecf20Sopenharmony_ci if (!smp_load_acquire(&sp->unsync) && 34118c2ecf20Sopenharmony_ci !smp_load_acquire(&sp->unsync_children)) 34128c2ecf20Sopenharmony_ci return; 34138c2ecf20Sopenharmony_ci 34148c2ecf20Sopenharmony_ci spin_lock(&vcpu->kvm->mmu_lock); 34158c2ecf20Sopenharmony_ci kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC); 34168c2ecf20Sopenharmony_ci 34178c2ecf20Sopenharmony_ci mmu_sync_children(vcpu, sp); 34188c2ecf20Sopenharmony_ci 34198c2ecf20Sopenharmony_ci kvm_mmu_audit(vcpu, AUDIT_POST_SYNC); 34208c2ecf20Sopenharmony_ci spin_unlock(&vcpu->kvm->mmu_lock); 34218c2ecf20Sopenharmony_ci return; 34228c2ecf20Sopenharmony_ci } 34238c2ecf20Sopenharmony_ci 34248c2ecf20Sopenharmony_ci spin_lock(&vcpu->kvm->mmu_lock); 34258c2ecf20Sopenharmony_ci kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC); 34268c2ecf20Sopenharmony_ci 34278c2ecf20Sopenharmony_ci for (i = 0; i < 4; ++i) { 34288c2ecf20Sopenharmony_ci hpa_t root = vcpu->arch.mmu->pae_root[i]; 34298c2ecf20Sopenharmony_ci 34308c2ecf20Sopenharmony_ci if (root && VALID_PAGE(root)) { 34318c2ecf20Sopenharmony_ci root &= PT64_BASE_ADDR_MASK; 34328c2ecf20Sopenharmony_ci sp = to_shadow_page(root); 34338c2ecf20Sopenharmony_ci mmu_sync_children(vcpu, sp); 34348c2ecf20Sopenharmony_ci } 34358c2ecf20Sopenharmony_ci } 34368c2ecf20Sopenharmony_ci 34378c2ecf20Sopenharmony_ci kvm_mmu_audit(vcpu, AUDIT_POST_SYNC); 34388c2ecf20Sopenharmony_ci spin_unlock(&vcpu->kvm->mmu_lock); 34398c2ecf20Sopenharmony_ci} 34408c2ecf20Sopenharmony_ciEXPORT_SYMBOL_GPL(kvm_mmu_sync_roots); 34418c2ecf20Sopenharmony_ci 34428c2ecf20Sopenharmony_cistatic gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gpa_t vaddr, 34438c2ecf20Sopenharmony_ci u32 access, struct x86_exception *exception) 34448c2ecf20Sopenharmony_ci{ 34458c2ecf20Sopenharmony_ci if (exception) 34468c2ecf20Sopenharmony_ci exception->error_code = 0; 34478c2ecf20Sopenharmony_ci return vaddr; 34488c2ecf20Sopenharmony_ci} 34498c2ecf20Sopenharmony_ci 34508c2ecf20Sopenharmony_cistatic gpa_t nonpaging_gva_to_gpa_nested(struct kvm_vcpu *vcpu, gpa_t vaddr, 34518c2ecf20Sopenharmony_ci u32 access, 34528c2ecf20Sopenharmony_ci struct x86_exception *exception) 34538c2ecf20Sopenharmony_ci{ 34548c2ecf20Sopenharmony_ci if (exception) 34558c2ecf20Sopenharmony_ci exception->error_code = 0; 34568c2ecf20Sopenharmony_ci return vcpu->arch.nested_mmu.translate_gpa(vcpu, vaddr, access, exception); 34578c2ecf20Sopenharmony_ci} 34588c2ecf20Sopenharmony_ci 34598c2ecf20Sopenharmony_cistatic bool 34608c2ecf20Sopenharmony_ci__is_rsvd_bits_set(struct rsvd_bits_validate *rsvd_check, u64 pte, int level) 34618c2ecf20Sopenharmony_ci{ 34628c2ecf20Sopenharmony_ci int bit7 = (pte >> 7) & 1; 34638c2ecf20Sopenharmony_ci 34648c2ecf20Sopenharmony_ci return pte & rsvd_check->rsvd_bits_mask[bit7][level-1]; 34658c2ecf20Sopenharmony_ci} 34668c2ecf20Sopenharmony_ci 34678c2ecf20Sopenharmony_cistatic bool __is_bad_mt_xwr(struct rsvd_bits_validate *rsvd_check, u64 pte) 34688c2ecf20Sopenharmony_ci{ 34698c2ecf20Sopenharmony_ci return rsvd_check->bad_mt_xwr & BIT_ULL(pte & 0x3f); 34708c2ecf20Sopenharmony_ci} 34718c2ecf20Sopenharmony_ci 34728c2ecf20Sopenharmony_cistatic bool mmio_info_in_cache(struct kvm_vcpu *vcpu, u64 addr, bool direct) 34738c2ecf20Sopenharmony_ci{ 34748c2ecf20Sopenharmony_ci /* 34758c2ecf20Sopenharmony_ci * A nested guest cannot use the MMIO cache if it is using nested 34768c2ecf20Sopenharmony_ci * page tables, because cr2 is a nGPA while the cache stores GPAs. 
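 * (The nGPA would first need to be translated through the L1 page
 * tables before it could be compared against the cached GPAs.)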
34778c2ecf20Sopenharmony_ci */ 34788c2ecf20Sopenharmony_ci if (mmu_is_nested(vcpu)) 34798c2ecf20Sopenharmony_ci return false; 34808c2ecf20Sopenharmony_ci 34818c2ecf20Sopenharmony_ci if (direct) 34828c2ecf20Sopenharmony_ci return vcpu_match_mmio_gpa(vcpu, addr); 34838c2ecf20Sopenharmony_ci 34848c2ecf20Sopenharmony_ci return vcpu_match_mmio_gva(vcpu, addr); 34858c2ecf20Sopenharmony_ci} 34868c2ecf20Sopenharmony_ci 34878c2ecf20Sopenharmony_ci/* 34888c2ecf20Sopenharmony_ci * Return the level of the lowest level SPTE added to sptes. 34898c2ecf20Sopenharmony_ci * That SPTE may be non-present. 34908c2ecf20Sopenharmony_ci */ 34918c2ecf20Sopenharmony_cistatic int get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes, int *root_level) 34928c2ecf20Sopenharmony_ci{ 34938c2ecf20Sopenharmony_ci struct kvm_shadow_walk_iterator iterator; 34948c2ecf20Sopenharmony_ci int leaf = -1; 34958c2ecf20Sopenharmony_ci u64 spte; 34968c2ecf20Sopenharmony_ci 34978c2ecf20Sopenharmony_ci walk_shadow_page_lockless_begin(vcpu); 34988c2ecf20Sopenharmony_ci 34998c2ecf20Sopenharmony_ci for (shadow_walk_init(&iterator, vcpu, addr), 35008c2ecf20Sopenharmony_ci *root_level = iterator.level; 35018c2ecf20Sopenharmony_ci shadow_walk_okay(&iterator); 35028c2ecf20Sopenharmony_ci __shadow_walk_next(&iterator, spte)) { 35038c2ecf20Sopenharmony_ci leaf = iterator.level; 35048c2ecf20Sopenharmony_ci spte = mmu_spte_get_lockless(iterator.sptep); 35058c2ecf20Sopenharmony_ci 35068c2ecf20Sopenharmony_ci sptes[leaf - 1] = spte; 35078c2ecf20Sopenharmony_ci 35088c2ecf20Sopenharmony_ci if (!is_shadow_present_pte(spte)) 35098c2ecf20Sopenharmony_ci break; 35108c2ecf20Sopenharmony_ci } 35118c2ecf20Sopenharmony_ci 35128c2ecf20Sopenharmony_ci walk_shadow_page_lockless_end(vcpu); 35138c2ecf20Sopenharmony_ci 35148c2ecf20Sopenharmony_ci return leaf; 35158c2ecf20Sopenharmony_ci} 35168c2ecf20Sopenharmony_ci 35178c2ecf20Sopenharmony_ci/* return true if reserved bit is detected on spte. 
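 * Also return, via @sptep, the leaf SPTE that terminated the walk (it
 * may be non-present) so the caller can check it for an MMIO SPTE.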
*/ 35188c2ecf20Sopenharmony_cistatic bool get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep) 35198c2ecf20Sopenharmony_ci{ 35208c2ecf20Sopenharmony_ci u64 sptes[PT64_ROOT_MAX_LEVEL]; 35218c2ecf20Sopenharmony_ci struct rsvd_bits_validate *rsvd_check; 35228c2ecf20Sopenharmony_ci int root, leaf, level; 35238c2ecf20Sopenharmony_ci bool reserved = false; 35248c2ecf20Sopenharmony_ci 35258c2ecf20Sopenharmony_ci if (!VALID_PAGE(vcpu->arch.mmu->root_hpa)) { 35268c2ecf20Sopenharmony_ci *sptep = 0ull; 35278c2ecf20Sopenharmony_ci return reserved; 35288c2ecf20Sopenharmony_ci } 35298c2ecf20Sopenharmony_ci 35308c2ecf20Sopenharmony_ci if (is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa)) 35318c2ecf20Sopenharmony_ci leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes, &root); 35328c2ecf20Sopenharmony_ci else 35338c2ecf20Sopenharmony_ci leaf = get_walk(vcpu, addr, sptes, &root); 35348c2ecf20Sopenharmony_ci 35358c2ecf20Sopenharmony_ci if (unlikely(leaf < 0)) { 35368c2ecf20Sopenharmony_ci *sptep = 0ull; 35378c2ecf20Sopenharmony_ci return reserved; 35388c2ecf20Sopenharmony_ci } 35398c2ecf20Sopenharmony_ci 35408c2ecf20Sopenharmony_ci rsvd_check = &vcpu->arch.mmu->shadow_zero_check; 35418c2ecf20Sopenharmony_ci 35428c2ecf20Sopenharmony_ci for (level = root; level >= leaf; level--) { 35438c2ecf20Sopenharmony_ci if (!is_shadow_present_pte(sptes[level - 1])) 35448c2ecf20Sopenharmony_ci break; 35458c2ecf20Sopenharmony_ci /* 35468c2ecf20Sopenharmony_ci * Use a bitwise-OR instead of a logical-OR to aggregate the 35478c2ecf20Sopenharmony_ci * reserved bit and EPT's invalid memtype/XWR checks to avoid 35488c2ecf20Sopenharmony_ci * adding a Jcc in the loop. 35498c2ecf20Sopenharmony_ci */ 35508c2ecf20Sopenharmony_ci reserved |= __is_bad_mt_xwr(rsvd_check, sptes[level - 1]) | 35518c2ecf20Sopenharmony_ci __is_rsvd_bits_set(rsvd_check, sptes[level - 1], 35528c2ecf20Sopenharmony_ci level); 35538c2ecf20Sopenharmony_ci } 35548c2ecf20Sopenharmony_ci 35558c2ecf20Sopenharmony_ci if (reserved) { 35568c2ecf20Sopenharmony_ci pr_err("%s: detect reserved bits on spte, addr 0x%llx, dump hierarchy:\n", 35578c2ecf20Sopenharmony_ci __func__, addr); 35588c2ecf20Sopenharmony_ci for (level = root; level >= leaf; level--) 35598c2ecf20Sopenharmony_ci pr_err("------ spte 0x%llx level %d.\n", 35608c2ecf20Sopenharmony_ci sptes[level - 1], level); 35618c2ecf20Sopenharmony_ci } 35628c2ecf20Sopenharmony_ci 35638c2ecf20Sopenharmony_ci *sptep = sptes[leaf - 1]; 35648c2ecf20Sopenharmony_ci 35658c2ecf20Sopenharmony_ci return reserved; 35668c2ecf20Sopenharmony_ci} 35678c2ecf20Sopenharmony_ci 35688c2ecf20Sopenharmony_cistatic int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct) 35698c2ecf20Sopenharmony_ci{ 35708c2ecf20Sopenharmony_ci u64 spte; 35718c2ecf20Sopenharmony_ci bool reserved; 35728c2ecf20Sopenharmony_ci 35738c2ecf20Sopenharmony_ci if (mmio_info_in_cache(vcpu, addr, direct)) 35748c2ecf20Sopenharmony_ci return RET_PF_EMULATE; 35758c2ecf20Sopenharmony_ci 35768c2ecf20Sopenharmony_ci reserved = get_mmio_spte(vcpu, addr, &spte); 35778c2ecf20Sopenharmony_ci if (WARN_ON(reserved)) 35788c2ecf20Sopenharmony_ci return -EINVAL; 35798c2ecf20Sopenharmony_ci 35808c2ecf20Sopenharmony_ci if (is_mmio_spte(spte)) { 35818c2ecf20Sopenharmony_ci gfn_t gfn = get_mmio_spte_gfn(spte); 35828c2ecf20Sopenharmony_ci unsigned int access = get_mmio_spte_access(spte); 35838c2ecf20Sopenharmony_ci 35848c2ecf20Sopenharmony_ci if (!check_mmio_spte(vcpu, spte)) 35858c2ecf20Sopenharmony_ci return RET_PF_INVALID; 35868c2ecf20Sopenharmony_ci
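	/*
	 * For a direct MMU the faulting address is a GPA, not a GVA, so
	 * there is no virtual address to cache below; clear it instead.
	 */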
35878c2ecf20Sopenharmony_ci if (direct) 35888c2ecf20Sopenharmony_ci addr = 0; 35898c2ecf20Sopenharmony_ci 35908c2ecf20Sopenharmony_ci trace_handle_mmio_page_fault(addr, gfn, access); 35918c2ecf20Sopenharmony_ci vcpu_cache_mmio_info(vcpu, addr, gfn, access); 35928c2ecf20Sopenharmony_ci return RET_PF_EMULATE; 35938c2ecf20Sopenharmony_ci } 35948c2ecf20Sopenharmony_ci 35958c2ecf20Sopenharmony_ci /* 35968c2ecf20Sopenharmony_ci * If the page table was zapped by another CPU, let the CPU fault 35978c2ecf20Sopenharmony_ci * again on the address. 35988c2ecf20Sopenharmony_ci */ 35998c2ecf20Sopenharmony_ci return RET_PF_RETRY; 36008c2ecf20Sopenharmony_ci} 36018c2ecf20Sopenharmony_ci 36028c2ecf20Sopenharmony_cistatic bool page_fault_handle_page_track(struct kvm_vcpu *vcpu, 36038c2ecf20Sopenharmony_ci u32 error_code, gfn_t gfn) 36048c2ecf20Sopenharmony_ci{ 36058c2ecf20Sopenharmony_ci if (unlikely(error_code & PFERR_RSVD_MASK)) 36068c2ecf20Sopenharmony_ci return false; 36078c2ecf20Sopenharmony_ci 36088c2ecf20Sopenharmony_ci if (!(error_code & PFERR_PRESENT_MASK) || 36098c2ecf20Sopenharmony_ci !(error_code & PFERR_WRITE_MASK)) 36108c2ecf20Sopenharmony_ci return false; 36118c2ecf20Sopenharmony_ci 36128c2ecf20Sopenharmony_ci /* 36138c2ecf20Sopenharmony_ci * The guest is writing a page that is write-tracked, which cannot 36148c2ecf20Sopenharmony_ci * be fixed up by the page fault handler. 36158c2ecf20Sopenharmony_ci */ 36168c2ecf20Sopenharmony_ci if (kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_WRITE)) 36178c2ecf20Sopenharmony_ci return true; 36188c2ecf20Sopenharmony_ci 36198c2ecf20Sopenharmony_ci return false; 36208c2ecf20Sopenharmony_ci} 36218c2ecf20Sopenharmony_ci 36228c2ecf20Sopenharmony_cistatic void shadow_page_table_clear_flood(struct kvm_vcpu *vcpu, gva_t addr) 36238c2ecf20Sopenharmony_ci{ 36248c2ecf20Sopenharmony_ci struct kvm_shadow_walk_iterator iterator; 36258c2ecf20Sopenharmony_ci u64 spte; 36268c2ecf20Sopenharmony_ci 36278c2ecf20Sopenharmony_ci walk_shadow_page_lockless_begin(vcpu); 36288c2ecf20Sopenharmony_ci for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) { 36298c2ecf20Sopenharmony_ci clear_sp_write_flooding_count(iterator.sptep); 36308c2ecf20Sopenharmony_ci if (!is_shadow_present_pte(spte)) 36318c2ecf20Sopenharmony_ci break; 36328c2ecf20Sopenharmony_ci } 36338c2ecf20Sopenharmony_ci walk_shadow_page_lockless_end(vcpu); 36348c2ecf20Sopenharmony_ci} 36358c2ecf20Sopenharmony_ci 36368c2ecf20Sopenharmony_cistatic u32 alloc_apf_token(struct kvm_vcpu *vcpu) 36378c2ecf20Sopenharmony_ci{ 36388c2ecf20Sopenharmony_ci /* make sure the token value is not 0 */ 36398c2ecf20Sopenharmony_ci u32 id = vcpu->arch.apf.id; 36408c2ecf20Sopenharmony_ci 36418c2ecf20Sopenharmony_ci if (id << 12 == 0) 36428c2ecf20Sopenharmony_ci vcpu->arch.apf.id = 1; 36438c2ecf20Sopenharmony_ci 36448c2ecf20Sopenharmony_ci return (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id; 36458c2ecf20Sopenharmony_ci} 36468c2ecf20Sopenharmony_ci 36478c2ecf20Sopenharmony_cistatic bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, 36488c2ecf20Sopenharmony_ci gfn_t gfn) 36498c2ecf20Sopenharmony_ci{ 36508c2ecf20Sopenharmony_ci struct kvm_arch_async_pf arch; 36518c2ecf20Sopenharmony_ci 36528c2ecf20Sopenharmony_ci arch.token = alloc_apf_token(vcpu); 36538c2ecf20Sopenharmony_ci arch.gfn = gfn; 36548c2ecf20Sopenharmony_ci arch.direct_map = vcpu->arch.mmu->direct_map; 36558c2ecf20Sopenharmony_ci arch.cr3 = vcpu->arch.mmu->get_guest_pgd(vcpu); 36568c2ecf20Sopenharmony_ci 36578c2ecf20Sopenharmony_ci return kvm_setup_async_pf(vcpu, 
cr2_or_gpa, 36588c2ecf20Sopenharmony_ci kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch); 36598c2ecf20Sopenharmony_ci} 36608c2ecf20Sopenharmony_ci 36618c2ecf20Sopenharmony_cistatic bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn, 36628c2ecf20Sopenharmony_ci gpa_t cr2_or_gpa, kvm_pfn_t *pfn, bool write, 36638c2ecf20Sopenharmony_ci bool *writable) 36648c2ecf20Sopenharmony_ci{ 36658c2ecf20Sopenharmony_ci struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); 36668c2ecf20Sopenharmony_ci bool async; 36678c2ecf20Sopenharmony_ci 36688c2ecf20Sopenharmony_ci /* 36698c2ecf20Sopenharmony_ci * Retry the page fault if the gfn hit a memslot that is being deleted 36708c2ecf20Sopenharmony_ci * or moved. This ensures any existing SPTEs for the old memslot will 36718c2ecf20Sopenharmony_ci * be zapped before KVM inserts a new MMIO SPTE for the gfn. 36728c2ecf20Sopenharmony_ci */ 36738c2ecf20Sopenharmony_ci if (slot && (slot->flags & KVM_MEMSLOT_INVALID)) 36748c2ecf20Sopenharmony_ci return true; 36758c2ecf20Sopenharmony_ci 36768c2ecf20Sopenharmony_ci /* Don't expose private memslots to L2. */ 36778c2ecf20Sopenharmony_ci if (is_guest_mode(vcpu) && !kvm_is_visible_memslot(slot)) { 36788c2ecf20Sopenharmony_ci *pfn = KVM_PFN_NOSLOT; 36798c2ecf20Sopenharmony_ci *writable = false; 36808c2ecf20Sopenharmony_ci return false; 36818c2ecf20Sopenharmony_ci } 36828c2ecf20Sopenharmony_ci 36838c2ecf20Sopenharmony_ci async = false; 36848c2ecf20Sopenharmony_ci *pfn = __gfn_to_pfn_memslot(slot, gfn, false, &async, write, writable); 36858c2ecf20Sopenharmony_ci if (!async) 36868c2ecf20Sopenharmony_ci return false; /* *pfn has correct page already */ 36878c2ecf20Sopenharmony_ci 36888c2ecf20Sopenharmony_ci if (!prefault && kvm_can_do_async_pf(vcpu)) { 36898c2ecf20Sopenharmony_ci trace_kvm_try_async_get_page(cr2_or_gpa, gfn); 36908c2ecf20Sopenharmony_ci if (kvm_find_async_pf_gfn(vcpu, gfn)) { 36918c2ecf20Sopenharmony_ci trace_kvm_async_pf_doublefault(cr2_or_gpa, gfn); 36928c2ecf20Sopenharmony_ci kvm_make_request(KVM_REQ_APF_HALT, vcpu); 36938c2ecf20Sopenharmony_ci return true; 36948c2ecf20Sopenharmony_ci } else if (kvm_arch_setup_async_pf(vcpu, cr2_or_gpa, gfn)) 36958c2ecf20Sopenharmony_ci return true; 36968c2ecf20Sopenharmony_ci } 36978c2ecf20Sopenharmony_ci 36988c2ecf20Sopenharmony_ci *pfn = __gfn_to_pfn_memslot(slot, gfn, false, NULL, write, writable); 36998c2ecf20Sopenharmony_ci return false; 37008c2ecf20Sopenharmony_ci} 37018c2ecf20Sopenharmony_ci 37028c2ecf20Sopenharmony_cistatic int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code, 37038c2ecf20Sopenharmony_ci bool prefault, int max_level, bool is_tdp) 37048c2ecf20Sopenharmony_ci{ 37058c2ecf20Sopenharmony_ci bool write = error_code & PFERR_WRITE_MASK; 37068c2ecf20Sopenharmony_ci bool map_writable; 37078c2ecf20Sopenharmony_ci 37088c2ecf20Sopenharmony_ci gfn_t gfn = gpa >> PAGE_SHIFT; 37098c2ecf20Sopenharmony_ci unsigned long mmu_seq; 37108c2ecf20Sopenharmony_ci kvm_pfn_t pfn; 37118c2ecf20Sopenharmony_ci int r; 37128c2ecf20Sopenharmony_ci 37138c2ecf20Sopenharmony_ci if (page_fault_handle_page_track(vcpu, error_code, gfn)) 37148c2ecf20Sopenharmony_ci return RET_PF_EMULATE; 37158c2ecf20Sopenharmony_ci 37168c2ecf20Sopenharmony_ci if (!is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa)) { 37178c2ecf20Sopenharmony_ci r = fast_page_fault(vcpu, gpa, error_code); 37188c2ecf20Sopenharmony_ci if (r != RET_PF_INVALID) 37198c2ecf20Sopenharmony_ci return r; 37208c2ecf20Sopenharmony_ci } 37218c2ecf20Sopenharmony_ci 37228c2ecf20Sopenharmony_ci r = 
mmu_topup_memory_caches(vcpu, false); 37238c2ecf20Sopenharmony_ci if (r) 37248c2ecf20Sopenharmony_ci return r; 37258c2ecf20Sopenharmony_ci 37268c2ecf20Sopenharmony_ci mmu_seq = vcpu->kvm->mmu_notifier_seq; 37278c2ecf20Sopenharmony_ci smp_rmb(); 37288c2ecf20Sopenharmony_ci 37298c2ecf20Sopenharmony_ci if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, write, &map_writable)) 37308c2ecf20Sopenharmony_ci return RET_PF_RETRY; 37318c2ecf20Sopenharmony_ci 37328c2ecf20Sopenharmony_ci if (handle_abnormal_pfn(vcpu, is_tdp ? 0 : gpa, gfn, pfn, ACC_ALL, &r)) 37338c2ecf20Sopenharmony_ci return r; 37348c2ecf20Sopenharmony_ci 37358c2ecf20Sopenharmony_ci r = RET_PF_RETRY; 37368c2ecf20Sopenharmony_ci spin_lock(&vcpu->kvm->mmu_lock); 37378c2ecf20Sopenharmony_ci if (mmu_notifier_retry(vcpu->kvm, mmu_seq)) 37388c2ecf20Sopenharmony_ci goto out_unlock; 37398c2ecf20Sopenharmony_ci r = make_mmu_pages_available(vcpu); 37408c2ecf20Sopenharmony_ci if (r) 37418c2ecf20Sopenharmony_ci goto out_unlock; 37428c2ecf20Sopenharmony_ci 37438c2ecf20Sopenharmony_ci if (is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa)) 37448c2ecf20Sopenharmony_ci r = kvm_tdp_mmu_map(vcpu, gpa, error_code, map_writable, max_level, 37458c2ecf20Sopenharmony_ci pfn, prefault); 37468c2ecf20Sopenharmony_ci else 37478c2ecf20Sopenharmony_ci r = __direct_map(vcpu, gpa, error_code, map_writable, max_level, pfn, 37488c2ecf20Sopenharmony_ci prefault, is_tdp); 37498c2ecf20Sopenharmony_ci 37508c2ecf20Sopenharmony_ciout_unlock: 37518c2ecf20Sopenharmony_ci spin_unlock(&vcpu->kvm->mmu_lock); 37528c2ecf20Sopenharmony_ci kvm_release_pfn_clean(pfn); 37538c2ecf20Sopenharmony_ci return r; 37548c2ecf20Sopenharmony_ci} 37558c2ecf20Sopenharmony_ci 37568c2ecf20Sopenharmony_cistatic int nonpaging_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, 37578c2ecf20Sopenharmony_ci u32 error_code, bool prefault) 37588c2ecf20Sopenharmony_ci{ 37598c2ecf20Sopenharmony_ci pgprintk("%s: gva %lx error %x\n", __func__, gpa, error_code); 37608c2ecf20Sopenharmony_ci 37618c2ecf20Sopenharmony_ci /* This path builds a PAE pagetable, we can map 2mb pages at maximum. */ 37628c2ecf20Sopenharmony_ci return direct_page_fault(vcpu, gpa & PAGE_MASK, error_code, prefault, 37638c2ecf20Sopenharmony_ci PG_LEVEL_2M, false); 37648c2ecf20Sopenharmony_ci} 37658c2ecf20Sopenharmony_ci 37668c2ecf20Sopenharmony_ciint kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code, 37678c2ecf20Sopenharmony_ci u64 fault_address, char *insn, int insn_len) 37688c2ecf20Sopenharmony_ci{ 37698c2ecf20Sopenharmony_ci int r = 1; 37708c2ecf20Sopenharmony_ci u32 flags = vcpu->arch.apf.host_apf_flags; 37718c2ecf20Sopenharmony_ci 37728c2ecf20Sopenharmony_ci#ifndef CONFIG_X86_64 37738c2ecf20Sopenharmony_ci /* A 64-bit CR2 should be impossible on 32-bit KVM. 
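 * WARN and reject the fault rather than silently truncating the upper
 * bits of the address.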
*/ 37748c2ecf20Sopenharmony_ci if (WARN_ON_ONCE(fault_address >> 32)) 37758c2ecf20Sopenharmony_ci return -EFAULT; 37768c2ecf20Sopenharmony_ci#endif 37778c2ecf20Sopenharmony_ci 37788c2ecf20Sopenharmony_ci vcpu->arch.l1tf_flush_l1d = true; 37798c2ecf20Sopenharmony_ci if (!flags) { 37808c2ecf20Sopenharmony_ci trace_kvm_page_fault(fault_address, error_code); 37818c2ecf20Sopenharmony_ci 37828c2ecf20Sopenharmony_ci if (kvm_event_needs_reinjection(vcpu)) 37838c2ecf20Sopenharmony_ci kvm_mmu_unprotect_page_virt(vcpu, fault_address); 37848c2ecf20Sopenharmony_ci r = kvm_mmu_page_fault(vcpu, fault_address, error_code, insn, 37858c2ecf20Sopenharmony_ci insn_len); 37868c2ecf20Sopenharmony_ci } else if (flags & KVM_PV_REASON_PAGE_NOT_PRESENT) { 37878c2ecf20Sopenharmony_ci vcpu->arch.apf.host_apf_flags = 0; 37888c2ecf20Sopenharmony_ci local_irq_disable(); 37898c2ecf20Sopenharmony_ci kvm_async_pf_task_wait_schedule(fault_address); 37908c2ecf20Sopenharmony_ci local_irq_enable(); 37918c2ecf20Sopenharmony_ci } else { 37928c2ecf20Sopenharmony_ci WARN_ONCE(1, "Unexpected host async PF flags: %x\n", flags); 37938c2ecf20Sopenharmony_ci } 37948c2ecf20Sopenharmony_ci 37958c2ecf20Sopenharmony_ci return r; 37968c2ecf20Sopenharmony_ci} 37978c2ecf20Sopenharmony_ciEXPORT_SYMBOL_GPL(kvm_handle_page_fault); 37988c2ecf20Sopenharmony_ci 37998c2ecf20Sopenharmony_ciint kvm_tdp_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code, 38008c2ecf20Sopenharmony_ci bool prefault) 38018c2ecf20Sopenharmony_ci{ 38028c2ecf20Sopenharmony_ci int max_level; 38038c2ecf20Sopenharmony_ci 38048c2ecf20Sopenharmony_ci for (max_level = KVM_MAX_HUGEPAGE_LEVEL; 38058c2ecf20Sopenharmony_ci max_level > PG_LEVEL_4K; 38068c2ecf20Sopenharmony_ci max_level--) { 38078c2ecf20Sopenharmony_ci int page_num = KVM_PAGES_PER_HPAGE(max_level); 38088c2ecf20Sopenharmony_ci gfn_t base = (gpa >> PAGE_SHIFT) & ~(page_num - 1); 38098c2ecf20Sopenharmony_ci 38108c2ecf20Sopenharmony_ci if (kvm_mtrr_check_gfn_range_consistency(vcpu, base, page_num)) 38118c2ecf20Sopenharmony_ci break; 38128c2ecf20Sopenharmony_ci } 38138c2ecf20Sopenharmony_ci 38148c2ecf20Sopenharmony_ci return direct_page_fault(vcpu, gpa, error_code, prefault, 38158c2ecf20Sopenharmony_ci max_level, true); 38168c2ecf20Sopenharmony_ci} 38178c2ecf20Sopenharmony_ci 38188c2ecf20Sopenharmony_cistatic void nonpaging_init_context(struct kvm_vcpu *vcpu, 38198c2ecf20Sopenharmony_ci struct kvm_mmu *context) 38208c2ecf20Sopenharmony_ci{ 38218c2ecf20Sopenharmony_ci context->page_fault = nonpaging_page_fault; 38228c2ecf20Sopenharmony_ci context->gva_to_gpa = nonpaging_gva_to_gpa; 38238c2ecf20Sopenharmony_ci context->sync_page = nonpaging_sync_page; 38248c2ecf20Sopenharmony_ci context->invlpg = NULL; 38258c2ecf20Sopenharmony_ci context->root_level = 0; 38268c2ecf20Sopenharmony_ci context->shadow_root_level = PT32E_ROOT_LEVEL; 38278c2ecf20Sopenharmony_ci context->direct_map = true; 38288c2ecf20Sopenharmony_ci context->nx = false; 38298c2ecf20Sopenharmony_ci} 38308c2ecf20Sopenharmony_ci 38318c2ecf20Sopenharmony_cistatic inline bool is_root_usable(struct kvm_mmu_root_info *root, gpa_t pgd, 38328c2ecf20Sopenharmony_ci union kvm_mmu_page_role role) 38338c2ecf20Sopenharmony_ci{ 38348c2ecf20Sopenharmony_ci return (role.direct || pgd == root->pgd) && 38358c2ecf20Sopenharmony_ci VALID_PAGE(root->hpa) && to_shadow_page(root->hpa) && 38368c2ecf20Sopenharmony_ci role.word == to_shadow_page(root->hpa)->role.word; 38378c2ecf20Sopenharmony_ci} 38388c2ecf20Sopenharmony_ci 38398c2ecf20Sopenharmony_ci/* 38408c2ecf20Sopenharmony_ci * 
Find out if a previously cached root matching the new pgd/role is available. 38418c2ecf20Sopenharmony_ci * The current root is also inserted into the cache. 38428c2ecf20Sopenharmony_ci * If a matching root was found, it is assigned to kvm_mmu->root_hpa and true is 38438c2ecf20Sopenharmony_ci * returned. 38448c2ecf20Sopenharmony_ci * Otherwise, the LRU root from the cache is assigned to kvm_mmu->root_hpa and 38458c2ecf20Sopenharmony_ci * false is returned. This root should now be freed by the caller. 38468c2ecf20Sopenharmony_ci */ 38478c2ecf20Sopenharmony_cistatic bool cached_root_available(struct kvm_vcpu *vcpu, gpa_t new_pgd, 38488c2ecf20Sopenharmony_ci union kvm_mmu_page_role new_role) 38498c2ecf20Sopenharmony_ci{ 38508c2ecf20Sopenharmony_ci uint i; 38518c2ecf20Sopenharmony_ci struct kvm_mmu_root_info root; 38528c2ecf20Sopenharmony_ci struct kvm_mmu *mmu = vcpu->arch.mmu; 38538c2ecf20Sopenharmony_ci 38548c2ecf20Sopenharmony_ci root.pgd = mmu->root_pgd; 38558c2ecf20Sopenharmony_ci root.hpa = mmu->root_hpa; 38568c2ecf20Sopenharmony_ci 38578c2ecf20Sopenharmony_ci if (is_root_usable(&root, new_pgd, new_role)) 38588c2ecf20Sopenharmony_ci return true; 38598c2ecf20Sopenharmony_ci 38608c2ecf20Sopenharmony_ci for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) { 38618c2ecf20Sopenharmony_ci swap(root, mmu->prev_roots[i]); 38628c2ecf20Sopenharmony_ci 38638c2ecf20Sopenharmony_ci if (is_root_usable(&root, new_pgd, new_role)) 38648c2ecf20Sopenharmony_ci break; 38658c2ecf20Sopenharmony_ci } 38668c2ecf20Sopenharmony_ci 38678c2ecf20Sopenharmony_ci mmu->root_hpa = root.hpa; 38688c2ecf20Sopenharmony_ci mmu->root_pgd = root.pgd; 38698c2ecf20Sopenharmony_ci 38708c2ecf20Sopenharmony_ci return i < KVM_MMU_NUM_PREV_ROOTS; 38718c2ecf20Sopenharmony_ci} 38728c2ecf20Sopenharmony_ci 38738c2ecf20Sopenharmony_cistatic bool fast_pgd_switch(struct kvm_vcpu *vcpu, gpa_t new_pgd, 38748c2ecf20Sopenharmony_ci union kvm_mmu_page_role new_role) 38758c2ecf20Sopenharmony_ci{ 38768c2ecf20Sopenharmony_ci struct kvm_mmu *mmu = vcpu->arch.mmu; 38778c2ecf20Sopenharmony_ci 38788c2ecf20Sopenharmony_ci /* 38798c2ecf20Sopenharmony_ci * For now, limit the fast switch to 64-bit hosts+VMs in order to avoid 38808c2ecf20Sopenharmony_ci * having to deal with PDPTEs. We may add support for 32-bit hosts/VMs 38818c2ecf20Sopenharmony_ci * later if necessary. 38828c2ecf20Sopenharmony_ci */ 38838c2ecf20Sopenharmony_ci if (mmu->shadow_root_level >= PT64_ROOT_4LEVEL && 38848c2ecf20Sopenharmony_ci mmu->root_level >= PT64_ROOT_4LEVEL) 38858c2ecf20Sopenharmony_ci return cached_root_available(vcpu, new_pgd, new_role); 38868c2ecf20Sopenharmony_ci 38878c2ecf20Sopenharmony_ci return false; 38888c2ecf20Sopenharmony_ci} 38898c2ecf20Sopenharmony_ci 38908c2ecf20Sopenharmony_cistatic void __kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd, 38918c2ecf20Sopenharmony_ci union kvm_mmu_page_role new_role, 38928c2ecf20Sopenharmony_ci bool skip_tlb_flush, bool skip_mmu_sync) 38938c2ecf20Sopenharmony_ci{ 38948c2ecf20Sopenharmony_ci if (!fast_pgd_switch(vcpu, new_pgd, new_role)) { 38958c2ecf20Sopenharmony_ci kvm_mmu_free_roots(vcpu, vcpu->arch.mmu, KVM_MMU_ROOT_CURRENT); 38968c2ecf20Sopenharmony_ci return; 38978c2ecf20Sopenharmony_ci } 38988c2ecf20Sopenharmony_ci 38998c2ecf20Sopenharmony_ci /* 39008c2ecf20Sopenharmony_ci * It's possible that the cached previous root page is obsolete because 39018c2ecf20Sopenharmony_ci * of a change in the MMU generation number. 
However, changing the 39028c2ecf20Sopenharmony_ci * generation number is accompanied by KVM_REQ_MMU_RELOAD, which will 39038c2ecf20Sopenharmony_ci * free the root set here and allocate a new one. 39048c2ecf20Sopenharmony_ci */ 39058c2ecf20Sopenharmony_ci kvm_make_request(KVM_REQ_LOAD_MMU_PGD, vcpu); 39068c2ecf20Sopenharmony_ci 39078c2ecf20Sopenharmony_ci if (!skip_mmu_sync || force_flush_and_sync_on_reuse) 39088c2ecf20Sopenharmony_ci kvm_make_request(KVM_REQ_MMU_SYNC, vcpu); 39098c2ecf20Sopenharmony_ci if (!skip_tlb_flush || force_flush_and_sync_on_reuse) 39108c2ecf20Sopenharmony_ci kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu); 39118c2ecf20Sopenharmony_ci 39128c2ecf20Sopenharmony_ci /* 39138c2ecf20Sopenharmony_ci * The last MMIO access's GVA and GPA are cached in the VCPU. When 39148c2ecf20Sopenharmony_ci * switching to a new CR3, that GVA->GPA mapping may no longer be 39158c2ecf20Sopenharmony_ci * valid. So clear any cached MMIO info even when we don't need to sync 39168c2ecf20Sopenharmony_ci * the shadow page tables. 39178c2ecf20Sopenharmony_ci */ 39188c2ecf20Sopenharmony_ci vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY); 39198c2ecf20Sopenharmony_ci 39208c2ecf20Sopenharmony_ci /* 39218c2ecf20Sopenharmony_ci * If this is a direct root page, it doesn't have a write flooding 39228c2ecf20Sopenharmony_ci * count. Otherwise, clear the write flooding count. 39238c2ecf20Sopenharmony_ci */ 39248c2ecf20Sopenharmony_ci if (!new_role.direct) 39258c2ecf20Sopenharmony_ci __clear_sp_write_flooding_count( 39268c2ecf20Sopenharmony_ci to_shadow_page(vcpu->arch.mmu->root_hpa)); 39278c2ecf20Sopenharmony_ci} 39288c2ecf20Sopenharmony_ci 39298c2ecf20Sopenharmony_civoid kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd, bool skip_tlb_flush, 39308c2ecf20Sopenharmony_ci bool skip_mmu_sync) 39318c2ecf20Sopenharmony_ci{ 39328c2ecf20Sopenharmony_ci __kvm_mmu_new_pgd(vcpu, new_pgd, kvm_mmu_calc_root_page_role(vcpu), 39338c2ecf20Sopenharmony_ci skip_tlb_flush, skip_mmu_sync); 39348c2ecf20Sopenharmony_ci} 39358c2ecf20Sopenharmony_ciEXPORT_SYMBOL_GPL(kvm_mmu_new_pgd); 39368c2ecf20Sopenharmony_ci 39378c2ecf20Sopenharmony_cistatic unsigned long get_cr3(struct kvm_vcpu *vcpu) 39388c2ecf20Sopenharmony_ci{ 39398c2ecf20Sopenharmony_ci return kvm_read_cr3(vcpu); 39408c2ecf20Sopenharmony_ci} 39418c2ecf20Sopenharmony_ci 39428c2ecf20Sopenharmony_cistatic bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn, 39438c2ecf20Sopenharmony_ci unsigned int access, int *nr_present) 39448c2ecf20Sopenharmony_ci{ 39458c2ecf20Sopenharmony_ci if (unlikely(is_mmio_spte(*sptep))) { 39468c2ecf20Sopenharmony_ci if (gfn != get_mmio_spte_gfn(*sptep)) { 39478c2ecf20Sopenharmony_ci mmu_spte_clear_no_track(sptep); 39488c2ecf20Sopenharmony_ci return true; 39498c2ecf20Sopenharmony_ci } 39508c2ecf20Sopenharmony_ci 39518c2ecf20Sopenharmony_ci (*nr_present)++; 39528c2ecf20Sopenharmony_ci mark_mmio_spte(vcpu, sptep, gfn, access); 39538c2ecf20Sopenharmony_ci return true; 39548c2ecf20Sopenharmony_ci } 39558c2ecf20Sopenharmony_ci 39568c2ecf20Sopenharmony_ci return false; 39578c2ecf20Sopenharmony_ci} 39588c2ecf20Sopenharmony_ci 39598c2ecf20Sopenharmony_cistatic inline bool is_last_gpte(struct kvm_mmu *mmu, 39608c2ecf20Sopenharmony_ci unsigned level, unsigned gpte) 39618c2ecf20Sopenharmony_ci{ 39628c2ecf20Sopenharmony_ci /* 39638c2ecf20Sopenharmony_ci * The RHS has bit 7 set iff level < mmu->last_nonleaf_level. 
39648c2ecf20Sopenharmony_ci * If it is clear, there are no large pages at this level, so clear 39658c2ecf20Sopenharmony_ci * PT_PAGE_SIZE_MASK in gpte if that is the case. 39668c2ecf20Sopenharmony_ci */ 39678c2ecf20Sopenharmony_ci gpte &= level - mmu->last_nonleaf_level; 39688c2ecf20Sopenharmony_ci 39698c2ecf20Sopenharmony_ci /* 39708c2ecf20Sopenharmony_ci * PG_LEVEL_4K always terminates. The RHS has bit 7 set 39718c2ecf20Sopenharmony_ci * iff level <= PG_LEVEL_4K, which for our purpose means 39728c2ecf20Sopenharmony_ci * level == PG_LEVEL_4K; set PT_PAGE_SIZE_MASK in gpte then. 39738c2ecf20Sopenharmony_ci */ 39748c2ecf20Sopenharmony_ci gpte |= level - PG_LEVEL_4K - 1; 39758c2ecf20Sopenharmony_ci 39768c2ecf20Sopenharmony_ci return gpte & PT_PAGE_SIZE_MASK; 39778c2ecf20Sopenharmony_ci} 39788c2ecf20Sopenharmony_ci 39798c2ecf20Sopenharmony_ci#define PTTYPE_EPT 18 /* arbitrary */ 39808c2ecf20Sopenharmony_ci#define PTTYPE PTTYPE_EPT 39818c2ecf20Sopenharmony_ci#include "paging_tmpl.h" 39828c2ecf20Sopenharmony_ci#undef PTTYPE 39838c2ecf20Sopenharmony_ci 39848c2ecf20Sopenharmony_ci#define PTTYPE 64 39858c2ecf20Sopenharmony_ci#include "paging_tmpl.h" 39868c2ecf20Sopenharmony_ci#undef PTTYPE 39878c2ecf20Sopenharmony_ci 39888c2ecf20Sopenharmony_ci#define PTTYPE 32 39898c2ecf20Sopenharmony_ci#include "paging_tmpl.h" 39908c2ecf20Sopenharmony_ci#undef PTTYPE 39918c2ecf20Sopenharmony_ci 39928c2ecf20Sopenharmony_cistatic void 39938c2ecf20Sopenharmony_ci__reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, 39948c2ecf20Sopenharmony_ci struct rsvd_bits_validate *rsvd_check, 39958c2ecf20Sopenharmony_ci int maxphyaddr, int level, bool nx, bool gbpages, 39968c2ecf20Sopenharmony_ci bool pse, bool amd) 39978c2ecf20Sopenharmony_ci{ 39988c2ecf20Sopenharmony_ci u64 exb_bit_rsvd = 0; 39998c2ecf20Sopenharmony_ci u64 gbpages_bit_rsvd = 0; 40008c2ecf20Sopenharmony_ci u64 nonleaf_bit8_rsvd = 0; 40018c2ecf20Sopenharmony_ci 40028c2ecf20Sopenharmony_ci rsvd_check->bad_mt_xwr = 0; 40038c2ecf20Sopenharmony_ci 40048c2ecf20Sopenharmony_ci if (!nx) 40058c2ecf20Sopenharmony_ci exb_bit_rsvd = rsvd_bits(63, 63); 40068c2ecf20Sopenharmony_ci if (!gbpages) 40078c2ecf20Sopenharmony_ci gbpages_bit_rsvd = rsvd_bits(7, 7); 40088c2ecf20Sopenharmony_ci 40098c2ecf20Sopenharmony_ci /* 40108c2ecf20Sopenharmony_ci * Non-leaf PML4Es and PDPEs reserve bit 8 (which would be the G bit for 40118c2ecf20Sopenharmony_ci * leaf entries) on AMD CPUs only. 
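 * (Intel CPUs simply ignore bit 8 in non-leaf entries, hence the mask
 * is applied only when @amd is true.)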
40128c2ecf20Sopenharmony_ci */ 40138c2ecf20Sopenharmony_ci if (amd) 40148c2ecf20Sopenharmony_ci nonleaf_bit8_rsvd = rsvd_bits(8, 8); 40158c2ecf20Sopenharmony_ci 40168c2ecf20Sopenharmony_ci switch (level) { 40178c2ecf20Sopenharmony_ci case PT32_ROOT_LEVEL: 40188c2ecf20Sopenharmony_ci /* no rsvd bits for 2 level 4K page table entries */ 40198c2ecf20Sopenharmony_ci rsvd_check->rsvd_bits_mask[0][1] = 0; 40208c2ecf20Sopenharmony_ci rsvd_check->rsvd_bits_mask[0][0] = 0; 40218c2ecf20Sopenharmony_ci rsvd_check->rsvd_bits_mask[1][0] = 40228c2ecf20Sopenharmony_ci rsvd_check->rsvd_bits_mask[0][0]; 40238c2ecf20Sopenharmony_ci 40248c2ecf20Sopenharmony_ci if (!pse) { 40258c2ecf20Sopenharmony_ci rsvd_check->rsvd_bits_mask[1][1] = 0; 40268c2ecf20Sopenharmony_ci break; 40278c2ecf20Sopenharmony_ci } 40288c2ecf20Sopenharmony_ci 40298c2ecf20Sopenharmony_ci if (is_cpuid_PSE36()) 40308c2ecf20Sopenharmony_ci /* 36bits PSE 4MB page */ 40318c2ecf20Sopenharmony_ci rsvd_check->rsvd_bits_mask[1][1] = rsvd_bits(17, 21); 40328c2ecf20Sopenharmony_ci else 40338c2ecf20Sopenharmony_ci /* 32 bits PSE 4MB page */ 40348c2ecf20Sopenharmony_ci rsvd_check->rsvd_bits_mask[1][1] = rsvd_bits(13, 21); 40358c2ecf20Sopenharmony_ci break; 40368c2ecf20Sopenharmony_ci case PT32E_ROOT_LEVEL: 40378c2ecf20Sopenharmony_ci rsvd_check->rsvd_bits_mask[0][2] = 40388c2ecf20Sopenharmony_ci rsvd_bits(maxphyaddr, 63) | 40398c2ecf20Sopenharmony_ci rsvd_bits(5, 8) | rsvd_bits(1, 2); /* PDPTE */ 40408c2ecf20Sopenharmony_ci rsvd_check->rsvd_bits_mask[0][1] = exb_bit_rsvd | 40418c2ecf20Sopenharmony_ci rsvd_bits(maxphyaddr, 62); /* PDE */ 40428c2ecf20Sopenharmony_ci rsvd_check->rsvd_bits_mask[0][0] = exb_bit_rsvd | 40438c2ecf20Sopenharmony_ci rsvd_bits(maxphyaddr, 62); /* PTE */ 40448c2ecf20Sopenharmony_ci rsvd_check->rsvd_bits_mask[1][1] = exb_bit_rsvd | 40458c2ecf20Sopenharmony_ci rsvd_bits(maxphyaddr, 62) | 40468c2ecf20Sopenharmony_ci rsvd_bits(13, 20); /* large page */ 40478c2ecf20Sopenharmony_ci rsvd_check->rsvd_bits_mask[1][0] = 40488c2ecf20Sopenharmony_ci rsvd_check->rsvd_bits_mask[0][0]; 40498c2ecf20Sopenharmony_ci break; 40508c2ecf20Sopenharmony_ci case PT64_ROOT_5LEVEL: 40518c2ecf20Sopenharmony_ci rsvd_check->rsvd_bits_mask[0][4] = exb_bit_rsvd | 40528c2ecf20Sopenharmony_ci nonleaf_bit8_rsvd | rsvd_bits(7, 7) | 40538c2ecf20Sopenharmony_ci rsvd_bits(maxphyaddr, 51); 40548c2ecf20Sopenharmony_ci rsvd_check->rsvd_bits_mask[1][4] = 40558c2ecf20Sopenharmony_ci rsvd_check->rsvd_bits_mask[0][4]; 40568c2ecf20Sopenharmony_ci fallthrough; 40578c2ecf20Sopenharmony_ci case PT64_ROOT_4LEVEL: 40588c2ecf20Sopenharmony_ci rsvd_check->rsvd_bits_mask[0][3] = exb_bit_rsvd | 40598c2ecf20Sopenharmony_ci nonleaf_bit8_rsvd | rsvd_bits(7, 7) | 40608c2ecf20Sopenharmony_ci rsvd_bits(maxphyaddr, 51); 40618c2ecf20Sopenharmony_ci rsvd_check->rsvd_bits_mask[0][2] = exb_bit_rsvd | 40628c2ecf20Sopenharmony_ci gbpages_bit_rsvd | 40638c2ecf20Sopenharmony_ci rsvd_bits(maxphyaddr, 51); 40648c2ecf20Sopenharmony_ci rsvd_check->rsvd_bits_mask[0][1] = exb_bit_rsvd | 40658c2ecf20Sopenharmony_ci rsvd_bits(maxphyaddr, 51); 40668c2ecf20Sopenharmony_ci rsvd_check->rsvd_bits_mask[0][0] = exb_bit_rsvd | 40678c2ecf20Sopenharmony_ci rsvd_bits(maxphyaddr, 51); 40688c2ecf20Sopenharmony_ci rsvd_check->rsvd_bits_mask[1][3] = 40698c2ecf20Sopenharmony_ci rsvd_check->rsvd_bits_mask[0][3]; 40708c2ecf20Sopenharmony_ci rsvd_check->rsvd_bits_mask[1][2] = exb_bit_rsvd | 40718c2ecf20Sopenharmony_ci gbpages_bit_rsvd | rsvd_bits(maxphyaddr, 51) | 40728c2ecf20Sopenharmony_ci rsvd_bits(13, 29); 
40738c2ecf20Sopenharmony_ci rsvd_check->rsvd_bits_mask[1][1] = exb_bit_rsvd | 40748c2ecf20Sopenharmony_ci rsvd_bits(maxphyaddr, 51) | 40758c2ecf20Sopenharmony_ci rsvd_bits(13, 20); /* large page */ 40768c2ecf20Sopenharmony_ci rsvd_check->rsvd_bits_mask[1][0] = 40778c2ecf20Sopenharmony_ci rsvd_check->rsvd_bits_mask[0][0]; 40788c2ecf20Sopenharmony_ci break; 40798c2ecf20Sopenharmony_ci } 40808c2ecf20Sopenharmony_ci} 40818c2ecf20Sopenharmony_ci 40828c2ecf20Sopenharmony_cistatic void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, 40838c2ecf20Sopenharmony_ci struct kvm_mmu *context) 40848c2ecf20Sopenharmony_ci{ 40858c2ecf20Sopenharmony_ci __reset_rsvds_bits_mask(vcpu, &context->guest_rsvd_check, 40868c2ecf20Sopenharmony_ci cpuid_maxphyaddr(vcpu), context->root_level, 40878c2ecf20Sopenharmony_ci context->nx, 40888c2ecf20Sopenharmony_ci guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES), 40898c2ecf20Sopenharmony_ci is_pse(vcpu), 40908c2ecf20Sopenharmony_ci guest_cpuid_is_amd_or_hygon(vcpu)); 40918c2ecf20Sopenharmony_ci} 40928c2ecf20Sopenharmony_ci 40938c2ecf20Sopenharmony_cistatic void 40948c2ecf20Sopenharmony_ci__reset_rsvds_bits_mask_ept(struct rsvd_bits_validate *rsvd_check, 40958c2ecf20Sopenharmony_ci int maxphyaddr, bool execonly) 40968c2ecf20Sopenharmony_ci{ 40978c2ecf20Sopenharmony_ci u64 bad_mt_xwr; 40988c2ecf20Sopenharmony_ci 40998c2ecf20Sopenharmony_ci rsvd_check->rsvd_bits_mask[0][4] = 41008c2ecf20Sopenharmony_ci rsvd_bits(maxphyaddr, 51) | rsvd_bits(3, 7); 41018c2ecf20Sopenharmony_ci rsvd_check->rsvd_bits_mask[0][3] = 41028c2ecf20Sopenharmony_ci rsvd_bits(maxphyaddr, 51) | rsvd_bits(3, 7); 41038c2ecf20Sopenharmony_ci rsvd_check->rsvd_bits_mask[0][2] = 41048c2ecf20Sopenharmony_ci rsvd_bits(maxphyaddr, 51) | rsvd_bits(3, 6); 41058c2ecf20Sopenharmony_ci rsvd_check->rsvd_bits_mask[0][1] = 41068c2ecf20Sopenharmony_ci rsvd_bits(maxphyaddr, 51) | rsvd_bits(3, 6); 41078c2ecf20Sopenharmony_ci rsvd_check->rsvd_bits_mask[0][0] = rsvd_bits(maxphyaddr, 51); 41088c2ecf20Sopenharmony_ci 41098c2ecf20Sopenharmony_ci /* large page */ 41108c2ecf20Sopenharmony_ci rsvd_check->rsvd_bits_mask[1][4] = rsvd_check->rsvd_bits_mask[0][4]; 41118c2ecf20Sopenharmony_ci rsvd_check->rsvd_bits_mask[1][3] = rsvd_check->rsvd_bits_mask[0][3]; 41128c2ecf20Sopenharmony_ci rsvd_check->rsvd_bits_mask[1][2] = 41138c2ecf20Sopenharmony_ci rsvd_bits(maxphyaddr, 51) | rsvd_bits(12, 29); 41148c2ecf20Sopenharmony_ci rsvd_check->rsvd_bits_mask[1][1] = 41158c2ecf20Sopenharmony_ci rsvd_bits(maxphyaddr, 51) | rsvd_bits(12, 20); 41168c2ecf20Sopenharmony_ci rsvd_check->rsvd_bits_mask[1][0] = rsvd_check->rsvd_bits_mask[0][0]; 41178c2ecf20Sopenharmony_ci 41188c2ecf20Sopenharmony_ci bad_mt_xwr = 0xFFull << (2 * 8); /* bits 3..5 must not be 2 */ 41198c2ecf20Sopenharmony_ci bad_mt_xwr |= 0xFFull << (3 * 8); /* bits 3..5 must not be 3 */ 41208c2ecf20Sopenharmony_ci bad_mt_xwr |= 0xFFull << (7 * 8); /* bits 3..5 must not be 7 */ 41218c2ecf20Sopenharmony_ci bad_mt_xwr |= REPEAT_BYTE(1ull << 2); /* bits 0..2 must not be 010 */ 41228c2ecf20Sopenharmony_ci bad_mt_xwr |= REPEAT_BYTE(1ull << 6); /* bits 0..2 must not be 110 */ 41238c2ecf20Sopenharmony_ci if (!execonly) { 41248c2ecf20Sopenharmony_ci /* bits 0..2 must not be 100 unless VMX capabilities allow it */ 41258c2ecf20Sopenharmony_ci bad_mt_xwr |= REPEAT_BYTE(1ull << 4); 41268c2ecf20Sopenharmony_ci } 41278c2ecf20Sopenharmony_ci rsvd_check->bad_mt_xwr = bad_mt_xwr; 41288c2ecf20Sopenharmony_ci} 41298c2ecf20Sopenharmony_ci 41308c2ecf20Sopenharmony_cistatic void reset_rsvds_bits_mask_ept(struct kvm_vcpu 
*vcpu, 41318c2ecf20Sopenharmony_ci struct kvm_mmu *context, bool execonly) 41328c2ecf20Sopenharmony_ci{ 41338c2ecf20Sopenharmony_ci __reset_rsvds_bits_mask_ept(&context->guest_rsvd_check, 41348c2ecf20Sopenharmony_ci cpuid_maxphyaddr(vcpu), execonly); 41358c2ecf20Sopenharmony_ci} 41368c2ecf20Sopenharmony_ci 41378c2ecf20Sopenharmony_ci/* 41388c2ecf20Sopenharmony_ci * The page table on the host is the shadow of the page table in the 41398c2ecf20Sopenharmony_ci * guest (or in an AMD nested guest); its MMU features follow the 41408c2ecf20Sopenharmony_ci * guest's features exactly. 41418c2ecf20Sopenharmony_ci */ 41428c2ecf20Sopenharmony_civoid 41438c2ecf20Sopenharmony_cireset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context) 41448c2ecf20Sopenharmony_ci{ 41458c2ecf20Sopenharmony_ci /* 41468c2ecf20Sopenharmony_ci * KVM uses NX when TDP is disabled to handle a variety of scenarios, 41478c2ecf20Sopenharmony_ci * notably for huge SPTEs if iTLB multi-hit mitigation is enabled and 41488c2ecf20Sopenharmony_ci * to generate correct permissions for CR0.WP=0/CR4.SMEP=1/EFER.NX=0. 41498c2ecf20Sopenharmony_ci * The iTLB multi-hit workaround can be toggled at any time, so assume 41508c2ecf20Sopenharmony_ci * NX can be used by any non-nested shadow MMU to avoid having to reset 41518c2ecf20Sopenharmony_ci * MMU contexts. Note, KVM forces EFER.NX=1 when TDP is disabled. 41528c2ecf20Sopenharmony_ci */ 41538c2ecf20Sopenharmony_ci bool uses_nx = context->nx || !tdp_enabled || 41548c2ecf20Sopenharmony_ci context->mmu_role.base.smep_andnot_wp; 41558c2ecf20Sopenharmony_ci struct rsvd_bits_validate *shadow_zero_check; 41568c2ecf20Sopenharmony_ci int i; 41578c2ecf20Sopenharmony_ci 41588c2ecf20Sopenharmony_ci /* 41598c2ecf20Sopenharmony_ci * Passing "true" to the last argument is okay; it adds a check 41608c2ecf20Sopenharmony_ci * on bit 8 of the SPTEs which KVM doesn't use anyway. 41618c2ecf20Sopenharmony_ci */ 41628c2ecf20Sopenharmony_ci shadow_zero_check = &context->shadow_zero_check; 41638c2ecf20Sopenharmony_ci __reset_rsvds_bits_mask(vcpu, shadow_zero_check, 41648c2ecf20Sopenharmony_ci shadow_phys_bits, 41658c2ecf20Sopenharmony_ci context->shadow_root_level, uses_nx, 41668c2ecf20Sopenharmony_ci guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES), 41678c2ecf20Sopenharmony_ci is_pse(vcpu), true); 41688c2ecf20Sopenharmony_ci 41698c2ecf20Sopenharmony_ci if (!shadow_me_mask) 41708c2ecf20Sopenharmony_ci return; 41718c2ecf20Sopenharmony_ci 41728c2ecf20Sopenharmony_ci for (i = context->shadow_root_level; --i >= 0;) { 41738c2ecf20Sopenharmony_ci shadow_zero_check->rsvd_bits_mask[0][i] &= ~shadow_me_mask; 41748c2ecf20Sopenharmony_ci shadow_zero_check->rsvd_bits_mask[1][i] &= ~shadow_me_mask; 41758c2ecf20Sopenharmony_ci } 41768c2ecf20Sopenharmony_ci 41778c2ecf20Sopenharmony_ci} 41788c2ecf20Sopenharmony_ciEXPORT_SYMBOL_GPL(reset_shadow_zero_bits_mask); 41798c2ecf20Sopenharmony_ci 41808c2ecf20Sopenharmony_cistatic inline bool boot_cpu_is_amd(void) 41818c2ecf20Sopenharmony_ci{ 41828c2ecf20Sopenharmony_ci WARN_ON_ONCE(!tdp_enabled); 41838c2ecf20Sopenharmony_ci return shadow_x_mask == 0; 41848c2ecf20Sopenharmony_ci} 41858c2ecf20Sopenharmony_ci 41868c2ecf20Sopenharmony_ci/* 41878c2ecf20Sopenharmony_ci * The direct page table on the host uses as many MMU features as 41888c2ecf20Sopenharmony_ci * possible; however, KVM currently does not apply execution-protection. 
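 * (Hence the calls below pass nx=false on AMD and execonly=false on
 * Intel.)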
41898c2ecf20Sopenharmony_ci */ 41908c2ecf20Sopenharmony_cistatic void 41918c2ecf20Sopenharmony_cireset_tdp_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, 41928c2ecf20Sopenharmony_ci struct kvm_mmu *context) 41938c2ecf20Sopenharmony_ci{ 41948c2ecf20Sopenharmony_ci struct rsvd_bits_validate *shadow_zero_check; 41958c2ecf20Sopenharmony_ci int i; 41968c2ecf20Sopenharmony_ci 41978c2ecf20Sopenharmony_ci shadow_zero_check = &context->shadow_zero_check; 41988c2ecf20Sopenharmony_ci 41998c2ecf20Sopenharmony_ci if (boot_cpu_is_amd()) 42008c2ecf20Sopenharmony_ci __reset_rsvds_bits_mask(vcpu, shadow_zero_check, 42018c2ecf20Sopenharmony_ci shadow_phys_bits, 42028c2ecf20Sopenharmony_ci context->shadow_root_level, false, 42038c2ecf20Sopenharmony_ci boot_cpu_has(X86_FEATURE_GBPAGES), 42048c2ecf20Sopenharmony_ci true, true); 42058c2ecf20Sopenharmony_ci else 42068c2ecf20Sopenharmony_ci __reset_rsvds_bits_mask_ept(shadow_zero_check, 42078c2ecf20Sopenharmony_ci shadow_phys_bits, 42088c2ecf20Sopenharmony_ci false); 42098c2ecf20Sopenharmony_ci 42108c2ecf20Sopenharmony_ci if (!shadow_me_mask) 42118c2ecf20Sopenharmony_ci return; 42128c2ecf20Sopenharmony_ci 42138c2ecf20Sopenharmony_ci for (i = context->shadow_root_level; --i >= 0;) { 42148c2ecf20Sopenharmony_ci shadow_zero_check->rsvd_bits_mask[0][i] &= ~shadow_me_mask; 42158c2ecf20Sopenharmony_ci shadow_zero_check->rsvd_bits_mask[1][i] &= ~shadow_me_mask; 42168c2ecf20Sopenharmony_ci } 42178c2ecf20Sopenharmony_ci} 42188c2ecf20Sopenharmony_ci 42198c2ecf20Sopenharmony_ci/* 42208c2ecf20Sopenharmony_ci * Same as the comments in reset_shadow_zero_bits_mask(), except this 42218c2ecf20Sopenharmony_ci * sets up the shadow page table for an Intel nested (EPT) guest. 42228c2ecf20Sopenharmony_ci */ 42238c2ecf20Sopenharmony_cistatic void 42248c2ecf20Sopenharmony_cireset_ept_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, 42258c2ecf20Sopenharmony_ci struct kvm_mmu *context, bool execonly) 42268c2ecf20Sopenharmony_ci{ 42278c2ecf20Sopenharmony_ci __reset_rsvds_bits_mask_ept(&context->shadow_zero_check, 42288c2ecf20Sopenharmony_ci shadow_phys_bits, execonly); 42298c2ecf20Sopenharmony_ci} 42308c2ecf20Sopenharmony_ci 42318c2ecf20Sopenharmony_ci#define BYTE_MASK(access) \ 42328c2ecf20Sopenharmony_ci ((1 & (access) ? 2 : 0) | \ 42338c2ecf20Sopenharmony_ci (2 & (access) ? 4 : 0) | \ 42348c2ecf20Sopenharmony_ci (3 & (access) ? 8 : 0) | \ 42358c2ecf20Sopenharmony_ci (4 & (access) ? 16 : 0) | \ 42368c2ecf20Sopenharmony_ci (5 & (access) ? 32 : 0) | \ 42378c2ecf20Sopenharmony_ci (6 & (access) ? 64 : 0) | \ 42388c2ecf20Sopenharmony_ci (7 & (access) ? 
128 : 0)) 42398c2ecf20Sopenharmony_ci 42408c2ecf20Sopenharmony_ci 42418c2ecf20Sopenharmony_cistatic void update_permission_bitmask(struct kvm_vcpu *vcpu, 42428c2ecf20Sopenharmony_ci struct kvm_mmu *mmu, bool ept) 42438c2ecf20Sopenharmony_ci{ 42448c2ecf20Sopenharmony_ci unsigned byte; 42458c2ecf20Sopenharmony_ci 42468c2ecf20Sopenharmony_ci const u8 x = BYTE_MASK(ACC_EXEC_MASK); 42478c2ecf20Sopenharmony_ci const u8 w = BYTE_MASK(ACC_WRITE_MASK); 42488c2ecf20Sopenharmony_ci const u8 u = BYTE_MASK(ACC_USER_MASK); 42498c2ecf20Sopenharmony_ci 42508c2ecf20Sopenharmony_ci bool cr4_smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP) != 0; 42518c2ecf20Sopenharmony_ci bool cr4_smap = kvm_read_cr4_bits(vcpu, X86_CR4_SMAP) != 0; 42528c2ecf20Sopenharmony_ci bool cr0_wp = is_write_protection(vcpu); 42538c2ecf20Sopenharmony_ci 42548c2ecf20Sopenharmony_ci for (byte = 0; byte < ARRAY_SIZE(mmu->permissions); ++byte) { 42558c2ecf20Sopenharmony_ci unsigned pfec = byte << 1; 42568c2ecf20Sopenharmony_ci 42578c2ecf20Sopenharmony_ci /* 42588c2ecf20Sopenharmony_ci * Each "*f" variable has a 1 bit for each UWX value 42598c2ecf20Sopenharmony_ci * that causes a fault with the given PFEC. 42608c2ecf20Sopenharmony_ci */ 42618c2ecf20Sopenharmony_ci 42628c2ecf20Sopenharmony_ci /* Faults from writes to non-writable pages */ 42638c2ecf20Sopenharmony_ci u8 wf = (pfec & PFERR_WRITE_MASK) ? (u8)~w : 0; 42648c2ecf20Sopenharmony_ci /* Faults from user mode accesses to supervisor pages */ 42658c2ecf20Sopenharmony_ci u8 uf = (pfec & PFERR_USER_MASK) ? (u8)~u : 0; 42668c2ecf20Sopenharmony_ci /* Faults from fetches of non-executable pages*/ 42678c2ecf20Sopenharmony_ci u8 ff = (pfec & PFERR_FETCH_MASK) ? (u8)~x : 0; 42688c2ecf20Sopenharmony_ci /* Faults from kernel mode fetches of user pages */ 42698c2ecf20Sopenharmony_ci u8 smepf = 0; 42708c2ecf20Sopenharmony_ci /* Faults from kernel mode accesses of user pages */ 42718c2ecf20Sopenharmony_ci u8 smapf = 0; 42728c2ecf20Sopenharmony_ci 42738c2ecf20Sopenharmony_ci if (!ept) { 42748c2ecf20Sopenharmony_ci /* Faults from kernel mode accesses to user pages */ 42758c2ecf20Sopenharmony_ci u8 kf = (pfec & PFERR_USER_MASK) ? 0 : u; 42768c2ecf20Sopenharmony_ci 42778c2ecf20Sopenharmony_ci /* Not really needed: !nx will cause pte.nx to fault */ 42788c2ecf20Sopenharmony_ci if (!mmu->nx) 42798c2ecf20Sopenharmony_ci ff = 0; 42808c2ecf20Sopenharmony_ci 42818c2ecf20Sopenharmony_ci /* Allow supervisor writes if !cr0.wp */ 42828c2ecf20Sopenharmony_ci if (!cr0_wp) 42838c2ecf20Sopenharmony_ci wf = (pfec & PFERR_USER_MASK) ? wf : 0; 42848c2ecf20Sopenharmony_ci 42858c2ecf20Sopenharmony_ci /* Disallow supervisor fetches of user code if cr4.smep */ 42868c2ecf20Sopenharmony_ci if (cr4_smep) 42878c2ecf20Sopenharmony_ci smepf = (pfec & PFERR_FETCH_MASK) ? kf : 0; 42888c2ecf20Sopenharmony_ci 42898c2ecf20Sopenharmony_ci /* 42908c2ecf20Sopenharmony_ci * SMAP:kernel-mode data accesses from user-mode 42918c2ecf20Sopenharmony_ci * mappings should fault. A fault is considered 42928c2ecf20Sopenharmony_ci * as a SMAP violation if all of the following 42938c2ecf20Sopenharmony_ci * conditions are true: 42948c2ecf20Sopenharmony_ci * - X86_CR4_SMAP is set in CR4 42958c2ecf20Sopenharmony_ci * - A user page is accessed 42968c2ecf20Sopenharmony_ci * - The access is not a fetch 42978c2ecf20Sopenharmony_ci * - Page fault in kernel mode 42988c2ecf20Sopenharmony_ci * - if CPL = 3 or X86_EFLAGS_AC is clear 42998c2ecf20Sopenharmony_ci * 43008c2ecf20Sopenharmony_ci * Here, we cover the first three conditions. 
43018c2ecf20Sopenharmony_ci * The fourth is computed dynamically in permission_fault(); 43028c2ecf20Sopenharmony_ci * PFERR_RSVD_MASK bit will be set in PFEC if the access is 43038c2ecf20Sopenharmony_ci * *not* subject to SMAP restrictions. 43048c2ecf20Sopenharmony_ci */ 43058c2ecf20Sopenharmony_ci if (cr4_smap) 43068c2ecf20Sopenharmony_ci smapf = (pfec & (PFERR_RSVD_MASK|PFERR_FETCH_MASK)) ? 0 : kf; 43078c2ecf20Sopenharmony_ci } 43088c2ecf20Sopenharmony_ci 43098c2ecf20Sopenharmony_ci mmu->permissions[byte] = ff | uf | wf | smepf | smapf; 43108c2ecf20Sopenharmony_ci } 43118c2ecf20Sopenharmony_ci} 43128c2ecf20Sopenharmony_ci 43138c2ecf20Sopenharmony_ci/* 43148c2ecf20Sopenharmony_ci* PKU is an additional mechanism by which the paging controls access to 43158c2ecf20Sopenharmony_ci* user-mode addresses based on the value in the PKRU register. Protection 43168c2ecf20Sopenharmony_ci* key violations are reported through a bit in the page fault error code. 43178c2ecf20Sopenharmony_ci* Unlike other bits of the error code, the PK bit is not known at the 43188c2ecf20Sopenharmony_ci* call site of e.g. gva_to_gpa; it must be computed directly in 43198c2ecf20Sopenharmony_ci* permission_fault based on two bits of PKRU, on some machine state (CR4, 43208c2ecf20Sopenharmony_ci* CR0, EFER, CPL), and on other bits of the error code and the page tables. 43218c2ecf20Sopenharmony_ci* 43228c2ecf20Sopenharmony_ci* In particular the following conditions come from the error code, the 43238c2ecf20Sopenharmony_ci* page tables and the machine state: 43248c2ecf20Sopenharmony_ci* - PK is always zero unless CR4.PKE=1 and EFER.LMA=1 43258c2ecf20Sopenharmony_ci* - PK is always zero if RSVD=1 (reserved bit set) or F=1 (instruction fetch) 43268c2ecf20Sopenharmony_ci* - PK is always zero if U=0 in the page tables 43278c2ecf20Sopenharmony_ci* - PKRU.WD is ignored if CR0.WP=0 and the access is a supervisor access. 43288c2ecf20Sopenharmony_ci* 43298c2ecf20Sopenharmony_ci* The PKRU bitmask caches the result of these four conditions. The error 43308c2ecf20Sopenharmony_ci* code (minus the P bit) and the page table's U bit form an index into the 43318c2ecf20Sopenharmony_ci* PKRU bitmask. Two bits of the PKRU bitmask are then extracted and ANDed 43328c2ecf20Sopenharmony_ci* with the two bits of the PKRU register corresponding to the protection key. 43338c2ecf20Sopenharmony_ci* For the first three conditions above the bits will be 00, thus masking 43348c2ecf20Sopenharmony_ci* away both AD and WD. For all reads or if the last condition holds, WD 43358c2ecf20Sopenharmony_ci* only will be masked away. 43368c2ecf20Sopenharmony_ci*/ 43378c2ecf20Sopenharmony_cistatic void update_pkru_bitmask(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, 43388c2ecf20Sopenharmony_ci bool ept) 43398c2ecf20Sopenharmony_ci{ 43408c2ecf20Sopenharmony_ci unsigned bit; 43418c2ecf20Sopenharmony_ci bool wp; 43428c2ecf20Sopenharmony_ci 43438c2ecf20Sopenharmony_ci if (ept) { 43448c2ecf20Sopenharmony_ci mmu->pkru_mask = 0; 43458c2ecf20Sopenharmony_ci return; 43468c2ecf20Sopenharmony_ci } 43478c2ecf20Sopenharmony_ci 43488c2ecf20Sopenharmony_ci /* PKEY is enabled only if CR4.PKE and EFER.LMA are both set. 
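 * Otherwise pkru_mask is left zero and permission_fault() will never
 * report a protection-key fault.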
*/ 43498c2ecf20Sopenharmony_ci if (!kvm_read_cr4_bits(vcpu, X86_CR4_PKE) || !is_long_mode(vcpu)) { 43508c2ecf20Sopenharmony_ci mmu->pkru_mask = 0; 43518c2ecf20Sopenharmony_ci return; 43528c2ecf20Sopenharmony_ci } 43538c2ecf20Sopenharmony_ci 43548c2ecf20Sopenharmony_ci wp = is_write_protection(vcpu); 43558c2ecf20Sopenharmony_ci 43568c2ecf20Sopenharmony_ci for (bit = 0; bit < ARRAY_SIZE(mmu->permissions); ++bit) { 43578c2ecf20Sopenharmony_ci unsigned pfec, pkey_bits; 43588c2ecf20Sopenharmony_ci bool check_pkey, check_write, ff, uf, wf, pte_user; 43598c2ecf20Sopenharmony_ci 43608c2ecf20Sopenharmony_ci pfec = bit << 1; 43618c2ecf20Sopenharmony_ci ff = pfec & PFERR_FETCH_MASK; 43628c2ecf20Sopenharmony_ci uf = pfec & PFERR_USER_MASK; 43638c2ecf20Sopenharmony_ci wf = pfec & PFERR_WRITE_MASK; 43648c2ecf20Sopenharmony_ci 43658c2ecf20Sopenharmony_ci /* PFEC.RSVD is replaced by ACC_USER_MASK. */ 43668c2ecf20Sopenharmony_ci pte_user = pfec & PFERR_RSVD_MASK; 43678c2ecf20Sopenharmony_ci 43688c2ecf20Sopenharmony_ci /* 43698c2ecf20Sopenharmony_ci * Only need to check the access which is not an 43708c2ecf20Sopenharmony_ci * instruction fetch and is to a user page. 43718c2ecf20Sopenharmony_ci */ 43728c2ecf20Sopenharmony_ci check_pkey = (!ff && pte_user); 43738c2ecf20Sopenharmony_ci /* 43748c2ecf20Sopenharmony_ci * write access is controlled by PKRU if it is a 43758c2ecf20Sopenharmony_ci * user access or CR0.WP = 1. 43768c2ecf20Sopenharmony_ci */ 43778c2ecf20Sopenharmony_ci check_write = check_pkey && wf && (uf || wp); 43788c2ecf20Sopenharmony_ci 43798c2ecf20Sopenharmony_ci /* PKRU.AD stops both read and write access. */ 43808c2ecf20Sopenharmony_ci pkey_bits = !!check_pkey; 43818c2ecf20Sopenharmony_ci /* PKRU.WD stops write access. */ 43828c2ecf20Sopenharmony_ci pkey_bits |= (!!check_write) << 1; 43838c2ecf20Sopenharmony_ci 43848c2ecf20Sopenharmony_ci mmu->pkru_mask |= (pkey_bits & 3) << pfec; 43858c2ecf20Sopenharmony_ci } 43868c2ecf20Sopenharmony_ci} 43878c2ecf20Sopenharmony_ci 43888c2ecf20Sopenharmony_cistatic void update_last_nonleaf_level(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu) 43898c2ecf20Sopenharmony_ci{ 43908c2ecf20Sopenharmony_ci unsigned root_level = mmu->root_level; 43918c2ecf20Sopenharmony_ci 43928c2ecf20Sopenharmony_ci mmu->last_nonleaf_level = root_level; 43938c2ecf20Sopenharmony_ci if (root_level == PT32_ROOT_LEVEL && is_pse(vcpu)) 43948c2ecf20Sopenharmony_ci mmu->last_nonleaf_level++; 43958c2ecf20Sopenharmony_ci} 43968c2ecf20Sopenharmony_ci 43978c2ecf20Sopenharmony_cistatic void paging64_init_context_common(struct kvm_vcpu *vcpu, 43988c2ecf20Sopenharmony_ci struct kvm_mmu *context, 43998c2ecf20Sopenharmony_ci int level) 44008c2ecf20Sopenharmony_ci{ 44018c2ecf20Sopenharmony_ci context->nx = is_nx(vcpu); 44028c2ecf20Sopenharmony_ci context->root_level = level; 44038c2ecf20Sopenharmony_ci 44048c2ecf20Sopenharmony_ci reset_rsvds_bits_mask(vcpu, context); 44058c2ecf20Sopenharmony_ci update_permission_bitmask(vcpu, context, false); 44068c2ecf20Sopenharmony_ci update_pkru_bitmask(vcpu, context, false); 44078c2ecf20Sopenharmony_ci update_last_nonleaf_level(vcpu, context); 44088c2ecf20Sopenharmony_ci 44098c2ecf20Sopenharmony_ci MMU_WARN_ON(!is_pae(vcpu)); 44108c2ecf20Sopenharmony_ci context->page_fault = paging64_page_fault; 44118c2ecf20Sopenharmony_ci context->gva_to_gpa = paging64_gva_to_gpa; 44128c2ecf20Sopenharmony_ci context->sync_page = paging64_sync_page; 44138c2ecf20Sopenharmony_ci context->invlpg = paging64_invlpg; 44148c2ecf20Sopenharmony_ci context->shadow_root_level = level; 
44158c2ecf20Sopenharmony_ci context->direct_map = false; 44168c2ecf20Sopenharmony_ci} 44178c2ecf20Sopenharmony_ci 44188c2ecf20Sopenharmony_cistatic void paging64_init_context(struct kvm_vcpu *vcpu, 44198c2ecf20Sopenharmony_ci struct kvm_mmu *context) 44208c2ecf20Sopenharmony_ci{ 44218c2ecf20Sopenharmony_ci int root_level = is_la57_mode(vcpu) ? 44228c2ecf20Sopenharmony_ci PT64_ROOT_5LEVEL : PT64_ROOT_4LEVEL; 44238c2ecf20Sopenharmony_ci 44248c2ecf20Sopenharmony_ci paging64_init_context_common(vcpu, context, root_level); 44258c2ecf20Sopenharmony_ci} 44268c2ecf20Sopenharmony_ci 44278c2ecf20Sopenharmony_cistatic void paging32_init_context(struct kvm_vcpu *vcpu, 44288c2ecf20Sopenharmony_ci struct kvm_mmu *context) 44298c2ecf20Sopenharmony_ci{ 44308c2ecf20Sopenharmony_ci context->nx = false; 44318c2ecf20Sopenharmony_ci context->root_level = PT32_ROOT_LEVEL; 44328c2ecf20Sopenharmony_ci 44338c2ecf20Sopenharmony_ci reset_rsvds_bits_mask(vcpu, context); 44348c2ecf20Sopenharmony_ci update_permission_bitmask(vcpu, context, false); 44358c2ecf20Sopenharmony_ci update_pkru_bitmask(vcpu, context, false); 44368c2ecf20Sopenharmony_ci update_last_nonleaf_level(vcpu, context); 44378c2ecf20Sopenharmony_ci 44388c2ecf20Sopenharmony_ci context->page_fault = paging32_page_fault; 44398c2ecf20Sopenharmony_ci context->gva_to_gpa = paging32_gva_to_gpa; 44408c2ecf20Sopenharmony_ci context->sync_page = paging32_sync_page; 44418c2ecf20Sopenharmony_ci context->invlpg = paging32_invlpg; 44428c2ecf20Sopenharmony_ci context->shadow_root_level = PT32E_ROOT_LEVEL; 44438c2ecf20Sopenharmony_ci context->direct_map = false; 44448c2ecf20Sopenharmony_ci} 44458c2ecf20Sopenharmony_ci 44468c2ecf20Sopenharmony_cistatic void paging32E_init_context(struct kvm_vcpu *vcpu, 44478c2ecf20Sopenharmony_ci struct kvm_mmu *context) 44488c2ecf20Sopenharmony_ci{ 44498c2ecf20Sopenharmony_ci paging64_init_context_common(vcpu, context, PT32E_ROOT_LEVEL); 44508c2ecf20Sopenharmony_ci} 44518c2ecf20Sopenharmony_ci 44528c2ecf20Sopenharmony_cistatic union kvm_mmu_extended_role kvm_calc_mmu_role_ext(struct kvm_vcpu *vcpu) 44538c2ecf20Sopenharmony_ci{ 44548c2ecf20Sopenharmony_ci union kvm_mmu_extended_role ext = {0}; 44558c2ecf20Sopenharmony_ci 44568c2ecf20Sopenharmony_ci ext.cr0_pg = !!is_paging(vcpu); 44578c2ecf20Sopenharmony_ci ext.cr4_pae = !!is_pae(vcpu); 44588c2ecf20Sopenharmony_ci ext.cr4_smep = !!kvm_read_cr4_bits(vcpu, X86_CR4_SMEP); 44598c2ecf20Sopenharmony_ci ext.cr4_smap = !!kvm_read_cr4_bits(vcpu, X86_CR4_SMAP); 44608c2ecf20Sopenharmony_ci ext.cr4_pse = !!is_pse(vcpu); 44618c2ecf20Sopenharmony_ci ext.cr4_pke = !!kvm_read_cr4_bits(vcpu, X86_CR4_PKE); 44628c2ecf20Sopenharmony_ci ext.cr4_la57 = !!kvm_read_cr4_bits(vcpu, X86_CR4_LA57); 44638c2ecf20Sopenharmony_ci ext.maxphyaddr = cpuid_maxphyaddr(vcpu); 44648c2ecf20Sopenharmony_ci 44658c2ecf20Sopenharmony_ci ext.valid = 1; 44668c2ecf20Sopenharmony_ci 44678c2ecf20Sopenharmony_ci return ext; 44688c2ecf20Sopenharmony_ci} 44698c2ecf20Sopenharmony_ci 44708c2ecf20Sopenharmony_cistatic union kvm_mmu_role kvm_calc_mmu_role_common(struct kvm_vcpu *vcpu, 44718c2ecf20Sopenharmony_ci bool base_only) 44728c2ecf20Sopenharmony_ci{ 44738c2ecf20Sopenharmony_ci union kvm_mmu_role role = {0}; 44748c2ecf20Sopenharmony_ci 44758c2ecf20Sopenharmony_ci role.base.access = ACC_ALL; 44768c2ecf20Sopenharmony_ci role.base.nxe = !!is_nx(vcpu); 44778c2ecf20Sopenharmony_ci role.base.cr0_wp = is_write_protection(vcpu); 44788c2ecf20Sopenharmony_ci role.base.smm = is_smm(vcpu); 44798c2ecf20Sopenharmony_ci role.base.guest_mode = 
static inline int kvm_mmu_get_tdp_level(struct kvm_vcpu *vcpu)
{
	/* Use 5-level TDP if and only if it's useful/necessary. */
	if (max_tdp_level == 5 && cpuid_maxphyaddr(vcpu) <= 48)
		return 4;

	return max_tdp_level;
}

static union kvm_mmu_role
kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only)
{
	union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, base_only);

	role.base.ad_disabled = (shadow_accessed_mask == 0);
	role.base.level = kvm_mmu_get_tdp_level(vcpu);
	role.base.direct = true;
	role.base.gpte_is_8_bytes = true;

	return role;
}

static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu *context = &vcpu->arch.root_mmu;
	union kvm_mmu_role new_role =
		kvm_calc_tdp_mmu_root_page_role(vcpu, false);

	if (new_role.as_u64 == context->mmu_role.as_u64)
		return;

	context->mmu_role.as_u64 = new_role.as_u64;
	context->page_fault = kvm_tdp_page_fault;
	context->sync_page = nonpaging_sync_page;
	context->invlpg = NULL;
	context->shadow_root_level = kvm_mmu_get_tdp_level(vcpu);
	context->direct_map = true;
	context->get_guest_pgd = get_cr3;
	context->get_pdptr = kvm_pdptr_read;
	context->inject_page_fault = kvm_inject_page_fault;

	if (!is_paging(vcpu)) {
		context->nx = false;
		context->gva_to_gpa = nonpaging_gva_to_gpa;
		context->root_level = 0;
	} else if (is_long_mode(vcpu)) {
		context->nx = is_nx(vcpu);
		context->root_level = is_la57_mode(vcpu) ?
				PT64_ROOT_5LEVEL : PT64_ROOT_4LEVEL;
		reset_rsvds_bits_mask(vcpu, context);
		context->gva_to_gpa = paging64_gva_to_gpa;
	} else if (is_pae(vcpu)) {
		context->nx = is_nx(vcpu);
		context->root_level = PT32E_ROOT_LEVEL;
		reset_rsvds_bits_mask(vcpu, context);
		context->gva_to_gpa = paging64_gva_to_gpa;
	} else {
		context->nx = false;
		context->root_level = PT32_ROOT_LEVEL;
		reset_rsvds_bits_mask(vcpu, context);
		context->gva_to_gpa = paging32_gva_to_gpa;
	}

	update_permission_bitmask(vcpu, context, false);
	update_pkru_bitmask(vcpu, context, false);
	update_last_nonleaf_level(vcpu, context);
	reset_tdp_shadow_zero_bits_mask(vcpu, context);
}

static union kvm_mmu_role
kvm_calc_shadow_root_page_role_common(struct kvm_vcpu *vcpu, bool base_only)
{
	union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, base_only);

	role.base.smep_andnot_wp = role.ext.cr4_smep &&
		!is_write_protection(vcpu);
	role.base.smap_andnot_wp = role.ext.cr4_smap &&
		!is_write_protection(vcpu);
	role.base.gpte_is_8_bytes = !!is_pae(vcpu);

	return role;
}

static union kvm_mmu_role
kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only)
{
	union kvm_mmu_role role =
		kvm_calc_shadow_root_page_role_common(vcpu, base_only);

	role.base.direct = !is_paging(vcpu);

	if (!is_long_mode(vcpu))
		role.base.level = PT32E_ROOT_LEVEL;
	else if (is_la57_mode(vcpu))
		role.base.level = PT64_ROOT_5LEVEL;
	else
		role.base.level = PT64_ROOT_4LEVEL;

	return role;
}

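/*
 * Common initialization for the shadow MMU: dispatch on the guest's
 * CR0/CR4/EFER values to pick the paging mode being shadowed, then
 * install the new role and recompute the reserved-bit masks for the
 * shadow PTEs.
 */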
static void shadow_mmu_init_context(struct kvm_vcpu *vcpu, struct kvm_mmu *context,
				    u32 cr0, u32 cr4, u32 efer,
				    union kvm_mmu_role new_role)
{
	if (!(cr0 & X86_CR0_PG))
		nonpaging_init_context(vcpu, context);
	else if (efer & EFER_LMA)
		paging64_init_context(vcpu, context);
	else if (cr4 & X86_CR4_PAE)
		paging32E_init_context(vcpu, context);
	else
		paging32_init_context(vcpu, context);

	context->mmu_role.as_u64 = new_role.as_u64;
	reset_shadow_zero_bits_mask(vcpu, context);
}

static void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, u32 cr0, u32 cr4, u32 efer)
{
	struct kvm_mmu *context = &vcpu->arch.root_mmu;
	union kvm_mmu_role new_role =
		kvm_calc_shadow_mmu_root_page_role(vcpu, false);

	if (new_role.as_u64 != context->mmu_role.as_u64)
		shadow_mmu_init_context(vcpu, context, cr0, cr4, efer, new_role);
}

static union kvm_mmu_role
kvm_calc_shadow_npt_root_page_role(struct kvm_vcpu *vcpu)
{
	union kvm_mmu_role role =
		kvm_calc_shadow_root_page_role_common(vcpu, false);

	role.base.direct = false;
	role.base.level = kvm_mmu_get_tdp_level(vcpu);

	return role;
}

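/*
 * Initialize the MMU used to shadow L1's NPT.  The root is keyed by
 * nested_cr3, and the root level always follows the host's TDP
 * configuration rather than the level chosen by the common init helper;
 * see the in-function override.
 */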
void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, u32 cr0, u32 cr4, u32 efer,
			     gpa_t nested_cr3)
{
	struct kvm_mmu *context = &vcpu->arch.guest_mmu;
	union kvm_mmu_role new_role = kvm_calc_shadow_npt_root_page_role(vcpu);

	__kvm_mmu_new_pgd(vcpu, nested_cr3, new_role.base, false, false);

	if (new_role.as_u64 != context->mmu_role.as_u64) {
		shadow_mmu_init_context(vcpu, context, cr0, cr4, efer, new_role);

		/*
		 * Override the level set by the common init helper, nested TDP
		 * always uses the host's TDP configuration.
		 */
		context->shadow_root_level = new_role.base.level;
	}
}
EXPORT_SYMBOL_GPL(kvm_init_shadow_npt_mmu);

static union kvm_mmu_role
kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty,
				   bool execonly, u8 level)
{
	union kvm_mmu_role role = {0};

	/* SMM flag is inherited from root_mmu */
	role.base.smm = vcpu->arch.root_mmu.mmu_role.base.smm;

	role.base.level = level;
	role.base.gpte_is_8_bytes = true;
	role.base.direct = false;
	role.base.ad_disabled = !accessed_dirty;
	role.base.guest_mode = true;
	role.base.access = ACC_ALL;

	/*
	 * WP=1 and NOT_WP=1 is an impossible combination, use WP and the
	 * SMAP variation to denote shadow EPT entries.
	 */
	role.base.cr0_wp = true;
	role.base.smap_andnot_wp = true;

	role.ext = kvm_calc_mmu_role_ext(vcpu);
	role.ext.execonly = execonly;

	return role;
}

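/*
 * Initialize the MMU used to shadow L1's EPT tables.  The page-walk
 * level comes from the EPTP, and A/D bits are honored only if L1
 * enabled them in its EPTP.
 */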
void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
			     bool accessed_dirty, gpa_t new_eptp)
{
	struct kvm_mmu *context = &vcpu->arch.guest_mmu;
	u8 level = vmx_eptp_page_walk_level(new_eptp);
	union kvm_mmu_role new_role =
		kvm_calc_shadow_ept_root_page_role(vcpu, accessed_dirty,
						   execonly, level);

	__kvm_mmu_new_pgd(vcpu, new_eptp, new_role.base, true, true);

	if (new_role.as_u64 == context->mmu_role.as_u64)
		return;

	context->shadow_root_level = level;

	context->nx = true;
	context->ept_ad = accessed_dirty;
	context->page_fault = ept_page_fault;
	context->gva_to_gpa = ept_gva_to_gpa;
	context->sync_page = ept_sync_page;
	context->invlpg = ept_invlpg;
	context->root_level = level;
	context->direct_map = false;
	context->mmu_role.as_u64 = new_role.as_u64;

	update_permission_bitmask(vcpu, context, true);
	update_pkru_bitmask(vcpu, context, true);
	update_last_nonleaf_level(vcpu, context);
	reset_rsvds_bits_mask_ept(vcpu, context, execonly);
	reset_ept_shadow_zero_bits_mask(vcpu, context, execonly);
}
EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu);

static void init_kvm_softmmu(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu *context = &vcpu->arch.root_mmu;

	kvm_init_shadow_mmu(vcpu,
			    kvm_read_cr0_bits(vcpu, X86_CR0_PG),
			    kvm_read_cr4_bits(vcpu, X86_CR4_PAE),
			    vcpu->arch.efer);

	context->get_guest_pgd = get_cr3;
	context->get_pdptr = kvm_pdptr_read;
	context->inject_page_fault = kvm_inject_page_fault;
}

static union kvm_mmu_role kvm_calc_nested_mmu_role(struct kvm_vcpu *vcpu)
{
	union kvm_mmu_role role = kvm_calc_shadow_root_page_role_common(vcpu, false);

	/*
	 * Nested MMUs are used only for walking L2's gva->gpa, they never have
	 * shadow pages of their own and so "direct" has no meaning.  Set it
	 * to "true" to try to detect bogus usage of the nested MMU.
	 */
	role.base.direct = true;

	if (!is_paging(vcpu))
		role.base.level = 0;
	else if (is_long_mode(vcpu))
		role.base.level = is_la57_mode(vcpu) ? PT64_ROOT_5LEVEL :
						       PT64_ROOT_4LEVEL;
	else if (is_pae(vcpu))
		role.base.level = PT32E_ROOT_LEVEL;
	else
		role.base.level = PT32_ROOT_LEVEL;

	return role;
}

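/*
 * Set up the "nested" MMU, which is only ever used to translate L2
 * virtual addresses to L1 physical addresses; it has no shadow pages of
 * its own, so only the gva_to_gpa hooks and permission bitmaps matter.
 */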
static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
{
	union kvm_mmu_role new_role = kvm_calc_nested_mmu_role(vcpu);
	struct kvm_mmu *g_context = &vcpu->arch.nested_mmu;

	if (new_role.as_u64 == g_context->mmu_role.as_u64)
		return;

	g_context->mmu_role.as_u64 = new_role.as_u64;
	g_context->get_guest_pgd = get_cr3;
	g_context->get_pdptr = kvm_pdptr_read;
	g_context->inject_page_fault = kvm_inject_page_fault;

	/*
	 * L2 page tables are never shadowed, so there is no need to sync
	 * SPTEs.
	 */
	g_context->invlpg = NULL;

	/*
	 * Note that arch.mmu->gva_to_gpa translates l2_gpa to l1_gpa using
	 * L1's nested page tables (e.g. EPT12).  The nested translation
	 * of l2_gva to l1_gpa is done by arch.nested_mmu.gva_to_gpa using
	 * L2's page tables as the first level of translation and L1's
	 * nested page tables as the second level of translation.  Basically
	 * the gva_to_gpa functions between mmu and nested_mmu are swapped.
	 */
	if (!is_paging(vcpu)) {
		g_context->nx = false;
		g_context->root_level = 0;
		g_context->gva_to_gpa = nonpaging_gva_to_gpa_nested;
	} else if (is_long_mode(vcpu)) {
		g_context->nx = is_nx(vcpu);
		g_context->root_level = is_la57_mode(vcpu) ?
					PT64_ROOT_5LEVEL : PT64_ROOT_4LEVEL;
		reset_rsvds_bits_mask(vcpu, g_context);
		g_context->gva_to_gpa = paging64_gva_to_gpa_nested;
	} else if (is_pae(vcpu)) {
		g_context->nx = is_nx(vcpu);
		g_context->root_level = PT32E_ROOT_LEVEL;
		reset_rsvds_bits_mask(vcpu, g_context);
		g_context->gva_to_gpa = paging64_gva_to_gpa_nested;
	} else {
		g_context->nx = false;
		g_context->root_level = PT32_ROOT_LEVEL;
		reset_rsvds_bits_mask(vcpu, g_context);
		g_context->gva_to_gpa = paging32_gva_to_gpa_nested;
	}

	update_permission_bitmask(vcpu, g_context, false);
	update_pkru_bitmask(vcpu, g_context, false);
	update_last_nonleaf_level(vcpu, g_context);
}

void kvm_init_mmu(struct kvm_vcpu *vcpu, bool reset_roots)
{
	if (reset_roots) {
		uint i;

		vcpu->arch.mmu->root_hpa = INVALID_PAGE;

		for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
			vcpu->arch.mmu->prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
	}

	if (mmu_is_nested(vcpu))
		init_kvm_nested_mmu(vcpu);
	else if (tdp_enabled)
		init_kvm_tdp_mmu(vcpu);
	else
		init_kvm_softmmu(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_init_mmu);

static union kvm_mmu_page_role
kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu)
{
	union kvm_mmu_role role;

	if (tdp_enabled)
		role = kvm_calc_tdp_mmu_root_page_role(vcpu, true);
	else
		role = kvm_calc_shadow_mmu_root_page_role(vcpu, true);

	return role.base;
}

void kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
{
	kvm_mmu_unload(vcpu);
	kvm_init_mmu(vcpu, true);
}
EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);

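/*
 * Load the MMU for the current mode: top up the memory caches, allocate
 * or reuse the root page tables, sync unsync'd shadow pages, then point
 * the hardware at the new root and flush the current TLB context.
 */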
int kvm_mmu_load(struct kvm_vcpu *vcpu)
{
	int r;

	r = mmu_topup_memory_caches(vcpu, !vcpu->arch.mmu->direct_map);
	if (r)
		goto out;
	r = mmu_alloc_roots(vcpu);
	kvm_mmu_sync_roots(vcpu);
	if (r)
		goto out;
	kvm_mmu_load_pgd(vcpu);
	kvm_x86_ops.tlb_flush_current(vcpu);
out:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_mmu_load);

void kvm_mmu_unload(struct kvm_vcpu *vcpu)
{
	kvm_mmu_free_roots(vcpu, &vcpu->arch.root_mmu, KVM_MMU_ROOTS_ALL);
	WARN_ON(VALID_PAGE(vcpu->arch.root_mmu.root_hpa));
	kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);
	WARN_ON(VALID_PAGE(vcpu->arch.guest_mmu.root_hpa));
}
EXPORT_SYMBOL_GPL(kvm_mmu_unload);

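/*
 * Decide whether a shadow PTE change from 'old' to 'new' requires a
 * remote TLB flush: changing the target pfn or removing permissions can
 * leave stale translations in other vCPUs' TLBs.
 */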
static bool need_remote_flush(u64 old, u64 new)
{
	if (!is_shadow_present_pte(old))
		return false;
	if (!is_shadow_present_pte(new))
		return true;
	if ((old ^ new) & PT64_BASE_ADDR_MASK)
		return true;
	old ^= shadow_nx_mask;
	new ^= shadow_nx_mask;
	return (old & ~new & PT64_PERM_MASK) != 0;
}

static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa,
				    int *bytes)
{
	u64 gentry = 0;
	int r;

	/*
	 * Assume that the pte write is on a page table of the same type
	 * as the current vcpu paging mode, since we update the sptes only
	 * when they have the same mode.
	 */
	if (is_pae(vcpu) && *bytes == 4) {
		/* Handle a 32-bit guest writing two halves of a 64-bit gpte */
		*gpa &= ~(gpa_t)7;
		*bytes = 8;
	}

	if (*bytes == 4 || *bytes == 8) {
		r = kvm_vcpu_read_guest_atomic(vcpu, *gpa, &gentry, *bytes);
		if (r)
			gentry = 0;
	}

	return gentry;
}

/*
 * If we're seeing too many writes to a page, it may no longer be a page table,
 * or we may be forking, in which case it is better to unmap the page.
 */
static bool detect_write_flooding(struct kvm_mmu_page *sp)
{
	/*
	 * Skip write-flooding detection for the sp whose level is 1, because
	 * it can become unsync, and then the guest page is not write-protected.
	 */
	if (sp->role.level == PG_LEVEL_4K)
		return false;

	atomic_inc(&sp->write_flooding_count);
	return atomic_read(&sp->write_flooding_count) >= 3;
}

/*
 * Misaligned accesses are too much trouble to fix up; also, they usually
 * indicate a page is not used as a page table.
 */
static bool detect_write_misaligned(struct kvm_mmu_page *sp, gpa_t gpa,
				    int bytes)
{
	unsigned offset, pte_size, misaligned;

	pgprintk("misaligned: gpa %llx bytes %d role %x\n",
		 gpa, bytes, sp->role.word);

	offset = offset_in_page(gpa);
	pte_size = sp->role.gpte_is_8_bytes ? 8 : 4;

	/*
	 * Sometimes the OS writes only the last byte to update status
	 * bits, e.g. Linux uses the andb instruction in clear_bit().
	 */
	if (!(offset & (pte_size - 1)) && bytes == 1)
		return false;

	misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
	misaligned |= bytes < 4;

	return misaligned;
}

static u64 *get_written_sptes(struct kvm_mmu_page *sp, gpa_t gpa, int *nspte)
{
	unsigned page_offset, quadrant;
	u64 *spte;
	int level;

	page_offset = offset_in_page(gpa);
	level = sp->role.level;
	*nspte = 1;
	if (!sp->role.gpte_is_8_bytes) {
		page_offset <<= 1;	/* 32->64 */
		/*
		 * A 32-bit pde maps 4MB while the shadow pdes map
		 * only 2MB.  So we need to double the offset again
		 * and zap two pdes instead of one.
		 */
		if (level == PT32_ROOT_LEVEL) {
			page_offset &= ~7; /* kill rounding error */
			page_offset <<= 1;
			*nspte = 2;
		}
		quadrant = page_offset >> PAGE_SHIFT;
		page_offset &= ~PAGE_MASK;
		if (quadrant != sp->role.quadrant)
			return NULL;
	}

	spte = &sp->spt[page_offset / sizeof(*spte)];
	return spte;
}

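/*
 * Write-tracking callback, invoked when the guest writes to a gfn that
 * is shadowed as a page table.  Updates the affected sptes in place,
 * and zaps the whole shadow page if the writes look misaligned or like
 * write-flooding.
 */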
static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
			      const u8 *new, int bytes,
			      struct kvm_page_track_notifier_node *node)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	struct kvm_mmu_page *sp;
	LIST_HEAD(invalid_list);
	u64 entry, gentry, *spte;
	int npte;
	bool remote_flush, local_flush;

	/*
	 * If we don't have indirect shadow pages, it means no page is
	 * write-protected, so we can exit simply.
	 */
	if (!READ_ONCE(vcpu->kvm->arch.indirect_shadow_pages))
		return;

	remote_flush = local_flush = false;

	pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);

	/*
	 * No need to care whether the memory allocation succeeded or not,
	 * since pte prefetch is skipped if the cache does not have enough
	 * objects.
	 */
	mmu_topup_memory_caches(vcpu, true);

	spin_lock(&vcpu->kvm->mmu_lock);

	gentry = mmu_pte_write_fetch_gpte(vcpu, &gpa, &bytes);

	++vcpu->kvm->stat.mmu_pte_write;
	kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);

	for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
		if (detect_write_misaligned(sp, gpa, bytes) ||
		    detect_write_flooding(sp)) {
			kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list);
			++vcpu->kvm->stat.mmu_flooded;
			continue;
		}

		spte = get_written_sptes(sp, gpa, &npte);
		if (!spte)
			continue;

		local_flush = true;
		while (npte--) {
			entry = *spte;
			mmu_page_zap_pte(vcpu->kvm, sp, spte, NULL);
			if (gentry && sp->role.level != PG_LEVEL_4K)
				++vcpu->kvm->stat.mmu_pde_zapped;
			if (need_remote_flush(entry, *spte))
				remote_flush = true;
			++spte;
		}
	}
	kvm_mmu_flush_or_zap(vcpu, &invalid_list, remote_flush, local_flush);
	kvm_mmu_audit(vcpu, AUDIT_POST_PTE_WRITE);
	spin_unlock(&vcpu->kvm->mmu_lock);
}

int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
{
	gpa_t gpa;
	int r;

	if (vcpu->arch.mmu->direct_map)
		return 0;

	gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);

	r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);

	return r;
}
EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt);

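/*
 * Top-level MMU page fault handler: MMIO faults are recognized via
 * reserved-bit violations, everything else goes through the per-mode
 * page fault handler, with instruction emulation as the fallback when
 * the fault cannot be fixed by the MMU itself.
 */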
int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
		       void *insn, int insn_len)
{
	int r, emulation_type = EMULTYPE_PF;
	bool direct = vcpu->arch.mmu->direct_map;

	if (WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa)))
		return RET_PF_RETRY;

	r = RET_PF_INVALID;
	if (unlikely(error_code & PFERR_RSVD_MASK)) {
		r = handle_mmio_page_fault(vcpu, cr2_or_gpa, direct);
		if (r == RET_PF_EMULATE)
			goto emulate;
	}

	if (r == RET_PF_INVALID) {
		r = kvm_mmu_do_page_fault(vcpu, cr2_or_gpa,
					  lower_32_bits(error_code), false);
		if (WARN_ON_ONCE(r == RET_PF_INVALID))
			return -EIO;
	}

	if (r < 0)
		return r;
	if (r != RET_PF_EMULATE)
		return 1;

	/*
	 * Before emulating the instruction, check if the error code
	 * was due to a RO violation while translating the guest page.
	 * This can occur when using nested virtualization with nested
	 * paging in both guests.  If true, we simply unprotect the page
	 * and resume the guest.
	 */
	if (vcpu->arch.mmu->direct_map &&
	    (error_code & PFERR_NESTED_GUEST_PAGE) == PFERR_NESTED_GUEST_PAGE) {
		kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(cr2_or_gpa));
		return 1;
	}

	/*
	 * vcpu->arch.mmu.page_fault returned RET_PF_EMULATE, but we can still
	 * optimistically try to just unprotect the page and let the processor
	 * re-execute the instruction that caused the page fault.  Do not allow
	 * retrying MMIO emulation, as it's not only pointless but could also
	 * cause us to enter an infinite loop because the processor will keep
	 * faulting on the non-existent MMIO address.  Retrying an instruction
	 * from a nested guest is also pointless and dangerous as we are only
	 * explicitly shadowing L1's page tables, i.e. unprotecting something
	 * for L1 isn't going to magically fix whatever issue caused L2 to fail.
	 */
	if (!mmio_info_in_cache(vcpu, cr2_or_gpa, direct) && !is_guest_mode(vcpu))
		emulation_type |= EMULTYPE_ALLOW_RETRY_PF;
emulate:
	return x86_emulate_instruction(vcpu, cr2_or_gpa, emulation_type, insn,
				       insn_len);
}
EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);

void kvm_mmu_invalidate_gva(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
			    gva_t gva, hpa_t root_hpa)
{
	int i;

	/* It's actually a GPA for vcpu->arch.guest_mmu. */
	if (mmu != &vcpu->arch.guest_mmu) {
		/* INVLPG on a non-canonical address is a NOP according to the SDM. */
		if (is_noncanonical_address(gva, vcpu))
			return;

		kvm_x86_ops.tlb_flush_gva(vcpu, gva);
	}

	if (!mmu->invlpg)
		return;

	if (root_hpa == INVALID_PAGE) {
		mmu->invlpg(vcpu, gva, mmu->root_hpa);

		/*
		 * INVLPG is required to invalidate any global mappings for the VA,
		 * irrespective of PCID.  Since it would take us roughly the same
		 * amount of work to determine whether any of the prev_root
		 * mappings of the VA is marked global as to just sync it blindly,
		 * we might as well just always sync it.
		 *
		 * Mappings not reachable via the current cr3 or the prev_roots
		 * will be synced when switching to that cr3, so nothing needs to
		 * be done here for them.
		 */
		for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
			if (VALID_PAGE(mmu->prev_roots[i].hpa))
				mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa);
	} else {
		mmu->invlpg(vcpu, gva, root_hpa);
	}
}
EXPORT_SYMBOL_GPL(kvm_mmu_invalidate_gva);

void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
{
	kvm_mmu_invalidate_gva(vcpu, vcpu->arch.walk_mmu, gva, INVALID_PAGE);
	++vcpu->stat.invlpg;
}
EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);

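/*
 * Handle the single-address form of INVPCID: invalidate the GVA in
 * whichever cached roots (current or previous) match the requested
 * PCID, and flush the hardware TLB entry for the GVA only if a root
 * matched.
 */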
void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid)
{
	struct kvm_mmu *mmu = vcpu->arch.mmu;
	bool tlb_flush = false;
	uint i;

	if (pcid == kvm_get_active_pcid(vcpu)) {
		if (mmu->invlpg)
			mmu->invlpg(vcpu, gva, mmu->root_hpa);
		tlb_flush = true;
	}

	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
		if (VALID_PAGE(mmu->prev_roots[i].hpa) &&
		    pcid == kvm_get_pcid(vcpu, mmu->prev_roots[i].pgd)) {
			if (mmu->invlpg)
				mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa);
			tlb_flush = true;
		}
	}

	if (tlb_flush)
		kvm_x86_ops.tlb_flush_gva(vcpu, gva);

	++vcpu->stat.invlpg;

	/*
	 * Mappings not reachable via the current cr3 or the prev_roots will be
	 * synced when switching to that cr3, so nothing needs to be done here
	 * for them.
	 */
}
EXPORT_SYMBOL_GPL(kvm_mmu_invpcid_gva);

void kvm_configure_mmu(bool enable_tdp, int tdp_max_root_level,
		       int tdp_huge_page_level)
{
	tdp_enabled = enable_tdp;
	max_tdp_level = tdp_max_root_level;

	/*
	 * max_huge_page_level reflects KVM's MMU capabilities irrespective
	 * of kernel support, e.g. KVM may be capable of using 1GB pages when
	 * the kernel is not.  But, KVM never creates a page size greater than
	 * what is used by the kernel for any given HVA, i.e. the kernel's
	 * capabilities are ultimately consulted by kvm_mmu_hugepage_adjust().
	 */
	if (tdp_enabled)
		max_huge_page_level = tdp_huge_page_level;
	else if (boot_cpu_has(X86_FEATURE_GBPAGES))
		max_huge_page_level = PG_LEVEL_1G;
	else
		max_huge_page_level = PG_LEVEL_2M;
}
EXPORT_SYMBOL_GPL(kvm_configure_mmu);

/* The return value indicates if tlb flush on all vcpus is needed. */
typedef bool (*slot_level_handler) (struct kvm *kvm, struct kvm_rmap_head *rmap_head);

/* The caller must hold mmu_lock before calling this function. */
static __always_inline bool
slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot,
			slot_level_handler fn, int start_level, int end_level,
			gfn_t start_gfn, gfn_t end_gfn, bool lock_flush_tlb)
{
	struct slot_rmap_walk_iterator iterator;
	bool flush = false;

	for_each_slot_rmap_range(memslot, start_level, end_level, start_gfn,
			end_gfn, &iterator) {
		if (iterator.rmap)
			flush |= fn(kvm, iterator.rmap);

		if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
			if (flush && lock_flush_tlb) {
				kvm_flush_remote_tlbs_with_address(kvm,
						start_gfn,
						iterator.gfn - start_gfn + 1);
				flush = false;
			}
			cond_resched_lock(&kvm->mmu_lock);
		}
	}

	if (flush && lock_flush_tlb) {
		kvm_flush_remote_tlbs_with_address(kvm, start_gfn,
						   end_gfn - start_gfn + 1);
		flush = false;
	}

	return flush;
}

static __always_inline bool
slot_handle_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
		  slot_level_handler fn, int start_level, int end_level,
		  bool lock_flush_tlb)
{
	return slot_handle_level_range(kvm, memslot, fn, start_level,
			end_level, memslot->base_gfn,
			memslot->base_gfn + memslot->npages - 1,
			lock_flush_tlb);
}

static __always_inline bool
slot_handle_all_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
		      slot_level_handler fn, bool lock_flush_tlb)
{
	return slot_handle_level(kvm, memslot, fn, PG_LEVEL_4K,
				 KVM_MAX_HUGEPAGE_LEVEL, lock_flush_tlb);
}

static __always_inline bool
slot_handle_large_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
			slot_level_handler fn, bool lock_flush_tlb)
{
	return slot_handle_level(kvm, memslot, fn, PG_LEVEL_4K + 1,
				 KVM_MAX_HUGEPAGE_LEVEL, lock_flush_tlb);
}

static __always_inline bool
slot_handle_leaf(struct kvm *kvm, struct kvm_memory_slot *memslot,
		 slot_level_handler fn, bool lock_flush_tlb)
{
	return slot_handle_level(kvm, memslot, fn, PG_LEVEL_4K,
				 PG_LEVEL_4K, lock_flush_tlb);
}

static void free_mmu_pages(struct kvm_mmu *mmu)
{
	free_page((unsigned long)mmu->pae_root);
	free_page((unsigned long)mmu->lm_root);
}

static int __kvm_mmu_create(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
{
	struct page *page;
	int i;

	mmu->root_hpa = INVALID_PAGE;
	mmu->root_pgd = 0;
	mmu->translate_gpa = translate_gpa;
	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
		mmu->prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;

	/*
	 * When using PAE paging, the four PDPTEs are treated as 'root' pages,
	 * while the PDP table is a per-vCPU construct that's allocated at MMU
	 * creation.  When emulating 32-bit mode, cr3 is only 32 bits even on
	 * x86_64.  Therefore we need to allocate the PDP table in the first
	 * 4GB of memory, which happens to fit the DMA32 zone.  TDP paging
	 * generally doesn't use PAE paging and can skip allocating the PDP
	 * table.  The main exception, handled here, is SVM's 32-bit NPT.  The
	 * other exception is for shadowing L1's 32-bit or PAE NPT on 64-bit
	 * KVM; that horror is handled on-demand by mmu_alloc_shadow_roots().
	 */
	if (tdp_enabled && kvm_mmu_get_tdp_level(vcpu) > PT32E_ROOT_LEVEL)
		return 0;

	page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_DMA32);
	if (!page)
		return -ENOMEM;

	mmu->pae_root = page_address(page);
	for (i = 0; i < 4; ++i)
		mmu->pae_root[i] = INVALID_PAGE;

	return 0;
}

int kvm_mmu_create(struct kvm_vcpu *vcpu)
{
	int ret;

	vcpu->arch.mmu_pte_list_desc_cache.kmem_cache = pte_list_desc_cache;
	vcpu->arch.mmu_pte_list_desc_cache.gfp_zero = __GFP_ZERO;

	vcpu->arch.mmu_page_header_cache.kmem_cache = mmu_page_header_cache;
	vcpu->arch.mmu_page_header_cache.gfp_zero = __GFP_ZERO;

	vcpu->arch.mmu_shadow_page_cache.gfp_zero = __GFP_ZERO;

	vcpu->arch.mmu = &vcpu->arch.root_mmu;
	vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;

	vcpu->arch.nested_mmu.translate_gpa = translate_nested_gpa;

	ret = __kvm_mmu_create(vcpu, &vcpu->arch.guest_mmu);
	if (ret)
		return ret;

	ret = __kvm_mmu_create(vcpu, &vcpu->arch.root_mmu);
	if (ret)
		goto fail_allocate_root;

	return ret;
 fail_allocate_root:
	free_mmu_pages(&vcpu->arch.guest_mmu);
	return ret;
}

#define BATCH_ZAP_PAGES	10
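/*
 * Zap all obsolete (old-generation) shadow pages, yielding mmu_lock
 * after every BATCH_ZAP_PAGES zapped pages so that a large zap doesn't
 * stall vCPUs; the pages are freed only after a remote TLB flush.
 */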
static void kvm_zap_obsolete_pages(struct kvm *kvm)
{
	struct kvm_mmu_page *sp, *node;
	int nr_zapped, batch = 0;
	bool unstable;

restart:
	list_for_each_entry_safe_reverse(sp, node,
	      &kvm->arch.active_mmu_pages, link) {
		/*
		 * No obsolete valid page exists before a newly created page
		 * since active_mmu_pages is a FIFO list.
		 */
		if (!is_obsolete_sp(kvm, sp))
			break;

		/*
		 * Invalid pages should never land back on the list of active
		 * pages.  Skip the bogus page, otherwise we'll get stuck in an
		 * infinite loop if the page gets put back on the list (again).
		 */
		if (WARN_ON(sp->role.invalid))
			continue;

		/*
		 * No need to flush the TLB since we're only zapping shadow
		 * pages with an obsolete generation number and all vCPUs have
		 * loaded a new root, i.e. the shadow pages being zapped cannot
		 * be in active use by the guest.
		 */
		if (batch >= BATCH_ZAP_PAGES &&
		    cond_resched_lock(&kvm->mmu_lock)) {
			batch = 0;
			goto restart;
		}

		unstable = __kvm_mmu_prepare_zap_page(kvm, sp,
				&kvm->arch.zapped_obsolete_pages, &nr_zapped);
		batch += nr_zapped;

		if (unstable)
			goto restart;
	}

	/*
	 * Trigger a remote TLB flush before freeing the page tables to ensure
	 * KVM is not in the middle of a lockless shadow page table walk, which
	 * may reference the pages.
	 */
	kvm_mmu_commit_zap_page(kvm, &kvm->arch.zapped_obsolete_pages);
}

/*
 * Fast invalidate all shadow pages and use lock-break technique
 * to zap obsolete pages.
 *
 * It's required when a memslot is being deleted or the VM is being
 * destroyed; in these cases, we must ensure that the KVM MMU does not
 * use any resource of the slot being deleted, or of any slot, after
 * this function returns.
 */
static void kvm_mmu_zap_all_fast(struct kvm *kvm)
{
	lockdep_assert_held(&kvm->slots_lock);

	spin_lock(&kvm->mmu_lock);
	trace_kvm_mmu_zap_all_fast(kvm);

	/*
	 * Toggle mmu_valid_gen between '0' and '1'.  Because slots_lock is
	 * held for the entire duration of zapping obsolete pages, it's
	 * impossible for there to be multiple invalid generations associated
	 * with *valid* shadow pages at any given time, i.e. there is exactly
	 * one valid generation and (at most) one invalid generation.
	 */
	kvm->arch.mmu_valid_gen = kvm->arch.mmu_valid_gen ? 0 : 1;

	/*
	 * Notify all vcpus to reload their shadow page tables and flush the
	 * TLB.  All vcpus will then switch to a new shadow page table with
	 * the new mmu_valid_gen.
	 *
	 * Note: we need to do this under the protection of mmu_lock;
	 * otherwise, a vcpu could purge a shadow page but miss the tlb flush.
	 */
	kvm_reload_remote_mmus(kvm);

	kvm_zap_obsolete_pages(kvm);

	if (kvm->arch.tdp_mmu_enabled)
		kvm_tdp_mmu_zap_all(kvm);

	spin_unlock(&kvm->mmu_lock);
}

static bool kvm_has_zapped_obsolete_pages(struct kvm *kvm)
{
	return unlikely(!list_empty_careful(&kvm->arch.zapped_obsolete_pages));
}

static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm,
			struct kvm_memory_slot *slot,
			struct kvm_page_track_notifier_node *node)
{
	kvm_mmu_zap_all_fast(kvm);
}

void kvm_mmu_init_vm(struct kvm *kvm)
{
	struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;

	kvm_mmu_init_tdp_mmu(kvm);

	node->track_write = kvm_mmu_pte_write;
	node->track_flush_slot = kvm_mmu_invalidate_zap_pages_in_memslot;
	kvm_page_track_register_notifier(kvm, node);
}

void kvm_mmu_uninit_vm(struct kvm *kvm)
{
	struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;

	kvm_page_track_unregister_notifier(kvm, node);

	kvm_mmu_uninit_tdp_mmu(kvm);
}

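/*
 * Zap all shadow-MMU rmap entries and, when the TDP MMU is in use, TDP
 * mappings that fall inside [gfn_start, gfn_end), walking every address
 * space and memslot.
 */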
static bool kvm_has_zapped_obsolete_pages(struct kvm *kvm)
{
	return unlikely(!list_empty_careful(&kvm->arch.zapped_obsolete_pages));
}

static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm,
			struct kvm_memory_slot *slot,
			struct kvm_page_track_notifier_node *node)
{
	kvm_mmu_zap_all_fast(kvm);
}

void kvm_mmu_init_vm(struct kvm *kvm)
{
	struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;

	kvm_mmu_init_tdp_mmu(kvm);

	node->track_write = kvm_mmu_pte_write;
	node->track_flush_slot = kvm_mmu_invalidate_zap_pages_in_memslot;
	kvm_page_track_register_notifier(kvm, node);
}

void kvm_mmu_uninit_vm(struct kvm *kvm)
{
	struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;

	kvm_page_track_unregister_notifier(kvm, node);

	kvm_mmu_uninit_tdp_mmu(kvm);
}

void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int i;
	bool flush;

	spin_lock(&kvm->mmu_lock);
	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
		slots = __kvm_memslots(kvm, i);
		kvm_for_each_memslot(memslot, slots) {
			gfn_t start, end;

			start = max(gfn_start, memslot->base_gfn);
			end = min(gfn_end, memslot->base_gfn + memslot->npages);
			if (start >= end)
				continue;

			slot_handle_level_range(kvm, memslot, kvm_zap_rmapp,
						PG_LEVEL_4K,
						KVM_MAX_HUGEPAGE_LEVEL,
						start, end - 1, true);
		}
	}

	if (kvm->arch.tdp_mmu_enabled) {
		flush = kvm_tdp_mmu_zap_gfn_range(kvm, gfn_start, gfn_end);
		if (flush)
			kvm_flush_remote_tlbs(kvm);
	}

	spin_unlock(&kvm->mmu_lock);
}
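/*
 * Callers of kvm_zap_gfn_range() work in guest frame numbers, not byte
 * addresses.  A hypothetical caller invalidating a 1 MiB guest-physical
 * window (values illustrative only):
 *
 *	gpa_t gpa = 0xfee00000;
 *	u64 len = SZ_1M;
 *
 *	kvm_zap_gfn_range(kvm, gpa >> PAGE_SHIFT,
 *			  (gpa + len) >> PAGE_SHIFT);
 *
 * The per-memslot clamping with max()/min() above means the range may freely
 * span memslot boundaries or unbacked holes.
 */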
static bool slot_rmap_write_protect(struct kvm *kvm,
				    struct kvm_rmap_head *rmap_head)
{
	return __rmap_write_protect(kvm, rmap_head, false);
}

void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
				      struct kvm_memory_slot *memslot,
				      int start_level)
{
	bool flush;

	spin_lock(&kvm->mmu_lock);
	flush = slot_handle_level(kvm, memslot, slot_rmap_write_protect,
				  start_level, KVM_MAX_HUGEPAGE_LEVEL, false);
	if (kvm->arch.tdp_mmu_enabled)
		flush |= kvm_tdp_mmu_wrprot_slot(kvm, memslot, PG_LEVEL_4K);
	spin_unlock(&kvm->mmu_lock);

	/*
	 * We can flush all the TLBs outside of mmu_lock without risking TLB
	 * corruption because we only change SPTEs from writable to read-only,
	 * so the only case that needs handling is a present->present change
	 * (a present->nonpresent change flushes the TLBs immediately).  In
	 * other words, the only path of interest is mmu_spte_update(), which
	 * checks SPTE_HOST_WRITEABLE | SPTE_MMU_WRITEABLE instead of
	 * PT_WRITABLE_MASK and therefore no longer depends on
	 * PT_WRITABLE_MASK at all.
	 */
	if (flush)
		kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
}
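/*
 * kvm_mmu_slot_remove_write_access() is the opening move of dirty logging:
 * write-protect everything once, then let write faults repopulate writable
 * SPTEs one page at a time.  A sketch of the x86 enable path (condensed and
 * illustrative; the real caller lives in x86.c):
 *
 *	// Trap every write: protect from the 4K level up.
 *	kvm_mmu_slot_remove_write_access(kvm, memslot, PG_LEVEL_4K);
 *
 *	// Or, when the dirty bitmap starts out all-set (manual-protect
 *	// mode), only huge pages need protecting up front:
 *	kvm_mmu_slot_remove_write_access(kvm, memslot, PG_LEVEL_2M);
 */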
static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm,
					 struct kvm_rmap_head *rmap_head)
{
	u64 *sptep;
	struct rmap_iterator iter;
	int need_tlb_flush = 0;
	kvm_pfn_t pfn;
	struct kvm_mmu_page *sp;

restart:
	for_each_rmap_spte(rmap_head, &iter, sptep) {
		sp = sptep_to_sp(sptep);
		pfn = spte_to_pfn(*sptep);

		/*
		 * Huge page mappings cannot be created for indirect shadow
		 * pages, which are found on the last rmap (level = 1) when
		 * not using TDP; such shadow pages are synced with the guest
		 * page table, and the guest uses 4K mappings if the indirect
		 * sp has level = 1.
		 */
		if (sp->role.direct && !kvm_is_reserved_pfn(pfn) &&
		    (kvm_is_zone_device_pfn(pfn) ||
		     PageCompound(pfn_to_page(pfn)))) {
			pte_list_remove(rmap_head, sptep);

			if (kvm_available_flush_tlb_with_range())
				kvm_flush_remote_tlbs_with_address(kvm, sp->gfn,
					KVM_PAGES_PER_HPAGE(sp->role.level));
			else
				need_tlb_flush = 1;

			goto restart;
		}
	}

	return need_tlb_flush;
}

void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
				   const struct kvm_memory_slot *memslot)
{
	/* FIXME: const-ify all uses of struct kvm_memory_slot. */
	spin_lock(&kvm->mmu_lock);
	slot_handle_leaf(kvm, (struct kvm_memory_slot *)memslot,
			 kvm_mmu_zap_collapsible_spte, true);

	if (kvm->arch.tdp_mmu_enabled)
		kvm_tdp_mmu_zap_collapsible_sptes(kvm, memslot);
	spin_unlock(&kvm->mmu_lock);
}

void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
					struct kvm_memory_slot *memslot)
{
	/*
	 * All current use cases for flushing the TLBs for a specific memslot
	 * are related to dirty logging, and do the TLB flush out of mmu_lock.
	 * The interactions between the various operations on a memslot must
	 * be serialized by slots_lock to ensure the TLB flush from one
	 * operation is observed by any other operation on the same memslot.
	 */
	lockdep_assert_held(&kvm->slots_lock);
	kvm_flush_remote_tlbs_with_address(kvm, memslot->base_gfn,
					   memslot->npages);
}

void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
				   struct kvm_memory_slot *memslot)
{
	bool flush;

	spin_lock(&kvm->mmu_lock);
	flush = slot_handle_leaf(kvm, memslot, __rmap_clear_dirty, false);
	if (kvm->arch.tdp_mmu_enabled)
		flush |= kvm_tdp_mmu_clear_dirty_slot(kvm, memslot);
	spin_unlock(&kvm->mmu_lock);

	/*
	 * It is also safe to flush TLBs outside of mmu_lock here, as this
	 * function is currently only used for dirty logging, where flushing
	 * outside of mmu_lock still guarantees that no dirty page will be
	 * lost from the dirty_bitmap.
	 */
	if (flush)
		kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
}
EXPORT_SYMBOL_GPL(kvm_mmu_slot_leaf_clear_dirty);

void kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm,
					struct kvm_memory_slot *memslot)
{
	bool flush;

	spin_lock(&kvm->mmu_lock);
	flush = slot_handle_large_level(kvm, memslot, slot_rmap_write_protect,
					false);
	if (kvm->arch.tdp_mmu_enabled)
		flush |= kvm_tdp_mmu_wrprot_slot(kvm, memslot, PG_LEVEL_2M);
	spin_unlock(&kvm->mmu_lock);

	if (flush)
		kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
}
EXPORT_SYMBOL_GPL(kvm_mmu_slot_largepage_remove_write_access);

void kvm_mmu_slot_set_dirty(struct kvm *kvm,
			    struct kvm_memory_slot *memslot)
{
	bool flush;

	spin_lock(&kvm->mmu_lock);
	flush = slot_handle_all_level(kvm, memslot, __rmap_set_dirty, false);
	if (kvm->arch.tdp_mmu_enabled)
		flush |= kvm_tdp_mmu_slot_set_dirty(kvm, memslot);
	spin_unlock(&kvm->mmu_lock);

	if (flush)
		kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
}
EXPORT_SYMBOL_GPL(kvm_mmu_slot_set_dirty);
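/*
 * Taken together, the slot helpers above implement both edges of dirty
 * logging.  Condensed, illustrative sketch of how vendor code drives them
 * (e.g. the VMX/PML path; not a verbatim copy):
 *
 *	// Enable, PML case: 4K pages are logged via D-bits, huge pages are
 *	// write-protected so their first write splits them.
 *	kvm_mmu_slot_leaf_clear_dirty(kvm, slot);
 *	kvm_mmu_slot_largepage_remove_write_access(kvm, slot);
 *
 *	// Disable: drop the 4K-only mappings so subsequent faults can
 *	// reinstall 2M/1G pages.
 *	kvm_mmu_zap_collapsible_sptes(kvm, slot);
 */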
void kvm_mmu_zap_all(struct kvm *kvm)
{
	struct kvm_mmu_page *sp, *node;
	LIST_HEAD(invalid_list);
	int ign;

	spin_lock(&kvm->mmu_lock);
restart:
	list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link) {
		if (WARN_ON(sp->role.invalid))
			continue;
		if (__kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list, &ign))
			goto restart;
		if (cond_resched_lock(&kvm->mmu_lock))
			goto restart;
	}

	kvm_mmu_commit_zap_page(kvm, &invalid_list);

	if (kvm->arch.tdp_mmu_enabled)
		kvm_tdp_mmu_zap_all(kvm);

	spin_unlock(&kvm->mmu_lock);
}

void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen)
{
	WARN_ON(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS);

	gen &= MMIO_SPTE_GEN_MASK;

	/*
	 * Generation numbers are incremented in multiples of the number of
	 * address spaces in order to provide unique generations across all
	 * address spaces.  Strip what is effectively the address space
	 * modifier prior to checking for a wrap of the MMIO generation so
	 * that a wrap in any address space is detected.
	 */
	gen &= ~((u64)KVM_ADDRESS_SPACE_NUM - 1);

	/*
	 * The very rare case: if the MMIO generation number has wrapped,
	 * zap all shadow pages.
	 */
	if (unlikely(gen == 0)) {
		kvm_debug_ratelimited("kvm: zapping shadow pages for mmio generation wraparound\n");
		kvm_mmu_zap_all_fast(kvm);
	}
}
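/*
 * Worked example of the masking above, assuming KVM_ADDRESS_SPACE_NUM == 2
 * (x86 with SMM): memslot generations advance in steps of two, with the low
 * bit acting as the address-space modifier.  Stripping it,
 *
 *	gen &= ~((u64)2 - 1);	// clears bit 0
 *
 * maps the generations of both address spaces onto the same even sequence,
 * so gen == 0 is reached only on a true wrap of the shared counter, no
 * matter which address space wrapped first.
 */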
static unsigned long
mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	struct kvm *kvm;
	int nr_to_scan = sc->nr_to_scan;
	unsigned long freed = 0;

	mutex_lock(&kvm_lock);

	list_for_each_entry(kvm, &vm_list, vm_list) {
		int idx;
		LIST_HEAD(invalid_list);

		/*
		 * Never scan more than sc->nr_to_scan VM instances.  In
		 * practice this condition is never hit, since we do not try
		 * to shrink more than one VM and it is very unlikely to see
		 * !n_used_mmu_pages so many times.
		 */
		if (!nr_to_scan--)
			break;
		/*
		 * n_used_mmu_pages is accessed without holding kvm->mmu_lock
		 * here.  We may skip a VM instance erroneously, but we do not
		 * want to shrink a VM that has only just started to populate
		 * its MMU anyway.
		 */
		if (!kvm->arch.n_used_mmu_pages &&
		    !kvm_has_zapped_obsolete_pages(kvm))
			continue;

		idx = srcu_read_lock(&kvm->srcu);
		spin_lock(&kvm->mmu_lock);

		if (kvm_has_zapped_obsolete_pages(kvm)) {
			kvm_mmu_commit_zap_page(kvm,
					&kvm->arch.zapped_obsolete_pages);
			goto unlock;
		}

		freed = kvm_mmu_zap_oldest_mmu_pages(kvm, sc->nr_to_scan);

unlock:
		spin_unlock(&kvm->mmu_lock);
		srcu_read_unlock(&kvm->srcu, idx);

		/*
		 * unfair on small ones
		 * per-vm shrinkers cry out
		 * sadness comes quickly
		 */
		list_move_tail(&kvm->vm_list, &vm_list);
		break;
	}

	mutex_unlock(&kvm_lock);
	return freed;
}

static unsigned long
mmu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	return percpu_counter_read_positive(&kvm_total_used_mmu_pages);
}

static struct shrinker mmu_shrinker = {
	.count_objects = mmu_shrink_count,
	.scan_objects = mmu_shrink_scan,
	.seeks = DEFAULT_SEEKS * 10,
};

static void mmu_destroy_caches(void)
{
	kmem_cache_destroy(pte_list_desc_cache);
	kmem_cache_destroy(mmu_page_header_cache);
}
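/*
 * Rough flow of how the shrinker core drives the two callbacks above
 * (illustrative pseudo-flow; the real logic lives in mm/vmscan.c):
 *
 *	nr = mmu_shrink_count(shrinker, sc);	// total reclaimable pages
 *	if (nr) {
 *		sc->nr_to_scan = batch;		// batch sized by the core
 *		freed = mmu_shrink_scan(shrinker, sc);
 *	}
 *
 * .seeks = DEFAULT_SEEKS * 10 tells the core that recreating a shadow page
 * is expensive, so this cache is shrunk far less aggressively than an
 * ordinary slab cache.
 */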
static void kvm_set_mmio_spte_mask(void)
{
	u64 mask;

	/*
	 * Set a reserved PA bit in MMIO SPTEs to generate page faults with
	 * PFEC.RSVD=1 on MMIO accesses.  64-bit PTEs (PAE, x86-64, and EPT
	 * paging) support a maximum of 52 bits of PA, i.e. if the CPU supports
	 * 52-bit physical addresses then there are no reserved PA bits in the
	 * PTEs and so the reserved PA approach must be disabled.
	 */
	if (shadow_phys_bits < 52)
		mask = BIT_ULL(51) | PT_PRESENT_MASK;
	else
		mask = 0;

	kvm_mmu_set_mmio_spte_mask(mask, ACC_WRITE_MASK | ACC_USER_MASK);
}

static bool get_nx_auto_mode(void)
{
	/* Return true when CPU has the bug, and mitigations are ON */
	return boot_cpu_has_bug(X86_BUG_ITLB_MULTIHIT) && !cpu_mitigations_off();
}

static void __set_nx_huge_pages(bool val)
{
	nx_huge_pages = itlb_multihit_kvm_mitigation = val;
}

static int set_nx_huge_pages(const char *val, const struct kernel_param *kp)
{
	bool old_val = nx_huge_pages;
	bool new_val;

	/* In "auto" mode deploy workaround only if CPU has the bug. */
	if (sysfs_streq(val, "off"))
		new_val = 0;
	else if (sysfs_streq(val, "force"))
		new_val = 1;
	else if (sysfs_streq(val, "auto"))
		new_val = get_nx_auto_mode();
	else if (strtobool(val, &new_val) < 0)
		return -EINVAL;

	__set_nx_huge_pages(new_val);

	if (new_val != old_val) {
		struct kvm *kvm;

		mutex_lock(&kvm_lock);

		list_for_each_entry(kvm, &vm_list, vm_list) {
			mutex_lock(&kvm->slots_lock);
			kvm_mmu_zap_all_fast(kvm);
			mutex_unlock(&kvm->slots_lock);

			wake_up_process(kvm->arch.nx_lpage_recovery_thread);
		}
		mutex_unlock(&kvm_lock);
	}

	return 0;
}
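/*
 * Because the parameter accepts "off", "force", "auto" or any strtobool
 * value, and kvm.ko exposes it with mode 0644, the mitigation can be toggled
 * at runtime, e.g.:
 *
 *	# echo force > /sys/module/kvm/parameters/nx_huge_pages
 *
 * A change is deliberately heavyweight: every VM is fast-zapped so that
 * existing mappings are rebuilt under the new policy, and the recovery
 * threads are woken to pick up the new setting.
 */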
/*
 * nx_huge_pages needs to be resolved to true/false when kvm.ko is loaded, as
 * its default value of -1 is technically undefined behavior for a boolean.
 */
void __init kvm_mmu_x86_module_init(void)
{
	if (nx_huge_pages == -1)
		__set_nx_huge_pages(get_nx_auto_mode());
}

/*
 * The bulk of the MMU initialization is deferred until the vendor module is
 * loaded as many of the masks/values may be modified by VMX or SVM, i.e. need
 * to be reset when a potentially different vendor module is loaded.
 */
int kvm_mmu_vendor_module_init(void)
{
	int ret = -ENOMEM;

	/*
	 * MMU roles use union aliasing which is, generally speaking, undefined
	 * behavior.  However, we supposedly know how compilers behave and the
	 * current status quo is unlikely to change; the BUILD_BUG_ON() guards
	 * below will let us know if that assumption ever becomes false.
	 */
	BUILD_BUG_ON(sizeof(union kvm_mmu_page_role) != sizeof(u32));
	BUILD_BUG_ON(sizeof(union kvm_mmu_extended_role) != sizeof(u32));
	BUILD_BUG_ON(sizeof(union kvm_mmu_role) != sizeof(u64));

	kvm_mmu_reset_all_pte_masks();

	kvm_set_mmio_spte_mask();

	pte_list_desc_cache = kmem_cache_create("pte_list_desc",
						sizeof(struct pte_list_desc),
						0, SLAB_ACCOUNT, NULL);
	if (!pte_list_desc_cache)
		goto out;

	mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
						  sizeof(struct kvm_mmu_page),
						  0, SLAB_ACCOUNT, NULL);
	if (!mmu_page_header_cache)
		goto out;

	if (percpu_counter_init(&kvm_total_used_mmu_pages, 0, GFP_KERNEL))
		goto out;

	ret = register_shrinker(&mmu_shrinker);
	if (ret)
		goto out;

	return 0;

out:
	mmu_destroy_caches();
	return ret;
}
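/*
 * For illustration, the union aliasing that the BUILD_BUG_ON()s above guard
 * looks roughly like this (simplified from kvm_host.h; most fields omitted):
 *
 *	union kvm_mmu_page_role {
 *		u32 word;
 *		struct {
 *			unsigned level:4;
 *			unsigned direct:1;
 *			// ...more bitfields...
 *		};
 *	};
 *
 * MMU code compares roles with 'a.word == b.word', which only stays valid
 * while the bitfields pack into exactly 32 bits -- hence the size checks.
 */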
/*
 * Calculate the default number of MMU pages to allocate for this VM.
 */
unsigned long kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm)
{
	unsigned long nr_mmu_pages;
	unsigned long nr_pages = 0;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int i;

	for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
		slots = __kvm_memslots(kvm, i);

		kvm_for_each_memslot(memslot, slots)
			nr_pages += memslot->npages;
	}

	nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
	nr_mmu_pages = max(nr_mmu_pages, KVM_MIN_ALLOC_MMU_PAGES);

	return nr_mmu_pages;
}
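/*
 * Worked example for kvm_mmu_calculate_default_mmu_pages() above, assuming
 * the header values KVM_PERMILLE_MMU_PAGES == 20 (i.e. 2%) and
 * KVM_MIN_ALLOC_MMU_PAGES == 64: a VM with 4 GiB of memslots spans
 * 1048576 4K pages, so
 *
 *	nr_mmu_pages = 1048576 * 20 / 1000 = 20971
 *
 * whereas a tiny 8 MiB VM (2048 pages) would compute 40 and be raised to
 * the 64-page floor.
 */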
void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_mmu_unload(vcpu);
	free_mmu_pages(&vcpu->arch.root_mmu);
	free_mmu_pages(&vcpu->arch.guest_mmu);
	mmu_free_memory_caches(vcpu);
}

void kvm_mmu_vendor_module_exit(void)
{
	mmu_destroy_caches();
	percpu_counter_destroy(&kvm_total_used_mmu_pages);
	unregister_shrinker(&mmu_shrinker);
	mmu_audit_disable();
}

static int set_nx_huge_pages_recovery_ratio(const char *val, const struct kernel_param *kp)
{
	unsigned int old_val;
	int err;

	old_val = nx_huge_pages_recovery_ratio;
	err = param_set_uint(val, kp);
	if (err)
		return err;

	if (READ_ONCE(nx_huge_pages) &&
	    !old_val && nx_huge_pages_recovery_ratio) {
		struct kvm *kvm;

		mutex_lock(&kvm_lock);

		list_for_each_entry(kvm, &vm_list, vm_list)
			wake_up_process(kvm->arch.nx_lpage_recovery_thread);

		mutex_unlock(&kvm_lock);
	}

	return err;
}

static void kvm_recover_nx_lpages(struct kvm *kvm)
{
	int rcu_idx;
	struct kvm_mmu_page *sp;
	unsigned int ratio;
	LIST_HEAD(invalid_list);
	bool flush = false;
	ulong to_zap;

	rcu_idx = srcu_read_lock(&kvm->srcu);
	spin_lock(&kvm->mmu_lock);

	ratio = READ_ONCE(nx_huge_pages_recovery_ratio);
	to_zap = ratio ? DIV_ROUND_UP(kvm->stat.nx_lpage_splits, ratio) : 0;
	for ( ; to_zap; --to_zap) {
		if (list_empty(&kvm->arch.lpage_disallowed_mmu_pages))
			break;

		/*
		 * We use a separate list instead of just using active_mmu_pages
		 * because the number of lpage_disallowed pages is expected to
		 * be relatively small compared to the total.
		 */
		sp = list_first_entry(&kvm->arch.lpage_disallowed_mmu_pages,
				      struct kvm_mmu_page,
				      lpage_disallowed_link);
		WARN_ON_ONCE(!sp->lpage_disallowed);
		if (sp->tdp_mmu_page) {
			flush |= kvm_tdp_mmu_zap_sp(kvm, sp);
		} else {
			kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
			WARN_ON_ONCE(sp->lpage_disallowed);
		}

		if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
			kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
			cond_resched_lock(&kvm->mmu_lock);
			flush = false;
		}
	}
	kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);

	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, rcu_idx);
}
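/*
 * Recovery-ratio arithmetic: with the default ratio of 60 and, say, 1200
 * NX-split huge pages on the list (kvm->stat.nx_lpage_splits == 1200), each
 * pass zaps
 *
 *	to_zap = DIV_ROUND_UP(1200, 60) = 20
 *
 * shadow pages, i.e. roughly 1/60 of the backlog per period, while a ratio
 * of 0 disables recovery entirely.
 */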
static long get_nx_lpage_recovery_timeout(u64 start_time)
{
	return READ_ONCE(nx_huge_pages) && READ_ONCE(nx_huge_pages_recovery_ratio)
		? start_time + 60 * HZ - get_jiffies_64()
		: MAX_SCHEDULE_TIMEOUT;
}

static int kvm_nx_lpage_recovery_worker(struct kvm *kvm, uintptr_t data)
{
	u64 start_time;
	long remaining_time;

	while (true) {
		start_time = get_jiffies_64();
		remaining_time = get_nx_lpage_recovery_timeout(start_time);

		set_current_state(TASK_INTERRUPTIBLE);
		while (!kthread_should_stop() && remaining_time > 0) {
			schedule_timeout(remaining_time);
			remaining_time = get_nx_lpage_recovery_timeout(start_time);
			set_current_state(TASK_INTERRUPTIBLE);
		}

		set_current_state(TASK_RUNNING);

		if (kthread_should_stop())
			return 0;

		kvm_recover_nx_lpages(kvm);
	}
}

int kvm_mmu_post_init_vm(struct kvm *kvm)
{
	int err;

	err = kvm_vm_create_worker_thread(kvm, kvm_nx_lpage_recovery_worker, 0,
					  "kvm-nx-lpage-recovery",
					  &kvm->arch.nx_lpage_recovery_thread);
	if (!err)
		kthread_unpark(kvm->arch.nx_lpage_recovery_thread);

	return err;
}

void kvm_mmu_pre_destroy_vm(struct kvm *kvm)
{
	if (kvm->arch.nx_lpage_recovery_thread)
		kthread_stop(kvm->arch.nx_lpage_recovery_thread);
}
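/*
 * End-to-end lifecycle of the recovery thread (all symbols above):
 *
 *	kvm_mmu_post_init_vm(kvm);	// create and unpark the
 *					// "kvm-nx-lpage-recovery" worker
 *	// ...while nx_huge_pages and the recovery ratio are non-zero, the
 *	// worker wakes every 60 * HZ jiffies and calls
 *	// kvm_recover_nx_lpages()...
 *	kvm_mmu_pre_destroy_vm(kvm);	// kthread_stop() unblocks the
 *					// worker, which then returns 0
 */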