/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_MMU_INTERNAL_H
#define __KVM_X86_MMU_INTERNAL_H

#include <linux/types.h>
#include <linux/kvm_host.h>
#include <asm/kvm_host.h>

#ifdef CONFIG_KVM_PROVE_MMU
#define KVM_MMU_WARN_ON(x) WARN_ON_ONCE(x)
#else
#define KVM_MMU_WARN_ON(x) BUILD_BUG_ON_INVALID(x)
#endif

/* Page table builder macros common to shadow (host) PTEs and guest PTEs. */
#define __PT_LEVEL_SHIFT(level, bits_per_level) \
	(PAGE_SHIFT + ((level) - 1) * (bits_per_level))
#define __PT_INDEX(address, level, bits_per_level) \
	(((address) >> __PT_LEVEL_SHIFT(level, bits_per_level)) & ((1 << (bits_per_level)) - 1))

#define __PT_LVL_ADDR_MASK(base_addr_mask, level, bits_per_level) \
	((base_addr_mask) & ~((1ULL << (PAGE_SHIFT + (((level) - 1) * (bits_per_level)))) - 1))

#define __PT_LVL_OFFSET_MASK(base_addr_mask, level, bits_per_level) \
	((base_addr_mask) & ((1ULL << (PAGE_SHIFT + (((level) - 1) * (bits_per_level)))) - 1))

#define __PT_ENT_PER_PAGE(bits_per_level) (1 << (bits_per_level))
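
/*
 * Illustrative sanity checks, not part of the original header: with 9 bits
 * per level, as used by 64-bit paging, and PAGE_SHIFT == 12 as on x86,
 * level 1 indexes address bits 12..20, level 2 indexes bits 21..29, and
 * each page table holds 512 entries.
 */
static_assert(__PT_LEVEL_SHIFT(1, 9) == 12);
static_assert(__PT_LEVEL_SHIFT(2, 9) == 21);
static_assert(__PT_ENT_PER_PAGE(9) == 512);
static_assert(__PT_INDEX(0x201000, 1, 9) == 1);	/* bits 12..20 of the address */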

/*
 * Unlike regular MMU roots, PAE "roots", a.k.a. PDPTEs/PDPTRs, have a PRESENT
 * bit, and thus are guaranteed to be non-zero when valid. And, when a guest
 * PDPTR is !PRESENT, its corresponding PAE root cannot be set to INVALID_PAGE,
 * as the CPU would treat that as a PRESENT PDPTR with reserved bits set. Use
 * '0' instead of INVALID_PAGE to indicate an invalid PAE root.
 */
#define INVALID_PAE_ROOT	0
#define IS_VALID_PAE_ROOT(x)	(!!(x))

static inline hpa_t kvm_mmu_get_dummy_root(void)
{
	return my_zero_pfn(0) << PAGE_SHIFT;
}

static inline bool kvm_mmu_is_dummy_root(hpa_t shadow_page)
{
	return is_zero_pfn(shadow_page >> PAGE_SHIFT);
}

typedef u64 __rcu *tdp_ptep_t;

struct kvm_mmu_page {
	/*
	 * Note, "link" through "spt" fit in a single 64 byte cache line on
	 * 64-bit kernels, keep it that way unless there's a reason not to.
	 */
	struct list_head link;
	struct hlist_node hash_link;

	bool tdp_mmu_page;
	bool unsync;
	union {
		u8 mmu_valid_gen;

		/* Only accessed under slots_lock. */
		bool tdp_mmu_scheduled_root_to_zap;
	};

	/*
	 * The shadow page can't be replaced by an equivalent huge page
	 * because it is being used to map an executable page in the guest
	 * and the NX huge page mitigation is enabled.
	 */
	bool nx_huge_page_disallowed;

	/*
	 * The following two entries are used to key the shadow page in the
	 * hash table.
	 */
	union kvm_mmu_page_role role;
	gfn_t gfn;

	u64 *spt;

	/*
	 * Stores the result of the guest translation being shadowed by each
	 * SPTE. KVM shadows two types of guest translations: nGPA -> GPA
	 * (shadow EPT/NPT) and GVA -> GPA (traditional shadow paging). In both
	 * cases the result of the translation is a GPA and a set of access
	 * constraints.
	 *
	 * The GFN is stored in the upper bits (shifted left by PAGE_SHIFT) and
	 * the shadowed access permissions are stored in the lower bits. Note,
	 * for convenience and uniformity across guests, the access permissions
	 * are stored in KVM format (e.g. ACC_EXEC_MASK) not the raw guest
	 * format.
	 */
	u64 *shadowed_translation;

	/* Currently serving as active root */
	union {
		int root_count;
		refcount_t tdp_mmu_root_count;
	};
	unsigned int unsync_children;
	union {
		struct kvm_rmap_head parent_ptes; /* rmap pointers to parent sptes */
		tdp_ptep_t ptep;
	};
	DECLARE_BITMAP(unsync_child_bitmap, 512);

	/*
	 * Tracks shadow pages that, if zapped, would allow KVM to create an NX
	 * huge page. A shadow page will have nx_huge_page_disallowed set but
	 * not be on the list if a huge page is disallowed for other reasons,
	 * e.g. because KVM is shadowing a PTE at the same gfn, the memslot
	 * isn't properly aligned, etc...
	 */
	struct list_head possible_nx_huge_page_link;
#ifdef CONFIG_X86_32
	/*
	 * Used out of the mmu-lock to avoid reading spte values while an
	 * update is in progress; see the comments in __get_spte_lockless().
	 */
	int clear_spte_count;
#endif

	/* Number of writes since the last time traversal visited this page. */
	atomic_t write_flooding_count;

#ifdef CONFIG_X86_64
	/* Used for freeing the page asynchronously if it is a TDP MMU page. */
	struct rcu_head rcu_head;
#endif
};
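
/*
 * Illustrative sketch, not part of the original header: how one
 * shadowed_translation entry could be packed and unpacked given the layout
 * described above. The helper names are hypothetical; KVM's real accessors
 * live in mmu.c.
 */
static inline u64 example_shadowed_entry(gfn_t gfn, unsigned int access)
{
	return ((u64)gfn << PAGE_SHIFT) | access;	/* gfn high, access low */
}

static inline gfn_t example_shadowed_gfn(u64 entry)
{
	return entry >> PAGE_SHIFT;
}

static inline unsigned int example_shadowed_access(u64 entry)
{
	return entry & (PAGE_SIZE - 1);			/* low PAGE_SHIFT bits */
}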

extern struct kmem_cache *mmu_page_header_cache;

static inline int kvm_mmu_role_as_id(union kvm_mmu_page_role role)
{
	return role.smm ? 1 : 0;
}

static inline int kvm_mmu_page_as_id(struct kvm_mmu_page *sp)
{
	return kvm_mmu_role_as_id(sp->role);
}

static inline bool kvm_mmu_page_ad_need_write_protect(struct kvm_mmu_page *sp)
{
	/*
	 * When using the EPT page-modification log, the GPAs in the CPU dirty
	 * log would come from L2 rather than L1. Therefore, we need to rely
	 * on write protection to record dirty pages, which bypasses PML, since
	 * writes now result in a vmexit. Note, the check on CPU dirty logging
	 * being enabled is mandatory as the bits used to denote WP-only SPTEs
	 * are reserved for PAE paging (32-bit KVM).
	 */
	return kvm_x86_ops.cpu_dirty_log_size && sp->role.guest_mode;
}

static inline gfn_t gfn_round_for_level(gfn_t gfn, int level)
{
	return gfn & -KVM_PAGES_PER_HPAGE(level);
}
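
/*
 * Illustrative check, not part of the original header: at PG_LEVEL_2M,
 * KVM_PAGES_PER_HPAGE() is 512, so negating it yields a mask that clears
 * the low 9 bits of the gfn, e.g. gfn 0x12345 rounds down to 0x12200.
 */
static_assert((0x12345 & -KVM_PAGES_PER_HPAGE(PG_LEVEL_2M)) == 0x12200);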

int mmu_try_to_unsync_pages(struct kvm *kvm, const struct kvm_memory_slot *slot,
			    gfn_t gfn, bool can_unsync, bool prefetch);

void kvm_mmu_gfn_disallow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn);
void kvm_mmu_gfn_allow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn);
bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
				    struct kvm_memory_slot *slot, u64 gfn,
				    int min_level);

/* Flush the given page (huge or not) of guest memory. */
static inline void kvm_flush_remote_tlbs_gfn(struct kvm *kvm, gfn_t gfn, int level)
{
	kvm_flush_remote_tlbs_range(kvm, gfn_round_for_level(gfn, level),
				    KVM_PAGES_PER_HPAGE(level));
}
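
/*
 * Illustrative example, not part of the original header: for a gfn mapped at
 * PG_LEVEL_2M, the helper above flushes the aligned 512-page region that
 * contains it, i.e. gfn 0x12345 results in a flush of gfns [0x12200, 0x12400).
 */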

unsigned int pte_list_count(struct kvm_rmap_head *rmap_head);

extern int nx_huge_pages;
static inline bool is_nx_huge_page_enabled(struct kvm *kvm)
{
	return READ_ONCE(nx_huge_pages) && !kvm->arch.disable_nx_huge_pages;
}

struct kvm_page_fault {
	/* arguments to kvm_mmu_do_page_fault. */
	const gpa_t addr;
	const u32 error_code;
	const bool prefetch;

	/* Derived from error_code. */
	const bool exec;
	const bool write;
	const bool present;
	const bool rsvd;
	const bool user;

	/* Derived from mmu and global state. */
	const bool is_tdp;
	const bool nx_huge_page_workaround_enabled;

	/*
	 * Whether a >4KB mapping can be created or is forbidden due to NX
	 * hugepages.
	 */
	bool huge_page_disallowed;

	/*
	 * Maximum page size that can be created for this fault; input to
	 * FNAME(fetch), direct_map() and kvm_tdp_mmu_map().
	 */
	u8 max_level;

	/*
	 * Page size that can be created based on the max_level and the
	 * page size used by the host mapping.
	 */
	u8 req_level;

	/*
	 * Page size that will be created based on the req_level and
	 * huge_page_disallowed.
	 */
	u8 goal_level;

	/* Shifted addr, or result of guest page table walk if addr is a gva. */
	gfn_t gfn;

	/* The memslot containing gfn. May be NULL. */
	struct kvm_memory_slot *slot;

	/* Outputs of kvm_faultin_pfn. */
	unsigned long mmu_seq;
	kvm_pfn_t pfn;
	hva_t hva;
	bool map_writable;

	/*
	 * Indicates the guest is trying to write a gfn that contains one or
	 * more of the PTEs used to translate the write itself, i.e. the access
	 * is changing its own translation in the guest page tables.
	 */
	bool write_fault_to_shadow_pgtable;
};

int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);

/*
 * Return values of handle_mmio_page_fault(), mmu.page_fault(), fast_page_fault(),
 * and of course kvm_mmu_do_page_fault().
 *
 * RET_PF_CONTINUE: So far, so good, keep handling the page fault.
 * RET_PF_RETRY: let the CPU fault again on the address.
 * RET_PF_EMULATE: mmio page fault, emulate the instruction directly.
 * RET_PF_INVALID: the spte is invalid, let the real page fault path update it.
 * RET_PF_FIXED: The faulting entry has been fixed.
 * RET_PF_SPURIOUS: The faulting entry was already fixed, e.g. by another vCPU.
 *
 * Any names added to this enum should be exported to userspace for use in
 * tracepoints via TRACE_DEFINE_ENUM() in mmutrace.h.
 *
 * Note, all values must be greater than or equal to zero so as not to encroach
 * on -errno return values. Somewhat arbitrarily use '0' for CONTINUE, which
 * allows for efficient machine code when checking for CONTINUE, e.g.
 * "TEST %rax, %rax; JNZ", as all "stop!" values are non-zero.
 */
enum {
	RET_PF_CONTINUE = 0,
	RET_PF_RETRY,
	RET_PF_EMULATE,
	RET_PF_INVALID,
	RET_PF_FIXED,
	RET_PF_SPURIOUS,
};
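
/*
 * Illustrative check, not part of the original header: the "efficient machine
 * code" note above relies on CONTINUE being the only zero value.
 */
static_assert(RET_PF_CONTINUE == 0);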

static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
					u32 err, bool prefetch, int *emulation_type)
{
	struct kvm_page_fault fault = {
		.addr = cr2_or_gpa,
		.error_code = err,
		.exec = err & PFERR_FETCH_MASK,
		.write = err & PFERR_WRITE_MASK,
		.present = err & PFERR_PRESENT_MASK,
		.rsvd = err & PFERR_RSVD_MASK,
		.user = err & PFERR_USER_MASK,
		.prefetch = prefetch,
		.is_tdp = likely(vcpu->arch.mmu->page_fault == kvm_tdp_page_fault),
		.nx_huge_page_workaround_enabled =
			is_nx_huge_page_enabled(vcpu->kvm),

		.max_level = KVM_MAX_HUGEPAGE_LEVEL,
		.req_level = PG_LEVEL_4K,
		.goal_level = PG_LEVEL_4K,
	};
	int r;

	if (vcpu->arch.mmu->root_role.direct) {
		fault.gfn = fault.addr >> PAGE_SHIFT;
		fault.slot = kvm_vcpu_gfn_to_memslot(vcpu, fault.gfn);
	}

	/*
	 * Async #PF "faults", a.k.a. prefetch faults, are not faults from the
	 * guest perspective and have already been counted at the time of the
	 * original fault.
	 */
	if (!prefetch)
		vcpu->stat.pf_taken++;

	if (IS_ENABLED(CONFIG_RETPOLINE) && fault.is_tdp)
		r = kvm_tdp_page_fault(vcpu, &fault);
	else
		r = vcpu->arch.mmu->page_fault(vcpu, &fault);

	if (fault.write_fault_to_shadow_pgtable && emulation_type)
		*emulation_type |= EMULTYPE_WRITE_PF_TO_SP;

	/*
	 * Similar to above, prefetch faults aren't truly spurious, and the
	 * async #PF path doesn't do emulation. Do count faults that are fixed
	 * by the async #PF handler though, otherwise they'll never be counted.
	 */
	if (r == RET_PF_FIXED)
		vcpu->stat.pf_fixed++;
	else if (prefetch)
		;
	else if (r == RET_PF_EMULATE)
		vcpu->stat.pf_emulate++;
	else if (r == RET_PF_SPURIOUS)
		vcpu->stat.pf_spurious++;
	return r;
}

int kvm_mmu_max_mapping_level(struct kvm *kvm,
			      const struct kvm_memory_slot *slot, gfn_t gfn,
			      int max_level);
void kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);
void disallowed_hugepage_adjust(struct kvm_page_fault *fault, u64 spte, int cur_level);

void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc);

void track_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp);
void untrack_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp);

#endif /* __KVM_X86_MMU_INTERNAL_H */