// SPDX-License-Identifier: GPL-2.0

#ifndef __KVM_X86_MMU_TDP_MMU_H
#define __KVM_X86_MMU_TDP_MMU_H

#include <linux/kvm_host.h>

#include "spte.h"

void kvm_mmu_init_tdp_mmu(struct kvm *kvm);
void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm);

hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu);

/*
 * Take a reference on @root. Returns false if @root's refcount has already
 * dropped to zero, in which case the root is being freed and must not be
 * used.
 */
__must_check static inline bool kvm_tdp_mmu_get_root(struct kvm_mmu_page *root)
{
	return refcount_inc_not_zero(&root->tdp_mmu_root_count);
}

void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
			  bool shared);

/* Zap (remove) SPTEs: leaf SPTEs in a GFN range, a single SP, or all roots. */
bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, gfn_t start, gfn_t end, bool flush);
bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp);
void kvm_tdp_mmu_zap_all(struct kvm *kvm);
void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm);
void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm);

int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);

/* MMU notifier hooks, invoked in response to changes in the primary MMU. */
bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
				 bool flush);
bool kvm_tdp_mmu_age_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
bool kvm_tdp_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
bool kvm_tdp_mmu_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range);

/* Dirty logging: write-protect SPTEs and clear/collect dirty state. */
bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm,
			     const struct kvm_memory_slot *slot, int min_level);
bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm,
				  const struct kvm_memory_slot *slot);
void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
				       struct kvm_memory_slot *slot,
				       gfn_t gfn, unsigned long mask,
				       bool wrprot);
void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
				       const struct kvm_memory_slot *slot);

bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
				   struct kvm_memory_slot *slot, gfn_t gfn,
				   int min_level);

void kvm_tdp_mmu_try_split_huge_pages(struct kvm *kvm,
				      const struct kvm_memory_slot *slot,
				      gfn_t start, gfn_t end,
				      int target_level, bool shared);

/* TDP MMU SPTEs can be walked without holding mmu_lock, protected by RCU. */
static inline void kvm_tdp_mmu_walk_lockless_begin(void)
{
	rcu_read_lock();
}

static inline void kvm_tdp_mmu_walk_lockless_end(void)
{
	rcu_read_unlock();
}

int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
			 int *root_level);
u64 *kvm_tdp_mmu_fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, u64 addr,
					u64 *spte);

#ifdef CONFIG_X86_64
static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return sp->tdp_mmu_page; }
#else
static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return false; }
#endif

#endif /* __KVM_X86_MMU_TDP_MMU_H */
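
/*
 * Illustrative sketch (not part of the upstream header): how a caller might
 * pair kvm_tdp_mmu_get_root() with kvm_tdp_mmu_put_root(). The acquire can
 * fail if the root's refcount has already hit zero, so the return value must
 * be checked before the root is used. The function name example_with_root()
 * is hypothetical; the pattern loosely mirrors root iteration in tdp_mmu.c.
 *
 *	static void example_with_root(struct kvm *kvm, struct kvm_mmu_page *root)
 *	{
 *		// Bail if the root is already being torn down.
 *		if (!kvm_tdp_mmu_get_root(root))
 *			return;
 *
 *		// ... operate on @root, e.g. with mmu_lock held for read ...
 *
 *		// shared == true: mmu_lock is held for read, not write.
 *		kvm_tdp_mmu_put_root(kvm, root, true);
 *	}
 */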
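
/*
 * Illustrative sketch (not part of the upstream header): dumping the SPTEs
 * that map a guest address, modeled on how get_mmio_spte() in mmu.c consumes
 * kvm_tdp_mmu_get_walk(). The walk runs under RCU via the lockless begin/end
 * helpers; sptes[] is indexed by level, and the return value is the leaf
 * level, or -1 if no SPTE was found. example_dump_sptes() is hypothetical.
 *
 *	static void example_dump_sptes(struct kvm_vcpu *vcpu, u64 addr)
 *	{
 *		u64 sptes[PT64_ROOT_MAX_LEVEL + 1];
 *		int root_level, leaf, level;
 *
 *		kvm_tdp_mmu_walk_lockless_begin();
 *		leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes, &root_level);
 *		kvm_tdp_mmu_walk_lockless_end();
 *
 *		if (leaf < 0)
 *			return;
 *
 *		// sptes[] is a local snapshot, safe to read after the walk.
 *		for (level = root_level; level >= leaf; level--)
 *			pr_info("level %d: spte = 0x%llx\n", level, sptes[level]);
 *	}
 */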