Lines matching defs:kvm in the TDP MMU code (each numbered entry is the source line number followed by the matching line):

25 void kvm_mmu_init_tdp_mmu(struct kvm *kvm)
31 kvm->arch.tdp_mmu_enabled = true;
33 INIT_LIST_HEAD(&kvm->arch.tdp_mmu_roots);
34 INIT_LIST_HEAD(&kvm->arch.tdp_mmu_pages);
37 void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
39 if (!kvm->arch.tdp_mmu_enabled)
42 WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots));
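The matches at lines 25-42 above are the TDP MMU setup and teardown entry points. Below is a minimal sketch of how those lines plausibly fit together; the guard at the top of the init path is an assumption (labelled hypothetical), since the enabling condition itself does not match defs:kvm and is not part of this listing.

void kvm_mmu_init_tdp_mmu(struct kvm *kvm)
{
        /* Assumed guard: do nothing if the TDP MMU cannot be used for this VM. */
        if (!tdp_mmu_allowed(kvm))              /* hypothetical predicate */
                return;

        /* Line 31: mark the VM as using the TDP MMU for its whole lifetime. */
        kvm->arch.tdp_mmu_enabled = true;

        /* Lines 33-34: start with empty lists of TDP MMU roots and pages. */
        INIT_LIST_HEAD(&kvm->arch.tdp_mmu_roots);
        INIT_LIST_HEAD(&kvm->arch.tdp_mmu_pages);
}

void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
{
        /* Line 39: nothing to tear down if the TDP MMU was never enabled. */
        if (!kvm->arch.tdp_mmu_enabled)
                return;

        /* Line 42: every root must have been freed before the VM is destroyed. */
        WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots));
}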
45 static void tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root)
47 if (kvm_mmu_put_root(kvm, root))
48 kvm_tdp_mmu_free_root(kvm, root);
51 static inline bool tdp_mmu_next_root_valid(struct kvm *kvm,
54 lockdep_assert_held(&kvm->mmu_lock);
56 if (list_entry_is_head(root, &kvm->arch.tdp_mmu_roots, link))
59 kvm_mmu_get_root(kvm, root);
64 static inline struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
70 tdp_mmu_put_root(kvm, root);
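Lines 45-70 belong to the reference-counted root walk. The sketch below shows the usual shape of that pattern, including the for_each_tdp_mmu_root_yield_safe() macro that later matches (for example line 504) rely on; the macro body and the list_first_entry()/list_next_entry() plumbing are reconstructed from the two helpers, not copied from the file.

/* Sketch: drop a reference, freeing the root once the last user is gone. */
static void tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root)
{
        if (kvm_mmu_put_root(kvm, root))
                kvm_tdp_mmu_free_root(kvm, root);
}

/* Sketch: stop at the list head, otherwise pin the candidate root (line 59). */
static inline bool tdp_mmu_next_root_valid(struct kvm *kvm,
                                           struct kvm_mmu_page *root)
{
        lockdep_assert_held(&kvm->mmu_lock);

        if (list_entry_is_head(root, &kvm->arch.tdp_mmu_roots, link))
                return false;

        kvm_mmu_get_root(kvm, root);
        return true;
}

/* Sketch: fetch the next root before dropping the current one (line 70). */
static inline struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
                                                     struct kvm_mmu_page *root)
{
        struct kvm_mmu_page *next_root = list_next_entry(root, link);

        tdp_mmu_put_root(kvm, root);
        return next_root;
}

/* Assumed macro built on the helpers: safe against the current root going away. */
#define for_each_tdp_mmu_root_yield_safe(_kvm, _root)                          \
        for (_root = list_first_entry(&(_kvm)->arch.tdp_mmu_roots,             \
                                      typeof(*_root), link);                   \
             tdp_mmu_next_root_valid(_kvm, _root);                             \
             _root = tdp_mmu_next_root(_kvm, _root))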
89 bool is_tdp_mmu_root(struct kvm *kvm, hpa_t hpa)
93 if (!kvm->arch.tdp_mmu_enabled)
105 static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
108 void kvm_tdp_mmu_free_root(struct kvm *kvm, struct kvm_mmu_page *root)
112 lockdep_assert_held(&kvm->mmu_lock);
119 zap_gfn_range(kvm, root, 0, max_gfn, false, false);
158 struct kvm *kvm = vcpu->kvm;
163 spin_lock(&kvm->mmu_lock);
166 for_each_tdp_mmu_root(kvm, root) {
168 kvm_mmu_get_root(kvm, root);
169 spin_unlock(&kvm->mmu_lock);
177 list_add(&root->link, &kvm->arch.tdp_mmu_roots);
179 spin_unlock(&kvm->mmu_lock);
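Lines 158-179 come from the path that gives a vCPU its TDP MMU root: under mmu_lock, reuse an existing root when one matches, otherwise publish a freshly allocated one on tdp_mmu_roots. The helper name and the two inner calls marked hypothetical below do not appear in this listing and stand in for the real matching/allocation logic.

/* Sketch with a hypothetical name; only the annotated lines are from the listing. */
static struct kvm_mmu_page *tdp_mmu_get_vcpu_root(struct kvm_vcpu *vcpu)
{
        struct kvm *kvm = vcpu->kvm;                    /* line 158 */
        struct kvm_mmu_page *root;

        spin_lock(&kvm->mmu_lock);                      /* line 163 */

        /* Reuse a compatible existing root, taking a reference on it. */
        for_each_tdp_mmu_root(kvm, root) {              /* line 166 */
                if (root_matches_vcpu(vcpu, root)) {    /* hypothetical test */
                        kvm_mmu_get_root(kvm, root);    /* line 168 */
                        spin_unlock(&kvm->mmu_lock);    /* line 169 */
                        return root;
                }
        }

        /* Otherwise allocate a new root page and publish it on the list. */
        root = alloc_tdp_mmu_root(vcpu);                /* hypothetical allocator */
        list_add(&root->link, &kvm->arch.tdp_mmu_roots);        /* line 177 */

        spin_unlock(&kvm->mmu_lock);                    /* line 179 */
        return root;
}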
195 static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
215 static void handle_changed_spte_dirty_log(struct kvm *kvm, int as_id, gfn_t gfn,
228 slot = __gfn_to_memslot(__kvm_memslots(kvm, as_id), gfn);
235 * @kvm: kvm instance
245 static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
325 unaccount_huge_nx_page(kvm, sp);
330 handle_changed_spte(kvm, as_id,
335 kvm_flush_remote_tlbs_with_address(kvm, gfn,
343 static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
346 __handle_changed_spte(kvm, as_id, gfn, old_spte, new_spte, level);
348 handle_changed_spte_dirty_log(kvm, as_id, gfn, old_spte,
352 static inline void __tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
362 __handle_changed_spte(kvm, as_id, iter->gfn, iter->old_spte, new_spte,
368 handle_changed_spte_dirty_log(kvm, as_id, iter->gfn,
373 static inline void tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
376 __tdp_mmu_set_spte(kvm, iter, new_spte, true, true);
379 static inline void tdp_mmu_set_spte_no_acc_track(struct kvm *kvm,
383 __tdp_mmu_set_spte(kvm, iter, new_spte, false, true);
386 static inline void tdp_mmu_set_spte_no_dirty_log(struct kvm *kvm,
390 __tdp_mmu_set_spte(kvm, iter, new_spte, true, false);
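Lines 343-390 show how one low-level write path fans out: handle_changed_spte() forwards to __handle_changed_spte() and, when needed, to handle_changed_spte_dirty_log(), while the three tdp_mmu_set_spte*() wrappers differ only in which bookkeeping they ask __tdp_mmu_set_spte() to perform. A condensed sketch of the wrapper layer, assuming the (record_acc_track, record_dirty_log) flag order implied by lines 376-390 and a u64 SPTE type:

/* Sketch: one writer, three thin wrappers selecting the side effects. */
static inline void tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
                                    u64 new_spte)
{
        /* Line 376: record both access tracking and dirty logging. */
        __tdp_mmu_set_spte(kvm, iter, new_spte, true, true);
}

static inline void tdp_mmu_set_spte_no_acc_track(struct kvm *kvm,
                                                 struct tdp_iter *iter,
                                                 u64 new_spte)
{
        /* Line 383: skip access tracking (used by the aging path). */
        __tdp_mmu_set_spte(kvm, iter, new_spte, false, true);
}

static inline void tdp_mmu_set_spte_no_dirty_log(struct kvm *kvm,
                                                 struct tdp_iter *iter,
                                                 u64 new_spte)
{
        /* Line 390: skip dirty-log updates (used by the dirty-logging paths). */
        __tdp_mmu_set_spte(kvm, iter, new_spte, true, false);
}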
422 static inline bool tdp_mmu_iter_cond_resched(struct kvm *kvm,
429 if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
431 kvm_flush_remote_tlbs(kvm);
433 cond_resched_lock(&kvm->mmu_lock);
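Lines 422-433 are the yield point used by the long-running walks further down: if a reschedule is due or mmu_lock is contended, optionally flush TLBs and drop/retake the lock. A stripped-down sketch that leaves out the forward-progress bookkeeping on the iterator which the real function also performs:

/* Sketch: yield mmu_lock mid-walk when something else needs the CPU or the lock. */
static inline bool tdp_mmu_iter_cond_resched(struct kvm *kvm,
                                             struct tdp_iter *iter, bool flush)
{
        if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {        /* line 429 */
                /* Make pending zaps visible before letting anyone else run. */
                if (flush)
                        kvm_flush_remote_tlbs(kvm);                    /* line 431 */

                cond_resched_lock(&kvm->mmu_lock);                     /* line 433 */

                /* The real function also restarts the iterator at this point. */
                return true;
        }

        return false;
}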
460 static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
467 tdp_mmu_iter_cond_resched(kvm, &iter, flush)) {
485 tdp_mmu_set_spte(kvm, &iter, 0);
498 bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, gfn_t start, gfn_t end,
504 for_each_tdp_mmu_root_yield_safe(kvm, root)
505 flush = zap_gfn_range(kvm, root, start, end, can_yield, flush);
510 void kvm_tdp_mmu_zap_all(struct kvm *kvm)
515 flush = kvm_tdp_mmu_zap_gfn_range(kvm, 0, max_gfn);
517 kvm_flush_remote_tlbs(kvm);
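Lines 460-517 cover the zap paths: zap_gfn_range() does the per-root work, __kvm_tdp_mmu_zap_gfn_range() applies it to every root via the yield-safe iterator, and kvm_tdp_mmu_zap_all() zaps the entire guest physical range and flushes if anything was removed. A sketch of the top of that stack; the trailing can_yield parameter, the local flush accumulator, and the max_gfn helper are inferred or hypothetical, since the corresponding lines do not match defs:kvm:

/* Sketch: zap [start, end) in every TDP MMU root, reporting whether to flush. */
bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, gfn_t start, gfn_t end,
                                 bool can_yield)
{
        struct kvm_mmu_page *root;
        bool flush = false;

        for_each_tdp_mmu_root_yield_safe(kvm, root)                     /* line 504 */
                flush = zap_gfn_range(kvm, root, start, end, can_yield, flush);

        return flush;
}

void kvm_tdp_mmu_zap_all(struct kvm *kvm)
{
        /* Assumed bound: one past the highest possible guest frame number. */
        gfn_t max_gfn = tdp_mmu_max_gfn();              /* hypothetical helper */
        bool flush;

        flush = kvm_tdp_mmu_zap_gfn_range(kvm, 0, max_gfn);             /* line 515 */
        if (flush)
                kvm_flush_remote_tlbs(kvm);                             /* line 517 */
}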
545 tdp_mmu_set_spte(vcpu->kvm, iter, new_spte);
593 if (WARN_ON(!is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa)))
615 tdp_mmu_set_spte(vcpu->kvm, &iter, 0);
617 kvm_flush_remote_tlbs_with_address(vcpu->kvm, iter.gfn,
630 list_add(&sp->link, &vcpu->kvm->arch.tdp_mmu_pages);
638 account_huge_nx_page(vcpu->kvm, sp);
640 tdp_mmu_set_spte(vcpu->kvm, &iter, new_spte);
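Lines 545-640 sit inside the page-fault mapping path. The step around lines 630-640 installs a missing inner page table: track the new shadow page on the VM-wide list, account it as an NX-hugepage split when that is what forced it, then publish the non-leaf SPTE. The helper below is a hypothetical repackaging of just that step, not a function that exists in the file:

static void tdp_mmu_link_new_page_table(struct kvm_vcpu *vcpu,
                                        struct tdp_iter *iter,
                                        struct kvm_mmu_page *sp,
                                        u64 new_spte, bool huge_nx_split)
{
        /* Line 630: track the new shadow page on the VM-wide TDP MMU list. */
        list_add(&sp->link, &vcpu->kvm->arch.tdp_mmu_pages);

        /* Line 638: NX-hugepage accounting when the split was forced. */
        if (huge_nx_split)
                account_huge_nx_page(vcpu->kvm, sp);

        /* Line 640: publish the non-leaf SPTE pointing at the new table. */
        tdp_mmu_set_spte(vcpu->kvm, iter, new_spte);
}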
653 static int kvm_tdp_mmu_handle_hva_range(struct kvm *kvm, unsigned long start,
655 int (*handler)(struct kvm *kvm, struct kvm_memory_slot *slot,
665 for_each_tdp_mmu_root_yield_safe(kvm, root) {
667 slots = __kvm_memslots(kvm, as_id);
684 ret |= handler(kvm, memslot, root, gfn_start,
692 static int zap_gfn_range_hva_wrapper(struct kvm *kvm,
697 return zap_gfn_range(kvm, root, start, end, false, false);
700 int kvm_tdp_mmu_zap_hva_range(struct kvm *kvm, unsigned long start,
703 return kvm_tdp_mmu_handle_hva_range(kvm, start, end, 0,
711 static int age_gfn_range(struct kvm *kvm, struct kvm_memory_slot *slot,
744 tdp_mmu_set_spte_no_acc_track(kvm, &iter, new_spte);
751 int kvm_tdp_mmu_age_hva_range(struct kvm *kvm, unsigned long start,
754 return kvm_tdp_mmu_handle_hva_range(kvm, start, end, 0,
758 static int test_age_gfn(struct kvm *kvm, struct kvm_memory_slot *slot,
771 int kvm_tdp_mmu_test_age_hva(struct kvm *kvm, unsigned long hva)
773 return kvm_tdp_mmu_handle_hva_range(kvm, hva, hva + 1, 0,
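Lines 653-773 are the MMU-notifier plumbing: kvm_tdp_mmu_handle_hva_range() walks every root, maps the hva range onto the overlapping memslots of that root's address space, and invokes a per-range handler (zap_gfn_range_hva_wrapper, age_gfn_range, test_age_gfn, and set_tdp_spte further down). A skeleton of that dispatch loop; the handler signature continuation, the as_id helper, and the hva-to-gfn clipping are assumptions summarized rather than reproduced:

/* Sketch: fan an hva range out to every root and every overlapping memslot. */
static int kvm_tdp_mmu_handle_hva_range(struct kvm *kvm, unsigned long start,
                unsigned long end, unsigned long data,
                int (*handler)(struct kvm *kvm, struct kvm_memory_slot *slot,
                               struct kvm_mmu_page *root, gfn_t start,
                               gfn_t end, unsigned long data))
{
        struct kvm_memslots *slots;
        struct kvm_memory_slot *memslot;
        struct kvm_mmu_page *root;
        int ret = 0;
        int as_id;

        for_each_tdp_mmu_root_yield_safe(kvm, root) {           /* line 665 */
                as_id = kvm_mmu_page_as_id(root);               /* assumed helper */
                slots = __kvm_memslots(kvm, as_id);             /* line 667 */

                kvm_for_each_memslot(memslot, slots) {
                        gfn_t gfn_start, gfn_end;

                        /*
                         * Clip [start, end) against the memslot's userspace
                         * range and convert the overlap to gfns (details
                         * omitted); skip slots with no overlap.
                         */
                        if (!hva_range_to_gfns(memslot, start, end,     /* hypothetical */
                                               &gfn_start, &gfn_end))
                                continue;

                        ret |= handler(kvm, memslot, root, gfn_start,   /* line 684 */
                                       gfn_end, data);
                }
        }

        return ret;
}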
783 static int set_tdp_spte(struct kvm *kvm, struct kvm_memory_slot *slot,
804 tdp_mmu_set_spte(kvm, &iter, 0);
806 kvm_flush_remote_tlbs_with_address(kvm, iter.gfn, 1);
812 tdp_mmu_set_spte(kvm, &iter, new_spte);
819 kvm_flush_remote_tlbs_with_address(kvm, gfn, 1);
824 int kvm_tdp_mmu_set_spte_hva(struct kvm *kvm, unsigned long address,
827 return kvm_tdp_mmu_handle_hva_range(kvm, address, address + 1,
837 static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
848 if (tdp_mmu_iter_cond_resched(kvm, &iter, false))
857 tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte);
868 bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm, struct kvm_memory_slot *slot,
875 for_each_tdp_mmu_root_yield_safe(kvm, root) {
880 spte_set |= wrprot_gfn_range(kvm, root, slot->base_gfn,
894 static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
902 if (tdp_mmu_iter_cond_resched(kvm, &iter, false))
920 tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte);
933 bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm, struct kvm_memory_slot *slot)
939 for_each_tdp_mmu_root_yield_safe(kvm, root) {
944 spte_set |= clear_dirty_gfn_range(kvm, root, slot->base_gfn,
958 static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root,
985 tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte);
998 void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
1006 lockdep_assert_held(&kvm->mmu_lock);
1007 for_each_tdp_mmu_root(kvm, root) {
1012 clear_dirty_pt_masked(kvm, root, gfn, mask, wrprot);
1021 static bool set_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
1029 if (tdp_mmu_iter_cond_resched(kvm, &iter, false))
1037 tdp_mmu_set_spte(kvm, &iter, new_spte);
1049 bool kvm_tdp_mmu_slot_set_dirty(struct kvm *kvm, struct kvm_memory_slot *slot)
1055 for_each_tdp_mmu_root_yield_safe(kvm, root) {
1060 spte_set |= set_dirty_gfn_range(kvm, root, slot->base_gfn,
1070 static void zap_collapsible_spte_range(struct kvm *kvm,
1079 if (tdp_mmu_iter_cond_resched(kvm, &iter, spte_set)) {
1094 tdp_mmu_set_spte(kvm, &iter, 0);
1100 kvm_flush_remote_tlbs(kvm);
1107 void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
1113 for_each_tdp_mmu_root_yield_safe(kvm, root) {
1118 zap_collapsible_spte_range(kvm, root, slot->base_gfn,
1128 static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root,
1142 tdp_mmu_set_spte(kvm, &iter, new_spte);
1154 bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
1161 lockdep_assert_held(&kvm->mmu_lock);
1162 for_each_tdp_mmu_root(kvm, root) {
1167 spte_set |= write_protect_gfn(kvm, root, gfn);
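The final group (lines 1128-1167), like the dirty-logging helpers above it, follows the per-root pattern: assert mmu_lock, iterate the roots, and accumulate whether any SPTE changed so the caller knows to flush. A sketch of the exported wrapper; the parameter list continuation is inferred, and the real loop additionally skips roots whose address space does not match the slot's (compare lines 1006-1012), a filter this sketch only notes in a comment:

/* Sketch: write-protect one gfn in every relevant root, report if anything changed. */
bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
                                   struct kvm_memory_slot *slot, gfn_t gfn)
{
        struct kvm_mmu_page *root;
        bool spte_set = false;

        lockdep_assert_held(&kvm->mmu_lock);            /* line 1161 */
        for_each_tdp_mmu_root(kvm, root) {              /* line 1162 */
                /* Real code: skip roots outside the slot's address space. */
                spte_set |= write_protect_gfn(kvm, root, gfn);  /* line 1167 */
        }

        return spte_set;
}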