Lines matching defs:kvm (symbol-definition search; each entry is the matching source line number followed by that line's text)
12 #include <trace/events/kvm.h>
15 void kvm_mmu_init_tdp_mmu(struct kvm *kvm)
17 INIT_LIST_HEAD(&kvm->arch.tdp_mmu_roots);
18 spin_lock_init(&kvm->arch.tdp_mmu_pages_lock);
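The two matched lines at 17-18 are the body of the TDP MMU initializer; a minimal reconstruction from just those matches, assuming nothing else sits between them:

void kvm_mmu_init_tdp_mmu(struct kvm *kvm)
{
        /* Roots live on an RCU-protected list (see list_add_rcu at line 251)... */
        INIT_LIST_HEAD(&kvm->arch.tdp_mmu_roots);
        /* ...with writers serialized by a dedicated spinlock. */
        spin_lock_init(&kvm->arch.tdp_mmu_pages_lock);
}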
22 static __always_inline bool kvm_lockdep_assert_mmu_lock_held(struct kvm *kvm,
26 lockdep_assert_held_read(&kvm->mmu_lock);
28 lockdep_assert_held_write(&kvm->mmu_lock);
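Lines 22/26/28 are fragments of the lock-assertion helper; a sketch of the whole helper, with the branch structure and the return value assumed from the two lockdep calls:

static __always_inline bool kvm_lockdep_assert_mmu_lock_held(struct kvm *kvm,
                                                             bool shared)
{
        /* "shared" walks run with mmu_lock held for read... */
        if (shared)
                lockdep_assert_held_read(&kvm->mmu_lock);
        else    /* ...exclusive walks with mmu_lock held for write. */
                lockdep_assert_held_write(&kvm->mmu_lock);

        return true;
}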
33 void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
40 kvm_tdp_mmu_invalidate_all_roots(kvm);
41 kvm_tdp_mmu_zap_invalidated_roots(kvm);
43 WARN_ON(atomic64_read(&kvm->arch.tdp_mmu_pages));
44 WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots));
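A hedged reconstruction of the teardown path from the matches at 33-44; the ordering (invalidate, reap, then sanity checks) follows the listing, while the trailing rcu_barrier() is an assumption about the unmatched tail of the function:

void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
{
        /* Invalidate every root, then zap and drop the invalidated roots. */
        kvm_tdp_mmu_invalidate_all_roots(kvm);
        kvm_tdp_mmu_zap_invalidated_roots(kvm);

        /* At this point no TDP MMU pages and no roots may remain. */
        WARN_ON(atomic64_read(&kvm->arch.tdp_mmu_pages));
        WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots));

        /* Assumed: wait out RCU callbacks that free shadow pages. */
        rcu_barrier();
}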
76 void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
79 kvm_lockdep_assert_mmu_lock_held(kvm, shared);
89 KVM_BUG_ON(!is_tdp_mmu_page(root) || !root->role.invalid, kvm);
91 spin_lock(&kvm->arch.tdp_mmu_pages_lock);
93 spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
107 static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
116 next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
120 next_root = list_first_or_null_rcu(&kvm->arch.tdp_mmu_roots,
128 next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
135 kvm_tdp_mmu_put_root(kvm, prev_root, shared);
223 struct kvm *kvm = vcpu->kvm;
226 lockdep_assert_held_write(&kvm->mmu_lock);
232 for_each_tdp_mmu_root(kvm, root, kvm_mmu_role_as_id(role)) {
250 spin_lock(&kvm->arch.tdp_mmu_pages_lock);
251 list_add_rcu(&root->link, &kvm->arch.tdp_mmu_roots);
252 spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
258 static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
262 static void tdp_account_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
265 atomic64_inc(&kvm->arch.tdp_mmu_pages);
268 static void tdp_unaccount_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
271 atomic64_dec(&kvm->arch.tdp_mmu_pages);
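Lines 262-271 show the per-VM page accounting pair; a minimal sketch built only from the matched lines (any additional accounting the real helpers do falls outside this listing):

static void tdp_account_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
        /* The TDP MMU page count is kept lock-free in an atomic64. */
        atomic64_inc(&kvm->arch.tdp_mmu_pages);
}

static void tdp_unaccount_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
        atomic64_dec(&kvm->arch.tdp_mmu_pages);
}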
277 * @kvm: kvm instance
283 static void tdp_mmu_unlink_sp(struct kvm *kvm, struct kvm_mmu_page *sp,
286 tdp_unaccount_mmu_page(kvm, sp);
292 spin_lock(&kvm->arch.tdp_mmu_pages_lock);
294 lockdep_assert_held_write(&kvm->mmu_lock);
297 untrack_possible_nx_huge_page(kvm, sp);
300 spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
306 * @kvm: kvm instance
320 static void handle_removed_pt(struct kvm *kvm, tdp_ptep_t pt, bool shared)
329 tdp_mmu_unlink_sp(kvm, sp, shared);
396 handle_changed_spte(kvm, kvm_mmu_page_as_id(sp), gfn,
405 * @kvm: kvm instance
419 static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
490 kvm_update_page_stats(kvm, level, is_leaf ? 1 : -1);
504 handle_removed_pt(kvm, spte_to_child_pt(old_spte, level), shared);
519 * @kvm: kvm instance
528 static inline int tdp_mmu_set_spte_atomic(struct kvm *kvm,
542 lockdep_assert_held_read(&kvm->mmu_lock);
554 handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte,
560 static inline int tdp_mmu_zap_spte_atomic(struct kvm *kvm,
571 ret = tdp_mmu_set_spte_atomic(kvm, iter, REMOVED_SPTE);
575 kvm_flush_remote_tlbs_gfn(kvm, iter->gfn, iter->level);
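Lines 560-575 outline the atomic zap: freeze the SPTE with a special non-present value, flush, then clear. A sketch; the final write back to a plain non-present SPTE is an assumption about the unmatched tail of the helper:

static inline int tdp_mmu_zap_spte_atomic(struct kvm *kvm,
                                          struct tdp_iter *iter)
{
        int ret;

        /*
         * Freeze the SPTE by atomically replacing it with REMOVED_SPTE so no
         * other walker can install a new translation before the flush below.
         */
        ret = tdp_mmu_set_spte_atomic(kvm, iter, REMOVED_SPTE);
        if (ret)
                return ret;

        kvm_flush_remote_tlbs_gfn(kvm, iter->gfn, iter->level);

        /* Assumed: drop the frozen SPTE to a plain non-present value. */
        __kvm_tdp_mmu_write_spte(iter->sptep, 0);

        return 0;
}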
592 * @kvm: KVM instance
603 static u64 tdp_mmu_set_spte(struct kvm *kvm, int as_id, tdp_ptep_t sptep,
606 lockdep_assert_held_write(&kvm->mmu_lock);
619 handle_changed_spte(kvm, as_id, gfn, old_spte, new_spte, level, false);
623 static inline void tdp_mmu_iter_set_spte(struct kvm *kvm, struct tdp_iter *iter,
627 iter->old_spte = tdp_mmu_set_spte(kvm, iter->as_id, iter->sptep,
659 static inline bool __must_check tdp_mmu_iter_cond_resched(struct kvm *kvm,
669 if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
671 kvm_flush_remote_tlbs(kvm);
676 cond_resched_rwlock_read(&kvm->mmu_lock);
678 cond_resched_rwlock_write(&kvm->mmu_lock);
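Lines 659-678 sketch the yield helper used by the long-running walkers: if a reschedule or lock contention is pending, optionally flush TLBs and yield the MMU lock in the matching read/write mode. A reconstruction; the RCU bracketing and forward-progress tracking of the real helper are assumed/elided:

static inline bool __must_check tdp_mmu_iter_cond_resched(struct kvm *kvm,
                                                          struct tdp_iter *iter,
                                                          bool flush, bool shared)
{
        if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
                /* Flush first if the caller has pending zaps to publish. */
                if (flush)
                        kvm_flush_remote_tlbs(kvm);

                /* Yield in the same mode the walk holds mmu_lock. */
                if (shared)
                        cond_resched_rwlock_read(&kvm->mmu_lock);
                else
                        cond_resched_rwlock_write(&kvm->mmu_lock);

                /* Assumed: the real helper also marks the iterator as yielded. */
                return true;
        }

        return false;
}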
701 static void __tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root,
711 if (tdp_mmu_iter_cond_resched(kvm, &iter, false, shared))
721 tdp_mmu_iter_set_spte(kvm, &iter, 0);
722 else if (tdp_mmu_set_spte_atomic(kvm, &iter, 0))
727 static void tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root,
743 kvm_lockdep_assert_mmu_lock_held(kvm, shared);
757 __tdp_mmu_zap_root(kvm, root, shared, PG_LEVEL_1G);
758 __tdp_mmu_zap_root(kvm, root, shared, root->role.level);
763 bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
778 tdp_mmu_set_spte(kvm, kvm_mmu_page_as_id(sp), sp->ptep, old_spte, 0,
791 static bool tdp_mmu_zap_leafs(struct kvm *kvm, struct kvm_mmu_page *root,
798 lockdep_assert_held_write(&kvm->mmu_lock);
804 tdp_mmu_iter_cond_resched(kvm, &iter, flush, false)) {
813 tdp_mmu_iter_set_spte(kvm, &iter, 0);
831 bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, gfn_t start, gfn_t end, bool flush)
835 for_each_tdp_mmu_root_yield_safe(kvm, root, false)
836 flush = tdp_mmu_zap_leafs(kvm, root, start, end, true, flush);
841 void kvm_tdp_mmu_zap_all(struct kvm *kvm)
857 for_each_tdp_mmu_root_yield_safe(kvm, root, false)
858 tdp_mmu_zap_root(kvm, root, false);
865 void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm)
869 read_lock(&kvm->mmu_lock);
871 for_each_tdp_mmu_root_yield_safe(kvm, root, true) {
876 KVM_BUG_ON(!root->role.invalid, kvm);
887 tdp_mmu_zap_root(kvm, root, true);
894 kvm_tdp_mmu_put_root(kvm, root, true);
897 read_unlock(&kvm->mmu_lock);
910 void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm)
920 * Waive the assertion if there are no users of @kvm, i.e. the VM is
926 refcount_read(&kvm->users_count) && kvm->created_vcpus)
927 lockdep_assert_held_write(&kvm->mmu_lock);
931 * be other references to @kvm, i.e. nothing else can invalidate roots
934 list_for_each_entry(root, &kvm->arch.tdp_mmu_roots, link) {
973 else if (tdp_mmu_set_spte_atomic(vcpu->kvm, iter, new_spte))
977 kvm_flush_remote_tlbs_gfn(vcpu->kvm, iter->gfn, iter->level);
1007 * @kvm: kvm instance
1015 static int tdp_mmu_link_sp(struct kvm *kvm, struct tdp_iter *iter,
1022 ret = tdp_mmu_set_spte_atomic(kvm, iter, spte);
1026 tdp_mmu_iter_set_spte(kvm, iter, spte);
1029 tdp_account_mmu_page(kvm, sp);
1034 static int tdp_mmu_split_huge_page(struct kvm *kvm, struct tdp_iter *iter,
1044 struct kvm *kvm = vcpu->kvm;
1086 r = tdp_mmu_split_huge_page(kvm, &iter, sp, true);
1088 r = tdp_mmu_link_sp(kvm, &iter, sp, true);
1101 spin_lock(&kvm->arch.tdp_mmu_pages_lock);
1103 track_possible_nx_huge_page(kvm, sp);
1104 spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
1123 bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
1128 __for_each_tdp_mmu_root_yield_safe(kvm, root, range->slot->as_id, false, false)
1129 flush = tdp_mmu_zap_leafs(kvm, root, range->start, range->end,
1135 typedef bool (*tdp_handler_t)(struct kvm *kvm, struct tdp_iter *iter,
1138 static __always_inline bool kvm_tdp_mmu_handle_gfn(struct kvm *kvm,
1150 for_each_tdp_mmu_root(kvm, root, range->slot->as_id) {
1154 ret |= handler(kvm, &iter, range);
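Lines 1135-1154 show the generic MMU-notifier dispatcher: a per-leaf-SPTE callback applied over every root for the range's address space. A sketch assembled from those fragments; the iterator macro arguments and RCU bracketing are assumptions:

typedef bool (*tdp_handler_t)(struct kvm *kvm, struct tdp_iter *iter,
                              struct kvm_gfn_range *range);

static __always_inline bool kvm_tdp_mmu_handle_gfn(struct kvm *kvm,
                                                   struct kvm_gfn_range *range,
                                                   tdp_handler_t handler)
{
        struct kvm_mmu_page *root;
        struct tdp_iter iter;
        bool ret = false;

        /* Walk every root belonging to the range's address space. */
        for_each_tdp_mmu_root(kvm, root, range->slot->as_id) {
                rcu_read_lock();

                /* Assumed iterator: visit each leaf SPTE in [start, end). */
                tdp_root_for_each_leaf_pte(iter, root, range->start, range->end)
                        ret |= handler(kvm, &iter, range);

                rcu_read_unlock();
        }

        return ret;
}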
1170 static bool age_gfn_range(struct kvm *kvm, struct tdp_iter *iter,
1204 bool kvm_tdp_mmu_age_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
1206 return kvm_tdp_mmu_handle_gfn(kvm, range, age_gfn_range);
1209 static bool test_age_gfn(struct kvm *kvm, struct tdp_iter *iter,
1215 bool kvm_tdp_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
1217 return kvm_tdp_mmu_handle_gfn(kvm, range, test_age_gfn);
1220 static bool set_spte_gfn(struct kvm *kvm, struct tdp_iter *iter,
1238 tdp_mmu_iter_set_spte(kvm, iter, 0);
1244 tdp_mmu_iter_set_spte(kvm, iter, new_spte);
1256 bool kvm_tdp_mmu_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
1263 return kvm_tdp_mmu_handle_gfn(kvm, range, set_spte_gfn);
1271 static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
1284 if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
1294 if (tdp_mmu_set_spte_atomic(kvm, &iter, new_spte))
1309 bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm,
1315 lockdep_assert_held_read(&kvm->mmu_lock);
1317 for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
1318 spte_set |= wrprot_gfn_range(kvm, root, slot->base_gfn,
1343 static struct kvm_mmu_page *tdp_mmu_alloc_sp_for_split(struct kvm *kvm,
1365 read_unlock(&kvm->mmu_lock);
1367 write_unlock(&kvm->mmu_lock);
1373 read_lock(&kvm->mmu_lock);
1375 write_lock(&kvm->mmu_lock);
1383 static int tdp_mmu_split_huge_page(struct kvm *kvm, struct tdp_iter *iter,
1395 sp->spt[i] = make_huge_page_split_spte(kvm, huge_spte, sp->role, i);
1405 ret = tdp_mmu_link_sp(kvm, iter, sp, shared);
1414 kvm_update_page_stats(kvm, level - 1, SPTE_ENT_PER_PAGE);
1421 static int tdp_mmu_split_huge_pages_root(struct kvm *kvm,
1445 if (tdp_mmu_iter_cond_resched(kvm, &iter, false, shared))
1452 sp = tdp_mmu_alloc_sp_for_split(kvm, &iter, shared);
1467 if (tdp_mmu_split_huge_page(kvm, &iter, sp, shared))
1490 void kvm_tdp_mmu_try_split_huge_pages(struct kvm *kvm,
1498 kvm_lockdep_assert_mmu_lock_held(kvm, shared);
1500 for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, shared) {
1501 r = tdp_mmu_split_huge_pages_root(kvm, root, start, end, target_level, shared);
1503 kvm_tdp_mmu_put_root(kvm, root, shared);
1516 static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
1527 if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
1539 if (tdp_mmu_set_spte_atomic(kvm, &iter, iter.old_spte & ~dbit))
1556 bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm,
1562 lockdep_assert_held_read(&kvm->mmu_lock);
1564 for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
1565 spte_set |= clear_dirty_gfn_range(kvm, root, slot->base_gfn,
1578 static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root,
1585 lockdep_assert_held_write(&kvm->mmu_lock);
1626 void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
1633 for_each_tdp_mmu_root(kvm, root, slot->as_id)
1634 clear_dirty_pt_masked(kvm, root, gfn, mask, wrprot);
1637 static void zap_collapsible_spte_range(struct kvm *kvm,
1650 if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
1675 max_mapping_level = kvm_mmu_max_mapping_level(kvm, slot,
1681 if (tdp_mmu_zap_spte_atomic(kvm, &iter))
1692 void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
1697 lockdep_assert_held_read(&kvm->mmu_lock);
1699 for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
1700 zap_collapsible_spte_range(kvm, root, slot);
1708 static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root,
1730 tdp_mmu_iter_set_spte(kvm, &iter, new_spte);
1744 bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
1751 lockdep_assert_held_write(&kvm->mmu_lock);
1752 for_each_tdp_mmu_root(kvm, root, slot->as_id)
1753 spte_set |= write_protect_gfn(kvm, root, gfn, min_level);