Lines Matching refs:root
45 static void tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root)
47 if (kvm_mmu_put_root(kvm, root))
48 kvm_tdp_mmu_free_root(kvm, root);
52 struct kvm_mmu_page *root)
56 if (list_entry_is_head(root, &kvm->arch.tdp_mmu_roots, link))
59 kvm_mmu_get_root(kvm, root);
65 struct kvm_mmu_page *root)
69 next_root = list_next_entry(root, link);
70 tdp_mmu_put_root(kvm, root);
78 * recent root. (Unless keeping a live reference is desirable.)
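
Note: lines 45-78 above are the reference-counted root walk: tdp_mmu_put_root drops a reference and frees the root once the count reaches zero, while the yield-safe iterator pins each root (kvm_mmu_get_root) before the loop body runs and only drops that pin after reading the next list entry, so the walk survives the mmu_lock being released mid-loop. A minimal userspace sketch of the same pattern, using a hand-rolled circular list and an int refcount as stand-ins for struct kvm_mmu_page and the get/put helpers (all names below are illustrative, not the kernel's):

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct root {
        struct root *prev, *next;   /* stand-in for the struct list_head link */
        int root_count;             /* stand-in for kvm_mmu_page.root_count   */
        int id;
    };

    static struct root roots = { &roots, &roots, 0, -1 };  /* list head sentinel */

    static void get_root(struct root *r)  { r->root_count++; }
    static bool put_root(struct root *r)  { return --r->root_count == 0; }

    static void free_root(struct root *r)
    {
        r->prev->next = r->next;    /* list_del() */
        r->next->prev = r->prev;
        printf("freeing root %d\n", r->id);
        free(r);
    }

    static void add_root(int id)
    {
        struct root *r = calloc(1, sizeof(*r));

        r->id = id;
        r->root_count = 1;          /* creator's reference     */
        r->next = roots.next;       /* list_add() at the head  */
        r->prev = &roots;
        roots.next->prev = r;
        roots.next = r;
    }

    /* Walk every root while holding a reference on the one being visited, so
     * the loop body could drop and retake the lock without the entry vanishing. */
    static void walk_roots(void)
    {
        struct root *r = roots.next;

        while (r != &roots) {
            struct root *next;

            get_root(r);            /* pin the root before visiting it          */
            printf("visiting root %d\n", r->id);
            next = r->next;         /* read the next entry before unpinning     */
            if (put_root(r))        /* drop our pin; free if it was the last ref */
                free_root(r);
            r = next;
        }
    }

    int main(void)
    {
        add_root(1);
        add_root(2);
        walk_roots();
        return 0;
    }
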
105 static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
108 void kvm_tdp_mmu_free_root(struct kvm *kvm, struct kvm_mmu_page *root)
114 WARN_ON(root->root_count);
115 WARN_ON(!root->tdp_mmu_page);
117 list_del(&root->link);
119 zap_gfn_range(kvm, root, 0, max_gfn, false, false);
121 free_page((unsigned long)root->spt);
122 kmem_cache_free(mmu_page_header_cache, root);
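
Note: lines 108-122 are the teardown path the put helper lands in once the last reference is gone: sanity-check that the count really is zero and the page is a TDP MMU page, unlink the root from the per-VM list, zap every SPTE beneath it across the whole GFN space, and only then release the page-table page and its metadata. A sketch of that ordering with hypothetical simplified types (the zap step is a placeholder):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdlib.h>

    struct root {
        struct root *prev, *next;   /* link in the per-VM list of roots      */
        int  root_count;
        bool tdp_mmu_page;
        void *spt;                  /* the 4 KiB top-level page-table page   */
    };

    /* Placeholder for zap_gfn_range(kvm, root, 0, max_gfn, ...): tear down
     * every mapping reachable from this root before its memory is released. */
    static void zap_everything(struct root *root) { (void)root; }

    void free_tdp_root(struct root *root)
    {
        assert(root->root_count == 0);   /* WARN_ON(root->root_count)    */
        assert(root->tdp_mmu_page);      /* WARN_ON(!root->tdp_mmu_page) */

        root->prev->next = root->next;   /* list_del(&root->link)        */
        root->next->prev = root->prev;

        zap_everything(root);            /* children go before the root  */

        free(root->spt);                 /* free_page((unsigned long)root->spt) */
        free(root);                      /* kmem_cache_free(..., root)   */
    }
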
159 struct kvm_mmu_page *root;
165 /* Check for an existing root before allocating a new one. */
166 for_each_tdp_mmu_root(kvm, root) {
167 if (root->role.word == role.word) {
168 kvm_mmu_get_root(kvm, root);
170 return root;
174 root = alloc_tdp_mmu_page(vcpu, 0, vcpu->arch.mmu->shadow_root_level);
175 root->root_count = 1;
177 list_add(&root->link, &kvm->arch.tdp_mmu_roots);
181 return root;
186 struct kvm_mmu_page *root;
188 root = get_tdp_mmu_vcpu_root(vcpu);
189 if (!root)
192 return __pa(root->spt);
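
Note: lines 159-192 show how a vCPU obtains its root: scan the existing roots for one whose role word matches and take another reference on it, otherwise allocate a fresh top-level page with a reference count of 1 and add it to the list; the caller then programs the hardware with the physical address of the root page (__pa(root->spt), line 192). A compact userspace sketch of the find-or-allocate-by-role pattern (types and names here are simplified stand-ins, not the kernel's):

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    #define PAGE_SIZE 4096

    struct root {
        struct root *next;
        uint64_t role;        /* stand-in for union kvm_mmu_page_role.word */
        int root_count;
        void *spt;            /* top-level page-table page                 */
    };

    struct root *root_list;   /* per-VM list of TDP MMU roots              */

    struct root *get_vcpu_root(uint64_t role)
    {
        struct root *r;

        /* Check for an existing root before allocating a new one. */
        for (r = root_list; r; r = r->next) {
            if (r->role == role) {
                r->root_count++;                 /* kvm_mmu_get_root()    */
                return r;
            }
        }

        r = calloc(1, sizeof(*r));
        if (!r)
            return NULL;
        r->role = role;
        r->root_count = 1;                       /* the vCPU's reference  */
        r->spt = aligned_alloc(PAGE_SIZE, PAGE_SIZE);
        if (!r->spt) {
            free(r);
            return NULL;
        }
        memset(r->spt, 0, PAGE_SIZE);
        r->next = root_list;                     /* list_add()            */
        root_list = r;
        return r;
    }

Matching on the role word is what lets vCPUs with an identical paging configuration share one root instead of each allocating their own.
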
357 struct kvm_mmu_page *root = sptep_to_sp(root_pt);
358 int as_id = kvm_mmu_page_as_id(root);
417 * paging structure root.
449 * non-root pages mapping GFNs strictly within that range. Returns true if
460 static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
465 tdp_root_for_each_pte(iter, root, start, end) {
494 * non-root pages mapping GFNs strictly within that range. Returns true if
501 struct kvm_mmu_page *root;
504 for_each_tdp_mmu_root_yield_safe(kvm, root)
505 flush = zap_gfn_range(kvm, root, start, end, can_yield, flush);
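
Note: lines 460-505 split range zapping into two layers: a per-root worker that clears every SPTE mapping a GFN in [start, end) and reports whether it changed anything, and a wrapper that runs the worker over every root and ORs the results into a single flush flag, so the caller issues at most one TLB flush. A single-level sketch of that contract (the real walk spans a multi-level structure via tdp_root_for_each_pte and may yield; the flat table and names here are hypothetical):

    #include <stdbool.h>
    #include <stdint.h>

    typedef uint64_t u64;
    typedef uint64_t gfn_t;

    #define PTES_PER_TABLE 512

    /* A flattened last-level table covering gfns [base, base + 512). */
    struct leaf_table {
        gfn_t base;
        u64 sptes[PTES_PER_TABLE];
    };

    /* Clear every present SPTE that maps a gfn in [start, end); return true
     * if anything was cleared so the caller knows a TLB flush is needed. */
    bool zap_gfn_range(struct leaf_table *pt, gfn_t start, gfn_t end)
    {
        bool flush = false;
        gfn_t gfn;

        for (gfn = start; gfn < end; gfn++) {
            u64 *sptep;

            if (gfn < pt->base || gfn >= pt->base + PTES_PER_TABLE)
                continue;                    /* outside this table */
            sptep = &pt->sptes[gfn - pt->base];
            if (!*sptep)
                continue;                    /* already non-present */
            *sptep = 0;                      /* zap                 */
            flush = true;
        }
        return flush;
    }

    /* The wrapper's job, as on lines 501-505: accumulate the flush flag
     * across every root rather than flushing once per root. */
    bool zap_gfn_range_all(struct leaf_table **tables, int nr,
                           gfn_t start, gfn_t end)
    {
        bool flush = false;
        int i;

        for (i = 0; i < nr; i++)
            flush = zap_gfn_range(tables[i], start, end) || flush;

        return flush;
    }
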
656 struct kvm_mmu_page *root, gfn_t start,
661 struct kvm_mmu_page *root;
665 for_each_tdp_mmu_root_yield_safe(kvm, root) {
666 as_id = kvm_mmu_page_as_id(root);
684 ret |= handler(kvm, memslot, root, gfn_start,
694 struct kvm_mmu_page *root, gfn_t start,
697 return zap_gfn_range(kvm, root, start, end, false, false);
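
Note: lines 656-697 are the MMU-notifier plumbing: each operation on a host address range is expressed as a handler with a common signature, and a generic walker picks the memslots matching each root's address space, clips the range to GFNs, and ORs every handler's return value into one result (the unmap case on lines 694-697 simply reuses zap_gfn_range). A sketch of the callback dispatch, with a hypothetical handler type:

    #include <stdbool.h>
    #include <stdint.h>

    typedef uint64_t gfn_t;

    struct root {
        int as_id;            /* which guest address space this root serves */
        /* ... page-table state ... */
    };

    /* Handler contract modeled on the parameter lists above: act on the gfn
     * range [start, end) under one root and report whether anything changed. */
    typedef bool (*range_handler_t)(struct root *root, gfn_t start, gfn_t end);

    /* Dispatch one handler over every root.  In the real code the gfn range is
     * recomputed per memslot of the root's address space (root->as_id); here
     * the caller passes an already-translated range to keep the sketch small. */
    bool handle_gfn_range(struct root **roots, int nr_roots,
                          gfn_t gfn_start, gfn_t gfn_end,
                          range_handler_t handler)
    {
        bool ret = false;
        int i;

        for (i = 0; i < nr_roots; i++)
            ret |= handler(roots[i], gfn_start, gfn_end);

        return ret;
    }
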
712 struct kvm_mmu_page *root, gfn_t start, gfn_t end,
719 tdp_root_for_each_leaf_pte(iter, root, start, end) {
759 struct kvm_mmu_page *root, gfn_t gfn, gfn_t unused,
764 tdp_root_for_each_leaf_pte(iter, root, gfn, gfn + 1)
784 struct kvm_mmu_page *root, gfn_t gfn, gfn_t unused,
797 tdp_root_for_each_pte(iter, root, gfn, gfn + 1) {
837 static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
846 for_each_tdp_pte_min_level(iter, root->spt, root->role.level,
871 struct kvm_mmu_page *root;
875 for_each_tdp_mmu_root_yield_safe(kvm, root) {
876 root_as_id = kvm_mmu_page_as_id(root);
880 spte_set |= wrprot_gfn_range(kvm, root, slot->base_gfn,
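
Note: lines 837-880 write-protect a memslot (as done when dirty logging is enabled): for every root whose address-space id matches the slot's, walk the slot's GFN range from a minimum level down and strip the writable bit from the SPTEs, ORing into spte_set so the caller knows whether any permission actually changed. A flat sketch of the bit clearing and the per-root dispatch (hypothetical SPTE bit layout, single-level table):

    #include <stdbool.h>
    #include <stdint.h>

    typedef uint64_t u64;
    typedef uint64_t gfn_t;

    #define SPTE_PRESENT   (1ull << 0)    /* hypothetical bit layout */
    #define SPTE_WRITABLE  (1ull << 1)

    struct root {
        int as_id;
        u64 *sptes;           /* flat stand-in for the paging structure */
    };

    /* Clear the writable bit on every present, writable SPTE in [start, end);
     * return true if at least one entry changed (the spte_set bookkeeping). */
    bool wrprot_gfn_range(struct root *root, gfn_t start, gfn_t end)
    {
        bool spte_set = false;
        gfn_t gfn;

        for (gfn = start; gfn < end; gfn++) {
            u64 old = root->sptes[gfn];

            if (!(old & SPTE_PRESENT) || !(old & SPTE_WRITABLE))
                continue;
            root->sptes[gfn] = old & ~SPTE_WRITABLE;
            spte_set = true;
        }
        return spte_set;
    }

    /* Per-root dispatch as on lines 871-880: only roots serving the slot's
     * address space are touched, and the "did anything change" bits are ORed. */
    bool wrprot_slot(struct root **roots, int nr_roots, int slot_as_id,
                     gfn_t base_gfn, gfn_t npages)
    {
        bool spte_set = false;
        int i;

        for (i = 0; i < nr_roots; i++) {
            if (roots[i]->as_id != slot_as_id)
                continue;
            spte_set |= wrprot_gfn_range(roots[i], base_gfn,
                                         base_gfn + npages);
        }
        return spte_set;
    }

The same root_as_id filter and spte_set accumulation recur in the dirty-clear (lines 935-944), dirty-set (lines 1055-1060), collapsible-SPTE zap (lines 1113-1118) and single-GFN write-protect (lines 1162-1167) paths below.
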
894 static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
901 tdp_root_for_each_leaf_pte(iter, root, start, end) {
935 struct kvm_mmu_page *root;
939 for_each_tdp_mmu_root_yield_safe(kvm, root) {
940 root_as_id = kvm_mmu_page_as_id(root);
944 spte_set |= clear_dirty_gfn_range(kvm, root, slot->base_gfn,
958 static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root,
964 tdp_root_for_each_leaf_pte(iter, root, gfn + __ffs(mask),
1003 struct kvm_mmu_page *root;
1007 for_each_tdp_mmu_root(kvm, root) {
1008 root_as_id = kvm_mmu_page_as_id(root);
1012 clear_dirty_pt_masked(kvm, root, gfn, mask, wrprot);
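
Note: lines 958-1012 clear dirty state for exactly the pages selected by a 64-bit mask (one bit per GFN): the walk starts at gfn + __ffs(mask), i.e. the lowest set bit, each visited SPTE is only touched if its bit in the mask is set, and a write-protect variant is used when wrprot is requested. A small sketch of consuming the mask bit by bit (hypothetical dirty bit, flat table; __builtin_ctzll plays the role of __ffs):

    #include <stdbool.h>
    #include <stdint.h>

    typedef uint64_t u64;
    typedef uint64_t gfn_t;

    #define SPTE_WRITABLE  (1ull << 1)    /* hypothetical bit layout */
    #define SPTE_DIRTY     (1ull << 2)

    /* Clear dirty tracking state for gfn + bit, for every set bit in mask. */
    void clear_dirty_pt_masked(u64 *sptes, gfn_t gfn, u64 mask, bool wrprot)
    {
        while (mask) {
            unsigned int bit = __builtin_ctzll(mask);  /* __ffs() equivalent */

            if (wrprot)
                sptes[gfn + bit] &= ~SPTE_WRITABLE;    /* re-arm via write fault */
            else
                sptes[gfn + bit] &= ~SPTE_DIRTY;       /* clear the dirty bit    */

            mask &= mask - 1;                          /* drop the lowest set bit */
        }
    }
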
1021 static bool set_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
1028 tdp_root_for_each_pte(iter, root, start, end) {
1051 struct kvm_mmu_page *root;
1055 for_each_tdp_mmu_root_yield_safe(kvm, root) {
1056 root_as_id = kvm_mmu_page_as_id(root);
1060 spte_set |= set_dirty_gfn_range(kvm, root, slot->base_gfn,
1071 struct kvm_mmu_page *root,
1078 tdp_root_for_each_pte(iter, root, start, end) {
1110 struct kvm_mmu_page *root;
1113 for_each_tdp_mmu_root_yield_safe(kvm, root) {
1114 root_as_id = kvm_mmu_page_as_id(root);
1118 zap_collapsible_spte_range(kvm, root, slot->base_gfn,
1128 static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root,
1135 tdp_root_for_each_leaf_pte(iter, root, gfn, gfn + 1) {
1157 struct kvm_mmu_page *root;
1162 for_each_tdp_mmu_root(kvm, root) {
1163 root_as_id = kvm_mmu_page_as_id(root);
1167 spte_set |= write_protect_gfn(kvm, root, gfn);