Lines Matching defs:root
1921 * level tracks the root level
2361 struct kvm_vcpu *vcpu, hpa_t root,
2365 iterator->shadow_addr = root;
2378 BUG_ON(root != vcpu->arch.mmu->root.hpa);
2392 shadow_walk_init_using_root(iterator, vcpu, vcpu->arch.mmu->root.hpa,
2601 * Remove the active root from the active page list, the root
2620 * Make the request to free obsolete roots after marking the root
2677 * Don't zap active root pages, the page itself can't be freed
3572 && VALID_PAGE(mmu->root.hpa);
3592 if (kvm_mmu_is_dummy_root(mmu->root.hpa)) {
3594 } else if (root_to_sp(mmu->root.hpa)) {
3595 mmu_free_root_page(kvm, &mmu->root.hpa, &invalid_list);
3606 mmu->root.hpa = INVALID_PAGE;
3607 mmu->root.pgd = 0;
3664 hpa_t root;
3674 root = kvm_tdp_mmu_get_vcpu_root_hpa(vcpu);
3675 mmu->root.hpa = root;
3677 root = mmu_alloc_root(vcpu, 0, 0, shadow_root_level);
3678 mmu->root.hpa = root;
3688 root = mmu_alloc_root(vcpu, i << (30 - PAGE_SHIFT), 0,
3690 mmu->pae_root[i] = root | PT_PRESENT_MASK |
3693 mmu->root.hpa = __pa(mmu->pae_root);
3695 WARN_ONCE(1, "Bad TDP root level = %d\n", shadow_root_level);
3700 /* root.pgd is ignored for direct MMUs. */
3701 mmu->root.pgd = 0;
3714 * Check if this is the first shadow root being allocated before
3722 /* Recheck, under the lock, whether this is the first shadow root. */
3774 hpa_t root;
3780 mmu->root.hpa = kvm_mmu_get_dummy_root();
3810 * write-protect the guest's page table root.
3813 root = mmu_alloc_root(vcpu, root_gfn, 0,
3815 mmu->root.hpa = root;
3867 root = mmu_alloc_root(vcpu, root_gfn, quadrant, PT32_ROOT_LEVEL);
3868 mmu->pae_root[i] = root | pm_mask;
3872 mmu->root.hpa = __pa(mmu->pml5_root);
3874 mmu->root.hpa = __pa(mmu->pml4_root);
3876 mmu->root.hpa = __pa(mmu->pae_root);
3879 mmu->root.pgd = root_pgd;
3896 * tables are allocated and initialized at root creation as there is no
3958 static bool is_unsync_root(hpa_t root)
3962 if (!VALID_PAGE(root) || kvm_mmu_is_dummy_root(root))
3978 sp = root_to_sp(root);
3982 * PDPTEs for a given PAE root need to be synchronized individually.
4001 if (!VALID_PAGE(vcpu->arch.mmu->root.hpa))
4007 hpa_t root = vcpu->arch.mmu->root.hpa;
4009 if (!is_unsync_root(root))
4012 sp = root_to_sp(root);
4023 hpa_t root = vcpu->arch.mmu->pae_root[i];
4025 if (IS_VALID_PAE_ROOT(root)) {
4026 sp = spte_to_child_sp(root);
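The sync-root fragments above (is_unsync_root, root_to_sp and the pae_root loop) hint at two paths: a single top-level shadow root is checked and synced as one unit, while PAE paging keeps four per-PDPTE roots that must each be synchronized individually. The standalone C sketch below only models that split; its types and helpers are simplified stand-ins for illustration, not the kernel's actual definitions.

    #include <stdbool.h>
    #include <stddef.h>

    #define INVALID_HPA (~0ULL)

    struct shadow_page {
            bool unsync;           /* page may be stale vs. the guest */
            bool unsync_children;  /* some child page may be stale */
    };

    struct mmu_state {
            unsigned long long root_hpa;     /* single top-level root, if any */
            struct shadow_page *root_sp;     /* shadow page backing root_hpa */
            struct shadow_page *pae_root[4]; /* per-PDPTE roots for PAE paging */
    };

    static bool root_needs_sync(const struct shadow_page *sp)
    {
            return sp && (sp->unsync || sp->unsync_children);
    }

    static void sync_page(struct shadow_page *sp)
    {
            /* stand-in for re-validating the shadow page against the guest */
            sp->unsync = false;
            sp->unsync_children = false;
    }

    static void sync_roots(struct mmu_state *mmu)
    {
            if (mmu->root_hpa != INVALID_HPA && mmu->root_sp) {
                    /* one top-level root: sync it (and its children) as a unit */
                    if (root_needs_sync(mmu->root_sp))
                            sync_page(mmu->root_sp);
                    return;
            }

            /* PAE paging: each of the four PDPTE roots is synced on its own */
            for (size_t i = 0; i < 4; i++) {
                    if (root_needs_sync(mmu->pae_root[i]))
                            sync_page(mmu->pae_root[i]);
            }
    }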
4101 int root, leaf, level;
4107 leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes, &root);
4109 leaf = get_walk(vcpu, addr, sptes, &root);
4131 for (level = root; level >= leaf; level--)
4137 for (level = root; level >= leaf; level--)
4338 * root was invalidated by a memslot update or a relevant mmu_notifier fired.
4343 struct kvm_mmu_page *sp = root_to_sp(vcpu->arch.mmu->root.hpa);
4352 * only a hint that the current root _may_ be obsolete and needs to be
4354 * previous root, then __kvm_mmu_prepare_zap_page() signals all vCPUs
4355 * to reload even if no vCPU is actively using the root.
4369 if (WARN_ON_ONCE(kvm_mmu_is_dummy_root(vcpu->arch.mmu->root.hpa)))
4522 static inline bool is_root_usable(struct kvm_mmu_root_info *root, gpa_t pgd,
4527 if (!VALID_PAGE(root->hpa))
4530 if (!role.direct && pgd != root->pgd)
4533 sp = root_to_sp(root->hpa);
4541 * Find out if a previously cached root matching the new pgd/role is available,
4542 * and insert the current root as the MRU in the cache.
4543 * If a matching root is found, it is assigned to kvm_mmu->root and
4545 * If no match is found, kvm_mmu->root is left invalid, the LRU root is
4546 * evicted to make room for the current root, and false is returned.
4554 if (is_root_usable(&mmu->root, new_pgd, new_role))
4566 swap(mmu->root, mmu->prev_roots[i]);
4567 if (is_root_usable(&mmu->root, new_pgd, new_role))
4576 * Find out if a previously cached root matching the new pgd/role is available.
4577 * On entry, mmu->root is invalid.
4578 * If a matching root is found, it is assigned to kvm_mmu->root, the LRU entry
4580 * If no match is found, kvm_mmu->root is left invalid and false is returned.
4595 swap(mmu->root, mmu->prev_roots[i]);
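The cached-root fragments above (is_root_usable and the swap loops over mmu->prev_roots) describe a small MRU cache of page-table roots keyed by guest pgd and paging role. The standalone C sketch below models that search-and-swap; the type names, the 3-entry cache size and find_cached_root() are simplified stand-ins for illustration, not the kernel's actual definitions.

    #include <stdbool.h>
    #include <stddef.h>

    #define NR_PREV_ROOTS 3
    #define INVALID_HPA   (~0ULL)

    struct root_info {
            unsigned long long hpa;  /* address of the cached root page */
            unsigned long long pgd;  /* guest pgd the root was built for */
            unsigned int role;       /* paging mode/role of the root */
    };

    struct mmu_cache {
            struct root_info root;                      /* current (MRU) root */
            struct root_info prev_roots[NR_PREV_ROOTS]; /* older roots */
    };

    static bool root_usable(const struct root_info *r,
                            unsigned long long pgd, unsigned int role)
    {
            return r->hpa != INVALID_HPA && r->pgd == pgd && r->role == role;
    }

    /*
     * Look for a cached root matching (pgd, role).  Each iteration swaps the
     * current root with the next cached entry, so on a hit the match ends up
     * in mmu->root while the old current root stays cached as the MRU entry.
     * On a miss the least-recently-used entry has been rotated out into
     * mmu->root; a real implementation would free it before allocating a new
     * root, here it is simply marked invalid.
     */
    static bool find_cached_root(struct mmu_cache *mmu,
                                 unsigned long long pgd, unsigned int role)
    {
            if (root_usable(&mmu->root, pgd, role))
                    return true;

            for (size_t i = 0; i < NR_PREV_ROOTS; i++) {
                    struct root_info tmp = mmu->root;

                    mmu->root = mmu->prev_roots[i];
                    mmu->prev_roots[i] = tmp;
                    if (root_usable(&mmu->root, pgd, role))
                            return true;
            }

            mmu->root.hpa = INVALID_HPA;
            return false;
    }

Swapping while searching is the design point: a hit leaves the previous current root cached as the most-recently-used entry, and a miss leaves the least-recently-used root in hand to be evicted and replaced.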
4610 if (VALID_PAGE(mmu->root.hpa) && !root_to_sp(mmu->root.hpa))
4613 if (VALID_PAGE(mmu->root.hpa))
4625 * Return immediately if no usable root was found, kvm_mmu_reload()
4626 * will establish a valid root prior to the next VM-Enter.
4632 * It's possible that the cached previous root page is obsolete because
4635 * which will free the root set here and allocate a new one.
4653 * If this is a direct root page, it doesn't have a write flooding
4657 struct kvm_mmu_page *sp = root_to_sp(vcpu->arch.mmu->root.hpa);
5476 * Flush any TLB entries for the new root, the provenance of the root
5478 * for a freed root, in theory another hypervisor could have left
5480 * flush when freeing a root (see kvm_tdp_mmu_put_root()).
5492 WARN_ON_ONCE(VALID_PAGE(vcpu->arch.root_mmu.root.hpa));
5494 WARN_ON_ONCE(VALID_PAGE(vcpu->arch.guest_mmu.root.hpa));
5513 * (or any shadow paging flavor with a dummy root, see note below)
5530 if (is_obsolete_root(kvm, mmu->root.hpa))
5713 if (WARN_ON_ONCE(!VALID_PAGE(vcpu->arch.mmu->root.hpa)))
5825 __kvm_mmu_invalidate_addr(vcpu, mmu, addr, mmu->root.hpa);
5971 mmu->root.hpa = INVALID_PAGE;
5972 mmu->root.pgd = 0;
5981 * When using PAE paging, the four PDPTEs are treated as 'root' pages,
6076 * loaded a new root, i.e. the shadow pages being zapped cannot
6098 * KVM performs a local TLB flush when allocating a new root (see
6158 * Deferring the zap until the final reference to the root is put would