Lines Matching defs:root

2116 struct kvm_vcpu *vcpu, hpa_t root,
2120 iterator->shadow_addr = root;
2133 BUG_ON(root != vcpu->arch.mmu->root_hpa);
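The three lines above (2116, 2120, 2133) come from the shadow-walk iterator setup: the walk starts at the supplied root HPA and, on the common path, that root is expected to be the root currently installed in vcpu->arch.mmu->root_hpa. Below is a minimal, self-contained sketch of seeding such an iterator from a root address; the struct, field names, and the assert stand in for the kernel's kvm_shadow_walk_iterator and BUG_ON, and are assumptions made only for illustration.

/* Minimal userspace sketch of seeding a table-walk iterator from a root
 * address. Types and field names are simplified stand-ins, not the
 * kernel's kvm_shadow_walk_iterator. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t hpa_t;

struct walk_iterator {
	hpa_t shadow_addr;   /* table the walk currently points at */
	int   level;         /* paging level, counted down toward the leaf */
};

static void walk_init(struct walk_iterator *it, hpa_t root,
		      hpa_t expected_root, int root_level)
{
	/* Mirrors the idea of the BUG_ON above: the caller-supplied root
	 * must be the root the MMU context believes is loaded. */
	assert(root == expected_root);
	it->shadow_addr = root;
	it->level = root_level;
}

int main(void)
{
	struct walk_iterator it;
	hpa_t root = 0x1000;

	walk_init(&it, root, root, 4);
	printf("walk starts at %#llx, level %d\n",
	       (unsigned long long)it.shadow_addr, it.level);
	return 0;
}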
2333 * Remove the active root from the active page list, the root
2403 * Don't zap active root pages, the page itself can't be freed
3238 hpa_t root;
3242 root = kvm_tdp_mmu_get_vcpu_root_hpa(vcpu);
3244 if (!VALID_PAGE(root))
3246 vcpu->arch.mmu->root_hpa = root;
3248 root = mmu_alloc_root(vcpu, 0, 0, shadow_root_level,
3251 if (!VALID_PAGE(root))
3253 vcpu->arch.mmu->root_hpa = root;
3258 root = mmu_alloc_root(vcpu, i << (30 - PAGE_SHIFT),
3260 if (!VALID_PAGE(root))
3262 vcpu->arch.mmu->pae_root[i] = root | PT_PRESENT_MASK;
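Lines 3238-3262 are the direct-root allocation path: either the TDP MMU hands back a root, a single shadow root is allocated at the top paging level, or four PAE roots are allocated, one per 1 GiB quadrant (gfn i << (30 - PAGE_SHIFT)), each stored with a present bit OR'd in. The sketch below only illustrates the PAE-quadrant arithmetic and the flag OR; the PAGE_SHIFT and PT_PRESENT_MASK values and fake_alloc_root() are illustrative stand-ins, not the kernel's helpers.

/* Userspace sketch of the PAE "four quadrant" direct-root setup suggested
 * by i << (30 - PAGE_SHIFT) and root | PT_PRESENT_MASK above. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT       12
#define PT_PRESENT_MASK  (1ULL << 0)

typedef uint64_t hpa_t;
typedef uint64_t gfn_t;

/* Stand-in for allocating a root page and returning its host physical
 * address; the real code goes through mmu_alloc_root(). */
static hpa_t fake_alloc_root(gfn_t gfn)
{
	return 0x100000ULL + (gfn << PAGE_SHIFT);
}

int main(void)
{
	hpa_t pae_root[4];

	for (int i = 0; i < 4; i++) {
		/* Each PAE root covers 1 GiB of guest physical space, so the
		 * i-th root starts at guest frame i << (30 - PAGE_SHIFT). */
		gfn_t gfn = (gfn_t)i << (30 - PAGE_SHIFT);
		hpa_t root = fake_alloc_root(gfn);

		/* The entry stores the page address plus a present flag in
		 * the low bits, as in pae_root[i] = root | PT_PRESENT_MASK. */
		pae_root[i] = root | PT_PRESENT_MASK;
		printf("pae_root[%d] = %#llx (gfn %#llx)\n",
		       i, (unsigned long long)pae_root[i],
		       (unsigned long long)gfn);
	}
	return 0;
}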
3278 hpa_t root;
3289 * write-protect the guest's page table root.
3294 root = mmu_alloc_root(vcpu, root_gfn, 0,
3296 if (!VALID_PAGE(root))
3298 vcpu->arch.mmu->root_hpa = root;
3338 root = mmu_alloc_root(vcpu, root_gfn, i << 30,
3340 if (!VALID_PAGE(root))
3342 vcpu->arch.mmu->pae_root[i] = root | pm_mask;
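Lines 3278-3342 are from the shadow-root allocation path: a long-mode guest gets one shadow root built for the guest's root_gfn (whose page-table root is also write-protected), while a PAE guest gets one root per PDPTE, stored as pae_root[i] = root | pm_mask. A hedged sketch of that branch structure follows; the guest_mmu struct, the pm_mask value, and fake_alloc_root() are simplified assumptions, not the kernel's types.

/* Sketch of the two shadow-root shapes implied above: one top-level root
 * for a long-mode guest, or four per-PDPTE roots for a PAE guest. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t hpa_t;
typedef uint64_t gfn_t;

struct guest_mmu {
	bool   long_mode;     /* 4/5-level guest paging vs. PAE */
	gfn_t  root_gfn;      /* frame of the guest's top-level table */
	hpa_t  root_hpa;      /* single shadow root (long mode) */
	hpa_t  pae_root[4];   /* per-PDPTE shadow roots (PAE) */
};

/* Stand-in for mmu_alloc_root(): pretend to allocate a shadow page for the
 * given guest frame and return its host physical address. */
static hpa_t fake_alloc_root(gfn_t gfn)
{
	return 0x200000ULL + (gfn << 12);
}

static void alloc_shadow_roots(struct guest_mmu *mmu, uint64_t pm_mask)
{
	if (mmu->long_mode) {
		/* One root shadowing the guest's own root table. */
		mmu->root_hpa = fake_alloc_root(mmu->root_gfn);
		return;
	}

	for (int i = 0; i < 4; i++) {
		/* Placeholder gfn; the real code reads the guest's i-th
		 * PDPTE. The entry carries permission bits in pm_mask,
		 * mirroring pae_root[i] = root | pm_mask. */
		hpa_t root = fake_alloc_root(mmu->root_gfn + i);
		mmu->pae_root[i] = root | pm_mask;
	}
}

int main(void)
{
	struct guest_mmu mmu = { .long_mode = false, .root_gfn = 0x1234 };

	alloc_shadow_roots(&mmu, 0x7 /* present|write|user, illustrative */);
	for (int i = 0; i < 4; i++)
		printf("pae_root[%d] = %#llx\n", i,
		       (unsigned long long)mmu.pae_root[i]);
	return 0;
}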
3397 hpa_t root = vcpu->arch.mmu->root_hpa;
3398 sp = to_shadow_page(root);
3428 hpa_t root = vcpu->arch.mmu->pae_root[i];
3430 if (root && VALID_PAGE(root)) {
3431 root &= PT64_BASE_ADDR_MASK;
3432 sp = to_shadow_page(root);
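Lines 3397-3432 are from root synchronization: the single root_hpa, or each pae_root[i] entry, is validity-checked and then masked with PT64_BASE_ADDR_MASK so the flag bits are stripped before the address is turned back into its shadow-page descriptor via to_shadow_page(). The sketch below shows only the strip-the-flags step; the mask value and entry layout are illustrative, not the kernel's definitions.

/* Sketch of recovering a page address from a PAE root entry by masking off
 * the low flag bits, as in root &= PT64_BASE_ADDR_MASK. */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t hpa_t;

/* Illustrative: keep the 4 KiB-aligned physical address, drop flag bits. */
#define BASE_ADDR_MASK  (~0xfffULL)
#define PRESENT_MASK    (1ULL << 0)

int main(void)
{
	hpa_t pae_root[4] = {
		0x5000 | PRESENT_MASK,
		0x6000 | PRESENT_MASK,
		0,                      /* unused/invalid entry */
		0x8000 | PRESENT_MASK,
	};

	for (int i = 0; i < 4; i++) {
		hpa_t root = pae_root[i];

		/* Mirrors "if (root && VALID_PAGE(root))": skip empty or
		 * invalid entries before touching the shadow page. */
		if (!root)
			continue;

		root &= BASE_ADDR_MASK;
		printf("pae_root[%d] -> page at %#llx\n", i,
		       (unsigned long long)root);
	}
	return 0;
}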
3522 int root, leaf, level;
3531 leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes, &root);
3533 leaf = get_walk(vcpu, addr, sptes, &root);
3542 for (level = root; level >= leaf; level--) {
3558 for (level = root; level >= leaf; level--)
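Lines 3522-3558 walk an sptes[] array filled in by get_walk()/kvm_tdp_mmu_get_walk(), iterating from the root level down to the leaf level. Below is a minimal sketch of that root-to-leaf loop; the level numbering, array size, and fake entry values are assumptions for illustration only.

/* Sketch of walking recorded SPTEs from the root level down to the leaf,
 * as in "for (level = root; level >= leaf; level--)". */
#include <stdint.h>
#include <stdio.h>

#define MAX_LEVEL 5

int main(void)
{
	uint64_t sptes[MAX_LEVEL + 1] = {0};
	int root = 4;   /* top paging level reached by the walk */
	int leaf = 1;   /* lowest level a translation was found at */

	/* Pretend a page-table walk recorded one entry per level. */
	for (int level = root; level >= leaf; level--)
		sptes[level] = (0x1000ULL * level) | 0x1; /* fake entry */

	/* Dump the walk top-down, root level first, leaf level last. */
	for (int level = root; level >= leaf; level--)
		printf("level %d: spte = %#llx\n", level,
		       (unsigned long long)sptes[level]);
	return 0;
}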
3831 static inline bool is_root_usable(struct kvm_mmu_root_info *root, gpa_t pgd,
3834 return (role.direct || pgd == root->pgd) &&
3835 VALID_PAGE(root->hpa) && to_shadow_page(root->hpa) &&
3836 role.word == to_shadow_page(root->hpa)->role.word;
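is_root_usable() (lines 3831-3836) accepts a cached root only when the PGD matches (or the role is direct), the root HPA is valid and backed by a shadow page, and the cached role word equals the requested role. The sketch below reproduces that three-part test with simplified stand-in types; it folds the shadow-page lookup into a plain validity check, so treat it as an approximation rather than the kernel's exact predicate.

/* Sketch of the cached-root usability check: pgd match (or direct role),
 * a valid backing page, and an exact role match. Types are simplified
 * stand-ins for kvm_mmu_root_info and the MMU role. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t gpa_t;
typedef uint64_t hpa_t;

#define INVALID_PAGE ((hpa_t)-1)

struct mmu_role {
	bool     direct;  /* no guest page table is being shadowed */
	uint32_t word;    /* packed role bits, compared as a whole */
};

struct root_info {
	gpa_t           pgd;
	hpa_t           hpa;
	struct mmu_role role;  /* role the root was created with */
};

static bool is_root_usable(const struct root_info *root, gpa_t pgd,
			   struct mmu_role role)
{
	return (role.direct || pgd == root->pgd) &&
	       root->hpa != INVALID_PAGE &&
	       role.word == root->role.word;
}

int main(void)
{
	struct root_info cached = {
		.pgd = 0x1000, .hpa = 0xabc000,
		.role = { .direct = false, .word = 0x42 },
	};
	struct mmu_role want = { .direct = false, .word = 0x42 };

	printf("same pgd/role usable: %d\n",
	       is_root_usable(&cached, 0x1000, want));
	printf("different pgd usable: %d\n",
	       is_root_usable(&cached, 0x2000, want));
	return 0;
}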
3840 * Find out if a previously cached root matching the new pgd/role is available.
3841 * The current root is also inserted into the cache.
3842 * If a matching root was found, it is assigned to kvm_mmu->root_hpa and true is returned.
3844 * Otherwise, the LRU root from the cache is assigned to kvm_mmu->root_hpa and
3845 * false is returned. This root should now be freed by the caller.
3851 struct kvm_mmu_root_info root;
3854 root.pgd = mmu->root_pgd;
3855 root.hpa = mmu->root_hpa;
3857 if (is_root_usable(&root, new_pgd, new_role))
3861 swap(root, mmu->prev_roots[i]);
3863 if (is_root_usable(&root, new_pgd, new_role))
3867 mmu->root_hpa = root.hpa;
3868 mmu->root_pgd = root.pgd;
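cached_root_available() (lines 3840-3868) packs the current root into a local, then swaps it through the prev_roots cache entry by entry; whichever matching cached root turns up (or, failing that, the least-recently-used entry left over after the swaps) becomes the new current root, and the return value tells the caller whether that root still needs to be freed. A hedged sketch of that swap-based search follows; the cache size, the match() helper, and the two-field root_info struct are simplified assumptions.

/* Sketch of the swap-based search over cached previous roots: the current
 * root enters the cache, a matching cached root (if any) becomes current,
 * otherwise the oldest entry is left current for the caller to free. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NR_PREV_ROOTS 3

struct root_info {
	uint64_t pgd;
	uint64_t hpa;
};

static void swap_roots(struct root_info *a, struct root_info *b)
{
	struct root_info tmp = *a;
	*a = *b;
	*b = tmp;
}

/* Illustrative stand-in for is_root_usable(): valid page, matching pgd. */
static bool match(const struct root_info *root, uint64_t new_pgd)
{
	return root->hpa && root->pgd == new_pgd;
}

/* Returns true if a cached root for new_pgd was found and made current;
 * otherwise the LRU root is made current and should be freed by the caller. */
static bool cached_root_available(struct root_info *current_root,
				  struct root_info prev[NR_PREV_ROOTS],
				  uint64_t new_pgd)
{
	struct root_info root = *current_root;
	bool found = match(&root, new_pgd);

	for (int i = 0; !found && i < NR_PREV_ROOTS; i++) {
		/* Shift the cache: the previously-current root slides into
		 * the cache while each candidate is examined in turn. */
		swap_roots(&root, &prev[i]);
		found = match(&root, new_pgd);
	}

	*current_root = root;
	return found;
}

int main(void)
{
	struct root_info cur = { .pgd = 0x1000, .hpa = 0xa000 };
	struct root_info prev[NR_PREV_ROOTS] = {
		{ .pgd = 0x2000, .hpa = 0xb000 },
		{ .pgd = 0x3000, .hpa = 0xc000 },
		{ .pgd = 0x4000, .hpa = 0xd000 },
	};

	bool hit = cached_root_available(&cur, prev, 0x3000);
	printf("hit=%d current pgd=%#llx\n", hit,
	       (unsigned long long)cur.pgd);
	return 0;
}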
3900 * It's possible that the cached previous root page is obsolete because
3903 * free the root set here and allocate a new one.
3921 * If this is a direct root page, it doesn't have a write flooding
5320 * When using PAE paging, the four PDPTEs are treated as 'root' pages,
5403 * loaded a new root, i.e. the shadow pages being zapped cannot