Lines matching refs:arch
17 INIT_LIST_HEAD(&kvm->arch.tdp_mmu_roots);
18 spin_lock_init(&kvm->arch.tdp_mmu_pages_lock);
43 WARN_ON(atomic64_read(&kvm->arch.tdp_mmu_pages));
44 WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots));
91 spin_lock(&kvm->arch.tdp_mmu_pages_lock);
93 spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
116 next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
120 next_root = list_first_or_null_rcu(&kvm->arch.tdp_mmu_roots,
128 next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
176 list_for_each_entry(_root, &_kvm->arch.tdp_mmu_roots, link) \
185 sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
186 sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);
222 union kvm_mmu_page_role role = vcpu->arch.mmu->root_role;
250 spin_lock(&kvm->arch.tdp_mmu_pages_lock);
251 list_add_rcu(&root->link, &kvm->arch.tdp_mmu_roots);
252 spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
265 atomic64_inc(&kvm->arch.tdp_mmu_pages);
271 atomic64_dec(&kvm->arch.tdp_mmu_pages);
292 spin_lock(&kvm->arch.tdp_mmu_pages_lock);
300 spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
934 list_for_each_entry(root, &kvm->arch.tdp_mmu_roots, link) {
1043 struct kvm_mmu *mmu = vcpu->arch.mmu;
1101 spin_lock(&kvm->arch.tdp_mmu_pages_lock);
1104 spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
1669 * to query that info from slot->arch.lpage_info will cause an
1768 struct kvm_mmu *mmu = vcpu->arch.mmu;
1772 *root_level = vcpu->arch.mmu->root_role.level;
1797 struct kvm_mmu *mmu = vcpu->arch.mmu;
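Read together, these hits outline the data structures behind the TDP MMU (they appear to come from arch/x86/kvm/mmu/tdp_mmu.c): `kvm->arch.tdp_mmu_roots` is an RCU-protected list of root pages, `kvm->arch.tdp_mmu_pages_lock` is the spinlock writers take to mutate it (lines 91/93, 250-252, 292/300, 1101/1104), and `kvm->arch.tdp_mmu_pages` is an `atomic64_t` page count checked against zero at teardown (lines 43, 265, 271). The sketch below illustrates that writer/reader split under stated assumptions: both helper names are hypothetical, the reader loop is a simplification of the `list_next_or_null_rcu()`/`list_first_or_null_rcu()` walk at lines 116-128 (the real code pins each root so it can drop RCU between iterations), and the snippet only compiles inside a kernel tree.

```c
/* Hypothetical sketch of the pattern the hits above show; kernel context
 * assumed. struct kvm_mmu_page is from arch/x86/kvm/mmu/mmu_internal.h.
 */
#include <linux/kvm_host.h>	/* struct kvm, kvm->arch */
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>

/* Writer side: publish a new root, as lines 250-252 do. Mutations of the
 * roots list are serialized by tdp_mmu_pages_lock; readers never take it.
 */
static void tdp_mmu_link_root_sketch(struct kvm *kvm,
				     struct kvm_mmu_page *root)
{
	spin_lock(&kvm->arch.tdp_mmu_pages_lock);
	list_add_rcu(&root->link, &kvm->arch.tdp_mmu_roots);
	spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
}

/* Reader side: walk the roots without the spinlock, relying on RCU, in
 * the spirit of the list_*_or_null_rcu() calls at lines 116-128
 * (simplified: the real walk takes a reference on each root so it can
 * leave the RCU read-side section between roots).
 */
static void tdp_mmu_walk_roots_sketch(struct kvm *kvm)
{
	struct kvm_mmu_page *root;

	rcu_read_lock();
	list_for_each_entry_rcu(root, &kvm->arch.tdp_mmu_roots, link) {
		/* Inspect root here; sleeping is not allowed under RCU. */
	}
	rcu_read_unlock();
}
```

The `atomic64_inc()`/`atomic64_dec()` pair at lines 265/271 keeps `kvm->arch.tdp_mmu_pages` in step with page allocation and teardown, which is what lets the `WARN_ON(atomic64_read(...))` at line 43 assert that no TDP MMU pages outlive the MMU.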