Lines Matching refs:arch

252 		.efer = vcpu->arch.efer,
646 return tdp_mmu_enabled && vcpu->arch.mmu->root_role.direct;
688 r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache,
692 r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_shadow_page_cache,
697 r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_shadowed_info_cache,
702 return kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
708 kvm_mmu_free_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache);
709 kvm_mmu_free_memory_cache(&vcpu->arch.mmu_shadow_page_cache);
710 kvm_mmu_free_memory_cache(&vcpu->arch.mmu_shadowed_info_cache);
711 kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache);
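
The mmu_pte_list_desc_cache / mmu_shadow_page_cache / mmu_shadowed_info_cache / mmu_page_header_cache hits above (688-711) follow KVM's per-vCPU memory-cache pattern: each cache is topped up to a minimum while sleeping is still allowed, so allocations made later under the MMU lock cannot sleep or fail, and whatever is left over is freed when the vCPU is torn down. A minimal standalone sketch of that pattern; the struct and function names below are simplified stand-ins, not the kernel's kvm_mmu_memory_cache API:

    #include <stdlib.h>

    /* Simplified stand-in for a per-vCPU object cache. */
    struct obj_cache {
            void   *objs[64];     /* pre-allocated objects           */
            int     nobjs;        /* how many are currently stocked  */
            size_t  obj_size;     /* size of each object             */
    };

    /* Fill the cache up to 'min' objects before any lock is taken. */
    static int cache_topup(struct obj_cache *mc, int min)
    {
            if (min > 64)
                    return -1;
            while (mc->nobjs < min) {
                    void *obj = calloc(1, mc->obj_size);  /* zeroed, like __GFP_ZERO */
                    if (!obj)
                            return -1;
                    mc->objs[mc->nobjs++] = obj;
            }
            return 0;
    }

    /* Take one object; after a successful topup this cannot come up empty. */
    static void *cache_alloc(struct obj_cache *mc)
    {
            return mc->nobjs ? mc->objs[--mc->nobjs] : NULL;
    }

    /* Release whatever is still stocked (the *_free_memory_cache hits above). */
    static void cache_free(struct obj_cache *mc)
    {
            while (mc->nobjs)
                    free(mc->objs[--mc->nobjs]);
    }
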
795 return &slot->arch.lpage_info[level - 2][idx];
827 kvm->arch.indirect_shadow_pages++;
857 &kvm->arch.possible_nx_huge_pages);
875 kvm->arch.indirect_shadow_pages--;
1086 return &slot->arch.rmap[level - PG_LEVEL_4K][idx];
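
The lpage_info[level - 2][idx] and rmap[level - PG_LEVEL_4K][idx] hits (795, 1086) index per-memslot, per-level arrays; the index counts how many level-sized strides the gfn lies past the slot's base. A standalone sketch of that computation, assuming the x86 layout of 9 gfn bits per level (4 KiB, 2 MiB, 1 GiB); names are illustrative rather than the kernel's:

    #include <stdint.h>

    typedef uint64_t gfn_t;

    enum { PG_LEVEL_4K = 1, PG_LEVEL_2M = 2, PG_LEVEL_1G = 3 };

    /* Each level covers 9 more gfn bits than the previous one (512 entries per table). */
    static inline unsigned int hpage_gfn_shift(int level)
    {
            return (level - PG_LEVEL_4K) * 9;
    }

    /*
     * Index into a per-level slot array such as rmap[level - PG_LEVEL_4K]
     * or lpage_info[level - 2]: strides of the level's page size measured
     * from the slot's base gfn.
     */
    static inline unsigned long gfn_to_level_index(gfn_t gfn, gfn_t slot_base_gfn,
                                                   int level)
    {
            return (gfn >> hpage_gfn_shift(level)) -
                   (slot_base_gfn >> hpage_gfn_shift(level));
    }
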
1662 struct kvm_mmu_memory_cache *cache = &vcpu->arch.mmu_pte_list_desc_cache;
1709 * kvm->arch.n_used_mmu_pages values. We need a global,
1715 kvm->arch.n_used_mmu_pages += nr;
1909 &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)]) \
1914 union kvm_mmu_page_role root_role = vcpu->arch.mmu->root_role;
1938 if (WARN_ON_ONCE(sp->role.direct || !vcpu->arch.mmu->sync_spte ||
1950 return vcpu->arch.mmu->sync_spte(vcpu, sp, i);
2012 unlikely(sp->mmu_valid_gen != kvm->arch.mmu_valid_gen);
2252 sp->mmu_valid_gen = kvm->arch.mmu_valid_gen;
2253 list_add(&sp->link, &kvm->arch.active_mmu_pages);
2276 sp_list = &kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)];
2293 .page_header_cache = &vcpu->arch.mmu_page_header_cache,
2294 .shadow_page_cache = &vcpu->arch.mmu_shadow_page_cache,
2295 .shadowed_info_cache = &vcpu->arch.mmu_shadowed_info_cache,
2366 iterator->level = vcpu->arch.mmu->root_role.level;
2369 vcpu->arch.mmu->cpu_role.base.level < PT64_ROOT_4LEVEL &&
2370 !vcpu->arch.mmu->root_role.direct)
2378 BUG_ON(root != vcpu->arch.mmu->root.hpa);
2381 = vcpu->arch.mmu->pae_root[(addr >> 30) & 3];
2392 shadow_walk_init_using_root(iterator, vcpu, vcpu->arch.mmu->root.hpa,
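
In the shadow-walk setup above (2366-2392), a guest using 32-bit PAE paging (cpu_role.base.level < PT64_ROOT_4LEVEL with an indirect root) does not start from the single root.hpa; bits 31:30 of the address select one of the four pae_root entries instead. A tiny sketch of that selection, with pae_root reduced to a plain four-entry array of hypothetical root addresses:

    #include <stdint.h>

    typedef uint64_t hpa_t;

    /*
     * PAE paging has four page-directory-pointer entries, so the top two
     * bits of the 32-bit guest address pick which shadow root the walk
     * starts from.
     */
    static hpa_t pae_walk_root(const hpa_t pae_root[4], uint32_t addr)
    {
            return pae_root[(addr >> 30) & 3];
    }
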
2461 __link_shadow_page(vcpu->kvm, &vcpu->arch.mmu_pte_list_desc_cache, sptep, sp, true);
2671 if (list_empty(&kvm->arch.active_mmu_pages))
2675 list_for_each_entry_safe_reverse(sp, tmp, &kvm->arch.active_mmu_pages, link) {
2701 if (kvm->arch.n_max_mmu_pages > kvm->arch.n_used_mmu_pages)
2702 return kvm->arch.n_max_mmu_pages -
2703 kvm->arch.n_used_mmu_pages;
2739 if (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) {
2740 kvm_mmu_zap_oldest_mmu_pages(kvm, kvm->arch.n_used_mmu_pages -
2743 goal_nr_mmu_pages = kvm->arch.n_used_mmu_pages;
2746 kvm->arch.n_max_mmu_pages = goal_nr_mmu_pages;
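
The n_used_mmu_pages / n_max_mmu_pages hits (2701-2746) are a simple budget: the pages still available are the max minus the used count (never negative), and when the limit is lowered, the oldest entries on active_mmu_pages are zapped until the used count fits. A condensed sketch of that arithmetic; zap_oldest() below merely stands in for the real walk over the active list:

    /* VM-wide counters, mirroring kvm->arch.n_used/n_max_mmu_pages. */
    static unsigned long n_used_mmu_pages;
    static unsigned long n_max_mmu_pages;

    /* Stand-in for zapping up to 'nr' of the oldest shadow pages. */
    static void zap_oldest(unsigned long nr)
    {
            n_used_mmu_pages -= nr < n_used_mmu_pages ? nr : n_used_mmu_pages;
    }

    /* How many more shadow pages may still be allocated. */
    static unsigned long mmu_available_pages(void)
    {
            if (n_max_mmu_pages > n_used_mmu_pages)
                    return n_max_mmu_pages - n_used_mmu_pages;
            return 0;
    }

    /* Apply a new limit, shrinking first if we are already over it. */
    static void mmu_change_page_limit(unsigned long goal)
    {
            if (n_used_mmu_pages > goal) {
                    zap_oldest(n_used_mmu_pages - goal);
                    goal = n_used_mmu_pages;   /* zapping may not reach the goal */
            }
            n_max_mmu_pages = goal;
    }
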
2774 if (vcpu->arch.mmu->root_role.direct)
2838 spin_lock(&kvm->arch.mmu_unsync_pages_lock);
2856 spin_unlock(&kvm->arch.mmu_unsync_pages_lock);
3645 union kvm_mmu_page_role role = vcpu->arch.mmu->root_role;
3662 struct kvm_mmu *mmu = vcpu->arch.mmu;
3761 smp_store_release(&kvm->arch.shadow_root_allocated, true);
3770 struct kvm_mmu *mmu = vcpu->arch.mmu;
3888 struct kvm_mmu *mmu = vcpu->arch.mmu;
3998 if (vcpu->arch.mmu->root_role.direct)
4001 if (!VALID_PAGE(vcpu->arch.mmu->root.hpa))
4006 if (vcpu->arch.mmu->cpu_role.base.level >= PT64_ROOT_4LEVEL) {
4007 hpa_t root = vcpu->arch.mmu->root.hpa;
4023 hpa_t root = vcpu->arch.mmu->pae_root[i];
4040 if (is_unsync_root(vcpu->arch.mmu->prev_roots[i].hpa))
4044 kvm_mmu_free_roots(vcpu->kvm, vcpu->arch.mmu, roots_to_free);
4129 rsvd_check = &vcpu->arch.mmu->shadow_zero_check;
4213 u32 id = vcpu->arch.apf.id;
4216 vcpu->arch.apf.id = 1;
4218 return (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id;
4224 struct kvm_arch_async_pf arch;
4226 arch.token = alloc_apf_token(vcpu);
4227 arch.gfn = gfn;
4228 arch.direct_map = vcpu->arch.mmu->root_role.direct;
4229 arch.cr3 = kvm_mmu_get_guest_pgd(vcpu, vcpu->arch.mmu);
4232 kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch);
4239 if ((vcpu->arch.mmu->root_role.direct != work->arch.direct_map) ||
4247 if (!vcpu->arch.mmu->root_role.direct &&
4248 work->arch.cr3 != kvm_mmu_get_guest_pgd(vcpu, vcpu->arch.mmu))
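
The apf hits (4213-4248) build and re-validate async page-fault tokens: a per-vCPU counter that skips zero is shifted into the high bits and OR'd with the vCPU id, and the arch struct additionally stashes the gfn, direct_map flag and guest CR3 so the completed fault can be checked against the current MMU. A standalone sketch of the token encoding only; field names are illustrative and the vCPU id is assumed to fit in the token's low 12 bits:

    #include <stdint.h>

    struct vcpu_apf {
            uint32_t next_id;    /* per-vCPU rolling counter                  */
            uint32_t vcpu_id;    /* assumed to fit in the token's low 12 bits */
    };

    /*
     * Token layout: counter in bits 31:12, vCPU id in bits 11:0.  The counter
     * is reset whenever its shifted value would be 0, so a token of 0 keeps
     * meaning "no async page fault pending".
     */
    static uint32_t alloc_apf_token(struct vcpu_apf *apf)
    {
            if ((uint32_t)(apf->next_id << 12) == 0)
                    apf->next_id = 1;
            return (apf->next_id++ << 12) | apf->vcpu_id;
    }
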
4343 struct kvm_mmu_page *sp = root_to_sp(vcpu->arch.mmu->root.hpa);
4369 if (WARN_ON_ONCE(kvm_mmu_is_dummy_root(vcpu->arch.mmu->root.hpa)))
4417 u32 flags = vcpu->arch.apf.host_apf_flags;
4425 vcpu->arch.l1tf_flush_l1d = true;
4434 vcpu->arch.apf.host_apf_flags = 0;
4621 struct kvm_mmu *mmu = vcpu->arch.mmu;
4657 struct kvm_mmu_page *sp = root_to_sp(vcpu->arch.mmu->root.hpa);
4787 vcpu->arch.reserved_gpa_bits,
4836 vcpu->arch.reserved_gpa_bits, execonly,
5200 struct kvm_mmu *context = &vcpu->arch.root_mmu;
5251 struct kvm_mmu *context = &vcpu->arch.root_mmu;
5276 struct kvm_mmu *context = &vcpu->arch.guest_mmu;
5328 struct kvm_mmu *context = &vcpu->arch.guest_mmu;
5356 struct kvm_mmu *context = &vcpu->arch.root_mmu;
5368 struct kvm_mmu *g_context = &vcpu->arch.nested_mmu;
5385 * Note that arch.mmu->gva_to_gpa translates l2_gpa to l1_gpa using
5387 * of l2_gva to l1_gpa is done by arch.nested_mmu.gva_to_gpa using
5432 vcpu->arch.root_mmu.root_role.word = 0;
5433 vcpu->arch.guest_mmu.root_role.word = 0;
5434 vcpu->arch.nested_mmu.root_role.word = 0;
5435 vcpu->arch.root_mmu.cpu_role.ext.valid = 0;
5436 vcpu->arch.guest_mmu.cpu_role.ext.valid = 0;
5437 vcpu->arch.nested_mmu.cpu_role.ext.valid = 0;
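
The reset block above (5432-5437) wipes all three MMU contexts by zeroing root_role.word and clearing cpu_role.ext.valid; because the roles are bitfields overlaid on a single comparable word, zeroing the word guarantees the next role computation cannot spuriously match a stale cached role. A tiny sketch of that union trick with made-up role bits:

    #include <stdbool.h>
    #include <stdint.h>

    /* Illustrative role layout: bitfields overlaid on one comparable word. */
    union mmu_role {
            struct {
                    uint32_t level  : 4;
                    uint32_t direct : 1;
                    uint32_t valid  : 1;   /* set once the role has been computed */
            };
            uint32_t word;
    };

    /* Reuse test: compare whole words, never individual fields. */
    static bool role_matches(union mmu_role cached, union mmu_role wanted)
    {
            return cached.word == wanted.word;
    }

    /* Reset path: zeroing the word (valid bit included) forces recomputation. */
    static void role_invalidate(union mmu_role *role)
    {
            role->word = 0;
    }
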
5458 r = mmu_topup_memory_caches(vcpu, !vcpu->arch.mmu->root_role.direct);
5464 if (vcpu->arch.mmu->root_role.direct)
5491 kvm_mmu_free_roots(kvm, &vcpu->arch.root_mmu, KVM_MMU_ROOTS_ALL);
5492 WARN_ON_ONCE(VALID_PAGE(vcpu->arch.root_mmu.root.hpa));
5493 kvm_mmu_free_roots(kvm, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);
5494 WARN_ON_ONCE(VALID_PAGE(vcpu->arch.guest_mmu.root.hpa));
5544 __kvm_mmu_free_obsolete_roots(vcpu->kvm, &vcpu->arch.root_mmu);
5545 __kvm_mmu_free_obsolete_roots(vcpu->kvm, &vcpu->arch.guest_mmu);
5661 if (!READ_ONCE(vcpu->kvm->arch.indirect_shadow_pages))
5700 bool direct = vcpu->arch.mmu->root_role.direct;
5713 if (WARN_ON_ONCE(!VALID_PAGE(vcpu->arch.mmu->root.hpa)))
5743 if (vcpu->arch.mmu->root_role.direct &&
5750 * vcpu->arch.mmu.page_fault returned RET_PF_EMULATE, but we can still
5780 if (WARN_ON_ONCE(mmu != vcpu->arch.mmu))
5812 /* It's actually a GPA for vcpu->arch.guest_mmu. */
5813 if (mmu != &vcpu->arch.guest_mmu) {
5846 kvm_mmu_invalidate_addr(vcpu, vcpu->arch.walk_mmu, gva, KVM_MMU_ROOTS_ALL);
5854 struct kvm_mmu *mmu = vcpu->arch.mmu;
5976 /* vcpu->arch.guest_mmu isn't used when !tdp_enabled. */
5977 if (!tdp_enabled && mmu == &vcpu->arch.guest_mmu)
6023 vcpu->arch.mmu_pte_list_desc_cache.kmem_cache = pte_list_desc_cache;
6024 vcpu->arch.mmu_pte_list_desc_cache.gfp_zero = __GFP_ZERO;
6026 vcpu->arch.mmu_page_header_cache.kmem_cache = mmu_page_header_cache;
6027 vcpu->arch.mmu_page_header_cache.gfp_zero = __GFP_ZERO;
6029 vcpu->arch.mmu_shadow_page_cache.gfp_zero = __GFP_ZERO;
6031 vcpu->arch.mmu = &vcpu->arch.root_mmu;
6032 vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
6034 ret = __kvm_mmu_create(vcpu, &vcpu->arch.guest_mmu);
6038 ret = __kvm_mmu_create(vcpu, &vcpu->arch.root_mmu);
6044 free_mmu_pages(&vcpu->arch.guest_mmu);
6057 &kvm->arch.active_mmu_pages, link) {
6086 &kvm->arch.zapped_obsolete_pages, &nr_zapped);
6102 kvm_mmu_commit_zap_page(kvm, &kvm->arch.zapped_obsolete_pages);
6128 kvm->arch.mmu_valid_gen = kvm->arch.mmu_valid_gen ? 0 : 1;
6167 return unlikely(!list_empty_careful(&kvm->arch.zapped_obsolete_pages));
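
The mmu_valid_gen hits (2012, 2252, 6128) implement fast invalidation of every shadow page: the VM-wide generation is flipped between 0 and 1, pages stamped with the old generation become obsolete, and they are drained onto zapped_obsolete_pages in batches afterwards. A condensed sketch of the generation check; one bit is enough only because all obsolete pages are fully zapped before the generation can flip again:

    #include <stdbool.h>

    struct shadow_page {
            unsigned long mmu_valid_gen;   /* generation stamped at creation */
            /* ... */
    };

    static unsigned long kvm_mmu_valid_gen;   /* VM-wide, flips between 0 and 1 */

    /* Invalidate every existing shadow page in O(1): just flip the generation. */
    static void invalidate_all_shadow_pages(void)
    {
            kvm_mmu_valid_gen = kvm_mmu_valid_gen ? 0 : 1;
            /* the real code then zaps the now-obsolete pages in batches */
    }

    /* A page created before the flip no longer matches and must not be reused. */
    static bool sp_is_obsolete(const struct shadow_page *sp)
    {
            return sp->mmu_valid_gen != kvm_mmu_valid_gen;
    }
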
6172 INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
6173 INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages);
6174 INIT_LIST_HEAD(&kvm->arch.possible_nx_huge_pages);
6175 spin_lock_init(&kvm->arch.mmu_unsync_pages_lock);
6180 kvm->arch.split_page_header_cache.kmem_cache = mmu_page_header_cache;
6181 kvm->arch.split_page_header_cache.gfp_zero = __GFP_ZERO;
6183 kvm->arch.split_shadow_page_cache.gfp_zero = __GFP_ZERO;
6185 kvm->arch.split_desc_cache.kmem_cache = pte_list_desc_cache;
6186 kvm->arch.split_desc_cache.gfp_zero = __GFP_ZERO;
6191 kvm_mmu_free_memory_cache(&kvm->arch.split_desc_cache);
6192 kvm_mmu_free_memory_cache(&kvm->arch.split_page_header_cache);
6193 kvm_mmu_free_memory_cache(&kvm->arch.split_shadow_page_cache);
6303 return need_topup(&kvm->arch.split_desc_cache, SPLIT_DESC_CACHE_MIN_NR_OBJECTS) ||
6304 need_topup(&kvm->arch.split_page_header_cache, 1) ||
6305 need_topup(&kvm->arch.split_shadow_page_cache, 1);
6329 r = __kvm_mmu_topup_memory_cache(&kvm->arch.split_desc_cache, capacity,
6334 r = kvm_mmu_topup_memory_cache(&kvm->arch.split_page_header_cache, 1);
6338 return kvm_mmu_topup_memory_cache(&kvm->arch.split_shadow_page_cache, 1);
6361 caches.page_header_cache = &kvm->arch.split_page_header_cache;
6362 caches.shadow_page_cache = &kvm->arch.split_shadow_page_cache;
6373 struct kvm_mmu_memory_cache *cache = &kvm->arch.split_desc_cache;
6670 list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link) {
6750 if (!kvm->arch.n_used_mmu_pages &&
6759 &kvm->arch.zapped_obsolete_pages);
6860 wake_up_process(kvm->arch.nx_huge_page_recovery_thread);
6941 free_mmu_pages(&vcpu->arch.root_mmu);
6942 free_mmu_pages(&vcpu->arch.guest_mmu);
7002 wake_up_process(kvm->arch.nx_huge_page_recovery_thread);
7034 if (list_empty(&kvm->arch.possible_nx_huge_pages))
7044 sp = list_first_entry(&kvm->arch.possible_nx_huge_pages,
7150 &kvm->arch.nx_huge_page_recovery_thread);
7152 kthread_unpark(kvm->arch.nx_huge_page_recovery_thread);
7159 if (kvm->arch.nx_huge_page_recovery_thread)
7160 kthread_stop(kvm->arch.nx_huge_page_recovery_thread);
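
The final group (6670 onwards) is the NX huge page recovery machinery: shadow pages created because of the NX huge page workaround are queued on possible_nx_huge_pages, and a dedicated recovery thread is woken via wake_up_process when the relevant module parameters change and stopped with kthread_stop on VM teardown, zapping queued pages so executable huge mappings can be reinstated. A minimal userspace-style sketch of the queue-and-drain idea only, with an ordinary linked list standing in for the kernel's list_head and kthread:

    #include <stddef.h>

    struct nx_candidate {
            struct nx_candidate *next;
            /* ...identity of the shadow page that split an NX huge page... */
    };

    static struct nx_candidate *possible_nx_huge_pages;   /* queue head */

    /* Split path: remember this page as a candidate for later recovery. */
    static void track_nx_candidate(struct nx_candidate *c)
    {
            c->next = possible_nx_huge_pages;
            possible_nx_huge_pages = c;
    }

    /* Recovery worker body: drain up to 'budget' candidates per wakeup. */
    static void recover_nx_huge_pages(unsigned long budget,
                                      void (*zap)(struct nx_candidate *))
    {
            while (budget-- && possible_nx_huge_pages) {
                    struct nx_candidate *c = possible_nx_huge_pages;
                    possible_nx_huge_pages = c->next;
                    zap(c);   /* stand-in for zapping the shadow page */
            }
    }
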