Lines matching refs: arch (a cross-reference listing over KVM's x86 MMU, arch/x86/kvm/mmu/mmu.c; each entry below is prefixed with its line number in that file)
280 return vcpu->arch.efer & EFER_NX;
661 r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache,
665 r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_shadow_page_cache,
670 r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_gfn_array_cache,
675 return kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
681 kvm_mmu_free_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache);
682 kvm_mmu_free_memory_cache(&vcpu->arch.mmu_shadow_page_cache);
683 kvm_mmu_free_memory_cache(&vcpu->arch.mmu_gfn_array_cache);
684 kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache);
689 return kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_list_desc_cache);
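The lines above (661-689) are the per-vCPU MMU memory caches being filled, drained, and consumed. A minimal sketch of how these call sites fit together; the capacity arguments and the maybe_indirect parameter are as I recall them for this era of mmu.c and should be treated as assumptions:

static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu, bool maybe_indirect)
{
	int r;

	/* Descriptors for rmap/parent-PTE lists consumed while faulting. */
	r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache,
				       1 + PT64_ROOT_MAX_LEVEL + PTE_PREFETCH_NUM);
	if (r)
		return r;
	/* Backing pages for shadow page tables, one per possible level. */
	r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_shadow_page_cache,
				       PT64_ROOT_MAX_LEVEL);
	if (r)
		return r;
	/* gfn arrays are only needed for indirect (non-direct-map) pages. */
	if (maybe_indirect) {
		r = kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_gfn_array_cache,
					       PT64_ROOT_MAX_LEVEL);
		if (r)
			return r;
	}
	return kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
					  PT64_ROOT_MAX_LEVEL);
}

static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache);
	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_shadow_page_cache);
	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_gfn_array_cache);
	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache);
}

The design point: the caches are topped up before mmu_lock is taken, so the allocations may sleep; the fault path then pulls objects out of the caches without risking allocation failure under the lock.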
730 return &slot->arch.lpage_info[level - 2][idx];
762 kvm->arch.indirect_shadow_pages++;
782 &kvm->arch.lpage_disallowed_mmu_pages);
792 kvm->arch.indirect_shadow_pages--;
943 return &slot->arch.rmap[level - PG_LEVEL_4K][idx];
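Lines 730 and 943 index the two per-memslot metadata arrays the same way: by the gfn's offset into the slot, scaled by the page level. A hedged sketch of the two lookup helpers these lines come from; gfn_to_index() is assumed to be the helper in use here, and the comments are mine:

static struct kvm_lpage_info *lpage_info_slot(gfn_t gfn,
					      struct kvm_memory_slot *slot,
					      int level)
{
	unsigned long idx = gfn_to_index(gfn, slot->base_gfn, level);

	/* lpage_info only exists for 2M and up, hence the "- 2". */
	return &slot->arch.lpage_info[level - 2][idx];
}

static struct kvm_rmap_head *__gfn_to_rmap(gfn_t gfn, int level,
					   struct kvm_memory_slot *slot)
{
	unsigned long idx = gfn_to_index(gfn, slot->base_gfn, level);

	/* The rmap has one array per mapping level, starting at 4K. */
	return &slot->arch.rmap[level - PG_LEVEL_4K][idx];
}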
961 mc = &vcpu->arch.mmu_pte_list_desc_cache;
1224 if (kvm->arch.tdp_mmu_enabled)
1253 if (kvm->arch.tdp_mmu_enabled)
1300 if (kvm->arch.tdp_mmu_enabled)
1512 if (kvm->arch.tdp_mmu_enabled)
1524 if (kvm->arch.tdp_mmu_enabled)
1579 if (kvm->arch.tdp_mmu_enabled)
1590 if (kvm->arch.tdp_mmu_enabled)
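The tdp_mmu_enabled checks above all follow one pattern: do the work on the legacy shadow MMU's rmaps first, then give the TDP MMU a chance to do the same on its own page tables and combine the results. A sketch of one such caller as I remember it from this era of the file; the function shown and the kvm_tdp_mmu_write_protect_gfn() helper are assumptions, not taken from the listing:

bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
				    struct kvm_memory_slot *slot, u64 gfn)
{
	struct kvm_rmap_head *rmap_head;
	int i;
	bool write_protected = false;

	/* Legacy MMU: write-protect the gfn at every mapping level. */
	for (i = PG_LEVEL_4K; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) {
		rmap_head = __gfn_to_rmap(gfn, i, slot);
		write_protected |= __rmap_write_protect(kvm, rmap_head, true);
	}

	/* TDP MMU: repeat the operation on its own roots. */
	if (kvm->arch.tdp_mmu_enabled)
		write_protected |=
			kvm_tdp_mmu_write_protect_gfn(kvm, slot, gfn);

	return write_protected;
}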
1614 * kvm->arch.n_used_mmu_pages values. We need a global,
1620 kvm->arch.n_used_mmu_pages += nr;
1666 sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
1667 sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);
1669 sp->gfns = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_gfn_array_cache);
1677 sp->mmu_valid_gen = vcpu->kvm->arch.mmu_valid_gen;
1678 list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
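Lines 1614-1678 are the shadow-page allocation path: a new kvm_mmu_page is carved out of the per-vCPU caches, stamped with the current mmu_valid_gen, pushed onto active_mmu_pages, and counted in n_used_mmu_pages. A sketch reconstructed from those lines; the set_page_private() step and the comments are my additions:

static void kvm_mod_used_mmu_pages(struct kvm *kvm, long nr)
{
	kvm->arch.n_used_mmu_pages += nr;
	percpu_counter_add(&kvm_total_used_mmu_pages, nr);
}

static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu, int direct)
{
	struct kvm_mmu_page *sp;

	sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
	sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);
	if (!direct)
		sp->gfns = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_gfn_array_cache);
	/* Let to_shadow_page() find the header from the table page. */
	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);

	/*
	 * New pages go at the head of active_mmu_pages; the obsolete-page
	 * zapper walks from the tail and stops at the first page whose
	 * generation is still current.
	 */
	sp->mmu_valid_gen = vcpu->kvm->arch.mmu_valid_gen;
	list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
	kvm_mod_used_mmu_pages(vcpu->kvm, +1);
	return sp;
}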
1819 &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)]) \
1832 vcpu->arch.mmu->sync_page(vcpu, sp) == 0) {
1875 unlikely(sp->mmu_valid_gen != kvm->arch.mmu_valid_gen);
2027 bool direct_mmu = vcpu->arch.mmu->direct_map;
2037 role = vcpu->arch.mmu->mmu_role.base;
2043 if (!direct_mmu && vcpu->arch.mmu->root_level <= PT32_ROOT_LEVEL) {
2049 sp_list = &vcpu->kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)];
2121 iterator->level = vcpu->arch.mmu->shadow_root_level;
2124 vcpu->arch.mmu->root_level < PT64_ROOT_4LEVEL &&
2125 !vcpu->arch.mmu->direct_map)
2133 BUG_ON(root != vcpu->arch.mmu->root_hpa);
2136 = vcpu->arch.mmu->pae_root[(addr >> 30) & 3];
2147 shadow_walk_init_using_root(iterator, vcpu, vcpu->arch.mmu->root_hpa,
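Lines 2121-2147 set up the shadow-walk iterator that the for_each_shadow_entry() loops are built on. A sketch of the init helpers reconstructed from those lines; the explanatory comments are my reading, not the file's:

static void shadow_walk_init_using_root(struct kvm_shadow_walk_iterator *iterator,
					struct kvm_vcpu *vcpu, hpa_t root,
					u64 addr)
{
	iterator->addr = addr;
	iterator->shadow_addr = root;
	iterator->level = vcpu->arch.mmu->shadow_root_level;

	/* Shadowing a non-long-mode guest with a 4-level root: skip the
	 * synthetic top level and drop straight to the PAE handling. */
	if (iterator->level == PT64_ROOT_4LEVEL &&
	    vcpu->arch.mmu->root_level < PT64_ROOT_4LEVEL &&
	    !vcpu->arch.mmu->direct_map)
		--iterator->level;

	if (iterator->level == PT32E_ROOT_LEVEL) {
		/* PAE roots are only ever the active root, never a prev_root. */
		BUG_ON(root != vcpu->arch.mmu->root_hpa);

		iterator->shadow_addr
			= vcpu->arch.mmu->pae_root[(addr >> 30) & 3];
		iterator->shadow_addr &= PT64_BASE_ADDR_MASK;
		--iterator->level;
		if (!iterator->shadow_addr)
			iterator->level = 0;
	}
}

static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator,
			     struct kvm_vcpu *vcpu, u64 addr)
{
	shadow_walk_init_using_root(iterator, vcpu, vcpu->arch.mmu->root_hpa,
				    addr);
}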
2397 if (list_empty(&kvm->arch.active_mmu_pages))
2401 list_for_each_entry_safe_reverse(sp, tmp, &kvm->arch.active_mmu_pages, link) {
2427 if (kvm->arch.n_max_mmu_pages > kvm->arch.n_used_mmu_pages)
2428 return kvm->arch.n_max_mmu_pages -
2429 kvm->arch.n_used_mmu_pages;
2456 if (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) {
2457 kvm_mmu_zap_oldest_mmu_pages(kvm, kvm->arch.n_used_mmu_pages -
2460 goal_nr_mmu_pages = kvm->arch.n_used_mmu_pages;
2463 kvm->arch.n_max_mmu_pages = goal_nr_mmu_pages;
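Lines 2427-2463 are the n_used/n_max accounting pair: how much headroom is left, and how the limit is enforced by zapping the oldest shadow pages when userspace lowers it. A sketch from those lines; the mmu_lock usage is as I recall it for this era (it later became an rwlock) and is an assumption:

static unsigned long kvm_mmu_available_pages(struct kvm *kvm)
{
	if (kvm->arch.n_max_mmu_pages > kvm->arch.n_used_mmu_pages)
		return kvm->arch.n_max_mmu_pages -
			kvm->arch.n_used_mmu_pages;

	return 0;
}

void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long goal_nr_mmu_pages)
{
	spin_lock(&kvm->mmu_lock);

	if (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) {
		/* Over the new limit: evict the oldest shadow pages first. */
		kvm_mmu_zap_oldest_mmu_pages(kvm, kvm->arch.n_used_mmu_pages -
						  goal_nr_mmu_pages);

		goal_nr_mmu_pages = kvm->arch.n_used_mmu_pages;
	}

	kvm->arch.n_max_mmu_pages = goal_nr_mmu_pages;

	spin_unlock(&kvm->mmu_lock);
}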
2864 if (WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa)))
3237 u8 shadow_root_level = vcpu->arch.mmu->shadow_root_level;
3241 if (vcpu->kvm->arch.tdp_mmu_enabled) {
3246 vcpu->arch.mmu->root_hpa = root;
3253 vcpu->arch.mmu->root_hpa = root;
3256 MMU_WARN_ON(VALID_PAGE(vcpu->arch.mmu->pae_root[i]));
3262 vcpu->arch.mmu->pae_root[i] = root | PT_PRESENT_MASK;
3264 vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->pae_root);
3269 vcpu->arch.mmu->root_pgd = 0;
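Lines 3237-3269 allocate roots for a direct (TDP) MMU: a single TDP MMU root, a single 4/5-level shadow root, or four PAE roots stitched together through pae_root. A sketch reconstructed from those lines; mmu_alloc_root() is assumed to be the wrapper these call sites use, and the comments are mine:

static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
{
	u8 shadow_root_level = vcpu->arch.mmu->shadow_root_level;
	hpa_t root;
	unsigned i;

	if (vcpu->kvm->arch.tdp_mmu_enabled) {
		root = kvm_tdp_mmu_get_vcpu_root_hpa(vcpu);
		if (!VALID_PAGE(root))
			return -ENOSPC;
		vcpu->arch.mmu->root_hpa = root;
	} else if (shadow_root_level >= PT64_ROOT_4LEVEL) {
		root = mmu_alloc_root(vcpu, 0, 0, shadow_root_level, true);
		if (!VALID_PAGE(root))
			return -ENOSPC;
		vcpu->arch.mmu->root_hpa = root;
	} else if (shadow_root_level == PT32E_ROOT_LEVEL) {
		for (i = 0; i < 4; ++i) {
			MMU_WARN_ON(VALID_PAGE(vcpu->arch.mmu->pae_root[i]));

			root = mmu_alloc_root(vcpu, i << (30 - PAGE_SHIFT),
					      i << 30, PT32_ROOT_LEVEL, true);
			if (!VALID_PAGE(root))
				return -ENOSPC;
			vcpu->arch.mmu->pae_root[i] = root | PT_PRESENT_MASK;
		}
		/* The hardware root is the pae_root page itself. */
		vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->pae_root);
	} else
		BUG();

	/* root_pgd is ignored for direct MMUs. */
	vcpu->arch.mmu->root_pgd = 0;

	return 0;
}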
3281 root_pgd = vcpu->arch.mmu->get_guest_pgd(vcpu);
3291 if (vcpu->arch.mmu->root_level >= PT64_ROOT_4LEVEL) {
3292 MMU_WARN_ON(VALID_PAGE(vcpu->arch.mmu->root_hpa));
3295 vcpu->arch.mmu->shadow_root_level, false);
3298 vcpu->arch.mmu->root_hpa = root;
3308 if (vcpu->arch.mmu->shadow_root_level == PT64_ROOT_4LEVEL) {
3316 if (!vcpu->arch.mmu->pae_root) {
3319 vcpu->arch.mmu->pae_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
3320 if (!vcpu->arch.mmu->pae_root)
3326 MMU_WARN_ON(VALID_PAGE(vcpu->arch.mmu->pae_root[i]));
3327 if (vcpu->arch.mmu->root_level == PT32E_ROOT_LEVEL) {
3328 pdptr = vcpu->arch.mmu->get_pdptr(vcpu, i);
3330 vcpu->arch.mmu->pae_root[i] = 0;
3342 vcpu->arch.mmu->pae_root[i] = root | pm_mask;
3344 vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->pae_root);
3353 if (vcpu->arch.mmu->shadow_root_level == PT64_ROOT_4LEVEL) {
3354 if (vcpu->arch.mmu->lm_root == NULL) {
3361 lm_root[0] = __pa(vcpu->arch.mmu->pae_root) | pm_mask;
3363 vcpu->arch.mmu->lm_root = lm_root;
3366 vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->lm_root);
3370 vcpu->arch.mmu->root_pgd = root_pgd;
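Lines 3353-3370 are the tail of the shadow-root path: when the hardware wants a 4-level root but the guest only provides PAE roots, a one-entry lm_root page is interposed above pae_root. A fragment-level sketch of that step, not a self-contained function; pm_mask and root_pgd come from earlier in the same function and the comment is mine:

	if (vcpu->arch.mmu->shadow_root_level == PT64_ROOT_4LEVEL) {
		if (vcpu->arch.mmu->lm_root == NULL) {
			u64 *lm_root;

			lm_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
			if (lm_root == NULL)
				return 1;

			/* Single top-level entry pointing at the PAE root page. */
			lm_root[0] = __pa(vcpu->arch.mmu->pae_root) | pm_mask;

			vcpu->arch.mmu->lm_root = lm_root;
		}

		vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->lm_root);
	}

	vcpu->arch.mmu->root_pgd = root_pgd;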
3377 if (vcpu->arch.mmu->direct_map)
3388 if (vcpu->arch.mmu->direct_map)
3391 if (!VALID_PAGE(vcpu->arch.mmu->root_hpa))
3396 if (vcpu->arch.mmu->root_level >= PT64_ROOT_4LEVEL) {
3397 hpa_t root = vcpu->arch.mmu->root_hpa;
3428 hpa_t root = vcpu->arch.mmu->pae_root[i];
3456 return vcpu->arch.nested_mmu.translate_gpa(vcpu, vaddr, access, exception);
3525 if (!VALID_PAGE(vcpu->arch.mmu->root_hpa)) {
3530 if (is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa))
3540 rsvd_check = &vcpu->arch.mmu->shadow_zero_check;
3639 u32 id = vcpu->arch.apf.id;
3642 vcpu->arch.apf.id = 1;
3644 return (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id;
3650 struct kvm_arch_async_pf arch;
3652 arch.token = alloc_apf_token(vcpu);
3653 arch.gfn = gfn;
3654 arch.direct_map = vcpu->arch.mmu->direct_map;
3655 arch.cr3 = vcpu->arch.mmu->get_guest_pgd(vcpu);
3658 kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch);
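Lines 3639-3658 build the arch-specific async page fault descriptor: a non-zero token with the vCPU id folded into the low 12 bits, plus the gfn and paging mode needed to retry the fault later. A sketch reconstructed from these lines, with comments added:

static u32 alloc_apf_token(struct kvm_vcpu *vcpu)
{
	/* Make sure the token value is never 0. */
	u32 id = vcpu->arch.apf.id;

	if (id << 12 == 0)
		vcpu->arch.apf.id = 1;

	/* Token in the upper 20 bits, vCPU id in the lower 12. */
	return (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id;
}

static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
				    gfn_t gfn)
{
	struct kvm_arch_async_pf arch;

	arch.token = alloc_apf_token(vcpu);
	arch.gfn = gfn;
	arch.direct_map = vcpu->arch.mmu->direct_map;
	arch.cr3 = vcpu->arch.mmu->get_guest_pgd(vcpu);

	return kvm_setup_async_pf(vcpu, cr2_or_gpa,
				  kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch);
}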
3716 if (!is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa)) {
3743 if (is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa))
3770 u32 flags = vcpu->arch.apf.host_apf_flags;
3778 vcpu->arch.l1tf_flush_l1d = true;
3787 vcpu->arch.apf.host_apf_flags = 0;
3852 struct kvm_mmu *mmu = vcpu->arch.mmu;
3876 struct kvm_mmu *mmu = vcpu->arch.mmu;
3895 kvm_mmu_free_roots(vcpu, vcpu->arch.mmu, KVM_MMU_ROOT_CURRENT);
3926 to_shadow_page(vcpu->arch.mmu->root_hpa));
4513 struct kvm_mmu *context = &vcpu->arch.root_mmu;
4609 struct kvm_mmu *context = &vcpu->arch.root_mmu;
4632 struct kvm_mmu *context = &vcpu->arch.guest_mmu;
4656 role.base.smm = vcpu->arch.root_mmu.mmu_role.base.smm;
4681 struct kvm_mmu *context = &vcpu->arch.guest_mmu;
4714 struct kvm_mmu *context = &vcpu->arch.root_mmu;
4719 vcpu->arch.efer);
4753 struct kvm_mmu *g_context = &vcpu->arch.nested_mmu;
4770 * Note that arch.mmu->gva_to_gpa translates l2_gpa to l1_gpa using
4772 * of l2_gva to l1_gpa is done by arch.nested_mmu.gva_to_gpa using
4809 vcpu->arch.mmu->root_hpa = INVALID_PAGE;
4812 vcpu->arch.mmu->prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
4848 r = mmu_topup_memory_caches(vcpu, !vcpu->arch.mmu->direct_map);
4864 kvm_mmu_free_roots(vcpu, &vcpu->arch.root_mmu, KVM_MMU_ROOTS_ALL);
4865 WARN_ON(VALID_PAGE(vcpu->arch.root_mmu.root_hpa));
4866 kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);
4867 WARN_ON(VALID_PAGE(vcpu->arch.guest_mmu.root_hpa));
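Lines 4864-4867 show why there are two MMU contexts per vCPU: root_mmu for ordinary operation and guest_mmu for nested paging, and both have their roots freed on unload. A sketch matching those lines:

void kvm_mmu_unload(struct kvm_vcpu *vcpu)
{
	kvm_mmu_free_roots(vcpu, &vcpu->arch.root_mmu, KVM_MMU_ROOTS_ALL);
	WARN_ON(VALID_PAGE(vcpu->arch.root_mmu.root_hpa));
	kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);
	WARN_ON(VALID_PAGE(vcpu->arch.guest_mmu.root_hpa));
}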
5001 if (!READ_ONCE(vcpu->kvm->arch.indirect_shadow_pages))
5055 if (vcpu->arch.mmu->direct_map)
5070 bool direct = vcpu->arch.mmu->direct_map;
5072 if (WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa)))
5101 if (vcpu->arch.mmu->direct_map &&
5108 * vcpu->arch.mmu.page_fault returned RET_PF_EMULATE, but we can still
5131 /* It's actually a GPA for vcpu->arch.guest_mmu. */
5132 if (mmu != &vcpu->arch.guest_mmu) {
5168 kvm_mmu_invalidate_gva(vcpu, vcpu->arch.walk_mmu, gva, INVALID_PAGE);
5176 struct kvm_mmu *mmu = vcpu->arch.mmu;
5348 vcpu->arch.mmu_pte_list_desc_cache.kmem_cache = pte_list_desc_cache;
5349 vcpu->arch.mmu_pte_list_desc_cache.gfp_zero = __GFP_ZERO;
5351 vcpu->arch.mmu_page_header_cache.kmem_cache = mmu_page_header_cache;
5352 vcpu->arch.mmu_page_header_cache.gfp_zero = __GFP_ZERO;
5354 vcpu->arch.mmu_shadow_page_cache.gfp_zero = __GFP_ZERO;
5356 vcpu->arch.mmu = &vcpu->arch.root_mmu;
5357 vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
5359 vcpu->arch.nested_mmu.translate_gpa = translate_nested_gpa;
5361 ret = __kvm_mmu_create(vcpu, &vcpu->arch.guest_mmu);
5365 ret = __kvm_mmu_create(vcpu, &vcpu->arch.root_mmu);
5371 free_mmu_pages(&vcpu->arch.guest_mmu);
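Lines 5348-5371 are the vCPU-creation wiring: the caches get their kmem_cache backing and __GFP_ZERO, arch.mmu and walk_mmu both point at root_mmu, and the two MMU contexts are created, with guest_mmu freed on the error path. A sketch assembled from those lines; the error-path label is my reconstruction:

int kvm_mmu_create(struct kvm_vcpu *vcpu)
{
	int ret;

	vcpu->arch.mmu_pte_list_desc_cache.kmem_cache = pte_list_desc_cache;
	vcpu->arch.mmu_pte_list_desc_cache.gfp_zero = __GFP_ZERO;

	vcpu->arch.mmu_page_header_cache.kmem_cache = mmu_page_header_cache;
	vcpu->arch.mmu_page_header_cache.gfp_zero = __GFP_ZERO;

	vcpu->arch.mmu_shadow_page_cache.gfp_zero = __GFP_ZERO;

	/* Until nested paging is in use, both pointers track root_mmu. */
	vcpu->arch.mmu = &vcpu->arch.root_mmu;
	vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;

	vcpu->arch.nested_mmu.translate_gpa = translate_nested_gpa;

	ret = __kvm_mmu_create(vcpu, &vcpu->arch.guest_mmu);
	if (ret)
		return ret;

	ret = __kvm_mmu_create(vcpu, &vcpu->arch.root_mmu);
	if (ret)
		goto fail_allocate_root;

	return ret;
fail_allocate_root:
	free_mmu_pages(&vcpu->arch.guest_mmu);
	return ret;
}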
5384 &kvm->arch.active_mmu_pages, link) {
5413 &kvm->arch.zapped_obsolete_pages, &nr_zapped);
5425 kvm_mmu_commit_zap_page(kvm, &kvm->arch.zapped_obsolete_pages);
5451 kvm->arch.mmu_valid_gen = kvm->arch.mmu_valid_gen ? 0 : 1;
5465 if (kvm->arch.tdp_mmu_enabled)
5473 return unlikely(!list_empty_careful(&kvm->arch.zapped_obsolete_pages));
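Lines 5384-5473 belong to the "fast zap": rather than tearing down every shadow page eagerly, the VM-wide generation number is flipped so that existing pages become obsolete and are reaped onto zapped_obsolete_pages. A condensed sketch of the idea; locking, tracing, and batching are omitted, and the real function runs under slots_lock and mmu_lock:

static void kvm_mmu_zap_all_fast(struct kvm *kvm)
{
	/*
	 * Toggle mmu_valid_gen between 0 and 1; shadow pages whose
	 * mmu_valid_gen no longer matches are now obsolete.
	 */
	kvm->arch.mmu_valid_gen = kvm->arch.mmu_valid_gen ? 0 : 1;

	/* Make vCPUs reload their roots before re-entering the guest. */
	kvm_reload_remote_mmus(kvm);

	/* Reap obsolete pages onto kvm->arch.zapped_obsolete_pages. */
	kvm_zap_obsolete_pages(kvm);

	/* The TDP MMU zaps its own roots separately. */
	if (kvm->arch.tdp_mmu_enabled)
		kvm_tdp_mmu_zap_all(kvm);
}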
5485 struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;
5496 struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;
5528 if (kvm->arch.tdp_mmu_enabled) {
5552 if (kvm->arch.tdp_mmu_enabled)
5618 if (kvm->arch.tdp_mmu_enabled)
5645 if (kvm->arch.tdp_mmu_enabled)
5668 if (kvm->arch.tdp_mmu_enabled)
5684 if (kvm->arch.tdp_mmu_enabled)
5701 list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link) {
5712 if (kvm->arch.tdp_mmu_enabled)
5770 if (!kvm->arch.n_used_mmu_pages &&
5779 &kvm->arch.zapped_obsolete_pages);
5877 wake_up_process(kvm->arch.nx_lpage_recovery_thread);
5971 free_mmu_pages(&vcpu->arch.root_mmu);
5972 free_mmu_pages(&vcpu->arch.guest_mmu);
6001 wake_up_process(kvm->arch.nx_lpage_recovery_thread);
6024 if (list_empty(&kvm->arch.lpage_disallowed_mmu_pages))
6032 sp = list_first_entry(&kvm->arch.lpage_disallowed_mmu_pages,
6093 &kvm->arch.nx_lpage_recovery_thread);
6095 kthread_unpark(kvm->arch.nx_lpage_recovery_thread);
6102 if (kvm->arch.nx_lpage_recovery_thread)
6103 kthread_stop(kvm->arch.nx_lpage_recovery_thread);
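The last lines (5877-6103) track the NX huge-page recovery thread from wakeup to teardown. A sketch of the lifecycle hooks reconstructed from lines 6093-6103; the worker function name and the thread name string are as I recall them for this era and should be treated as assumptions:

int kvm_mmu_post_init_vm(struct kvm *kvm)
{
	int err;

	err = kvm_vm_create_worker_thread(kvm, kvm_nx_lpage_recovery_worker, 0,
					  "kvm-nx-lpage-recovery",
					  &kvm->arch.nx_lpage_recovery_thread);
	if (!err)
		kthread_unpark(kvm->arch.nx_lpage_recovery_thread);

	return err;
}

void kvm_mmu_pre_destroy_vm(struct kvm *kvm)
{
	if (kvm->arch.nx_lpage_recovery_thread)
		kthread_stop(kvm->arch.nx_lpage_recovery_thread);
}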