Lines matching defs:context in arch/x86/kvm/mmu/mmu.c (the leading number on each match is its line number in that file)
129 * given GFN when used in the context of rmaps. Using a custom list allows KVM
1101 * Unlike rmap_add, rmap_remove does not run in the context of a vCPU
1102 * so we have to determine which memslots to use based on context
1918 * page using the current MMU context.
1934 * sync a shadow page for a different MMU context, e.g. if the role
4515 static void nonpaging_init_context(struct kvm_mmu *context)
4517 context->page_fault = nonpaging_page_fault;
4518 context->gva_to_gpa = nonpaging_gva_to_gpa;
4519 context->sync_spte = NULL;
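The matches at 4515 and 4517-4519 cover all of nonpaging_init_context except the opening brace at 4516, which simply doesn't contain the search term. A reconstruction, with explanatory comments added here rather than quoted from the source:

static void nonpaging_init_context(struct kvm_mmu *context)
{
	/*
	 * With guest paging disabled there are no guest page tables to
	 * walk, so GVAs translate 1:1 to GPAs and there are no unsync
	 * shadow pages to bring back in sync.
	 */
	context->page_fault = nonpaging_page_fault;
	context->gva_to_gpa = nonpaging_gva_to_gpa;
	context->sync_spte = NULL;
}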
4784 struct kvm_mmu *context)
4786 __reset_rsvds_bits_mask(&context->guest_rsvd_check,
4788 context->cpu_role.base.level, is_efer_nx(context),
4790 is_cr4_pse(context),
4833 struct kvm_mmu *context, bool execonly, int huge_page_level)
4835 __reset_rsvds_bits_mask_ept(&context->guest_rsvd_check,
4851 struct kvm_mmu *context)
4860 WARN_ON_ONCE(context->root_role.level < PT32E_ROOT_LEVEL);
4862 shadow_zero_check = &context->shadow_zero_check;
4864 context->root_role.level,
4865 context->root_role.efer_nx,
4872 for (i = context->root_role.level; --i >= 0;) {
4897 static void reset_tdp_shadow_zero_bits_mask(struct kvm_mmu *context)
4902 shadow_zero_check = &context->shadow_zero_check;
4906 context->root_role.level, true,
4917 for (i = context->root_role.level; --i >= 0;) {
4928 reset_ept_shadow_zero_bits_mask(struct kvm_mmu *context, bool execonly)
4930 __reset_rsvds_bits_mask_ept(&context->shadow_zero_check,
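The matches from 4784 through 4930 all belong to the reserved-bits machinery: guest_rsvd_check records which PTE bits the *guest's* emulated paging mode reserves (so KVM can inject the right #PF during walks), while shadow_zero_check records bits that must be clear in the SPTEs KVM itself writes. A sketch of the TDP variant whose fragments appear at 4897-4917; unmatched lines are filled in from the surrounding definitions, and helpers such as reserved_hpa_bits(), boot_cpu_is_amd(), and shadow_me_mask are mmu.c internals whose details vary across kernel versions:

static void reset_tdp_shadow_zero_bits_mask(struct kvm_mmu *context)
{
	struct rsvd_bits_validate *shadow_zero_check;
	int i;

	shadow_zero_check = &context->shadow_zero_check;

	/* TDP SPTEs follow the host format: NPT on AMD, EPT on Intel. */
	if (boot_cpu_is_amd())
		__reset_rsvds_bits_mask(shadow_zero_check, reserved_hpa_bits(),
					context->root_role.level, true,
					boot_cpu_has(X86_FEATURE_GBPAGES),
					false, true);
	else
		__reset_rsvds_bits_mask_ept(shadow_zero_check,
					    reserved_hpa_bits(), false,
					    max_huge_page_level);

	if (!shadow_me_mask)
		return;

	/* Memory-encryption bits are KVM's to set, never reserved. */
	for (i = context->root_role.level; --i >= 0;) {
		shadow_zero_check->rsvd_bits_mask[0][i] &= ~shadow_me_mask;
		shadow_zero_check->rsvd_bits_mask[1][i] &= ~shadow_me_mask;
	}
}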
5096 static void paging64_init_context(struct kvm_mmu *context)
5098 context->page_fault = paging64_page_fault;
5099 context->gva_to_gpa = paging64_gva_to_gpa;
5100 context->sync_spte = paging64_sync_spte;
5103 static void paging32_init_context(struct kvm_mmu *context)
5105 context->page_fault = paging32_page_fault;
5106 context->gva_to_gpa = paging32_gva_to_gpa;
5107 context->sync_spte = paging32_sync_spte;
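The paging64_* and paging32_* handlers assigned above (and the ept_* handlers further down) are not written by hand: mmu.c instantiates paging_tmpl.h once per PTE format, and the template's FNAME() macro stamps out the prefixed symbols. From mmu.c, structure only:

#define PTTYPE_EPT 18 /* arbitrary */
#define PTTYPE PTTYPE_EPT
#include "paging_tmpl.h"
#undef PTTYPE

#define PTTYPE 64
#include "paging_tmpl.h"
#undef PTTYPE

#define PTTYPE 32
#include "paging_tmpl.h"
#undef PTTYPE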
5200 struct kvm_mmu *context = &vcpu->arch.root_mmu;
5203 if (cpu_role.as_u64 == context->cpu_role.as_u64 &&
5204 root_role.word == context->root_role.word)
5207 context->cpu_role.as_u64 = cpu_role.as_u64;
5208 context->root_role.word = root_role.word;
5209 context->page_fault = kvm_tdp_page_fault;
5210 context->sync_spte = NULL;
5211 context->get_guest_pgd = get_guest_cr3;
5212 context->get_pdptr = kvm_pdptr_read;
5213 context->inject_page_fault = kvm_inject_page_fault;
5215 if (!is_cr0_pg(context))
5216 context->gva_to_gpa = nonpaging_gva_to_gpa;
5217 else if (is_cr4_pae(context))
5218 context->gva_to_gpa = paging64_gva_to_gpa;
5220 context->gva_to_gpa = paging32_gva_to_gpa;
5222 reset_guest_paging_metadata(vcpu, context);
5223 reset_tdp_shadow_zero_bits_mask(context);
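The matches at 5200-5223 are nearly all of init_kvm_tdp_mmu; only blank lines, braces, the bare else, and the role-calculation line are missing. A reconstructed sketch (kvm_calc_tdp_mmu_root_page_role is the helper used in current kernels; exact details differ by version):

static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu,
			     union kvm_cpu_role cpu_role)
{
	struct kvm_mmu *context = &vcpu->arch.root_mmu;
	union kvm_mmu_page_role root_role =
		kvm_calc_tdp_mmu_root_page_role(vcpu, cpu_role);

	/* Nothing to do if neither half of the role changed. */
	if (cpu_role.as_u64 == context->cpu_role.as_u64 &&
	    root_role.word == context->root_role.word)
		return;

	context->cpu_role.as_u64 = cpu_role.as_u64;
	context->root_role.word = root_role.word;
	context->page_fault = kvm_tdp_page_fault;
	context->sync_spte = NULL;	/* TDP pages are never unsync. */
	context->get_guest_pgd = get_guest_cr3;
	context->get_pdptr = kvm_pdptr_read;
	context->inject_page_fault = kvm_inject_page_fault;

	/* gva_to_gpa still follows the guest's paging mode, for emulation. */
	if (!is_cr0_pg(context))
		context->gva_to_gpa = nonpaging_gva_to_gpa;
	else if (is_cr4_pae(context))
		context->gva_to_gpa = paging64_gva_to_gpa;
	else
		context->gva_to_gpa = paging32_gva_to_gpa;

	reset_guest_paging_metadata(vcpu, context);
	reset_tdp_shadow_zero_bits_mask(context);
}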
5226 static void shadow_mmu_init_context(struct kvm_vcpu *vcpu, struct kvm_mmu *context,
5230 if (cpu_role.as_u64 == context->cpu_role.as_u64 &&
5231 root_role.word == context->root_role.word)
5234 context->cpu_role.as_u64 = cpu_role.as_u64;
5235 context->root_role.word = root_role.word;
5237 if (!is_cr0_pg(context))
5238 nonpaging_init_context(context);
5239 else if (is_cr4_pae(context))
5240 paging64_init_context(context);
5242 paging32_init_context(context);
5244 reset_guest_paging_metadata(vcpu, context);
5245 reset_shadow_zero_bits_mask(vcpu, context);
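shadow_mmu_init_context (5226-5245) is the common tail for every shadow-paging setup, dispatching to the init_context helper that matches the guest's paging mode. Reconstructed from the matches, which omit only braces, blank lines, and the bare else keywords:

static void shadow_mmu_init_context(struct kvm_vcpu *vcpu,
				    struct kvm_mmu *context,
				    union kvm_cpu_role cpu_role,
				    union kvm_mmu_page_role root_role)
{
	if (cpu_role.as_u64 == context->cpu_role.as_u64 &&
	    root_role.word == context->root_role.word)
		return;

	context->cpu_role.as_u64 = cpu_role.as_u64;
	context->root_role.word = root_role.word;

	if (!is_cr0_pg(context))
		nonpaging_init_context(context);
	else if (is_cr4_pae(context))
		paging64_init_context(context);
	else
		paging32_init_context(context);

	reset_guest_paging_metadata(vcpu, context);
	reset_shadow_zero_bits_mask(vcpu, context);
}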
5251 struct kvm_mmu *context = &vcpu->arch.root_mmu;
5270 shadow_mmu_init_context(vcpu, context, cpu_role, root_role);
5276 struct kvm_mmu *context = &vcpu->arch.guest_mmu;
5294 shadow_mmu_init_context(vcpu, context, cpu_role, root_role);
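Both callers, kvm_init_shadow_mmu (5251/5270) and kvm_init_shadow_npt_mmu (5276/5294), only compute a (cpu_role, root_role) pair and delegate; note that the first targets root_mmu and the second guest_mmu. A heavily abridged, structure-only sketch; the elided role calculations are summarized in comments and may differ by kernel version:

static void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu,
				union kvm_cpu_role cpu_role)
{
	struct kvm_mmu *context = &vcpu->arch.root_mmu;
	union kvm_mmu_page_role root_role = cpu_role.base;

	/* ... bump root_role.level to at least PT32E_ROOT_LEVEL and
	 * force root_role.efer_nx, per the comments in mmu.c ... */

	shadow_mmu_init_context(vcpu, context, cpu_role, root_role);
}

void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, unsigned long cr0,
			     unsigned long cr4, u64 efer, gpa_t nested_cr3)
{
	struct kvm_mmu *context = &vcpu->arch.guest_mmu;

	/* ... derive cpu_role from L1's CR0/CR4/EFER, pick the NPT root
	 * level, then hand off exactly as above ... */

	shadow_mmu_init_context(vcpu, context, cpu_role, root_role);
	kvm_mmu_new_pgd(vcpu, nested_cr3);
}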
5328 struct kvm_mmu *context = &vcpu->arch.guest_mmu;
5334 if (new_mode.as_u64 != context->cpu_role.as_u64) {
5336 context->cpu_role.as_u64 = new_mode.as_u64;
5337 context->root_role.word = new_mode.base.word;
5339 context->page_fault = ept_page_fault;
5340 context->gva_to_gpa = ept_gva_to_gpa;
5341 context->sync_spte = ept_sync_spte;
5343 update_permission_bitmask(context, true);
5344 context->pkru_mask = 0;
5345 reset_rsvds_bits_mask_ept(vcpu, context, execonly, huge_page_level);
5346 reset_ept_shadow_zero_bits_mask(context, execonly);
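kvm_init_shadow_ept_mmu (5328-5346) is the nested-EPT analogue of the helpers above, rebuilding the context only when the EPT role actually changes. A reconstructed sketch; the signature and role helpers match current kernels but may differ by version:

void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
			     int huge_page_level, bool accessed_dirty,
			     gpa_t new_eptp)
{
	struct kvm_mmu *context = &vcpu->arch.guest_mmu;
	u8 level = vmx_eptp_page_walk_level(new_eptp);
	union kvm_cpu_role new_mode =
		kvm_calc_shadow_ept_root_page_role(vcpu, accessed_dirty,
						   execonly, level);

	if (new_mode.as_u64 != context->cpu_role.as_u64) {
		/* EPT, and thus nested EPT, does not consume CR0/CR4/EFER. */
		context->cpu_role.as_u64 = new_mode.as_u64;
		context->root_role.word = new_mode.base.word;

		context->page_fault = ept_page_fault;
		context->gva_to_gpa = ept_gva_to_gpa;
		context->sync_spte = ept_sync_spte;

		update_permission_bitmask(context, true);
		context->pkru_mask = 0;	/* PKRU never applies to EPT. */
		reset_rsvds_bits_mask_ept(vcpu, context, execonly,
					  huge_page_level);
		reset_ept_shadow_zero_bits_mask(context, execonly);
	}

	kvm_mmu_new_pgd(vcpu, new_eptp);
}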
5356 struct kvm_mmu *context = &vcpu->arch.root_mmu;
5360 context->get_guest_pgd = get_guest_cr3;
5361 context->get_pdptr = kvm_pdptr_read;
5362 context->inject_page_fault = kvm_inject_page_fault;
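init_kvm_softmmu (5356-5362) wires up the guest-state callbacks after kvm_init_shadow_mmu has configured everything else; a sketch with the one unmatched call filled in from context:

static void init_kvm_softmmu(struct kvm_vcpu *vcpu,
			     union kvm_cpu_role cpu_role)
{
	struct kvm_mmu *context = &vcpu->arch.root_mmu;

	kvm_init_shadow_mmu(vcpu, cpu_role);

	context->get_guest_pgd = get_guest_cr3;
	context->get_pdptr = kvm_pdptr_read;
	context->inject_page_fault = kvm_inject_page_fault;
}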
5777 * the context of the current MMU, and would need to be reworked if