Lines matching defs:context
(Definition and assignment hits for the struct kvm_mmu *context parameter, grouped by enclosing function. The function names and source line numbers point at KVM's x86 MMU initialization code, apparently arch/x86/kvm/mmu/mmu.c somewhere around v5.10-v5.12.)
3819 struct kvm_mmu *context)
3821 context->page_fault = nonpaging_page_fault;
3822 context->gva_to_gpa = nonpaging_gva_to_gpa;
3823 context->sync_page = nonpaging_sync_page;
3824 context->invlpg = NULL;
3825 context->root_level = 0;
3826 context->shadow_root_level = PT32E_ROOT_LEVEL;
3827 context->direct_map = true;
3828 context->nx = false;
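The eight hits at 3821-3828 are the body of nonpaging_init_context(), the setup path for unpaged (CR0.PG=0) guests. A sketch of the whole helper, reconstructed from mainline sources of roughly this era; the signature line and braces are not in the listing and may differ by version:

    static void nonpaging_init_context(struct kvm_vcpu *vcpu,
                                       struct kvm_mmu *context)
    {
            /* No guest paging: a guest virtual address is already a GPA. */
            context->page_fault = nonpaging_page_fault;
            context->gva_to_gpa = nonpaging_gva_to_gpa;
            context->sync_page = nonpaging_sync_page;
            context->invlpg = NULL;
            context->root_level = 0;                /* guest walks no tables */
            context->shadow_root_level = PT32E_ROOT_LEVEL;
            context->direct_map = true;             /* shadow maps GPA 1:1 */
            context->nx = false;
    }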
4083 struct kvm_mmu *context)
4085 __reset_rsvds_bits_mask(vcpu, &context->guest_rsvd_check,
4086 cpuid_maxphyaddr(vcpu), context->root_level,
4087 context->nx,
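4083-4087 open reset_rsvds_bits_mask(), which computes the reserved-bit masks a guest page walk must honor; the listing cuts the call off mid-argument-list. A reconstruction of the full call from mainline of the same era (the trailing arguments, including guest_cpuid_is_amd_or_hygon(), are assumptions taken from those sources, not from the listing):

    static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
                                      struct kvm_mmu *context)
    {
            /* Guest-visible reserved bits depend on CPUID and paging mode. */
            __reset_rsvds_bits_mask(vcpu, &context->guest_rsvd_check,
                                    cpuid_maxphyaddr(vcpu), context->root_level,
                                    context->nx,
                                    guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES),
                                    is_pse(vcpu),
                                    guest_cpuid_is_amd_or_hygon(vcpu));
    }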
4131 struct kvm_mmu *context, bool execonly)
4133 __reset_rsvds_bits_mask_ept(&context->guest_rsvd_check,
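reset_rsvds_bits_mask_ept() (4131-4133) is the EPT twin of the helper above; the truncated call most likely continues with the guest MAXPHYADDR. A hedged sketch:

    static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu,
                    struct kvm_mmu *context, bool execonly)
    {
            __reset_rsvds_bits_mask_ept(&context->guest_rsvd_check,
                                        cpuid_maxphyaddr(vcpu), execonly);
    }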
4143 reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
4153 bool uses_nx = context->nx || !tdp_enabled ||
4154 context->mmu_role.base.smep_andnot_wp;
4162 shadow_zero_check = &context->shadow_zero_check;
4165 context->shadow_root_level, uses_nx,
4172 for (i = context->shadow_root_level; --i >= 0;) {
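4143-4172 belong to reset_shadow_zero_bits_mask(), which derives the reserved-bit checks KVM enforces on its own shadow PTEs. A reconstruction of the full function from the same kernel range (the comment wording and the shadow_me_mask loop body are assumptions from mainline):

    void
    reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
    {
            /*
             * Shadow paging uses NX even when the guest does not: with TDP
             * disabled it backs the iTLB-multihit mitigation and the
             * CR0.WP=0/CR4.SMEP=1 emulation, hence the extra terms.
             */
            bool uses_nx = context->nx || !tdp_enabled ||
                    context->mmu_role.base.smep_andnot_wp;
            struct rsvd_bits_validate *shadow_zero_check;
            int i;

            shadow_zero_check = &context->shadow_zero_check;
            __reset_rsvds_bits_mask(vcpu, shadow_zero_check,
                                    shadow_phys_bits,
                                    context->shadow_root_level, uses_nx,
                                    guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES),
                                    is_pse(vcpu), true);

            if (!shadow_me_mask)
                    return;

            /* Memory-encryption bits are never reserved in shadow PTEs. */
            for (i = context->shadow_root_level; --i >= 0;) {
                    shadow_zero_check->rsvd_bits_mask[0][i] &= ~shadow_me_mask;
                    shadow_zero_check->rsvd_bits_mask[1][i] &= ~shadow_me_mask;
            }
    }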
4192 struct kvm_mmu *context)
4197 shadow_zero_check = &context->shadow_zero_check;
4202 context->shadow_root_level, false,
4213 for (i = context->shadow_root_level; --i >= 0;) {
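4192-4213 are reset_tdp_shadow_zero_bits_mask(), the TDP variant; the hits skip the branch that picks host-paging rules (NPT) versus EPT rules. Skeleton, reconstructed and hedged as above:

    static void
    reset_tdp_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
                                    struct kvm_mmu *context)
    {
            struct rsvd_bits_validate *shadow_zero_check;
            int i;

            shadow_zero_check = &context->shadow_zero_check;

            if (boot_cpu_is_amd())
                    /* NPT uses the host paging format and its rules. */
                    __reset_rsvds_bits_mask(vcpu, shadow_zero_check,
                                            shadow_phys_bits,
                                            context->shadow_root_level, false,
                                            boot_cpu_has(X86_FEATURE_GBPAGES),
                                            true, true);
            else
                    /* EPT has its own reserved-bit layout. */
                    __reset_rsvds_bits_mask_ept(shadow_zero_check,
                                                shadow_phys_bits,
                                                false);

            if (!shadow_me_mask)
                    return;

            for (i = context->shadow_root_level; --i >= 0;) {
                    shadow_zero_check->rsvd_bits_mask[0][i] &= ~shadow_me_mask;
                    shadow_zero_check->rsvd_bits_mask[1][i] &= ~shadow_me_mask;
            }
    }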
4225 struct kvm_mmu *context, bool execonly)
4227 __reset_rsvds_bits_mask_ept(&context->shadow_zero_check,
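reset_ept_shadow_zero_bits_mask() (4225-4227) mirrors the guest-side EPT helper but fills shadow_zero_check; the truncated call plausibly continues with the host physical-address width:

    static void
    reset_ept_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
                                    struct kvm_mmu *context, bool execonly)
    {
            __reset_rsvds_bits_mask_ept(&context->shadow_zero_check,
                                        shadow_phys_bits, execonly);
    }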
4398 struct kvm_mmu *context,
4401 context->nx = is_nx(vcpu);
4402 context->root_level = level;
4404 reset_rsvds_bits_mask(vcpu, context);
4405 update_permission_bitmask(vcpu, context, false);
4406 update_pkru_bitmask(vcpu, context, false);
4407 update_last_nonleaf_level(vcpu, context);
4410 context->page_fault = paging64_page_fault;
4411 context->gva_to_gpa = paging64_gva_to_gpa;
4412 context->sync_page = paging64_sync_page;
4413 context->invlpg = paging64_invlpg;
4414 context->shadow_root_level = level;
4415 context->direct_map = false;
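4398-4415 are paging64_init_context_common(). Line 4398 ends with a comma, so the signature continues; given that 4402 assigns context->root_level = level, the third parameter is almost certainly the root level, and the gap at 4408-4409 is likely a blank line plus a MMU_WARN_ON(!is_pae(vcpu)) in kernels of this era. Sketch of the head:

    static void paging64_init_context_common(struct kvm_vcpu *vcpu,
                                             struct kvm_mmu *context,
                                             int level)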
4419 struct kvm_mmu *context)
4424 paging64_init_context_common(vcpu, context, root_level);
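paging64_init_context() (4419-4424) passes a root_level computed in the elided lines; the LA57 selection visible at 4536 in the TDP path strongly suggests the same expression here:

    static void paging64_init_context(struct kvm_vcpu *vcpu,
                                      struct kvm_mmu *context)
    {
            /* 5-level guest paging (LA57) gets a 5-level walk. */
            int root_level = is_la57_mode(vcpu) ? PT64_ROOT_5LEVEL :
                                                  PT64_ROOT_4LEVEL;

            paging64_init_context_common(vcpu, context, root_level);
    }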
4428 struct kvm_mmu *context)
4430 context->nx = false;
4431 context->root_level = PT32_ROOT_LEVEL;
4433 reset_rsvds_bits_mask(vcpu, context);
4434 update_permission_bitmask(vcpu, context, false);
4435 update_pkru_bitmask(vcpu, context, false);
4436 update_last_nonleaf_level(vcpu, context);
4438 context->page_fault = paging32_page_fault;
4439 context->gva_to_gpa = paging32_gva_to_gpa;
4440 context->sync_page = paging32_sync_page;
4441 context->invlpg = paging32_invlpg;
4442 context->shadow_root_level = PT32E_ROOT_LEVEL;
4443 context->direct_map = false;
4447 struct kvm_mmu *context)
4449 paging64_init_context_common(vcpu, context, PT32E_ROOT_LEVEL);
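Note the asymmetry here: paging32E_init_context() (4447-4449) funnels PAE guests through the paging64 common initializer with PT32E_ROOT_LEVEL. PAE page-table entries are 8 bytes wide, so the 64-bit walker already handles them; only the 3-level root depth differs.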
4513 struct kvm_mmu *context = &vcpu->arch.root_mmu;
4517 if (new_role.as_u64 == context->mmu_role.as_u64)
4520 context->mmu_role.as_u64 = new_role.as_u64;
4521 context->page_fault = kvm_tdp_page_fault;
4522 context->sync_page = nonpaging_sync_page;
4523 context->invlpg = NULL;
4524 context->shadow_root_level = kvm_mmu_get_tdp_level(vcpu);
4525 context->direct_map = true;
4526 context->get_guest_pgd = get_cr3;
4527 context->get_pdptr = kvm_pdptr_read;
4528 context->inject_page_fault = kvm_inject_page_fault;
4531 context->nx = false;
4532 context->gva_to_gpa = nonpaging_gva_to_gpa;
4533 context->root_level = 0;
4535 context->nx = is_nx(vcpu);
4536 context->root_level = is_la57_mode(vcpu) ?
4538 reset_rsvds_bits_mask(vcpu, context);
4539 context->gva_to_gpa = paging64_gva_to_gpa;
4541 context->nx = is_nx(vcpu);
4542 context->root_level = PT32E_ROOT_LEVEL;
4543 reset_rsvds_bits_mask(vcpu, context);
4544 context->gva_to_gpa = paging64_gva_to_gpa;
4546 context->nx = false;
4547 context->root_level = PT32_ROOT_LEVEL;
4548 reset_rsvds_bits_mask(vcpu, context);
4549 context->gva_to_gpa = paging32_gva_to_gpa;
4552 update_permission_bitmask(vcpu, context, false);
4553 update_pkru_bitmask(vcpu, context, false);
4554 update_last_nonleaf_level(vcpu, context);
4555 reset_tdp_shadow_zero_bits_mask(vcpu, context);
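4513-4555 are init_kvm_tdp_mmu(), the largest cluster. The listing drops the if/else-if conditions that pick the guest-walk format; the skeleton below is reconstructed from mainline of the same era (the role-calculation call and the branch predicates are assumptions, everything else appears in the hits):

    static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
    {
            struct kvm_mmu *context = &vcpu->arch.root_mmu;
            union kvm_mmu_role new_role =
                    kvm_calc_tdp_mmu_root_page_role(vcpu, false);

            /* Reinitialize only when the cached role went stale. */
            if (new_role.as_u64 == context->mmu_role.as_u64)
                    return;

            context->mmu_role.as_u64 = new_role.as_u64;
            context->page_fault = kvm_tdp_page_fault;
            context->sync_page = nonpaging_sync_page;
            context->invlpg = NULL;
            context->shadow_root_level = kvm_mmu_get_tdp_level(vcpu);
            context->direct_map = true;
            context->get_guest_pgd = get_cr3;
            context->get_pdptr = kvm_pdptr_read;
            context->inject_page_fault = kvm_inject_page_fault;

            if (!is_paging(vcpu)) {
                    context->nx = false;
                    context->gva_to_gpa = nonpaging_gva_to_gpa;
                    context->root_level = 0;
            } else if (is_long_mode(vcpu)) {
                    context->nx = is_nx(vcpu);
                    context->root_level = is_la57_mode(vcpu) ?
                                    PT64_ROOT_5LEVEL : PT64_ROOT_4LEVEL;
                    reset_rsvds_bits_mask(vcpu, context);
                    context->gva_to_gpa = paging64_gva_to_gpa;
            } else if (is_pae(vcpu)) {
                    context->nx = is_nx(vcpu);
                    context->root_level = PT32E_ROOT_LEVEL;
                    reset_rsvds_bits_mask(vcpu, context);
                    /* PAE entries are 64-bit, hence the paging64 walker. */
                    context->gva_to_gpa = paging64_gva_to_gpa;
            } else {
                    context->nx = false;
                    context->root_level = PT32_ROOT_LEVEL;
                    reset_rsvds_bits_mask(vcpu, context);
                    context->gva_to_gpa = paging32_gva_to_gpa;
            }

            update_permission_bitmask(vcpu, context, false);
            update_pkru_bitmask(vcpu, context, false);
            update_last_nonleaf_level(vcpu, context);
            reset_tdp_shadow_zero_bits_mask(vcpu, context);
    }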
4590 static void shadow_mmu_init_context(struct kvm_vcpu *vcpu, struct kvm_mmu *context,
4595 nonpaging_init_context(vcpu, context);
4597 paging64_init_context(vcpu, context);
4599 paging32E_init_context(vcpu, context);
4601 paging32_init_context(vcpu, context);
4603 context->mmu_role.as_u64 = new_role.as_u64;
4604 reset_shadow_zero_bits_mask(vcpu, context);
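shadow_mmu_init_context() (4590-4604) dispatches on the supplied register state; the hits skip the predicates. Reconstructed skeleton (the u32 parameter types match kernels around v5.11; later releases widen them):

    static void shadow_mmu_init_context(struct kvm_vcpu *vcpu, struct kvm_mmu *context,
                                        u32 cr0, u32 cr4, u32 efer,
                                        union kvm_mmu_role new_role)
    {
            if (!(cr0 & X86_CR0_PG))
                    nonpaging_init_context(vcpu, context);
            else if (efer & EFER_LMA)
                    paging64_init_context(vcpu, context);
            else if (cr4 & X86_CR4_PAE)
                    paging32E_init_context(vcpu, context);
            else
                    paging32_init_context(vcpu, context);

            context->mmu_role.as_u64 = new_role.as_u64;
            reset_shadow_zero_bits_mask(vcpu, context);
    }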
4609 struct kvm_mmu *context = &vcpu->arch.root_mmu;
4613 if (new_role.as_u64 != context->mmu_role.as_u64)
4614 shadow_mmu_init_context(vcpu, context, cr0, cr4, efer, new_role);
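kvm_init_shadow_mmu() (4609-4614) is the thin caller that applies the role-caching pattern seen throughout this file: recompute the role, reinitialize only on change. Sketch (the role helper name is an assumption from mainline):

    static void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, u32 cr0, u32 cr4, u32 efer)
    {
            struct kvm_mmu *context = &vcpu->arch.root_mmu;
            union kvm_mmu_role new_role =
                    kvm_calc_shadow_mmu_root_page_role(vcpu, false);

            if (new_role.as_u64 != context->mmu_role.as_u64)
                    shadow_mmu_init_context(vcpu, context, cr0, cr4, efer, new_role);
    }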
4632 struct kvm_mmu *context = &vcpu->arch.guest_mmu;
4637 if (new_role.as_u64 != context->mmu_role.as_u64) {
4638 shadow_mmu_init_context(vcpu, context, cr0, cr4, efer, new_role);
4644 context->shadow_root_level = new_role.base.level;
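kvm_init_shadow_npt_mmu() (4632-4644) targets guest_mmu rather than root_mmu, and 4644 overrides shadow_root_level after the common init because nested NPT always runs at the host's TDP root depth; the gap at 4639-4643 is plausibly a comment block saying exactly that. Hedged sketch (the __kvm_mmu_new_pgd() call and the role helper are assumptions; their signatures moved around across versions):

    void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, u32 cr0, u32 cr4, u32 efer,
                                 gpa_t nested_cr3)
    {
            struct kvm_mmu *context = &vcpu->arch.guest_mmu;
            union kvm_mmu_role new_role = kvm_calc_shadow_npt_root_page_role(vcpu);

            __kvm_mmu_new_pgd(vcpu, nested_cr3, new_role.base, false, false);

            if (new_role.as_u64 != context->mmu_role.as_u64) {
                    shadow_mmu_init_context(vcpu, context, cr0, cr4, efer, new_role);

                    /* Nested NPT always uses the host's TDP root depth. */
                    context->shadow_root_level = new_role.base.level;
            }
    }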
4681 struct kvm_mmu *context = &vcpu->arch.guest_mmu;
4689 if (new_role.as_u64 == context->mmu_role.as_u64)
4692 context->shadow_root_level = level;
4694 context->nx = true;
4695 context->ept_ad = accessed_dirty;
4696 context->page_fault = ept_page_fault;
4697 context->gva_to_gpa = ept_gva_to_gpa;
4698 context->sync_page = ept_sync_page;
4699 context->invlpg = ept_invlpg;
4700 context->root_level = level;
4701 context->direct_map = false;
4702 context->mmu_role.as_u64 = new_role.as_u64;
4704 update_permission_bitmask(vcpu, context, true);
4705 update_pkru_bitmask(vcpu, context, true);
4706 update_last_nonleaf_level(vcpu, context);
4707 reset_rsvds_bits_mask_ept(vcpu, context, execonly);
4708 reset_ept_shadow_zero_bits_mask(vcpu, context, execonly);
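4681-4708 are kvm_init_shadow_ept_mmu(), the nested-EPT setup; only the entry is elided. Sketch of the head, hedged as before (the EPTP level helper, the role calculation, and the __kvm_mmu_new_pgd() call come from mainline, not from the listing):

    void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
                                 bool accessed_dirty, gpa_t new_eptp)
    {
            struct kvm_mmu *context = &vcpu->arch.guest_mmu;
            u8 level = vmx_eptp_page_walk_level(new_eptp);
            union kvm_mmu_role new_role =
                    kvm_calc_shadow_ept_root_page_role(vcpu, accessed_dirty,
                                                       execonly, level);

            __kvm_mmu_new_pgd(vcpu, new_eptp, new_role.base, true, true);

            if (new_role.as_u64 == context->mmu_role.as_u64)
                    return;

            /* Body as in the hits at 4692-4708. */
    }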
4714 struct kvm_mmu *context = &vcpu->arch.root_mmu;
4721 context->get_guest_pgd = get_cr3;
4722 context->get_pdptr = kvm_pdptr_read;
4723 context->inject_page_fault = kvm_inject_page_fault;
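init_kvm_softmmu() (4714-4723) closes the set: it feeds the current guest CR0/CR4/EFER into kvm_init_shadow_mmu() and then wires the same three callbacks the TDP path set at 4526-4528. The elided call, reconstructed (the accessor names are assumptions from mainline):

    static void init_kvm_softmmu(struct kvm_vcpu *vcpu)
    {
            struct kvm_mmu *context = &vcpu->arch.root_mmu;

            kvm_init_shadow_mmu(vcpu,
                                kvm_read_cr0_bits(vcpu, X86_CR0_PG),
                                kvm_read_cr4_bits(vcpu, X86_CR4_PAE),
                                vcpu->arch.efer);

            context->get_guest_pgd = get_cr3;
            context->get_pdptr = kvm_pdptr_read;
            context->inject_page_fault = kvm_inject_page_fault;
    }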