Lines matching refs: kvm_mmu (each entry is the source line number in the cross-referenced file, followed by the matching line)

3157 void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
3819 struct kvm_mmu *context)
3842 * If a matching root was found, it is assigned to kvm_mmu->root_hpa and true is returned.
3844 * Otherwise, the LRU root from the cache is assigned to kvm_mmu->root_hpa and false is returned; this root should then be freed by the caller.
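
The two comment lines above describe an LRU lookup over previously cached page-table roots: the current root is pushed into the cache while the search walks it, so a hit promotes the cached root and a miss leaves the evicted LRU entry as the new current root. Below is a minimal, standalone C sketch of that scheme, assuming a fixed-size cache kept in most- to least-recently-used order; every name here (mmu_sketch, cached_root_available_sketch, CACHE_SIZE) is a simplified stand-in for illustration, not the kernel's actual definitions.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical cache size; stands in for the kernel's real constant. */
#define CACHE_SIZE 3

struct cached_root {
	uint64_t root_hpa;	/* host physical address of the root table */
	uint64_t pgd;		/* guest pgd this root was built for */
};

/* Simplified stand-in for the MMU state: one current root plus a
 * small cache of previous roots, ordered most- to least-recently used. */
struct mmu_sketch {
	struct cached_root root;
	struct cached_root prev_roots[CACHE_SIZE];
};

/*
 * Search the cache for a root matching @new_pgd, inserting the current
 * root into the cache as we go. On a hit, the matching root becomes
 * current and true is returned. On a miss, the LRU entry is left as the
 * current root and false is returned: the caller should free that root.
 */
static bool cached_root_available_sketch(struct mmu_sketch *mmu, uint64_t new_pgd)
{
	unsigned int i;
	struct cached_root root = mmu->root;

	if (root.pgd == new_pgd)
		return true;	/* the current root already matches */

	for (i = 0; i < CACHE_SIZE; i++) {
		/* swap: insert the previously-current root, pull out slot i */
		struct cached_root tmp = mmu->prev_roots[i];
		mmu->prev_roots[i] = root;
		root = tmp;
		if (root.pgd == new_pgd)
			break;	/* hit: stop shifting, recency order is preserved */
	}

	mmu->root = root;
	return i < CACHE_SIZE;	/* false: @root is the evicted LRU entry */
}

int main(void)
{
	struct mmu_sketch mmu = {
		.root = { .root_hpa = 0x1000, .pgd = 0xa000 },
		.prev_roots = {
			{ .root_hpa = 0x2000, .pgd = 0xb000 },
			{ .root_hpa = 0x3000, .pgd = 0xc000 },
			{ .root_hpa = 0x4000, .pgd = 0xd000 },
		},
	};

	/* Hit on a cached root: pgd 0xc000 becomes current, true is returned
	 * and the displaced roots shift down one slot each. */
	bool hit = cached_root_available_sketch(&mmu, 0xc000);
	printf("hit=%d current pgd=%#llx\n", hit, (unsigned long long)mmu.root.pgd);
	return 0;
}

On a miss the entry that falls out of the scan is by construction the least recently used one, so the false return tells the caller that the root now sitting in mmu->root must be freed and rebuilt for the new pgd, exactly as the comment above states.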
3852 struct kvm_mmu *mmu = vcpu->arch.mmu;
3876 struct kvm_mmu *mmu = vcpu->arch.mmu;
3959 static inline bool is_last_gpte(struct kvm_mmu *mmu,
4083 struct kvm_mmu *context)
4131 struct kvm_mmu *context, bool execonly)
4143 reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
4192 struct kvm_mmu *context)
4225 struct kvm_mmu *context, bool execonly)
4242 struct kvm_mmu *mmu, bool ept)
4337 static void update_pkru_bitmask(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
4388 static void update_last_nonleaf_level(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
4398 struct kvm_mmu *context,
4419 struct kvm_mmu *context)
4428 struct kvm_mmu *context)
4447 struct kvm_mmu *context)
4513 struct kvm_mmu *context = &vcpu->arch.root_mmu;
4590 static void shadow_mmu_init_context(struct kvm_vcpu *vcpu, struct kvm_mmu *context,
4609 struct kvm_mmu *context = &vcpu->arch.root_mmu;
4632 struct kvm_mmu *context = &vcpu->arch.guest_mmu;
4681 struct kvm_mmu *context = &vcpu->arch.guest_mmu;
4714 struct kvm_mmu *context = &vcpu->arch.root_mmu;
4753 struct kvm_mmu *g_context = &vcpu->arch.nested_mmu;
5126 void kvm_mmu_invalidate_gva(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
5176 struct kvm_mmu *mmu = vcpu->arch.mmu;
5302 static void free_mmu_pages(struct kvm_mmu *mmu)
5308 static int __kvm_mmu_create(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)