Lines Matching defs:FNAME

26 	#define FNAME(name) paging##64_##name
44 	#define FNAME(name) paging##32_##name
58 	#define FNAME(name) ept_##name
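
The three definitions above are the core of the template: KVM's paging_tmpl.h is expanded once per guest paging mode, and FNAME() token-pastes a mode-specific prefix (paging64_, paging32_ or ept_) onto every function the template defines, so all the matches below are really one set of functions stamped out three times. A minimal, self-contained sketch of the same preprocessor trick (a toy walk() function, hypothetical and not taken from the kernel) looks like this:

	#include <stdio.h>

	/* "First expansion" of the template: the 64-bit variant. */
	#define FNAME(name) paging##64_##name

	static int FNAME(walk)(int pte)		/* expands to paging64_walk() */
	{
		return pte + 64;
	}

	#undef FNAME				/* mirrors the #undef FNAME at the end of the template */

	/* "Second expansion": same body, new prefix. */
	#define FNAME(name) paging##32_##name

	static int FNAME(walk)(int pte)		/* expands to paging32_walk() */
	{
		return pte + 32;
	}

	#undef FNAME

	int main(void)
	{
		/* Both variants coexist in one translation unit under distinct names. */
		printf("%d %d\n", paging64_walk(0), paging32_walk(0));
		return 0;
	}

Compiling and running the sketch prints "64 32": the two expansions of the same body live side by side under different names, which is exactly why every definition in the listing is written as FNAME(something).
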
78 #define gpte_to_gfn_lvl FNAME(gpte_to_gfn_lvl)
105 static inline void FNAME(protect_clean_gpte)(struct kvm_mmu *mmu, unsigned *access,
123 static inline int FNAME(is_present_gpte)(unsigned long pte)
132 static bool FNAME(is_bad_mt_xwr)(struct rsvd_bits_validate *rsvd_check, u64 gpte)
141 static bool FNAME(is_rsvd_bits_set)(struct kvm_mmu *mmu, u64 gpte, int level)
144 FNAME(is_bad_mt_xwr)(&mmu->guest_rsvd_check, gpte);
147 static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
186 static bool FNAME(prefetch_invalid_gpte)(struct kvm_vcpu *vcpu,
190 if (!FNAME(is_present_gpte)(gpte))
198 if (FNAME(is_rsvd_bits_set)(vcpu->arch.mmu, gpte, PG_LEVEL_4K))
214 static inline unsigned FNAME(gpte_access)(u64 gpte)
232 static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
284 ret = FNAME(cmpxchg_gpte)(vcpu, mmu, ptep_user, index, orig_pte, pte);
294 static inline unsigned FNAME(gpte_pkeys)(struct kvm_vcpu *vcpu, u64 gpte)
308 static int FNAME(walk_addr_generic)(struct guest_walker *walker,
341 if (!FNAME(is_present_gpte)(pte))
409 if (unlikely(!FNAME(is_present_gpte)(pte)))
412 if (unlikely(FNAME(is_rsvd_bits_set)(mmu, pte, walker->level))) {
420 walker->pt_access[walker->level - 1] = FNAME(gpte_access)(pt_access ^ walk_nx_mask);
423 pte_pkey = FNAME(gpte_pkeys)(vcpu, pte);
427 walker->pte_access = FNAME(gpte_access)(pte_access ^ walk_nx_mask);
445 FNAME(protect_clean_gpte)(mmu, &walker->pte_access, pte);
456 ret = FNAME(update_accessed_dirty_bits)(vcpu, mmu, walker,
510 static int FNAME(walk_addr)(struct guest_walker *walker,
513 return FNAME(walk_addr_generic)(walker, vcpu, vcpu->arch.mmu, addr,
518 static int FNAME(walk_addr_nested)(struct guest_walker *walker,
522 return FNAME(walk_addr_generic)(walker, vcpu, &vcpu->arch.nested_mmu,
528 FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
535 if (FNAME(prefetch_invalid_gpte)(vcpu, sp, spte, gpte))
541 pte_access = sp->role.access & FNAME(gpte_access)(gpte);
542 FNAME(protect_clean_gpte)(vcpu->arch.mmu, &pte_access, gpte);
559 static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
564 FNAME(prefetch_gpte)(vcpu, sp, spte, gpte, false);
567 static bool FNAME(gpte_changed)(struct kvm_vcpu *vcpu,
590 static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
616 if (!FNAME(prefetch_gpte)(vcpu, sp, spte, gptep[i], true))
626 static int FNAME(fetch)(struct kvm_vcpu *vcpu, gpa_t addr,
652 if (FNAME(gpte_changed)(vcpu, gw, top_level))
678 if (FNAME(gpte_changed)(vcpu, gw, it.level - 1))
723 FNAME(pte_prefetch)(vcpu, gw, it.sptep);
735 * It is the helper function of FNAME(page_fault). When guest uses large page
749 FNAME(is_self_change_mapping)(struct kvm_vcpu *vcpu,
785 static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gpa_t addr, u32 error_code,
808 r = FNAME(walk_addr)(&walker, vcpu, addr, error_code);
832 is_self_change_mapping = FNAME(is_self_change_mapping)(vcpu,
879 r = FNAME(fetch)(vcpu, addr, &walker, error_code, max_level, pfn,
889 static gpa_t FNAME(get_level1_sp_gpa)(struct kvm_mmu_page *sp)
901 static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa)
936 pte_gpa = FNAME(get_level1_sp_gpa)(sp);
951 FNAME(update_pte)(vcpu, sp, sptep, &gpte);
961 static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gpa_t addr, u32 access,
968 r = FNAME(walk_addr)(&walker, vcpu, addr, access);
981 static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gpa_t vaddr,
994 r = FNAME(walk_addr_nested)(&walker, vcpu, vaddr, access);
1019 static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
1029 first_pte_gpa = FNAME(get_level1_sp_gpa)(sp);
1046 if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) {
1059 pte_access &= FNAME(gpte_access)(gpte);
1060 FNAME(protect_clean_gpte)(vcpu->arch.mmu, &pte_access, gpte);
1095 #undef FNAME
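
The final #undef FNAME is what allows the template to be expanded again under a different PTTYPE. The driver side lives in the including file (mmu.c in recent kernels); the sketch below shows the assumed multiple-inclusion pattern that produces the paging64_*, paging32_* and ept_* symbol families matched above. It is illustrative, not verbatim kernel code, and the exact values and ordering may differ between kernel versions.

	/* Each inclusion selects a PTTYPE, the template defines FNAME() to
	 * match, and the trailing #undef FNAME / #undef PTTYPE reset the
	 * namespace for the next expansion. */
	#define PTTYPE_EPT 18		/* any value distinct from 32 and 64 */
	#define PTTYPE PTTYPE_EPT
	#include "paging_tmpl.h"	/* emits the ept_* family */
	#undef PTTYPE

	#define PTTYPE 64
	#include "paging_tmpl.h"	/* emits the paging64_* family */
	#undef PTTYPE

	#define PTTYPE 32
	#include "paging_tmpl.h"	/* emits the paging32_* family */
	#undef PTTYPE
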