/kernel/linux/linux-6.6/arch/x86/kvm/mmu/

paging_tmpl.h
     96  static inline gfn_t pse36_gfn_delta(u32 gpte)  [in pse36_gfn_delta(), argument]
    100      return (gpte & PT32_DIR_PSE36_MASK) << shift;  [in pse36_gfn_delta()]
    104  static gfn_t gpte_to_gfn_lvl(pt_element_t gpte, int lvl)  [in gpte_to_gfn_lvl(), argument]
    106      return (gpte & PT_LVL_ADDR_MASK(lvl)) >> PAGE_SHIFT;  [in gpte_to_gfn_lvl()]
    110      unsigned gpte)  [in protect_clean_gpte()]
    122      mask |= (gpte >> (PT_GUEST_DIRTY_SHIFT - PT_WRITABLE_SHIFT)) &  [in protect_clean_gpte()]
    136  static bool FNAME(is_bad_mt_xwr)(struct rsvd_bits_validate *rsvd_check, u64 gpte)  [in is_bad_mt_xwr(), argument]
    141      return __is_bad_mt_xwr(rsvd_check, gpte);  [in is_bad_mt_xwr()]
    145  static bool FNAME(is_rsvd_bits_set)(struct kvm_mmu *mmu, u64 gpte, int level)  [in is_rsvd_bits_set(), argument]
    147      return __is_rsvd_bits_set(&mmu->guest_rsvd_check, gpte, level) ||  [in is_rsvd_bits_set()]
  definitions of gpte:
    109  protect_clean_gpte(struct kvm_mmu *mmu, unsigned *access, unsigned gpte)  [argument]
    151  prefetch_invalid_gpte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, u64 *spte, u64 gpte)  [argument]
    179  gpte_access(u64 gpte)  [argument]
    259  gpte_pkeys(struct kvm_vcpu *vcpu, u64 gpte)  [argument]
    270  is_last_gpte(struct kvm_mmu *mmu, unsigned int level, unsigned int gpte)  [argument]
    533  prefetch_gpte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, u64 *spte, pt_element_t gpte)  [argument]
    910  pt_element_t gpte;  [local in sync_spte()]
  [all …]
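
The first two hits above are the helpers that turn a guest PTE into a guest frame number. Below is a self-contained sketch of both; the mask and shift constants are restated locally and mirror KVM's 32-bit paging definitions, so treat the exact values as assumptions. PSE-36 stores physical-address bits 32..35 in PDE bits 13..16, and the delta helper lifts that field up to gfn bits 20..23, while gpte_to_gfn_lvl() masks away the non-address bits for the leaf level and drops the page offset.

#include <stdint.h>
#include <stdio.h>

/* Constants restated locally (assumed values mirroring KVM's 32-bit
 * paging definitions) so the sketch builds on its own. */
#define PAGE_SHIFT           12
#define PT32_LEVEL_BITS      10   /* index bits per level, 32-bit non-PAE */
#define PT32_DIR_PSE36_SHIFT 13   /* PDE bits 13..16 carry PA bits 32..35 */
#define PT32_DIR_PSE36_SIZE  4
#define PT32_DIR_PSE36_MASK \
	((((uint32_t)1 << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)

typedef uint64_t gfn_t;

/* Lift the PSE-36 field from PDE bits 13..16 up to gfn bits 20..23,
 * i.e. physical-address bits 32..35; shift = 32 - 13 - 12 = 7. */
static gfn_t pse36_gfn_delta(uint32_t gpte)
{
	int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT;

	return (gfn_t)(gpte & PT32_DIR_PSE36_MASK) << shift;
}

/* Simplified stand-in for PT_LVL_ADDR_MASK(lvl): keep only the address
 * bits a leaf at this level maps (bit 22 and up for a 4 MiB PDE), then
 * shift the page offset away to get a frame number. */
static gfn_t gpte_to_gfn_lvl(uint64_t gpte, int lvl)
{
	uint64_t mask = 0xfffff000ull &
			~((1ull << (PAGE_SHIFT + (lvl - 1) * PT32_LEVEL_BITS)) - 1);

	return (gpte & mask) >> PAGE_SHIFT;
}

int main(void)
{
	/* present + RW + PS, base at 4 MiB, PSE-36 field = 7 (PA bits 32..34) */
	uint32_t pde = 0x0040e083u;

	printf("gfn delta = %#llx\n", (unsigned long long)pse36_gfn_delta(pde));
	printf("base gfn  = %#llx\n", (unsigned long long)gpte_to_gfn_lvl(pde, 2));
	return 0;
}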

/kernel/linux/linux-5.10/arch/powerpc/kvm/

book3s_64_mmu.c
    192      struct kvmppc_pte *gpte, bool data,  [in kvmppc_mmu_book3s_64_xlate()]
    213      gpte->eaddr = eaddr;  [in kvmppc_mmu_book3s_64_xlate()]
    214      gpte->vpage = kvmppc_mmu_book3s_64_ea_to_vp(vcpu, eaddr, data);  [in kvmppc_mmu_book3s_64_xlate()]
    215      gpte->raddr = vcpu->arch.magic_page_pa | (gpte->raddr & 0xfff);  [in kvmppc_mmu_book3s_64_xlate()]
    216      gpte->raddr &= KVM_PAM;  [in kvmppc_mmu_book3s_64_xlate()]
    217      gpte->may_execute = true;  [in kvmppc_mmu_book3s_64_xlate()]
    218      gpte->may_read = true;  [in kvmppc_mmu_book3s_64_xlate()]
    219      gpte->may_write = true;  [in kvmppc_mmu_book3s_64_xlate()]
    220      gpte …  [in kvmppc_mmu_book3s_64_xlate()]
  definitions of gpte:
    191  kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, struct kvmppc_pte *gpte, bool data, bool iswrite)  [argument]
  [all …]
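
Hits 213..219 are the magic-page short-circuit in the Book3S xlate path: keep only the 4 KiB page offset, substitute the magic page's physical frame, clip to the physical-address mask, and grant full permissions. A minimal sketch under stated assumptions: the struct is a local stand-in for kvmppc_pte, and the KVM_PAM value is assumed from the kernel's kvm_book3s.h.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define KVM_PAM 0x0fffffffffffffffull   /* assumed physical-address mask */

/* Local stand-in for the fields of struct kvmppc_pte used here. */
struct kvmppc_pte_sketch {
	uint64_t eaddr;
	uint64_t raddr;
	bool may_read, may_write, may_execute;
};

/* Magic-page translation as in the hits above: the real address is the
 * magic page's frame plus the faulting address's page offset, clipped
 * to the physical-address mask, with full RWX granted. */
static void magic_page_xlate(struct kvmppc_pte_sketch *gpte,
			     uint64_t eaddr, uint64_t magic_page_pa)
{
	gpte->eaddr = eaddr;
	gpte->raddr = magic_page_pa | (eaddr & 0xfff);
	gpte->raddr &= KVM_PAM;
	gpte->may_read = true;
	gpte->may_write = true;
	gpte->may_execute = true;
}

int main(void)
{
	struct kvmppc_pte_sketch pte;

	/* hypothetical magic page frame, access at page offset 0x6f0 */
	magic_page_xlate(&pte, 0xdeadb6f0ull, 0x3ffff000ull);
	printf("raddr = %#llx\n", (unsigned long long)pte.raddr);
	return 0;
}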

book3s_64_mmu_radix.c
    131      struct kvmppc_pte *gpte, u64 root,  [in kvmppc_mmu_walk_radix_tree()]
    198      gpte->page_size = ps;  [in kvmppc_mmu_walk_radix_tree()]
    199      gpte->page_shift = offset;  [in kvmppc_mmu_walk_radix_tree()]
    201      gpte->eaddr = eaddr;  [in kvmppc_mmu_walk_radix_tree()]
    202      gpte->raddr = gpa;  [in kvmppc_mmu_walk_radix_tree()]
    205      gpte->may_read = !!(pte & _PAGE_READ);  [in kvmppc_mmu_walk_radix_tree()]
    206      gpte->may_write = !!(pte & _PAGE_WRITE);  [in kvmppc_mmu_walk_radix_tree()]
    207      gpte->may_execute = !!(pte & _PAGE_EXEC);  [in kvmppc_mmu_walk_radix_tree()]
    209      gpte->rc = pte & (_PAGE_ACCESSED | _PAGE_DIRTY);  [in kvmppc_mmu_walk_radix_tree()]
    226      struct kvmppc_pte *gpte, u64 table,  [in kvmppc_mmu_radix_translate_table()]
  definitions of gpte:
    130  kvmppc_mmu_walk_radix_tree(struct kvm_vcpu *vcpu, gva_t eaddr, struct kvmppc_pte *gpte, u64 root, u64 *pte_ret_p)  [argument]
    225  kvmppc_mmu_radix_translate_table(struct kvm_vcpu *vcpu, gva_t eaddr, struct kvmppc_pte *gpte, u64 table, int table_index, u64 *pte_ret_p)  [argument]
    256  kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, struct kvmppc_pte *gpte, bool data, bool iswrite)  [argument]
  [all …]
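
Lines 198..209 show the radix walker filling in its result once it reaches a leaf PTE: permissions map one-to-one from the R/W/X bits, and the referenced/changed (accessed/dirty) bits are kept together in rc so callers can tell whether a fault still needs to update them. A standalone sketch of that decode; the _PAGE_* bit values are restated locally from the kernel's book3s/64 page-table definitions and should be treated as assumptions.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Radix PTE bits, restated locally (assumed values). */
#define _PAGE_EXEC     0x00001ull
#define _PAGE_WRITE    0x00002ull
#define _PAGE_READ     0x00004ull
#define _PAGE_DIRTY    0x00080ull   /* C: changed */
#define _PAGE_ACCESSED 0x00100ull   /* R: referenced */

struct kvmppc_pte_sketch {
	uint64_t raddr;
	uint64_t rc;   /* referenced/changed bits, kept verbatim */
	bool may_read, may_write, may_execute;
};

/* Decode a leaf radix PTE as in the hits above. */
static void decode_radix_leaf(struct kvmppc_pte_sketch *gpte,
			      uint64_t pte, uint64_t gpa)
{
	gpte->raddr = gpa;
	gpte->may_read = !!(pte & _PAGE_READ);
	gpte->may_write = !!(pte & _PAGE_WRITE);
	gpte->may_execute = !!(pte & _PAGE_EXEC);
	gpte->rc = pte & (_PAGE_ACCESSED | _PAGE_DIRTY);
}

int main(void)
{
	struct kvmppc_pte_sketch pte;

	/* readable, executable, referenced, not yet dirty */
	decode_radix_leaf(&pte, _PAGE_READ | _PAGE_EXEC | _PAGE_ACCESSED,
			  0x40000000ull);
	printf("r=%d w=%d x=%d rc=%#llx\n", pte.may_read, pte.may_write,
	       pte.may_execute, (unsigned long long)pte.rc);
	return 0;
}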

book3s_hv_nested.c
   1239      struct kvmppc_pte gpte,  [in kvmhv_handle_nested_set_rc()]
   1251      if (pgflags & ~gpte.rc)  [in kvmhv_handle_nested_set_rc()]
   1257      gpte.raddr, kvm->arch.lpid);  [in kvmhv_handle_nested_set_rc()]
   1307      struct kvmppc_pte gpte;  [local in __kvmhv_nested_page_fault()]
   1330      ret = kvmhv_translate_addr_nested(vcpu, gp, n_gpa, dsisr, &gpte);  [in __kvmhv_nested_page_fault()]
   1346      ret = kvmhv_handle_nested_set_rc(vcpu, gp, n_gpa, gpte, dsisr);  [in __kvmhv_nested_page_fault()]
   1363      l1_shift = gpte.page_shift;  [in __kvmhv_nested_page_fault()]
   1370      gpa = gpte.raddr;  [in __kvmhv_nested_page_fault()]
   1428      perm |= gpte.may_read ? 0UL : _PAGE_READ;  [in __kvmhv_nested_page_fault()]
   1429      perm |= gpte …  [in __kvmhv_nested_page_fault()]
  definitions of gpte:
   1236  kvmhv_handle_nested_set_rc(struct kvm_vcpu *vcpu, struct kvm_nested_guest *gp, unsigned long n_gpa, struct kvmppc_pte gpte, unsigned long dsisr)  [argument]
  [all …]
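
The test at line 1251, pgflags & ~gpte.rc, is the crux of the nested referenced/changed handling: L0 may set R (and C, for a write) on behalf of the nested guest only if L1's own PTE already grants the needed bits in rc; otherwise the fault is reflected to L1 so it can update its page table first. A hedged sketch of that decision; the pgflags derivation and the return-code names are assumptions modeled on the surrounding code, not copied from it.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define _PAGE_DIRTY    0x00080ull   /* C: changed (assumed value) */
#define _PAGE_ACCESSED 0x00100ull   /* R: referenced (assumed value) */

/* Hypothetical stand-ins for the KVM resume codes. */
enum resume { RESUME_GUEST, RESUME_HOST };

/* Sketch of kvmhv_handle_nested_set_rc()'s gating logic: compute which
 * R/C bits this fault needs, and bail to L1 if any needed bit is not
 * already granted by L1's PTE (gpte_rc). */
static enum resume handle_nested_set_rc(uint64_t gpte_rc, bool writing)
{
	uint64_t pgflags = _PAGE_ACCESSED;

	if (writing)
		pgflags |= _PAGE_DIRTY;

	/* a needed bit is missing from L1's PTE: reflect the fault */
	if (pgflags & ~gpte_rc)
		return RESUME_HOST;

	/* here L0 could set R/C in both the shadow and L1 tables */
	return RESUME_GUEST;
}

int main(void)
{
	printf("read,  R granted: %d\n", handle_nested_set_rc(_PAGE_ACCESSED, false));
	printf("write, C missing: %d\n", handle_nested_set_rc(_PAGE_ACCESSED, true));
	return 0;
}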

book3s_64_mmu_hv.c
    332      struct kvmppc_pte *gpte, bool data, bool iswrite)  [in kvmppc_mmu_book3s_64_hv_xlate()]
    344      return kvmppc_mmu_radix_xlate(vcpu, eaddr, gpte, data, iswrite);  [in kvmppc_mmu_book3s_64_hv_xlate()]
    374      gpte->eaddr = eaddr;  [in kvmppc_mmu_book3s_64_hv_xlate()]
    375      gpte->vpage = ((v & HPTE_V_AVPN) << 4) | ((eaddr >> 12) & 0xfff);  [in kvmppc_mmu_book3s_64_hv_xlate()]
    383      gpte->may_read = hpte_read_permission(pp, key);  [in kvmppc_mmu_book3s_64_hv_xlate()]
    384      gpte->may_write = hpte_write_permission(pp, key);  [in kvmppc_mmu_book3s_64_hv_xlate()]
    385      gpte->may_execute = gpte->may_read && !(gr & (HPTE_R_N | HPTE_R_G));  [in kvmppc_mmu_book3s_64_hv_xlate()]
    391      gpte->may_read = 0;  [in kvmppc_mmu_book3s_64_hv_xlate()]
    393      gpte …  [in kvmppc_mmu_book3s_64_hv_xlate()]
  definitions of gpte:
    331  kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, struct kvmppc_pte *gpte, bool data, bool iswrite)  [argument]
  [all …]
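
Line 385 is the HPT execute rule: a page is executable only if it is readable and the HPTE's second doubleword sets neither the no-execute nor the guarded bit; line 375 reassembles the virtual page number from the AVPN field and low effective-address bits. A sketch of both, with the HPTE_* masks restated locally as assumptions; the PP/key decode done by hpte_read_permission() is deliberately left out.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* HPTE field masks, restated locally (assumed values). */
#define HPTE_R_N    0x0000000000000004ull   /* no-execute */
#define HPTE_R_G    0x0000000000000008ull   /* guarded */
#define HPTE_V_AVPN 0x3fffffffffffff80ull   /* abbreviated VPN field */

/* Execute permission per line 385: readable, and neither N nor G set
 * in the second doubleword (gr). may_read itself would come from the
 * PP/key decode, elided here. */
static bool hpte_may_execute(bool may_read, uint64_t gr)
{
	return may_read && !(gr & (HPTE_R_N | HPTE_R_G));
}

/* Virtual page number per line 375: AVPN shifted up, combined with
 * effective-address bits 12..23. */
static uint64_t hpte_vpage(uint64_t v, uint64_t eaddr)
{
	return ((v & HPTE_V_AVPN) << 4) | ((eaddr >> 12) & 0xfff);
}

int main(void)
{
	printf("N set: exec=%d\n", hpte_may_execute(true, HPTE_R_N));
	printf("clean: exec=%d\n", hpte_may_execute(true, 0));
	printf("vpage = %#llx\n",
	       (unsigned long long)hpte_vpage(0x4000000000000110ull, 0xabcde000ull));
	return 0;
}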

/kernel/linux/linux-6.6/arch/powerpc/kvm/

book3s_64_mmu.c
    192      struct kvmppc_pte *gpte, bool data,  [in kvmppc_mmu_book3s_64_xlate()]
    213      gpte->eaddr = eaddr;  [in kvmppc_mmu_book3s_64_xlate()]
    214      gpte->vpage = kvmppc_mmu_book3s_64_ea_to_vp(vcpu, eaddr, data);  [in kvmppc_mmu_book3s_64_xlate()]
    215      gpte->raddr = vcpu->arch.magic_page_pa | (gpte->raddr & 0xfff);  [in kvmppc_mmu_book3s_64_xlate()]
    216      gpte->raddr &= KVM_PAM;  [in kvmppc_mmu_book3s_64_xlate()]
    217      gpte->may_execute = true;  [in kvmppc_mmu_book3s_64_xlate()]
    218      gpte->may_read = true;  [in kvmppc_mmu_book3s_64_xlate()]
    219      gpte->may_write = true;  [in kvmppc_mmu_book3s_64_xlate()]
    220      gpte …  [in kvmppc_mmu_book3s_64_xlate()]
  definitions of gpte:
    191  kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, struct kvmppc_pte *gpte, bool data, bool iswrite)  [argument]
  [all …]

book3s_64_mmu_radix.c
    138      struct kvmppc_pte *gpte, u64 root,  [in kvmppc_mmu_walk_radix_tree()]
    206      gpte->page_size = ps;  [in kvmppc_mmu_walk_radix_tree()]
    207      gpte->page_shift = offset;  [in kvmppc_mmu_walk_radix_tree()]
    209      gpte->eaddr = eaddr;  [in kvmppc_mmu_walk_radix_tree()]
    210      gpte->raddr = gpa;  [in kvmppc_mmu_walk_radix_tree()]
    213      gpte->may_read = !!(pte & _PAGE_READ);  [in kvmppc_mmu_walk_radix_tree()]
    214      gpte->may_write = !!(pte & _PAGE_WRITE);  [in kvmppc_mmu_walk_radix_tree()]
    215      gpte->may_execute = !!(pte & _PAGE_EXEC);  [in kvmppc_mmu_walk_radix_tree()]
    217      gpte->rc = pte & (_PAGE_ACCESSED | _PAGE_DIRTY);  [in kvmppc_mmu_walk_radix_tree()]
    234      struct kvmppc_pte *gpte, u64 table,  [in kvmppc_mmu_radix_translate_table()]
  definitions of gpte:
    137  kvmppc_mmu_walk_radix_tree(struct kvm_vcpu *vcpu, gva_t eaddr, struct kvmppc_pte *gpte, u64 root, u64 *pte_ret_p)  [argument]
    233  kvmppc_mmu_radix_translate_table(struct kvm_vcpu *vcpu, gva_t eaddr, struct kvmppc_pte *gpte, u64 table, int table_index, u64 *pte_ret_p)  [argument]
    264  kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, struct kvmppc_pte *gpte, bool data, bool iswrite)  [argument]
  [all …]

book3s_hv_nested.c
   1423      struct kvmppc_pte gpte,  [in kvmhv_handle_nested_set_rc()]
   1435      if (pgflags & ~gpte.rc)  [in kvmhv_handle_nested_set_rc()]
   1441      gpte.raddr, kvm->arch.lpid);  [in kvmhv_handle_nested_set_rc()]
   1491      struct kvmppc_pte gpte;  [local in __kvmhv_nested_page_fault()]
   1514      ret = kvmhv_translate_addr_nested(vcpu, gp, n_gpa, dsisr, &gpte);  [in __kvmhv_nested_page_fault()]
   1530      ret = kvmhv_handle_nested_set_rc(vcpu, gp, n_gpa, gpte, dsisr);  [in __kvmhv_nested_page_fault()]
   1547      l1_shift = gpte.page_shift;  [in __kvmhv_nested_page_fault()]
   1554      gpa = gpte.raddr;  [in __kvmhv_nested_page_fault()]
   1615      perm |= gpte.may_read ? 0UL : _PAGE_READ;  [in __kvmhv_nested_page_fault()]
   1616      perm |= gpte …  [in __kvmhv_nested_page_fault()]
  definitions of gpte:
   1420  kvmhv_handle_nested_set_rc(struct kvm_vcpu *vcpu, struct kvm_nested_guest *gp, unsigned long n_gpa, struct kvmppc_pte gpte, unsigned long dsisr)  [argument]
  [all …]

book3s_64_mmu_hv.c
    342      struct kvmppc_pte *gpte, bool data, bool iswrite)  [in kvmppc_mmu_book3s_64_hv_xlate()]
    354      return kvmppc_mmu_radix_xlate(vcpu, eaddr, gpte, data, iswrite);  [in kvmppc_mmu_book3s_64_hv_xlate()]
    384      gpte->eaddr = eaddr;  [in kvmppc_mmu_book3s_64_hv_xlate()]
    385      gpte->vpage = ((v & HPTE_V_AVPN) << 4) | ((eaddr >> 12) & 0xfff);  [in kvmppc_mmu_book3s_64_hv_xlate()]
    393      gpte->may_read = hpte_read_permission(pp, key);  [in kvmppc_mmu_book3s_64_hv_xlate()]
    394      gpte->may_write = hpte_write_permission(pp, key);  [in kvmppc_mmu_book3s_64_hv_xlate()]
    395      gpte->may_execute = gpte->may_read && !(gr & (HPTE_R_N | HPTE_R_G));  [in kvmppc_mmu_book3s_64_hv_xlate()]
    401      gpte->may_read = 0;  [in kvmppc_mmu_book3s_64_hv_xlate()]
    403      gpte …  [in kvmppc_mmu_book3s_64_hv_xlate()]
  definitions of gpte:
    341  kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, struct kvmppc_pte *gpte, bool data, bool iswrite)  [argument]
  [all …]

/kernel/linux/linux-5.10/arch/x86/kvm/mmu/

paging_tmpl.h
    100  static gfn_t gpte_to_gfn_lvl(pt_element_t gpte, int lvl)  [in gpte_to_gfn_lvl(), argument]
    102      return (gpte & PT_LVL_ADDR_MASK(lvl)) >> PAGE_SHIFT;  [in gpte_to_gfn_lvl()]
    106      unsigned gpte)  [in protect_clean_gpte()]
    118      mask |= (gpte >> (PT_GUEST_DIRTY_SHIFT - PT_WRITABLE_SHIFT)) &  [in protect_clean_gpte()]
    132  static bool FNAME(is_bad_mt_xwr)(struct rsvd_bits_validate *rsvd_check, u64 gpte)  [in is_bad_mt_xwr(), argument]
    137      return __is_bad_mt_xwr(rsvd_check, gpte);  [in is_bad_mt_xwr()]
    141  static bool FNAME(is_rsvd_bits_set)(struct kvm_mmu *mmu, u64 gpte, int level)  [in is_rsvd_bits_set(), argument]
    143      return __is_rsvd_bits_set(&mmu->guest_rsvd_check, gpte, level) ||  [in is_rsvd_bits_set()]
    144          FNAME(is_bad_mt_xwr)(&mmu->guest_rsvd_check, gpte);  [in is_rsvd_bits_set()]
    188      u64 gpte)  [in prefetch_invalid_gpte()]
  definitions of gpte:
    105  protect_clean_gpte(struct kvm_mmu *mmu, unsigned *access, unsigned gpte)  [argument]
    186  prefetch_invalid_gpte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, u64 *spte, u64 gpte)  [argument]
    214  gpte_access(u64 gpte)  [argument]
    294  gpte_pkeys(struct kvm_vcpu *vcpu, u64 gpte)  [argument]
    528  prefetch_gpte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, u64 *spte, pt_element_t gpte, bool no_dirty_log)  [argument]
    562  pt_element_t gpte = *(const pt_element_t *)pte;  [local in update_pte()]
    930  pt_element_t gpte;  [local in invlpg()]
   1033  pt_element_t gpte;  [local in sync_page()]
  [all …]
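
The hit at line 118 is the core of protect_clean_gpte(): the x86 dirty bit is bit 6 and the writable bit is bit 1, so shifting the gpte right by 5 drops the dirty bit exactly onto the writable position. OR-ing that into a mask that otherwise strips write access means a clean gpte loses write permission: the guest's first write then faults, giving KVM the chance to set the dirty bit, while an already-dirty gpte keeps write access. A compilable sketch with the bit positions restated locally.

#include <stdint.h>
#include <stdio.h>

/* x86 PTE bit positions, restated locally: writable is bit 1, dirty is
 * bit 6, so dirty >> 5 lands on the writable position. ACC_WRITE_MASK
 * is assumed equal to PT_WRITABLE_MASK, as the kernel code asserts. */
#define PT_WRITABLE_SHIFT    1
#define PT_GUEST_DIRTY_SHIFT 6
#define PT_WRITABLE_MASK     (1u << PT_WRITABLE_SHIFT)
#define ACC_WRITE_MASK       PT_WRITABLE_MASK

/* Start from a mask that strips write permission, then shift the guest
 * dirty bit down into the writable position and OR it back in: write
 * access survives only for dirty gptes. */
static unsigned protect_clean_gpte(unsigned access, unsigned gpte)
{
	unsigned mask = (unsigned)~ACC_WRITE_MASK;

	mask |= (gpte >> (PT_GUEST_DIRTY_SHIFT - PT_WRITABLE_SHIFT)) &
		PT_WRITABLE_MASK;
	return access & mask;
}

int main(void)
{
	unsigned rwx = 0x7;   /* hypothetical RWX access bits */

	printf("clean gpte: %#x\n", protect_clean_gpte(rwx, 0x023));  /* W stripped */
	printf("dirty gpte: %#x\n", protect_clean_gpte(rwx, 0x063));  /* W kept */
	return 0;
}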

mmu.c
    283  static gfn_t pse36_gfn_delta(u32 gpte)  [in pse36_gfn_delta(), argument]
    287      return (gpte & PT32_DIR_PSE36_MASK) << shift;  [in pse36_gfn_delta()]
   3960      unsigned level, unsigned gpte)  [in is_last_gpte()]
   3965      * PT_PAGE_SIZE_MASK in gpte if that is the case.  [in is_last_gpte()]
   3967      gpte &= level - mmu->last_nonleaf_level;  [in is_last_gpte()]
   3972      * level == PG_LEVEL_4K; set PT_PAGE_SIZE_MASK in gpte then.  [in is_last_gpte()]
   3974      gpte |= level - PG_LEVEL_4K - 1;  [in is_last_gpte()]
   3976      return gpte & PT_PAGE_SIZE_MASK;  [in is_last_gpte()]
   4896      /* Handle a 32-bit guest writing two halves of a 64-bit gpte */  [in mmu_pte_write_fetch_gpte()]
  definitions of gpte:
   3959  is_last_gpte(struct kvm_mmu *mmu, unsigned level, unsigned gpte)  [argument]
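
is_last_gpte() at 3960..3976 decides branchlessly whether a gpte is a leaf. Both right-hand sides rely on unsigned wraparound: level - last_nonleaf_level has bit 7 set exactly when level < last_nonleaf_level (large pages are possible, so the PS bit survives the AND), and level - PG_LEVEL_4K - 1 has bit 7 set exactly when level == PG_LEVEL_4K (a 4K entry is always last, so the OR forces the bit). A standalone sketch; last_nonleaf_level is passed in directly and the constants are restated as assumptions.

#include <stdio.h>

/* Constants restated locally: bit 7 is the x86 PS (page-size) bit and
 * PG_LEVEL_4K is 1 in KVM's level numbering (assumed values). */
#define PT_PAGE_SIZE_MASK (1u << 7)
#define PG_LEVEL_4K       1u

/* Branchless leaf test per the hits above. Unsigned wraparound sets
 * bit 7 of the right-hand sides exactly when the commented comparison
 * holds:
 *   level - last_nonleaf_level  -> bit 7 set iff level <  last_nonleaf_level
 *   level - PG_LEVEL_4K - 1     -> bit 7 set iff level == PG_LEVEL_4K
 * So the PS bit survives the AND only where large pages can exist, and
 * the OR forces "last" for 4K entries unconditionally. */
static unsigned is_last_gpte(unsigned last_nonleaf_level,
			     unsigned level, unsigned gpte)
{
	gpte &= level - last_nonleaf_level;
	gpte |= level - PG_LEVEL_4K - 1;
	return gpte & PT_PAGE_SIZE_MASK;
}

int main(void)
{
	unsigned lnl = 4;   /* hypothetical: 4-level paging */

	printf("4K PTE,  lvl 1: %u\n", is_last_gpte(lnl, 1, 0x063));  /* last */
	printf("2M PDE,  lvl 2: %u\n", is_last_gpte(lnl, 2, 0x0e3));  /* last */
	printf("dir PDE, lvl 2: %u\n", is_last_gpte(lnl, 2, 0x063));  /* not last */
	return 0;
}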

/kernel/linux/linux-5.10/arch/powerpc/include/asm/

kvm_book3s.h
    186      struct kvmppc_pte *gpte, u64 root,
    189      struct kvmppc_pte *gpte, u64 table,
    192      struct kvmppc_pte *gpte, bool data, bool iswrite);

/kernel/linux/linux-6.6/arch/powerpc/include/asm/

kvm_book3s.h
    186      struct kvmppc_pte *gpte, u64 root,
    189      struct kvmppc_pte *gpte, u64 table,
    192      struct kvmppc_pte *gpte, bool data, bool iswrite);