/kernel/linux/linux-6.6/arch/x86/kvm/mmu/
spte.c
      16  #include "spte.h"
      74  u64 spte = generation_mmio_spte_mask(gen);  [in make_mmio_spte() local]
      80  spte |= shadow_mmio_value | access;  [in make_mmio_spte()]
      81  spte |= gpa | shadow_nonpresent_or_rsvd_mask;  [in make_mmio_spte()]
      82  spte |= (gpa & shadow_nonpresent_or_rsvd_mask)  [in make_mmio_spte()]
      85  return spte;  [in make_mmio_spte()]
     114  bool spte_has_volatile_bits(u64 spte)  [in spte_has_volatile_bits() argument]
     117  * Always atomically update spte if it can be updated  [in spte_has_volatile_bits()]
     122  if (!is_writable_pte(spte) && is_mmu_writable_spte(spte))  [in spte_has_volatile_bits()]
     144  u64 spte = SPTE_MMU_PRESENT_MASK;  [make_spte() local]
     251  make_spte_executable(u64 spte)  [make_spte_executable() argument]
     312  u64 spte = SPTE_MMU_PRESENT_MASK;  [make_nonleaf_spte() local]
     341  mark_spte_for_access_track(u64 spte)  [mark_spte_for_access_track() argument]
          [all...]
spte.h
     103  * Bits 0-7 of the MMIO generation are propagated to spte bits 3-10
     104  * Bits 8-18 of the MMIO generation are propagated to spte bits 52-62
     111  * checking for MMIO spte cache hits.
     202  static inline bool is_removed_spte(u64 spte)  [in is_removed_spte() argument]
     204  return spte == REMOVED_SPTE;  [in is_removed_spte()]
     230  static inline struct kvm_mmu_page *spte_to_child_sp(u64 spte)  [in spte_to_child_sp() argument]
     232  return to_shadow_page(spte & SPTE_BASE_ADDR_MASK);  [in spte_to_child_sp()]
     252  static inline bool is_mmio_spte(u64 spte)  [in is_mmio_spte() argument]
     254  return (spte & shadow_mmio_mask) == shadow_mmio_value &&  [in is_mmio_spte()]
     279  static inline bool spte_ad_enabled(u64 spte)  [in spte_ad_enabled() argument]
     285  spte_ad_need_write_protect(u64 spte)  [spte_ad_need_write_protect() argument]
     296  spte_shadow_accessed_mask(u64 spte)  [spte_shadow_accessed_mask() argument]
     302  spte_shadow_dirty_mask(u64 spte)  [spte_shadow_dirty_mask() argument]
     308  is_access_track_spte(u64 spte)  [is_access_track_spte() argument]
     323  is_executable_pte(u64 spte)  [is_executable_pte() argument]
     333  is_accessed_spte(u64 spte)  [is_accessed_spte() argument]
     341  is_dirty_spte(u64 spte)  [is_dirty_spte() argument]
     368  is_rsvd_spte(struct rsvd_bits_validate *rsvd_check, u64 spte, int level)  [is_rsvd_spte() argument]
     447  check_spte_writable_invariants(u64 spte)  [check_spte_writable_invariants() argument]
     455  KBUILD_MODNAME ": Writable SPTE is not MMU-writable: %llx", spte);  [check_spte_writable_invariants() local]
     458  is_mmu_writable_spte(u64 spte)  [is_mmu_writable_spte() argument]
     463  get_mmio_spte_generation(u64 spte)  [get_mmio_spte_generation() argument]
     486  restore_acc_track_spte(u64 spte)  [restore_acc_track_spte() argument]
          [all...]
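The comment excerpted at lines 103-104 describes how the 19-bit MMIO generation is split across two non-contiguous SPTE bit ranges. Below is a minimal user-space sketch of that packing, assuming only the layout stated in the comment; the GEN_* names are illustrative, not the kernel's MMIO_SPTE_GEN_* definitions.

    #include <assert.h>
    #include <stdint.h>

    #define GEN_LOW_SHIFT   3    /* generation bits 0-7  live in SPTE bits 3-10  */
    #define GEN_LOW_BITS    8
    #define GEN_HIGH_SHIFT  52   /* generation bits 8-18 live in SPTE bits 52-62 */
    #define GEN_HIGH_BITS   11

    static uint64_t pack_mmio_gen(uint64_t gen)
    {
            uint64_t low  = gen & ((1ULL << GEN_LOW_BITS) - 1);
            uint64_t high = (gen >> GEN_LOW_BITS) & ((1ULL << GEN_HIGH_BITS) - 1);

            return (low << GEN_LOW_SHIFT) | (high << GEN_HIGH_SHIFT);
    }

    static uint64_t unpack_mmio_gen(uint64_t spte)
    {
            uint64_t low  = (spte >> GEN_LOW_SHIFT)  & ((1ULL << GEN_LOW_BITS)  - 1);
            uint64_t high = (spte >> GEN_HIGH_SHIFT) & ((1ULL << GEN_HIGH_BITS) - 1);

            return low | (high << GEN_LOW_BITS);
    }

    int main(void)
    {
            /* Round-trips for any generation that fits in the 19 usable bits. */
            assert(unpack_mmio_gen(pack_mmio_gen(0x5ABCD)) == 0x5ABCD);
            return 0;
    }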
mmutrace.h
     212  TP_PROTO(u64 *sptep, gfn_t gfn, u64 spte),
     213  TP_ARGS(sptep, gfn, spte),
     225  __entry->access = spte & ACC_ALL;
     226  __entry->gen = get_mmio_spte_generation(spte);
     312  TP_PROTO(u64 spte, unsigned int kvm_gen, unsigned int spte_gen),
     313  TP_ARGS(spte, kvm_gen, spte_gen),
     318  __field(u64, spte)
     324  __entry->spte = spte;
     327  TP_printk("spte
          [all...]
mmu.c
      30  #include "spte.h"
     174  #define for_each_shadow_entry_lockless(_vcpu, _addr, _walker, spte) \
     177  ({ spte = mmu_spte_get_lockless(_walker.sptep); 1; }); \
     178  __shadow_walk_next(&(_walker), spte))
     184  static void mmu_spte_set(u64 *sptep, u64 spte);
     299  u64 spte = make_mmio_spte(vcpu, gfn, access);  [in mark_mmio_spte() local]
     301  trace_mark_mmio_spte(sptep, gfn, spte);  [in mark_mmio_spte()]
     302  mmu_spte_set(sptep, spte);  [in mark_mmio_spte()]
     305  static gfn_t get_mmio_spte_gfn(u64 spte)  [in get_mmio_spte_gfn() argument]
     307  u64 gpa = spte  [in get_mmio_spte_gfn()]
     315  get_mmio_spte_access(u64 spte)  [get_mmio_spte_access() argument]
     320  check_mmio_spte(struct kvm_vcpu *vcpu, u64 spte)  [check_mmio_spte() argument]
     341  __set_spte(u64 *sptep, u64 spte)  [__set_spte() argument]
     346  __update_clear_spte_fast(u64 *sptep, u64 spte)  [__update_clear_spte_fast() argument]
     351  __update_clear_spte_slow(u64 *sptep, u64 spte)  [__update_clear_spte_slow() argument]
     366  u64 spte;  [global() member]
     369  count_spte_clear(u64 *sptep, u64 spte)  [count_spte_clear() argument]
     381  __set_spte(u64 *sptep, u64 spte)  [__set_spte() argument]
     400  __update_clear_spte_fast(u64 *sptep, u64 spte)  [__update_clear_spte_fast() argument]
     419  __update_clear_spte_slow(u64 *sptep, u64 spte)  [__update_clear_spte_slow() argument]
     456  union split_spte spte, *orig = (union split_spte *)sptep;  [__get_spte_lockless() local]
     621  u64 spte = mmu_spte_get_lockless(sptep);  [mmu_spte_age() local]
     927  pte_list_add(struct kvm_mmu_memory_cache *cache, u64 *spte, struct kvm_rmap_head *rmap_head)  [pte_list_add() argument]
    1000  pte_list_remove(struct kvm *kvm, u64 *spte, struct kvm_rmap_head *rmap_head)  [pte_list_remove() argument]
    1089  rmap_remove(struct kvm *kvm, u64 *spte)  [rmap_remove() argument]
    1225  u64 spte = *sptep;  [spte_write_protect() local]
    1253  u64 spte = *sptep;  [spte_clear_dirty() local]
    1635  __rmap_add(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, const struct kvm_memory_slot *slot, u64 *spte, gfn_t gfn, unsigned int access)  [__rmap_add() argument]
    1659  rmap_add(struct kvm_vcpu *vcpu, const struct kvm_memory_slot *slot, u64 *spte, gfn_t gfn, unsigned int access)  [rmap_add() argument]
    1781  mark_unsync(u64 *spte)  [mark_unsync() argument]
    2134  clear_sp_write_flooding_count(u64 *spte)  [clear_sp_write_flooding_count() argument]
    2406  __shadow_walk_next(struct kvm_shadow_walk_iterator *iterator, u64 spte)  [__shadow_walk_next() argument]
    2427  u64 spte;  [__link_shadow_page() local]
    2487  mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp, u64 *spte, struct list_head *invalid_list)  [mmu_page_zap_pte() argument]
    2910  u64 spte;  [mmu_set_spte() local]
    3002  u64 *spte, *start = NULL;  [__direct_pte_prefetch() local]
    3196  disallowed_hugepage_adjust(struct kvm_page_fault *fault, u64 spte, int cur_level)  [disallowed_hugepage_adjust() argument]
    3386  is_access_allowed(struct kvm_page_fault *fault, u64 spte)  [is_access_allowed() argument]
    3407  fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, gpa_t gpa, u64 *spte)  [fast_pf_get_last_sptep() argument]
    3428  u64 spte = 0ull;  [fast_page_fault() local]
    4081  u64 spte;  [get_walk() local]
    4148  u64 spte;  [handle_mmio_page_fault() local]
    4202  u64 spte;  [shadow_page_table_clear_flood() local]
    5619  u64 *spte;  [get_written_sptes() local]
    5653  u64 entry, gentry, *spte;  [kvm_mmu_track_write() local]
    6377  u64 *sptep, spte;  [shadow_mmu_split_huge_page() local]
    6421  u64 spte;  [shadow_mmu_try_split_huge_page() local]
          [all...]
paging_tmpl.h
     152  struct kvm_mmu_page *sp, u64 *spte,  [in prefetch_invalid_gpte()]
     169  drop_spte(vcpu->kvm, spte);  [in prefetch_invalid_gpte()]
     534  u64 *spte, pt_element_t gpte)  [in prefetch_gpte()]
     541  if (FNAME(prefetch_invalid_gpte)(vcpu, sp, spte, gpte))  [in prefetch_gpte()]
     556  mmu_set_spte(vcpu, slot, spte, pte_access, gfn, pfn, NULL);  [in prefetch_gpte()]
     589  u64 *spte;  [in pte_prefetch() local]
     608  spte = sp->spt + i;  [in pte_prefetch()]
     610  for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) {  [in pte_prefetch()]
     611  if (spte == sptep)  [in pte_prefetch()]
     614  if (is_shadow_present_pte(*spte))  [in pte_prefetch()]
     151  prefetch_invalid_gpte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, u64 *spte, u64 gpte)  [prefetch_invalid_gpte() argument]
     533  prefetch_gpte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, u64 *spte, pt_element_t gpte)  [prefetch_gpte() argument]
     907  u64 *sptep, spte;  [sync_spte() local]
          [all...]
tdp_iter.c
       6  #include "spte.h"
      62  tdp_ptep_t spte_to_child_pt(u64 spte, int level)  [in spte_to_child_pt() argument]
      68  if (!is_shadow_present_pte(spte) || is_last_spte(spte, level))  [in spte_to_child_pt()]
      71  return (tdp_ptep_t)__va(spte_to_pfn(spte) << PAGE_SHIFT);  [in spte_to_child_pt()]
tdp_mmu.h
       8  #include "spte.h"
      70  u64 *spte);
tdp_mmu.c
       9  #include "spte.h"
     517  * refreshed to the current value of the spte.
     526  * known value of the spte.
    1004  * tdp_mmu_link_sp - Replace the given spte with an spte pointing to the
    1018  u64 spte = make_nonleaf_spte(sp->spt, !kvm_ad_enabled());  [in tdp_mmu_link_sp() local]
    1022  ret = tdp_mmu_set_spte_atomic(kvm, iter, spte);  [in tdp_mmu_link_sp()]
    1026  tdp_mmu_iter_set_spte(kvm, iter, spte);  [in tdp_mmu_link_sp()]
    1398  * Replace the huge spte with a pointer to the populated lower level  [in tdp_mmu_split_huge_page()]
    1783  * Returns the last level spte pointe
    1793  kvm_tdp_mmu_fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, u64 addr, u64 *spte)  [kvm_tdp_mmu_fast_pf_get_last_sptep() argument]
          [all...]
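The three tdp_mmu_link_sp() matches above (1018, 1022, 1026) show the two ways a new non-leaf SPTE gets installed: an atomic setter and a plain one. A condensed sketch of that pattern follows; it is kernel-context pseudo-real code, and the branch on a shared flag is an assumption, since the excerpt does not show the actual condition.

    /*
     * Sketch only, not the 6.6 function body.  The atomic setter is assumed to
     * be used when the MMU lock is held for read ("shared") and may fail if
     * another vCPU raced; the plain setter is assumed to be used when the lock
     * is held for write.
     */
    static int tdp_mmu_link_sp_sketch(struct kvm *kvm, struct tdp_iter *iter,
                                      struct kvm_mmu_page *sp, bool shared)
    {
            u64 spte = make_nonleaf_spte(sp->spt, !kvm_ad_enabled());
            int ret;

            if (shared) {
                    ret = tdp_mmu_set_spte_atomic(kvm, iter, spte);
                    if (ret)
                            return ret;     /* lost the race; caller retries */
            } else {
                    tdp_mmu_iter_set_spte(kvm, iter, spte);
            }
            return 0;
    }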
mmu_internal.h
     120  * Used out of the mmu-lock to avoid reading spte values while an
     259  * RET_PF_INVALID: the spte is invalid, let the real page fault path update it.
     343  void disallowed_hugepage_adjust(struct kvm_page_fault *fault, u64 spte, int cur_level);
/kernel/linux/linux-5.10/arch/x86/kvm/mmu/
spte.h
      56  * Bits 0-8 of the MMIO generation are propagated to spte bits 3-11
      57  * Bits 9-17 of the MMIO generation are propagated to spte bits 54-62
      64  * checking for MMIO spte cache hits.
     143  static inline bool is_mmio_spte(u64 spte)  [in is_mmio_spte() argument]
     145  return (spte & SPTE_SPECIAL_MASK) == SPTE_MMIO_MASK;  [in is_mmio_spte()]
     153  static inline bool spte_ad_enabled(u64 spte)  [in spte_ad_enabled() argument]
     155  MMU_WARN_ON(is_mmio_spte(spte));  [in spte_ad_enabled()]
     156  return (spte & SPTE_SPECIAL_MASK) != SPTE_AD_DISABLED_MASK;  [in spte_ad_enabled()]
     159  static inline bool spte_ad_need_write_protect(u64 spte)  [in spte_ad_need_write_protect() argument]
     161  MMU_WARN_ON(is_mmio_spte(spte));  [in spte_ad_need_write_protect()]
     165  spte_shadow_accessed_mask(u64 spte)  [spte_shadow_accessed_mask() argument]
     171  spte_shadow_dirty_mask(u64 spte)  [spte_shadow_dirty_mask() argument]
     177  is_access_track_spte(u64 spte)  [is_access_track_spte() argument]
     201  is_executable_pte(u64 spte)  [is_executable_pte() argument]
     211  is_accessed_spte(u64 spte)  [is_accessed_spte() argument]
     219  is_dirty_spte(u64 spte)  [is_dirty_spte() argument]
     226  spte_can_locklessly_be_made_writable(u64 spte)  [spte_can_locklessly_be_made_writable() argument]
     232  get_mmio_spte_generation(u64 spte)  [get_mmio_spte_generation() argument]
          [all...]
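The 5.10 comment at lines 56-57 describes the same generation packing with a slightly different layout; relative to the sketch after the 6.6 spte.h entry above, only the illustrative constants change.

    #define GEN_LOW_SHIFT   3    /* generation bits 0-8  -> SPTE bits 3-11  */
    #define GEN_LOW_BITS    9
    #define GEN_HIGH_SHIFT  54   /* generation bits 9-17 -> SPTE bits 54-62 */
    #define GEN_HIGH_BITS   9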
spte.c
      16  #include "spte.h"
      89  u64 spte = 0;  [in make_spte() local]
      93  spte |= SPTE_AD_DISABLED_MASK;  [in make_spte()]
      95  spte |= SPTE_AD_WRPROT_ONLY_MASK;  [in make_spte()]
     103  spte |= shadow_present_mask;  [in make_spte()]
     105  spte |= spte_shadow_accessed_mask(spte);  [in make_spte()]
     113  spte |= shadow_x_mask;  [in make_spte()]
     115  spte |= shadow_nx_mask;  [in make_spte()]
     118  spte |  [in make_spte()]
     170  u64 spte;  [make_nonleaf_spte() local]
     217  mark_spte_for_access_track(u64 spte)  [mark_spte_for_access_track() argument]
          [all...]
mmu.c
      27  #include "spte.h"
     166  #define for_each_shadow_entry_lockless(_vcpu, _addr, _walker, spte) \
     169  ({ spte = mmu_spte_get_lockless(_walker.sptep); 1; }); \
     170  __shadow_walk_next(&(_walker), spte))
     176  static void mmu_spte_set(u64 *sptep, u64 spte);
     226  static gfn_t get_mmio_spte_gfn(u64 spte)  [in get_mmio_spte_gfn() argument]
     228  u64 gpa = spte & shadow_nonpresent_or_rsvd_lower_gfn_mask;  [in get_mmio_spte_gfn()]
     230  gpa |= (spte >> SHADOW_NONPRESENT_OR_RSVD_MASK_LEN)  [in get_mmio_spte_gfn()]
     236  static unsigned get_mmio_spte_access(u64 spte)  [in get_mmio_spte_access() argument]
     238  return spte  [in get_mmio_spte_access()]
     252  check_mmio_spte(struct kvm_vcpu *vcpu, u64 spte)  [check_mmio_spte() argument]
     291  __set_spte(u64 *sptep, u64 spte)  [__set_spte() argument]
     296  __update_clear_spte_fast(u64 *sptep, u64 spte)  [__update_clear_spte_fast() argument]
     301  __update_clear_spte_slow(u64 *sptep, u64 spte)  [__update_clear_spte_slow() argument]
     316  u64 spte;  [global() member]
     319  count_spte_clear(u64 *sptep, u64 spte)  [count_spte_clear() argument]
     331  __set_spte(u64 *sptep, u64 spte)  [__set_spte() argument]
     350  __update_clear_spte_fast(u64 *sptep, u64 spte)  [__update_clear_spte_fast() argument]
     369  __update_clear_spte_slow(u64 *sptep, u64 spte)  [__update_clear_spte_slow() argument]
     406  union split_spte spte, *orig = (union split_spte *)sptep;  [__get_spte_lockless() local]
     427  spte_has_volatile_bits(u64 spte)  [spte_has_volatile_bits() argument]
     587  restore_acc_track_spte(u64 spte)  [restore_acc_track_spte() argument]
     607  u64 spte = mmu_spte_get_lockless(sptep);  [mmu_spte_age() local]
     836  pte_list_add(struct kvm_vcpu *vcpu, u64 *spte, struct kvm_rmap_head *rmap_head)  [pte_list_add() argument]
     895  __pte_list_remove(u64 *spte, struct kvm_rmap_head *rmap_head)  [__pte_list_remove() argument]
     965  rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)  [rmap_add() argument]
     976  rmap_remove(struct kvm *kvm, u64 *spte)  [rmap_remove() argument]
    1108  u64 spte = *sptep;  [spte_write_protect() local]
    1139  u64 spte = *sptep;  [spte_clear_dirty() local]
    1181  u64 spte = *sptep;  [spte_set_dirty() local]
    1560  rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)  [rmap_recycle() argument]
    1694  mark_unsync(u64 *spte)  [mark_unsync() argument]
    2015  clear_sp_write_flooding_count(u64 *spte)  [clear_sp_write_flooding_count() argument]
    2161  __shadow_walk_next(struct kvm_shadow_walk_iterator *iterator, u64 spte)  [__shadow_walk_next() argument]
    2181  u64 spte;  [link_shadow_page() local]
    2218  mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp, u64 *spte, struct list_head *invalid_list)  [mmu_page_zap_pte() argument]
    2565  u64 spte;  [set_spte() local]
    2705  u64 *spte, *start = NULL;  [__direct_pte_prefetch() local]
    2828  disallowed_hugepage_adjust(u64 spte, gfn_t gfn, int cur_level, kvm_pfn_t *pfnp, int *goal_levelp)  [disallowed_hugepage_adjust() argument]
    3019  is_access_allowed(u32 fault_err_code, u64 spte)  [is_access_allowed() argument]
    3040  u64 spte = 0ull;  [fast_page_fault() local]
    3495  u64 spte;  [get_walk() local]
    3570  u64 spte;  [handle_mmio_page_fault() local]
    3625  u64 spte;  [shadow_page_table_clear_flood() local]
    4958  u64 *spte;  [get_written_sptes() local]
    4993  u64 entry, gentry, *spte;  [kvm_mmu_pte_write() local]
          [all...]
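The get_mmio_spte_gfn() matches above (lines 226-230), together with the make_mmio_spte() lines in the 6.6 spte.c entry, show the two halves of the trick that hides an MMIO GPA inside a reserved, non-present SPTE: the GPA bits that collide with the always-set reserved range are stashed a fixed distance higher and pulled back down on decode. A standalone sketch with made-up mask positions (the kernel derives the real ones from the CPU's physical-address width):

    #include <assert.h>
    #include <stdint.h>

    #define RSVD_LEN        5                                /* width of the forced-ones range */
    #define RSVD_SHIFT      47
    #define RSVD_MASK       (((1ULL << RSVD_LEN) - 1) << RSVD_SHIFT)
    #define LOWER_GFN_MASK  ((1ULL << RSVD_SHIFT) - 1)       /* GPA bits below that range      */

    static uint64_t stash_gpa(uint64_t gpa)                  /* make_mmio_spte() direction */
    {
            uint64_t spte = 0;

            spte |= gpa | RSVD_MASK;                 /* force the reserved bits to ones    */
            spte |= (gpa & RSVD_MASK) << RSVD_LEN;   /* keep the clobbered GPA bits higher */
            return spte;
    }

    static uint64_t recover_gpa(uint64_t spte)               /* get_mmio_spte_gfn() direction */
    {
            uint64_t gpa = spte & LOWER_GFN_MASK;

            gpa |= (spte >> RSVD_LEN) & RSVD_MASK;   /* pull the stashed bits back down    */
            return gpa;
    }

    int main(void)
    {
            uint64_t gpa = 0x000a3c0000123000ULL;    /* has bits inside RSVD_MASK */

            assert(recover_gpa(stash_gpa(gpa)) == gpa);
            return 0;
    }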
mmutrace.h
     205  TP_PROTO(u64 *sptep, gfn_t gfn, u64 spte),
     206  TP_ARGS(sptep, gfn, spte),
     218  __entry->access = spte & ACC_ALL;
     219  __entry->gen = get_mmio_spte_generation(spte);
     305  TP_PROTO(u64 spte, unsigned int kvm_gen, unsigned int spte_gen),
     306  TP_ARGS(spte, kvm_gen, spte_gen),
     311  __field(u64, spte)
     317  __entry->spte = spte;
     320  TP_printk("spte
          [all...]
tdp_iter.c
       5  #include "spte.h"
      51  u64 *spte_to_child_pt(u64 spte, int level)  [in spte_to_child_pt() argument]
      57  if (!is_shadow_present_pte(spte) || is_last_spte(spte, level))  [in spte_to_child_pt()]
      60  return __va(spte_to_pfn(spte) << PAGE_SHIFT);  [in spte_to_child_pt()]
paging_tmpl.h
     187  struct kvm_mmu_page *sp, u64 *spte,  [in prefetch_invalid_gpte()]
     204  drop_spte(vcpu->kvm, spte);  [in prefetch_invalid_gpte()]
     529  u64 *spte, pt_element_t gpte, bool no_dirty_log)  [in prefetch_gpte()]
     535  if (FNAME(prefetch_invalid_gpte)(vcpu, sp, spte, gpte))  [in prefetch_gpte()]
     538  pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);  [in prefetch_gpte()]
     552  mmu_set_spte(vcpu, spte, pte_access, false, PG_LEVEL_4K, gfn, pfn,  [in prefetch_gpte()]
     560  u64 *spte, const void *pte)  [in update_pte()]
     564  FNAME(prefetch_gpte)(vcpu, sp, spte, gpte, false);  [in update_pte()]
     595  u64 *spte;  [in pte_prefetch() local]
     186  prefetch_invalid_gpte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, u64 *spte, u64 gpte)  [prefetch_invalid_gpte() argument]
     528  prefetch_gpte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, u64 *spte, pt_element_t gpte, bool no_dirty_log)  [prefetch_gpte() argument]
     559  update_pte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, u64 *spte, const void *pte)  [update_pte() argument]
          [all...]
mmu_internal.h
      41  /* hold the gfn of each spte inside spt */
      50  * Used out of the mmu-lock to avoid reading spte values while an
     119  * RET_PF_INVALID: the spte is invalid, let the real page fault path update it.
     139  void disallowed_hugepage_adjust(u64 spte, gfn_t gfn, int cur_level,
/kernel/linux/linux-5.10/arch/x86/kvm/
Makefile
      19  mmu/spte.o mmu/tdp_iter.o mmu/tdp_mmu.o
/kernel/linux/linux-6.6/arch/x86/kvm/
Makefile
      15  mmu/spte.o
/kernel/linux/linux-5.10/arch/s390/mm/
pgtable.c
     652  pte_t spte, tpte;  [in ptep_shadow_pte() local]
     658  spte = *sptep;  [in ptep_shadow_pte()]
     659  if (!(pte_val(spte) & _PAGE_INVALID) &&  [in ptep_shadow_pte()]
     660  !((pte_val(spte) & _PAGE_PROTECT) &&  [in ptep_shadow_pte()]
     664  pte_val(tpte) = (pte_val(spte) & PAGE_MASK) |  [in ptep_shadow_pte()]
/kernel/linux/linux-6.6/arch/s390/mm/
pgtable.c
     691  pte_t spte, tpte;  [in ptep_shadow_pte() local]
     697  spte = *sptep;  [in ptep_shadow_pte()]
     698  if (!(pte_val(spte) & _PAGE_INVALID) &&  [in ptep_shadow_pte()]
     699  !((pte_val(spte) & _PAGE_PROTECT) &&  [in ptep_shadow_pte()]
     703  tpte = __pte((pte_val(spte) & PAGE_MASK) |  [in ptep_shadow_pte()]
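The ptep_shadow_pte() matches above show the gist of shadowing a guest PTE on s390: copy the page frame from the source entry only if it is valid, and refuse when the source is write-protected but the requested mapping is not. A standalone sketch of that check; the bit values are placeholders, the real definitions live in arch/s390/include/asm/pgtable.h.

    #include <stdint.h>

    #define SKETCH_PAGE_MASK  (~0xfffULL)
    #define PAGE_INVALID      0x400ULL     /* placeholder for _PAGE_INVALID */
    #define PAGE_PROTECT      0x200ULL     /* placeholder for _PAGE_PROTECT */

    /* Build the shadow entry *tpte from the source entry spte, or fail. */
    static int shadow_pte_sketch(uint64_t spte, uint64_t prot, uint64_t *tpte)
    {
            if (spte & PAGE_INVALID)
                    return -1;             /* nothing valid to shadow */
            if ((spte & PAGE_PROTECT) && !(prot & PAGE_PROTECT))
                    return -1;             /* source is read-only, writable shadow refused */

            *tpte = (spte & SKETCH_PAGE_MASK) | prot;
            return 0;
    }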
/kernel/linux/linux-5.10/mm/
hugetlb.c
    5421  pte_t *spte = NULL;  [in huge_pmd_share() local]
    5435  spte = huge_pte_offset(svma->vm_mm, saddr,  [in huge_pmd_share()]
    5437  if (spte) {  [in huge_pmd_share()]
    5438  get_page(virt_to_page(spte));  [in huge_pmd_share()]
    5444  if (!spte)  [in huge_pmd_share()]
    5447  ptl = huge_pte_lock(hstate_vma(vma), mm, spte);  [in huge_pmd_share()]
    5450  (pmd_t *)((unsigned long)spte & PAGE_MASK));  [in huge_pmd_share()]
    5453  put_page(virt_to_page(spte));  [in huge_pmd_share()]
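Line 5450 installs the shared table by masking the sibling's pte pointer down to the base of the page that holds it, after line 5438 has taken a reference on that page. The masking itself is plain pointer arithmetic; a standalone illustration, assuming 4 KiB pages:

    #include <assert.h>
    #include <stdint.h>

    #define SKETCH_PAGE_SIZE  4096UL
    #define SKETCH_PAGE_MASK  (~(SKETCH_PAGE_SIZE - 1))

    /* A pointer anywhere inside a page-table page, masked down to the page base. */
    static void *pt_page_base(void *entry)
    {
            return (void *)((uintptr_t)entry & SKETCH_PAGE_MASK);
    }

    int main(void)
    {
            static uint64_t table[512] __attribute__((aligned(4096)));

            /* &table[37] points into the page; masking recovers &table[0]. */
            assert(pt_page_base(&table[37]) == (void *)table);
            return 0;
    }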
/kernel/linux/linux-5.10/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/
vmm.c
     448  bool spte = pgt->pte[ptei] & NVKM_VMM_PTE_SPTES;  [in nvkm_vmm_ref_hwpt() local]
     451  if (spte != next)  [in nvkm_vmm_ref_hwpt()]
     455  if (!spte) {  [in nvkm_vmm_ref_hwpt()]
/kernel/linux/linux-6.6/mm/
hugetlb.c
    7005  pte_t *spte = NULL;  [in huge_pmd_share() local]
    7015  spte = hugetlb_walk(svma, saddr,  [in huge_pmd_share()]
    7017  if (spte) {  [in huge_pmd_share()]
    7018  get_page(virt_to_page(spte));  [in huge_pmd_share()]
    7024  if (!spte)  [in huge_pmd_share()]
    7030  (pmd_t *)((unsigned long)spte & PAGE_MASK));  [in huge_pmd_share()]
    7033  put_page(virt_to_page(spte));  [in huge_pmd_share()]
/kernel/linux/linux-6.6/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/
vmm.c
     448  bool spte = pgt->pte[ptei] & NVKM_VMM_PTE_SPTES;  [in nvkm_vmm_ref_hwpt() local]
     451  if (spte != next)  [in nvkm_vmm_ref_hwpt()]
     455  if (!spte) {  [in nvkm_vmm_ref_hwpt()]