Lines Matching refs:sptep
158 u64 *sptep;
177 ({ spte = mmu_spte_get_lockless(_walker.sptep); 1; }); \
184 static void mmu_spte_set(u64 *sptep, u64 spte);
288 static void kvm_flush_remote_tlbs_sptep(struct kvm *kvm, u64 *sptep)
290 struct kvm_mmu_page *sp = sptep_to_sp(sptep);
291 gfn_t gfn = kvm_mmu_page_get_gfn(sp, spte_index(sptep));
296 static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
301 trace_mark_mmio_spte(sptep, gfn, spte);
302 mmu_spte_set(sptep, spte);
341 static void __set_spte(u64 *sptep, u64 spte)
343 WRITE_ONCE(*sptep, spte);
346 static void __update_clear_spte_fast(u64 *sptep, u64 spte)
348 WRITE_ONCE(*sptep, spte);
351 static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
353 return xchg(sptep, spte);
356 static u64 __get_spte_lockless(u64 *sptep)
358 return READ_ONCE(*sptep);
369 static void count_spte_clear(u64 *sptep, u64 spte)
371 struct kvm_mmu_page *sp = sptep_to_sp(sptep);
381 static void __set_spte(u64 *sptep, u64 spte)
385 ssptep = (union split_spte *)sptep;
400 static void __update_clear_spte_fast(u64 *sptep, u64 spte)
404 ssptep = (union split_spte *)sptep;
416 count_spte_clear(sptep, spte);
419 static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
423 ssptep = (union split_spte *)sptep;
430 count_spte_clear(sptep, spte);
453 static u64 __get_spte_lockless(u64 *sptep)
455 struct kvm_mmu_page *sp = sptep_to_sp(sptep);
456 union split_spte spte, *orig = (union split_spte *)sptep;
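The two sets of accessor definitions above reflect the host word size: on 64-bit builds an SPTE is read and written whole with READ_ONCE/WRITE_ONCE, while the split_spte variants handle the 64-bit value as two 32-bit halves and bump a clear counter (count_spte_clear) so a lockless reader can detect a torn value and retry. Below is a minimal userspace sketch of that retry idea, assuming a simple seqcount-style counter; the demo_* names and field layout are illustrative, not the kernel's.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for union split_spte. */
union demo_split_spte {
	struct {
		uint32_t spte_low;
		uint32_t spte_high;
	};
	uint64_t spte;
};

/* Bumped by writers that clear an entry, as count_spte_clear() does per page. */
static _Atomic unsigned int demo_clear_count;

static uint64_t demo_get_spte_lockless(const volatile union demo_split_spte *orig)
{
	union demo_split_spte spte;
	unsigned int count;

retry:
	count = atomic_load(&demo_clear_count);
	spte.spte_low = orig->spte_low;
	spte.spte_high = orig->spte_high;
	/* A concurrent clear may have torn the two halves: re-read if so. */
	if (count != atomic_load(&demo_clear_count))
		goto retry;
	return spte.spte;
}

int main(void)
{
	union demo_split_spte spte = { .spte = 0x123456789abcdef0ull };

	printf("%#llx\n", (unsigned long long)demo_get_spte_lockless(&spte));
	return 0;
}

The real __get_spte_lockless() additionally orders the half-reads against writers with memory barriers; the counter check here only sketches the retry shape.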
478 * Set the sptep from nonpresent to present.
479 * Note: the sptep being assigned *must* be either not present
483 static void mmu_spte_set(u64 *sptep, u64 new_spte)
485 WARN_ON_ONCE(is_shadow_present_pte(*sptep));
486 __set_spte(sptep, new_spte);
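Per the rule quoted above, mmu_spte_set() may only install a value over a non-present entry; any present-to-present change has to go through the update helpers instead. A minimal userspace sketch of that contract, assuming an illustrative present bit; the demo_* names are not the kernel's definitions.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_SPTE_PRESENT_MASK (1ull << 11)	/* illustrative shadow-present bit */

static int demo_is_present(uint64_t spte)
{
	return !!(spte & DEMO_SPTE_PRESENT_MASK);
}

/* Mirrors the mmu_spte_set() rule: the target must not already be present. */
static void demo_spte_set(uint64_t *sptep, uint64_t new_spte)
{
	assert(!demo_is_present(*sptep));	/* the kernel WARNs here instead of asserting */
	*sptep = new_spte;
}

int main(void)
{
	uint64_t spte = 0;

	demo_spte_set(&spte, 0x1000 | DEMO_SPTE_PRESENT_MASK);
	printf("spte = %#llx\n", (unsigned long long)spte);
	return 0;
}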
493 static u64 mmu_spte_update_no_track(u64 *sptep, u64 new_spte)
495 u64 old_spte = *sptep;
501 mmu_spte_set(sptep, new_spte);
506 __update_clear_spte_fast(sptep, new_spte);
508 old_spte = __update_clear_spte_slow(sptep, new_spte);
524 static bool mmu_spte_update(u64 *sptep, u64 new_spte)
527 u64 old_spte = mmu_spte_update_no_track(sptep, new_spte);
561 It sets the sptep from present to nonpresent and tracks the
562 state bits; it is used to clear the last-level sptep.
565 static u64 mmu_spte_clear_track_bits(struct kvm *kvm, u64 *sptep)
568 u64 old_spte = *sptep;
569 int level = sptep_to_sp(sptep)->role.level;
574 __update_clear_spte_fast(sptep, 0ull);
576 old_spte = __update_clear_spte_slow(sptep, 0ull);
605 Directly clear the spte without caring about the state bits of the sptep,
608 static void mmu_spte_clear_no_track(u64 *sptep)
610 __update_clear_spte_fast(sptep, 0ull);
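The two clearing helpers above differ in what they hand back: mmu_spte_clear_track_bits() removes a present last-level SPTE and returns the old value so accessed/dirty state can still be harvested, while mmu_spte_clear_no_track() simply zaps an entry whose state bits do not matter (per its comment, upper-level SPTEs). A minimal userspace sketch of that contract; the bit positions and demo_* names are illustrative, not the kernel's.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_ACCESSED_MASK (1ull << 5)
#define DEMO_DIRTY_MASK    (1ull << 6)

/* Clear a leaf SPTE and hand back the old value so A/D bits can be harvested. */
static uint64_t demo_spte_clear_track_bits(_Atomic uint64_t *sptep)
{
	/* An exchange keeps bits the hardware may have set concurrently. */
	return atomic_exchange(sptep, 0);
}

/* Clear an upper-level SPTE; nothing in it needs to be propagated. */
static void demo_spte_clear_no_track(_Atomic uint64_t *sptep)
{
	atomic_store(sptep, 0);
}

int main(void)
{
	_Atomic uint64_t leaf = 0x1000 | DEMO_ACCESSED_MASK | DEMO_DIRTY_MASK;
	_Atomic uint64_t upper = 0x2000;
	uint64_t old = demo_spte_clear_track_bits(&leaf);

	if (old & DEMO_ACCESSED_MASK)
		printf("leaf was accessed\n");
	if (old & DEMO_DIRTY_MASK)
		printf("leaf was dirty\n");

	demo_spte_clear_no_track(&upper);
	return 0;
}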
613 static u64 mmu_spte_get_lockless(u64 *sptep)
615 return __get_spte_lockless(sptep);
619 static bool mmu_spte_age(u64 *sptep)
621 u64 spte = mmu_spte_get_lockless(sptep);
628 (unsigned long *)sptep);
638 mmu_spte_update_no_track(sptep, spte);
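mmu_spte_age() above boils down to "was this translation used since the last scan, and clear the evidence": with A/D bits it test-and-clears the accessed bit in place, otherwise it falls back to mmu_spte_update_no_track() to re-mark the SPTE for access tracking. A userspace sketch of the A/D-bit path only; the bit position and demo_* names are illustrative.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_ACCESSED_MASK (1ull << 5)

static bool demo_spte_age(_Atomic uint64_t *sptep)
{
	/* Modeled on the test_and_clear_bit() call: clear A, report the old value. */
	uint64_t old = atomic_fetch_and(sptep, ~DEMO_ACCESSED_MASK);

	return old & DEMO_ACCESSED_MASK;
}

int main(void)
{
	_Atomic uint64_t spte = 0x1000 | DEMO_ACCESSED_MASK;

	printf("young: %d\n", demo_spte_age(&spte));	/* 1: was accessed */
	printf("young: %d\n", demo_spte_age(&spte));	/* 0: bit already cleared */
	return 0;
}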
1032 struct kvm_rmap_head *rmap_head, u64 *sptep)
1034 mmu_spte_clear_track_bits(kvm, sptep);
1035 pte_list_remove(kvm, sptep, rmap_head);
1119 struct pte_list_desc *desc; /* holds the sptep if not NULL */
1120 int pos; /* index of the sptep */
1128 * Returns sptep if found, NULL otherwise.
1133 u64 *sptep;
1140 sptep = (u64 *)rmap_head->val;
1146 sptep = iter->desc->sptes[iter->pos];
1148 BUG_ON(!is_shadow_present_pte(*sptep));
1149 return sptep;
1155 * Returns sptep if found, NULL otherwise.
1159 u64 *sptep;
1164 sptep = iter->desc->sptes[iter->pos];
1165 if (sptep)
1174 sptep = iter->desc->sptes[iter->pos];
1181 BUG_ON(!is_shadow_present_pte(*sptep));
1182 return sptep;
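The rmap_iterator fragments above implement the usual two-shape reverse map: a head that refers to a single SPTE pointer directly, or to descriptors holding several. The sketch below models that shape with one descriptor and no chaining, which is enough to show why rmap_get_first()/rmap_get_next() carry both a desc and a pos; the demo_* names are illustrative and the kernel actually tags the head value rather than using two fields.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_DESC_SPTES 4

struct demo_rmap_desc {
	uint64_t *sptes[DEMO_DESC_SPTES];
};

struct demo_rmap_head {
	uint64_t *single;		/* used when only one mapping exists */
	struct demo_rmap_desc *desc;	/* used when several mappings exist */
};

struct demo_rmap_iter {
	struct demo_rmap_desc *desc;
	int pos;
};

static uint64_t *demo_rmap_first(struct demo_rmap_head *head,
				 struct demo_rmap_iter *iter)
{
	if (head->desc) {
		iter->desc = head->desc;
		iter->pos = 0;
		return iter->desc->sptes[0];
	}
	iter->desc = NULL;
	return head->single;
}

static uint64_t *demo_rmap_next(struct demo_rmap_iter *iter)
{
	if (!iter->desc || ++iter->pos >= DEMO_DESC_SPTES)
		return NULL;
	return iter->desc->sptes[iter->pos];	/* NULL past the last used slot */
}

int main(void)
{
	uint64_t a = 1, b = 2;
	struct demo_rmap_desc desc = { .sptes = { &a, &b } };
	struct demo_rmap_head head = { .desc = &desc };
	struct demo_rmap_iter iter;

	for (uint64_t *p = demo_rmap_first(&head, &iter); p; p = demo_rmap_next(&iter))
		printf("spte value %llu\n", (unsigned long long)*p);
	return 0;
}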
1189 static void drop_spte(struct kvm *kvm, u64 *sptep)
1191 u64 old_spte = mmu_spte_clear_track_bits(kvm, sptep);
1194 rmap_remove(kvm, sptep);
1197 static void drop_large_spte(struct kvm *kvm, u64 *sptep, bool flush)
1201 sp = sptep_to_sp(sptep);
1204 drop_spte(kvm, sptep);
1207 kvm_flush_remote_tlbs_sptep(kvm, sptep);
1211 Write-protect the specified @sptep; @pt_protect indicates whether
1223 static bool spte_write_protect(u64 *sptep, bool pt_protect)
1225 u64 spte = *sptep;
1235 return mmu_spte_update(sptep, spte);
1241 u64 *sptep;
1245 for_each_rmap_spte(rmap_head, &iter, sptep)
1246 flush |= spte_write_protect(sptep, pt_protect);
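rmap_write_protect() above ORs the per-SPTE results together because the return value is a flush decision: only if at least one live, writable translation actually changed does the caller need to flush remote TLBs. A minimal userspace sketch of that pattern; the bit layout and demo_* names are illustrative.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_PRESENT_MASK  (1ull << 11)
#define DEMO_WRITABLE_MASK (1ull << 1)

static bool demo_spte_write_protect(uint64_t *sptep)
{
	uint64_t spte = *sptep;

	if (!(spte & DEMO_PRESENT_MASK) || !(spte & DEMO_WRITABLE_MASK))
		return false;	/* nothing changed, no flush required */

	*sptep = spte & ~DEMO_WRITABLE_MASK;
	return true;		/* changed a live translation: flush TLBs */
}

int main(void)
{
	uint64_t sptes[] = {
		0x1000 | DEMO_PRESENT_MASK | DEMO_WRITABLE_MASK,
		0x2000 | DEMO_PRESENT_MASK,
	};
	bool flush = false;

	for (int i = 0; i < 2; i++)
		flush |= demo_spte_write_protect(&sptes[i]);

	printf("flush needed: %s\n", flush ? "yes" : "no");
	return 0;
}

The same accumulate-a-flush-flag shape appears again in the dirty-log clearing loop just below.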
1251 static bool spte_clear_dirty(u64 *sptep)
1253 u64 spte = *sptep;
1257 return mmu_spte_update(sptep, spte);
1260 static bool spte_wrprot_for_clear_dirty(u64 *sptep)
1263 (unsigned long *)sptep);
1264 if (was_writable && !spte_ad_enabled(*sptep))
1265 kvm_set_pfn_dirty(spte_to_pfn(*sptep));
1279 u64 *sptep;
1283 for_each_rmap_spte(rmap_head, &iter, sptep)
1284 if (spte_ad_need_write_protect(*sptep))
1285 flush |= spte_wrprot_for_clear_dirty(sptep);
1287 flush |= spte_clear_dirty(sptep);
1455 u64 *sptep;
1465 for_each_rmap_spte(rmap_head, &iter, sptep) {
1469 kvm_zap_one_rmap_spte(kvm, rmap_head, sptep);
1473 *sptep, new_pfn);
1475 mmu_spte_clear_track_bits(kvm, sptep);
1476 mmu_spte_set(sptep, new_spte);
1610 u64 *sptep;
1614 for_each_rmap_spte(rmap_head, &iter, sptep)
1615 young |= mmu_spte_age(sptep);
1624 u64 *sptep;
1627 for_each_rmap_spte(rmap_head, &iter, sptep)
1628 if (is_accessed_spte(*sptep))
1773 u64 *sptep;
1776 for_each_rmap_spte(&sp->parent_ptes, &iter, sptep) {
1777 mark_unsync(sptep);
2301 static union kvm_mmu_page_role kvm_mmu_child_role(u64 *sptep, bool direct,
2304 struct kvm_mmu_page *parent_sp = sptep_to_sp(sptep);
2341 role.quadrant = spte_index(sptep) & 1;
2348 u64 *sptep, gfn_t gfn,
2353 if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep))
2356 role = kvm_mmu_child_role(sptep, direct, access);
2402 iterator->sptep = ((u64 *)__va(iterator->shadow_addr)) + iterator->index;
2420 __shadow_walk_next(iterator, *iterator->sptep);
2424 struct kvm_mmu_memory_cache *cache, u64 *sptep,
2436 if (is_shadow_present_pte(*sptep))
2437 drop_large_spte(kvm, sptep, flush);
2441 mmu_spte_set(sptep, spte);
2443 mmu_page_add_parent_pte(cache, sp, sptep);
2455 mark_unsync(sptep);
2458 static void link_shadow_page(struct kvm_vcpu *vcpu, u64 *sptep,
2461 __link_shadow_page(vcpu->kvm, &vcpu->arch.mmu_pte_list_desc_cache, sptep, sp, true);
2464 static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep,
2467 if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep)) {
2477 child = spte_to_child_sp(*sptep);
2481 drop_parent_pte(vcpu->kvm, child, sptep);
2482 kvm_flush_remote_tlbs_sptep(vcpu->kvm, sptep);
2532 u64 *sptep;
2535 while ((sptep = rmap_get_first(&sp->parent_ptes, &iter)))
2536 drop_parent_pte(kvm, sp, sptep);
2901 u64 *sptep, unsigned int pte_access, gfn_t gfn,
2904 struct kvm_mmu_page *sp = sptep_to_sp(sptep);
2919 mark_mmio_spte(vcpu, sptep, gfn, pte_access);
2923 if (is_shadow_present_pte(*sptep)) {
2928 if (level > PG_LEVEL_4K && !is_large_pte(*sptep)) {
2930 u64 pte = *sptep;
2933 drop_parent_pte(vcpu->kvm, child, sptep);
2935 } else if (pfn != spte_to_pfn(*sptep)) {
2936 drop_spte(vcpu->kvm, sptep);
2942 wrprot = make_spte(vcpu, sp, slot, pte_access, gfn, pfn, *sptep, prefetch,
2945 if (*sptep == spte) {
2948 flush |= mmu_spte_update(sptep, spte);
2949 trace_kvm_mmu_set_spte(level, gfn, sptep);
2962 rmap_add(vcpu, slot, sptep, gfn, pte_access);
2965 kvm_mmu_page_set_access(sp, spte_index(sptep), pte_access);
3000 struct kvm_mmu_page *sp, u64 *sptep)
3007 i = spte_index(sptep) & ~(PTE_PREFETCH_NUM - 1);
3011 if (is_shadow_present_pte(*spte) || spte == sptep) {
3024 static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep)
3028 sp = sptep_to_sp(sptep);
3048 __direct_pte_prefetch(vcpu, sp, sptep);
3233 disallowed_hugepage_adjust(fault, *it.sptep, it.level);
3239 sp = kvm_mmu_get_child_sp(vcpu, it.sptep, base_gfn, true, ACC_ALL);
3243 link_shadow_page(vcpu, it.sptep, sp);
3252 ret = mmu_set_spte(vcpu, fault->slot, it.sptep, ACC_ALL,
3257 direct_pte_prefetch(vcpu, it.sptep);
3363 u64 *sptep, u64 old_spte, u64 new_spte)
3377 if (!try_cmpxchg64(sptep, &old_spte, new_spte))
3405 * - The returned sptep must not be used after walk_shadow_page_lockless_end.
3411 u64 *sptep = NULL;
3414 sptep = iterator.sptep;
3418 return sptep;
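The fast page fault path above pairs a lockless walk (whose sptep, per the rule at 3405, must not be used after walk_shadow_page_lockless_end) with a compare-and-exchange fix: try_cmpxchg64() only installs the new SPTE if the entry still holds the value observed during the walk. A userspace sketch of that fixup step; the demo_* names are illustrative.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool demo_fast_fix_spte(_Atomic uint64_t *sptep, uint64_t old_spte,
			       uint64_t new_spte)
{
	/* Like try_cmpxchg64(): succeeds only if *sptep still equals old_spte. */
	return atomic_compare_exchange_strong(sptep, &old_spte, new_spte);
}

int main(void)
{
	_Atomic uint64_t spte = 0x1000;

	if (demo_fast_fix_spte(&spte, 0x1000, 0x1000 | (1ull << 1)))
		printf("fast fix applied\n");
	else
		printf("SPTE changed under us: retry or take the slow path\n");
	return 0;
}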
3429 u64 *sptep = NULL;
3441 sptep = kvm_tdp_mmu_fast_pf_get_last_sptep(vcpu, fault->addr, &spte);
3443 sptep = fast_pf_get_last_sptep(vcpu, fault->addr, &spte);
3448 sp = sptep_to_sp(sptep);
3517 if (fast_pf_fix_direct_spte(vcpu, fault, sptep, spte, new_spte)) {
3529 trace_fast_page_fault(vcpu, fault, sptep, spte, ret);
4088 spte = mmu_spte_get_lockless(iterator.sptep);
4097 static bool get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
4114 *sptep = 0ull;
4118 *sptep = sptes[leaf];
4206 clear_sp_write_flooding_count(iterator.sptep);
4665 static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
4668 if (unlikely(is_mmio_spte(*sptep))) {
4669 if (gfn != get_mmio_spte_gfn(*sptep)) {
4670 mmu_spte_clear_no_track(sptep);
4674 mark_mmio_spte(vcpu, sptep, gfn, access);
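sync_mmio_spte() above keeps cached MMIO markers honest: a marker whose recorded gfn no longer matches is cleared without tracking, otherwise it is re-marked with the current access bits, and in either case the caller is told the entry was handled. A sketch of that shape under an invented MMIO encoding (DEMO_MMIO_FLAG, gfn shifted by 12); these are not the kernel's spte masks.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_MMIO_FLAG (1ull << 62)	/* illustrative "this is an MMIO marker" bit */

static uint64_t demo_make_mmio_spte(uint64_t gfn, uint64_t access)
{
	return DEMO_MMIO_FLAG | (gfn << 12) | (access & 0x7);
}

static uint64_t demo_mmio_spte_gfn(uint64_t spte)
{
	return (spte & ~(DEMO_MMIO_FLAG | 0x7ull)) >> 12;
}

/* Returns true when the entry was an MMIO marker and has been handled. */
static bool demo_sync_mmio_spte(uint64_t *sptep, uint64_t gfn, uint64_t access)
{
	if (*sptep & DEMO_MMIO_FLAG) {
		if (demo_mmio_spte_gfn(*sptep) != gfn) {
			*sptep = 0;	/* stale marker: clear without tracking */
			return true;
		}
		*sptep = demo_make_mmio_spte(gfn, access);	/* refresh access bits */
		return true;
	}
	return false;
}

int main(void)
{
	uint64_t spte = demo_make_mmio_spte(0x42, 0x3);

	printf("same gfn handled: %d\n", demo_sync_mmio_spte(&spte, 0x42, 0x7));
	printf("stale gfn handled: %d\n", demo_sync_mmio_spte(&spte, 0x99, 0x7));
	printf("spte now: %#llx\n", (unsigned long long)spte);
	return 0;
}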
5788 struct kvm_mmu_page *sp = sptep_to_sp(iterator.sptep);
5794 mmu_page_zap_pte(vcpu->kvm, sp, iterator.sptep, NULL);
5796 kvm_flush_remote_tlbs_sptep(vcpu->kvm, iterator.sptep);
6377 u64 *sptep, spte;
6384 sptep = &sp->spt[index];
6401 if (is_shadow_present_pte(*sptep)) {
6402 flush |= !is_last_spte(*sptep, sp->role.level);
6407 mmu_spte_set(sptep, spte);
6408 __rmap_add(kvm, cache, slot, sptep, gfn, sp->role.access);
6572 u64 *sptep;
6578 for_each_rmap_spte(rmap_head, &iter, sptep) {
6579 sp = sptep_to_sp(sptep);
6591 kvm_zap_one_rmap_spte(kvm, rmap_head, sptep);
6594 kvm_flush_remote_tlbs_sptep(kvm, sptep);