Lines Matching refs:spte
30 #include "spte.h"
174 #define for_each_shadow_entry_lockless(_vcpu, _addr, _walker, spte) \
177 ({ spte = mmu_spte_get_lockless(_walker.sptep); 1; }); \
178 __shadow_walk_next(&(_walker), spte))
184 static void mmu_spte_set(u64 *sptep, u64 spte);
299 u64 spte = make_mmio_spte(vcpu, gfn, access);
301 trace_mark_mmio_spte(sptep, gfn, spte);
302 mmu_spte_set(sptep, spte);
305 static gfn_t get_mmio_spte_gfn(u64 spte)
307 u64 gpa = spte & shadow_nonpresent_or_rsvd_lower_gfn_mask;
309 gpa |= (spte >> SHADOW_NONPRESENT_OR_RSVD_MASK_LEN)
315 static unsigned get_mmio_spte_access(u64 spte)
317 return spte & shadow_mmio_access_mask;
320 static bool check_mmio_spte(struct kvm_vcpu *vcpu, u64 spte)
329 spte_gen = get_mmio_spte_generation(spte);
331 trace_check_mmio_spte(spte, kvm_gen, spte_gen);
341 static void __set_spte(u64 *sptep, u64 spte)
343 WRITE_ONCE(*sptep, spte);
346 static void __update_clear_spte_fast(u64 *sptep, u64 spte)
348 WRITE_ONCE(*sptep, spte);
351 static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
353 return xchg(sptep, spte);
366 u64 spte;
369 static void count_spte_clear(u64 *sptep, u64 spte)
373 if (is_shadow_present_pte(spte))
376 /* Ensure the spte is completely set before we increase the count */
381 static void __set_spte(u64 *sptep, u64 spte)
386 sspte = (union split_spte)spte;
391 If we map the spte from nonpresent to present, we should store
393 * fetch this spte while we are setting the spte.
400 static void __update_clear_spte_fast(u64 *sptep, u64 spte)
405 sspte = (union split_spte)spte;
410 * If we map the spte from present to nonpresent, we should clear
416 count_spte_clear(sptep, spte);
419 static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
424 sspte = (union split_spte)spte;
430 count_spte_clear(sptep, spte);
432 return orig.spte;
436 The idea of using this lightweight way to get the spte on an x86_32 guest is from
439 * An spte tlb flush may be pending, because kvm_set_pte_rmap
441 * we need to protect against in-progress updates of the spte.
443 * Reading the spte while an update is in progress may get the old value
444 * for the high part of the spte. The race is fine for a present->non-present
445 change (because the high part of the spte is ignored for a non-present spte),
446 * but for a present->present change we must reread the spte.
450 * present->non-present updates: if it changed while reading the spte,
456 union split_spte spte, *orig = (union split_spte *)sptep;
463 spte.spte_low = orig->spte_low;
466 spte.spte_high = orig->spte_high;
469 if (unlikely(spte.spte_low != orig->spte_low ||
473 return spte.spte;
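
The hits from line 366 through 473 are the x86_32 path, where a 64-bit spte must be written and read as two 32-bit halves and __get_spte_lockless() retries whenever a concurrent clear may have torn the value. The following user-space sketch only illustrates that retry pattern; the names demo_page, clear_count and read_lockless are invented, and the __sync_synchronize() calls stand in for the smp_rmb()/smp_wmb() pairing the kernel uses.

#include <stdint.h>

/* Two 32-bit halves that together hold one 64-bit spte-like value. */
union split_val {
	struct {
		volatile uint32_t lo;
		volatile uint32_t hi;
	};
	uint64_t full;
};

struct demo_page {
	volatile unsigned int clear_count;  /* bumped on present -> non-present clears */
	union split_val slot;
};

/* Lockless read that retries if a concurrent writer may have torn the value. */
static uint64_t read_lockless(struct demo_page *pg)
{
	union split_val v;
	unsigned int count;

retry:
	count = pg->clear_count;
	__sync_synchronize();

	v.lo = pg->slot.lo;
	__sync_synchronize();
	v.hi = pg->slot.hi;
	__sync_synchronize();

	/* A changed low half or clear count means the read may be torn: retry. */
	if (v.lo != pg->slot.lo || count != pg->clear_count)
		goto retry;

	return v.full;
}
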
481 * the spte.
520 * spte, even though the writable spte might be cached on a CPU's TLB.
533 An spte updated out of mmu-lock is safe, since
605 Directly clear the spte without caring about the state bits of sptep,
606 * it is used to set the upper level spte.
621 u64 spte = mmu_spte_get_lockless(sptep);
623 if (!is_accessed_spte(spte))
626 if (spte_ad_enabled(spte)) {
634 if (is_writable_pte(spte))
635 kvm_set_pfn_dirty(spte_to_pfn(spte));
637 spte = mark_spte_for_access_track(spte);
638 mmu_spte_update_no_track(sptep, spte);
661 * Make sure a following spte read is not reordered ahead of the write
919 If bit zero of rmap_head->val is clear, then it points to the only spte
927 static int pte_list_add(struct kvm_mmu_memory_cache *cache, u64 *spte,
934 rmap_head->val = (unsigned long)spte;
938 desc->sptes[1] = spte;
958 desc->sptes[desc->spte_count++] = spte;
1000 static void pte_list_remove(struct kvm *kvm, u64 *spte,
1010 if (KVM_BUG_ON_DATA_CORRUPTION((u64 *)rmap_head->val != spte, kvm))
1018 if (desc->sptes[i] == spte) {
1089 static void rmap_remove(struct kvm *kvm, u64 *spte)
1097 sp = sptep_to_sp(spte);
1098 gfn = kvm_mmu_page_get_gfn(sp, spte_index(spte));
1110 pte_list_remove(kvm, spte, rmap_head);
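
The pte_list_add(), pte_list_remove() and rmap_remove() hits above all manipulate the encoding described at line 919: rmap_head->val is either a direct pointer to the one spte (bit zero clear) or a tagged pointer to a descriptor that holds several sptes. A rough sketch of that tagged-pointer scheme, with invented names (rmap_demo, desc_demo, MANY_FLAG) and with removal, overflow chaining and allocation-failure handling left out:

#include <stdint.h>
#include <stdlib.h>

#define MANY_FLAG 1UL	/* bit zero set: val points to a descriptor, not an spte */

struct desc_demo {
	uint64_t *sptes[4];
	int count;
};

struct rmap_demo {
	unsigned long val;	/* 0, a lone spte pointer, or (descriptor | MANY_FLAG) */
};

static void rmap_demo_add(struct rmap_demo *head, uint64_t *spte)
{
	struct desc_demo *desc;

	if (!head->val) {
		/* First spte: store the pointer directly; bit zero stays clear. */
		head->val = (unsigned long)spte;
	} else if (!(head->val & MANY_FLAG)) {
		/* Second spte: move to a descriptor and tag bit zero. */
		desc = calloc(1, sizeof(*desc));
		desc->sptes[0] = (uint64_t *)head->val;
		desc->sptes[1] = spte;
		desc->count = 2;
		head->val = (unsigned long)desc | MANY_FLAG;
	} else {
		/* Already a descriptor: append (overflow chaining omitted here). */
		desc = (struct desc_demo *)(head->val & ~MANY_FLAG);
		desc->sptes[desc->count++] = spte;
	}
}
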
1212 spte write-protection is caused by protecting the shadow page table.
1214 Note: write protection is different between dirty logging and spte
1216 - for dirty logging, the spte can be set to writable at any time if
1218 * - for spte protection, the spte can be writable only after unsync-ing
1225 u64 spte = *sptep;
1227 if (!is_writable_pte(spte) &&
1228 !(pt_protect && is_mmu_writable_spte(spte)))
1232 spte &= ~shadow_mmu_writable_mask;
1233 spte = spte & ~PT_WRITABLE_MASK;
1235 return mmu_spte_update(sptep, spte);
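
The comment at lines 1212-1218 and the spte_write_protect() hits that follow distinguish dirty-logging write protection from shadow-page (pt_protect) write protection. The sketch below only illustrates that distinction and is not the kernel code: the bit positions are invented stand-ins for PT_WRITABLE_MASK and shadow_mmu_writable_mask.

#include <stdint.h>
#include <stdbool.h>

#define DEMO_PT_WRITABLE	(1ULL << 1)	/* hardware-writable bit (illustrative position) */
#define DEMO_MMU_WRITABLE	(1ULL << 58)	/* software "may be re-made writable" bit */

static uint64_t demo_write_protect(uint64_t spte, bool pt_protect)
{
	/*
	 * Shadow-page protection also drops the MMU-writable bit, so write
	 * access cannot simply be restored later; dirty logging keeps it,
	 * allowing the spte to be made writable again at any time.
	 */
	if (pt_protect)
		spte &= ~DEMO_MMU_WRITABLE;

	return spte & ~DEMO_PT_WRITABLE;
}
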
1253 u64 spte = *sptep;
1255 KVM_MMU_WARN_ON(!spte_ad_enabled(spte));
1256 spte &= ~shadow_dirty_mask;
1257 return mmu_spte_update(sptep, spte);
1638 u64 *spte, gfn_t gfn, unsigned int access)
1644 sp = sptep_to_sp(spte);
1645 kvm_mmu_page_set_translation(sp, spte_index(spte), gfn, access);
1649 rmap_count = pte_list_add(cache, spte, rmap_head);
1660 u64 *spte, gfn_t gfn, unsigned int access)
1664 __rmap_add(vcpu->kvm, cache, slot, spte, gfn, access);
1770 static void mark_unsync(u64 *spte);
1781 static void mark_unsync(u64 *spte)
1785 sp = sptep_to_sp(spte);
1786 if (__test_and_set_bit(spte_index(spte), sp->unsync_child_bitmap))
2134 static void clear_sp_write_flooding_count(u64 *spte)
2136 __clear_sp_write_flooding_count(sptep_to_sp(spte));
2407 u64 spte)
2409 if (!is_shadow_present_pte(spte) || is_last_spte(spte, iterator->level)) {
2414 iterator->shadow_addr = spte & SPTE_BASE_ADDR_MASK;
2427 u64 spte;
2439 spte = make_nonleaf_spte(sp->spt, sp_ad_disabled(sp));
2441 mmu_spte_set(sptep, spte);
2474 * so we should update the spte at this point to get
2488 u64 *spte, struct list_head *invalid_list)
2493 pte = *spte;
2496 drop_spte(kvm, spte);
2499 drop_parent_pte(kvm, child, spte);
2512 mmu_spte_clear_no_track(spte);
2910 u64 spte;
2943 true, host_writable, &spte);
2945 if (*sptep == spte) {
2948 flush |= mmu_spte_update(sptep, spte);
3002 u64 *spte, *start = NULL;
3008 spte = sp->spt + i;
3010 for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) {
3011 if (is_shadow_present_pte(*spte) || spte == sptep) {
3014 if (direct_pte_prefetch_many(vcpu, sp, start, spte) < 0)
3018 start = spte;
3021 direct_pte_prefetch_many(vcpu, sp, start, spte);
3196 void disallowed_hugepage_adjust(struct kvm_page_fault *fault, u64 spte, int cur_level)
3200 is_shadow_present_pte(spte) &&
3201 !is_large_pte(spte) &&
3202 spte_to_child_sp(spte)->nx_huge_page_disallowed) {
3277 into the spte, otherwise read access on a readonly gfn also can
3386 static bool is_access_allowed(struct kvm_page_fault *fault, u64 spte)
3389 return is_executable_pte(spte);
3392 return is_writable_pte(spte);
3395 return spte & PT_PRESENT_MASK;
3399 * Returns the last level spte pointer of the shadow page walk for the given
3400 gpa, and sets *spte to the spte value. This spte may be non-present. If no
3401 * walk could be performed, returns NULL and *spte does not contain valid data.
3407 static u64 *fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, gpa_t gpa, u64 *spte)
3415 *spte = old_spte;
3428 u64 spte = 0ull;
3441 sptep = kvm_tdp_mmu_fast_pf_get_last_sptep(vcpu, fault->addr, &spte);
3443 sptep = fast_pf_get_last_sptep(vcpu, fault->addr, &spte);
3445 if (!is_shadow_present_pte(spte))
3449 if (!is_last_spte(spte, sp->role.level))
3462 if (is_access_allowed(fault, spte)) {
3467 new_spte = spte;
3475 if (unlikely(!kvm_ad_enabled()) && is_access_track_spte(spte))
3489 if (fault->write && is_mmu_writable_spte(spte)) {
3493 * Do not fix write-permission on the large spte when
3500 * normal spte to fix the access.
3508 if (new_spte == spte ||
3517 if (fast_pf_fix_direct_spte(vcpu, fault, sptep, spte, new_spte)) {
3529 trace_fast_page_fault(vcpu, fault, sptep, spte, ret);
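
The fast_page_fault() hits between lines 3428 and 3529 all rely on fixing an spte without taking mmu_lock: read the last-level spte, build a new value that only restores bits (the accessed bit, or write access saved via the MMU-writable state), and publish it with a compare-and-exchange so a concurrent zap or update is never overwritten. Below is a minimal sketch of that publish step with an invented name, demo_try_fix_spte; on failure the caller would re-read the spte and retry or fall back to the slow path, roughly as fast_pf_fix_direct_spte() and its caller do with a cmpxchg.

#include <stdint.h>
#include <stdbool.h>
#include <stdatomic.h>

/*
 * Publish a fixed spte only if it has not changed since it was read outside
 * of mmu_lock; returns false if a concurrent update won the race.
 */
static bool demo_try_fix_spte(_Atomic uint64_t *sptep, uint64_t old_spte,
			      uint64_t new_spte)
{
	return atomic_compare_exchange_strong(sptep, &old_spte, new_spte);
}
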
4081 u64 spte;
4086 __shadow_walk_next(&iterator, spte)) {
4088 spte = mmu_spte_get_lockless(iterator.sptep);
4090 sptes[leaf] = spte;
4135 pr_err("%s: reserved bits set on MMU-present spte, addr 0x%llx, hierarchy:\n",
4138 pr_err("------ spte = 0x%llx level = %d, rsvd bits = 0x%llx",
4148 u64 spte;
4154 reserved = get_mmio_spte(vcpu, addr, &spte);
4158 if (is_mmio_spte(spte)) {
4159 gfn_t gfn = get_mmio_spte_gfn(spte);
4160 unsigned int access = get_mmio_spte_access(spte);
4162 if (!check_mmio_spte(vcpu, spte))
4202 u64 spte;
4205 for_each_shadow_entry_lockless(vcpu, addr, iterator, spte)
5619 u64 *spte;
5643 spte = &sp->spt[page_offset / sizeof(*spte)];
5644 return spte;
5653 u64 entry, gentry, *spte;
5678 spte = get_written_sptes(sp, gpa, &npte);
5679 if (!spte)
5683 entry = *spte;
5684 mmu_page_zap_pte(vcpu->kvm, sp, spte, NULL);
5689 ++spte;
6377 u64 *sptep, spte;
6406 spte = make_huge_page_split_spte(kvm, huge_spte, sp->role, index);
6407 mmu_spte_set(sptep, spte);
6421 u64 spte;
6426 spte = *huge_sptep;
6449 trace_kvm_mmu_split_huge_page(gfn, spte, level, r);