Lines Matching refs:spte

27 #include "spte.h"
166 #define for_each_shadow_entry_lockless(_vcpu, _addr, _walker, spte) \
169 ({ spte = mmu_spte_get_lockless(_walker.sptep); 1; }); \
170 __shadow_walk_next(&(_walker), spte))
176 static void mmu_spte_set(u64 *sptep, u64 spte);
226 static gfn_t get_mmio_spte_gfn(u64 spte)
228 u64 gpa = spte & shadow_nonpresent_or_rsvd_lower_gfn_mask;
230 gpa |= (spte >> SHADOW_NONPRESENT_OR_RSVD_MASK_LEN)
236 static unsigned get_mmio_spte_access(u64 spte)
238 return spte & shadow_mmio_access_mask;
252 static bool check_mmio_spte(struct kvm_vcpu *vcpu, u64 spte)
261 spte_gen = get_mmio_spte_generation(spte);
263 trace_check_mmio_spte(spte, kvm_gen, spte_gen);
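
The fragments from source lines 226-263 unpack the gfn, the access bits, and a generation number that were folded into an MMIO spte, and check_mmio_spte() only trusts the cached entry while its recorded memslot generation is still current. A minimal standalone sketch of that generation check, with made-up bit positions and simplified helpers (the kernel splits the generation across two bit ranges; this sketch uses one contiguous field):

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical layout: low bits hold access flags, bits 52..60 hold the
 * generation the MMIO spte was created under. */
#define MMIO_ACCESS_MASK   0x7ull
#define MMIO_GEN_SHIFT     52
#define MMIO_GEN_MASK      0x1ffull

static uint64_t make_mmio_spte(unsigned access, uint64_t gen)
{
	return (access & MMIO_ACCESS_MASK) |
	       ((gen & MMIO_GEN_MASK) << MMIO_GEN_SHIFT);
}

static uint64_t get_mmio_spte_generation(uint64_t spte)
{
	return (spte >> MMIO_GEN_SHIFT) & MMIO_GEN_MASK;
}

/* Mirrors the shape of check_mmio_spte(): the cached MMIO entry is only
 * trusted while the generation it recorded still matches the current one. */
static bool check_mmio_spte(uint64_t spte, uint64_t current_gen)
{
	return get_mmio_spte_generation(spte) == (current_gen & MMIO_GEN_MASK);
}

int main(void)
{
	uint64_t spte = make_mmio_spte(0x3, 7);

	printf("gen 7 valid: %d\n", check_mmio_spte(spte, 7));  /* 1 */
	printf("gen 8 valid: %d\n", check_mmio_spte(spte, 8));  /* 0: stale */
	return 0;
}
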
291 static void __set_spte(u64 *sptep, u64 spte)
293 WRITE_ONCE(*sptep, spte);
296 static void __update_clear_spte_fast(u64 *sptep, u64 spte)
298 WRITE_ONCE(*sptep, spte);
301 static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
303 return xchg(sptep, spte);
316 u64 spte;
319 static void count_spte_clear(u64 *sptep, u64 spte)
323 if (is_shadow_present_pte(spte))
326 /* Ensure the spte is completely set before we increase the count */
331 static void __set_spte(u64 *sptep, u64 spte)
336 sspte = (union split_spte)spte;
341 If we map the spte from nonpresent to present, we should store
343 * fetch this spte while we are setting the spte.
350 static void __update_clear_spte_fast(u64 *sptep, u64 spte)
355 sspte = (union split_spte)spte;
360 * If we map the spte from present to nonpresent, we should clear
366 count_spte_clear(sptep, spte);
369 static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
374 sspte = (union split_spte)spte;
380 count_spte_clear(sptep, spte);
382 return orig.spte;
386 The idea of using this lightweight way to get the spte on an x86_32 guest is from
389 * An spte tlb flush may be pending, because kvm_set_pte_rmapp
391 * we need to protect against in-progress updates of the spte.
393 * Reading the spte while an update is in progress may get the old value
394 * for the high part of the spte. The race is fine for a present->non-present
395 change (because the high part of the spte is ignored for a non-present spte),
396 * but for a present->present change we must reread the spte.
400 * present->non-present updates: if it changed while reading the spte,
406 union split_spte spte, *orig = (union split_spte *)sptep;
413 spte.spte_low = orig->spte_low;
416 spte.spte_high = orig->spte_high;
419 if (unlikely(spte.spte_low != orig->spte_low ||
423 return spte.spte;
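
Source lines 386-423 describe the x86_32 scheme: each 64-bit spte is written as two 32-bit halves, so a lockless reader can observe a torn value and has to retry. A self-contained sketch of that retry loop, assuming a per-page clear_spte_count that writers bump after clearing a present spte; the names and the exact barriers are illustrative, not the kernel's code:

#include <stdint.h>
#include <stdatomic.h>

union split_spte {
	struct {
		uint32_t spte_low;
		uint32_t spte_high;
	};
	uint64_t spte;
};

struct shadow_page {
	/* Writers increment this after clearing a present spte. */
	atomic_int clear_spte_count;
};

uint64_t get_spte_lockless(union split_spte *orig, struct shadow_page *sp)
{
	union split_spte spte;
	int count;

retry:
	count = atomic_load(&sp->clear_spte_count);
	atomic_thread_fence(memory_order_acquire);

	spte.spte_low = __atomic_load_n(&orig->spte_low, __ATOMIC_RELAXED);
	atomic_thread_fence(memory_order_acquire);
	spte.spte_high = __atomic_load_n(&orig->spte_high, __ATOMIC_RELAXED);
	atomic_thread_fence(memory_order_acquire);

	/*
	 * A present->present update may have changed the low half under us,
	 * and a present->non-present clear bumps clear_spte_count; in either
	 * case the high half we read cannot be trusted, so start over.
	 */
	if (spte.spte_low != __atomic_load_n(&orig->spte_low, __ATOMIC_RELAXED) ||
	    count != atomic_load(&sp->clear_spte_count))
		goto retry;

	return spte.spte;
}
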
427 static bool spte_has_volatile_bits(u64 spte)
429 if (!is_shadow_present_pte(spte))
433 * Always atomically update spte if it can be updated
438 if (spte_can_locklessly_be_made_writable(spte) ||
439 is_access_track_spte(spte))
442 if (spte_ad_enabled(spte)) {
443 if ((spte & shadow_accessed_mask) == 0 ||
444 (is_writable_pte(spte) && (spte & shadow_dirty_mask) == 0))
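
Source lines 427-444 decide whether an spte update needs an atomic exchange rather than a plain store: any bit that hardware or a lockless path may flip underneath (the lockless-writable marker, access-track state, or accessed/dirty when A/D bits are enabled) makes the spte volatile. A compressed standalone sketch of that decision, with hypothetical bit assignments; whether A/D bits are in use is passed as a flag here rather than derived from the spte itself:

#include <stdint.h>
#include <stdbool.h>

/* Hypothetical bit assignments, for illustration only. */
#define SPTE_PRESENT        (1ull << 0)
#define SPTE_WRITABLE       (1ull << 1)
#define SPTE_ACCESSED       (1ull << 5)
#define SPTE_DIRTY          (1ull << 6)
#define SPTE_MMU_WRITABLE   (1ull << 58)  /* can be made writable locklessly */
#define SPTE_ACCESS_TRACK   (1ull << 59)  /* access-tracking marker */

bool spte_has_volatile_bits(uint64_t spte, bool ad_enabled)
{
	if (!(spte & SPTE_PRESENT))
		return false;

	/* A lockless fault handler may set the writable bit or clear the
	 * access-track marker at any time. */
	if ((spte & SPTE_MMU_WRITABLE) || (spte & SPTE_ACCESS_TRACK))
		return true;

	/* With A/D bits enabled, hardware may set accessed/dirty behind us. */
	if (ad_enabled) {
		if (!(spte & SPTE_ACCESSED))
			return true;
		if ((spte & SPTE_WRITABLE) && !(spte & SPTE_DIRTY))
			return true;
	}
	return false;
}

/* Callers use the result to pick a plain store vs. an atomic xchg, so that
 * no concurrent update of those bits is lost. */
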
455 * the spte.
491 * Whenever we overwrite a writable spte with a read-only one we
493 * will find a read-only spte, even though the writable spte
508 Updating the spte out of mmu-lock is safe, since
573 Directly clear the spte without caring about the state bits of sptep;
574 it is used to set the upper level spte.
587 static u64 restore_acc_track_spte(u64 spte)
589 u64 new_spte = spte;
590 u64 saved_bits = (spte >> SHADOW_ACC_TRACK_SAVED_BITS_SHIFT)
593 WARN_ON_ONCE(spte_ad_enabled(spte));
594 WARN_ON_ONCE(!is_access_track_spte(spte));
607 u64 spte = mmu_spte_get_lockless(sptep);
609 if (!is_accessed_spte(spte))
612 if (spte_ad_enabled(spte)) {
620 if (is_writable_pte(spte))
621 kvm_set_pfn_dirty(spte_to_pfn(spte));
623 spte = mark_spte_for_access_track(spte);
624 mmu_spte_update_no_track(sptep, spte);
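
Source lines 587-624 show the access-tracking scheme used when A/D bits are not available: marking an spte for access tracking parks its permission bits in spare high bits and clears the live copies (so the next access faults and proves the page was touched), and restoring shifts them back. A standalone round-trip sketch with an assumed shift and mask:

#include <stdint.h>
#include <assert.h>

/* Assumed layout: R/W/X live in bits 0-2 and are parked at bits 54-56 while
 * the spte is access-tracked; bits 54-56 must be unused in a live spte. */
#define ACC_TRACK_SAVED_MASK   0x7ull
#define ACC_TRACK_SAVED_SHIFT  54

static uint64_t mark_spte_for_access_track(uint64_t spte)
{
	/* Park the permission bits high, then clear the live copies so the
	 * next access faults and reports the page as "accessed". */
	spte |= (spte & ACC_TRACK_SAVED_MASK) << ACC_TRACK_SAVED_SHIFT;
	spte &= ~ACC_TRACK_SAVED_MASK;
	return spte;
}

static uint64_t restore_acc_track_spte(uint64_t spte)
{
	uint64_t saved = (spte >> ACC_TRACK_SAVED_SHIFT) & ACC_TRACK_SAVED_MASK;

	spte &= ~(ACC_TRACK_SAVED_MASK << ACC_TRACK_SAVED_SHIFT);
	spte |= saved;
	return spte;
}

int main(void)
{
	uint64_t spte = 0x00000000dead0007ull;	/* fake pfn bits, R/W/X set */

	assert(restore_acc_track_spte(mark_spte_for_access_track(spte)) == spte);
	return 0;
}
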
639 * Make sure a following spte read is not reordered ahead of the write
828 If bit zero of rmap_head->val is clear, then it points to the only spte
836 static int pte_list_add(struct kvm_vcpu *vcpu, u64 *spte,
843 rmap_printk("pte_list_add: %p %llx 0->1\n", spte, *spte);
844 rmap_head->val = (unsigned long)spte;
846 rmap_printk("pte_list_add: %p %llx 1->many\n", spte, *spte);
849 desc->sptes[1] = spte;
853 rmap_printk("pte_list_add: %p %llx many->many\n", spte, *spte);
867 desc->sptes[i] = spte;
895 static void __pte_list_remove(u64 *spte, struct kvm_rmap_head *rmap_head)
902 pr_err("%s: %p 0->BUG\n", __func__, spte);
905 rmap_printk("%s: %p 1->0\n", __func__, spte);
906 if ((u64 *)rmap_head->val != spte) {
907 pr_err("%s: %p 1->BUG\n", __func__, spte);
912 rmap_printk("%s: %p many->many\n", __func__, spte);
917 if (desc->sptes[i] == spte) {
926 pr_err("%s: %p many->many\n", __func__, spte);
965 static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
970 sp = sptep_to_sp(spte);
971 kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn);
973 return pte_list_add(vcpu, spte, rmap_head);
976 static void rmap_remove(struct kvm *kvm, u64 *spte)
982 sp = sptep_to_sp(spte);
983 gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt);
985 __pte_list_remove(spte, rmap_head);
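
The comment quoted at source line 828 describes the rmap encoding used by the pte_list_add()/__pte_list_remove() fragments above: when bit zero of rmap_head->val is clear, val is a direct pointer to the single spte; when it is set, val (with the tag masked off) points to a pte_list_desc holding several sptes. A small standalone sketch of that tagged-pointer one-or-many scheme; the struct layout and PTE_LIST_EXT value are illustrative, and descriptor chaining via ->more is omitted:

#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>

#define PTE_LIST_EXT 3	/* illustrative size, not the kernel's */

struct pte_list_desc {
	uint64_t *sptes[PTE_LIST_EXT];
	struct pte_list_desc *more;
};

struct rmap_head {
	unsigned long val;	/* bit 0 clear: single spte; set: desc list */
};

static void pte_list_add(struct rmap_head *head, uint64_t *spte)
{
	struct pte_list_desc *desc;

	if (!head->val) {
		/* 0 -> 1: store the spte pointer directly, tag bit clear. */
		head->val = (unsigned long)spte;
	} else if (!(head->val & 1)) {
		/* 1 -> many: promote the single entry to a descriptor. */
		desc = calloc(1, sizeof(*desc));
		desc->sptes[0] = (uint64_t *)head->val;
		desc->sptes[1] = spte;
		head->val = (unsigned long)desc | 1;
	} else {
		/* many -> many: append to the first free slot. */
		desc = (struct pte_list_desc *)(head->val & ~1ul);
		for (int i = 0; i < PTE_LIST_EXT; i++) {
			if (!desc->sptes[i]) {
				desc->sptes[i] = spte;
				return;
			}
		}
	}
}

int main(void)
{
	struct rmap_head head = { 0 };
	uint64_t a, b;

	pte_list_add(&head, &a);
	printf("single entry, tag=%lu\n", head.val & 1);	/* 0 */
	pte_list_add(&head, &b);
	printf("descriptor,   tag=%lu\n", head.val & 1);	/* 1 */
	return 0;
}
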
1095 spte write-protection is caused by protecting the shadow page table.
1097 Note: write protection is different between dirty logging and spte
1099 - for dirty logging, the spte can be set to writable at any time if
1101 * - for spte protection, the spte can be writable only after unsync-ing
1108 u64 spte = *sptep;
1110 if (!is_writable_pte(spte) &&
1111 !(pt_protect && spte_can_locklessly_be_made_writable(spte)))
1114 rmap_printk("rmap_write_protect: spte %p %llx\n", sptep, *sptep);
1117 spte &= ~SPTE_MMU_WRITEABLE;
1118 spte = spte & ~PT_WRITABLE_MASK;
1120 return mmu_spte_update(sptep, spte);
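
Source lines 1095-1120 write-protect an spte by clearing the hardware writable bit, and additionally the software MMU-writable bit when the protection is for the shadow page table rather than for dirty logging, so that a lockless fault cannot simply restore write access. A compressed sketch with assumed bit positions:

#include <stdint.h>
#include <stdbool.h>

#define PT_WRITABLE        (1ull << 1)   /* hardware writable bit */
#define SPTE_MMU_WRITABLE  (1ull << 58)  /* assumed: "may be made writable locklessly" */

/*
 * Returns whether the spte changed; the real code funnels the store
 * through mmu_spte_update() so the caller knows whether to flush TLBs.
 */
bool spte_write_protect(uint64_t *sptep, bool pt_protect)
{
	uint64_t spte = *sptep;

	/* Nothing to do if it is not writable and cannot be made writable. */
	if (!(spte & PT_WRITABLE) &&
	    !(pt_protect && (spte & SPTE_MMU_WRITABLE)))
		return false;

	if (pt_protect)
		spte &= ~SPTE_MMU_WRITABLE;	/* spte protection: pin it read-only */
	spte &= ~PT_WRITABLE;

	*sptep = spte;
	return true;
}

For dirty logging (pt_protect false) only the hardware bit is dropped, so the fast page fault path may later set it again; shadow-page protection clears both, and the spte can become writable only after the shadow page is unsynced.
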
1139 u64 spte = *sptep;
1141 rmap_printk("rmap_clear_dirty: spte %p %llx\n", sptep, *sptep);
1143 MMU_WARN_ON(!spte_ad_enabled(spte));
1144 spte &= ~shadow_dirty_mask;
1145 return mmu_spte_update(sptep, spte);
1181 u64 spte = *sptep;
1183 rmap_printk("rmap_set_dirty: spte %p %llx\n", sptep, *sptep);
1190 spte |= shadow_dirty_mask;
1192 return mmu_spte_update(sptep, spte);
1322 rmap_printk("%s: spte %p %llx.\n", __func__, sptep, *sptep);
1354 rmap_printk("kvm_set_pte_rmapp: spte %p %llx gfn %llx (%d)\n",
1560 static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
1565 sp = sptep_to_sp(spte);
1683 static void mark_unsync(u64 *spte);
1694 static void mark_unsync(u64 *spte)
1699 sp = sptep_to_sp(spte);
1700 index = spte - sp->spt;
2015 static void clear_sp_write_flooding_count(u64 *spte)
2017 __clear_sp_write_flooding_count(sptep_to_sp(spte));
2162 u64 spte)
2164 if (is_last_spte(spte, iterator->level)) {
2169 iterator->shadow_addr = spte & PT64_BASE_ADDR_MASK;
2181 u64 spte;
2185 spte = make_nonleaf_spte(sp->spt, sp_ad_disabled(sp));
2187 mmu_spte_set(sptep, spte);
2205 * so we should update the spte at this point to get
2219 u64 *spte, struct list_head *invalid_list)
2224 pte = *spte;
2227 drop_spte(kvm, spte);
2232 drop_parent_pte(child, spte);
2245 mmu_spte_clear_no_track(spte);
2565 u64 spte;
2575 can_unsync, host_writable, sp_ad_disabled(sp), &spte);
2577 if (spte & PT_WRITABLE_MASK)
2580 if (*sptep == spte)
2582 else if (mmu_spte_update(sptep, spte))
2598 pgprintk("%s: spte %llx write_fault %d gfn %llx\n", __func__,
2646 pgprintk("%s: setting spte %llx\n", __func__, *sptep);
2705 u64 *spte, *start = NULL;
2711 spte = sp->spt + i;
2713 for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) {
2714 if (is_shadow_present_pte(*spte) || spte == sptep) {
2717 if (direct_pte_prefetch_many(vcpu, sp, start, spte) < 0)
2721 start = spte;
2828 void disallowed_hugepage_adjust(u64 spte, gfn_t gfn, int cur_level,
2834 is_shadow_present_pte(spte) &&
2835 !is_large_pte(spte)) {
2916 into the spte, otherwise read access on a readonly gfn also can
2950 Do not fix the mmio spte with an invalid generation number which
2967 * bit of the spte which can be done out of mmu-lock.
3009 The gfn of a direct spte is stable since it is
3019 static bool is_access_allowed(u32 fault_err_code, u64 spte)
3022 return is_executable_pte(spte);
3025 return is_writable_pte(spte);
3028 return spte & PT_PRESENT_MASK;
3040 u64 spte = 0ull;
3051 for_each_shadow_entry_lockless(vcpu, cr2_or_gpa, iterator, spte)
3052 if (!is_shadow_present_pte(spte))
3056 if (!is_last_spte(spte, sp->role.level))
3069 if (is_access_allowed(error_code, spte)) {
3074 new_spte = spte;
3076 if (is_access_track_spte(spte))
3085 spte_can_locklessly_be_made_writable(spte)) {
3089 * Do not fix write-permission on the large spte. Since
3095 * normal spte to fix the access.
3104 if (new_spte == spte ||
3113 if (fast_pf_fix_direct_spte(vcpu, sp, iterator.sptep, spte,
3128 spte, ret);
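
Source lines 3019-3128 outline the fast, lockless page fault path: walk the shadow table without mmu_lock, return early if the faulting access is already allowed, compute a new spte (restoring access-tracked bits and/or setting the writable bit), and install it with a compare-and-exchange, retrying if another vCPU changed the spte in the meantime. A standalone illustration of that cmpxchg step, following the is_access_allowed() shape from lines 3019-3028; the bit positions and retry bound are assumptions, and only the write-fault fix is sketched:

#include <stdint.h>
#include <stdbool.h>
#include <stdatomic.h>

#define PT_PRESENT   (1ull << 0)   /* assumed bit positions */
#define PT_WRITABLE  (1ull << 1)
#define PT_EXEC      (1ull << 2)

#define ERR_WRITE    (1u << 1)     /* assumed fault error-code bits */
#define ERR_FETCH    (1u << 4)

static bool is_access_allowed(uint32_t error_code, uint64_t spte)
{
	if (error_code & ERR_FETCH)
		return spte & PT_EXEC;
	if (error_code & ERR_WRITE)
		return spte & PT_WRITABLE;
	return spte & PT_PRESENT;	/* read fault: present is enough */
}

/*
 * One iteration of the fast-path fix: only install new_spte if the spte
 * still holds the value the decision was based on; otherwise the caller
 * retries, since another vCPU may have zapped or changed it meanwhile.
 */
static bool fast_pf_fix_spte(_Atomic uint64_t *sptep, uint64_t old_spte,
			     uint64_t new_spte)
{
	return atomic_compare_exchange_strong(sptep, &old_spte, new_spte);
}

bool fast_page_fault(_Atomic uint64_t *sptep, uint32_t error_code)
{
	for (int retries = 0; retries < 4; retries++) {
		uint64_t spte = atomic_load(sptep);

		if (is_access_allowed(error_code, spte))
			return true;		/* spurious fault, nothing to do */

		if (!(error_code & ERR_WRITE))
			return false;		/* only the write case is sketched */

		if (fast_pf_fix_spte(sptep, spte, spte | PT_WRITABLE))
			return true;
		/* Lost the race: re-read the spte and try again. */
	}
	return false;
}
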
3495 u64 spte;
3502 __shadow_walk_next(&iterator, spte)) {
3504 spte = mmu_spte_get_lockless(iterator.sptep);
3506 sptes[leaf - 1] = spte;
3508 if (!is_shadow_present_pte(spte))
3517 /* return true if reserved bit is detected on spte. */
3556 pr_err("%s: detect reserved bits on spte, addr 0x%llx, dump hierarchy:\n",
3559 pr_err("------ spte 0x%llx level %d.\n",
3570 u64 spte;
3576 reserved = get_mmio_spte(vcpu, addr, &spte);
3580 if (is_mmio_spte(spte)) {
3581 gfn_t gfn = get_mmio_spte_gfn(spte);
3582 unsigned int access = get_mmio_spte_access(spte);
3584 if (!check_mmio_spte(vcpu, spte))
3625 u64 spte;
3628 for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) {
3630 if (!is_shadow_present_pte(spte))
4958 u64 *spte;
4982 spte = &sp->spt[page_offset / sizeof(*spte)];
4983 return spte;
4993 u64 entry, gentry, *spte;
5030 spte = get_written_sptes(sp, gpa, &npte);
5031 if (!spte)
5036 entry = *spte;
5037 mmu_page_zap_pte(vcpu->kvm, sp, spte, NULL);
5040 if (need_remote_flush(entry, *spte))
5042 ++spte;
5558 * corruption since we just change the spte from writable to
5560 * spte from present to present (changing the spte from present