Lines Matching refs:spte
187 struct kvm_mmu_page *sp, u64 *spte,
204 drop_spte(vcpu->kvm, spte);
529 u64 *spte, pt_element_t gpte, bool no_dirty_log)
535 if (FNAME(prefetch_invalid_gpte)(vcpu, sp, spte, gpte))
538 pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);
552 mmu_set_spte(vcpu, spte, pte_access, false, PG_LEVEL_4K, gfn, pfn,
560 u64 *spte, const void *pte)
564 FNAME(prefetch_gpte)(vcpu, sp, spte, gpte, false);
595 u64 *spte;
607 spte = sp->spt + i;
609 for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) {
610 if (spte == sptep)
613 if (is_shadow_present_pte(*spte))
616 if (!FNAME(prefetch_gpte)(vcpu, sp, spte, gptep[i], true))
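The matches at 595-616 all fall inside the speculative prefetch loop (FNAME(pte_prefetch) in arch/x86/kvm/mmu/paging_tmpl.h in the trees I am aware of). A minimal sketch of that loop, reconstructed around the matched lines; the initial alignment of i and the early-return checks before it are assumptions and may differ from the exact tree this listing was generated against:

	/* Start at the PTE_PREFETCH_NUM-aligned group of sptes containing sptep. */
	i = (sptep - sp->spt) & ~(PTE_PREFETCH_NUM - 1);
	spte = sp->spt + i;

	for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) {
		if (spte == sptep)
			continue;	/* don't touch the spte the fault itself is installing */

		if (is_shadow_present_pte(*spte))
			continue;	/* already mapped, nothing to prefetch */

		if (!FNAME(prefetch_gpte)(vcpu, sp, spte, gptep[i], true))
			break;		/* stop at the first gpte that cannot be mapped */
	}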
852 * we will cache the incorrect access into mmio spte.
1008 * - The spte has a reference to the struct page, so the pfn for a given gfn
1012 * We should flush all tlbs if spte is dropped even though guest is
1048 * Update spte before increasing tlbs_dirty to make
1049 * sure no tlb flush is lost after spte is zapped; see
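The comment at 1048-1049 is about the ordering between zapping an spte and bumping kvm->tlbs_dirty; the flush-side counterpart of that counter lives in kvm_flush_remote_tlbs(). A sketch of the pattern the comment sits above (most likely in FNAME(sync_page)), assuming the tree behind this listing still defers the flush through tlbs_dirty; the surrounding code is reconstructed and may differ in detail:

	if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) {
		/*
		 * prefetch_invalid_gpte() has already dropped the spte
		 * (see the drop_spte() match at 204).  Publish that zap
		 * before bumping tlbs_dirty, so the remote-flush path
		 * cannot consume the incremented counter, flush, and
		 * reset it while the stale spte write is still pending,
		 * which would lose a needed TLB flush.
		 */
		smp_wmb();
		vcpu->kvm->tlbs_dirty++;
		continue;
	}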