Lines Matching defs:gfn
96 gfn_t gfn;
328 gfn_t gfn;
432 gfn = gpte_to_gfn_lvl(pte, walker->level);
433 gfn += (addr & PT_LVL_OFFSET_MASK(walker->level)) >> PAGE_SHIFT;
436 gfn += pse36_gfn_delta(pte);
438 real_gpa = mmu->translate_gpa(vcpu, gfn_to_gpa(gfn), access, &walker->fault);
442 walker->gfn = real_gpa >> PAGE_SHIFT;
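
The matches at 432-442 are the end of the guest walk: the final frame is the base gfn named by the leaf gpte at walker->level, plus the offset bits of the faulting address that fall inside that large-page region, plus a PSE-36 correction for 32-bit 4M pages, and finally a nested translate_gpa pass. Below is a minimal user-space model of that arithmetic; the shift and mask values and the sample numbers are illustrative assumptions based on the 64-bit page-table format, not the kernel's own macros.

/*
 * Toy model of the final-gfn math from listing lines 432-436.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT      12
#define PT64_LEVEL_BITS 9          /* 512 entries per table (assumed) */

typedef uint64_t pt_element_t;
typedef uint64_t gfn_t;

/* Address bits covered below one entry at 'level' (4K/2M/1G region). */
static uint64_t lvl_offset_mask(int level)
{
	return (1ULL << (PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS)) - 1;
}

/* Base frame named by a leaf gpte at 'level', with flag bits stripped. */
static gfn_t gpte_to_gfn_lvl(pt_element_t gpte, int level)
{
	return (gpte & ((1ULL << 52) - 1) & ~lvl_offset_mask(level)) >> PAGE_SHIFT;
}

int main(void)
{
	pt_element_t gpte = 0x200083;          /* 2M leaf: base 0x200000, P|RW|PS */
	uint64_t addr = 0x7f0000201234ULL;     /* faulting guest virtual address */
	int level = 2;

	gfn_t gfn = gpte_to_gfn_lvl(gpte, level);
	gfn += (addr & lvl_offset_mask(level)) >> PAGE_SHIFT;
	/* A 32-bit PSE-36 gpte would also add a pse36 delta here. */

	printf("gfn = %#llx\n", (unsigned long long)gfn);   /* 0x201 */
	return 0;
}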
532 gfn_t gfn;
540 gfn = gpte_to_gfn(gpte);
543 pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn,
552 mmu_set_spte(vcpu, spte, pte_access, false, PG_LEVEL_4K, gfn, pfn,
639 gfn_t base_gfn = gw->gfn;
685 level = kvm_mmu_hugepage_adjust(vcpu, gw->gfn, max_level, &pfn,
698 disallowed_hugepage_adjust(*it.sptep, gw->gfn, it.level,
701 base_gfn = gw->gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);
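
Line 701 rounds the faulting gfn down to the first frame of the huge-page region at the current iterator level: the pages-per-level count is a power of two, so ANDing with its negated offset clears the in-region bits. A small stand-alone demonstration, with the pages-per-level value recomputed locally as an assumption (9 bits per level):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t gfn_t;

/* Frames per mapping at level 1/2/3: 1, 512, 512*512 (assumed layout). */
static gfn_t pages_per_hpage(int level)
{
	return 1ULL << ((level - 1) * 9);
}

int main(void)
{
	gfn_t gfn = 0x12345;                     /* faulting frame */

	for (int level = 1; level <= 3; level++) {
		gfn_t base = gfn & ~(pages_per_hpage(level) - 1);

		printf("level %d: base_gfn = %#llx\n",
		       level, (unsigned long long)base);
	}
	/* level 1: 0x12345, level 2: 0x12200, level 3: 0x0 */
	return 0;
}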
732 * To see whether the mapped gfn can write its page table in the current
736 * size to map the writable gfn which is used as current page table, we should
741 * @write_fault_to_shadow_pgtable will return true if the fault gfn is
746 * size to map the gfn which is used as PDPT.
762 gfn_t gfn = walker->gfn ^ walker->table_gfn[level - 1];
764 self_changed |= !(gfn & mask);
765 *write_fault_to_shadow_pgtable |= !gfn;
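
Lines 762-765 rely on an XOR trick: XOR-ing the faulting gfn with the gfn of each guest page table visited during the walk gives zero when they are the same frame (the write hits its own page table) and a value with no bits above the huge-page boundary when both sit in the same huge-page region. A toy model of the two derived flags; the 2M mask and the example table list are assumptions for illustration:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t gfn_t;

int main(void)
{
	/* Assumed example walk: faulting gfn and the gfn of each table level. */
	gfn_t walker_gfn = 0x12345;
	gfn_t table_gfn[] = { 0x12345, 0x12210, 0x40000 };
	gfn_t mask = ~((1ULL << 9) - 1);     /* frames above the 2M boundary */

	bool self_changed = false;
	bool write_fault_to_shadow_pgtable = false;

	for (unsigned i = 0; i < sizeof(table_gfn) / sizeof(table_gfn[0]); i++) {
		gfn_t gfn = walker_gfn ^ table_gfn[i];

		self_changed |= !(gfn & mask);              /* same 2M region */
		write_fault_to_shadow_pgtable |= !gfn;      /* exact same frame */
	}

	printf("self_changed=%d write_fault_to_shadow_pgtable=%d\n",
	       self_changed, write_fault_to_shadow_pgtable);   /* 1 1 */
	return 0;
}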
821 if (page_fault_handle_page_track(vcpu, error_code, walker.gfn)) {
843 if (try_async_pf(vcpu, prefault, walker.gfn, addr, &pfn, write_fault,
847 if (handle_abnormal_pfn(vcpu, addr, walker.gfn, pfn, walker.pte_access, &r))
898 return gfn_to_gpa(sp->gfn) + offset * sizeof(pt_element_t);
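
Line 898 converts a shadow page's gfn back into a guest physical address and indexes one entry inside it: gfn_to_gpa() is a shift by the page size, and each 64-bit page-table entry occupies sizeof(pt_element_t) == 8 bytes. A quick stand-alone check of that address math, with the sample values assumed:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

typedef uint64_t gfn_t;
typedef uint64_t gpa_t;
typedef uint64_t pt_element_t;

static gpa_t gfn_to_gpa(gfn_t gfn)
{
	return gfn << PAGE_SHIFT;
}

int main(void)
{
	gfn_t sp_gfn = 0x1b3;      /* assumed gfn of the guest page table */
	unsigned offset = 5;       /* index of the entry inside that table */

	gpa_t gpa = gfn_to_gpa(sp_gfn) + offset * sizeof(pt_element_t);
	printf("gpa = %#llx\n", (unsigned long long)gpa);   /* 0x1b3028 */
	return 0;
}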
942 sp->gfn, KVM_PAGES_PER_HPAGE(sp->role.level));
971 gpa = gfn_to_gpa(walker.gfn);
997 gpa = gfn_to_gpa(walker.gfn);
1008 * - The spte has a reference to the struct page, so the pfn for a given gfn
1035 gfn_t gfn;
1057 gfn = gpte_to_gfn(gpte);
1062 if (sync_mmio_spte(vcpu, &sp->spt[i], gfn, pte_access,
1066 if (gfn != sp->gfns[i]) {
1083 gfn, spte_to_pfn(sp->spt[i]),
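
The matches at 1035-1083 are the resync loop: for each cached shadow entry the current guest pte is re-read, its gfn recomputed, and compared against the gfn recorded when the spte was created (sp->gfns[i]); a mismatch means the guest repointed the entry, so the stale translation has to be dropped. A toy model of just that comparison; the structures, sizes and the "drop" step are invented for illustration:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

typedef uint64_t gfn_t;
typedef uint64_t pt_element_t;

/* Assumed stand-in for the per-slot gfn cached at spte-creation time. */
struct toy_shadow_page {
	gfn_t gfns[4];
};

static gfn_t gpte_to_gfn(pt_element_t gpte)
{
	return (gpte & ((1ULL << 52) - 1)) >> PAGE_SHIFT;
}

int main(void)
{
	struct toy_shadow_page sp = { .gfns = { 0x100, 0x101, 0x102, 0x103 } };
	/* Current guest ptes: entry 2 was repointed from 0x102 to 0x777. */
	pt_element_t gpte[4] = { 0x100063, 0x101063, 0x777063, 0x103063 };

	for (int i = 0; i < 4; i++) {
		gfn_t gfn = gpte_to_gfn(gpte[i]);
		bool stale = (gfn != sp.gfns[i]);

		printf("slot %d: gfn=%#llx cached=%#llx %s\n", i,
		       (unsigned long long)gfn, (unsigned long long)sp.gfns[i],
		       stale ? "-> drop spte" : "ok");
	}
	return 0;
}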