Lines Matching refs:gfn
277 int kvm_arch_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages)
282 return static_call(kvm_x86_flush_remote_tlbs_range)(kvm, gfn, nr_pages);
291 gfn_t gfn = kvm_mmu_page_get_gfn(sp, spte_index(sptep));
293 kvm_flush_remote_tlbs_gfn(kvm, gfn, sp->role.level);
296 static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
299 u64 spte = make_mmio_spte(vcpu, gfn, access);
301 trace_mark_mmio_spte(sptep, gfn, spte);
724 return sp->gfn;
729 return sp->gfn + (index << ((sp->role.level - 1) * SPTE_LEVEL_BITS));
759 gfn_t gfn, unsigned int access)
762 sp->shadowed_translation[index] = (gfn << PAGE_SHIFT) | access;
769 sp->gfn, kvm_mmu_page_get_access(sp, index), access);
771 WARN_ONCE(gfn != kvm_mmu_page_get_gfn(sp, index),
772 "gfn mismatch under %s page %llx (expected %llx, got %llx)\n",
774 sp->gfn, kvm_mmu_page_get_gfn(sp, index), gfn);
780 gfn_t gfn = kvm_mmu_page_get_gfn(sp, index);
782 kvm_mmu_page_set_translation(sp, index, gfn, access);
786 * Return the pointer to the large page information for a given gfn,
789 static struct kvm_lpage_info *lpage_info_slot(gfn_t gfn,
794 idx = gfn_to_index(gfn, slot->base_gfn, level);
799 gfn_t gfn, int count)
805 linfo = lpage_info_slot(gfn, slot, i);
811 void kvm_mmu_gfn_disallow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn)
813 update_gfn_disallow_lpage_count(slot, gfn, 1);
816 void kvm_mmu_gfn_allow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn)
818 update_gfn_disallow_lpage_count(slot, gfn, -1);
825 gfn_t gfn;
828 gfn = sp->gfn;
830 slot = __gfn_to_memslot(slots, gfn);
834 return __kvm_write_track_add_gfn(kvm, slot, gfn);
836 kvm_mmu_gfn_disallow_lpage(slot, gfn);
838 if (kvm_mmu_slot_gfn_write_protect(kvm, slot, gfn, PG_LEVEL_4K))
839 kvm_flush_remote_tlbs_gfn(kvm, gfn, PG_LEVEL_4K);
873 gfn_t gfn;
876 gfn = sp->gfn;
878 slot = __gfn_to_memslot(slots, gfn);
880 return __kvm_write_track_remove_gfn(kvm, slot, gfn);
882 kvm_mmu_gfn_allow_lpage(slot, gfn);
902 gfn_t gfn,
907 slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
1080 static struct kvm_rmap_head *gfn_to_rmap(gfn_t gfn, int level,
1085 idx = gfn_to_index(gfn, slot->base_gfn, level);
1094 gfn_t gfn;
1098 gfn = kvm_mmu_page_get_gfn(sp, spte_index(spte));
1107 slot = __gfn_to_memslot(slots, gfn);
1108 rmap_head = gfn_to_rmap(gfn, sp->role.level, slot);
1409 struct kvm_memory_slot *slot, u64 gfn,
1418 rmap_head = gfn_to_rmap(gfn, i, slot);
1425 kvm_tdp_mmu_write_protect_gfn(kvm, slot, gfn, min_level);
1430 static bool kvm_vcpu_write_protect_gfn(struct kvm_vcpu *vcpu, u64 gfn)
1434 slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
1435 return kvm_mmu_slot_gfn_write_protect(vcpu->kvm, slot, gfn, PG_LEVEL_4K);
1445 struct kvm_memory_slot *slot, gfn_t gfn, int level,
1452 struct kvm_memory_slot *slot, gfn_t gfn, int level,
1481 kvm_flush_remote_tlbs_gfn(kvm, gfn, level);
1497 gfn_t gfn;
1509 iterator->gfn = iterator->start_gfn;
1510 iterator->rmap = gfn_to_rmap(iterator->gfn, level, iterator->slot);
1536 iterator->gfn += (1UL << KVM_HPAGE_GFN_SHIFT(iterator->level));
1558 struct kvm_memory_slot *slot, gfn_t gfn,
1570 ret |= handler(kvm, iterator.rmap, range->slot, iterator.gfn,
1607 struct kvm_memory_slot *slot, gfn_t gfn, int level,
1621 struct kvm_memory_slot *slot, gfn_t gfn,
1638 u64 *spte, gfn_t gfn, unsigned int access)
1645 kvm_mmu_page_set_translation(sp, spte_index(spte), gfn, access);
1648 rmap_head = gfn_to_rmap(gfn, sp->role.level, slot);
1655 kvm_flush_remote_tlbs_gfn(kvm, gfn, sp->role.level);
1660 u64 *spte, gfn_t gfn, unsigned int access)
1664 __rmap_add(vcpu->kvm, cache, slot, spte, gfn, access);
1700 pr_err_ratelimited("SPTE %llx (@ %p) for gfn %llx shadow-present at free",
1743 static unsigned kvm_page_table_hashfn(gfn_t gfn)
1745 return hash_64(gfn, KVM_MMU_HASH_SHIFT);
1910 if ((_sp)->gfn != (_gfn) || !sp_has_gptes(_sp)) {} else
2101 protected |= kvm_vcpu_write_protect_gfn(vcpu, sp->gfn);
2147 gfn_t gfn,
2157 if (sp->gfn != gfn) {
2165 * unsync pages for the same gfn. While it's possible
2193 * SPs for a single gfn to be unsync.
2232 gfn_t gfn,
2256 sp->gfn = gfn;
2269 gfn_t gfn,
2276 sp_list = &kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)];
2278 sp = kvm_mmu_find_shadow_page(kvm, vcpu, gfn, sp_list, role);
2281 sp = kvm_mmu_alloc_shadow_page(kvm, caches, gfn, sp_list, role);
2289 gfn_t gfn,
2298 return __kvm_mmu_get_shadow_page(vcpu->kvm, vcpu, &caches, gfn, role);
2348 u64 *sptep, gfn_t gfn,
2357 return kvm_mmu_get_shadow_page(vcpu, gfn, role);
2448 * kvm_mmu_find_shadow_page() without write-protecting the gfn,
2751 int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
2759 for_each_gfn_valid_sp_with_gptes(kvm, sp, gfn) {
2794 * Attempt to unsync any shadow pages that can be reached by the specified gfn,
2795 * KVM is creating a writable mapping for said gfn. Returns 0 if all pages
2800 gfn_t gfn, bool can_unsync, bool prefetch)
2810 if (kvm_gfn_is_write_tracked(kvm, slot, gfn))
2819 for_each_gfn_valid_sp_with_gptes(kvm, sp, gfn) {
2901 u64 *sptep, unsigned int pte_access, gfn_t gfn,
2919 mark_mmio_spte(vcpu, sptep, gfn, pte_access);
2942 wrprot = make_spte(vcpu, sp, slot, pte_access, gfn, pfn, *sptep, prefetch,
2949 trace_kvm_mmu_set_spte(level, gfn, sptep);
2958 kvm_flush_remote_tlbs_gfn(vcpu->kvm, gfn, level);
2962 rmap_add(vcpu, slot, sptep, gfn, pte_access);
2979 gfn_t gfn;
2981 gfn = kvm_mmu_page_get_gfn(sp, spte_index(start));
2982 slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, access & ACC_WRITE_MASK);
2986 ret = gfn_to_page_many_atomic(slot, gfn, pages, end - start);
2990 for (i = 0; i < ret; i++, gfn++, start++) {
2991 mmu_set_spte(vcpu, slot, start, access, gfn,
3052 * Lookup the mapping level for @gfn in the current mm.
3076 static int host_pfn_mapping_level(struct kvm *kvm, gfn_t gfn,
3095 hva = __gfn_to_hva_memslot(slot, gfn);
3141 const struct kvm_memory_slot *slot, gfn_t gfn,
3149 linfo = lpage_info_slot(gfn, slot, max_level);
3157 host_level = host_pfn_mapping_level(kvm, gfn, slot);
3182 fault->gfn, fault->max_level);
3192 VM_BUG_ON((fault->gfn & mask) != (fault->pfn & mask));
3212 fault->pfn |= fault->gfn & page_mask;
3222 gfn_t base_gfn = fault->gfn;
3235 base_gfn = gfn_round_for_level(fault->gfn, it.level);
3261 static void kvm_send_hwpoison_signal(struct kvm_memory_slot *slot, gfn_t gfn)
3263 unsigned long hva = gfn_to_hva_memslot(slot, gfn);
3276 * Do not cache the mmio info caused by writing the readonly gfn
3277 * into the spte otherwise read access on readonly gfn also can
3284 kvm_send_hwpoison_signal(fault->slot, fault->gfn);
3297 vcpu_cache_mmio_info(vcpu, gva, fault->gfn,
3309 * Do not create an MMIO SPTE for a gfn greater than host.MAXPHYADDR,
3311 * tricked by L0 userspace (you can observe gfn > L1.MAXPHYADDR if and
3315 if (unlikely(fault->gfn > kvm_mmu_max_gfn()))
3381 mark_page_dirty_in_slot(vcpu->kvm, fault->slot, fault->gfn);
3514 * since the gfn is not stable for indirect shadow page. See
3642 static hpa_t mmu_alloc_root(struct kvm_vcpu *vcpu, gfn_t gfn, int quadrant,
3654 sp = kvm_mmu_get_shadow_page(vcpu, gfn, role);
4159 gfn_t gfn = get_mmio_spte_gfn(spte);
4168 trace_handle_mmio_page_fault(addr, gfn, access);
4169 vcpu_cache_mmio_info(vcpu, addr, gfn, access);
4193 if (kvm_gfn_is_write_tracked(vcpu->kvm, fault->slot, fault->gfn))
4222 gfn_t gfn)
4227 arch.gfn = gfn;
4232 kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch);
4260 * Retry the page fault if the gfn hit a memslot that is being deleted
4262 * be zapped before KVM inserts a new MMIO SPTE for the gfn.
4287 fault->pfn = __gfn_to_pfn_memslot(slot, fault->gfn, false, false, &async,
4294 trace_kvm_try_async_get_page(fault->addr, fault->gfn);
4295 if (kvm_find_async_pf_gfn(vcpu, fault->gfn)) {
4296 trace_kvm_async_pf_repeated_fault(fault->addr, fault->gfn);
4299 } else if (kvm_arch_setup_async_pf(vcpu, fault->addr, fault->gfn)) {
4309 fault->pfn = __gfn_to_pfn_memslot(slot, fault->gfn, false, true, NULL,
4499 gfn_t base = gfn_round_for_level(fault->gfn,
4665 static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
4669 if (gfn != get_mmio_spte_gfn(*sptep)) {
4674 mark_mmio_spte(vcpu, sptep, gfn, access);
5650 gfn_t gfn = gpa >> PAGE_SHIFT;
5670 for_each_gfn_valid_sp_with_gptes(vcpu->kvm, sp, gfn) {
5928 iterator.gfn - start_gfn + 1);
6313 * more than one rmap entry for a gfn, i.e. requires an L1 gfn to be
6321 * encounters an aliased gfn or two.
6347 gfn_t gfn;
6349 gfn = kvm_mmu_page_get_gfn(huge_sp, spte_index(huge_sptep));
6365 return __kvm_mmu_get_shadow_page(kvm, NULL, &caches, gfn, role);
6378 gfn_t gfn;
6385 gfn = kvm_mmu_page_get_gfn(sp, index);
6391 * gfn-to-pfn translation since the SP is direct, so no need to
6408 __rmap_add(kvm, cache, slot, sptep, gfn, sp->role.access);
6420 gfn_t gfn;
6424 gfn = kvm_mmu_page_get_gfn(huge_sp, spte_index(huge_sptep));
6449 trace_kvm_mmu_split_huge_page(gfn, spte, level, r);
6589 sp->role.level < kvm_mmu_max_mapping_level(kvm, slot, sp->gfn,
7075 slot = __gfn_to_memslot(slots, sp->gfn);
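
Many of the hits above reduce to the same piece of arithmetic: converting a gfn into a per-level index (gfn_to_index(), as used by lpage_info_slot() and gfn_to_rmap()), or recovering the gfn mapped by a given SPTE slot in a direct shadow page (kvm_mmu_page_get_gfn(), line 729 above). The standalone C sketch below mirrors that arithmetic so the indexing can be checked in userspace. It is an illustration, not the kernel code itself: it assumes SPTE_LEVEL_BITS == 9, reproduces the gfn_to_index() subtraction as I understand it from arch/x86/include/asm/kvm_host.h, and uses a made-up memslot base and example gfns.

/*
 * Minimal sketch of the gfn -> per-level index arithmetic referenced by the
 * lines above. Constants mirror the x86 KVM definitions; the example values
 * are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t gfn_t;

#define PG_LEVEL_4K	1
#define PG_LEVEL_2M	2
#define PG_LEVEL_1G	3

#define SPTE_LEVEL_BITS	9	/* 512 entries per page-table page */

/* Guest pages covered by one entry at @level (x86: 1, 512, 512^2). */
static gfn_t pages_per_level(int level)
{
	return 1ULL << ((level - 1) * SPTE_LEVEL_BITS);
}

/* Mirrors gfn_to_index(): slot-relative index of @gfn at @level. */
static unsigned long gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
{
	return (gfn >> ((level - 1) * SPTE_LEVEL_BITS)) -
	       (base_gfn >> ((level - 1) * SPTE_LEVEL_BITS));
}

/*
 * Mirrors the direct-map case of kvm_mmu_page_get_gfn() (line 729 above):
 * the gfn mapped by SPTE @index of a direct shadow page at @sp_gfn/@sp_level.
 */
static gfn_t direct_sp_gfn(gfn_t sp_gfn, int sp_level, int index)
{
	return sp_gfn + ((gfn_t)index << ((sp_level - 1) * SPTE_LEVEL_BITS));
}

int main(void)
{
	gfn_t base_gfn = 0x100000;	/* hypothetical memslot base gfn */
	gfn_t gfn = 0x1007ff;

	printf("4K rmap/lpage_info index: %lu\n",
	       gfn_to_index(gfn, base_gfn, PG_LEVEL_4K));
	printf("2M rmap/lpage_info index: %lu\n",
	       gfn_to_index(gfn, base_gfn, PG_LEVEL_2M));
	printf("guest pages per 1G entry: %llu\n",
	       (unsigned long long)pages_per_level(PG_LEVEL_1G));
	printf("gfn of SPTE 3 in a 2M-level direct SP at gfn 0x1000: 0x%llx\n",
	       (unsigned long long)direct_sp_gfn(0x1000, PG_LEVEL_2M, 3));
	return 0;
}

The same shift amount, (level - 1) * SPTE_LEVEL_BITS, shows up in both directions: dividing a gfn down to a per-level slot index and scaling an SPTE index back up to a gfn offset, which is why the rmap, lpage_info, and direct-SP lookups in the listing all share it.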