Lines matching refs: gfn (x86 KVM MMU)
217 static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
220 u64 mask = make_mmio_spte(vcpu, gfn, access);
222 trace_mark_mmio_spte(sptep, gfn, mask);
241 static bool set_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
245 mark_mmio_spte(vcpu, sptep, gfn, access);
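
mark_mmio_spte() at 217 packs the gfn and the access bits into a single MMIO SPTE via make_mmio_spte(), and set_mmio_spte() at 241 is the caller that installs it for emulated MMIO. A minimal standalone sketch of the packing idea, assuming a simplified layout (gfn shifted up by PAGE_SHIFT, access bits in the low bits) rather than KVM's real mask and generation encoding; all sketch_* names below are hypothetical:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;
typedef u64 gfn_t;

/* Hypothetical layout: low 3 bits hold the access mask, the gfn sits at
 * PAGE_SHIFT as in a physical address.  The real make_mmio_spte() also
 * folds in a generation number and special mask bits; this only shows
 * the gfn/access packing. */
#define SKETCH_PAGE_SHIFT   12
#define SKETCH_ACCESS_MASK  0x7ULL

static u64 sketch_make_mmio_spte(gfn_t gfn, unsigned int access)
{
        return (gfn << SKETCH_PAGE_SHIFT) | (access & SKETCH_ACCESS_MASK);
}

static gfn_t sketch_get_mmio_spte_gfn(u64 spte)
{
        return spte >> SKETCH_PAGE_SHIFT;
}

int main(void)
{
        u64 spte = sketch_make_mmio_spte(0xfee00ULL, 0x3);

        printf("spte=%#llx gfn=%#llx\n",
               (unsigned long long)spte,
               (unsigned long long)sketch_get_mmio_spte_gfn(spte));
        return 0;
}
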
702 return sp->gfn + (index << ((sp->role.level - 1) * PT64_LEVEL_BITS));
705 static void kvm_mmu_page_set_gfn(struct kvm_mmu_page *sp, int index, gfn_t gfn)
708 sp->gfns[index] = gfn;
712 if (WARN_ON(gfn != kvm_mmu_page_get_gfn(sp, index)))
713 pr_err_ratelimited("gfn mismatch under direct page %llx "
715 sp->gfn,
716 kvm_mmu_page_get_gfn(sp, index), gfn);
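
Line 702 is the key detail of this group: for a direct shadow page the gfn behind a given SPTE slot is not read from sp->gfns[] but recomputed from sp->gfn, the slot index and the page's level; kvm_mmu_page_set_gfn() at 705-716 stores the gfn only for indirect pages and merely sanity-checks it against the computed value for direct ones. A standalone sketch of the line-702 arithmetic, assuming PT64_LEVEL_BITS == 9 (512 entries per table):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t gfn_t;

#define PT64_LEVEL_BITS 9       /* 512 entries per page table */

/* Mirror of the computation on line 702: each index step at level N
 * covers 512^(N-1) 4K pages. */
static gfn_t direct_sp_index_to_gfn(gfn_t sp_gfn, int index, int level)
{
        return sp_gfn + ((gfn_t)index << ((level - 1) * PT64_LEVEL_BITS));
}

int main(void)
{
        /* Entry 3 of a level-2 direct page rooted at gfn 0x100000
         * covers gfn 0x100000 + 3 * 512. */
        printf("gfn = %#llx\n",
               (unsigned long long)direct_sp_index_to_gfn(0x100000, 3, 2));
        return 0;
}
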
720 * Return the pointer to the large page information for a given gfn,
723 static struct kvm_lpage_info *lpage_info_slot(gfn_t gfn,
729 idx = gfn_to_index(gfn, slot->base_gfn, level);
734 gfn_t gfn, int count)
740 linfo = lpage_info_slot(gfn, slot, i);
746 void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn)
748 update_gfn_disallow_lpage_count(slot, gfn, 1);
751 void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn)
753 update_gfn_disallow_lpage_count(slot, gfn, -1);
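
lpage_info_slot() at 723 (and, further down, __gfn_to_rmap() at 937) indexes a per-memslot array with gfn_to_index(gfn, slot->base_gfn, level), and the allow/disallow helpers at 746-753 adjust the resulting counter. A sketch of that index computation, assuming KVM_HPAGE_GFN_SHIFT(level) == (level - 1) * 9, i.e. the gfn and the slot base are both truncated to the huge-page frame for the level before subtracting:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t gfn_t;

/* Assumed to match KVM_HPAGE_GFN_SHIFT(): 0 for 4K, 9 for 2M, 18 for 1G. */
#define HPAGE_GFN_SHIFT(level)  (((level) - 1) * 9)

static unsigned long sketch_gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
{
        return (gfn >> HPAGE_GFN_SHIFT(level)) -
               (base_gfn >> HPAGE_GFN_SHIFT(level));
}

int main(void)
{
        gfn_t base = 0x100000;  /* slot->base_gfn */

        /* gfn 0x100a00 is the 5th 2M frame of the slot (0xa00 / 0x200). */
        printf("idx@2M = %lu\n", sketch_gfn_to_index(0x100a00, base, 2));
        printf("idx@4K = %lu\n", sketch_gfn_to_index(0x100a00, base, 1));
        return 0;
}
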
760 gfn_t gfn;
763 gfn = sp->gfn;
765 slot = __gfn_to_memslot(slots, gfn);
769 return kvm_slot_page_track_add_page(kvm, slot, gfn,
772 kvm_mmu_gfn_disallow_lpage(slot, gfn);
790 gfn_t gfn;
793 gfn = sp->gfn;
795 slot = __gfn_to_memslot(slots, gfn);
797 return kvm_slot_page_track_remove_page(kvm, slot, gfn,
800 kvm_mmu_gfn_allow_lpage(slot, gfn);
811 gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn,
816 slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
937 static struct kvm_rmap_head *__gfn_to_rmap(gfn_t gfn, int level,
942 idx = gfn_to_index(gfn, slot->base_gfn, level);
946 static struct kvm_rmap_head *gfn_to_rmap(struct kvm *kvm, gfn_t gfn,
953 slot = __gfn_to_memslot(slots, gfn);
954 return __gfn_to_rmap(gfn, sp->role.level, slot);
965 static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
971 kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn);
972 rmap_head = gfn_to_rmap(vcpu->kvm, gfn, sp);
979 gfn_t gfn;
983 gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt);
984 rmap_head = gfn_to_rmap(kvm, gfn, sp);
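
The rmap hits at 937-984 all follow the same pattern: resolve the gfn to its memslot, then index the slot's per-level rmap array with the same gfn_to_index() offset to reach the list head of SPTEs that map that gfn. A minimal sketch of that bucket lookup, using a hypothetical trimmed-down memslot (base_gfn, npages, one rmap array per level):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef uint64_t gfn_t;

#define MAX_LEVEL               3       /* 4K, 2M, 1G in this sketch */
#define HPAGE_GFN_SHIFT(level)  (((level) - 1) * 9)

struct rmap_head {                      /* stand-in for struct kvm_rmap_head */
        unsigned long val;
};

struct memslot {                        /* hypothetical, trimmed memslot */
        gfn_t base_gfn;
        unsigned long npages;
        struct rmap_head *rmap[MAX_LEVEL];      /* indexed [level - 1][idx] */
};

static struct rmap_head *sketch_gfn_to_rmap(gfn_t gfn, int level,
                                            struct memslot *slot)
{
        unsigned long idx = (gfn >> HPAGE_GFN_SHIFT(level)) -
                            (slot->base_gfn >> HPAGE_GFN_SHIFT(level));

        return &slot->rmap[level - 1][idx];
}

int main(void)
{
        struct memslot slot = { .base_gfn = 0x1000, .npages = 1UL << 18 };

        for (int level = 1; level <= MAX_LEVEL; level++)
                slot.rmap[level - 1] = calloc(slot.npages >> HPAGE_GFN_SHIFT(level),
                                              sizeof(struct rmap_head));

        /* The 4K bucket for gfn 0x1234 is entry 0x234 of slot.rmap[0]. */
        printf("bucket index: %ld\n",
               (long)(sketch_gfn_to_rmap(0x1234, 1, &slot) - slot.rmap[0]));
        return 0;
}
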
1088 kvm_flush_remote_tlbs_with_address(vcpu->kvm, sp->gfn,
1289 struct kvm_memory_slot *slot, u64 gfn)
1296 rmap_head = __gfn_to_rmap(gfn, i, slot);
1302 kvm_tdp_mmu_write_protect_gfn(kvm, slot, gfn);
1307 static bool rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn)
1311 slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
1312 return kvm_mmu_slot_gfn_write_protect(vcpu->kvm, slot, gfn);
1332 struct kvm_memory_slot *slot, gfn_t gfn, int level,
1339 struct kvm_memory_slot *slot, gfn_t gfn, int level,
1354 rmap_printk("kvm_set_pte_rmapp: spte %p %llx gfn %llx (%d)\n",
1355 sptep, *sptep, gfn, level);
1372 kvm_flush_remote_tlbs_with_address(kvm, gfn, 1);
1388 gfn_t gfn;
1400 iterator->gfn = iterator->start_gfn;
1401 iterator->rmap = __gfn_to_rmap(iterator->gfn, level, iterator->slot);
1428 iterator->gfn += (1UL << KVM_HPAGE_GFN_SHIFT(iterator->level));
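
The slot rmap iterator at 1388-1428 visits one bucket per frame: it starts at start_gfn (1400), resolves the bucket with __gfn_to_rmap() (1401), and then advances the gfn by one huge-page frame at the current level (1428). A sketch of the stepping loop, reusing the shift assumption from the examples above:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t gfn_t;

#define HPAGE_GFN_SHIFT(level)  (((level) - 1) * 9)

/* Walk [start_gfn, end_gfn] at the given level, one rmap bucket per
 * huge-page frame, mirroring the stepping on lines 1400/1428. */
static void sketch_slot_rmap_walk(gfn_t start_gfn, gfn_t end_gfn, int level)
{
        for (gfn_t gfn = start_gfn; gfn <= end_gfn;
             gfn += 1UL << HPAGE_GFN_SHIFT(level))
                printf("visit level-%d bucket for gfn %#llx\n",
                       level, (unsigned long long)gfn);
}

int main(void)
{
        /* Three 2M frames cover gfns 0x0 .. 0x5ff. */
        sketch_slot_rmap_walk(0x0, 0x5ff, 2);
        return 0;
}
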
1454 gfn_t gfn,
1476 * {gfn(page) | page intersects with [hva_start, hva_end)} =
1487 iterator.gfn, iterator.level, data);
1499 gfn_t gfn, int level,
1531 struct kvm_memory_slot *slot, gfn_t gfn, int level,
1541 trace_kvm_age_page(gfn, level, slot, young);
1546 struct kvm_memory_slot *slot, gfn_t gfn,
1560 static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
1567 rmap_head = gfn_to_rmap(vcpu->kvm, gfn, sp);
1569 kvm_unmap_rmapp(vcpu->kvm, rmap_head, NULL, gfn, sp->role.level, 0);
1570 kvm_flush_remote_tlbs_with_address(vcpu->kvm, sp->gfn,
1635 static unsigned kvm_page_table_hashfn(gfn_t gfn)
1637 return hash_64(gfn, KVM_MMU_HASH_SHIFT);
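
kvm_page_table_hashfn() at 1635 buckets shadow pages by hash_64(gfn, KVM_MMU_HASH_SHIFT). A sketch of the same shape of 64-bit multiplicative (Fibonacci) hashing, with an assumed 12-bit bucket index and a golden-ratio-style multiplier of the kind hash_64() uses:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;
typedef u64 gfn_t;

/* Assumed table size; the real KVM_MMU_HASH_SHIFT may differ. */
#define SKETCH_MMU_HASH_SHIFT 12

/* Fibonacci hashing: multiply by a golden-ratio-derived constant and
 * keep the top SHIFT bits, the same shape as the kernel's hash_64(). */
static unsigned int sketch_page_table_hashfn(gfn_t gfn)
{
        return (unsigned int)((gfn * 0x61C8864680B583EBULL) >>
                              (64 - SKETCH_MMU_HASH_SHIFT));
}

int main(void)
{
        for (gfn_t gfn = 0; gfn < 4; gfn++)
                printf("gfn %llu -> bucket %u\n",
                       (unsigned long long)gfn, sketch_page_table_hashfn(gfn));
        return 0;
}
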
1820 if ((_sp)->gfn != (_gfn) || (_sp)->role.direct) {} else
1827 /* @sp->gfn should be write-protected at the call site */
1885 /* @gfn should be write-protected at the call site */
1886 static bool kvm_sync_pages(struct kvm_vcpu *vcpu, gfn_t gfn,
1892 for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) {
1989 protected |= rmap_write_protect(vcpu, sp->gfn);
2021 gfn_t gfn,
2049 sp_list = &vcpu->kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)];
2051 if (sp->gfn != gfn) {
2090 sp->gfn = gfn;
2100 if (level == PG_LEVEL_4K && rmap_write_protect(vcpu, gfn))
2101 kvm_flush_remote_tlbs_with_address(vcpu->kvm, gfn, 1);
2104 flush |= kvm_sync_pages(vcpu, gfn, &invalid_list);
2213 kvm_flush_remote_tlbs_with_address(vcpu->kvm, child->gfn, 1);
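
kvm_mmu_get_page() at 2021-2104 consumes that hash: it scans the bucket for an existing shadow page whose gfn matches (2051), and only when nothing is found does it allocate a fresh one, set sp->gfn (2090), and, for 4K pages, write-protect the gfn (2100). A heavily simplified sketch of the lookup-or-create pattern, with hypothetical types and no roles, sync or locking:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef uint64_t gfn_t;

#define HASH_BITS       4
#define NBUCKETS        (1u << HASH_BITS)

struct shadow_page {                    /* stand-in for struct kvm_mmu_page */
        gfn_t gfn;
        struct shadow_page *next;
};

static struct shadow_page *buckets[NBUCKETS];

static unsigned int bucket_of(gfn_t gfn)
{
        return (unsigned int)((gfn * 0x61C8864680B583EBULL) >> (64 - HASH_BITS));
}

/* Mirror of the lookup loop around line 2051: skip entries whose gfn
 * does not match, reuse a hit, otherwise allocate and set sp->gfn as
 * on line 2090. */
static struct shadow_page *sketch_mmu_get_page(gfn_t gfn)
{
        struct shadow_page **head = &buckets[bucket_of(gfn)], *sp;

        for (sp = *head; sp; sp = sp->next)
                if (sp->gfn == gfn)
                        return sp;              /* existing shadow page */

        sp = calloc(1, sizeof(*sp));
        sp->gfn = gfn;
        sp->next = *head;
        *head = sp;
        return sp;                              /* freshly created */
}

int main(void)
{
        struct shadow_page *a = sketch_mmu_get_page(0x1000);
        struct shadow_page *b = sketch_mmu_get_page(0x1000);

        printf("same page reused: %s\n", a == b ? "yes" : "no");
        return 0;
}
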
2468 int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
2474 pgprintk("%s: looking for gfn %llx\n", __func__, gfn);
2477 for_each_gfn_indirect_valid_sp(kvm, sp, gfn) {
2478 pgprintk("%s: gfn %llx role %x\n", __func__, gfn,
2499 bool mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
2504 if (kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_WRITE))
2507 for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
2562 gfn_t gfn, kvm_pfn_t pfn, bool speculative,
2569 if (set_mmio_spte(vcpu, sptep, gfn, pfn, pte_access))
2574 ret = make_spte(vcpu, pte_access, level, gfn, pfn, *sptep, speculative,
2578 kvm_vcpu_mark_page_dirty(vcpu, gfn);
2589 gfn_t gfn, kvm_pfn_t pfn, bool speculative,
2598 pgprintk("%s: spte %llx write_fault %d gfn %llx\n", __func__,
2599 *sptep, write_fault, gfn);
2622 set_spte_ret = set_spte(vcpu, sptep, pte_access, level, gfn, pfn,
2631 kvm_flush_remote_tlbs_with_address(vcpu->kvm, gfn,
2647 trace_kvm_mmu_set_spte(level, gfn, sptep);
2653 rmap_count = rmap_add(vcpu, sptep, gfn);
2655 rmap_recycle(vcpu, sptep, gfn);
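
mmu_set_spte() at 2589-2655 ends by adding the new SPTE to the gfn's rmap (2653) and, once the bucket has grown past a recycle threshold, letting rmap_recycle() (1560-1570 above) zap the whole bucket and flush, which bounds the cost of later rmap walks. A sketch of that count-and-recycle pattern over a hypothetical singly linked bucket:

#include <stdio.h>
#include <stdlib.h>

#define RECYCLE_THRESHOLD 4     /* assumed; stands in for RMAP_RECYCLE_THRESHOLD */

struct spte_ref {
        unsigned long long *sptep;
        struct spte_ref *next;
};

/* Add an SPTE to the bucket and return the new element count, the same
 * contract as rmap_add() on line 2653. */
static int bucket_add(struct spte_ref **head, unsigned long long *sptep)
{
        struct spte_ref *ref = calloc(1, sizeof(*ref));
        int count = 1;

        ref->sptep = sptep;
        ref->next = *head;
        *head = ref;
        for (struct spte_ref *it = ref->next; it; it = it->next)
                count++;
        return count;
}

/* Drop every SPTE in the bucket, the rough effect of rmap_recycle(). */
static void bucket_recycle(struct spte_ref **head)
{
        while (*head) {
                struct spte_ref *ref = *head;

                *head = ref->next;
                *ref->sptep = 0;        /* "zap" the SPTE */
                free(ref);
        }
}

int main(void)
{
        static unsigned long long sptes[8];
        struct spte_ref *bucket = NULL;

        for (int i = 0; i < 8; i++) {
                sptes[i] = 0x8000 + i;
                if (bucket_add(&bucket, &sptes[i]) > RECYCLE_THRESHOLD) {
                        printf("recycling after %d mappings\n", i + 1);
                        bucket_recycle(&bucket);
                }
        }
        return 0;
}
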
2662 static kvm_pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn,
2667 slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, no_dirty_log);
2671 return gfn_to_pfn_memslot_atomic(slot, gfn);
2682 gfn_t gfn;
2684 gfn = kvm_mmu_page_get_gfn(sp, start - sp->spt);
2685 slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, access & ACC_WRITE_MASK);
2689 ret = gfn_to_page_many_atomic(slot, gfn, pages, end - start);
2693 for (i = 0; i < ret; i++, gfn++, start++) {
2694 mmu_set_spte(vcpu, start, access, false, sp->role.level, gfn,
2745 static int host_pfn_mapping_level(struct kvm_vcpu *vcpu, gfn_t gfn,
2763 hva = __gfn_to_hva_memslot(slot, gfn);
2772 int kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, gfn_t gfn,
2790 slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, true);
2796 linfo = lpage_info_slot(gfn, slot, max_level);
2804 level = host_pfn_mapping_level(vcpu, gfn, pfn, slot);
2822 VM_BUG_ON((gfn & mask) != (pfn & mask));
2828 void disallowed_hugepage_adjust(u64 spte, gfn_t gfn, int cur_level,
2845 *pfnp |= gfn & page_mask;
2861 gfn_t gfn = gpa >> PAGE_SHIFT;
2862 gfn_t base_gfn = gfn;
2867 level = kvm_mmu_hugepage_adjust(vcpu, gfn, max_level, &pfn,
2877 disallowed_hugepage_adjust(*it.sptep, gfn, it.level,
2880 base_gfn = gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);
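
The hugepage-adjust hits at 2772-2880 revolve around one invariant, visible on lines 2822 and 2845: a fault can only be mapped with a huge page if gfn and pfn are congruent modulo the huge-page size, and falling back to a smaller page just restores the gfn's low bits into the pfn. A sketch of that alignment arithmetic, assuming KVM_PAGES_PER_HPAGE(level) == 1 << ((level - 1) * 9):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t gfn_t;
typedef uint64_t pfn_t;

/* Assumed to match KVM_PAGES_PER_HPAGE(): 1, 512, 512*512, ... */
#define PAGES_PER_HPAGE(level)  (1ULL << (((level) - 1) * 9))

/* A huge mapping is only possible if gfn and pfn share the same offset
 * within the huge frame (cf. the VM_BUG_ON on line 2822). */
static bool can_map_huge(gfn_t gfn, pfn_t pfn, int level)
{
        uint64_t mask = PAGES_PER_HPAGE(level) - 1;

        return (gfn & mask) == (pfn & mask);
}

/* Promote: point the SPTE at the start of the huge frame. */
static pfn_t huge_base_pfn(pfn_t pfn, int level)
{
        return pfn & ~(PAGES_PER_HPAGE(level) - 1);
}

/* Demote (cf. line 2845): re-add the gfn's offset within the frame so
 * the smaller mapping targets the right 4K page again. */
static pfn_t small_pfn(pfn_t huge_base, gfn_t gfn, int level)
{
        return huge_base | (gfn & (PAGES_PER_HPAGE(level) - 1));
}

int main(void)
{
        gfn_t gfn = 0x100234;
        pfn_t pfn = 0x550234;   /* same low 9 bits as gfn: 2M-compatible */

        assert(can_map_huge(gfn, pfn, 2));
        printf("2M base pfn %#llx, 4K pfn back: %#llx\n",
               (unsigned long long)huge_base_pfn(pfn, 2),
               (unsigned long long)small_pfn(huge_base_pfn(pfn, 2), gfn, 2));
        return 0;
}
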
2912 static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn)
2915 * Do not cache the mmio info caused by writing the readonly gfn
2916 * into the spte otherwise read access on readonly gfn also can
2923 kvm_send_hwpoison_signal(kvm_vcpu_gfn_to_hva(vcpu, gfn), current);
2930 static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
2936 *ret_val = kvm_handle_bad_page(vcpu, gfn, pfn);
2941 vcpu_cache_mmio_info(vcpu, gva, gfn,
2988 gfn_t gfn;
3009 * The gfn of direct spte is stable since it is
3010 * calculated by sp->gfn.
3012 gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->spt);
3013 kvm_vcpu_mark_page_dirty(vcpu, gfn);
3110 * since the gfn is not stable for indirect shadow page. See
3217 static hpa_t mmu_alloc_root(struct kvm_vcpu *vcpu, gfn_t gfn, gva_t gva,
3228 sp = kvm_mmu_get_page(vcpu, gfn, gva, level, direct, ACC_ALL);
3581 gfn_t gfn = get_mmio_spte_gfn(spte);
3590 trace_handle_mmio_page_fault(addr, gfn, access);
3591 vcpu_cache_mmio_info(vcpu, addr, gfn, access);
3603 u32 error_code, gfn_t gfn)
3616 if (kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_WRITE))
3648 gfn_t gfn)
3653 arch.gfn = gfn;
3658 kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch);
3661 static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
3665 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
3669 * Retry the page fault if the gfn hit a memslot that is being deleted
3671 * be zapped before KVM inserts a new MMIO SPTE for the gfn.
3684 *pfn = __gfn_to_pfn_memslot(slot, gfn, false, &async, write, writable);
3689 trace_kvm_try_async_get_page(cr2_or_gpa, gfn);
3690 if (kvm_find_async_pf_gfn(vcpu, gfn)) {
3691 trace_kvm_async_pf_doublefault(cr2_or_gpa, gfn);
3694 } else if (kvm_arch_setup_async_pf(vcpu, cr2_or_gpa, gfn))
3698 *pfn = __gfn_to_pfn_memslot(slot, gfn, false, NULL, write, writable);
3708 gfn_t gfn = gpa >> PAGE_SHIFT;
3713 if (page_fault_handle_page_track(vcpu, error_code, gfn))
3729 if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, write, &map_writable))
3732 if (handle_abnormal_pfn(vcpu, is_tdp ? 0 : gpa, gfn, pfn, ACC_ALL, &r))
3942 static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
3946 if (gfn != get_mmio_spte_gfn(*sptep)) {
3952 mark_mmio_spte(vcpu, sptep, gfn, access);
4990 gfn_t gfn = gpa >> PAGE_SHIFT;
5022 for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
5251 iterator.gfn - start_gfn + 1);
5598 kvm_flush_remote_tlbs_with_address(kvm, sp->gfn,
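
Finally, the flush calls at 1088, 1570 and 5598 pass sp->gfn as the start of a ranged TLB flush; the page-count argument (cut off by the match) scales with the shadow page's level so the flush covers exactly the region that page maps. A sketch of how such a count can be derived from the level, under the same 512-entries-per-level assumption as above:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t gfn_t;

/* Assumed to match KVM_PAGES_PER_HPAGE(): 1, 512, 512*512, ... */
#define PAGES_PER_HPAGE(level)  (1ULL << (((level) - 1) * 9))

/* Stand-in for kvm_flush_remote_tlbs_with_address(): here we only
 * print the gfn range that would be invalidated. */
static void sketch_flush_range(gfn_t start_gfn, uint64_t pages)
{
        printf("flush gfns [%#llx, %#llx)\n",
               (unsigned long long)start_gfn,
               (unsigned long long)(start_gfn + pages));
}

int main(void)
{
        gfn_t sp_gfn = 0x100000;
        int level = 2;          /* a 2M-scope shadow page */

        sketch_flush_range(sp_gfn, PAGES_PER_HPAGE(level));
        return 0;
}
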