Lines matching refs:gfn in arch/x86/kvm/mmu/tdp_mmu.c (the x86 KVM TDP MMU):
192 gfn_t gfn, union kvm_mmu_page_role role)
199 sp->gfn = gfn;
217 tdp_mmu_init_sp(child_sp, iter->sptep, iter->gfn, role);
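
The first cluster of hits (lines 192-217) is shadow-page setup: tdp_mmu_init_sp() records the base gfn that a new page-table page translates, and the child-init call at line 217 hands down the iterator's current gfn. A minimal sketch of that bookkeeping, using simplified stand-in types rather than the kernel's real kvm_mmu_page:

    #include <stdint.h>

    typedef uint64_t gfn_t;

    /* Stand-ins for the kernel structures; only the fields the hits touch. */
    struct mmu_page_role { int level; };
    struct mmu_page {
            gfn_t gfn;                   /* base gfn this page-table page maps */
            struct mmu_page_role role;
    };

    /* Modeled on tdp_mmu_init_sp() above: record the role and the base gfn
     * so later SPTE changes under this page can recompute per-entry gfns. */
    static void init_sp_sketch(struct mmu_page *sp, gfn_t gfn,
                               struct mmu_page_role role)
    {
            sp->role = role;
            sp->gfn = gfn;
    }
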
258 static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
324 gfn_t base_gfn = sp->gfn;
333 gfn_t gfn = base_gfn + i * KVM_PAGES_PER_HPAGE(level);
396 handle_changed_spte(kvm, kvm_mmu_page_as_id(sp), gfn,
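
Lines 324-396 come from the teardown path: when a page-table page is removed, the gfn of its i-th entry is reconstructed as base_gfn + i * KVM_PAGES_PER_HPAGE(level), where KVM_PAGES_PER_HPAGE(level) is the number of 4KiB pages a level-N entry covers (512^(N-1) on x86). A compilable illustration of that stride, with a local PAGES_PER_HPAGE macro standing in for the kernel's:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t gfn_t;

    /* x86 KVM convention: a level-N entry spans 512^(N-1) 4KiB pages. */
    #define PAGES_PER_HPAGE(level) (1ULL << (((level) - 1) * 9))

    int main(void)
    {
            gfn_t base_gfn = 0x40000;    /* base gfn of the removed table */
            int level = 2;               /* its entries map 2MiB each */
            int i;

            /* Same stride as "gfn = base_gfn + i * KVM_PAGES_PER_HPAGE(level)". */
            for (i = 0; i < 4; i++)
                    printf("entry %d -> gfn %#llx\n", i,
                           (unsigned long long)(base_gfn + i * PAGES_PER_HPAGE(level)));
            return 0;
    }
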
407 * @gfn: the base GFN that was mapped by the SPTE
419 static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
431 WARN_ON_ONCE(gfn & (KVM_PAGES_PER_HPAGE(level) - 1));
446 "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
447 as_id, gfn, old_spte, new_spte, level);
459 trace_kvm_tdp_mmu_spte_changed(as_id, gfn, level, old_spte, new_spte);
484 "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
485 as_id, gfn, old_spte, new_spte, level);
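
The handle_changed_spte() hits (lines 419-485) include the invariant checked at line 431: the base gfn of a level-N SPTE must be aligned to the number of pages it covers, so a 2MiB (level 2) mapping needs a 512-page-aligned gfn. A standalone demonstration of that check:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t gfn_t;

    #define PAGES_PER_HPAGE(level) (1ULL << (((level) - 1) * 9))

    int main(void)
    {
            gfn_t gfn = 0x1200;          /* 512-aligned: a valid level-2 base */
            int level = 2;

            /* The condition handle_changed_spte() WARNs on when violated. */
            assert((gfn & (PAGES_PER_HPAGE(level) - 1)) == 0);
            printf("gfn %#llx is a valid level-%d base\n",
                   (unsigned long long)gfn, level);
            return 0;
    }
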
554 handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte,
575 kvm_flush_remote_tlbs_gfn(kvm, iter->gfn, iter->level);
597 * @gfn: The base GFN that was (or will be) mapped by the SPTE
604 u64 old_spte, u64 new_spte, gfn_t gfn, int level)
619 handle_changed_spte(kvm, as_id, gfn, old_spte, new_spte, level, false);
629 iter->gfn, iter->level);
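
Lines 554-629 show the common write-then-react pattern: a SPTE is updated, handle_changed_spte() is handed the old->new transition at iter->gfn, and kvm_flush_remote_tlbs_gfn() invalidates only the range that one entry maps (in the kernel the flush is conditional on the kind of change). A simplified model of the ordering, with stub handlers in place of the real functions:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t u64;
    typedef uint64_t gfn_t;

    /* Stubs standing in for handle_changed_spte() and
     * kvm_flush_remote_tlbs_gfn() from the hits above. */
    static void changed_spte(gfn_t gfn, u64 old_spte, u64 new_spte, int level)
    {
            printf("gfn %#llx: spte %#llx -> %#llx (level %d)\n",
                   (unsigned long long)gfn, (unsigned long long)old_spte,
                   (unsigned long long)new_spte, level);
    }

    static void flush_tlbs_gfn(gfn_t gfn, int level)
    {
            printf("flush gfn %#llx at level %d\n", (unsigned long long)gfn, level);
    }

    /* Publish the new SPTE, dispatch the transition, then flush only the
     * range the entry mapped rather than the whole TLB. */
    static u64 set_spte_sketch(u64 *sptep, u64 new_spte, gfn_t gfn, int level)
    {
            u64 old_spte = *sptep;

            *sptep = new_spte;
            changed_spte(gfn, old_spte, new_spte, level);
            flush_tlbs_gfn(gfn, level);
            return old_spte;
    }

    int main(void)
    {
            u64 spte = 0;

            set_spte_sketch(&spte, 0xabc007, 0xabc, 1);
            return 0;
    }
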
682 WARN_ON_ONCE(iter->gfn > iter->next_last_level_gfn);
694 * a gpa range that would exceed the max gfn, and KVM does not create
779 sp->gfn, sp->role.level + 1);
965 new_spte = make_mmio_spte(vcpu, iter->gfn, ACC_ALL);
967 wrprot = make_spte(vcpu, sp, fault->slot, ACC_ALL, iter->gfn,
977 kvm_flush_remote_tlbs_gfn(vcpu->kvm, iter->gfn, iter->level);
992 trace_mark_mmio_spte(rcu_dereference(iter->sptep), iter->gfn,
996 trace_kvm_mmu_set_spte(iter->level, iter->gfn,
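
Lines 965-996 are the fault path: the faulting iter->gfn becomes either an MMIO SPTE via make_mmio_spte() (so future accesses trap for emulation) or a normal SPTE via make_spte(). The branch below is a hypothetical model of that choice; the predicate and both bit encodings are placeholders, not KVM's real SPTE formats:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t u64;
    typedef uint64_t gfn_t;

    /* Placeholder predicate; KVM decides this from the fault and memslot. */
    static bool gfn_is_mmio(gfn_t gfn)
    {
            return gfn >= 0xf0000;
    }

    static u64 build_spte_sketch(gfn_t gfn)
    {
            if (gfn_is_mmio(gfn))
                    return (1ULL << 62) | (gfn << 12);   /* fake MMIO encoding */
            return (gfn << 12) | 0x7;                    /* fake present+rw bits */
    }

    int main(void)
    {
            printf("gfn 0x1000  -> %#llx\n", (unsigned long long)build_spte_sketch(0x1000));
            printf("gfn 0xf0001 -> %#llx\n", (unsigned long long)build_spte_sketch(0xf0001));
            return 0;
    }
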
1055 tdp_mmu_for_each_pte(iter, mmu, fault->gfn, fault->gfn + 1) {
1199 trace_kvm_tdp_mmu_spte_changed(iter->as_id, iter->gfn, iter->level,
1417 trace_kvm_mmu_split_huge_page(iter->gfn, huge_spte, level, ret);
1455 trace_kvm_mmu_split_huge_page(iter.gfn,
1573 * set in mask, starting at gfn. The given memslot is expected to contain all
1579 gfn_t gfn, unsigned long mask, bool wrprot)
1589 tdp_root_for_each_leaf_pte(iter, root, gfn + __ffs(mask),
1590 gfn + BITS_PER_LONG) {
1598 !(mask & (1UL << (iter.gfn - gfn))))
1601 mask &= ~(1UL << (iter.gfn - gfn));
1610 trace_kvm_tdp_mmu_spte_changed(iter.as_id, iter.gfn, iter.level,
1621 * set in mask, starting at gfn. The given memslot is expected to contain all
1628 gfn_t gfn, unsigned long mask,
1634 clear_dirty_pt_masked(kvm, root, gfn, mask, wrprot);
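
Lines 1573-1634 are the masked dirty-log clear: mask carries one bit per 4KiB page relative to gfn, the walk starts at the first set bit (gfn + __ffs(mask)), ends at gfn + BITS_PER_LONG, skips pages whose bit is clear (or SPTEs above 4KiB), and clears each bit as it is consumed so the loop can stop as soon as mask is empty. A self-contained model of that loop:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t gfn_t;

    /* Model of clear_dirty_pt_masked(): bit i of mask marks gfn + i.
     * As in the kernel caller, mask must be non-zero on entry. */
    static void clear_dirty_masked_sketch(gfn_t gfn, unsigned long mask)
    {
            gfn_t cur = gfn + __builtin_ctzl(mask);  /* like gfn + __ffs(mask) */
            gfn_t end = gfn + 64;                    /* BITS_PER_LONG on 64-bit */

            for (; cur < end && mask; cur++) {
                    if (!(mask & (1UL << (cur - gfn))))
                            continue;                /* page not in this batch */
                    mask &= ~(1UL << (cur - gfn));   /* consume the bit */
                    printf("clear dirty state for gfn %#llx\n",
                           (unsigned long long)cur);
            }
    }

    int main(void)
    {
            clear_dirty_masked_sketch(0x1000, 0x5);  /* pages 0x1000 and 0x1002 */
            return 0;
    }
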
1666 * If iter.gfn resides outside of the slot, i.e. the page for
1672 if (iter.gfn < start || iter.gfn >= end)
1676 iter.gfn, PG_LEVEL_NUM);
1709 gfn_t gfn, int min_level)
1719 for_each_tdp_pte_min_level(iter, root, min_level, gfn, gfn + 1) {
1745 struct kvm_memory_slot *slot, gfn_t gfn,
1753 spte_set |= write_protect_gfn(kvm, root, gfn, min_level);
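
Lines 1709-1753: write-protecting a single gfn walks the half-open range [gfn, gfn + 1) from min_level down, once per root, and the per-root results are OR-ed into spte_set so the caller learns whether any SPTE actually changed. A small model of that accumulation, with a stubbed per-root helper:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t gfn_t;

    /* Stub for write_protect_gfn(); pretend only root 0 still had a
     * writable SPTE for this gfn. */
    static bool write_protect_one_root(int root, gfn_t gfn)
    {
            (void)gfn;
            return root == 0;
    }

    int main(void)
    {
            gfn_t gfn = 0x1234;
            bool spte_set = false;
            int root;

            /* Same shape as "spte_set |= write_protect_gfn(...)" per root. */
            for (root = 0; root < 3; root++)
                    spte_set |= write_protect_one_root(root, gfn);

            printf("spte_set = %d\n", spte_set);
            return 0;
    }
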
1769 gfn_t gfn = addr >> PAGE_SHIFT;
1774 tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
1798 gfn_t gfn = addr >> PAGE_SHIFT;
1801 tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
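
The final hits (lines 1769-1801) convert a guest address to a frame number with addr >> PAGE_SHIFT and then walk [gfn, gfn + 1), a half-open range covering exactly the one page being queried. A minimal demonstration, assuming 4KiB pages:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t gfn_t;

    #define PAGE_SHIFT 12   /* 4KiB pages */

    int main(void)
    {
            uint64_t addr = 0x7f001234;
            gfn_t gfn = addr >> PAGE_SHIFT;  /* drop the low 12 offset bits */

            /* The walks above visit [gfn, gfn + 1): exactly this page. */
            printf("addr %#llx -> gfn %#llx\n",
                   (unsigned long long)addr, (unsigned long long)gfn);
            return 0;
    }
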