Lines matching references to gfn in KVM's TDP MMU (arch/x86/kvm/mmu/tdp_mmu.c); each match is prefixed with its line number in that file.

139 static struct kvm_mmu_page *alloc_tdp_mmu_page(struct kvm_vcpu *vcpu, gfn_t gfn,
149 sp->gfn = gfn;
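
Both matches above are from the TDP MMU page allocator, which stamps the base gfn into the new page-table page's header. A minimal sketch of the surrounding function, reconstructed around lines 139 and 149 (the memory-cache helpers and the role setup are assumptions modeled on KVM's other shadow-page allocation paths):

    static struct kvm_mmu_page *alloc_tdp_mmu_page(struct kvm_vcpu *vcpu, gfn_t gfn,
                                                   int level)
    {
            struct kvm_mmu_page *sp;

            /* Allocate the header and the page-table page from the vCPU's
             * per-arch memory caches (assumed helpers). */
            sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
            sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);
            set_page_private(virt_to_page(sp->spt), (unsigned long)sp);

            sp->role.word = page_role_for_level(vcpu, level).word;
            sp->gfn = gfn;          /* base GFN mapped by this page table */
            sp->tdp_mmu_page = true;

            return sp;
    }
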
195 static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
215 static void handle_changed_spte_dirty_log(struct kvm *kvm, int as_id, gfn_t gfn,
228 slot = __gfn_to_memslot(__kvm_memslots(kvm, as_id), gfn);
229 mark_page_dirty_in_slot(slot, gfn);
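
Lines 215-229 are the dirty-logging hook: when a 4K SPTE becomes writable (or changes PFN while writable), the gfn is marked dirty in its memslot. A sketch, assuming KVM's standard SPTE predicates:

    static void handle_changed_spte_dirty_log(struct kvm *kvm, int as_id, gfn_t gfn,
                                              u64 old_spte, u64 new_spte, int level)
    {
            struct kvm_memory_slot *slot;
            bool pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);

            /* Dirty logging forces mappings down to 4K, so only leaf
             * entries are logged here. */
            if (level > PG_LEVEL_4K)
                    return;

            if ((!is_writable_pte(old_spte) || pfn_changed) &&
                is_writable_pte(new_spte)) {
                    slot = __gfn_to_memslot(__kvm_memslots(kvm, as_id), gfn);
                    mark_page_dirty_in_slot(slot, gfn);
            }
    }
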
237 * @gfn: the base GFN that was mapped by the SPTE
245 static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
260 WARN_ON(gfn & (KVM_PAGES_PER_HPAGE(level) - 1));
275 "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
276 as_id, gfn, old_spte, new_spte, level);
304 "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
305 as_id, gfn, old_spte, new_spte, level);
331 gfn + (i * KVM_PAGES_PER_HPAGE(level - 1)),
335 kvm_flush_remote_tlbs_with_address(kvm, gfn,
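
Lines 260-335 sit in __handle_changed_spte(): the gfn must be aligned to the level's page size (line 260), impossible old/new SPTE transitions are reported with their as_id/gfn/level context (lines 275 and 304), and tearing down a non-leaf SPTE recurses over every child before one ranged TLB flush. A sketch of that teardown (the locals and the child page-table pointer pt are assumptions):

    /* Removing a non-leaf SPTE orphans a whole page table: zero each
     * child entry and recurse so accounting, access tracking and dirty
     * logging see every mapping that just went away. */
    for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
            old_child_spte = READ_ONCE(*(pt + i));
            WRITE_ONCE(*(pt + i), 0);
            handle_changed_spte(kvm, as_id,
                    gfn + (i * KVM_PAGES_PER_HPAGE(level - 1)),
                    old_child_spte, 0, level - 1);
    }

    /* One flush for the whole range the old SPTE covered. */
    kvm_flush_remote_tlbs_with_address(kvm, gfn,
                                       KVM_PAGES_PER_HPAGE(level));
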
343 static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
346 __handle_changed_spte(kvm, as_id, gfn, old_spte, new_spte, level);
348 handle_changed_spte_dirty_log(kvm, as_id, gfn, old_spte,
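
Lines 343-348 are the wrapper that funnels every SPTE change through the core handler and the logging hooks; roughly (the access-tracking call is an assumption, symmetric with the dirty-log call on line 348):

    static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
                                    u64 old_spte, u64 new_spte, int level)
    {
            __handle_changed_spte(kvm, as_id, gfn, old_spte, new_spte, level);
            handle_changed_spte_acc_track(old_spte, new_spte, level);
            handle_changed_spte_dirty_log(kvm, as_id, gfn, old_spte,
                                          new_spte, level);
    }
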
362 __handle_changed_spte(kvm, as_id, iter->gfn, iter->old_spte, new_spte,
368 handle_changed_spte_dirty_log(kvm, as_id, iter->gfn,
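
Lines 362 and 368 are from the low-level setter that writes a new SPTE through a tdp_iter and then replays the change handlers. A sketch, assuming boolean flags that let callers suppress the access-track and dirty-log side effects, and assuming the root/as_id helpers:

    static inline void __tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
                                          u64 new_spte, bool record_acc_track,
                                          bool record_dirty_log)
    {
            int as_id = kvm_mmu_page_as_id(sptep_to_sp(tdp_iter_root_pt(iter)));

            WRITE_ONCE(*iter->sptep, new_spte);

            __handle_changed_spte(kvm, as_id, iter->gfn, iter->old_spte,
                                  new_spte, iter->level);
            if (record_acc_track)
                    handle_changed_spte_acc_track(iter->old_spte, new_spte,
                                                  iter->level);
            if (record_dirty_log)
                    handle_changed_spte_dirty_log(kvm, as_id, iter->gfn,
                                                  iter->old_spte, new_spte,
                                                  iter->level);
    }
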
435 WARN_ON(iter->gfn > iter->next_last_level_gfn);
480 if ((iter.gfn < start ||
481 iter.gfn + KVM_PAGES_PER_HPAGE(iter.level) > end) &&
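
Line 435 asserts an iterator invariant (the walk never yields a gfn past its target), and lines 480-481 are the containment test in the range zapper: a non-leaf SPTE that only partially overlaps [start, end) must be descended into, not zapped wholesale. In context (a sketch; the leaf check and the zap call are assumptions):

    /*
     * If this entry only partially covers [start, end) and is not a
     * leaf, step down into it instead of zapping the whole range it maps.
     */
    if ((iter.gfn < start ||
         iter.gfn + KVM_PAGES_PER_HPAGE(iter.level) > end) &&
        !is_last_spte(iter.old_spte, iter.level))
            continue;

    tdp_mmu_set_spte(kvm, &iter, 0);
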
534 new_spte = make_mmio_spte(vcpu, iter->gfn, ACC_ALL);
535 trace_mark_mmio_spte(iter->sptep, iter->gfn, new_spte);
537 make_spte_ret = make_spte(vcpu, ACC_ALL, iter->level, iter->gfn,
562 trace_kvm_mmu_set_spte(iter->level, iter->gfn, iter->sptep);
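
Lines 534-562 come from the leaf-installation step of the fault handler: a gfn with no backing memslot gets an MMIO SPTE so the access can be emulated, everything else goes through make_spte(), and the result is written via the iterator. A condensed sketch (the noslot test, the exact make_spte() parameter list, and the return-code plumbing are assumptions):

    if (unlikely(is_noslot_pfn(pfn))) {
            /* No memslot backs this gfn: install an MMIO SPTE. */
            new_spte = make_mmio_spte(vcpu, iter->gfn, ACC_ALL);
            trace_mark_mmio_spte(iter->sptep, iter->gfn, new_spte);
    } else {
            make_spte_ret = make_spte(vcpu, ACC_ALL, iter->level, iter->gfn,
                                      pfn, iter->old_spte, prefault, true,
                                      map_writable, !shadow_accessed_mask,
                                      &new_spte);
    }

    tdp_mmu_set_spte(vcpu->kvm, iter, new_spte);
    trace_kvm_mmu_set_spte(iter->level, iter->gfn, iter->sptep);
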
587 gfn_t gfn = gpa >> PAGE_SHIFT;
596 level = kvm_mmu_hugepage_adjust(vcpu, gfn, max_level, &pfn,
600 tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
602 disallowed_hugepage_adjust(iter.old_spte, gfn,
617 kvm_flush_remote_tlbs_with_address(vcpu->kvm, iter.gfn,
629 sp = alloc_tdp_mmu_page(vcpu, iter.gfn, iter.level);
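
Lines 587-629 outline the fault path: the faulting gpa is converted to a gfn, the target level is adjusted for hugepages, and the walk from the root either zaps an existing oversized SPTE (flushing the range it covered) or links in a freshly allocated page table. A condensed sketch (the locals, the large-SPTE test, and the non-leaf SPTE constructor are assumptions):

    gfn_t gfn = gpa >> PAGE_SHIFT;

    level = kvm_mmu_hugepage_adjust(vcpu, gfn, max_level, &pfn,
                                    huge_page_disallowed, &req_level);

    tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
            if (nx_huge_page_workaround_enabled)
                    disallowed_hugepage_adjust(iter.old_spte, gfn,
                                               iter.level, &pfn, &level);

            if (iter.level == level)
                    break;

            /* A present huge SPTE at the wrong level is zapped, and the
             * range it covered flushed, before a table replaces it. */
            if (is_shadow_present_pte(iter.old_spte) &&
                is_large_pte(iter.old_spte)) {
                    tdp_mmu_set_spte(vcpu->kvm, &iter, 0);
                    kvm_flush_remote_tlbs_with_address(vcpu->kvm, iter.gfn,
                                    KVM_PAGES_PER_HPAGE(iter.level));
            }

            if (!is_shadow_present_pte(iter.old_spte)) {
                    /* Link a new page table and keep walking down. */
                    sp = alloc_tdp_mmu_page(vcpu, iter.gfn, iter.level);
                    tdp_mmu_set_spte(vcpu->kvm, &iter,
                            make_nonleaf_spte(sp->spt, !shadow_accessed_mask));
            }
    }
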
678 * {gfn(page) | page intersects with [hva_start, hva_end)} =
759 struct kvm_mmu_page *root, gfn_t gfn, gfn_t unused,
764 tdp_root_for_each_leaf_pte(iter, root, gfn, gfn + 1)
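
Lines 759-764 match a young-page probe used by the MMU-notifier age handlers: it walks the (at most one) leaf SPTE covering the gfn and reports whether the accessed bit is set. A sketch; the unused trailing parameters mirror the shared hva-range handler signature hinted at by line 678, and is_accessed_spte() is an assumed helper:

    static int test_age_gfn(struct kvm *kvm, struct kvm_memory_slot *slot,
                            struct kvm_mmu_page *root, gfn_t gfn, gfn_t unused,
                            unsigned long unused2)
    {
            struct tdp_iter iter;

            tdp_root_for_each_leaf_pte(iter, root, gfn, gfn + 1)
                    if (is_accessed_spte(iter.old_spte))
                            return 1;

            return 0;
    }
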
784 struct kvm_mmu_page *root, gfn_t gfn, gfn_t unused,
797 tdp_root_for_each_pte(iter, root, gfn, gfn + 1) {
806 kvm_flush_remote_tlbs_with_address(kvm, iter.gfn, 1);
819 kvm_flush_remote_tlbs_with_address(kvm, gfn, 1);
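
Lines 784-819 belong to the change_pte notifier handler: the stale mapping for the gfn is zapped with an immediate single-page flush (line 806), and a trailing flush (line 819) covers the case where a downgraded read-only SPTE was installed for the new pfn. A sketch, assuming the host pte arrives through the opaque data word of the shared handler signature:

    static int set_tdp_spte(struct kvm *kvm, struct kvm_memory_slot *slot,
                            struct kvm_mmu_page *root, gfn_t gfn, gfn_t unused,
                            unsigned long data)
    {
            struct tdp_iter iter;
            pte_t *ptep = (pte_t *)data;
            bool need_flush = false;
            u64 new_spte;

            tdp_root_for_each_pte(iter, root, gfn, gfn + 1) {
                    if (iter.level != PG_LEVEL_4K)
                            continue;

                    if (!is_shadow_present_pte(iter.old_spte))
                            break;

                    /* Zap the stale translation for this gfn... */
                    tdp_mmu_set_spte(kvm, &iter, 0);
                    kvm_flush_remote_tlbs_with_address(kvm, iter.gfn, 1);

                    /* ...and, for a read-only host pte, install a
                     * downgraded SPTE aimed at the new pfn. */
                    if (!pte_write(*ptep)) {
                            new_spte = kvm_mmu_changed_pte_notifier_make_spte(
                                            iter.old_spte, pte_pfn(*ptep));
                            tdp_mmu_set_spte(kvm, &iter, new_spte);
                    }

                    need_flush = true;
            }

            if (need_flush)
                    kvm_flush_remote_tlbs_with_address(kvm, gfn, 1);

            return 0;
    }
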
953 * set in mask, starting at gfn. The given memslot is expected to contain all
959 gfn_t gfn, unsigned long mask, bool wrprot)
964 tdp_root_for_each_leaf_pte(iter, root, gfn + __ffs(mask),
965 gfn + BITS_PER_LONG) {
970 !(mask & (1UL << (iter.gfn - gfn))))
987 mask &= ~(1UL << (iter.gfn - gfn));
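
Lines 953-987 are the masked dirty-log clearer: the walk starts at the first set bit (gfn + __ffs(mask)), ends at gfn + BITS_PER_LONG, skips non-4K levels and gfns whose mask bit is clear (line 970), and clears each handled bit (line 987) so the loop can terminate early once the mask empties. In context (a sketch; the write-protect branch and the no-dirty-log setter are assumptions):

    tdp_root_for_each_leaf_pte(iter, root, gfn + __ffs(mask),
                               gfn + BITS_PER_LONG) {
            if (!mask)
                    break;

            if (iter.level > PG_LEVEL_4K ||
                !(mask & (1UL << (iter.gfn - gfn))))
                    continue;

            if (wrprot || spte_ad_need_write_protect(iter.old_spte)) {
                    if (!is_writable_pte(iter.old_spte))
                            continue;
                    new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
            } else {
                    if (!(iter.old_spte & shadow_dirty_mask))
                            continue;
                    new_spte = iter.old_spte & ~shadow_dirty_mask;
            }

            /* Clear the bit without re-marking the page dirty. */
            tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte);

            mask &= ~(1UL << (iter.gfn - gfn));
    }
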
993 * set in mask, starting at gfn. The given memslot is expected to contain all
1000 gfn_t gfn, unsigned long mask,
1012 clear_dirty_pt_masked(kvm, root, gfn, mask, wrprot);
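
Lines 993-1012 are the exported wrapper, which repeats the per-root comment and fans the call out over every TDP MMU root in the slot's address space; roughly (the root iteration and the as_id filter are assumptions modeled on the other exported walkers):

    void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
                                           struct kvm_memory_slot *slot,
                                           gfn_t gfn, unsigned long mask,
                                           bool wrprot)
    {
            struct kvm_mmu_page *root;

            for_each_tdp_mmu_root(kvm, root) {
                    if (kvm_mmu_page_as_id(root) != slot->as_id)
                            continue;
                    clear_dirty_pt_masked(kvm, root, gfn, mask, wrprot);
            }
    }
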
1129 gfn_t gfn)
1135 tdp_root_for_each_leaf_pte(iter, root, gfn, gfn + 1) {
1155 struct kvm_memory_slot *slot, gfn_t gfn)
1167 spte_set |= write_protect_gfn(kvm, root, gfn);
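
Lines 1129-1167 split single-gfn write protection between a per-root worker and an exported wrapper that ORs the per-root results. A sketch of both (the writable-bit masks and the as_id filter are assumptions):

    static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root,
                                  gfn_t gfn)
    {
            struct tdp_iter iter;
            bool spte_set = false;

            tdp_root_for_each_leaf_pte(iter, root, gfn, gfn + 1) {
                    if (!is_writable_pte(iter.old_spte))
                            break;

                    tdp_mmu_set_spte(kvm, &iter, iter.old_spte &
                                     ~(PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE));
                    spte_set = true;
            }

            return spte_set;
    }

    bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
                                       struct kvm_memory_slot *slot, gfn_t gfn)
    {
            struct kvm_mmu_page *root;
            bool spte_set = false;

            for_each_tdp_mmu_root(kvm, root) {
                    if (kvm_mmu_page_as_id(root) != slot->as_id)
                            continue;
                    spte_set |= write_protect_gfn(kvm, root, gfn);
            }
            return spte_set;
    }
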
1181 gfn_t gfn = addr >> PAGE_SHIFT;
1186 tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
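
The last two matches (lines 1181 and 1186) are from the walk used for MMIO reflection and debug, which records the SPTE seen at each level of the translation for an address. A sketch, assuming the caller passes an sptes[] array indexed by level - 1:

    int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes)
    {
            struct kvm_mmu *mmu = vcpu->arch.mmu;
            gfn_t gfn = addr >> PAGE_SHIFT;
            int leaf = mmu->shadow_root_level;
            struct tdp_iter iter;

            tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
                    leaf = iter.level;
                    sptes[leaf - 1] = iter.old_spte;
            }

            return leaf;
    }
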