Lines Matching defs:gfn

448  * @gfn_offset:	The gfn offset in memory slot
653 int (*handler)(struct kvm *kvm, gfn_t gfn,
668 gfn_t gfn, gfn_end;
677 * {gfn(page) | page intersects with [hva_start, hva_end)} =
680 gfn = hva_to_gfn_memslot(hva_start, memslot);
682 ret |= handler(kvm, gfn, gfn_end, memslot, data);
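
The fragments above are the shared range-handler walk: an HVA range is clipped to each memslot and converted into the [gfn, gfn_end) range that is handed to the per-type handler callback. A minimal standalone sketch of that arithmetic follows; the struct and constants are simplified stand-ins for struct kvm_memory_slot and the kernel's page-size macros, not the kernel code itself.

    /* Standalone sketch, not kernel code: clip an HVA range to one memslot
     * and turn it into a gfn range.  4 KiB pages assumed. */
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    typedef uint64_t gfn_t;

    struct memslot {                        /* simplified stand-in */
            gfn_t    base_gfn;              /* first guest frame in the slot */
            uint64_t npages;                /* slot size in pages */
            uint64_t userspace_addr;        /* HVA where the slot is mapped */
    };

    int main(void)
    {
            struct memslot slot = { .base_gfn = 0x100, .npages = 512,
                                    .userspace_addr = 0x7f0000000000ULL };
            uint64_t hva_start = 0x7f0000003000ULL, hva_end = 0x7f0000005800ULL;

            /* Clip the HVA range to the slot... */
            uint64_t slot_start = slot.userspace_addr;
            uint64_t slot_end   = slot_start + (slot.npages << PAGE_SHIFT);
            if (hva_start < slot_start)
                    hva_start = slot_start;
            if (hva_end > slot_end)
                    hva_end = slot_end;

            /* ...then take {gfn(page) | page intersects [hva_start, hva_end)} */
            gfn_t gfn     = slot.base_gfn + ((hva_start - slot_start) >> PAGE_SHIFT);
            gfn_t gfn_end = slot.base_gfn +
                            ((hva_end + PAGE_SIZE - 1 - slot_start) >> PAGE_SHIFT);

            printf("gfn range: [%#llx, %#llx)\n",
                   (unsigned long long)gfn, (unsigned long long)gfn_end);
            return 0;
    }
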
689 static int kvm_unmap_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
695 kvm_flush_gpa_pt(kvm, gfn, gfn_end - 1, &npages);
709 static int kvm_set_spte_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
712 gpa_t gpa = gfn << PAGE_SHIFT;
752 static int kvm_age_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
755 return kvm_mkold_pgd(kvm->arch.gpa_mm.pgd, gfn << PAGE_SHIFT,
759 static int kvm_test_age_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
762 gpa_t gpa = gfn << PAGE_SHIFT;
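
Each handler above turns its gfn back into a guest physical address with a plain shift. A short standalone reminder of that gfn/gpa relationship, assuming 4 KiB pages:

    /* Standalone sketch: a gfn is a guest physical address with the
     * page-offset bits shifted away. */
    #include <assert.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12

    typedef uint64_t gpa_t;
    typedef uint64_t gfn_t;

    int main(void)
    {
            gpa_t gpa = 0x12345678;
            gfn_t gfn = gpa >> PAGE_SHIFT;          /* 0x12345 */

            /* Shifting back recovers only the page-aligned address. */
            assert((gfn << PAGE_SHIFT) == (gpa & ~((1UL << PAGE_SHIFT) - 1)));
            return 0;
    }
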
890 gfn_t gfn = *gpap >> PAGE_SHIFT;
920 VM_BUG_ON((gfn & mask) != (pfn & mask));
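
The VM_BUG_ON above encodes the precondition for installing a huge mapping: the guest frame and the host frame must sit at the same offset inside a huge page, otherwise one huge entry cannot cover both. A standalone sketch of that check, assuming 4 KiB base pages and a 2 MiB huge page; the real mask depends on the configured page sizes.

    /* Standalone sketch of the rule behind
     * VM_BUG_ON((gfn & mask) != (pfn & mask)). */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PMD_SHIFT  21                           /* 2 MiB huge page, assumed */

    typedef uint64_t gfn_t;
    typedef uint64_t kvm_pfn_t;

    static bool huge_mapping_possible(gfn_t gfn, kvm_pfn_t pfn)
    {
            /* base pages per huge page, minus one */
            uint64_t mask = (1UL << (PMD_SHIFT - PAGE_SHIFT)) - 1;

            return (gfn & mask) == (pfn & mask);
    }

    int main(void)
    {
            printf("%d\n", huge_mapping_possible(0x200, 0x80200));  /* 1: same offset */
            printf("%d\n", huge_mapping_possible(0x200, 0x80201));  /* 0: offsets differ */
            return 0;
    }
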
1014 gfn_t gfn = gpa >> PAGE_SHIFT;
1048 slot = gfn_to_memslot(kvm, gfn);
1065 mark_page_dirty(kvm, gfn);
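
The fragments above are the fast fault path: recover the gfn from the faulting gpa, look up its memslot, and mark the page dirty once the write is allowed. The effect of mark_page_dirty() is to set the bit for the gfn's offset within its slot in that slot's dirty bitmap; the mark_dirty() helper below is a hypothetical standalone stand-in that shows only that bit arithmetic, not the kernel function.

    /* Standalone sketch of dirty-bitmap marking; slot geometry is made up. */
    #include <stdint.h>
    #include <stdio.h>

    #define BITS_PER_LONG (8 * sizeof(unsigned long))

    typedef uint64_t gfn_t;

    static void mark_dirty(unsigned long *dirty_bitmap, gfn_t base_gfn, gfn_t gfn)
    {
            unsigned long rel = gfn - base_gfn;     /* offset within the slot */

            dirty_bitmap[rel / BITS_PER_LONG] |= 1UL << (rel % BITS_PER_LONG);
    }

    int main(void)
    {
            unsigned long dirty_bitmap[8] = { 0 };  /* enough for a 512-page slot */

            mark_dirty(dirty_bitmap, 0x100, 0x103); /* page 3 of the slot */
            printf("word 0 = %#lx\n", dirty_bitmap[0]);     /* bit 3 set: 0x8 */
            return 0;
    }
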
1079 static pte_t *kvm_split_huge(struct kvm_vcpu *vcpu, pte_t *ptep, gfn_t gfn,
1097 return child + (gfn & (PTRS_PER_PTE - 1));
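
kvm_split_huge() breaks one huge mapping into a table of PTRS_PER_PTE small entries and returns the entry that covers the faulting gfn, selected by the gfn's low bits. A standalone illustration of that fill-and-index pattern; pte_t here is a bare integer stand-in, not the kernel's pte_t, and the values are invented.

    /* Standalone sketch of splitting a huge mapping and indexing the child
     * table by the gfn's low bits. */
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT   12
    #define PTRS_PER_PTE 512                        /* one 4 KiB page of 8-byte entries */

    typedef uint64_t gfn_t;
    typedef uint64_t pte_t;                         /* stand-in for the real pte_t */

    static pte_t *split_huge(pte_t *child, uint64_t huge_base_pfn, gfn_t gfn)
    {
            int i;

            /* Fill the child table: entry i maps small page huge_base_pfn + i. */
            for (i = 0; i < PTRS_PER_PTE; i++)
                    child[i] = (huge_base_pfn + i) << PAGE_SHIFT;

            /* Same indexing as in the listing: low bits of the gfn pick the entry. */
            return child + (gfn & (PTRS_PER_PTE - 1));
    }

    int main(void)
    {
            static pte_t child[PTRS_PER_PTE];
            pte_t *ptep = split_huge(child, 0x80000, 0x401a3);

            printf("gfn %#x uses entry %#x, which maps pfn %#llx\n",
                   0x401a3, 0x401a3 & (PTRS_PER_PTE - 1),
                   (unsigned long long)(*ptep >> PAGE_SHIFT));
            return 0;
    }
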
1133 gfn_t gfn = gpa >> PAGE_SHIFT;
1145 memslot = gfn_to_memslot(kvm, gfn);
1146 hva = gfn_to_hva_memslot_prot(memslot, gfn, &writeable);
1168 /* PMD is not folded, adjust gfn to new boundary */
1170 gfn = (gpa & huge_page_mask(hstate_vma(vma))) >> PAGE_SHIFT;
1199 pfn = gfn_to_pfn_prot(kvm, gfn, write, &writeable);
1252 mark_page_dirty(kvm, gfn);
1271 mark_page_dirty(kvm, gfn);
1276 ptep = kvm_split_huge(vcpu, ptep, gfn, vma, hva);
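
The remaining fragments are the slow fault path: look up the memslot and HVA for the gfn, translate gfn to pfn, mark the page dirty, and either install the mapping or split a huge mapping when only a small page may be written. When the backing VMA is huge, the fault gfn is first rounded down to the huge-page boundary ("adjust gfn to new boundary") so the whole huge range is mapped at once. The real mask comes from huge_page_mask(hstate_vma(vma)); the standalone sketch below assumes a fixed 2 MiB huge page for illustration.

    /* Standalone sketch of rounding the faulting gfn down to a huge-page
     * boundary.  Values and the 2 MiB size are assumptions. */
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT     12
    #define HUGE_PAGE_SIZE (1ULL << 21)             /* 2 MiB, assumed */
    #define HUGE_PAGE_MASK (~(HUGE_PAGE_SIZE - 1))

    typedef uint64_t gpa_t;
    typedef uint64_t gfn_t;

    int main(void)
    {
            gpa_t gpa = 0x4031a3000ULL;             /* faulting guest physical address */
            gfn_t gfn = gpa >> PAGE_SHIFT;          /* 0x4031a3 */

            /* PMD is not folded, adjust gfn to the huge-page boundary. */
            gfn = (gpa & HUGE_PAGE_MASK) >> PAGE_SHIFT;

            printf("adjusted gfn: %#llx\n", (unsigned long long)gfn);   /* 0x403000 */
            return 0;
    }
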