Lines Matching defs:gfn
757 static int kvm_read_nested_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
760 return kvm_read_guest_page_mmu(vcpu, vcpu->arch.walk_mmu, gfn,
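
The two matches at 757/760 show a thin wrapper: reads of a nested guest's page go through vcpu->arch.walk_mmu, so the gfn is translated in whatever paging context the vCPU is currently walking. A sketch of how the elided parameters plausibly fill in (the data/offset/len/access arguments are assumptions inferred from the forwarded call, though they match mainline's shape):

    static int kvm_read_nested_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
                                          void *data, int offset, int len,
                                          u32 access)
    {
            /* Delegate to the MMU-aware reader; walk_mmu supplies the
             * translation for the (possibly nested) guest context. */
            return kvm_read_guest_page_mmu(vcpu, vcpu->arch.walk_mmu, gfn,
                                           data, offset, len, access);
    }
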
810 gfn_t gfn;
819 gfn = (kvm_read_cr3(vcpu) & 0xffffffe0ul) >> PAGE_SHIFT;
821 r = kvm_read_nested_guest_page(vcpu, gfn, pdpte, offset, sizeof(pdpte),
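
The arithmetic at 819 is the PAE PDPT address split: CR3 bits 31:5 hold the 32-byte-aligned base of the page-directory-pointer table, so masking with 0xffffffe0 drops the low flag bits, shifting right by PAGE_SHIFT yields the gfn, and the low 12 bits of the masked value give the in-page offset fed to the read at 821 (the offset computation is not among the matches, but mainline derives it the same way). A standalone demonstration; the CR3 value is made up:

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    int main(void)
    {
        uint64_t cr3    = 0x12345f60;              /* example value only   */
        uint64_t base   = cr3 & 0xffffffe0ul;      /* PDPT base, bits 31:5 */
        uint64_t gfn    = base >> PAGE_SHIFT;      /* -> 0x12345           */
        uint64_t offset = base & (PAGE_SIZE - 1);  /* -> 0xf60             */

        printf("gfn=0x%llx offset=0x%llx\n",
               (unsigned long long)gfn, (unsigned long long)offset);
        return 0;
    }
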
4162 * Take the srcu lock as memslots will be accessed to check the gfn
10851 * If the gfn and userspace address are not aligned wrt each
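
The truncated comment at 10851 belongs to memslot setup: a slot can only be backed by hugepages if its base gfn and its userspace address are congruent modulo the hugepage size, so that page-size boundaries line up on both sides of the mapping. A hedged sketch of the test that comment guards (the function and parameter names here are illustrative, not the kernel's):

    #include <stdbool.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12

    /* True iff gfn space and host virtual space agree on where hugepage
     * boundaries fall, so a single large mapping can cover both. */
    static bool hugepage_compatible(uint64_t base_gfn, uint64_t userspace_addr,
                                    uint64_t pages_per_hpage)
    {
        uint64_t ugfn = userspace_addr >> PAGE_SHIFT;

        return ((base_gfn ^ ugfn) & (pages_per_hpage - 1)) == 0;
    }

Mainline performs the equivalent XOR-and-mask check per hugepage level when deciding whether to allow large-page mappings for a slot.
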
11166 static inline u32 kvm_async_pf_hash_fn(gfn_t gfn)
11170 return hash_32(gfn & 0xffffffff, order_base_2(ASYNC_PF_PER_VCPU));
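
kvm_async_pf_hash_fn() at 11166-11170 maps a gfn to a small key: hash_32() is the kernel's multiplicative hash, and order_base_2(ASYNC_PF_PER_VCPU) is the key width in bits (6, assuming mainline's ASYNC_PF_PER_VCPU of 64). A userspace re-creation under those assumptions; the constant is the one in current include/linux/hash.h:

    #include <stdint.h>

    #define ASYNC_PF_PER_VCPU 64u          /* mainline's value; power of two */
    #define GOLDEN_RATIO_32   0x61C88647u  /* from include/linux/hash.h      */

    /* hash_32(val, bits): multiply and keep the top `bits` bits. */
    static uint32_t hash_32(uint32_t val, unsigned int bits)
    {
        return (val * GOLDEN_RATIO_32) >> (32 - bits);
    }

    static uint32_t async_pf_hash(uint64_t gfn)
    {
        /* Only the low 32 bits participate, mirroring gfn & 0xffffffff. */
        return hash_32((uint32_t)gfn, 6);  /* order_base_2(64) == 6 */
    }
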
11178 static void kvm_add_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
11180 u32 key = kvm_async_pf_hash_fn(gfn);
11185 vcpu->arch.apf.gfns[key] = gfn;
11188 static u32 kvm_async_pf_gfn_slot(struct kvm_vcpu *vcpu, gfn_t gfn)
11191 u32 key = kvm_async_pf_hash_fn(gfn);
11194 (vcpu->arch.apf.gfns[key] != gfn &&
11201 bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
11203 return vcpu->arch.apf.gfns[kvm_async_pf_gfn_slot(vcpu, gfn)] == gfn;
11206 static void kvm_del_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
11210 i = j = kvm_async_pf_gfn_slot(vcpu, gfn);
11212 if (WARN_ON_ONCE(vcpu->arch.apf.gfns[i] != gfn))
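
Taken together, 11178-11212 describe one data structure: a fixed-size, open-addressed hash table of in-flight async-page-fault gfns, using linear probing, ~0 as the empty marker, and a deletion loop that back-shifts later entries so no probe chain is cut by the hole. A self-contained sketch of that technique under the same assumptions as above (64 slots, multiplicative hash); it mirrors the shape of the matched lines but is not the kernel's code:

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    #define NSLOTS 64u
    #define EMPTY  (~(uint64_t)0)

    static uint64_t gfns[NSLOTS];

    static uint32_t hash_fn(uint64_t gfn)
    {
        return ((uint32_t)gfn * 0x61C88647u) >> (32 - 6);
    }

    static uint32_t next_probe(uint32_t key)
    {
        return (key + 1) & (NSLOTS - 1);    /* wrap around the table */
    }

    static void add_gfn(uint64_t gfn)
    {
        uint32_t key = hash_fn(gfn);

        while (gfns[key] != EMPTY)          /* linear probe to a free slot */
            key = next_probe(key);
        gfns[key] = gfn;
    }

    /* Returns the slot holding gfn, or the first EMPTY slot on its chain. */
    static uint32_t gfn_slot(uint64_t gfn)
    {
        uint32_t key = hash_fn(gfn);
        uint32_t i;

        for (i = 0; i < NSLOTS &&
                    gfns[key] != gfn && gfns[key] != EMPTY; i++)
            key = next_probe(key);
        return key;
    }

    static void del_gfn(uint64_t gfn)
    {
        uint32_t i, j, k;

        i = j = gfn_slot(gfn);
        if (gfns[i] != gfn)                 /* kernel WARNs here (11212) */
            return;

        for (;;) {
            gfns[i] = EMPTY;                /* open a hole at i */
            do {
                j = next_probe(j);
                if (gfns[j] == EMPTY)
                    return;                 /* chain ends: done */
                k = hash_fn(gfns[j]);
                /* An entry whose home slot k lies cyclically in (i, j]
                 * stays reachable despite the hole, so skip past it. */
            } while ((i <= j) ? (i < k && k <= j) : (i < k || k <= j));
            gfns[i] = gfns[j];              /* pull the entry back...   */
            i = j;                          /* ...and refill its slot   */
        }
    }

    int main(void)
    {
        memset(gfns, 0xff, sizeof(gfns));   /* every slot starts EMPTY */
        add_gfn(0x1234);
        add_gfn(0x5678);
        assert(gfns[gfn_slot(0x1234)] == 0x1234);   /* find: present */
        del_gfn(0x1234);
        assert(gfns[gfn_slot(0x1234)] == EMPTY);    /* find: gone    */
        return 0;
    }

Back-shift deletion is the standard alternative to tombstones in open addressing; it keeps lookups O(cluster length) with no periodic cleanup. The unbounded probe in add_gfn() is safe in context because mainline caps outstanding async faults at the same ASYNC_PF_PER_VCPU, so a free slot always exists.
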
11296 kvm_add_async_pf_gfn(vcpu, work->arch.gfn);
11333 kvm_del_async_pf_gfn(vcpu, work->arch.gfn);
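
The final two matches are the lifecycle of an entry: the gfn is added when an async page fault is queued for a not-yet-present page and deleted once the page is ready, consistent with mainline's kvm_arch_async_page_not_present() and kvm_arch_async_page_present(); kvm_find_async_pf_gfn() (11201) is what the fault path uses to detect a repeated fault on a gfn whose page-in is already pending.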