Lines matching defs:gfn — KVM's gfn_t (guest frame number) helpers in virt/kvm/kvm_main.c: memslot lookup, gfn-to-hva and gfn-to-pfn translation, guest-memory read/write/clear, and dirty-page tracking. Each matched line keeps its line number in the source file.
1753 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
1755 return __gfn_to_memslot(kvm_memslots(kvm), gfn);
1759 struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn)
1761 return __gfn_to_memslot(kvm_vcpu_memslots(vcpu), gfn);
1764 bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
1766 struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn);
1772 bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
1774 struct kvm_memory_slot *memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
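
The memslot lookups above anchor everything that follows: every hva/pfn translation starts by resolving the gfn to a slot. Note that kvm_is_visible_gfn() is stricter than a NULL check, since it also rejects KVM-internal slots. A minimal sketch of a caller, assuming kernel context and a live struct kvm; example_check_gfn() is a hypothetical name, not a function from this file:

#include <linux/kvm_host.h>

/* Hypothetical helper: refuse to touch a gfn that no user-visible
 * memslot backs, then report the slot that contains it. */
static int example_check_gfn(struct kvm *kvm, gfn_t gfn)
{
        struct kvm_memory_slot *slot;

        if (!kvm_is_visible_gfn(kvm, gfn))
                return -EFAULT;

        slot = gfn_to_memslot(kvm, gfn);
        pr_info("gfn 0x%llx -> slot [base 0x%llx, %lu pages]\n",
                gfn, slot->base_gfn, slot->npages);
        return 0;
}
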
1780 unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn)
1787 addr = kvm_vcpu_gfn_to_hva_prot(vcpu, gfn, NULL);
1809 static unsigned long __gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn,
1819 *nr_pages = slot->npages - (gfn - slot->base_gfn);
1821 return __gfn_to_hva_memslot(slot, gfn);
1824 static unsigned long gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn,
1827 return __gfn_to_hva_many(slot, gfn, nr_pages, true);
1831 gfn_t gfn)
1833 return gfn_to_hva_many(slot, gfn, NULL);
1837 unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
1839 return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL);
1843 unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn)
1845 return gfn_to_hva_many(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn, NULL);
1850 * Return the hva of a @gfn and the R/W attribute if possible.
1852 * @slot: the kvm_memory_slot which contains @gfn
1853 * @gfn: the gfn to be translated
1858 gfn_t gfn, bool *writable)
1860 unsigned long hva = __gfn_to_hva_many(slot, gfn, NULL, false);
1868 unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable)
1870 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
1872 return gfn_to_hva_memslot_prot(slot, gfn, writable);
1875 unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable)
1877 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
1879 return gfn_to_hva_memslot_prot(slot, gfn, writable);
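
All of the gfn-to-hva helpers above funnel into __gfn_to_hva_many(), and all report failure through a sentinel address rather than an error code, so callers must test the result with kvm_is_error_hva(). A sketch of that idiom, with a hypothetical helper name:

#include <linux/kvm_host.h>

/* Hypothetical helper: translate a gfn to a host virtual address,
 * reporting whether the backing slot is writable. */
static void __user *example_gfn_to_uptr(struct kvm *kvm, gfn_t gfn,
                                        bool *writable)
{
        unsigned long hva = gfn_to_hva_prot(kvm, gfn, writable);

        if (kvm_is_error_hva(hva))
                return NULL;            /* gfn not backed by a memslot */

        /* hva is an ordinary userspace pointer into the VMM's mm */
        return (void __user *)hva;
}
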
2104 kvm_pfn_t __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn,
2108 unsigned long addr = __gfn_to_hva_many(slot, gfn, NULL, write_fault);
2133 kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
2136 return __gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn, false, NULL,
2141 kvm_pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
2143 return __gfn_to_pfn_memslot(slot, gfn, false, NULL, true, NULL);
2147 kvm_pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn)
2149 return __gfn_to_pfn_memslot(slot, gfn, true, NULL, true, NULL);
2153 kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn)
2155 return gfn_to_pfn_memslot_atomic(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn);
2159 kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
2161 return gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn);
2165 kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn)
2167 return gfn_to_pfn_memslot(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn);
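
Unlike the hva lookups, the pfn lookups resolve (and normally pin) the backing host page, so a successful lookup must be paired with a release; failure is encoded in the returned pfn and checked with is_error_noslot_pfn(). A sketch under those assumptions, again with a hypothetical name:

#include <linux/kvm_host.h>

/* Hypothetical helper: pin the host page behind a gfn, use it, and
 * drop the reference. gfn_to_pfn() may sleep and fault the page in. */
static int example_with_pfn(struct kvm *kvm, gfn_t gfn)
{
        kvm_pfn_t pfn = gfn_to_pfn(kvm, gfn);

        if (is_error_noslot_pfn(pfn))
                return -EFAULT;

        /* ... use the pfn (e.g. install it in a stage-2 mapping) ... */

        kvm_release_pfn_clean(pfn);     /* use _dirty() after writes */
        return 0;
}
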
2171 int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
2177 addr = gfn_to_hva_many(slot, gfn, &entry);
2201 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
2205 pfn = gfn_to_pfn(kvm, gfn);
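
gfn_to_page() is a thin wrapper over gfn_to_pfn() for memory backed by a refcounted struct page; its error case is a sentinel page pointer, tested with is_error_page(). A short sketch with a hypothetical name:

#include <linux/kvm_host.h>

/* Hypothetical helper: get and release the struct page for a gfn. */
static int example_with_page(struct kvm *kvm, gfn_t gfn)
{
        struct page *page = gfn_to_page(kvm, gfn);

        if (is_error_page(page))
                return -EFAULT;

        /* ... e.g. kmap(page), operate on it, kunmap(page) ... */

        kvm_release_page_clean(page);   /* use _dirty() after writes */
        return 0;
}
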
2217 cache->pfn = cache->gfn = 0;
2225 static void kvm_cache_gfn_to_pfn(struct kvm_memory_slot *slot, gfn_t gfn,
2230 cache->pfn = gfn_to_pfn_memslot(slot, gfn);
2231 cache->gfn = gfn;
2236 static int __kvm_map_gfn(struct kvm_memslots *slots, gfn_t gfn,
2244 struct kvm_memory_slot *slot = __gfn_to_memslot(slots, gfn);
2251 if (!cache->pfn || cache->gfn != gfn ||
2255 kvm_cache_gfn_to_pfn(slot, gfn, cache, gen);
2261 pfn = gfn_to_pfn_memslot(slot, gfn);
2286 map->gfn = gfn;
2291 int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map,
2294 return __kvm_map_gfn(kvm_memslots(vcpu->kvm), gfn, map,
2299 int kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
2301 return __kvm_map_gfn(kvm_vcpu_memslots(vcpu), gfn, map,
2331 mark_page_dirty_in_slot(memslot, map->gfn);
2345 __kvm_unmap_gfn(gfn_to_memslot(vcpu->kvm, map->gfn), map,
2353 __kvm_unmap_gfn(kvm_vcpu_gfn_to_memslot(vcpu, map->gfn), map, NULL,
2358 struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn)
2362 pfn = kvm_vcpu_gfn_to_pfn(vcpu, gfn);
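
kvm_vcpu_map() and __kvm_map_gfn() above combine the pfn lookup with a kernel mapping (optionally served from the small gfn-to-pfn cache in the kvm_cache_gfn_to_pfn() lines), so a vCPU can read or write one guest page through map->hva. A sketch of the map/modify/unmap pattern; example_map_write() is hypothetical:

#include <linux/kvm_host.h>

/* Hypothetical helper: write one u32 at the start of a guest page
 * from vCPU context via a temporary kernel mapping. */
static int example_map_write(struct kvm_vcpu *vcpu, gfn_t gfn, u32 val)
{
        struct kvm_host_map map;

        if (kvm_vcpu_map(vcpu, gfn, &map))
                return -EFAULT;

        *(u32 *)map.hva = val;
        kvm_vcpu_unmap(vcpu, &map, true);       /* true: mark page dirty */
        return 0;
}
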
2439 static int __kvm_read_guest_page(struct kvm_memory_slot *slot, gfn_t gfn,
2445 addr = gfn_to_hva_memslot_prot(slot, gfn, NULL);
2454 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
2457 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
2459 return __kvm_read_guest_page(slot, gfn, data, offset, len);
2463 int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data,
2466 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
2468 return __kvm_read_guest_page(slot, gfn, data, offset, len);
2474 gfn_t gfn = gpa >> PAGE_SHIFT;
2480 ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
2486 ++gfn;
2494 gfn_t gfn = gpa >> PAGE_SHIFT;
2500 ret = kvm_vcpu_read_guest_page(vcpu, gfn, data, offset, seg);
2506 ++gfn;
2512 static int __kvm_read_guest_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
2518 addr = gfn_to_hva_memslot_prot(slot, gfn, NULL);
2532 gfn_t gfn = gpa >> PAGE_SHIFT;
2533 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
2536 return __kvm_read_guest_atomic(slot, gfn, data, offset, len);
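
The read helpers copy guest memory out through the hva rather than a pinned page: kvm_read_guest() (the gpa-based loop at 2474–2486) splits the copy per gfn so the range may cross page boundaries, while kvm_read_guest_page() handles a single page. A sketch of both paths, with a hypothetical helper name:

#include <linux/kvm_host.h>

/* Hypothetical helper: read a u64 from guest physical memory. */
static int example_read_u64(struct kvm *kvm, gpa_t gpa, u64 *out)
{
        gfn_t gfn = gpa >> PAGE_SHIFT;
        int offset = offset_in_page(gpa);

        /* fast path when the value cannot straddle a page boundary */
        if (offset + sizeof(*out) <= PAGE_SIZE)
                return kvm_read_guest_page(kvm, gfn, out, offset,
                                           sizeof(*out));

        /* general case: kvm_read_guest() loops gfn by gfn */
        return kvm_read_guest(kvm, gpa, out, sizeof(*out));
}
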
2540 static int __kvm_write_guest_page(struct kvm_memory_slot *memslot, gfn_t gfn,
2546 addr = gfn_to_hva_memslot(memslot, gfn);
2552 mark_page_dirty_in_slot(memslot, gfn);
2556 int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn,
2559 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
2561 return __kvm_write_guest_page(slot, gfn, data, offset, len);
2565 int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
2568 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
2570 return __kvm_write_guest_page(slot, gfn, data, offset, len);
2577 gfn_t gfn = gpa >> PAGE_SHIFT;
2583 ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
2589 ++gfn;
2598 gfn_t gfn = gpa >> PAGE_SHIFT;
2604 ret = kvm_vcpu_write_guest_page(vcpu, gfn, data, offset, seg);
2610 ++gfn;
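
The write helpers mirror the read side with one extra step: __kvm_write_guest_page() (line 2552 above) marks the page dirty in the slot's bitmap after a successful copy, so dirty logging and migration see the update. A sketch, hypothetical name included:

#include <linux/kvm_host.h>

/* Hypothetical helper: write a u64 to guest physical memory; dirty
 * logging is handled inside kvm_write_guest(). */
static int example_write_u64(struct kvm *kvm, gpa_t gpa, u64 val)
{
        return kvm_write_guest(kvm, gpa, &val, sizeof(val));
}
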
2740 int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
2744 return kvm_write_guest_page(kvm, gfn, zero_page, offset, len);
2750 gfn_t gfn = gpa >> PAGE_SHIFT;
2756 ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
2761 ++gfn;
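
kvm_clear_guest_page() is just a write of the shared kernel zero page (line 2744 above), and kvm_clear_guest() applies it gfn by gfn across an arbitrary range. A sketch for zeroing a page-local field, with a hypothetical name:

#include <linux/kvm_host.h>

/* Hypothetical helper: zero len bytes inside one guest page; the
 * caller guarantees offset + len does not cross the page boundary. */
static int example_clear_field(struct kvm *kvm, gpa_t gpa, int len)
{
        return kvm_clear_guest_page(kvm, gpa >> PAGE_SHIFT,
                                    offset_in_page(gpa), len);
}
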
2767 void mark_page_dirty_in_slot(struct kvm_memory_slot *memslot, gfn_t gfn)
2770 unsigned long rel_gfn = gfn - memslot->base_gfn;
2777 void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
2781 memslot = gfn_to_memslot(kvm, gfn);
2782 mark_page_dirty_in_slot(memslot, gfn);
2786 void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn)
2790 memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
2791 mark_page_dirty_in_slot(memslot, gfn);
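
mark_page_dirty() and kvm_vcpu_mark_page_dirty() exist for callers that modify guest memory without going through the write helpers above (for example through a raw kvm_host_map unmapped with dirty == false): they translate the gfn back to its slot and set the bit in the slot's dirty bitmap. A final sketch, hypothetical name included:

#include <linux/kvm_host.h>

/* Hypothetical helper: log a write done through some raw mapping so
 * dirty tracking / live migration picks it up. */
static void example_log_guest_write(struct kvm_vcpu *vcpu, gpa_t gpa)
{
        kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT);
}
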