Searched refs:gfn (Results 1 - 25 of 150) sorted by relevance


/kernel/linux/linux-5.10/drivers/gpu/drm/i915/gvt/
page_track.c 29 * @gfn: the gfn of guest page
35 struct intel_vgpu *vgpu, unsigned long gfn) in intel_vgpu_find_page_track()
37 return radix_tree_lookup(&vgpu->page_track_tree, gfn); in intel_vgpu_find_page_track()
43 * @gfn: the gfn of guest page
50 int intel_vgpu_register_page_track(struct intel_vgpu *vgpu, unsigned long gfn, in intel_vgpu_register_page_track() argument
56 track = intel_vgpu_find_page_track(vgpu, gfn); in intel_vgpu_register_page_track()
67 ret = radix_tree_insert(&vgpu->page_track_tree, gfn, track); in intel_vgpu_register_page_track()
79 * @gfn
34 intel_vgpu_find_page_track( struct intel_vgpu *vgpu, unsigned long gfn) intel_vgpu_find_page_track() argument
82 intel_vgpu_unregister_page_track(struct intel_vgpu *vgpu, unsigned long gfn) intel_vgpu_unregister_page_track() argument
103 intel_vgpu_enable_page_track(struct intel_vgpu *vgpu, unsigned long gfn) intel_vgpu_enable_page_track() argument
130 intel_vgpu_disable_page_track(struct intel_vgpu *vgpu, unsigned long gfn) intel_vgpu_disable_page_track() argument
[all...]
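
The GVT-g tracker above keys everything on the gfn: each tracked guest page gets one node in vgpu->page_track_tree (a radix tree indexed by gfn), and enable/disable toggle write protection for that gfn in the hypervisor backend. A minimal usage sketch; the handler prototype (gpa/data/bytes) is an assumption taken from page_track.h rather than from the hits above:

/* hypothetical caller; my_write_handler and its prototype are assumptions */
static int my_write_handler(struct intel_vgpu *vgpu, u64 gpa,
                            void *data, int bytes)
{
        /* invoked when the guest writes the tracked page */
        return 0;
}

static int track_guest_page(struct intel_vgpu *vgpu, unsigned long gfn)
{
        int ret;

        /* allocate a track node and insert it at index gfn */
        ret = intel_vgpu_register_page_track(vgpu, gfn, my_write_handler, NULL);
        if (ret)
                return ret;

        /* ask the hypervisor backend to write-protect the page */
        return intel_vgpu_enable_page_track(vgpu, gfn);
}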
mpt.h 157 * @gfn: the gfn of guest
163 struct intel_vgpu *vgpu, unsigned long gfn) in intel_gvt_hypervisor_enable_page_track()
165 return intel_gvt_host.mpt->enable_page_track(vgpu->handle, gfn); in intel_gvt_hypervisor_enable_page_track()
171 * @gfn: the gfn of guest
177 struct intel_vgpu *vgpu, unsigned long gfn) in intel_gvt_hypervisor_disable_page_track()
179 return intel_gvt_host.mpt->disable_page_track(vgpu->handle, gfn); in intel_gvt_hypervisor_disable_page_track()
223 struct intel_vgpu *vgpu, unsigned long gfn) in intel_gvt_hypervisor_gfn_to_mfn()
225 return intel_gvt_host.mpt->gfn_to_mfn(vgpu->handle, gfn); in intel_gvt_hypervisor_gfn_to_mfn()
162 intel_gvt_hypervisor_enable_page_track( struct intel_vgpu *vgpu, unsigned long gfn) intel_gvt_hypervisor_enable_page_track() argument
176 intel_gvt_hypervisor_disable_page_track( struct intel_vgpu *vgpu, unsigned long gfn) intel_gvt_hypervisor_disable_page_track() argument
222 intel_gvt_hypervisor_gfn_to_mfn( struct intel_vgpu *vgpu, unsigned long gfn) intel_gvt_hypervisor_gfn_to_mfn() argument
238 intel_gvt_hypervisor_dma_map_guest_page( struct intel_vgpu *vgpu, unsigned long gfn, unsigned long size, dma_addr_t *dma_addr) intel_gvt_hypervisor_dma_map_guest_page() argument
283 intel_gvt_hypervisor_map_gfn_to_mfn( struct intel_vgpu *vgpu, unsigned long gfn, unsigned long mfn, unsigned int nr, bool map) intel_gvt_hypervisor_map_gfn_to_mfn() argument
386 intel_gvt_hypervisor_is_valid_gfn( struct intel_vgpu *vgpu, unsigned long gfn) intel_gvt_hypervisor_is_valid_gfn() argument
[all...]
hypercall.h 57 int (*enable_page_track)(unsigned long handle, u64 gfn);
58 int (*disable_page_track)(unsigned long handle, u64 gfn);
63 unsigned long (*gfn_to_mfn)(unsigned long handle, unsigned long gfn);
65 int (*dma_map_guest_page)(unsigned long handle, unsigned long gfn,
71 int (*map_gfn_to_mfn)(unsigned long handle, unsigned long gfn,
79 bool (*is_valid_gfn)(unsigned long handle, unsigned long gfn);
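
mpt.h and hypercall.h together form one indirection layer: the inline intel_gvt_hypervisor_*() wrappers dispatch through the ops table in intel_gvt_host.mpt, and a backend supplies the table. A sketch of how the KVMGT backend from the kvmgt.c hits below could populate it; the struct layout is abridged and the exact field order is an assumption:

/* abridged ops table; the kvmgt_* symbols appear in the kvmgt.c hits below */
static struct intel_gvt_mpt example_mpt = {
        .enable_page_track      = kvmgt_page_track_add,
        .disable_page_track     = kvmgt_page_track_remove,
        .gfn_to_mfn             = kvmgt_gfn_to_pfn,
        .dma_map_guest_page     = kvmgt_dma_map_guest_page,
        .is_valid_gfn           = kvmgt_is_valid_gfn,
        /* ... map_gfn_to_mfn and the remaining hooks ... */
};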
page_track.h 45 struct intel_vgpu *vgpu, unsigned long gfn);
48 unsigned long gfn, gvt_page_track_handler_t handler,
51 unsigned long gfn);
53 int intel_vgpu_enable_page_track(struct intel_vgpu *vgpu, unsigned long gfn);
54 int intel_vgpu_disable_page_track(struct intel_vgpu *vgpu, unsigned long gfn);
kvmgt.c 87 gfn_t gfn; member
105 gfn_t gfn; member
151 static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn, in gvt_unpin_guest_page() argument
163 unsigned long cur_gfn = gfn + npage; in gvt_unpin_guest_page()
171 static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn, in gvt_pin_guest_page() argument
186 unsigned long cur_gfn = gfn + npage; in gvt_pin_guest_page()
192 gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx, ret %d\n", in gvt_pin_guest_page()
217 gvt_unpin_guest_page(vgpu, gfn, npage * PAGE_SIZE); in gvt_pin_guest_page()
221 static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn, in gvt_dma_map_page() argument
228 ret = gvt_pin_guest_page(vgpu, gfn, size, in gvt_dma_map_page()
244 gvt_dma_unmap_page(struct intel_vgpu *vgpu, unsigned long gfn, dma_addr_t dma_addr, unsigned long size) gvt_dma_unmap_page() argument
272 __gvt_cache_find_gfn(struct intel_vgpu *vgpu, gfn_t gfn) __gvt_cache_find_gfn() argument
290 __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn, dma_addr_t dma_addr, unsigned long size) __gvt_cache_add() argument
399 __kvmgt_protect_table_find(struct kvmgt_guest_info *info, gfn_t gfn) __kvmgt_protect_table_find() argument
413 kvmgt_gfn_is_write_protected(struct kvmgt_guest_info *info, gfn_t gfn) kvmgt_gfn_is_write_protected() argument
422 kvmgt_protect_table_add(struct kvmgt_guest_info *info, gfn_t gfn) kvmgt_protect_table_add() argument
437 kvmgt_protect_table_del(struct kvmgt_guest_info *info, gfn_t gfn) kvmgt_protect_table_del() argument
1686 kvmgt_page_track_add(unsigned long handle, u64 gfn) kvmgt_page_track_add() argument
1720 kvmgt_page_track_remove(unsigned long handle, u64 gfn) kvmgt_page_track_remove() argument
1771 gfn_t gfn; kvmgt_page_track_flush_slot() local
1932 kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn) kvmgt_gfn_to_pfn() argument
1949 kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn, unsigned long size, dma_addr_t *dma_addr) kvmgt_dma_map_guest_page() argument
2084 kvmgt_is_valid_gfn(unsigned long handle, unsigned long gfn) kvmgt_is_valid_gfn() argument
[all...]
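
gvt_pin_guest_page() pins the guest range one 4K page at a time and only succeeds if the resulting host pfns are contiguous, because the caller DMA-maps the range as one block. A simplified sketch of that loop; the VFIO pin call is abstracted behind a hypothetical pin_one_page() since its signature differs between 5.10 and 6.6:

static int pin_contiguous(struct intel_vgpu *vgpu, unsigned long gfn,
                          unsigned long size, unsigned long *base_pfn)
{
        unsigned long npage;

        for (npage = 0; npage < size / PAGE_SIZE; npage++) {
                unsigned long cur_gfn = gfn + npage;
                unsigned long pfn = pin_one_page(vgpu, cur_gfn); /* hypothetical */

                if (npage == 0)
                        *base_pfn = pfn;
                else if (*base_pfn + npage != pfn) {
                        /* discontiguous backing: unwind and fail */
                        gvt_unpin_guest_page(vgpu, gfn, npage * PAGE_SIZE);
                        return -EINVAL;
                }
        }
        return 0;
}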
/kernel/linux/linux-6.6/drivers/gpu/drm/i915/gvt/
page_track.c 29 * @gfn: the gfn of guest page
35 struct intel_vgpu *vgpu, unsigned long gfn) in intel_vgpu_find_page_track()
37 return radix_tree_lookup(&vgpu->page_track_tree, gfn); in intel_vgpu_find_page_track()
43 * @gfn: the gfn of guest page
50 int intel_vgpu_register_page_track(struct intel_vgpu *vgpu, unsigned long gfn, in intel_vgpu_register_page_track() argument
56 track = intel_vgpu_find_page_track(vgpu, gfn); in intel_vgpu_register_page_track()
67 ret = radix_tree_insert(&vgpu->page_track_tree, gfn, track); in intel_vgpu_register_page_track()
79 * @gfn
34 intel_vgpu_find_page_track( struct intel_vgpu *vgpu, unsigned long gfn) intel_vgpu_find_page_track() argument
82 intel_vgpu_unregister_page_track(struct intel_vgpu *vgpu, unsigned long gfn) intel_vgpu_unregister_page_track() argument
103 intel_vgpu_enable_page_track(struct intel_vgpu *vgpu, unsigned long gfn) intel_vgpu_enable_page_track() argument
130 intel_vgpu_disable_page_track(struct intel_vgpu *vgpu, unsigned long gfn) intel_vgpu_disable_page_track() argument
[all...]
page_track.h 45 struct intel_vgpu *vgpu, unsigned long gfn);
48 unsigned long gfn, gvt_page_track_handler_t handler,
51 unsigned long gfn);
53 int intel_vgpu_enable_page_track(struct intel_vgpu *vgpu, unsigned long gfn);
54 int intel_vgpu_disable_page_track(struct intel_vgpu *vgpu, unsigned long gfn);
kvmgt.c 92 gfn_t gfn; member
100 gfn_t gfn; member
111 static void kvmgt_page_track_remove_region(gfn_t gfn, unsigned long nr_pages,
128 static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn, in gvt_unpin_guest_page() argument
131 vfio_unpin_pages(&vgpu->vfio_device, gfn << PAGE_SHIFT, in gvt_unpin_guest_page()
136 static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn, in gvt_pin_guest_page() argument
149 dma_addr_t cur_iova = (gfn + npage) << PAGE_SHIFT; in gvt_pin_guest_page()
173 gvt_unpin_guest_page(vgpu, gfn, npage * PAGE_SIZE); in gvt_pin_guest_page()
177 static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn, in gvt_dma_map_page() argument
184 ret = gvt_pin_guest_page(vgpu, gfn, size, in gvt_dma_map_page()
200 gvt_dma_unmap_page(struct intel_vgpu *vgpu, unsigned long gfn, dma_addr_t dma_addr, unsigned long size) gvt_dma_unmap_page() argument
228 __gvt_cache_find_gfn(struct intel_vgpu *vgpu, gfn_t gfn) __gvt_cache_find_gfn() argument
246 __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn, dma_addr_t dma_addr, unsigned long size) __gvt_cache_add() argument
349 __kvmgt_protect_table_find(struct intel_vgpu *info, gfn_t gfn) __kvmgt_protect_table_find() argument
365 kvmgt_gfn_is_write_protected(struct intel_vgpu *info, gfn_t gfn) kvmgt_gfn_is_write_protected() argument
373 kvmgt_protect_table_add(struct intel_vgpu *info, gfn_t gfn) kvmgt_protect_table_add() argument
388 kvmgt_protect_table_del(struct intel_vgpu *info, gfn_t gfn) kvmgt_protect_table_del() argument
1545 intel_gvt_page_track_add(struct intel_vgpu *info, u64 gfn) intel_gvt_page_track_add() argument
1563 intel_gvt_page_track_remove(struct intel_vgpu *info, u64 gfn) intel_gvt_page_track_remove() argument
1596 kvmgt_page_track_remove_region(gfn_t gfn, unsigned long nr_pages, struct kvm_page_track_notifier_node *node) kvmgt_page_track_remove_region() argument
1629 intel_gvt_dma_map_guest_page(struct intel_vgpu *vgpu, unsigned long gfn, unsigned long size, dma_addr_t *dma_addr) intel_gvt_dma_map_guest_page() argument
[all...]
/kernel/linux/linux-6.6/arch/x86/kvm/mmu/
mmutrace.h 13 __field(__u64, gfn) \
20 __entry->gfn = sp->gfn; \
34 trace_seq_printf(p, "sp gen %u gfn %llx l%u %u-byte q%u%s %s%s" \
37 __entry->gfn, role.level, \
212 TP_PROTO(u64 *sptep, gfn_t gfn, u64 spte),
213 TP_ARGS(sptep, gfn, spte),
217 __field(gfn_t, gfn)
224 __entry->gfn = gfn;
[all...]
page_track.c 62 static void update_gfn_write_track(struct kvm_memory_slot *slot, gfn_t gfn, in update_gfn_write_track() argument
67 index = gfn_to_index(gfn, slot->base_gfn, PG_LEVEL_4K); in update_gfn_write_track()
78 gfn_t gfn) in __kvm_write_track_add_gfn()
88 update_gfn_write_track(slot, gfn, 1); in __kvm_write_track_add_gfn()
94 kvm_mmu_gfn_disallow_lpage(slot, gfn); in __kvm_write_track_add_gfn()
96 if (kvm_mmu_slot_gfn_write_protect(kvm, slot, gfn, PG_LEVEL_4K)) in __kvm_write_track_add_gfn()
101 struct kvm_memory_slot *slot, gfn_t gfn) in __kvm_write_track_remove_gfn()
111 update_gfn_write_track(slot, gfn, -1); in __kvm_write_track_remove_gfn()
117 kvm_mmu_gfn_allow_lpage(slot, gfn); in __kvm_write_track_remove_gfn()
124 const struct kvm_memory_slot *slot, gfn_t gfn) in kvm_gfn_is_write_tracked()
77 __kvm_write_track_add_gfn(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn) __kvm_write_track_add_gfn() argument
100 __kvm_write_track_remove_gfn(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn) __kvm_write_track_remove_gfn() argument
123 kvm_gfn_is_write_tracked(struct kvm *kvm, const struct kvm_memory_slot *slot, gfn_t gfn) kvm_gfn_is_write_tracked() argument
255 kvm_write_track_add_gfn(struct kvm *kvm, gfn_t gfn) kvm_write_track_add_gfn() argument
285 kvm_write_track_remove_gfn(struct kvm *kvm, gfn_t gfn) kvm_write_track_remove_gfn() argument
[all...]
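
In 6.6 the externally visible API is kvm_write_track_add_gfn()/kvm_write_track_remove_gfn() (lines 255/285 above); internally each add bumps a per-slot counter for the gfn, disallows large pages covering it, and write-protects the 4K mapping. A minimal model of the counter update, assuming the 4K case where gfn_to_index() reduces to a slot-relative offset:

/* model only: "count" stands in for the slot's write-track counter array */
static void model_update_write_track(unsigned short *count, gfn_t gfn,
                                     gfn_t base_gfn, short delta)
{
        unsigned long index = gfn - base_gfn;   /* gfn_to_index() at PG_LEVEL_4K */

        count[index] += delta;                  /* nonzero => gfn is write-tracked */
}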
tdp_mmu.c 192 gfn_t gfn, union kvm_mmu_page_role role) in tdp_mmu_init_sp()
199 sp->gfn = gfn; in tdp_mmu_init_sp()
217 tdp_mmu_init_sp(child_sp, iter->sptep, iter->gfn, role); in tdp_mmu_init_child_sp()
258 static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
324 gfn_t base_gfn = sp->gfn; in handle_removed_pt()
333 gfn_t gfn = base_gfn + i * KVM_PAGES_PER_HPAGE(level); in handle_removed_pt() local
396 handle_changed_spte(kvm, kvm_mmu_page_as_id(sp), gfn, in handle_removed_pt()
407 * @gfn: the base GFN that was mapped by the SPTE
419 static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn, in handle_changed_spte() argument
191 tdp_mmu_init_sp(struct kvm_mmu_page *sp, tdp_ptep_t sptep, gfn_t gfn, union kvm_mmu_page_role role) tdp_mmu_init_sp() argument
603 tdp_mmu_set_spte(struct kvm *kvm, int as_id, tdp_ptep_t sptep, u64 old_spte, u64 new_spte, gfn_t gfn, int level) tdp_mmu_set_spte() argument
1578 clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root, gfn_t gfn, unsigned long mask, bool wrprot) clear_dirty_pt_masked() argument
1626 kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn, unsigned long mask, bool wrprot) kvm_tdp_mmu_clear_dirty_pt_masked() argument
1708 write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root, gfn_t gfn, int min_level) write_protect_gfn() argument
1744 kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn, int min_level) kvm_tdp_mmu_write_protect_gfn() argument
1769 gfn_t gfn = addr >> PAGE_SHIFT; kvm_tdp_mmu_get_walk() local
1798 gfn_t gfn = addr >> PAGE_SHIFT; kvm_tdp_mmu_fast_pf_get_last_sptep() local
[all...]
mmu_internal.h 80 gfn_t gfn; member
114 * e.g. because KVM is shadowing a PTE at the same gfn, the memslot
160 static inline gfn_t gfn_round_for_level(gfn_t gfn, int level) in gfn_round_for_level() argument
162 return gfn & -KVM_PAGES_PER_HPAGE(level); in gfn_round_for_level()
166 gfn_t gfn, bool can_unsync, bool prefetch);
168 void kvm_mmu_gfn_disallow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn);
169 void kvm_mmu_gfn_allow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn);
171 struct kvm_memory_slot *slot, u64 gfn,
175 static inline void kvm_flush_remote_tlbs_gfn(struct kvm *kvm, gfn_t gfn, int level) in kvm_flush_remote_tlbs_gfn() argument
177 kvm_flush_remote_tlbs_range(kvm, gfn_round_for_level(gfn, level) in kvm_flush_remote_tlbs_gfn()
231 gfn_t gfn; global() member
[all...]
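
gfn_round_for_level() at line 160 above works because KVM_PAGES_PER_HPAGE(level) is a power of two (2^((level-1)*9) with 4K base pages), so "gfn & -pages" clears the in-hugepage offset bits. A self-contained userspace demonstration of the same arithmetic:

#include <stdio.h>

/* model of KVM_PAGES_PER_HPAGE for 4K base pages, 9 bits per level */
#define PAGES_PER_HPAGE(level)  (1ULL << (((level) - 1) * 9))

static unsigned long long round_for_level(unsigned long long gfn, int level)
{
        return gfn & -PAGES_PER_HPAGE(level);   /* clear offset bits */
}

int main(void)
{
        /* level 2 = 2MiB regions = 512 gfns: 0x12345 rounds down to 0x12200 */
        printf("%llx\n", round_for_level(0x12345ULL, 2));
        return 0;
}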
tdp_iter.c 15 SPTE_INDEX(iter->gfn << PAGE_SHIFT, iter->level); in tdp_iter_refresh_sptep()
29 iter->gfn = gfn_round_for_level(iter->next_last_level_gfn, iter->level); in tdp_iter_restart()
97 iter->gfn = gfn_round_for_level(iter->next_last_level_gfn, iter->level); in try_step_down()
116 if (SPTE_INDEX(iter->gfn << PAGE_SHIFT, iter->level) == in try_step_side()
120 iter->gfn += KVM_PAGES_PER_HPAGE(iter->level); in try_step_side()
121 iter->next_last_level_gfn = iter->gfn; in try_step_side()
139 iter->gfn = gfn_round_for_level(iter->gfn, iter->level); in try_step_up()
mmu.c 277 int kvm_arch_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages) in kvm_arch_flush_remote_tlbs_range() argument
282 return static_call(kvm_x86_flush_remote_tlbs_range)(kvm, gfn, nr_pages); in kvm_arch_flush_remote_tlbs_range()
291 gfn_t gfn = kvm_mmu_page_get_gfn(sp, spte_index(sptep)); in kvm_flush_remote_tlbs_sptep() local
293 kvm_flush_remote_tlbs_gfn(kvm, gfn, sp->role.level); in kvm_flush_remote_tlbs_sptep()
296 static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn, in mark_mmio_spte() argument
299 u64 spte = make_mmio_spte(vcpu, gfn, access); in mark_mmio_spte()
301 trace_mark_mmio_spte(sptep, gfn, spte); in mark_mmio_spte()
724 return sp->gfn; in kvm_mmu_page_get_gfn()
729 return sp->gfn + (index << ((sp->role.level - 1) * SPTE_LEVEL_BITS)); in kvm_mmu_page_get_gfn()
759 gfn_t gfn, unsigned int access) in kvm_mmu_page_set_translation()
758 kvm_mmu_page_set_translation(struct kvm_mmu_page *sp, int index, gfn_t gfn, unsigned int access) kvm_mmu_page_set_translation() argument
780 gfn_t gfn = kvm_mmu_page_get_gfn(sp, index); kvm_mmu_page_set_access() local
789 lpage_info_slot(gfn_t gfn, const struct kvm_memory_slot *slot, int level) lpage_info_slot() argument
798 update_gfn_disallow_lpage_count(const struct kvm_memory_slot *slot, gfn_t gfn, int count) update_gfn_disallow_lpage_count() argument
811 kvm_mmu_gfn_disallow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn) kvm_mmu_gfn_disallow_lpage() argument
816 kvm_mmu_gfn_allow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn) kvm_mmu_gfn_allow_lpage() argument
825 gfn_t gfn; account_shadowed() local
873 gfn_t gfn; unaccount_shadowed() local
901 gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn, bool no_dirty_log) gfn_to_memslot_dirty_bitmap() argument
1080 gfn_to_rmap(gfn_t gfn, int level, const struct kvm_memory_slot *slot) gfn_to_rmap() argument
1094 gfn_t gfn; rmap_remove() local
1408 kvm_mmu_slot_gfn_write_protect(struct kvm *kvm, struct kvm_memory_slot *slot, u64 gfn, int min_level) kvm_mmu_slot_gfn_write_protect() argument
1430 kvm_vcpu_write_protect_gfn(struct kvm_vcpu *vcpu, u64 gfn) kvm_vcpu_write_protect_gfn() argument
1444 kvm_zap_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head, struct kvm_memory_slot *slot, gfn_t gfn, int level, pte_t unused) kvm_zap_rmap() argument
1451 kvm_set_pte_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head, struct kvm_memory_slot *slot, gfn_t gfn, int level, pte_t pte) kvm_set_pte_rmap() argument
1497 gfn_t gfn; global() member
1606 kvm_age_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head, struct kvm_memory_slot *slot, gfn_t gfn, int level, pte_t unused) kvm_age_rmap() argument
1620 kvm_test_age_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head, struct kvm_memory_slot *slot, gfn_t gfn, int level, pte_t unused) kvm_test_age_rmap() argument
1635 __rmap_add(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, const struct kvm_memory_slot *slot, u64 *spte, gfn_t gfn, unsigned int access) __rmap_add() argument
1659 rmap_add(struct kvm_vcpu *vcpu, const struct kvm_memory_slot *slot, u64 *spte, gfn_t gfn, unsigned int access) rmap_add() argument
1743 kvm_page_table_hashfn(gfn_t gfn) kvm_page_table_hashfn() argument
2145 kvm_mmu_find_shadow_page(struct kvm *kvm, struct kvm_vcpu *vcpu, gfn_t gfn, struct hlist_head *sp_list, union kvm_mmu_page_role role) kvm_mmu_find_shadow_page() argument
2230 kvm_mmu_alloc_shadow_page(struct kvm *kvm, struct shadow_page_caches *caches, gfn_t gfn, struct hlist_head *sp_list, union kvm_mmu_page_role role) kvm_mmu_alloc_shadow_page() argument
2266 __kvm_mmu_get_shadow_page(struct kvm *kvm, struct kvm_vcpu *vcpu, struct shadow_page_caches *caches, gfn_t gfn, union kvm_mmu_page_role role) __kvm_mmu_get_shadow_page() argument
2288 kvm_mmu_get_shadow_page(struct kvm_vcpu *vcpu, gfn_t gfn, union kvm_mmu_page_role role) kvm_mmu_get_shadow_page() argument
2347 kvm_mmu_get_child_sp(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn, bool direct, unsigned int access) kvm_mmu_get_child_sp() argument
2751 kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn) kvm_mmu_unprotect_page() argument
2799 mmu_try_to_unsync_pages(struct kvm *kvm, const struct kvm_memory_slot *slot, gfn_t gfn, bool can_unsync, bool prefetch) mmu_try_to_unsync_pages() argument
2900 mmu_set_spte(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot, u64 *sptep, unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn, struct kvm_page_fault *fault) mmu_set_spte() argument
2979 gfn_t gfn; direct_pte_prefetch_many() local
3076 host_pfn_mapping_level(struct kvm *kvm, gfn_t gfn, const struct kvm_memory_slot *slot) host_pfn_mapping_level() argument
3140 kvm_mmu_max_mapping_level(struct kvm *kvm, const struct kvm_memory_slot *slot, gfn_t gfn, int max_level) kvm_mmu_max_mapping_level() argument
3261 kvm_send_hwpoison_signal(struct kvm_memory_slot *slot, gfn_t gfn) kvm_send_hwpoison_signal() argument
3642 mmu_alloc_root(struct kvm_vcpu *vcpu, gfn_t gfn, int quadrant, u8 level) mmu_alloc_root() argument
4159 gfn_t gfn = get_mmio_spte_gfn(spte); handle_mmio_page_fault() local
4221 kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, gfn_t gfn) kvm_arch_setup_async_pf() argument
4665 sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn, unsigned int access) sync_mmio_spte() argument
5650 gfn_t gfn = gpa >> PAGE_SHIFT; kvm_mmu_track_write() local
6347 gfn_t gfn; shadow_mmu_get_sp_for_split() local
6378 gfn_t gfn; shadow_mmu_split_huge_page() local
6420 gfn_t gfn; shadow_mmu_try_split_huge_page() local
[all...]
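
The index-to-gfn math in the kvm_mmu_page_get_gfn() hit above (line 729) generalizes the sp->gfn base: with SPTE_LEVEL_BITS == 9, SPTE index i inside a level-L shadow page covers the gfn range starting 2^((L-1)*9) gfns per index past the base. Restated as a tiny helper; for example, index 3 at level 2 starts at sp->gfn + 3 * 512, three 2MiB regions in:

/* gfn covered by SPTE "index" of a direct shadow page with base sp_gfn */
static gfn_t sp_index_to_gfn(gfn_t sp_gfn, int level, int index)
{
        /* SPTE_LEVEL_BITS == 9: each index at level L spans 2^((L-1)*9) gfns */
        return sp_gfn + ((gfn_t)index << ((level - 1) * 9));
}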
/kernel/linux/linux-5.10/arch/x86/kvm/mmu/
tdp_mmu.c 139 static struct kvm_mmu_page *alloc_tdp_mmu_page(struct kvm_vcpu *vcpu, gfn_t gfn, in alloc_tdp_mmu_page() argument
149 sp->gfn = gfn; in alloc_tdp_mmu_page()
195 static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
215 static void handle_changed_spte_dirty_log(struct kvm *kvm, int as_id, gfn_t gfn, in handle_changed_spte_dirty_log() argument
228 slot = __gfn_to_memslot(__kvm_memslots(kvm, as_id), gfn); in handle_changed_spte_dirty_log()
229 mark_page_dirty_in_slot(slot, gfn); in handle_changed_spte_dirty_log()
237 * @gfn: the base GFN that was mapped by the SPTE
245 static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn, in __handle_changed_spte() argument
260 WARN_ON(gfn in __handle_changed_spte()
343 handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn, u64 old_spte, u64 new_spte, int level) handle_changed_spte() argument
587 gfn_t gfn = gpa >> PAGE_SHIFT; kvm_tdp_mmu_map() local
758 test_age_gfn(struct kvm *kvm, struct kvm_memory_slot *slot, struct kvm_mmu_page *root, gfn_t gfn, gfn_t unused, unsigned long unused2) test_age_gfn() argument
783 set_tdp_spte(struct kvm *kvm, struct kvm_memory_slot *slot, struct kvm_mmu_page *root, gfn_t gfn, gfn_t unused, unsigned long data) set_tdp_spte() argument
958 clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root, gfn_t gfn, unsigned long mask, bool wrprot) clear_dirty_pt_masked() argument
998 kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn, unsigned long mask, bool wrprot) kvm_tdp_mmu_clear_dirty_pt_masked() argument
1128 write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root, gfn_t gfn) write_protect_gfn() argument
1154 kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn) kvm_tdp_mmu_write_protect_gfn() argument
1181 gfn_t gfn = addr >> PAGE_SHIFT; kvm_tdp_mmu_get_walk() local
[all...]
mmutrace.h 13 __field(__u64, gfn) \
20 __entry->gfn = sp->gfn; \
34 trace_seq_printf(p, "sp gen %u gfn %llx l%u %u-byte q%u%s %s%s" \
37 __entry->gfn, role.level, \
205 TP_PROTO(u64 *sptep, gfn_t gfn, u64 spte),
206 TP_ARGS(sptep, gfn, spte),
210 __field(gfn_t, gfn)
217 __entry->gfn = gfn;
[all...]
tdp_iter.c 14 SHADOW_PT_INDEX(iter->gfn << PAGE_SHIFT, iter->level); in tdp_iter_refresh_sptep()
18 static gfn_t round_gfn_for_level(gfn_t gfn, int level) in round_gfn_for_level() argument
20 return gfn & -KVM_PAGES_PER_HPAGE(level); in round_gfn_for_level()
40 iter->gfn = round_gfn_for_level(iter->next_last_level_gfn, iter->level); in tdp_iter_start()
86 iter->gfn = round_gfn_for_level(iter->next_last_level_gfn, iter->level); in try_step_down()
105 if (SHADOW_PT_INDEX(iter->gfn << PAGE_SHIFT, iter->level) == in try_step_side()
109 iter->gfn += KVM_PAGES_PER_HPAGE(iter->level); in try_step_side()
110 iter->next_last_level_gfn = iter->gfn; in try_step_side()
128 iter->gfn = round_gfn_for_level(iter->gfn, iter->level); in try_step_up()
[all...]
page_track.c 59 static void update_gfn_track(struct kvm_memory_slot *slot, gfn_t gfn, in update_gfn_track() argument
64 index = gfn_to_index(gfn, slot->base_gfn, PG_LEVEL_4K); in update_gfn_track()
82 * @slot: the @gfn belongs to.
83 * @gfn: the guest page.
87 struct kvm_memory_slot *slot, gfn_t gfn, in kvm_slot_page_track_add_page()
94 update_gfn_track(slot, gfn, mode, 1); in kvm_slot_page_track_add_page()
100 kvm_mmu_gfn_disallow_lpage(slot, gfn); in kvm_slot_page_track_add_page()
103 if (kvm_mmu_slot_gfn_write_protect(kvm, slot, gfn)) in kvm_slot_page_track_add_page()
117 * @slot: the @gfn belongs to.
118 * @gfn
86 kvm_slot_page_track_add_page(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn, enum kvm_page_track_mode mode) kvm_slot_page_track_add_page() argument
121 kvm_slot_page_track_remove_page(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn, enum kvm_page_track_mode mode) kvm_slot_page_track_remove_page() argument
141 kvm_page_track_is_active(struct kvm_vcpu *vcpu, gfn_t gfn, enum kvm_page_track_mode mode) kvm_page_track_is_active() argument
[all...]
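
The 5.10-era API takes an explicit mode argument (only KVM_PAGE_TRACK_WRITE is used in-tree) and expects the caller to hold mmu_lock, as the KVMGT callers do. A hedged usage sketch; the locking follows the 5.10 spinlock-era mmu_lock:

/* sketch of an external caller, modelled on the 5.10 kvmgt usage */
static void protect_gfn(struct kvm *kvm, struct kvm_memory_slot *slot,
                        gfn_t gfn)
{
        spin_lock(&kvm->mmu_lock);
        kvm_slot_page_track_add_page(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE);
        spin_unlock(&kvm->mmu_lock);
}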
mmu.c 217 static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn, in mark_mmio_spte() argument
220 u64 mask = make_mmio_spte(vcpu, gfn, access); in mark_mmio_spte()
222 trace_mark_mmio_spte(sptep, gfn, mask); in mark_mmio_spte()
241 static bool set_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn, in set_mmio_spte() argument
245 mark_mmio_spte(vcpu, sptep, gfn, access); in set_mmio_spte()
702 return sp->gfn + (index << ((sp->role.level - 1) * PT64_LEVEL_BITS)); in kvm_mmu_page_get_gfn()
705 static void kvm_mmu_page_set_gfn(struct kvm_mmu_page *sp, int index, gfn_t gfn) in kvm_mmu_page_set_gfn() argument
708 sp->gfns[index] = gfn; in kvm_mmu_page_set_gfn()
712 if (WARN_ON(gfn != kvm_mmu_page_get_gfn(sp, index))) in kvm_mmu_page_set_gfn()
713 pr_err_ratelimited("gfn mismatch in kvm_mmu_page_set_gfn()
723 lpage_info_slot(gfn_t gfn, struct kvm_memory_slot *slot, int level) lpage_info_slot() argument
733 update_gfn_disallow_lpage_count(struct kvm_memory_slot *slot, gfn_t gfn, int count) update_gfn_disallow_lpage_count() argument
746 kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn) kvm_mmu_gfn_disallow_lpage() argument
751 kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn) kvm_mmu_gfn_allow_lpage() argument
760 gfn_t gfn; account_shadowed() local
790 gfn_t gfn; unaccount_shadowed() local
811 gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn, bool no_dirty_log) gfn_to_memslot_dirty_bitmap() argument
937 __gfn_to_rmap(gfn_t gfn, int level, struct kvm_memory_slot *slot) __gfn_to_rmap() argument
946 gfn_to_rmap(struct kvm *kvm, gfn_t gfn, struct kvm_mmu_page *sp) gfn_to_rmap() argument
965 rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn) rmap_add() argument
979 gfn_t gfn; rmap_remove() local
1288 kvm_mmu_slot_gfn_write_protect(struct kvm *kvm, struct kvm_memory_slot *slot, u64 gfn) kvm_mmu_slot_gfn_write_protect() argument
1307 rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn) rmap_write_protect() argument
1331 kvm_unmap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head, struct kvm_memory_slot *slot, gfn_t gfn, int level, unsigned long data) kvm_unmap_rmapp() argument
1338 kvm_set_pte_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head, struct kvm_memory_slot *slot, gfn_t gfn, int level, unsigned long data) kvm_set_pte_rmapp() argument
1388 gfn_t gfn; global() member
1447 kvm_handle_hva_range(struct kvm *kvm, unsigned long start, unsigned long end, unsigned long data, int (*handler)(struct kvm *kvm, struct kvm_rmap_head *rmap_head, struct kvm_memory_slot *slot, gfn_t gfn, int level, unsigned long data)) kvm_handle_hva_range() argument
1494 kvm_handle_hva(struct kvm *kvm, unsigned long hva, unsigned long data, int (*handler)(struct kvm *kvm, struct kvm_rmap_head *rmap_head, struct kvm_memory_slot *slot, gfn_t gfn, int level, unsigned long data)) kvm_handle_hva() argument
1530 kvm_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head, struct kvm_memory_slot *slot, gfn_t gfn, int level, unsigned long data) kvm_age_rmapp() argument
1545 kvm_test_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head, struct kvm_memory_slot *slot, gfn_t gfn, int level, unsigned long data) kvm_test_age_rmapp() argument
1560 rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn) rmap_recycle() argument
1635 kvm_page_table_hashfn(gfn_t gfn) kvm_page_table_hashfn() argument
1886 kvm_sync_pages(struct kvm_vcpu *vcpu, gfn_t gfn, struct list_head *invalid_list) kvm_sync_pages() argument
2020 kvm_mmu_get_page(struct kvm_vcpu *vcpu, gfn_t gfn, gva_t gaddr, unsigned level, int direct, unsigned int access) kvm_mmu_get_page() argument
2468 kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn) kvm_mmu_unprotect_page() argument
2499 mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn, bool can_unsync) mmu_need_write_protect() argument
2560 set_spte(struct kvm_vcpu *vcpu, u64 *sptep, unsigned int pte_access, int level, gfn_t gfn, kvm_pfn_t pfn, bool speculative, bool can_unsync, bool host_writable) set_spte() argument
2587 mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, unsigned int pte_access, bool write_fault, int level, gfn_t gfn, kvm_pfn_t pfn, bool speculative, bool host_writable) mmu_set_spte() argument
2662 pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, bool no_dirty_log) pte_prefetch_gfn_to_pfn() argument
2682 gfn_t gfn; direct_pte_prefetch_many() local
2745 host_pfn_mapping_level(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn, struct kvm_memory_slot *slot) host_pfn_mapping_level() argument
2772 kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, gfn_t gfn, int max_level, kvm_pfn_t *pfnp, bool huge_page_disallowed, int *req_level) kvm_mmu_hugepage_adjust() argument
2828 disallowed_hugepage_adjust(u64 spte, gfn_t gfn, int cur_level, kvm_pfn_t *pfnp, int *goal_levelp) disallowed_hugepage_adjust() argument
2861 gfn_t gfn = gpa >> PAGE_SHIFT; __direct_map() local
2912 kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn) kvm_handle_bad_page() argument
2930 handle_abnormal_pfn(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn, kvm_pfn_t pfn, unsigned int access, int *ret_val) handle_abnormal_pfn() argument
2988 gfn_t gfn; fast_pf_fix_direct_spte() local
3217 mmu_alloc_root(struct kvm_vcpu *vcpu, gfn_t gfn, gva_t gva, u8 level, bool direct) mmu_alloc_root() argument
3581 gfn_t gfn = get_mmio_spte_gfn(spte); handle_mmio_page_fault() local
3602 page_fault_handle_page_track(struct kvm_vcpu *vcpu, u32 error_code, gfn_t gfn) page_fault_handle_page_track() argument
3647 kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, gfn_t gfn) kvm_arch_setup_async_pf() argument
3661 try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn, gpa_t cr2_or_gpa, kvm_pfn_t *pfn, bool write, bool *writable) try_async_pf() argument
3708 gfn_t gfn = gpa >> PAGE_SHIFT; direct_page_fault() local
3942 sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn, unsigned int access, int *nr_present) sync_mmio_spte() argument
4990 gfn_t gfn = gpa >> PAGE_SHIFT; kvm_mmu_pte_write() local
[all...]
mmu_internal.h 38 gfn_t gfn; member
41 /* hold the gfn of each spte inside spt */
88 bool mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
91 void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
92 void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
94 struct kvm_memory_slot *slot, u64 gfn);
136 int kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, gfn_t gfn,
139 void disallowed_hugepage_adjust(u64 spte, gfn_t gfn, int cur_level,
/kernel/linux/linux-5.10/arch/powerpc/kvm/
book3s_hv_uvmem.c 287 static void kvmppc_mark_gfn(unsigned long gfn, struct kvm *kvm, in kvmppc_mark_gfn() argument
293 if (gfn >= p->base_pfn && gfn < p->base_pfn + p->nr_pfns) { in kvmppc_mark_gfn()
294 unsigned long index = gfn - p->base_pfn; in kvmppc_mark_gfn()
306 static void kvmppc_gfn_secure_uvmem_pfn(unsigned long gfn, in kvmppc_gfn_secure_uvmem_pfn() argument
309 kvmppc_mark_gfn(gfn, kvm, KVMPPC_GFN_UVMEM_PFN, uvmem_pfn); in kvmppc_gfn_secure_uvmem_pfn()
313 static void kvmppc_gfn_secure_mem_pfn(unsigned long gfn, struct kvm *kvm) in kvmppc_gfn_secure_mem_pfn() argument
315 kvmppc_mark_gfn(gfn, kvm, KVMPPC_GFN_MEM_PFN, 0); in kvmppc_gfn_secure_mem_pfn()
319 static void kvmppc_gfn_shared(unsigned long gfn, struct kvm *kvm) in kvmppc_gfn_shared() argument
321 kvmppc_mark_gfn(gfn, kvm, in kvmppc_gfn_shared()
325 kvmppc_gfn_remove(unsigned long gfn, struct kvm *kvm) kvmppc_gfn_remove() argument
331 kvmppc_gfn_is_uvmem_pfn(unsigned long gfn, struct kvm *kvm, unsigned long *uvmem_pfn) kvmppc_gfn_is_uvmem_pfn() argument
359 kvmppc_next_nontransitioned_gfn(const struct kvm_memory_slot *memslot, struct kvm *kvm, unsigned long *gfn) kvmppc_next_nontransitioned_gfn() argument
392 unsigned long gfn = memslot->base_gfn; kvmppc_memslot_page_merge() local
610 unsigned long uvmem_pfn, gfn; kvmppc_uvmem_drop_pages() local
791 unsigned long gfn = memslot->base_gfn; kvmppc_uv_migrate_mem_slot() local
879 unsigned long gfn = gpa >> page_shift; kvmppc_share_page() local
937 unsigned long gfn = gpa >> page_shift; kvmppc_h_svm_page_in() local
1043 unsigned long gfn = gpa >> page_shift; kvmppc_h_svm_page_out() local
1078 kvmppc_send_page_to_uv(struct kvm *kvm, unsigned long gfn) kvmppc_send_page_to_uv() argument
[all...]
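
Every uvmem entry point converts the hypercall's gpa to a gfn with the caller-supplied page shift, then records per-gfn state relative to the memslot's base pfn (kvmppc_mark_gfn() above, line 293). A simplified model of that bookkeeping; the pfns array and the "flag | uvmem_pfn" encoding are assumptions, only the range check and indexing follow the snippet:

/* model of kvmppc_mark_gfn(); the encoding of p->pfns[] is an assumption */
static void mark_gfn(struct kvmppc_uvmem_slot *p, unsigned long gfn,
                     unsigned long flag, unsigned long uvmem_pfn)
{
        if (gfn >= p->base_pfn && gfn < p->base_pfn + p->nr_pfns) {
                unsigned long index = gfn - p->base_pfn;

                p->pfns[index] = flag | uvmem_pfn;
        }
}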
/kernel/linux/linux-6.6/arch/powerpc/kvm/
book3s_hv_uvmem.c 289 static void kvmppc_mark_gfn(unsigned long gfn, struct kvm *kvm, in kvmppc_mark_gfn() argument
295 if (gfn >= p->base_pfn && gfn < p->base_pfn + p->nr_pfns) { in kvmppc_mark_gfn()
296 unsigned long index = gfn - p->base_pfn; in kvmppc_mark_gfn()
308 static void kvmppc_gfn_secure_uvmem_pfn(unsigned long gfn, in kvmppc_gfn_secure_uvmem_pfn() argument
311 kvmppc_mark_gfn(gfn, kvm, KVMPPC_GFN_UVMEM_PFN, uvmem_pfn); in kvmppc_gfn_secure_uvmem_pfn()
315 static void kvmppc_gfn_secure_mem_pfn(unsigned long gfn, struct kvm *kvm) in kvmppc_gfn_secure_mem_pfn() argument
317 kvmppc_mark_gfn(gfn, kvm, KVMPPC_GFN_MEM_PFN, 0); in kvmppc_gfn_secure_mem_pfn()
321 static void kvmppc_gfn_shared(unsigned long gfn, struct kvm *kvm) in kvmppc_gfn_shared() argument
323 kvmppc_mark_gfn(gfn, kvm, in kvmppc_gfn_shared()
327 kvmppc_gfn_remove(unsigned long gfn, struct kvm *kvm) kvmppc_gfn_remove() argument
333 kvmppc_gfn_is_uvmem_pfn(unsigned long gfn, struct kvm *kvm, unsigned long *uvmem_pfn) kvmppc_gfn_is_uvmem_pfn() argument
361 kvmppc_next_nontransitioned_gfn(const struct kvm_memory_slot *memslot, struct kvm *kvm, unsigned long *gfn) kvmppc_next_nontransitioned_gfn() argument
394 unsigned long gfn = memslot->base_gfn; kvmppc_memslot_page_merge() local
617 unsigned long uvmem_pfn, gfn; kvmppc_uvmem_drop_pages() local
797 unsigned long gfn = memslot->base_gfn; kvmppc_uv_migrate_mem_slot() local
885 unsigned long gfn = gpa >> page_shift; kvmppc_share_page() local
943 unsigned long gfn = gpa >> page_shift; kvmppc_h_svm_page_in() local
1049 unsigned long gfn = gpa >> page_shift; kvmppc_h_svm_page_out() local
1084 kvmppc_send_page_to_uv(struct kvm *kvm, unsigned long gfn) kvmppc_send_page_to_uv() argument
[all...]
/kernel/linux/linux-5.10/include/linux/
kvm_host.h 91 * error pfns indicate that the gfn is in slot but failed to
100 * error_noslot pfns indicate that the gfn can not be
109 /* noslot pfn indicates that the gfn is not in slot. */
241 kvm_pfn_t gfn; member
731 int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
734 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
735 unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
736 unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable);
737 unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
738 unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot, gfn_t gfn,
1095 search_memslots(struct kvm_memslots *slots, gfn_t gfn) search_memslots() argument
1127 __gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn) __gfn_to_memslot() argument
1133 __gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn) __gfn_to_hva_memslot() argument
1146 memslot_id(struct kvm *kvm, gfn_t gfn) memslot_id() argument
1159 gfn_to_gpa(gfn_t gfn) gfn_to_gpa() argument
[all...]
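
The conversions underneath these declarations are plain shifts and offsets; reconstructed inlines matching the 5.10 header, for reference:

static inline gpa_t gfn_to_gpa(gfn_t gfn)
{
        return (gpa_t)gfn << PAGE_SHIFT;        /* a gfn is a gpa in page units */
}

static inline unsigned long
__gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
{
        /* host virtual address that backs this gfn within the slot */
        return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
}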
/kernel/linux/linux-6.6/include/xen/
xen-ops.h 66 xen_pfn_t *gfn, int nr,
79 xen_pfn_t *gfn, int nr, in xen_xlate_remap_gfn_array()
98 * xen_remap_domain_gfn_array() - map an array of foreign frames by gfn
101 * @gfn: Array of GFNs to map
108 * @gfn and @err_ptr may point to the same buffer, the GFNs will be
116 xen_pfn_t *gfn, int nr, in xen_remap_domain_gfn_array()
122 return xen_xlate_remap_gfn_array(vma, addr, gfn, nr, err_ptr, in xen_remap_domain_gfn_array()
130 return xen_remap_pfn(vma, addr, gfn, nr, err_ptr, prot, domid, in xen_remap_domain_gfn_array()
165 * @gfn: First GFN to map.
176 xen_pfn_t gfn, int nr, in xen_remap_domain_gfn_range()
77 xen_xlate_remap_gfn_array(struct vm_area_struct *vma, unsigned long addr, xen_pfn_t *gfn, int nr, int *err_ptr, pgprot_t prot, unsigned int domid, struct page **pages) xen_xlate_remap_gfn_array() argument
114 xen_remap_domain_gfn_array(struct vm_area_struct *vma, unsigned long addr, xen_pfn_t *gfn, int nr, int *err_ptr, pgprot_t prot, unsigned int domid, struct page **pages) xen_remap_domain_gfn_array() argument
174 xen_remap_domain_gfn_range(struct vm_area_struct *vma, unsigned long addr, xen_pfn_t gfn, int nr, pgprot_t prot, unsigned int domid, struct page **pages) xen_remap_domain_gfn_range() argument
[all...]
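
A hedged caller sketch for xen_remap_domain_gfn_array() with the full signature shown above (line 114); passing the VMA's own protection and a NULL pages array is the PV case, an auto-translated domain must supply pages for the xlate fallback:

static int map_foreign_frames(struct vm_area_struct *vma, unsigned long addr,
                              xen_pfn_t *gfns, int *errs, int nr,
                              unsigned int domid)
{
        /* gfns[] and errs[] may alias, per the kernel-doc above */
        return xen_remap_domain_gfn_array(vma, addr, gfns, nr, errs,
                                          vma->vm_page_prot, domid, NULL);
}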
/kernel/linux/linux-5.10/include/trace/events/
kvm.h 259 TP_PROTO(ulong gfn, int level, struct kvm_memory_slot *slot, int ref),
260 TP_ARGS(gfn, level, slot, ref),
264 __field( u64, gfn )
270 __entry->gfn = gfn;
272 __entry->hva = ((gfn - slot->base_gfn) <<
277 TP_printk("hva %llx gfn %llx level %u %s",
278 __entry->hva, __entry->gfn, __entry->level,
285 TP_PROTO(u64 gva, u64 gfn),
287 TP_ARGS(gva, gfn),
[all...]
