Home
last modified time | relevance | path

Searched refs:memslot (Results 1 - 25 of 75) sorted by relevance

123

/kernel/linux/linux-5.10/arch/arm64/kvm/
H A Dmmu.c70 static bool memslot_is_logging(struct kvm_memory_slot *memslot) in memslot_is_logging() argument
72 return memslot->dirty_bitmap && !(memslot->flags & KVM_MEM_READONLY); in memslot_is_logging()
145 struct kvm_memory_slot *memslot) in stage2_flush_memslot()
147 phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT; in stage2_flush_memslot()
148 phys_addr_t end = addr + PAGE_SIZE * memslot->npages; in stage2_flush_memslot()
163 struct kvm_memory_slot *memslot; in stage2_flush_vm() local
170 kvm_for_each_memslot(memslot, slots) in stage2_flush_vm()
171 stage2_flush_memslot(kvm, memslot); in stage2_flush_vm()
404 struct kvm_memory_slot *memslot) in stage2_unmap_memslot()
144 stage2_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot) stage2_flush_memslot() argument
403 stage2_unmap_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot) stage2_unmap_memslot() argument
454 struct kvm_memory_slot *memslot; stage2_unmap_vm() local
561 struct kvm_memory_slot *memslot = id_to_memslot(slots, slot); kvm_mmu_wp_memory_region() local
627 fault_supports_stage2_huge_mapping(struct kvm_memory_slot *memslot, unsigned long hva, unsigned long map_size) fault_supports_stage2_huge_mapping() argument
697 transparent_hugepage_adjust(struct kvm_memory_slot *memslot, unsigned long hva, kvm_pfn_t *pfnp, phys_addr_t *ipap) transparent_hugepage_adjust() argument
741 user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, struct kvm_memory_slot *memslot, unsigned long hva, unsigned long fault_status) user_mem_abort() argument
955 struct kvm_memory_slot *memslot; kvm_handle_guest_abort() local
1073 struct kvm_memory_slot *memslot; handle_hva_to_gpa() local
1294 kvm_arch_prepare_memory_region(struct kvm *kvm, struct kvm_memory_slot *memslot, const struct kvm_userspace_memory_region *mem, enum kvm_mr_change change) kvm_arch_prepare_memory_region() argument
[all...]
/kernel/linux/linux-5.10/arch/powerpc/kvm/
H A Dbook3s_64_mmu_hv.c204 void kvmppc_map_vrma(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot, in kvmppc_map_vrma() argument
218 npages = memslot->npages >> (porder - PAGE_SHIFT); in kvmppc_map_vrma()
492 struct kvm_memory_slot *memslot; in kvmppc_book3s_hv_page_fault() local
555 memslot = gfn_to_memslot(kvm, gfn); in kvmppc_book3s_hv_page_fault()
557 trace_kvm_page_fault_enter(vcpu, hpte, memslot, ea, dsisr); in kvmppc_book3s_hv_page_fault()
559 /* No memslot means it's an emulated MMIO region */ in kvmppc_book3s_hv_page_fault()
560 if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) in kvmppc_book3s_hv_page_fault()
568 if (gfn_base < memslot->base_gfn) in kvmppc_book3s_hv_page_fault()
580 hva = gfn_to_hva_memslot(memslot, gfn); in kvmppc_book3s_hv_page_fault()
735 struct kvm_memory_slot *memslot; kvmppc_rmap_reset() local
765 struct kvm_memory_slot *memslot; kvm_handle_hva_range() local
800 kvmppc_unmap_hpte(struct kvm *kvm, unsigned long i, struct kvm_memory_slot *memslot, unsigned long *rmapp, unsigned long gfn) kvmppc_unmap_hpte() argument
842 kvm_unmap_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot, unsigned long gfn) kvm_unmap_rmapp() argument
888 kvmppc_core_flush_memslot_hv(struct kvm *kvm, struct kvm_memory_slot *memslot) kvmppc_core_flush_memslot_hv() argument
915 kvm_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot, unsigned long gfn) kvm_age_rmapp() argument
978 kvm_test_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot, unsigned long gfn) kvm_test_age_rmapp() argument
1117 kvmppc_harvest_vpa_dirty(struct kvmppc_vpa *vpa, struct kvm_memory_slot *memslot, unsigned long *map) kvmppc_harvest_vpa_dirty() argument
1135 kvmppc_hv_get_dirty_log_hpt(struct kvm *kvm, struct kvm_memory_slot *memslot, unsigned long *map) kvmppc_hv_get_dirty_log_hpt() argument
1161 struct kvm_memory_slot *memslot; kvmppc_pin_guest_page() local
1193 struct kvm_memory_slot *memslot; kvmppc_unpin_guest_page() local
1280 struct kvm_memory_slot *memslot = resize_hpt_rehash_hpte() local
[all...]
H A Dbook3s_hv_uvmem.c359 static bool kvmppc_next_nontransitioned_gfn(const struct kvm_memory_slot *memslot, in kvmppc_next_nontransitioned_gfn() argument
375 * kvmppc_uvmem_slot and memslot. in kvmppc_next_nontransitioned_gfn()
390 const struct kvm_memory_slot *memslot, bool merge) in kvmppc_memslot_page_merge()
392 unsigned long gfn = memslot->base_gfn; in kvmppc_memslot_page_merge()
401 end = start + (memslot->npages << PAGE_SHIFT); in kvmppc_memslot_page_merge()
424 const struct kvm_memory_slot *memslot) in __kvmppc_uvmem_memslot_delete()
426 uv_unregister_mem_slot(kvm->arch.lpid, memslot->id); in __kvmppc_uvmem_memslot_delete()
427 kvmppc_uvmem_slot_free(kvm, memslot); in __kvmppc_uvmem_memslot_delete()
428 kvmppc_memslot_page_merge(kvm, memslot, true); in __kvmppc_uvmem_memslot_delete()
432 const struct kvm_memory_slot *memslot) in __kvmppc_uvmem_memslot_create()
389 kvmppc_memslot_page_merge(struct kvm *kvm, const struct kvm_memory_slot *memslot, bool merge) kvmppc_memslot_page_merge() argument
423 __kvmppc_uvmem_memslot_delete(struct kvm *kvm, const struct kvm_memory_slot *memslot) __kvmppc_uvmem_memslot_delete() argument
431 __kvmppc_uvmem_memslot_create(struct kvm *kvm, const struct kvm_memory_slot *memslot) __kvmppc_uvmem_memslot_create() argument
461 struct kvm_memory_slot *memslot, *m; kvmppc_h_svm_init_start() local
655 struct kvm_memory_slot *memslot; kvmppc_h_svm_init_abort() local
788 kvmppc_uv_migrate_mem_slot(struct kvm *kvm, const struct kvm_memory_slot *memslot) kvmppc_uv_migrate_mem_slot() argument
827 struct kvm_memory_slot *memslot; kvmppc_h_svm_init_done() local
[all...]
H A Dbook3s_hv_rm_mmu.c108 /* Update the dirty bitmap of a memslot */
109 void kvmppc_update_dirty_map(const struct kvm_memory_slot *memslot, in kvmppc_update_dirty_map() argument
114 if (!psize || !memslot->dirty_bitmap) in kvmppc_update_dirty_map()
117 gfn -= memslot->base_gfn; in kvmppc_update_dirty_map()
118 set_dirty_bits_atomic(memslot->dirty_bitmap, gfn, npages); in kvmppc_update_dirty_map()
125 struct kvm_memory_slot *memslot; in kvmppc_set_dirty_from_hpte() local
131 memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn); in kvmppc_set_dirty_from_hpte()
132 if (memslot && memslot->dirty_bitmap) in kvmppc_set_dirty_from_hpte()
133 kvmppc_update_dirty_map(memslot, gfn, psize); in kvmppc_set_dirty_from_hpte()
142 struct kvm_memory_slot *memslot; revmap_for_hpte() local
168 struct kvm_memory_slot *memslot; remove_revmap_chain() local
206 struct kvm_memory_slot *memslot; kvmppc_do_h_enter() local
886 struct kvm_memory_slot *memslot; kvmppc_get_hpa() local
926 struct kvm_memory_slot *memslot; kvmppc_do_h_page_init_zero() local
[all...]
H A Dbook3s_64_mmu_radix.c396 const struct kvm_memory_slot *memslot, in kvmppc_unmap_pte()
412 if (!memslot) { in kvmppc_unmap_pte()
413 memslot = gfn_to_memslot(kvm, gfn); in kvmppc_unmap_pte()
414 if (!memslot) in kvmppc_unmap_pte()
427 kvmhv_remove_nest_rmap_range(kvm, memslot, gpa, hpa, page_size); in kvmppc_unmap_pte()
429 if ((old & _PAGE_DIRTY) && memslot->dirty_bitmap) in kvmppc_unmap_pte()
430 kvmppc_update_dirty_map(memslot, gfn, page_size); in kvmppc_unmap_pte()
441 * turned off for a memslot while the VM is running. The new memslot
442 becomes visible to page faults before the memslot commit
394 kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, unsigned long gpa, unsigned int shift, const struct kvm_memory_slot *memslot, unsigned int lpid) kvmppc_unmap_pte() argument
792 kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu, unsigned long gpa, struct kvm_memory_slot *memslot, bool writing, bool kvm_ro, pte_t *inserted_pte, unsigned int *levelp) kvmppc_book3s_instantiate_page() argument
921 struct kvm_memory_slot *memslot; kvmppc_book3s_radix_page_fault() local
998 kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot, unsigned long gfn) kvm_unmap_radix() argument
1018 kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot, unsigned long gfn) kvm_age_radix() argument
1046 kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot, unsigned long gfn) kvm_test_age_radix() argument
1064 kvm_radix_test_clear_dirty(struct kvm *kvm, struct kvm_memory_slot *memslot, int pagenum) kvm_radix_test_clear_dirty() argument
1119 kvmppc_hv_get_dirty_log_radix(struct kvm *kvm, struct kvm_memory_slot *memslot, unsigned long *map) kvmppc_hv_get_dirty_log_radix() argument
1144 kvmppc_radix_flush_memslot(struct kvm *kvm, const struct kvm_memory_slot *memslot) kvmppc_radix_flush_memslot() argument
[all...]
H A Dtrace_hv.h274 struct kvm_memory_slot *memslot, unsigned long ea,
277 TP_ARGS(vcpu, hptep, memslot, ea, dsisr),
297 __entry->base_gfn = memslot ? memslot->base_gfn : -1UL;
298 __entry->slot_flags = memslot ? memslot->flags : 0;
/kernel/linux/linux-6.6/arch/powerpc/kvm/
H A Dbook3s_hv_uvmem.c361 static bool kvmppc_next_nontransitioned_gfn(const struct kvm_memory_slot *memslot, in kvmppc_next_nontransitioned_gfn() argument
377 * kvmppc_uvmem_slot and memslot. in kvmppc_next_nontransitioned_gfn()
392 const struct kvm_memory_slot *memslot, bool merge) in kvmppc_memslot_page_merge()
394 unsigned long gfn = memslot->base_gfn; in kvmppc_memslot_page_merge()
404 end = start + (memslot->npages << PAGE_SHIFT); in kvmppc_memslot_page_merge()
431 const struct kvm_memory_slot *memslot) in __kvmppc_uvmem_memslot_delete()
433 uv_unregister_mem_slot(kvm->arch.lpid, memslot->id); in __kvmppc_uvmem_memslot_delete()
434 kvmppc_uvmem_slot_free(kvm, memslot); in __kvmppc_uvmem_memslot_delete()
435 kvmppc_memslot_page_merge(kvm, memslot, true); in __kvmppc_uvmem_memslot_delete()
439 const struct kvm_memory_slot *memslot) in __kvmppc_uvmem_memslot_create()
391 kvmppc_memslot_page_merge(struct kvm *kvm, const struct kvm_memory_slot *memslot, bool merge) kvmppc_memslot_page_merge() argument
430 __kvmppc_uvmem_memslot_delete(struct kvm *kvm, const struct kvm_memory_slot *memslot) __kvmppc_uvmem_memslot_delete() argument
438 __kvmppc_uvmem_memslot_create(struct kvm *kvm, const struct kvm_memory_slot *memslot) __kvmppc_uvmem_memslot_create() argument
468 struct kvm_memory_slot *memslot, *m; kvmppc_h_svm_init_start() local
662 struct kvm_memory_slot *memslot; kvmppc_h_svm_init_abort() local
794 kvmppc_uv_migrate_mem_slot(struct kvm *kvm, const struct kvm_memory_slot *memslot) kvmppc_uv_migrate_mem_slot() argument
833 struct kvm_memory_slot *memslot; kvmppc_h_svm_init_done() local
[all...]
H A Dbook3s_64_mmu_hv.c206 void kvmppc_map_vrma(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot, in kvmppc_map_vrma() argument
220 npages = memslot->npages >> (porder - PAGE_SHIFT); in kvmppc_map_vrma()
516 struct kvm_memory_slot *memslot; in kvmppc_book3s_hv_page_fault() local
579 memslot = gfn_to_memslot(kvm, gfn); in kvmppc_book3s_hv_page_fault()
581 trace_kvm_page_fault_enter(vcpu, hpte, memslot, ea, dsisr); in kvmppc_book3s_hv_page_fault()
583 /* No memslot means it's an emulated MMIO region */ in kvmppc_book3s_hv_page_fault()
584 if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) in kvmppc_book3s_hv_page_fault()
592 if (gfn_base < memslot->base_gfn) in kvmppc_book3s_hv_page_fault()
604 hva = gfn_to_hva_memslot(memslot, gfn); in kvmppc_book3s_hv_page_fault()
759 struct kvm_memory_slot *memslot; kvmppc_rmap_reset() local
779 kvmppc_unmap_hpte(struct kvm *kvm, unsigned long i, struct kvm_memory_slot *memslot, unsigned long *rmapp, unsigned long gfn) kvmppc_unmap_hpte() argument
821 kvm_unmap_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot, unsigned long gfn) kvm_unmap_rmapp() argument
872 kvmppc_core_flush_memslot_hv(struct kvm *kvm, struct kvm_memory_slot *memslot) kvmppc_core_flush_memslot_hv() argument
899 kvm_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot, unsigned long gfn) kvm_age_rmapp() argument
970 kvm_test_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot, unsigned long gfn) kvm_test_age_rmapp() argument
1115 kvmppc_harvest_vpa_dirty(struct kvmppc_vpa *vpa, struct kvm_memory_slot *memslot, unsigned long *map) kvmppc_harvest_vpa_dirty() argument
1133 kvmppc_hv_get_dirty_log_hpt(struct kvm *kvm, struct kvm_memory_slot *memslot, unsigned long *map) kvmppc_hv_get_dirty_log_hpt() argument
1159 struct kvm_memory_slot *memslot; kvmppc_pin_guest_page() local
1191 struct kvm_memory_slot *memslot; kvmppc_unpin_guest_page() local
1278 struct kvm_memory_slot *memslot = resize_hpt_rehash_hpte() local
[all...]
H A Dbook3s_64_mmu_radix.c420 const struct kvm_memory_slot *memslot, in kvmppc_unmap_pte()
436 if (!memslot) { in kvmppc_unmap_pte()
437 memslot = gfn_to_memslot(kvm, gfn); in kvmppc_unmap_pte()
438 if (!memslot) in kvmppc_unmap_pte()
451 kvmhv_remove_nest_rmap_range(kvm, memslot, gpa, hpa, page_size); in kvmppc_unmap_pte()
453 if ((old & _PAGE_DIRTY) && memslot->dirty_bitmap) in kvmppc_unmap_pte()
454 kvmppc_update_dirty_map(memslot, gfn, page_size); in kvmppc_unmap_pte()
465 * turned off for a memslot while the VM is running. The new memslot
466 becomes visible to page faults before the memslot commit
418 kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, unsigned long gpa, unsigned int shift, const struct kvm_memory_slot *memslot, unsigned int lpid) kvmppc_unmap_pte() argument
816 kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu, unsigned long gpa, struct kvm_memory_slot *memslot, bool writing, bool kvm_ro, pte_t *inserted_pte, unsigned int *levelp) kvmppc_book3s_instantiate_page() argument
945 struct kvm_memory_slot *memslot; kvmppc_book3s_radix_page_fault() local
1027 kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot, unsigned long gfn) kvm_unmap_radix() argument
1046 kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot, unsigned long gfn) kvm_age_radix() argument
1074 kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot, unsigned long gfn) kvm_test_age_radix() argument
1093 kvm_radix_test_clear_dirty(struct kvm *kvm, struct kvm_memory_slot *memslot, int pagenum) kvm_radix_test_clear_dirty() argument
1148 kvmppc_hv_get_dirty_log_radix(struct kvm *kvm, struct kvm_memory_slot *memslot, unsigned long *map) kvmppc_hv_get_dirty_log_radix() argument
1173 kvmppc_radix_flush_memslot(struct kvm *kvm, const struct kvm_memory_slot *memslot) kvmppc_radix_flush_memslot() argument
[all...]
H A Dbook3s_hv_rm_mmu.c95 /* Update the dirty bitmap of a memslot */
96 void kvmppc_update_dirty_map(const struct kvm_memory_slot *memslot, in kvmppc_update_dirty_map() argument
101 if (!psize || !memslot->dirty_bitmap) in kvmppc_update_dirty_map()
104 gfn -= memslot->base_gfn; in kvmppc_update_dirty_map()
105 set_dirty_bits_atomic(memslot->dirty_bitmap, gfn, npages); in kvmppc_update_dirty_map()
112 struct kvm_memory_slot *memslot; in kvmppc_set_dirty_from_hpte() local
118 memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn); in kvmppc_set_dirty_from_hpte()
119 if (memslot && memslot->dirty_bitmap) in kvmppc_set_dirty_from_hpte()
120 kvmppc_update_dirty_map(memslot, gfn, psize); in kvmppc_set_dirty_from_hpte()
129 struct kvm_memory_slot *memslot; revmap_for_hpte() local
155 struct kvm_memory_slot *memslot; remove_revmap_chain() local
193 struct kvm_memory_slot *memslot; kvmppc_do_h_enter() local
888 struct kvm_memory_slot *memslot; kvmppc_get_hpa() local
928 struct kvm_memory_slot *memslot; kvmppc_do_h_page_init_zero() local
[all...]
H A Dtrace_hv.h294 struct kvm_memory_slot *memslot, unsigned long ea,
297 TP_ARGS(vcpu, hptep, memslot, ea, dsisr),
317 __entry->base_gfn = memslot ? memslot->base_gfn : -1UL;
318 __entry->slot_flags = memslot ? memslot->flags : 0;
/kernel/linux/linux-6.6/arch/arm64/kvm/
H A Dmmu.c158 static bool memslot_is_logging(struct kvm_memory_slot *memslot) in memslot_is_logging() argument
160 return memslot->dirty_bitmap && !(memslot->flags & KVM_MEM_READONLY); in memslot_is_logging()
337 struct kvm_memory_slot *memslot) in stage2_flush_memslot()
339 phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT; in stage2_flush_memslot()
340 phys_addr_t end = addr + PAGE_SIZE * memslot->npages; in stage2_flush_memslot()
355 struct kvm_memory_slot *memslot; in stage2_flush_vm() local
362 kvm_for_each_memslot(memslot, bkt, slots) in stage2_flush_vm()
363 stage2_flush_memslot(kvm, memslot); in stage2_flush_vm()
942 struct kvm_memory_slot *memslot) in stage2_unmap_memslot()
336 stage2_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot) stage2_flush_memslot() argument
941 stage2_unmap_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot) stage2_unmap_memslot() argument
993 struct kvm_memory_slot *memslot; stage2_unmap_vm() local
1128 struct kvm_memory_slot *memslot = id_to_memslot(slots, slot); kvm_mmu_wp_memory_region() local
1155 struct kvm_memory_slot *memslot; kvm_mmu_split_memory_region() local
1211 fault_supports_stage2_huge_mapping(struct kvm_memory_slot *memslot, unsigned long hva, unsigned long map_size) fault_supports_stage2_huge_mapping() argument
1281 transparent_hugepage_adjust(struct kvm *kvm, struct kvm_memory_slot *memslot, unsigned long hva, kvm_pfn_t *pfnp, phys_addr_t *ipap) transparent_hugepage_adjust() argument
1394 user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, struct kvm_memory_slot *memslot, unsigned long hva, unsigned long fault_status) user_mem_abort() argument
1642 struct kvm_memory_slot *memslot; kvm_handle_guest_abort() local
[all...]
/kernel/linux/linux-5.10/arch/loongarch/kvm/
H A Dmmu.c655 struct kvm_memory_slot *memslot, in handle_hva_to_gpa()
660 struct kvm_memory_slot *memslot; in handle_hva_to_gpa() local
666 kvm_for_each_memslot(memslot, slots) { in handle_hva_to_gpa()
670 hva_start = max(start, memslot->userspace_addr); in handle_hva_to_gpa()
671 hva_end = min(end, memslot->userspace_addr + in handle_hva_to_gpa()
672 (memslot->npages << PAGE_SHIFT)); in handle_hva_to_gpa()
680 gfn = hva_to_gfn_memslot(hva_start, memslot); in handle_hva_to_gpa()
681 gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot); in handle_hva_to_gpa()
682 ret |= handler(kvm, gfn, gfn_end, memslot, data); in handle_hva_to_gpa()
690 struct kvm_memory_slot *memslot, void *data) in kvm_unmap_hva_handler()
650 handle_hva_to_gpa(struct kvm *kvm, unsigned long start, unsigned long end, int (*handler)(struct kvm *kvm, gfn_t gfn, gpa_t gfn_end, struct kvm_memory_slot *memslot, void *data), void *data) handle_hva_to_gpa() argument
689 kvm_unmap_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end, struct kvm_memory_slot *memslot, void *data) kvm_unmap_hva_handler() argument
709 kvm_set_spte_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end, struct kvm_memory_slot *memslot, void *data) kvm_set_spte_handler() argument
752 kvm_age_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end, struct kvm_memory_slot *memslot, void *data) kvm_age_hva_handler() argument
759 kvm_test_age_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end, struct kvm_memory_slot *memslot, void *data) kvm_test_age_hva_handler() argument
935 fault_supports_huge_mapping(struct kvm_memory_slot *memslot, unsigned long hva, bool write) fault_supports_huge_mapping() argument
1136 struct kvm_memory_slot *memslot; kvm_map_page() local
[all...]
/kernel/linux/linux-6.6/virt/kvm/
H A Ddirty_ring.c55 struct kvm_memory_slot *memslot; in kvm_reset_dirty_gfn() local
64 memslot = id_to_memslot(__kvm_memslots(kvm, as_id), id); in kvm_reset_dirty_gfn()
66 if (!memslot || (offset + __fls(mask)) >= memslot->npages) in kvm_reset_dirty_gfn()
70 kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot, offset, mask); in kvm_reset_dirty_gfn()
H A Dkvm_main.c383 const struct kvm_memory_slot *memslot) in kvm_flush_remote_tlbs_memslot()
386 * All current use cases for flushing the TLBs for a specific memslot in kvm_flush_remote_tlbs_memslot()
388 * mmu_lock. The interaction between the various operations on memslot in kvm_flush_remote_tlbs_memslot()
390 * operation is observed by any other operation on the same memslot. in kvm_flush_remote_tlbs_memslot()
393 kvm_flush_remote_tlbs_range(kvm, memslot->base_gfn, memslot->npages); in kvm_flush_remote_tlbs_memslot()
577 /* Iterate over each memslot intersecting [start, last] (inclusive) range */
705 * unmap the memslot instead of skipping the memslot to ensure that KVM in kvm_change_spte_gfn()
729 * invalidations, including this one, found a relevant memslot at in kvm_mmu_notifier_change_pte()
382 kvm_flush_remote_tlbs_memslot(struct kvm *kvm, const struct kvm_memory_slot *memslot) kvm_flush_remote_tlbs_memslot() argument
978 kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot) kvm_destroy_dirty_bitmap() argument
1000 struct kvm_memory_slot *memslot; kvm_free_memslots() local
1404 kvm_alloc_dirty_bitmap(struct kvm_memory_slot *memslot) kvm_alloc_dirty_bitmap() argument
2082 kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log, int *is_dirty, struct kvm_memory_slot **memslot) kvm_get_dirty_log() argument
2148 struct kvm_memory_slot *memslot; kvm_get_dirty_log_protect() local
2259 struct kvm_memory_slot *memslot; kvm_clear_dirty_log_protect() local
2386 struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn); kvm_is_visible_gfn() local
2394 struct kvm_memory_slot *memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); kvm_vcpu_is_visible_gfn() local
3129 __kvm_write_guest_page(struct kvm *kvm, struct kvm_memory_slot *memslot, gfn_t gfn, const void *data, int offset, int len) __kvm_write_guest_page() argument
3350 mark_page_dirty_in_slot(struct kvm *kvm, const struct kvm_memory_slot *memslot, gfn_t gfn) mark_page_dirty_in_slot() argument
3377 struct kvm_memory_slot *memslot; mark_page_dirty() local
3386 struct kvm_memory_slot *memslot; kvm_vcpu_mark_page_dirty() local
[all...]
/kernel/linux/linux-5.10/virt/kvm/
H A Dkvm_main.c695 static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot) in kvm_destroy_dirty_bitmap() argument
697 if (!memslot->dirty_bitmap) in kvm_destroy_dirty_bitmap()
700 kvfree(memslot->dirty_bitmap); in kvm_destroy_dirty_bitmap()
701 memslot->dirty_bitmap = NULL; in kvm_destroy_dirty_bitmap()
716 struct kvm_memory_slot *memslot; in kvm_free_memslots() local
721 kvm_for_each_memslot(memslot, slots) in kvm_free_memslots()
722 kvm_free_memslot(kvm, memslot); in kvm_free_memslots()
1009 static int kvm_alloc_dirty_bitmap(struct kvm_memory_slot *memslot) in kvm_alloc_dirty_bitmap() argument
1011 unsigned long dirty_bytes = kvm_dirty_bitmap_bytes(memslot); in kvm_alloc_dirty_bitmap()
1013 memslot->dirty_bitmap = kvzalloc(dirty_bytes, GFP_KERNEL_ACCOUNT); in kvm_alloc_dirty_bitmap()
1024 kvm_memslot_delete(struct kvm_memslots *slots, struct kvm_memory_slot *memslot) kvm_memslot_delete() argument
1062 kvm_memslot_move_backward(struct kvm_memslots *slots, struct kvm_memory_slot *memslot) kvm_memslot_move_backward() argument
1097 kvm_memslot_move_forward(struct kvm_memslots *slots, struct kvm_memory_slot *memslot, int start) kvm_memslot_move_forward() argument
1158 update_memslots(struct kvm_memslots *slots, struct kvm_memory_slot *memslot, enum kvm_mr_change change) update_memslots() argument
1502 kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log, int *is_dirty, struct kvm_memory_slot **memslot) kvm_get_dirty_log() argument
1564 struct kvm_memory_slot *memslot; kvm_get_dirty_log_protect() local
1671 struct kvm_memory_slot *memslot; kvm_clear_dirty_log_protect() local
1766 struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn); kvm_is_visible_gfn() local
1774 struct kvm_memory_slot *memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn); kvm_vcpu_is_visible_gfn() local
2306 __kvm_unmap_gfn(struct kvm_memory_slot *memslot, struct kvm_host_map *map, struct gfn_to_pfn_cache *cache, bool dirty, bool atomic) __kvm_unmap_gfn() argument
2540 __kvm_write_guest_page(struct kvm_memory_slot *memslot, gfn_t gfn, const void *data, int offset, int len) __kvm_write_guest_page() argument
2767 mark_page_dirty_in_slot(struct kvm_memory_slot *memslot, gfn_t gfn) mark_page_dirty_in_slot() argument
2779 struct kvm_memory_slot *memslot; mark_page_dirty() local
2788 struct kvm_memory_slot *memslot; kvm_vcpu_mark_page_dirty() local
[all...]
/kernel/linux/linux-5.10/arch/powerpc/include/asm/
H A Dkvm_book3s.h197 const struct kvm_memory_slot *memslot,
204 struct kvm_memory_slot *memslot,
213 extern int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
215 extern int kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
217 extern int kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
220 struct kvm_memory_slot *memslot, unsigned long *map);
222 const struct kvm_memory_slot *memslot);
240 extern void kvmppc_update_dirty_map(const struct kvm_memory_slot *memslot,
257 struct kvm_memory_slot *memslot, unsigned long *map);
259 struct kvm_memory_slot *memslot,
[all...]
/kernel/linux/linux-6.6/arch/powerpc/include/asm/
H A Dkvm_book3s.h197 const struct kvm_memory_slot *memslot,
204 struct kvm_memory_slot *memslot,
213 extern void kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
215 extern bool kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
217 extern bool kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
220 struct kvm_memory_slot *memslot, unsigned long *map);
222 const struct kvm_memory_slot *memslot);
240 extern void kvmppc_update_dirty_map(const struct kvm_memory_slot *memslot,
257 struct kvm_memory_slot *memslot, unsigned long *map);
259 struct kvm_memory_slot *memslot,
[all...]
/kernel/linux/linux-5.10/arch/mips/kvm/
H A Dmmu.c447 struct kvm_memory_slot *memslot, in handle_hva_to_gpa()
452 struct kvm_memory_slot *memslot; in handle_hva_to_gpa() local
458 kvm_for_each_memslot(memslot, slots) { in handle_hva_to_gpa()
462 hva_start = max(start, memslot->userspace_addr); in handle_hva_to_gpa()
463 hva_end = min(end, memslot->userspace_addr + in handle_hva_to_gpa()
464 (memslot->npages << PAGE_SHIFT)); in handle_hva_to_gpa()
472 gfn = hva_to_gfn_memslot(hva_start, memslot); in handle_hva_to_gpa()
473 gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot); in handle_hva_to_gpa()
475 ret |= handler(kvm, gfn, gfn_end, memslot, data); in handle_hva_to_gpa()
483 struct kvm_memory_slot *memslot, voi in kvm_unmap_hva_handler()
442 handle_hva_to_gpa(struct kvm *kvm, unsigned long start, unsigned long end, int (*handler)(struct kvm *kvm, gfn_t gfn, gpa_t gfn_end, struct kvm_memory_slot *memslot, void *data), void *data) handle_hva_to_gpa() argument
482 kvm_unmap_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end, struct kvm_memory_slot *memslot, void *data) kvm_unmap_hva_handler() argument
498 kvm_set_spte_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end, struct kvm_memory_slot *memslot, void *data) kvm_set_spte_handler() argument
540 kvm_age_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end, struct kvm_memory_slot *memslot, void *data) kvm_age_hva_handler() argument
546 kvm_test_age_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end, struct kvm_memory_slot *memslot, void *data) kvm_test_age_hva_handler() argument
[all...]
/kernel/linux/linux-5.10/include/linux/
H A Dkvm_host.h50 * Bit 63 of the memslot generation number is an "update in-progress flag",
53 * mark cached memslot data, e.g. MMIO accesses, as potentially being stale,
64 * memslot update is in-progress, and to prevent cache hits *after* updating
353 static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot) in kvm_dirty_bitmap_bytes() argument
355 return ALIGN(memslot->npages, BITS_PER_LONG) / 8; in kvm_dirty_bitmap_bytes()
358 static inline unsigned long *kvm_second_dirty_bitmap(struct kvm_memory_slot *memslot) in kvm_second_dirty_bitmap() argument
360 unsigned long len = kvm_dirty_bitmap_bytes(memslot); in kvm_second_dirty_bitmap()
362 return memslot->dirty_bitmap + len / sizeof(*memslot->dirty_bitmap); in kvm_second_dirty_bitmap()
436 * to get the memslot b
1434 kvm_is_visible_memslot(struct kvm_memory_slot *memslot) kvm_is_visible_memslot() argument
[all...]
/kernel/linux/linux-6.6/arch/riscv/kvm/
H A Dvcpu_exit.c16 struct kvm_memory_slot *memslot; in gstage_page_fault() local
24 memslot = gfn_to_memslot(vcpu->kvm, gfn); in gstage_page_fault()
25 hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable); in gstage_page_fault()
43 ret = kvm_riscv_gstage_map(vcpu, memslot, fault_addr, hva, in gstage_page_fault()
H A Dmmu.c336 struct kvm_memory_slot *memslot = id_to_memslot(slots, slot); in gstage_wp_memory_region() local
337 phys_addr_t start = memslot->base_gfn << PAGE_SHIFT; in gstage_wp_memory_region()
338 phys_addr_t end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT; in gstage_wp_memory_region()
405 void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot) in kvm_arch_sync_dirty_log() argument
439 * At this point memslot has been committed and there is an in kvm_arch_commit_memory_region()
610 struct kvm_memory_slot *memslot, in kvm_riscv_gstage_map()
621 bool logging = (memslot->dirty_bitmap && in kvm_riscv_gstage_map()
622 !(memslot->flags & KVM_MEM_READONLY)) ? true : false; in kvm_riscv_gstage_map()
609 kvm_riscv_gstage_map(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot, gpa_t gpa, unsigned long hva, bool is_write) kvm_riscv_gstage_map() argument
/kernel/linux/linux-5.10/tools/testing/selftests/kvm/lib/s390x/
H A Dprocessor.c18 void virt_pgd_alloc(struct kvm_vm *vm, uint32_t memslot) in virt_pgd_alloc() argument
29 KVM_GUEST_PAGE_TABLE_MIN_PADDR, memslot); in virt_pgd_alloc()
41 static uint64_t virt_alloc_region(struct kvm_vm *vm, int ri, uint32_t memslot) in virt_alloc_region() argument
46 KVM_GUEST_PAGE_TABLE_MIN_PADDR, memslot); in virt_alloc_region()
55 uint32_t memslot) in virt_pg_map()
82 entry[idx] = virt_alloc_region(vm, ri, memslot); in virt_pg_map()
54 virt_pg_map(struct kvm_vm *vm, uint64_t gva, uint64_t gpa, uint32_t memslot) virt_pg_map() argument
/kernel/linux/linux-5.10/arch/x86/kvm/mmu/
H A Dmmu.c1459 struct kvm_memory_slot *memslot; in kvm_handle_hva_range() local
1466 kvm_for_each_memslot(memslot, slots) { in kvm_handle_hva_range()
1470 hva_start = max(start, memslot->userspace_addr); in kvm_handle_hva_range()
1471 hva_end = min(end, memslot->userspace_addr + in kvm_handle_hva_range()
1472 (memslot->npages << PAGE_SHIFT)); in kvm_handle_hva_range()
1479 gfn_start = hva_to_gfn_memslot(hva_start, memslot); in kvm_handle_hva_range()
1480 gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot); in kvm_handle_hva_range()
1482 for_each_slot_rmap_range(memslot, PG_LEVEL_4K, in kvm_handle_hva_range()
1486 ret |= handler(kvm, iterator.rmap, memslot, in kvm_handle_hva_range()
2756 * Note, using the already-retrieved memslot and __gfn_to_hva_memslot() in host_pfn_mapping_level()
5235 slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot, slot_level_handler fn, int start_level, int end_level, gfn_t start_gfn, gfn_t end_gfn, bool lock_flush_tlb) slot_handle_level_range() argument
5268 slot_handle_level(struct kvm *kvm, struct kvm_memory_slot *memslot, slot_level_handler fn, int start_level, int end_level, bool lock_flush_tlb) slot_handle_level() argument
5279 slot_handle_all_level(struct kvm *kvm, struct kvm_memory_slot *memslot, slot_level_handler fn, bool lock_flush_tlb) slot_handle_all_level() argument
5287 slot_handle_large_level(struct kvm *kvm, struct kvm_memory_slot *memslot, slot_level_handler fn, bool lock_flush_tlb) slot_handle_large_level() argument
5295 slot_handle_leaf(struct kvm *kvm, struct kvm_memory_slot *memslot, slot_level_handler fn, bool lock_flush_tlb) slot_handle_leaf() argument
5506 struct kvm_memory_slot *memslot; kvm_zap_gfn_range() local
5543 kvm_mmu_slot_remove_write_access(struct kvm *kvm, struct kvm_memory_slot *memslot, int start_level) kvm_mmu_slot_remove_write_access() argument
5610 kvm_mmu_zap_collapsible_sptes(struct kvm *kvm, const struct kvm_memory_slot *memslot) kvm_mmu_zap_collapsible_sptes() argument
5623 kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot) kvm_arch_flush_remote_tlbs_memslot() argument
5638 kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm, struct kvm_memory_slot *memslot) kvm_mmu_slot_leaf_clear_dirty() argument
5660 kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm, struct kvm_memory_slot *memslot) kvm_mmu_slot_largepage_remove_write_access() argument
5677 kvm_mmu_slot_set_dirty(struct kvm *kvm, struct kvm_memory_slot *memslot) kvm_mmu_slot_set_dirty() argument
5952 struct kvm_memory_slot *memslot; kvm_mmu_calculate_default_mmu_pages() local
[all...]
/kernel/linux/linux-6.6/include/linux/
H A Dkvm_types.h62 struct kvm_memory_slot *memslot; member
69 struct kvm_memory_slot *memslot; member

Completed in 44 milliseconds

123