/kernel/linux/linux-6.6/arch/riscv/kvm/
mmu.c
   337  phys_addr_t start = memslot->base_gfn << PAGE_SHIFT;   in gstage_wp_memory_region()
   338  phys_addr_t end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;   in gstage_wp_memory_region()
   398  phys_addr_t base_gfn = slot->base_gfn + gfn_offset;   in kvm_arch_mmu_enable_log_dirty_pt_masked() local
   399  phys_addr_t start = (base_gfn + __ffs(mask)) << PAGE_SHIFT;   in kvm_arch_mmu_enable_log_dirty_pt_masked()
   400  phys_addr_t end = (base_gfn + __fls(mask) + 1) << PAGE_SHIFT;   in kvm_arch_mmu_enable_log_dirty_pt_masked()
   425  gpa_t gpa = slot->base_gfn << PAGE_SHIFT;   in kvm_arch_flush_shadow_memslot()
   465  if ((new->base_gfn + new->npages) >=   in kvm_arch_prepare_memory_region()
   472  base_gpa = new->base_gfn << PAGE_SHIFT;   in kvm_arch_prepare_memory_region()
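The hits at lines 398-400 are the dirty-logging idiom shared by several architectures: dirtiness is reported in 64-page windows described by base_gfn + gfn_offset plus a 64-bit mask, and only the span between the lowest and highest set bits is re-write-protected. A minimal userspace sketch of that arithmetic, with GCC builtins standing in for the kernel's __ffs()/__fls(); the PAGE_SHIFT value and struct layout are simplifying assumptions, not the kernel definitions:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12  /* assumed 4 KiB pages */

    /* simplified stand-in for struct kvm_memory_slot */
    struct memslot { uint64_t base_gfn; uint64_t npages; };

    /* mirror the kernel's __ffs()/__fls(): index of lowest/highest set bit */
    static unsigned lowest_bit(uint64_t mask)  { return __builtin_ctzll(mask); }
    static unsigned highest_bit(uint64_t mask) { return 63 - __builtin_clzll(mask); }

    /* gpa range covering the dirty pages flagged in one 64-page window */
    static void masked_range(const struct memslot *slot, uint64_t gfn_offset,
                             uint64_t mask, uint64_t *start, uint64_t *end)
    {
        uint64_t base_gfn = slot->base_gfn + gfn_offset;

        *start = (base_gfn + lowest_bit(mask)) << PAGE_SHIFT;       /* inclusive */
        *end   = (base_gfn + highest_bit(mask) + 1) << PAGE_SHIFT;  /* exclusive */
    }

    int main(void)
    {
        struct memslot slot = { .base_gfn = 0x100, .npages = 512 };
        uint64_t start, end;

        masked_range(&slot, 64, 0xf0, &start, &end);  /* pages 4..7 of the window */
        printf("write-protect [%#llx, %#llx)\n",
               (unsigned long long)start, (unsigned long long)end);
        return 0;
    }

Note the half-open result: the __fls(mask) + 1 makes end exclusive, whereas the MIPS and LoongArch entries below keep start and end as gfns and omit the +1, so their end appears to be inclusive.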
/kernel/linux/linux-6.6/arch/x86/kvm/
mmu.h
   269  static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)   in gfn_to_index() argument
   273  (base_gfn >> KVM_HPAGE_GFN_SHIFT(level));   in gfn_to_index()
   280  return gfn_to_index(slot->base_gfn + npages - 1,   in __kvm_mmu_slot_lpages()
   281  slot->base_gfn, level) + 1;   in __kvm_mmu_slot_lpages()
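gfn_to_index() maps a gfn to a slot-relative index at a given paging level by dropping the bits that address pages inside one huge page. A compilable sketch under the x86 assumption of 9 gfn bits per level (so PG_LEVEL_4K = 1 shifts by 0 and the 2 MiB level shifts by 9); the types and macro name are simplified stand-ins:

    #include <stdint.h>
    #include <assert.h>

    typedef uint64_t gfn_t;

    /* x86 assumption: each level covers 9 more gfn bits (4K=1, 2M=2, 1G=3) */
    #define HPAGE_GFN_SHIFT(level) (((level) - 1) * 9)

    static gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
    {
        return (gfn >> HPAGE_GFN_SHIFT(level)) -
               (base_gfn >> HPAGE_GFN_SHIFT(level));
    }

    /* entries needed to cover a slot at one level, as in
     * __kvm_mmu_slot_lpages(): index of the slot's last page, plus one */
    static unsigned long slot_lpages(gfn_t base_gfn, unsigned long npages,
                                     int level)
    {
        return gfn_to_index(base_gfn + npages - 1, base_gfn, level) + 1;
    }

    int main(void)
    {
        /* a slot of 1024 pages starting at gfn 0x800 (2 MiB-aligned) */
        assert(gfn_to_index(0x800, 0x800, 2) == 0);
        assert(gfn_to_index(0x9ff, 0x800, 2) == 0);  /* same 2 MiB region */
        assert(gfn_to_index(0xa00, 0x800, 2) == 1);  /* next 2 MiB region */
        assert(slot_lpages(0x800, 1024, 2) == 2);    /* 1024 pages = two 2M entries */
        return 0;
    }

__kvm_mmu_slot_lpages() (lines 280-281) leans on the same arithmetic: the index of the slot's last page plus one is exactly the number of level-sized metadata entries the slot needs, even when base_gfn is not huge-page aligned.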
/kernel/linux/linux-5.10/arch/loongarch/kvm/
mmu.c
   459  gfn_t base_gfn = slot->base_gfn + gfn_offset;   in kvm_arch_mmu_enable_log_dirty_pt_masked() local
   460  gfn_t start = base_gfn + __ffs(mask);   in kvm_arch_mmu_enable_log_dirty_pt_masked()
   461  gfn_t end = base_gfn + __fls(mask);   in kvm_arch_mmu_enable_log_dirty_pt_masked()
   492  needs_flush = kvm_mkclean_gpa_pt(kvm, new->base_gfn,   in kvm_arch_commit_memory_region()
   493  new->base_gfn + new->npages - 1);   in kvm_arch_commit_memory_region()
   523  kvm_flush_gpa_pt(kvm, slot->base_gfn,   in kvm_arch_flush_shadow_memslot()
   524  slot->base_gfn + slot->npages - 1, &npages);   in kvm_arch_flush_shadow_memslot()
   949  gpa_start = memslot->base_gfn << PAGE_SHIFT;   in fault_supports_huge_mapping()
   965  * memslot->base_gfn << PAGE_SIZ...   in fault_supports_huge_mapping()
  1060  gfn_t base_gfn = (gpa & PMD_MASK) >> PAGE_SHIFT;   in kvm_map_page_fast() local
  1261  gfn_t base_gfn = (gpa & PMD_MASK) >> PAGE_SHIFT;   in kvm_map_page() local
  [all ...]
/kernel/linux/linux-6.6/arch/x86/kvm/mmu/
page_track.c
    67  index = gfn_to_index(gfn, slot->base_gfn, PG_LEVEL_4K);   in update_gfn_write_track()
   134  index = gfn_to_index(gfn, slot->base_gfn, PG_LEVEL_4K);   in kvm_gfn_is_write_tracked()
   244  n->track_remove_region(slot->base_gfn, slot->npages, n);   in kvm_page_track_delete_slot()
tdp_mmu.c
   324  gfn_t base_gfn = sp->gfn;   in handle_removed_pt() local
   333  gfn_t gfn = base_gfn + i * KVM_PAGES_PER_HPAGE(level);   in handle_removed_pt()
  1318  spte_set |= wrprot_gfn_range(kvm, root, slot->base_gfn,   in kvm_tdp_mmu_wrprot_slot()
  1319  slot->base_gfn + slot->npages, min_level);   in kvm_tdp_mmu_wrprot_slot()
  1565  spte_set |= clear_dirty_gfn_range(kvm, root, slot->base_gfn,   in kvm_tdp_mmu_clear_dirty_slot()
  1566  slot->base_gfn + slot->npages);   in kvm_tdp_mmu_clear_dirty_slot()
  1641  gfn_t start = slot->base_gfn;   in zap_collapsible_spte_range()
paging_tmpl.h
   634  gfn_t base_gfn = fault->gfn;   in fetch() local
   636  WARN_ON_ONCE(gw->gfn != base_gfn);   in fetch()
   731  base_gfn = gfn_round_for_level(fault->gfn, it.level);   in fetch()
   737  sp = kvm_mmu_get_child_sp(vcpu, it.sptep, base_gfn,   in fetch()
   752  base_gfn, fault->pfn, fault);   in fetch()
mmu.c
   794  idx = gfn_to_index(gfn, slot->base_gfn, level);   in lpage_info_slot()
  1085  idx = gfn_to_index(gfn, slot->base_gfn, level);   in gfn_to_rmap()
  1309  slot->base_gfn + gfn_offset, mask, true);   in kvm_mmu_write_protect_pt_masked()
  1315  rmap_head = gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),   in kvm_mmu_write_protect_pt_masked()
  1342  slot->base_gfn + gfn_offset, mask, false);   in kvm_mmu_clear_dirty_pt_masked()
  1348  rmap_head = gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),   in kvm_mmu_clear_dirty_pt_masked()
  1376  * The gfn_offset is guaranteed to be aligned to 64, but the base_gfn   in kvm_arch_mmu_enable_log_dirty_pt_masked()
  1381  gfn_t start = slot->base_gfn + gfn_offset + __ffs(mask);   in kvm_arch_mmu_enable_log_dirty_pt_masked()
  1382  gfn_t end = slot->base_gfn + gfn_offset + __fls(mask);   in kvm_arch_mmu_enable_log_dirty_pt_masked()
  3222  gfn_t base_gfn ...   in direct_map() local
  [all ...]
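Lines 1315 and 1348 show x86's per-page variant of the dirty-mask walk: instead of computing one [start, end) span as the RISC-V and arm64 code does, it visits each set bit of the mask and clears the lowest bit per iteration. A sketch of that loop shape, with a stub standing in for the rmap write-protect and a GCC builtin for __ffs() (both are stand-ins, not the kernel helpers):

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t gfn_t;

    /* placeholder for the per-page work (rmap write-protect in the kernel) */
    static void write_protect_page(gfn_t gfn)
    {
        printf("write-protect gfn %#llx\n", (unsigned long long)gfn);
    }

    /* visit every dirty page in a 64-page window, one bit at a time,
     * mirroring the loop shape of kvm_mmu_write_protect_pt_masked() */
    static void wp_masked(gfn_t base_gfn, gfn_t gfn_offset, uint64_t mask)
    {
        while (mask) {
            write_protect_page(base_gfn + gfn_offset + __builtin_ctzll(mask));
            mask &= mask - 1;  /* clear the lowest set bit */
        }
    }

    int main(void)
    {
        wp_masked(0x100, 0, 0x15);  /* bits 0, 2, 4 -> gfns 0x100, 0x102, 0x104 */
        return 0;
    }

The per-bit walk fits x86's rmap design: each dirty page may sit on a different rmap chain, so there is no single contiguous range to operate on, roughly why no [start, end) shortcut is taken here.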
/kernel/linux/linux-5.10/arch/x86/kvm/mmu/
tdp_mmu.c
   880  spte_set |= wrprot_gfn_range(kvm, root, slot->base_gfn,   in kvm_tdp_mmu_wrprot_slot()
   881  slot->base_gfn + slot->npages, min_level);   in kvm_tdp_mmu_wrprot_slot()
   944  spte_set |= clear_dirty_gfn_range(kvm, root, slot->base_gfn,   in kvm_tdp_mmu_clear_dirty_slot()
   945  slot->base_gfn + slot->npages);   in kvm_tdp_mmu_clear_dirty_slot()
  1060  spte_set |= set_dirty_gfn_range(kvm, root, slot->base_gfn,   in kvm_tdp_mmu_slot_set_dirty()
  1061  slot->base_gfn + slot->npages);   in kvm_tdp_mmu_slot_set_dirty()
  1118  zap_collapsible_spte_range(kvm, root, slot->base_gfn,   in kvm_tdp_mmu_zap_collapsible_sptes()
  1119  slot->base_gfn + slot->npages);   in kvm_tdp_mmu_zap_collapsible_sptes()
page_track.c
    64  index = gfn_to_index(gfn, slot->base_gfn, PG_LEVEL_4K);   in update_gfn_track()
   154  index = gfn_to_index(gfn, slot->base_gfn, PG_LEVEL_4K);   in kvm_page_track_is_active()
/kernel/linux/linux-5.10/arch/arm64/kvm/
mmu.c
   147  phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;   in stage2_flush_memslot()
   407  phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;   in stage2_unmap_memslot()
   567  start = memslot->base_gfn << PAGE_SHIFT;   in kvm_mmu_wp_memory_region()
   568  end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;   in kvm_mmu_wp_memory_region()
   591  phys_addr_t base_gfn = slot->base_gfn + gfn_offset;   in kvm_mmu_write_protect_pt_masked() local
   592  phys_addr_t start = (base_gfn + __ffs(mask)) << PAGE_SHIFT;   in kvm_mmu_write_protect_pt_masked()
   593  phys_addr_t end = (base_gfn + __fls(mask) + 1) << PAGE_SHIFT;   in kvm_mmu_write_protect_pt_masked()
   641  gpa_start = memslot->base_gfn << PAGE_SHIFT;   in fault_supports_stage2_huge_mapping()
   658  * memslot->base_gfn << PAGE_SHIF...   in fault_supports_stage2_huge_mapping()
  [all ...]
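fault_supports_stage2_huge_mapping() (hits at 641 and 658) decides whether a stage-2 block mapping may be used for a fault. Beyond checking that the block fits inside the memslot, the decisive test is congruence: guest-physical and host-virtual addresses must share the same offset within one block, or a single block entry cannot describe the translation. A sketch of just that final check, under the assumption that the memslot's edges were already validated:

    #include <stdbool.h>
    #include <stdint.h>

    /* A block mapping of size map_size (e.g. 2 MiB) can only be used if
     * gpa and hva agree modulo the block size; otherwise one block entry
     * cannot cover both sides of the translation. */
    static bool supports_huge_mapping(uint64_t gpa, uint64_t hva,
                                      uint64_t map_size)
    {
        return (gpa & (map_size - 1)) == (hva & (map_size - 1));
    }

    int main(void)
    {
        uint64_t two_mb = 2 * 1024 * 1024;

        /* same 2 MiB offset -> block mapping OK; differing -> fall back */
        return (supports_huge_mapping(0x40200000, 0x7f0000200000, two_mb) &&
               !supports_huge_mapping(0x40200000, 0x7f0000201000, two_mb))
               ? 0 : 1;
    }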
/kernel/linux/linux-5.10/arch/powerpc/kvm/
trace_hv.h
   285  __field(u64, base_gfn)
   297  __entry->base_gfn = memslot ? memslot->base_gfn : -1UL;
   305  __entry->base_gfn, __entry->slot_flags)
book3s_hv_uvmem.c
   259  p->base_pfn = slot->base_gfn;   in kvmppc_uvmem_slot_init()
   277  if (p->base_pfn == slot->base_gfn) {   in kvmppc_uvmem_slot_free()
   392  unsigned long gfn = memslot->base_gfn;   in kvmppc_memslot_page_merge()
   443  memslot->base_gfn << PAGE_SHIFT,   in __kvmppc_uvmem_memslot_create()
   617  gfn = slot->base_gfn;   in kvmppc_uvmem_drop_pages()
   791  unsigned long gfn = memslot->base_gfn;   in kvmppc_uv_migrate_mem_slot()
book3s_64_mmu_hv.c
   568  if (gfn_base < memslot->base_gfn)   in kvmppc_book3s_hv_page_fault()
   682  rmap = &memslot->arch.rmap[gfn_base - memslot->base_gfn];   in kvmppc_book3s_hv_page_fault()
   849  rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];   in kvm_unmap_rmapp()
   895  gfn = memslot->base_gfn;   in kvmppc_core_flush_memslot_hv()
   924  rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];   in kvm_age_rmapp()
   987  rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];   in kvm_test_age_rmapp()
  1126  if (gfn < memslot->base_gfn ||   in kvmppc_harvest_vpa_dirty()
  1127  gfn >= memslot->base_gfn + memslot->npages)   in kvmppc_harvest_vpa_dirty()
  1132  __set_bit_le(gfn - memslot->base_gfn, map);   in kvmppc_harvest_vpa_dirty()
  1207  set_bit_le(gfn - memslot->base_gfn, memslo...   in kvmppc_unpin_guest_page()
  [all ...]
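Almost every Book3S HV hit is the same idiom: per-slot arrays (the rmap chains, the dirty map) are indexed by gfn - base_gfn, after a bounds check like the one at lines 1126-1127. A sketch of that offset-into-slot pattern; the struct is cut down and the plain bitmap write stands in for the kernel's set_bit_le():

    #include <stdbool.h>
    #include <stdint.h>

    typedef uint64_t gfn_t;

    struct memslot {
        gfn_t base_gfn;
        unsigned long npages;
        uint64_t *dirty_bitmap;  /* one bit per page in the slot */
    };

    static bool mark_page_dirty_in_slot(struct memslot *slot, gfn_t gfn)
    {
        uint64_t offset;

        /* reject gfns outside [base_gfn, base_gfn + npages), as
         * kvmppc_harvest_vpa_dirty() does */
        if (gfn < slot->base_gfn || gfn >= slot->base_gfn + slot->npages)
            return false;

        offset = gfn - slot->base_gfn;
        slot->dirty_bitmap[offset / 64] |= 1ULL << (offset % 64);
        return true;
    }

    int main(void)
    {
        uint64_t bits[2] = { 0, 0 };
        struct memslot slot = { .base_gfn = 0x100, .npages = 128,
                                .dirty_bitmap = bits };

        mark_page_dirty_in_slot(&slot, 0x142);  /* offset 0x42: word 1, bit 2 */
        return bits[1] == (1ULL << 2) ? 0 : 1;
    }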
/kernel/linux/linux-6.6/arch/mips/kvm/
mmu.c
   418  gfn_t base_gfn = slot->base_gfn + gfn_offset;   in kvm_arch_mmu_enable_log_dirty_pt_masked() local
   419  gfn_t start = base_gfn + __ffs(mask);   in kvm_arch_mmu_enable_log_dirty_pt_masked()
   420  gfn_t end = base_gfn + __fls(mask);   in kvm_arch_mmu_enable_log_dirty_pt_masked()
/kernel/linux/linux-6.6/arch/powerpc/kvm/
trace_hv.h
   305  __field(u64, base_gfn)
   317  __entry->base_gfn = memslot ? memslot->base_gfn : -1UL;
   325  __entry->base_gfn, __entry->slot_flags)
book3s_hv_uvmem.c
   261  p->base_pfn = slot->base_gfn;   in kvmppc_uvmem_slot_init()
   279  if (p->base_pfn == slot->base_gfn) {   in kvmppc_uvmem_slot_free()
   394  unsigned long gfn = memslot->base_gfn;   in kvmppc_memslot_page_merge()
   450  memslot->base_gfn << PAGE_SHIFT,   in __kvmppc_uvmem_memslot_create()
   624  gfn = slot->base_gfn;   in kvmppc_uvmem_drop_pages()
   797  unsigned long gfn = memslot->base_gfn;   in kvmppc_uv_migrate_mem_slot()
book3s_64_mmu_hv.c
   592  if (gfn_base < memslot->base_gfn)   in kvmppc_book3s_hv_page_fault()
   706  rmap = &memslot->arch.rmap[gfn_base - memslot->base_gfn];   in kvmppc_book3s_hv_page_fault()
   828  rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];   in kvm_unmap_rmapp()
   879  gfn = memslot->base_gfn;   in kvmppc_core_flush_memslot_hv()
   908  rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];   in kvm_age_rmapp()
   979  rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];   in kvm_test_age_rmapp()
  1124  if (gfn < memslot->base_gfn ||   in kvmppc_harvest_vpa_dirty()
  1125  gfn >= memslot->base_gfn + memslot->npages)   in kvmppc_harvest_vpa_dirty()
  1130  __set_bit_le(gfn - memslot->base_gfn, map);   in kvmppc_harvest_vpa_dirty()
  1205  set_bit_le(gfn - memslot->base_gfn, memslo...   in kvmppc_unpin_guest_page()
  [all ...]
/kernel/linux/linux-6.6/arch/arm64/kvm/
mmu.c
   339  phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;   in stage2_flush_memslot()
   945  phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;   in stage2_unmap_memslot()
  1134  start = memslot->base_gfn << PAGE_SHIFT;   in kvm_mmu_wp_memory_region()
  1135  end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;   in kvm_mmu_wp_memory_region()
  1163  start = memslot->base_gfn << PAGE_SHIFT;   in kvm_mmu_split_memory_region()
  1164  end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;   in kvm_mmu_split_memory_region()
  1186  phys_addr_t base_gfn = slot->base_gfn + gfn_offset;   in kvm_arch_mmu_enable_log_dirty_pt_masked() local
  1187  phys_addr_t start = (base_gfn + __ffs(mask)) << PAGE_SHIFT;   in kvm_arch_mmu_enable_log_dirty_pt_masked()
  1188  phys_addr_t end = (base_gfn ...   in kvm_arch_mmu_enable_log_dirty_pt_masked()
  [all ...]
/kernel/linux/linux-5.10/include/linux/
kvm_host.h
   343  gfn_t base_gfn;   member
  1104  if (gfn >= memslots[slot].base_gfn &&   in search_memslots()
  1105  gfn < memslots[slot].base_gfn + memslots[slot].npages)   in search_memslots()
  1111  if (gfn >= memslots[slot].base_gfn)   in search_memslots()
  1117  if (start < slots->used_slots && gfn >= memslots[start].base_gfn &&   in search_memslots()
  1118  gfn < memslots[start].base_gfn + memslots[start].npages) {   in search_memslots()
  1141  unsigned long offset = gfn - slot->base_gfn;   in __gfn_to_hva_memslot()
  1156  return slot->base_gfn + gfn_offset;   in hva_to_gfn_memslot()
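Lines 1141 and 1156 are the two directions of slot-relative address translation: the page offset within the slot, scaled to bytes, gives the host virtual address, and the inverse recovers the gfn. A sketch of the pair, assuming 4 KiB pages and a cut-down slot struct:

    #include <stdint.h>

    #define PAGE_SHIFT 12  /* assumed */
    typedef uint64_t gfn_t;

    struct memslot {
        gfn_t base_gfn;
        unsigned long npages;
        uint64_t userspace_addr;  /* hva of the slot's first page */
    };

    /* gfn -> hva: offset within the slot, scaled to bytes */
    static uint64_t gfn_to_hva(const struct memslot *slot, gfn_t gfn)
    {
        return slot->userspace_addr + ((gfn - slot->base_gfn) << PAGE_SHIFT);
    }

    /* hva -> gfn: the inverse, as in hva_to_gfn_memslot() */
    static gfn_t hva_to_gfn(const struct memslot *slot, uint64_t hva)
    {
        return slot->base_gfn + ((hva - slot->userspace_addr) >> PAGE_SHIFT);
    }

    int main(void)
    {
        struct memslot slot = { .base_gfn = 0x100, .npages = 16,
                                .userspace_addr = 0x7f0000000000ULL };

        uint64_t hva = gfn_to_hva(&slot, 0x104);  /* 4 pages into the slot */
        return hva_to_gfn(&slot, hva) == 0x104 ? 0 : 1;
    }

The 5.10 search_memslots() supplies the slot: it first probes a cached entry (lines 1104-1105), then binary-searches the array, which is kept sorted by base_gfn, and validates the candidate against npages (lines 1117-1118).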
/kernel/linux/linux-6.6/include/linux/
kvm_host.h
   583  gfn_t base_gfn;   member
  1069  if (start < slot->base_gfn) {   in kvm_memslot_iter_start()
  1107  if (iter->slot->base_gfn + iter->slot->npages <= start)   in kvm_memslot_iter_start()
  1121  return iter->slot->base_gfn < end;   in kvm_memslot_iter_is_valid()
  1653  if (gfn >= slot->base_gfn && gfn < slot->base_gfn + slot->npages)   in try_get_memslot()
  1676  if (gfn >= slot->base_gfn) {   in search_memslots()
  1677  if (gfn < slot->base_gfn + slot->npages)   in search_memslots()
  1726  unsigned long offset = gfn - slot->base_gfn;   in __gfn_to_hva_memslot()
  1741  return slot->base_gfn ...   in hva_to_gfn_memslot()
  [all ...]
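In 6.6 the flat array is gone: slots sit in a tree ordered by base_gfn, and search_memslots() (lines 1676-1677) walks it, descending right when the gfn is at or above a slot's base but past its end, and left otherwise. A self-contained sketch with a plain binary search tree standing in for the kernel's rb-tree linkage:

    #include <stddef.h>
    #include <stdint.h>

    typedef uint64_t gfn_t;

    /* plain BST node standing in for the kernel's rb-tree linkage */
    struct memslot {
        gfn_t base_gfn;
        unsigned long npages;
        struct memslot *left, *right;  /* ordered by base_gfn */
    };

    static struct memslot *search_memslots(struct memslot *root, gfn_t gfn)
    {
        while (root) {
            if (gfn >= root->base_gfn) {
                if (gfn < root->base_gfn + root->npages)
                    return root;     /* gfn falls inside this slot */
                root = root->right;  /* past this slot: higher bases */
            } else {
                root = root->left;   /* below this slot's base */
            }
        }
        return NULL;                 /* no slot contains gfn */
    }

    int main(void)
    {
        struct memslot lo   = { .base_gfn = 0x000, .npages = 0x100 };
        struct memslot hi   = { .base_gfn = 0x800, .npages = 0x100 };
        struct memslot root = { .base_gfn = 0x400, .npages = 0x100,
                                .left = &lo, .right = &hi };

        return (search_memslots(&root, 0x8ff) == &hi &&
                search_memslots(&root, 0x200) == NULL) ? 0 : 1;
    }

The walk relies on slots being non-overlapping, which is exactly what the insertion and validation code in kvm_main.c below enforces.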
/kernel/linux/linux-6.6/virt/kvm/
kvm_main.c
   393  kvm_flush_remote_tlbs_range(kvm, memslot->base_gfn, memslot->npages);   in kvm_flush_remote_tlbs_memslot()
  1456  if (slot->base_gfn < tmp->base_gfn)   in kvm_insert_gfn_node()
  1458  else if (slot->base_gfn > tmp->base_gfn)   in kvm_insert_gfn_node()
  1480  WARN_ON_ONCE(old->base_gfn != new->base_gfn);   in kvm_replace_gfn_node()
  1539  if (old && old->base_gfn == new->base_gfn) {   in kvm_replace_memslot()
  1735  dest->base_gfn ...   in kvm_copy_memslot()
  1956  gfn_t base_gfn;   in __kvm_set_memory_region() local
  [all ...]
/kernel/linux/linux-5.10/arch/mips/kvm/
mmu.c
   419  gfn_t base_gfn = slot->base_gfn + gfn_offset;   in kvm_arch_mmu_enable_log_dirty_pt_masked() local
   420  gfn_t start = base_gfn + __ffs(mask);   in kvm_arch_mmu_enable_log_dirty_pt_masked()
   421  gfn_t end = base_gfn + __fls(mask);   in kvm_arch_mmu_enable_log_dirty_pt_masked()
/kernel/linux/linux-5.10/virt/kvm/
kvm_main.c
  1078  if (memslot->base_gfn > mslots[i + 1].base_gfn)   in kvm_memslot_move_backward()
  1081  WARN_ON_ONCE(memslot->base_gfn == mslots[i + 1].base_gfn);   in kvm_memslot_move_backward()
  1105  if (memslot->base_gfn < mslots[i - 1].base_gfn)   in kvm_memslot_move_forward()
  1108  WARN_ON_ONCE(memslot->base_gfn == mslots[i - 1].base_gfn);   in kvm_memslot_move_forward()
  1405  new.base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;   in __kvm_set_memory_region()
  1423  if (new.base_gfn !...   in __kvm_set_memory_region()
  [all ...]
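The 5.10 __kvm_set_memory_region() (hits at 1405 and 1423) derives base_gfn from guest_phys_addr and then validates the candidate slot; one of its checks is that the [base_gfn, base_gfn + npages) range overlaps no other slot. A sketch of that interval test, simplified to a pairwise predicate where the kernel loops over every slot in the address space:

    #include <stdbool.h>
    #include <stdint.h>

    typedef uint64_t gfn_t;
    struct memslot { gfn_t base_gfn; unsigned long npages; };

    /* two gfn ranges overlap unless one ends at or before the other begins */
    static bool slots_overlap(const struct memslot *a, const struct memslot *b)
    {
        return !(a->base_gfn + a->npages <= b->base_gfn ||
                 b->base_gfn + b->npages <= a->base_gfn);
    }

    int main(void)
    {
        struct memslot a = { .base_gfn = 0x100, .npages = 0x100 };
        struct memslot b = { .base_gfn = 0x180, .npages = 0x100 };  /* overlaps a */
        struct memslot c = { .base_gfn = 0x200, .npages = 0x100 };  /* adjacent to a */

        return (slots_overlap(&a, &b) && !slots_overlap(&a, &c)) ? 0 : 1;
    }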
/kernel/linux/linux-5.10/arch/s390/kvm/
kvm-s390.c
   601  cur_gfn = memslot->base_gfn;   in kvm_arch_sync_dirty_log()
   602  last_gfn = memslot->base_gfn + memslot->npages;   in kvm_arch_sync_dirty_log()
  1945  if (gfn >= memslots[slot].base_gfn &&   in gfn_to_memslot_approx()
  1946  gfn < memslots[slot].base_gfn + memslots[slot].npages)   in gfn_to_memslot_approx()
  1952  if (gfn >= memslots[slot].base_gfn)   in gfn_to_memslot_approx()
  1961  if (gfn >= memslots[start].base_gfn &&   in gfn_to_memslot_approx()
  1962  gfn < memslots[start].base_gfn + memslots[start].npages) {   in gfn_to_memslot_approx()
  1997  unsigned long ofs = cur_gfn - ms->base_gfn;   in kvm_s390_next_dirty_cmma()
  1999  if (ms->base_gfn + ms->npages <= cur_gfn) {   in kvm_s390_next_dirty_cmma()
  2009  if (cur_gfn < ms->base_gfn)   in kvm_s390_next_dirty_cmma()
  [all ...]
/kernel/linux/linux-6.6/arch/s390/kvm/
pv.c
   261  while (slot && slot->base_gfn < pages_2g) {   in kvm_s390_destroy_lower_2g()
   262  len = min_t(u64, slot->npages, pages_2g - slot->base_gfn) * PAGE_SIZE;   in kvm_s390_destroy_lower_2g()
   265  slot = gfn_to_memslot(kvm, slot->base_gfn + slot->npages);   in kvm_s390_destroy_lower_2g()
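The pv.c hits form a compact slot-walk: process every slot that starts below the 2 GiB line, clamp its length at the boundary, and jump to the successor via gfn_to_memslot(base_gfn + npages). A runnable sketch of that shape, with a toy linear lookup and slot table standing in for the kernel's gfn_to_memslot() (all sizes and names here are invented):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096ULL  /* assumed */
    typedef uint64_t gfn_t;
    struct memslot { gfn_t base_gfn; unsigned long npages; };

    /* toy slot table standing in for the kernel's gfn_to_memslot() */
    static struct memslot slots[] = {
        { .base_gfn = 0x00000, .npages = 0x40000 },  /* 0..1 GiB */
        { .base_gfn = 0x40000, .npages = 0x80000 },  /* 1..3 GiB */
    };

    static struct memslot *gfn_to_memslot(gfn_t gfn)
    {
        for (unsigned i = 0; i < sizeof(slots) / sizeof(slots[0]); i++)
            if (gfn >= slots[i].base_gfn &&
                gfn < slots[i].base_gfn + slots[i].npages)
                return &slots[i];
        return NULL;
    }

    /* walk every slot starting below boundary_gfn, clamping each length,
     * then hop to the slot after it: the shape of kvm_s390_destroy_lower_2g() */
    static void destroy_below(gfn_t boundary_gfn)
    {
        struct memslot *slot = gfn_to_memslot(0);

        while (slot && slot->base_gfn < boundary_gfn) {
            uint64_t npages = slot->npages;
            if (slot->base_gfn + npages > boundary_gfn)
                npages = boundary_gfn - slot->base_gfn;  /* clamp at boundary */

            printf("destroy [%#llx, +%#llx)\n",
                   (unsigned long long)(slot->base_gfn * PAGE_SIZE),
                   (unsigned long long)(npages * PAGE_SIZE));
            slot = gfn_to_memslot(slot->base_gfn + slot->npages);
        }
    }

    int main(void)
    {
        destroy_below(0x80000);  /* 2 GiB at 4 KiB pages = gfn 0x80000 */
        return 0;
    }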