/kernel/linux/linux-6.6/tools/testing/selftests/kvm/s390x/cmma_test.c
    All hits below are inside test_get_skip_holes():
        510  size_t gfn_offset;                               (local variable)
        538  gfn_offset = TEST_DATA_START_GFN;
        548  assert_cmma_dirty(gfn_offset, 1, &log);
        549  gfn_offset++;
        560  assert_cmma_dirty(gfn_offset, 0x20, &log);
        561  gfn_offset += 0x20;
        564  gfn_offset += 0x20;
        574  query_cmma_range(vm, gfn_offset, 0x20, &log);
        575  assert_cmma_dirty(gfn_offset, 0x20, &log);
        576  gfn_offset …
    [further matches elided]
/kernel/linux/linux-6.6/arch/mips/kvm/mmu.c
        407  * @gfn_offset: The gfn offset in memory slot
        408  * @mask: The mask of dirty pages at offset 'gfn_offset' in this memory …
        416  gfn_t gfn_offset, unsigned long mask)              in kvm_arch_mmu_enable_log_dirty_pt_masked()
        418  gfn_t base_gfn = slot->base_gfn + gfn_offset;      in kvm_arch_mmu_enable_log_dirty_pt_masked()
    Definition: line 414, kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
        struct kvm_memory_slot *slot, gfn_t gfn_offset, unsigned long mask)
        (gfn_offset is a parameter)
/kernel/linux/linux-5.10/arch/arm64/kvm/mmu.c
        580  * @gfn_offset: The gfn offset in memory slot
        581  * @mask: The mask of dirty pages at offset 'gfn_offset' in this memory …
        589  gfn_t gfn_offset, unsigned long mask)                            in kvm_mmu_write_protect_pt_masked()
        591  phys_addr_t base_gfn = slot->base_gfn + gfn_offset;              in kvm_mmu_write_protect_pt_masked()
        607  gfn_t gfn_offset, unsigned long mask)                            in kvm_arch_mmu_enable_log_dirty_pt_masked()
        609  kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);    in kvm_arch_mmu_enable_log_dirty_pt_masked()
    Definitions: line 587, kvm_mmu_write_protect_pt_masked(); line 605, kvm_arch_mmu_enable_log_dirty_pt_masked();
        both take (struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn_offset, unsigned long mask),
        so gfn_offset is a parameter of both.
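The fragments above all use the same convention: gfn_offset is the distance, in guest frame numbers, from slot->base_gfn to the first page covered by mask, and mask selects up to BITS_PER_LONG consecutive pages starting there. Below is a minimal userspace sketch of that address arithmetic only; kvm_memory_slot is stripped to one field and wp_range() is a hypothetical stand-in for the real stage-2 write-protect primitive, so this is an illustration of the indexing scheme, not the kernel code.

```c
/* Userspace model of the (slot, gfn_offset, mask) -> address range math.
 * gfn_t, kvm_memory_slot and wp_range() are simplified stand-ins, not the
 * kernel definitions. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

typedef uint64_t gfn_t;
typedef uint64_t phys_addr_t;

struct kvm_memory_slot {
	gfn_t base_gfn;		/* first guest frame number of the slot */
};

/* Hypothetical stand-in for a stage-2 write-protect primitive. */
static void wp_range(phys_addr_t start, phys_addr_t end)
{
	printf("write-protect guest physical range [0x%llx, 0x%llx)\n",
	       (unsigned long long)start, (unsigned long long)end);
}

/* Sketch of the pattern visible in kvm_mmu_write_protect_pt_masked():
 * base_gfn = slot->base_gfn + gfn_offset, and mask marks which of the
 * following pages are affected. */
static void write_protect_pt_masked(struct kvm_memory_slot *slot,
				    gfn_t gfn_offset, unsigned long mask)
{
	phys_addr_t base_gfn = slot->base_gfn + gfn_offset;
	int lowest, highest;

	if (!mask)
		return;

	lowest  = __builtin_ctzll(mask);	/* first set bit */
	highest = 63 - __builtin_clzll(mask);	/* last set bit */

	wp_range((base_gfn + lowest) << PAGE_SHIFT,
		 (base_gfn + highest + 1) << PAGE_SHIFT);
}

int main(void)
{
	struct kvm_memory_slot slot = { .base_gfn = 0x1000 };

	/* mask 0xf: four pages dirtied, starting 0x40 pages into the slot */
	write_protect_pt_masked(&slot, 0x40, 0xfUL);
	return 0;
}
```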
/kernel/linux/linux-6.6/arch/riscv/kvm/mmu.c
        395  gfn_t gfn_offset,                                    in kvm_arch_mmu_enable_log_dirty_pt_masked()
        398  phys_addr_t base_gfn = slot->base_gfn + gfn_offset;  in kvm_arch_mmu_enable_log_dirty_pt_masked()
    Definition: line 393, kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
        struct kvm_memory_slot *slot, gfn_t gfn_offset, unsigned long mask)
        (gfn_offset is a parameter)
/kernel/linux/linux-5.10/include/linux/kvm_host.h
        895   gfn_t gfn_offset,
        1154  gfn_t gfn_offset = (hva - slot->userspace_addr) >> PAGE_SHIFT;  in hva_to_gfn_memslot() (local variable)
        1156  return slot->base_gfn + gfn_offset;                             in hva_to_gfn_memslot()
/kernel/linux/linux-6.6/arch/x86/kvm/mmu/mmu.c
        1296  * @gfn_offset: start of the BITS_PER_LONG pages we care about
        1303  gfn_t gfn_offset, unsigned long mask)                                in kvm_mmu_write_protect_pt_masked()
        1309  slot->base_gfn + gfn_offset, mask, true);                            in kvm_mmu_write_protect_pt_masked()
        1315  rmap_head = gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),   in kvm_mmu_write_protect_pt_masked()
        1329  * @gfn_offset: start of the BITS_PER_LONG pages we care about
        1336  gfn_t gfn_offset, unsigned long mask)                                in kvm_mmu_clear_dirty_pt_masked()
        1342  slot->base_gfn + gfn_offset, mask, false);                           in kvm_mmu_clear_dirty_pt_masked()
        1348  rmap_head = gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),   in kvm_mmu_clear_dirty_pt_masked()
        1369  gfn_t gfn_offset, unsigned long mask)                                in kvm_arch_mmu_enable_log_dirty_pt_masked()
        1376  * The gfn_offset i…                                                  in kvm_arch_mmu_enable_log_dirty_pt_masked()
    Definitions: line 1301, kvm_mmu_write_protect_pt_masked(); line 1334, kvm_mmu_clear_dirty_pt_masked();
        line 1367, kvm_arch_mmu_enable_log_dirty_pt_masked(); each takes (struct kvm *kvm,
        struct kvm_memory_slot *slot, gfn_t gfn_offset, unsigned long mask), so gfn_offset is a parameter of all three.
    [further matches elided]
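Unlike the range-based arches above, the x86 hits walk mask bit by bit, looking up state for slot->base_gfn + gfn_offset + __ffs(mask) on each pass. Here is a compilable userspace sketch of that bit-walk under the same simplifying assumptions as before; process_gfn() is a hypothetical stand-in for the rmap/SPTE work and is not a kernel function.

```c
/* Userspace model of the per-bit walk visible in the x86 hits: each set bit
 * in mask names one page at slot->base_gfn + gfn_offset + bit.
 * process_gfn() is a hypothetical stand-in for the rmap/SPTE update. */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t gfn_t;

struct kvm_memory_slot {
	gfn_t base_gfn;
};

static void process_gfn(gfn_t gfn, int wrprot)
{
	printf("%s gfn 0x%llx\n", wrprot ? "write-protect" : "clear-dirty",
	       (unsigned long long)gfn);
}

static void pt_masked_walk(struct kvm_memory_slot *slot,
			   gfn_t gfn_offset, unsigned long mask, int wrprot)
{
	while (mask) {
		int bit = __builtin_ctzl(mask);	/* same role as __ffs(mask) */

		process_gfn(slot->base_gfn + gfn_offset + bit, wrprot);
		mask &= mask - 1;		/* clear the lowest set bit */
	}
}

int main(void)
{
	struct kvm_memory_slot slot = { .base_gfn = 0x1000 };

	pt_masked_walk(&slot, 0x40, 0x5UL, 1);	/* bits 0 and 2 set */
	return 0;
}
```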
/kernel/linux/linux-5.10/arch/mips/kvm/mmu.c
        408  * @gfn_offset: The gfn offset in memory slot
        409  * @mask: The mask of dirty pages at offset 'gfn_offset' in this memory …
        417  gfn_t gfn_offset, unsigned long mask)              in kvm_arch_mmu_enable_log_dirty_pt_masked()
        419  gfn_t base_gfn = slot->base_gfn + gfn_offset;      in kvm_arch_mmu_enable_log_dirty_pt_masked()
    Definition: line 415, kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
        struct kvm_memory_slot *slot, gfn_t gfn_offset, unsigned long mask)
        (gfn_offset is a parameter)
/kernel/linux/linux-5.10/arch/x86/kvm/mmu/mmu.c
        1212  * @gfn_offset: start of the BITS_PER_LONG pages we care about
        1220  gfn_t gfn_offset, unsigned long mask)                                  in kvm_mmu_write_protect_pt_masked()
        1226  slot->base_gfn + gfn_offset, mask, true);                              in kvm_mmu_write_protect_pt_masked()
        1228  rmap_head = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),   in kvm_mmu_write_protect_pt_masked()
        1242  * @gfn_offset: start of the BITS_PER_LONG pages we care about
        1249  gfn_t gfn_offset, unsigned long mask)                                  in kvm_mmu_clear_dirty_pt_masked()
        1255  slot->base_gfn + gfn_offset, mask, false);                             in kvm_mmu_clear_dirty_pt_masked()
        1257  rmap_head = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),   in kvm_mmu_clear_dirty_pt_masked()
        1279  gfn_t gfn_offset, unsigned long mask)                                  in kvm_arch_mmu_enable_log_dirty_pt_masked()
        1282  kvm_x86_ops.enable_log_dirty_pt_masked(kvm, slot, gfn_offset,          in kvm_arch_mmu_enable_log_dirty_pt_masked()
    Definitions: line 1218, kvm_mmu_write_protect_pt_masked(); line 1247, kvm_mmu_clear_dirty_pt_masked();
        line 1277, kvm_arch_mmu_enable_log_dirty_pt_masked(); each takes (struct kvm *kvm,
        struct kvm_memory_slot *slot, gfn_t gfn_offset, unsigned long mask), so gfn_offset is a parameter of all three.
    [further matches elided]
/kernel/linux/linux-5.10/arch/loongarch/kvm/mmu.c
        448  * @gfn_offset: The gfn offset in memory slot
        449  * @mask: The mask of dirty pages at offset 'gfn_offset' in this memory …
        457  gfn_t gfn_offset, unsigned long mask)              in kvm_arch_mmu_enable_log_dirty_pt_masked()
        459  gfn_t base_gfn = slot->base_gfn + gfn_offset;      in kvm_arch_mmu_enable_log_dirty_pt_masked()
    Definition: line 455, kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
        struct kvm_memory_slot *slot, gfn_t gfn_offset, unsigned long mask)
        (gfn_offset is a parameter)
/kernel/linux/linux-6.6/include/linux/kvm_host.h
        1410  gfn_t gfn_offset,
        1739  gfn_t gfn_offset = (hva - slot->userspace_addr) >> PAGE_SHIFT;  in hva_to_gfn_memslot() (local variable)
        1741  return slot->base_gfn + gfn_offset;                             in hva_to_gfn_memslot()
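Both kvm_host.h hits show the same inline helper, hva_to_gfn_memslot(): the offset of hva within the slot's userspace mapping, shifted down by PAGE_SHIFT, yields a gfn_offset that is added to base_gfn. A standalone restatement of that arithmetic, with kvm_memory_slot reduced to the two fields the helper reads and a fixed PAGE_SHIFT of 12 assumed for illustration:

```c
/* Userspace restatement of hva_to_gfn_memslot() as shown in the hits above;
 * kvm_memory_slot is reduced to the two fields the helper reads. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

typedef uint64_t gfn_t;

struct kvm_memory_slot {
	gfn_t base_gfn;			/* first gfn backed by this slot */
	unsigned long userspace_addr;	/* host virtual address of the slot */
};

static gfn_t hva_to_gfn_memslot(unsigned long hva,
				struct kvm_memory_slot *slot)
{
	gfn_t gfn_offset = (hva - slot->userspace_addr) >> PAGE_SHIFT;

	return slot->base_gfn + gfn_offset;
}

int main(void)
{
	struct kvm_memory_slot slot = {
		.base_gfn = 0x1000,
		.userspace_addr = 0x7f0000000000UL,
	};

	/* hva three pages into the mapping resolves to gfn 0x1003 */
	printf("gfn 0x%llx\n",
	       (unsigned long long)hva_to_gfn_memslot(0x7f0000003000UL, &slot));
	return 0;
}
```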
/kernel/linux/linux-6.6/arch/arm64/kvm/mmu.c
        1175  * @gfn_offset: The gfn offset in memory slot
        1176  * @mask: The mask of pages at offset 'gfn_offset' in this memory …
        1184  gfn_t gfn_offset, unsigned long mask)                in kvm_arch_mmu_enable_log_dirty_pt_masked()
        1186  phys_addr_t base_gfn = slot->base_gfn + gfn_offset;  in kvm_arch_mmu_enable_log_dirty_pt_masked()
    Definition: line 1182, kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
        struct kvm_memory_slot *slot, gfn_t gfn_offset, unsigned long mask)
        (gfn_offset is a parameter)
/kernel/linux/linux-5.10/arch/x86/include/asm/kvm_host.h
        1376  gfn_t gfn_offset, unsigned long mask);