Lines Matching refs:memslot

1459 	struct kvm_memory_slot *memslot;
1466 kvm_for_each_memslot(memslot, slots) {
1470 hva_start = max(start, memslot->userspace_addr);
1471 hva_end = min(end, memslot->userspace_addr +
1472 (memslot->npages << PAGE_SHIFT));
1479 gfn_start = hva_to_gfn_memslot(hva_start, memslot);
1480 gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
1482 for_each_slot_rmap_range(memslot, PG_LEVEL_4K,
1486 ret |= handler(kvm, iterator.rmap, memslot,
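The 1459-1486 fragments come from the hva-range handler: each memslot is visited, the requested host-virtual range is clamped to the part the slot actually covers, and the clamped range is converted to guest frame numbers before the per-rmap handler runs. Below is a minimal stand-alone sketch of that clamp-and-convert step; struct slot and hva_to_gfn() are simplified stand-ins for struct kvm_memory_slot and hva_to_gfn_memslot(), not the kernel code itself.

/*
 * Illustrative sketch only: clamp an hva range to one slot and convert
 * it to a gfn range, mirroring the fragments above.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

struct slot {
	uint64_t base_gfn;        /* first guest frame number of the slot */
	uint64_t npages;          /* slot length in pages */
	uint64_t userspace_addr;  /* host virtual address backing the slot */
};

static uint64_t hva_to_gfn(uint64_t hva, const struct slot *s)
{
	/* page offset into the slot, added to the slot's base gfn */
	return s->base_gfn + ((hva - s->userspace_addr) >> PAGE_SHIFT);
}

int main(void)
{
	struct slot s = { .base_gfn = 0x100, .npages = 16,
			  .userspace_addr = 0x7f0000000000ULL };
	uint64_t start = 0x7f0000001234ULL, end = 0x7f0000004000ULL;

	/* clamp the requested hva range to the part covered by this slot */
	uint64_t slot_end  = s.userspace_addr + (s.npages << PAGE_SHIFT);
	uint64_t hva_start = start > s.userspace_addr ? start : s.userspace_addr;
	uint64_t hva_end   = end < slot_end ? end : slot_end;

	/* gfn_end is exclusive: one past the gfn of the last byte in range */
	uint64_t gfn_start = hva_to_gfn(hva_start, &s);
	uint64_t gfn_end   = hva_to_gfn(hva_end + PAGE_SIZE - 1, &s);

	printf("gfn range [0x%llx, 0x%llx)\n",
	       (unsigned long long)gfn_start, (unsigned long long)gfn_end);
	return 0;
}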
2756 * Note, using the already-retrieved memslot and __gfn_to_hva_memslot()
2761 * read-only memslot.
3669 * Retry the page fault if the gfn hit a memslot that is being deleted
3670 * or moved. This ensures any existing SPTEs for the old memslot will
5235 slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot,
5242 for_each_slot_rmap_range(memslot, start_level, end_level, start_gfn,
5268 slot_handle_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
5272 return slot_handle_level_range(kvm, memslot, fn, start_level,
5273 end_level, memslot->base_gfn,
5274 memslot->base_gfn + memslot->npages - 1,
5279 slot_handle_all_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
5282 return slot_handle_level(kvm, memslot, fn, PG_LEVEL_4K,
5287 slot_handle_large_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
5290 return slot_handle_level(kvm, memslot, fn, PG_LEVEL_4K + 1,
5295 slot_handle_leaf(struct kvm *kvm, struct kvm_memory_slot *memslot,
5298 return slot_handle_level(kvm, memslot, fn, PG_LEVEL_4K,
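The 5235-5298 fragments show the slot_handle_*() helpers layered on one generic range walker: slot_handle_level_range() applies a handler across rmap ranges, slot_handle_level() covers the whole slot (base_gfn through base_gfn + npages - 1), and the all/large/leaf variants only pin the page-table level range. The following is a simplified, self-contained sketch of that layering; the names, loop, and level constants are illustrative stand-ins, and the real walker iterates rmap buckets via for_each_slot_rmap_range() rather than individual gfns.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum { LEVEL_4K = 1, LEVEL_2M, LEVEL_1G, LEVEL_MAX = LEVEL_1G };

struct slot { uint64_t base_gfn, npages; };

typedef bool (*slot_handler)(struct slot *s, int level, uint64_t gfn);

/* Generic walker over [start_gfn, end_gfn] at each requested level;
 * returns true if any handler call reported a change. */
static bool walk_range(struct slot *s, slot_handler fn, int start_level,
		       int end_level, uint64_t start_gfn, uint64_t end_gfn)
{
	bool flush = false;

	for (int level = start_level; level <= end_level; level++)
		for (uint64_t gfn = start_gfn; gfn <= end_gfn; gfn++)
			flush |= fn(s, level, gfn);
	return flush;
}

/* Whole-slot variant: derive the gfn range from the slot itself. */
static bool walk_slot(struct slot *s, slot_handler fn, int start_level,
		      int end_level)
{
	return walk_range(s, fn, start_level, end_level,
			  s->base_gfn, s->base_gfn + s->npages - 1);
}

/* Wrappers in the style of slot_handle_all_level() / _large_level() /
 * _leaf() only pin the level range before delegating. */
static bool walk_leaf_only(struct slot *s, slot_handler fn)
{
	return walk_slot(s, fn, LEVEL_4K, LEVEL_4K);
}

static bool count_visit(struct slot *s, int level, uint64_t gfn)
{
	(void)s; (void)level; (void)gfn;
	return true;  /* pretend every visit changed an SPTE */
}

int main(void)
{
	struct slot s = { .base_gfn = 0x1000, .npages = 8 };

	printf("flush needed: %d\n", walk_leaf_only(&s, count_visit));
	return 0;
}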
5432 * It's required when memslot is being deleted or VM is being
5506 struct kvm_memory_slot *memslot;
5513 kvm_for_each_memslot(memslot, slots) {
5516 start = max(gfn_start, memslot->base_gfn);
5517 end = min(gfn_end, memslot->base_gfn + memslot->npages);
5521 slot_handle_level_range(kvm, memslot, kvm_zap_rmapp,
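The 5506-5521 fragments clamp the requested guest-frame range to each memslot and skip slots with no overlap before zapping. A small sketch of that intersection test follows, with simplified types rather than the kernel helpers.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct slot { uint64_t base_gfn, npages; };

/* Intersect [gfn_start, gfn_end) with the slot; false means no overlap,
 * mirroring the "clamp, then skip empty ranges" step in the fragments. */
static bool clamp_to_slot(const struct slot *s, uint64_t gfn_start,
			  uint64_t gfn_end, uint64_t *start, uint64_t *end)
{
	uint64_t slot_end = s->base_gfn + s->npages;

	*start = gfn_start > s->base_gfn ? gfn_start : s->base_gfn;
	*end = gfn_end < slot_end ? gfn_end : slot_end;
	return *start < *end;
}

int main(void)
{
	struct slot s = { .base_gfn = 0x100, .npages = 0x40 };
	uint64_t start, end;

	if (clamp_to_slot(&s, 0x80, 0x120, &start, &end))
		printf("zap gfns [0x%llx, 0x%llx)\n",
		       (unsigned long long)start, (unsigned long long)end);
	return 0;
}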
5544 struct kvm_memory_slot *memslot,
5550 flush = slot_handle_level(kvm, memslot, slot_rmap_write_protect,
5553 flush |= kvm_tdp_mmu_wrprot_slot(kvm, memslot, PG_LEVEL_4K);
5568 kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
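The 5544-5568 fragments, like the later 5639-5689 ones, repeat one idiom: run the legacy rmap walk over the slot, OR in the result of the TDP MMU walk, and issue the memslot-scoped remote TLB flush only if either pass reported changed SPTEs. Here is a hedged sketch of that idiom with stand-in functions; the real calls take the kvm instance and run under the appropriate MMU lock, which is omitted.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct slot { uint64_t base_gfn, npages; };

/* Stand-in for the legacy rmap walk (e.g. slot_handle_level() with
 * slot_rmap_write_protect): true means some SPTE was changed. */
static bool legacy_mmu_wrprot(struct slot *s) { (void)s; return true; }

/* Stand-in for the TDP MMU walk (e.g. kvm_tdp_mmu_wrprot_slot()). */
static bool tdp_mmu_wrprot(struct slot *s) { (void)s; return false; }

/* Stand-in for kvm_arch_flush_remote_tlbs_memslot(), which in the
 * fragments ends up flushing the slot's own gfn range. */
static void flush_slot_tlbs(struct slot *s)
{
	printf("flush TLBs for gfn 0x%llx, %llu pages\n",
	       (unsigned long long)s->base_gfn,
	       (unsigned long long)s->npages);
}

static void remove_write_access(struct slot *s)
{
	bool flush;

	flush = legacy_mmu_wrprot(s);   /* legacy page tables */
	flush |= tdp_mmu_wrprot(s);     /* TDP MMU page tables */

	if (flush)                      /* flush only if something changed */
		flush_slot_tlbs(s);
}

int main(void)
{
	struct slot s = { .base_gfn = 0x100, .npages = 512 };

	remove_write_access(&s);
	return 0;
}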
5611 const struct kvm_memory_slot *memslot)
5615 slot_handle_leaf(kvm, (struct kvm_memory_slot *)memslot,
5619 kvm_tdp_mmu_zap_collapsible_sptes(kvm, memslot);
5624 struct kvm_memory_slot *memslot)
5627 * All current use cases for flushing the TLBs for a specific memslot
5629 * The interaction between the various operations on memslot must be
5631 * is observed by any other operation on the same memslot.
5634 kvm_flush_remote_tlbs_with_address(kvm, memslot->base_gfn,
5635 memslot->npages);
5639 struct kvm_memory_slot *memslot)
5644 flush = slot_handle_leaf(kvm, memslot, __rmap_clear_dirty, false);
5646 flush |= kvm_tdp_mmu_clear_dirty_slot(kvm, memslot);
5656 kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
5661 struct kvm_memory_slot *memslot)
5666 flush = slot_handle_large_level(kvm, memslot, slot_rmap_write_protect,
5669 flush |= kvm_tdp_mmu_wrprot_slot(kvm, memslot, PG_LEVEL_2M);
5673 kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
5678 struct kvm_memory_slot *memslot)
5683 flush = slot_handle_all_level(kvm, memslot, __rmap_set_dirty, false);
5685 flush |= kvm_tdp_mmu_slot_set_dirty(kvm, memslot);
5689 kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
5952 struct kvm_memory_slot *memslot;
5958 kvm_for_each_memslot(memslot, slots)
5959 nr_pages += memslot->npages;
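The 5952-5959 fragments simply sum npages across every memslot to size the guest. An equivalent stand-alone sketch, with a plain array loop standing in for kvm_for_each_memslot():

#include <stdint.h>
#include <stdio.h>

struct slot { uint64_t base_gfn, npages; };

int main(void)
{
	/* hypothetical slots: 1 MiB at gfn 0 and 2 MiB at gfn 0x100000 */
	struct slot slots[] = { { 0x0, 256 }, { 0x100000, 512 } };
	uint64_t nr_pages = 0;

	for (unsigned i = 0; i < sizeof(slots) / sizeof(slots[0]); i++)
		nr_pages += slots[i].npages;   /* total guest pages */

	printf("guest pages: %llu\n", (unsigned long long)nr_pages);
	return 0;
}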