Lines Matching defs:kvm

174 /* Caller must hold kvm->mmu_lock */
175 static pte_t *kvm_pte_for_gpa(struct kvm *kvm,
179 return kvm_walk_pgd(kvm->arch.gpa_mm.pgd, cache, addr);
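Only the first and last lines of this wrapper are matched above. A minimal sketch of its likely full shape follows; the cache parameter type and the exact behaviour of kvm_walk_pgd() are assumptions inferred from the call at line 179 and from the NULL-cache lookups quoted further down.

/* Sketch: resolve the GPA page-table entry for @addr.  kvm_walk_pgd()
 * is assumed to allocate missing intermediate levels from @cache when
 * one is supplied, and to act as a pure lookup when it is NULL.
 * Caller must hold kvm->mmu_lock. */
static pte_t *kvm_pte_for_gpa(struct kvm *kvm,
			      struct kvm_mmu_memory_cache *cache,
			      unsigned long addr)
{
	return kvm_walk_pgd(kvm->arch.gpa_mm.pgd, cache, addr);
}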
301 * @kvm: KVM pointer.
307 * The caller must hold the @kvm->mmu_lock spinlock.
312 static bool kvm_flush_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn, void *data)
314 return kvm_flush_gpa_pgd(kvm->arch.gpa_mm.pgd,
425 * @kvm: KVM pointer.
432 * The caller must hold the @kvm->mmu_lock spinlock.
438 static int kvm_mkclean_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn)
440 return kvm_mkclean_pgd(kvm->arch.gpa_mm.pgd, start_gfn << PAGE_SHIFT,
446 * @kvm: The KVM pointer
453 * acquire @kvm->mmu_lock.
455 void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
463 kvm_mkclean_gpa_pt(kvm, start, end);
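Only the call at line 463 is matched, so the derivation of start/end is not visible. A hedged sketch of how the masked dirty-log hook plausibly maps the generic KVM arguments onto a GFN range is shown below; the __ffs()/__fls() arithmetic and the parameter names beyond line 455 are assumptions.

void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
					     struct kvm_memory_slot *slot,
					     gfn_t gfn_offset, unsigned long mask)
{
	gfn_t base_gfn = slot->base_gfn + gfn_offset;
	gfn_t start = base_gfn + __ffs(mask);	/* lowest dirty page in @mask */
	gfn_t end = base_gfn + __fls(mask);	/* highest dirty page in @mask */

	/* Write-protect only the pages named by @mask (call at line 463). */
	kvm_mkclean_gpa_pt(kvm, start, end);
}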
466 void kvm_arch_commit_memory_region(struct kvm *kvm,
474 kvm_debug("%s: kvm: %p slot: %d, GPA: %llx, size: %llx, QVA: %llx\n",
475 __func__, kvm, mem->slot, mem->guest_phys_addr,
490 spin_lock(&kvm->mmu_lock);
492 needs_flush = kvm_mkclean_gpa_pt(kvm, new->base_gfn,
496 kvm_flush_remote_tlbs(kvm);
497 spin_unlock(&kvm->mmu_lock);
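Lines 490-497 outline the dirty-logging enable path in kvm_arch_commit_memory_region(): write-protect the slot under mmu_lock and flush remote TLBs only when something actually changed. A condensed fragment of that pattern is sketched below; the end-GFN expression is an assumption, since only the start argument is quoted at line 492.

	spin_lock(&kvm->mmu_lock);
	/* Write-protect the slot so dirty logging starts from a clean state. */
	needs_flush = kvm_mkclean_gpa_pt(kvm, new->base_gfn,
					 new->base_gfn + new->npages - 1);
	if (needs_flush)
		kvm_flush_remote_tlbs(kvm);
	spin_unlock(&kvm->mmu_lock);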
501 void kvm_arch_flush_shadow_all(struct kvm *kvm)
504 kvm_flush_gpa_pt(kvm, 0, ~0UL, NULL);
507 kvm_flush_remote_tlbs(kvm);
510 void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
521 spin_lock(&kvm->mmu_lock);
523 kvm_flush_gpa_pt(kvm, slot->base_gfn,
527 kvm_flush_remote_tlbs(kvm);
528 spin_unlock(&kvm->mmu_lock);
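Lines 510-528 show the per-memslot shadow flush following the same locking discipline. A condensed fragment is sketched below; the end-GFN expression is an assumption, since only the start argument is quoted at line 523.

	spin_lock(&kvm->mmu_lock);
	/* Drop every GPA mapping that backs @slot, then invalidate guest TLBs. */
	kvm_flush_gpa_pt(kvm, slot->base_gfn,
			 slot->base_gfn + slot->npages - 1, NULL);
	kvm_flush_remote_tlbs(kvm);
	spin_unlock(&kvm->mmu_lock);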
531 void _kvm_destroy_mm(struct kvm *kvm)
534 WARN_ON(!kvm_flush_gpa_pt(kvm, 0, ~0UL, NULL));
535 pgd_free(NULL, kvm->arch.gpa_mm.pgd);
536 kvm->arch.gpa_mm.pgd = NULL;
650 static int handle_hva_to_gpa(struct kvm *kvm,
653 int (*handler)(struct kvm *kvm, gfn_t gfn,
663 slots = kvm_memslots(kvm);
682 ret |= handler(kvm, gfn, gfn_end, memslot, data);
689 static int kvm_unmap_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
695 kvm_flush_gpa_pt(kvm, gfn, gfn_end - 1, &npages);
701 int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end, bool blockable)
706 return handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, &npages);
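Lines 650-682 and 689-706 give the skeleton of the HVA-to-GPA dispatch used by all of the MMU-notifier callbacks. A sketch of the iteration they imply is below; the clamping arithmetic, kvm_for_each_memslot() usage and hva_to_gfn_memslot() conversion are assumptions beyond the quoted lines, which show only the slot lookup (line 663) and the handler invocation (line 682).

static int handle_hva_to_gpa(struct kvm *kvm,
			     unsigned long start, unsigned long end,
			     int (*handler)(struct kvm *kvm, gfn_t gfn,
					    gfn_t gfn_end,
					    struct kvm_memory_slot *memslot,
					    void *data),
			     void *data)
{
	struct kvm_memslots *slots = kvm_memslots(kvm);
	struct kvm_memory_slot *memslot;
	int ret = 0;

	kvm_for_each_memslot(memslot, slots) {
		unsigned long hva_start, hva_end;
		gfn_t gfn, gfn_end;

		/* Clamp the HVA range to this memslot's userspace mapping. */
		hva_start = max(start, memslot->userspace_addr);
		hva_end = min(end, memslot->userspace_addr +
				   (memslot->npages << PAGE_SHIFT));
		if (hva_start >= hva_end)
			continue;

		/* Convert to guest frame numbers and let the handler act. */
		gfn = hva_to_gfn_memslot(hva_start, memslot);
		gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
		ret |= handler(kvm, gfn, gfn_end, memslot, data);
	}

	return ret;
}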
709 static int kvm_set_spte_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
714 pte_t *gpa_pte = kvm_pte_for_gpa(kvm, NULL, gpa);
740 int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
745 ret = handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &pte);
748 kvm_flush_remote_tlbs(kvm);
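Lines 709-748 cover the change_pte notifier path: the new host pte is threaded through handle_hva_to_gpa() as the opaque @data argument, and the handler rewrites the matching GPA pte (line 714). A sketch of the outer function is below; the conditional TLB flush and the 0 return value are assumptions beyond the quoted lines 740-748.

int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	unsigned long end = hva + PAGE_SIZE;	/* a single host page */
	int ret;

	ret = handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &pte);
	if (ret)
		kvm_flush_remote_tlbs(kvm);
	return 0;
}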
752 static int kvm_age_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
755 return kvm_mkold_pgd(kvm->arch.gpa_mm.pgd, gfn << PAGE_SHIFT,
759 static int kvm_test_age_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
763 pte_t *gpa_pte = kvm_pte_for_gpa(kvm, NULL, gpa);
770 int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
772 return handle_hva_to_gpa(kvm, start, end, kvm_age_hva_handler, NULL);
775 int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
777 return handle_hva_to_gpa(kvm, hva, hva, kvm_test_age_hva_handler, NULL);
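Lines 752-777 show the page-aging callbacks: kvm_age_hva() clears accessed state through kvm_mkold_pgd() (line 755), while kvm_test_age_hva() only inspects the mapped pte. A sketch of the test handler's likely body is below; everything past the NULL-cache lookup quoted at line 763 is an assumption.

static int kvm_test_age_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
				    struct kvm_memory_slot *memslot, void *data)
{
	gpa_t gpa = gfn << PAGE_SHIFT;
	pte_t *gpa_pte = kvm_pte_for_gpa(kvm, NULL, gpa);

	/* No mapping means the page cannot have been referenced. */
	if (!gpa_pte)
		return 0;
	return pte_young(*gpa_pte);
}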
780 static pud_t *kvm_get_pud(struct kvm *kvm,
785 pgd = kvm->arch.gpa_mm.pgd + pgd_index(addr);
795 static pmd_t *kvm_get_pmd(struct kvm *kvm,
801 pud = kvm_get_pud(kvm, cache, addr);
823 pmd = kvm_get_pmd(vcpu->kvm, cache, addr);
857 kvm_flush_gpa_pt(vcpu->kvm,
1013 struct kvm *kvm = vcpu->kvm;
1021 spin_lock(&kvm->mmu_lock);
1024 ptep = kvm_pte_for_gpa(kvm, NULL, gpa);
1048 slot = gfn_to_memslot(kvm, gfn);
1063 mark_page_dirty(kvm, base_gfn + i);
1065 mark_page_dirty(kvm, gfn);
1070 spin_unlock(&kvm->mmu_lock);
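The function spanning lines 1013-1070 is only partially visible, but the matches suggest a fast fault path: under mmu_lock, look up the existing GPA pte without allocating, and if the page is already mapped just update the dirty bookkeeping. The fragment below is a rough, heavily assumed rendering of that shape, not the quoted source.

	spin_lock(&kvm->mmu_lock);
	/* NULL cache: pure lookup, never allocate on the fast path. */
	ptep = kvm_pte_for_gpa(kvm, NULL, gpa);
	if (ptep && pte_present(*ptep) && write) {
		/* Write to an already-mapped page: just record it as dirty. */
		slot = gfn_to_memslot(kvm, gfn);
		mark_page_dirty(kvm, gfn);
	}
	spin_unlock(&kvm->mmu_lock);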
1135 struct kvm *kvm = vcpu->kvm;
1140 srcu_idx = srcu_read_lock(&kvm->srcu);
1145 memslot = gfn_to_memslot(kvm, gfn);
1184 mmu_seq = kvm->mmu_notifier_seq;
1199 pfn = gfn_to_pfn_prot(kvm, gfn, write, &writeable);
1205 spin_lock(&kvm->mmu_lock);
1207 if (mmu_notifier_retry(kvm, mmu_seq)) {
1213 spin_unlock(&kvm->mmu_lock);
1252 mark_page_dirty(kvm, gfn);
1263 mark_page_dirty(kvm, base_gfn + i);
1271 mark_page_dirty(kvm, gfn);
1274 ptep = kvm_pte_for_gpa(kvm, memcache, gpa);
1282 spin_unlock(&kvm->mmu_lock);
1286 srcu_read_unlock(&kvm->srcu, srcu_idx);
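Lines 1135-1286 trace the slow fault path and its MMU-notifier race handling: snapshot mmu_notifier_seq before resolving the pfn, then retry under mmu_lock if a notifier invalidation ran in between. A condensed fragment of that sequence is sketched below; the smp_rmb(), the pfn release, the retry label and the pte installation step are assumptions around the quoted lines.

	srcu_idx = srcu_read_lock(&kvm->srcu);
	memslot = gfn_to_memslot(kvm, gfn);

	mmu_seq = kvm->mmu_notifier_seq;
	smp_rmb();	/* read the sequence before translating the HVA */

	pfn = gfn_to_pfn_prot(kvm, gfn, write, &writeable);

	spin_lock(&kvm->mmu_lock);
	if (mmu_notifier_retry(kvm, mmu_seq)) {
		/* A notifier invalidated the range while we held no lock:
		 * drop the pfn and start over. */
		spin_unlock(&kvm->mmu_lock);
		kvm_release_pfn_clean(pfn);
		goto retry;	/* hypothetical label */
	}

	if (write)
		mark_page_dirty(kvm, gfn);
	ptep = kvm_pte_for_gpa(kvm, memcache, gpa);
	/* ... install the new pte here ... */
	spin_unlock(&kvm->mmu_lock);
	srcu_read_unlock(&kvm->srcu, srcu_idx);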