Lines Matching defs:kvm
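All of these matches appear to come from a single file: the function names (kvm_mips_pte_for_gpa, handle_hva_to_gpa, kvm_mips_map_page, and friends) correspond to the MIPS KVM MMU implementation, arch/mips/kvm/mmu.c in mainline Linux. The number at the start of each match is the line number within that file.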

142 /* Caller must hold kvm->mm_lock */
143 static pte_t *kvm_mips_pte_for_gpa(struct kvm *kvm,
147 return kvm_mips_walk_pgd(kvm->arch.gpa_mm.pgd, cache, addr);
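
Lines 142-147 are the basic lookup helper: it resolves a guest physical address (GPA) to its leaf PTE by walking the GPA page tables rooted at kvm->arch.gpa_mm.pgd. Note that the comment at line 142 names kvm->mm_lock, while every caller visible below holds kvm->mmu_lock (lines 596/599, 713/727, 1043/1044). As a rough sketch of what the underlying kvm_mips_walk_pgd() does on the lookup-only path (the function name below is made up for illustration, the cache-backed allocation of missing intermediate tables is omitted, and recent kernels insert a p4d level):

    /* Sketch only: read-only pgd -> pud -> pmd -> pte descent. */
    static pte_t *gpa_pte_lookup_sketch(pgd_t *pgd_base, unsigned long addr)
    {
        pgd_t *pgd = pgd_base + pgd_index(addr);
        pud_t *pud;
        pmd_t *pmd;

        if (pgd_none(*pgd))
            return NULL;            /* no table at this level yet */
        pud = pud_offset(pgd, addr);
        if (pud_none(*pud))
            return NULL;
        pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd))
            return NULL;
        return pte_offset_kernel(pmd, addr);    /* leaf entry for the GPA */
    }
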
260 * @kvm: KVM pointer.
266 * The caller must hold the @kvm->mmu_lock spinlock.
271 bool kvm_mips_flush_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn)
273 return kvm_mips_flush_gpa_pgd(kvm->arch.gpa_mm.pgd,
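
kvm_mips_flush_gpa_pt() (lines 260-273) unmaps a range of guest frames from the GPA page tables and reports, via its bool return (line 271), whether anything was actually cleared, so the caller knows whether host TLBs and shadow state still need flushing. A sketch of the expected call pattern, under the locking rule stated at line 266 (start_gfn and end_gfn are placeholders):

    bool flushed;

    spin_lock(&kvm->mmu_lock);
    flushed = kvm_mips_flush_gpa_pt(kvm, start_gfn, end_gfn);
    spin_unlock(&kvm->mmu_lock);

    /* Stale translations may survive in shadow page tables / TLBs */
    if (flushed)
        kvm_mips_callbacks->flush_shadow_all(kvm);
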
384 * @kvm: KVM pointer.
391 * The caller must hold the @kvm->mmu_lock spinlock.
397 int kvm_mips_mkclean_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn)
399 return kvm_mips_mkclean_pgd(kvm->arch.gpa_mm.pgd,
406 * @kvm: The KVM pointer
413 * acquire @kvm->mmu_lock.
415 void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
423 kvm_mips_mkclean_gpa_pt(kvm, start, end);
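
kvm_mips_mkclean_gpa_pt() (lines 384-399) write-protects (cleans) a range of guest frames so that the next guest write faults and can be recorded in the dirty log. kvm_arch_mmu_enable_log_dirty_pt_masked() (lines 406-423) is the generic KVM hook that drives it when userspace harvests the dirty bitmap, and per line 413 it must be called with kvm->mmu_lock held. Judging from the call at line 423, the hook collapses the dirty-bitmap word into a first/last GFN and clean-protects that span, roughly:

    /* Sketch: @mask selects pages relative to @gfn_offset within @slot */
    gfn_t base_gfn = slot->base_gfn + gfn_offset;
    gfn_t start = base_gfn + __ffs(mask);   /* lowest set bit */
    gfn_t end = base_gfn + __fls(mask);     /* highest set bit */

    kvm_mips_mkclean_gpa_pt(kvm, start, end);
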
434 static int kvm_mips_mkold_gpa_pt(struct kvm *kvm, gfn_t start_gfn,
437 return kvm_mips_mkold_pgd(kvm->arch.gpa_mm.pgd,
442 static int handle_hva_to_gpa(struct kvm *kvm,
445 int (*handler)(struct kvm *kvm, gfn_t gfn,
455 slots = kvm_memslots(kvm);
475 ret |= handler(kvm, gfn, gfn_end, memslot, data);
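
handle_hva_to_gpa() (lines 442-475) is the dispatcher behind all of the MMU-notifier entry points that follow. It clips a host-virtual-address range against each memslot's userspace mapping, converts the overlap into guest frame numbers, and ORs the per-slot handler results together (line 475). Between the visible lines 455 and 475, the loop plausibly looks like this (the rounding details follow mainline and may differ in other versions):

    struct kvm_memslots *slots = kvm_memslots(kvm);
    struct kvm_memory_slot *memslot;
    int ret = 0;

    kvm_for_each_memslot(memslot, slots) {
        unsigned long hva_start, hva_end;
        gfn_t gfn, gfn_end;

        /* Intersect [start, end) with this slot's HVA range */
        hva_start = max(start, memslot->userspace_addr);
        hva_end = min(end, memslot->userspace_addr +
                           (memslot->npages << PAGE_SHIFT));
        if (hva_start >= hva_end)
            continue;

        /* Convert the overlapping HVA span to a GFN span */
        gfn = hva_to_gfn_memslot(hva_start, memslot);
        gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);

        ret |= handler(kvm, gfn, gfn_end, memslot, data);
    }
    return ret;
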
482 static int kvm_unmap_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
485 kvm_mips_flush_gpa_pt(kvm, gfn, gfn_end);
489 int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
492 handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
494 kvm_mips_callbacks->flush_shadow_all(kvm);
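
The unmap pair (lines 482-494) is the simplest client of that dispatcher: the per-range handler just drops the GPA mappings, and kvm_unmap_hva_range() unconditionally flushes shadow state afterwards (line 494; its final parameter is cut off by the match truncation at line 489). The handler body is very likely no more than:

    static int kvm_unmap_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
                                     struct kvm_memory_slot *memslot,
                                     void *data)
    {
        kvm_mips_flush_gpa_pt(kvm, gfn, gfn_end);
        return 1;   /* report that something may have been unmapped */
    }
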
498 static int kvm_set_spte_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
503 pte_t *gpa_pte = kvm_mips_pte_for_gpa(kvm, NULL, gpa);
529 int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
534 ret = handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &pte);
536 kvm_mips_callbacks->flush_shadow_all(kvm);
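
kvm_set_spte_hva() (lines 498-536) propagates a changed host PTE into the GPA page tables, e.g. when the host re-maps the backing page. The handler looks up the GPA entry without allocating (the NULL cache argument at line 503), adjusts the incoming PTE to respect memslot restrictions, installs it, and reports whether a live translation was replaced so the caller can flush shadow state (line 536). A condensed sketch of the handler core (the mainline version also compares the old and new PTEs to skip unnecessary flushes):

    pte_t hva_pte = *(pte_t *)data;
    pte_t *gpa_pte = kvm_mips_pte_for_gpa(kvm, NULL, gfn << PAGE_SHIFT);
    pte_t old_pte;

    if (!gpa_pte)
        return 0;               /* nothing was mapped at this GFN */
    old_pte = *gpa_pte;

    /* Keep dirty-logging and read-only slot semantics intact */
    if ((memslot->flags & KVM_MEM_LOG_DIRTY_PAGES) && !pte_dirty(old_pte))
        hva_pte = pte_mkclean(hva_pte);
    else if (memslot->flags & KVM_MEM_READONLY)
        hva_pte = pte_wrprotect(hva_pte);

    set_pte(gpa_pte, hva_pte);

    /* Only a previously-live mapping can require a flush */
    return pte_present(old_pte) && pte_young(old_pte);
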
540 static int kvm_age_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
543 return kvm_mips_mkold_gpa_pt(kvm, gfn, gfn_end);
546 static int kvm_test_age_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
550 pte_t *gpa_pte = kvm_mips_pte_for_gpa(kvm, NULL, gpa);
557 int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
559 return handle_hva_to_gpa(kvm, start, end, kvm_age_hva_handler, NULL);
562 int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
564 return handle_hva_to_gpa(kvm, hva, hva, kvm_test_age_hva_handler, NULL);
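
The aging pair (lines 540-564) backs the MMU notifier's page-aging callbacks. kvm_age_hva() clears the accessed (young) bit across a range through kvm_mips_mkold_gpa_pt() (lines 434-437), while kvm_test_age_hva() only asks about a single page, hence the degenerate hva, hva range at line 564. The test handler is plausibly just:

    pte_t *gpa_pte = kvm_mips_pte_for_gpa(kvm, NULL, gfn << PAGE_SHIFT);

    if (!gpa_pte)
        return 0;                   /* unmapped pages cannot be young */
    return pte_young(*gpa_pte);     /* accessed since the last aging pass? */
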
589 struct kvm *kvm = vcpu->kvm;
596 spin_lock(&kvm->mmu_lock);
599 ptep = kvm_mips_pte_for_gpa(kvm, NULL, gpa);
621 mark_page_dirty(kvm, gfn);
631 spin_unlock(&kvm->mmu_lock);
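
The fragment at lines 589-631 sits in the fast-path GPA fault handler (_kvm_mips_map_page_fast() in mainline, if this file matches). Under kvm->mmu_lock it looks the PTE up without allocating (line 599); if the page is already mapped, a write fault can be satisfied by just setting the dirty bit and logging the page (line 621), never dropping to the slow path. Sketched below, with error paths and mainline's young-bit tracking simplified:

    spin_lock(&kvm->mmu_lock);

    /* Fast path: is there an existing GPA mapping to reuse? */
    ptep = kvm_mips_pte_for_gpa(kvm, NULL, gpa);
    if (!ptep || !pte_present(*ptep)) {
        spin_unlock(&kvm->mmu_lock);
        return -EFAULT;             /* fall back to the slow path */
    }

    if (write_fault && !pte_dirty(*ptep)) {
        if (!pte_write(*ptep)) {
            spin_unlock(&kvm->mmu_lock);
            return -EFAULT;         /* write-protected: slow path */
        }
        /* First write to a writeable page: dirty it and log it */
        set_pte(ptep, pte_mkdirty(*ptep));
        mark_page_dirty(kvm, gfn);
    }

    spin_unlock(&kvm->mmu_lock);
    return 0;
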
665 struct kvm *kvm = vcpu->kvm;
676 srcu_idx = srcu_read_lock(&kvm->srcu);
692 mmu_seq = kvm->mmu_notifier_seq;
707 pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writeable);
713 spin_lock(&kvm->mmu_lock);
715 if (mmu_notifier_retry(kvm, mmu_seq)) {
721 spin_unlock(&kvm->mmu_lock);
727 ptep = kvm_mips_pte_for_gpa(kvm, memcache, gpa);
735 mark_page_dirty(kvm, gfn);
750 spin_unlock(&kvm->mmu_lock);
754 srcu_read_unlock(&kvm->srcu, srcu_idx);
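
Lines 665-754 show the canonical KVM slow-path fault sequence, and every visible step is load-bearing: hold kvm->srcu while memslots are dereferenced (line 676), snapshot mmu_notifier_seq before looking up the pfn outside any lock (lines 692 and 707, since gfn_to_pfn_prot() may sleep), then retake kvm->mmu_lock and let mmu_notifier_retry() detect an invalidation that raced with the lockless lookup (lines 713-721). Reassembled as a sketch, with error handling and PTE construction omitted:

    srcu_idx = srcu_read_lock(&kvm->srcu);
retry:
    mmu_seq = kvm->mmu_notifier_seq;
    smp_rmb();      /* order the seq snapshot before the pfn lookup */

    /* May sleep and fault the page in; mmu_lock must not be held */
    pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writeable);

    spin_lock(&kvm->mmu_lock);
    if (mmu_notifier_retry(kvm, mmu_seq)) {
        /* An invalidation ran since the snapshot: the pfn is stale */
        spin_unlock(&kvm->mmu_lock);
        kvm_release_pfn_clean(pfn);
        goto retry;
    }

    /* Now safe: install the PTE, allocating tables from the cache */
    ptep = kvm_mips_pte_for_gpa(kvm, memcache, gpa);
    /* ... build the PTE; mark_page_dirty(kvm, gfn) on a write ... */

    spin_unlock(&kvm->mmu_lock);
    srcu_read_unlock(&kvm->srcu, srcu_idx);
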
1019 struct kvm *kvm = vcpu->kvm;
1043 spin_lock(&kvm->mmu_lock);
1044 ptep_buddy = kvm_mips_pte_for_gpa(kvm, NULL,
1048 spin_unlock(&kvm->mmu_lock);
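
The final cluster (lines 1019-1048) is inside the mapped-segment TLB fault handler (kvm_mips_handle_mapped_seg_tlb_fault() in mainline). A MIPS TLB entry maps an even/odd pair of pages, so after faulting in the half the guest actually touched, the handler also fills in the other half of the pair: under kvm->mmu_lock it performs a read-only lookup of the buddy's GPA PTE (the NULL cache at line 1044) and uses it only if a mapping already exists, rather than faulting the buddy in as well. Roughly, with tlb_lo[], pte_gpa[], and idx following the mainline naming:

    /* Default the buddy half to an invalid mapping */
    pte_gpa[!idx] = pfn_pte(0, __pgprot(0));

    if (tlb_lo[!idx] & ENTRYLO_V) {
        spin_lock(&kvm->mmu_lock);
        /* Read-only lookup: don't allocate tables for the buddy */
        ptep_buddy = kvm_mips_pte_for_gpa(kvm, NULL,
                        mips3_tlbpfn_to_paddr(tlb_lo[!idx]));
        if (ptep_buddy)
            pte_gpa[!idx] = *ptep_buddy;
        spin_unlock(&kvm->mmu_lock);
    }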