Lines Matching defs:kvm
12 #include <trace/events/kvm.h>
42 static int stage2_apply_range(struct kvm *kvm, phys_addr_t addr,
51 struct kvm_pgtable *pgt = kvm->arch.mmu.pgt;
55 next = stage2_pgd_addr_end(kvm, addr, end);
61 cond_resched_lock(&kvm->mmu_lock);
67 #define stage2_apply_range_resched(kvm, addr, end, fn) \
68 stage2_apply_range(kvm, addr, end, fn, true)
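The lines above (file lines 42-68) appear to come from the arm64 stage-2 MMU code (arch/arm64/kvm/mmu.c). stage2_apply_range() splits a guest-physical range at stage-2 PGD boundaries, applies a page-table callback to each chunk, and in the _resched variant may drop and retake kvm->mmu_lock between chunks via cond_resched_lock(). Below is a minimal user-space sketch of that walk pattern only; the names, the fixed 1 GiB block size, and sched_yield() standing in for cond_resched_lock() are assumptions for illustration, not the kernel implementation.

    /*
     * Illustrative sketch, not kernel code: walk [addr, end) in chunks that
     * never cross a fixed block boundary, apply a callback per chunk, and
     * optionally yield between chunks.
     */
    #include <sched.h>
    #include <stdint.h>
    #include <stdio.h>

    #define BLOCK_SIZE (1ULL << 30)   /* stand-in for a stage-2 PGD span */

    typedef int (*range_fn)(uint64_t addr, uint64_t size);

    static uint64_t block_addr_end(uint64_t addr, uint64_t end)
    {
        uint64_t boundary = (addr + BLOCK_SIZE) & ~(BLOCK_SIZE - 1);

        return boundary < end ? boundary : end;
    }

    static int apply_range(uint64_t addr, uint64_t end, range_fn fn, int resched)
    {
        int ret = 0;

        do {
            uint64_t next = block_addr_end(addr, end);

            ret = fn(addr, next - addr);
            if (ret)
                break;

            /* the kernel would use cond_resched_lock(&kvm->mmu_lock) here */
            if (resched && next != end)
                sched_yield();

            addr = next;
        } while (addr != end);

        return ret;
    }

    static int print_chunk(uint64_t addr, uint64_t size)
    {
        printf("chunk: 0x%llx + 0x%llx\n",
               (unsigned long long)addr, (unsigned long long)size);
        return 0;
    }

    int main(void)
    {
        return apply_range(0x3ff00000ULL, 0x100200000ULL, print_chunk, 1);
    }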
77 * @kvm: pointer to kvm structure.
81 void kvm_flush_remote_tlbs(struct kvm *kvm)
83 kvm_call_hyp(__kvm_tlb_flush_vmid, &kvm->arch.mmu);
130 struct kvm *kvm = mmu->kvm;
133 assert_spin_locked(&kvm->mmu_lock);
135 WARN_ON(stage2_apply_range(kvm, start, end, kvm_pgtable_stage2_unmap,
144 static void stage2_flush_memslot(struct kvm *kvm,
150 stage2_apply_range_resched(kvm, addr, end, kvm_pgtable_stage2_flush);
155 * @kvm: The struct kvm pointer
160 static void stage2_flush_vm(struct kvm *kvm)
166 idx = srcu_read_lock(&kvm->srcu);
167 spin_lock(&kvm->mmu_lock);
169 slots = kvm_memslots(kvm);
171 stage2_flush_memslot(kvm, memslot);
173 spin_unlock(&kvm->mmu_lock);
174 srcu_read_unlock(&kvm->srcu, idx);
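stage2_flush_vm() above (file lines 160-174) has the same shape as stage2_unmap_vm() later in the listing (file lines 451-467): take the SRCU read side protecting the memslot array, take the MMU lock, apply a per-slot operation to every memslot, then unlock in reverse order. A rough user-space sketch of that locking shape follows, with pthread locks standing in for SRCU and the spinlock; every name here is invented for the example.

    /*
     * Illustrative sketch, not kernel code: reader lock for the slot array,
     * mutex for the page tables, apply an operation to each slot.
     */
    #include <pthread.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct memslot {
        uint64_t base_gfn;
        uint64_t npages;
    };

    static struct memslot slots[] = {
        { .base_gfn = 0x10000, .npages = 256 },
        { .base_gfn = 0x80000, .npages = 1024 },
    };

    static pthread_rwlock_t slots_lock = PTHREAD_RWLOCK_INITIALIZER; /* ~ kvm->srcu */
    static pthread_mutex_t mmu_lock = PTHREAD_MUTEX_INITIALIZER;     /* ~ kvm->mmu_lock */

    static void for_each_slot(void (*op)(const struct memslot *))
    {
        pthread_rwlock_rdlock(&slots_lock);
        pthread_mutex_lock(&mmu_lock);

        for (size_t i = 0; i < sizeof(slots) / sizeof(slots[0]); i++)
            op(&slots[i]);

        pthread_mutex_unlock(&mmu_lock);
        pthread_rwlock_unlock(&slots_lock);
    }

    static void flush_slot(const struct memslot *slot)
    {
        printf("flush gfn 0x%llx, %llu pages\n",
               (unsigned long long)slot->base_gfn,
               (unsigned long long)slot->npages);
    }

    int main(void)
    {
        for_each_slot(flush_slot);
        return 0;
    }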
356 * @kvm: The pointer to the KVM structure
363 int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu)
377 err = kvm_pgtable_stage2_init(pgt, kvm);
390 mmu->kvm = kvm;
403 static void stage2_unmap_memslot(struct kvm *kvm,
438 unmap_stage2_range(&kvm->arch.mmu, gpa, vm_end - vm_start);
446 * @kvm: The struct kvm pointer
451 void stage2_unmap_vm(struct kvm *kvm)
457 idx = srcu_read_lock(&kvm->srcu);
459 spin_lock(&kvm->mmu_lock);
461 slots = kvm_memslots(kvm);
463 stage2_unmap_memslot(kvm, memslot);
465 spin_unlock(&kvm->mmu_lock);
467 srcu_read_unlock(&kvm->srcu, idx);
472 struct kvm *kvm = mmu->kvm;
475 spin_lock(&kvm->mmu_lock);
482 spin_unlock(&kvm->mmu_lock);
493 * @kvm: The KVM pointer
499 int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
505 struct kvm_pgtable *pgt = kvm->arch.mmu.pgt;
515 kvm_mmu_cache_min_pages(kvm));
519 spin_lock(&kvm->mmu_lock);
522 spin_unlock(&kvm->mmu_lock);
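kvm_phys_addr_ioremap() (file lines 499-522) maps a device region into the guest one page at a time, topping up a small pre-allocation cache before each step and holding kvm->mmu_lock only around the map call itself. A simplified sketch of that loop shape follows; the helper names, the cache stub and the page-size handling are assumptions, not the kernel code.

    /*
     * Illustrative sketch, not kernel code: map a physical range into a
     * guest-physical window page by page, refilling an allocation cache
     * before each step.
     */
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096ULL
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    /* stand-in for kvm_mmu_topup_memory_cache() */
    static int topup_cache(int min_pages)
    {
        (void)min_pages;
        return 0;               /* pretend allocation always succeeds */
    }

    /* stand-in for kvm_pgtable_stage2_map() called under the MMU lock */
    static int map_one_page(uint64_t ipa, uint64_t pa, int writable)
    {
        printf("map IPA 0x%llx -> PA 0x%llx (%s)\n",
               (unsigned long long)ipa, (unsigned long long)pa,
               writable ? "rw" : "ro");
        return 0;
    }

    static int ioremap_range(uint64_t guest_ipa, uint64_t pa, uint64_t size,
                             int writable)
    {
        int ret = 0;

        /* round the IPA down and grow the size so the same bytes are covered */
        size += guest_ipa & ~PAGE_MASK;
        guest_ipa &= PAGE_MASK;

        for (uint64_t addr = guest_ipa; addr < guest_ipa + size;
             addr += PAGE_SIZE, pa += PAGE_SIZE) {
            ret = topup_cache(2);
            if (ret)
                break;
            ret = map_one_page(addr, pa, writable);
            if (ret)
                break;
        }

        return ret;
    }

    int main(void)
    {
        return ioremap_range(0x10000123ULL, 0x90000000ULL, 0x2000, 1);
    }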
541 struct kvm *kvm = mmu->kvm;
542 stage2_apply_range_resched(kvm, addr, end, kvm_pgtable_stage2_wrprotect);
547 * @kvm: The KVM pointer
555 * Acquires kvm_mmu_lock. Called with kvm->slots_lock mutex acquired,
558 void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot)
560 struct kvm_memslots *slots = kvm_memslots(kvm);
570 spin_lock(&kvm->mmu_lock);
571 stage2_wp_range(&kvm->arch.mmu, start, end);
572 spin_unlock(&kvm->mmu_lock);
573 kvm_flush_remote_tlbs(kvm);
578 * @kvm: The KVM pointer
587 static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
595 stage2_wp_range(&kvm->arch.mmu, start, end);
605 void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
609 kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
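kvm_arch_mmu_enable_log_dirty_pt_masked() (file line 605) forwards to kvm_mmu_write_protect_pt_masked() (file line 587), which takes a 64-bit mask of dirty pages relative to gfn_offset inside the slot and write-protects a gfn range covering the set bits. The sketch below shows that mask-to-range arithmetic under the assumption that the range runs from the lowest to the highest set bit; the names and helpers are illustrative only.

    /*
     * Illustrative sketch of turning a per-slot dirty bitmap word into a
     * guest-physical range to write-protect. Not the kernel implementation.
     */
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12

    static void wp_range(uint64_t start, uint64_t end)
    {
        printf("write-protect [0x%llx, 0x%llx)\n",
               (unsigned long long)start, (unsigned long long)end);
    }

    static void wp_masked(uint64_t slot_base_gfn, uint64_t gfn_offset,
                          uint64_t mask)
    {
        if (!mask)
            return;

        uint64_t base_gfn = slot_base_gfn + gfn_offset;
        unsigned int first = __builtin_ctzll(mask);      /* lowest set bit  */
        unsigned int last = 63 - __builtin_clzll(mask);  /* highest set bit */

        wp_range((base_gfn + first) << PAGE_SHIFT,
                 (base_gfn + last + 1) << PAGE_SHIFT);
    }

    int main(void)
    {
        /* pages 3 and 9 of the 64-page window at gfn 0x10000 are dirty */
        wp_masked(0x10000, 0, (1ULL << 3) | (1ULL << 9));
        return 0;
    }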
750 struct kvm *kvm = vcpu->kvm;
831 kvm_mmu_cache_min_pages(kvm));
836 mmu_seq = vcpu->kvm->mmu_notifier_seq;
848 pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writable);
870 spin_lock(&kvm->mmu_lock);
872 if (mmu_notifier_retry(kvm, mmu_seq))
885 mark_page_dirty(kvm, gfn);
915 spin_unlock(&kvm->mmu_lock);
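The user_mem_abort() lines above (file lines 750-915) show the usual MMU-notifier race check: sample mmu_notifier_seq before the sleepable gfn_to_pfn_prot() lookup, then, once kvm->mmu_lock is held, give up and let the guest re-fault if mmu_notifier_retry() reports that an invalidation ran in between. A user-space sketch of that snapshot-and-retry pattern follows, with an atomic counter and a pthread mutex standing in for the kernel primitives; the real check also accounts for in-progress invalidations, which the sketch omits, and all names are assumptions.

    /*
     * Illustrative sketch, not kernel code: record a sequence number before a
     * sleepable lookup, then verify it under the lock before installing state.
     */
    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    static atomic_ulong notifier_seq;                 /* bumped by invalidations */
    static pthread_mutex_t mmu_lock = PTHREAD_MUTEX_INITIALIZER;

    static uint64_t lookup_pfn(uint64_t gfn)
    {
        /* stands in for gfn_to_pfn_prot(); may sleep, so no lock is held */
        return gfn + 0x100000;
    }

    static int install_mapping(uint64_t gfn)
    {
        unsigned long seq = atomic_load(&notifier_seq);
        uint64_t pfn = lookup_pfn(gfn);
        int ret = 0;

        pthread_mutex_lock(&mmu_lock);
        if (atomic_load(&notifier_seq) != seq) {
            ret = -1;           /* an invalidation raced; caller retries the fault */
            goto out;
        }
        printf("map gfn 0x%llx -> pfn 0x%llx\n",
               (unsigned long long)gfn, (unsigned long long)pfn);
    out:
        pthread_mutex_unlock(&mmu_lock);
        return ret;
    }

    int main(void)
    {
        return install_mapping(0x1234) ? 1 : 0;
    }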
930 spin_lock(&vcpu->kvm->mmu_lock);
933 spin_unlock(&vcpu->kvm->mmu_lock);
991 idx = srcu_read_lock(&vcpu->kvm->srcu);
994 memslot = gfn_to_memslot(vcpu->kvm, gfn);
1043 VM_BUG_ON(fault_ipa >= kvm_phys_size(vcpu->kvm));
1060 srcu_read_unlock(&vcpu->kvm->srcu, idx);
1064 static int handle_hva_to_gpa(struct kvm *kvm,
1067 int (*handler)(struct kvm *kvm,
1076 slots = kvm_memslots(kvm);
1090 ret |= handler(kvm, gpa, (u64)(hva_end - hva_start), data);
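handle_hva_to_gpa() (file lines 1064-1090) clamps the incoming host-virtual range against each memslot's userspace mapping, translates the clamped start into a guest-physical address, and hands the per-slot piece to a handler such as kvm_unmap_hva_handler() or kvm_age_hva_handler() below. The sketch shows that clamp-and-translate arithmetic; the structures and helper names are assumptions for the example.

    /*
     * Illustrative sketch, not kernel code: intersect a host-virtual range
     * with each slot and convert the overlap to a guest-physical range.
     */
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12

    struct memslot {
        uint64_t base_gfn;        /* guest frame the slot starts at */
        uint64_t npages;
        uint64_t userspace_addr;  /* host VA backing the slot */
    };

    typedef int (*gpa_handler)(uint64_t gpa, uint64_t size);

    static int for_each_overlap(const struct memslot *slots, int nslots,
                                uint64_t start, uint64_t end, gpa_handler handler)
    {
        int ret = 0;

        for (int i = 0; i < nslots; i++) {
            const struct memslot *s = &slots[i];
            uint64_t slot_end = s->userspace_addr + (s->npages << PAGE_SHIFT);
            uint64_t hva_start = start > s->userspace_addr ? start : s->userspace_addr;
            uint64_t hva_end = end < slot_end ? end : slot_end;

            if (hva_start >= hva_end)
                continue;

            /* translate the clamped hva into a gpa within this slot */
            uint64_t gfn = s->base_gfn +
                           ((hva_start - s->userspace_addr) >> PAGE_SHIFT);
            ret |= handler(gfn << PAGE_SHIFT, hva_end - hva_start);
        }
        return ret;
    }

    static int show(uint64_t gpa, uint64_t size)
    {
        printf("handle gpa 0x%llx, size 0x%llx\n",
               (unsigned long long)gpa, (unsigned long long)size);
        return 0;
    }

    int main(void)
    {
        struct memslot slots[] = {
            { .base_gfn = 0x10000, .npages = 16,
              .userspace_addr = 0x7f0000000000ULL },
        };

        return for_each_overlap(slots, 1, 0x7f0000001000ULL,
                                0x7f0000003000ULL, show);
    }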
1096 static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
1101 __unmap_stage2_range(&kvm->arch.mmu, gpa, size, may_block);
1105 int kvm_unmap_hva_range(struct kvm *kvm,
1108 if (!kvm->arch.mmu.pgt)
1112 handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, &flags);
1116 static int kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
1128 kvm_pgtable_stage2_map(kvm->arch.mmu.pgt, gpa, PAGE_SIZE,
1133 int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
1138 if (!kvm->arch.mmu.pgt)
1148 handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &pfn);
1152 static int kvm_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
1158 kpte = kvm_pgtable_stage2_mkold(kvm->arch.mmu.pgt, gpa);
1163 static int kvm_test_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
1166 return kvm_pgtable_stage2_is_young(kvm->arch.mmu.pgt, gpa);
1169 int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
1171 if (!kvm->arch.mmu.pgt)
1174 return handle_hva_to_gpa(kvm, start, end, kvm_age_hva_handler, NULL);
1177 int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
1179 if (!kvm->arch.mmu.pgt)
1182 return handle_hva_to_gpa(kvm, hva, hva + PAGE_SIZE,
1271 void kvm_arch_commit_memory_region(struct kvm *kvm,
1288 if (!kvm_dirty_log_manual_protect_and_init_set(kvm)) {
1289 kvm_mmu_wp_memory_region(kvm, mem->slot);
1294 int kvm_arch_prepare_memory_region(struct kvm *kvm,
1312 if ((memslot->base_gfn + memslot->npages) > (kvm_phys_size(kvm) >> PAGE_SHIFT))
1355 ret = kvm_phys_addr_ioremap(kvm, gpa, pa,
1367 spin_lock(&kvm->mmu_lock);
1369 unmap_stage2_range(&kvm->arch.mmu, mem->guest_phys_addr, mem->memory_size);
1371 stage2_flush_memslot(kvm, memslot);
1372 spin_unlock(&kvm->mmu_lock);
1378 void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
1382 void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
1386 void kvm_arch_flush_shadow_all(struct kvm *kvm)
1388 kvm_free_stage2_pgd(&kvm->arch.mmu);
1391 void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
1397 spin_lock(&kvm->mmu_lock);
1398 unmap_stage2_range(&kvm->arch.mmu, gpa, size);
1399 spin_unlock(&kvm->mmu_lock);
1446 stage2_flush_vm(vcpu->kvm);
1461 stage2_flush_vm(vcpu->kvm);