Lines Matching defs:kvm
31 * kvm->arch.uvmem_lock is a per-guest lock that prevents concurrent
49 * 1. kvm->srcu - Protects KVM memslots
50 * 2. kvm->mm->mmap_lock - find_vma, migrate_vma_pages and helpers, ksm_madvise
51 * 3. kvm->arch.uvmem_lock - protects read/writes to uvmem slots thus acting
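As a minimal sketch of the ordering listed above (the helper name and the elided body are hypothetical, not taken from the file), a page-in style path nests the three locks like this:

	#include <linux/kvm_host.h>
	#include <linux/mmap_lock.h>

	/* Hypothetical helper illustrating the documented lock ordering. */
	static void uvmem_lock_order_sketch(struct kvm *kvm, unsigned long gfn)
	{
		int srcu_idx;

		/* 1. kvm->srcu: protects the memslot lookup (gfn_to_hva() etc.). */
		srcu_idx = srcu_read_lock(&kvm->srcu);

		/* 2. kvm->mm->mmap_lock: covers find_vma_intersection() and helpers. */
		mmap_read_lock(kvm->mm);

		/* 3. kvm->arch.uvmem_lock: serializes access to the uvmem slots. */
		mutex_lock(&kvm->arch.uvmem_lock);

		/* ... migrate or share the page backing gfn ... */

		mutex_unlock(&kvm->arch.uvmem_lock);
		mmap_read_unlock(kvm->mm);
		srcu_read_unlock(&kvm->srcu, srcu_idx);
	}

The same nesting is visible in kvmppc_h_svm_page_in() further down (lines 953, 954, 960 and the matching unlocks at 977-980).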
231 struct kvm *kvm;
246 int kvmppc_uvmem_slot_init(struct kvm *kvm, const struct kvm_memory_slot *slot)
261 mutex_lock(&kvm->arch.uvmem_lock);
262 list_add(&p->list, &kvm->arch.uvmem_pfns);
263 mutex_unlock(&kvm->arch.uvmem_lock);
271 void kvmppc_uvmem_slot_free(struct kvm *kvm, const struct kvm_memory_slot *slot)
275 mutex_lock(&kvm->arch.uvmem_lock);
276 list_for_each_entry_safe(p, next, &kvm->arch.uvmem_pfns, list) {
284 mutex_unlock(&kvm->arch.uvmem_lock);
287 static void kvmppc_mark_gfn(unsigned long gfn, struct kvm *kvm,
292 list_for_each_entry(p, &kvm->arch.uvmem_pfns, list) {
307 unsigned long uvmem_pfn, struct kvm *kvm)
309 kvmppc_mark_gfn(gfn, kvm, KVMPPC_GFN_UVMEM_PFN, uvmem_pfn);
313 static void kvmppc_gfn_secure_mem_pfn(unsigned long gfn, struct kvm *kvm)
315 kvmppc_mark_gfn(gfn, kvm, KVMPPC_GFN_MEM_PFN, 0);
319 static void kvmppc_gfn_shared(unsigned long gfn, struct kvm *kvm)
321 kvmppc_mark_gfn(gfn, kvm, KVMPPC_GFN_SHARED, 0);
325 static void kvmppc_gfn_remove(unsigned long gfn, struct kvm *kvm)
327 kvmppc_mark_gfn(gfn, kvm, 0, 0);
331 static bool kvmppc_gfn_is_uvmem_pfn(unsigned long gfn, struct kvm *kvm,
336 list_for_each_entry(p, &kvm->arch.uvmem_pfns, list) {
357 * Must be called with kvm->arch.uvmem_lock held.
360 struct kvm *kvm, unsigned long *gfn)
366 list_for_each_entry(iter, &kvm->arch.uvmem_pfns, list)
389 static int kvmppc_memslot_page_merge(struct kvm *kvm,
393 unsigned long end, start = gfn_to_hva(kvm, gfn);
403 mmap_write_lock(kvm->mm);
405 vma = find_vma_intersection(kvm->mm, start, end);
419 mmap_write_unlock(kvm->mm);
423 static void __kvmppc_uvmem_memslot_delete(struct kvm *kvm,
426 uv_unregister_mem_slot(kvm->arch.lpid, memslot->id);
427 kvmppc_uvmem_slot_free(kvm, memslot);
428 kvmppc_memslot_page_merge(kvm, memslot, true);
431 static int __kvmppc_uvmem_memslot_create(struct kvm *kvm,
436 if (kvmppc_memslot_page_merge(kvm, memslot, false))
439 if (kvmppc_uvmem_slot_init(kvm, memslot))
442 ret = uv_register_mem_slot(kvm->arch.lpid,
452 kvmppc_uvmem_slot_free(kvm, memslot);
454 kvmppc_memslot_page_merge(kvm, memslot, true);
458 unsigned long kvmppc_h_svm_init_start(struct kvm *kvm)
465 kvm->arch.secure_guest = KVMPPC_SECURE_INIT_START;
471 if (!kvm_is_radix(kvm))
475 if (!kvm->arch.svm_enabled)
478 srcu_idx = srcu_read_lock(&kvm->srcu);
481 slots = kvm_memslots(kvm);
483 ret = __kvmppc_uvmem_memslot_create(kvm, memslot);
489 slots = kvm_memslots(kvm);
493 __kvmppc_uvmem_memslot_delete(kvm, memslot);
497 srcu_read_unlock(&kvm->srcu, srcu_idx);
504 * Caller must hold kvm->arch.uvmem_lock.
509 struct kvm *kvm, unsigned long gpa, struct page *fault_page)
529 if (!kvmppc_gfn_is_uvmem_pfn(gpa >> page_shift, kvm, NULL))
561 ret = uv_page_out(kvm->arch.lpid, pfn << page_shift,
582 struct kvm *kvm, unsigned long gpa,
587 mutex_lock(&kvm->arch.uvmem_lock);
588 ret = __kvmppc_svm_page_out(vma, start, end, page_shift, kvm, gpa,
590 mutex_unlock(&kvm->arch.uvmem_lock);
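Note the locked/unlocked pairing here: per the comment at line 504 above, __kvmppc_svm_page_out() runs with kvm->arch.uvmem_lock already held, and kvmppc_svm_page_out() (lines 587-590) is the wrapper that takes and drops it. A sketch of that convention with hypothetical names (the lockdep assertion is illustrative, not in the original):

	/* Hypothetical pair illustrating the __locked/unlocked convention. */
	static int __example_page_out(struct kvm *kvm, unsigned long gpa)
	{
		lockdep_assert_held(&kvm->arch.uvmem_lock);
		/* ... do the page-out work with the lock held ... */
		return 0;
	}

	static int example_page_out(struct kvm *kvm, unsigned long gpa)
	{
		int ret;

		mutex_lock(&kvm->arch.uvmem_lock);
		ret = __example_page_out(kvm, gpa);
		mutex_unlock(&kvm->arch.uvmem_lock);

		return ret;
	}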
604 struct kvm *kvm, bool skip_page_out)
613 mmap_read_lock(kvm->mm);
622 vma = find_vma_intersection(kvm->mm, addr, addr+1);
629 mutex_lock(&kvm->arch.uvmem_lock);
631 if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, &uvmem_pfn)) {
638 PAGE_SHIFT, kvm, pvt->gpa, NULL))
643 kvmppc_gfn_remove(gfn, kvm);
646 mutex_unlock(&kvm->arch.uvmem_lock);
649 mmap_read_unlock(kvm->mm);
652 unsigned long kvmppc_h_svm_init_abort(struct kvm *kvm)
661 if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))
664 if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
667 srcu_idx = srcu_read_lock(&kvm->srcu);
669 kvm_for_each_memslot(memslot, kvm_memslots(kvm))
670 kvmppc_uvmem_drop_pages(memslot, kvm, false);
672 srcu_read_unlock(&kvm->srcu, srcu_idx);
674 kvm->arch.secure_guest = 0;
675 uv_svm_terminate(kvm->arch.lpid);
686 * Called with kvm->arch.uvmem_lock held
688 static struct page *kvmppc_uvmem_get_page(unsigned long gpa, struct kvm *kvm)
712 kvmppc_gfn_secure_uvmem_pfn(gpa >> PAGE_SHIFT, uvmem_pfn, kvm);
715 pvt->kvm = kvm;
736 unsigned long end, unsigned long gpa, struct kvm *kvm,
764 dpage = kvmppc_uvmem_get_page(gpa, kvm);
774 ret = uv_page_in(kvm->arch.lpid, pfn << page_shift,
788 static int kvmppc_uv_migrate_mem_slot(struct kvm *kvm,
796 mmap_read_lock(kvm->mm);
797 mutex_lock(&kvm->arch.uvmem_lock);
798 while (kvmppc_next_nontransitioned_gfn(memslot, kvm, &gfn)) {
800 start = gfn_to_hva(kvm, gfn);
805 vma = find_vma_intersection(kvm->mm, start, end);
810 (gfn << PAGE_SHIFT), kvm, PAGE_SHIFT, false);
819 mutex_unlock(&kvm->arch.uvmem_lock);
820 mmap_read_unlock(kvm->mm);
824 unsigned long kvmppc_h_svm_init_done(struct kvm *kvm)
831 if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))
835 srcu_idx = srcu_read_lock(&kvm->srcu);
836 slots = kvm_memslots(kvm);
838 ret = kvmppc_uv_migrate_mem_slot(kvm, memslot);
854 kvm->arch.secure_guest |= KVMPPC_SECURE_INIT_DONE;
855 pr_info("LPID %d went secure\n", kvm->arch.lpid);
858 srcu_read_unlock(&kvm->srcu, srcu_idx);
871 static unsigned long kvmppc_share_page(struct kvm *kvm, unsigned long gpa,
883 srcu_idx = srcu_read_lock(&kvm->srcu);
884 mutex_lock(&kvm->arch.uvmem_lock);
885 if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, &uvmem_pfn)) {
897 mutex_unlock(&kvm->arch.uvmem_lock);
898 pfn = gfn_to_pfn(kvm, gfn);
902 mutex_lock(&kvm->arch.uvmem_lock);
903 if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, &uvmem_pfn)) {
912 if (!uv_page_in(kvm->arch.lpid, pfn << page_shift, gpa, 0,
914 kvmppc_gfn_shared(gfn, kvm);
918 mutex_unlock(&kvm->arch.uvmem_lock);
920 srcu_read_unlock(&kvm->srcu, srcu_idx);
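kvmppc_share_page() (lines 884-920 above) also shows why the lock ordering matters: gfn_to_pfn() may take kvm->mm->mmap_lock and sleep, and mmap_lock sits above uvmem_lock in the hierarchy, so uvmem_lock is dropped around the call and the gfn state re-checked after re-acquiring it. A rough sketch of that drop-and-revalidate step (names and error handling are illustrative only):

	/* Hypothetical sketch of the drop-and-revalidate step in a share path. */
	static kvm_pfn_t example_resolve_shared_pfn(struct kvm *kvm, unsigned long gfn)
	{
		unsigned long uvmem_pfn;
		kvm_pfn_t pfn;

		mutex_unlock(&kvm->arch.uvmem_lock);	/* entered with the lock held */
		pfn = gfn_to_pfn(kvm, gfn);		/* may take mmap_lock and sleep */
		mutex_lock(&kvm->arch.uvmem_lock);

		if (is_error_noslot_pfn(pfn))
			return KVM_PFN_ERR_FAULT;

		/* The gfn may have become (or ceased to be) a uvmem pfn meanwhile. */
		if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, &uvmem_pfn)) {
			/* the real code first pages the secure copy out here */
		}

		return pfn;	/* returns with kvm->arch.uvmem_lock still held */
	}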
930 unsigned long kvmppc_h_svm_page_in(struct kvm *kvm, unsigned long gpa,
940 if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))
950 return kvmppc_share_page(kvm, gpa, page_shift);
953 srcu_idx = srcu_read_lock(&kvm->srcu);
954 mmap_read_lock(kvm->mm);
956 start = gfn_to_hva(kvm, gfn);
960 mutex_lock(&kvm->arch.uvmem_lock);
962 if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, NULL))
966 vma = find_vma_intersection(kvm->mm, start, end);
970 if (kvmppc_svm_page_in(vma, start, end, gpa, kvm, page_shift,
977 mutex_unlock(&kvm->arch.uvmem_lock);
979 mmap_read_unlock(kvm->mm);
980 srcu_read_unlock(&kvm->srcu, srcu_idx);
999 pvt->kvm, pvt->gpa, vmf->page))
1010 * Gets called with kvm->arch.uvmem_lock held.
1025 kvmppc_gfn_remove(pvt->gpa >> PAGE_SHIFT, pvt->kvm);
1027 kvmppc_gfn_secure_mem_pfn(pvt->gpa >> PAGE_SHIFT, pvt->kvm);
1040 kvmppc_h_svm_page_out(struct kvm *kvm, unsigned long gpa,
1049 if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))
1059 srcu_idx = srcu_read_lock(&kvm->srcu);
1060 mmap_read_lock(kvm->mm);
1061 start = gfn_to_hva(kvm, gfn);
1066 vma = find_vma_intersection(kvm->mm, start, end);
1070 if (!kvmppc_svm_page_out(vma, start, end, page_shift, kvm, gpa, NULL))
1073 mmap_read_unlock(kvm->mm);
1074 srcu_read_unlock(&kvm->srcu, srcu_idx);
1078 int kvmppc_send_page_to_uv(struct kvm *kvm, unsigned long gfn)
1083 pfn = gfn_to_pfn(kvm, gfn);
1087 mutex_lock(&kvm->arch.uvmem_lock);
1088 if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, NULL))
1091 ret = uv_page_in(kvm->arch.lpid, pfn << PAGE_SHIFT, gfn << PAGE_SHIFT,
1095 mutex_unlock(&kvm->arch.uvmem_lock);
1099 int kvmppc_uvmem_memslot_create(struct kvm *kvm, const struct kvm_memory_slot *new)
1101 int ret = __kvmppc_uvmem_memslot_create(kvm, new);
1104 ret = kvmppc_uv_migrate_mem_slot(kvm, new);
1109 void kvmppc_uvmem_memslot_delete(struct kvm *kvm, const struct kvm_memory_slot *old)
1111 __kvmppc_uvmem_memslot_delete(kvm, old);
1163 * Don't fail the initialization of kvm-hv module if