Lines Matching defs:kvm
232 if (vcpu->kvm->arch.l1_ptcr == 0)
241 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
246 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
278 l2 = kvmhv_get_nested(vcpu->kvm, l2_hv.lpid, true);
348 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
353 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
386 pr_err("kvm-hv: failed to allocate nested partition table\n");
393 pr_err("kvm-hv: Parent hypervisor does not support nesting (rc=%ld)\n",
454 void kvmhv_vm_nested_init(struct kvm *kvm)
456 kvm->arch.max_nested_lpid = -1;
466 struct kvm *kvm = vcpu->kvm;
471 srcu_idx = srcu_read_lock(&kvm->srcu);
477 !kvm_is_visible_gfn(vcpu->kvm, (ptcr & PRTB_MASK) >> PAGE_SHIFT))
479 srcu_read_unlock(&kvm->srcu, srcu_idx);
481 kvm->arch.l1_ptcr = ptcr;
517 gp = kvmhv_get_nested(vcpu->kvm, l1_lpid, false);
533 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
535 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
540 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
542 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
573 struct kvm *kvm = gp->l1_host;
576 ptbl_addr = (kvm->arch.l1_ptcr & PRTB_MASK) + (gp->l1_lpid << 4);
577 if (gp->l1_lpid < (1ul << ((kvm->arch.l1_ptcr & PRTS_MASK) + 8))) {
578 int srcu_idx = srcu_read_lock(&kvm->srcu);
579 ret = kvm_read_guest(kvm, ptbl_addr,
581 srcu_read_unlock(&kvm->srcu, srcu_idx);
593 static struct kvm_nested_guest *kvmhv_alloc_nested(struct kvm *kvm, unsigned int lpid)
601 gp->l1_host = kvm;
604 gp->shadow_pgtable = pgd_alloc(kvm->mm);
618 pgd_free(kvm->mm, gp->shadow_pgtable);
629 struct kvm *kvm = gp->l1_host;
635 * so we don't need to hold kvm->mmu_lock.
637 kvmppc_free_pgtable_radix(kvm, gp->shadow_pgtable,
639 pgd_free(kvm->mm, gp->shadow_pgtable);
648 struct kvm *kvm = gp->l1_host;
652 spin_lock(&kvm->mmu_lock);
653 if (gp == kvm->arch.nested_guests[lpid]) {
654 kvm->arch.nested_guests[lpid] = NULL;
655 if (lpid == kvm->arch.max_nested_lpid) {
656 while (--lpid >= 0 && !kvm->arch.nested_guests[lpid])
658 kvm->arch.max_nested_lpid = lpid;
663 spin_unlock(&kvm->mmu_lock);
674 void kvmhv_release_all_nested(struct kvm *kvm)
682 spin_lock(&kvm->mmu_lock);
683 for (i = 0; i <= kvm->arch.max_nested_lpid; i++) {
684 gp = kvm->arch.nested_guests[i];
687 kvm->arch.nested_guests[i] = NULL;
693 kvm->arch.max_nested_lpid = -1;
694 spin_unlock(&kvm->mmu_lock);
700 srcu_idx = srcu_read_lock(&kvm->srcu);
701 kvm_for_each_memslot(memslot, kvm_memslots(kvm))
703 srcu_read_unlock(&kvm->srcu, srcu_idx);
709 struct kvm *kvm = gp->l1_host;
711 spin_lock(&kvm->mmu_lock);
712 kvmppc_free_pgtable_radix(kvm, gp->shadow_pgtable, gp->shadow_lpid);
713 spin_unlock(&kvm->mmu_lock);
720 struct kvm_nested_guest *kvmhv_get_nested(struct kvm *kvm, int l1_lpid,
726 l1_lpid >= (1ul << ((kvm->arch.l1_ptcr & PRTS_MASK) + 12 - 4)))
729 spin_lock(&kvm->mmu_lock);
730 gp = kvm->arch.nested_guests[l1_lpid];
733 spin_unlock(&kvm->mmu_lock);
738 newgp = kvmhv_alloc_nested(kvm, l1_lpid);
741 spin_lock(&kvm->mmu_lock);
742 if (kvm->arch.nested_guests[l1_lpid]) {
744 gp = kvm->arch.nested_guests[l1_lpid];
746 kvm->arch.nested_guests[l1_lpid] = newgp;
750 if (l1_lpid > kvm->arch.max_nested_lpid)
751 kvm->arch.max_nested_lpid = l1_lpid;
754 spin_unlock(&kvm->mmu_lock);
764 struct kvm *kvm = gp->l1_host;
767 spin_lock(&kvm->mmu_lock);
769 spin_unlock(&kvm->mmu_lock);
774 static struct kvm_nested_guest *kvmhv_find_nested(struct kvm *kvm, int lpid)
776 if (lpid > kvm->arch.max_nested_lpid)
778 return kvm->arch.nested_guests[lpid];
781 pte_t *find_kvm_nested_guest_pte(struct kvm *kvm, unsigned long lpid,
787 gp = kvmhv_find_nested(kvm, lpid);
791 VM_WARN(!spin_is_locked(&kvm->mmu_lock),
792 "%s called with kvm mmu_lock not held \n", __func__);
804 void kvmhv_insert_nest_rmap(struct kvm *kvm, unsigned long *rmapp,
836 static void kvmhv_update_nest_rmap_rc(struct kvm *kvm, u64 n_rmap,
848 ptep = find_kvm_nested_guest_pte(kvm, lpid, gpa, &shift);
857 kvmppc_radix_tlbie_page(kvm, gpa, shift, lpid);
865 void kvmhv_update_nest_rmap_rc_list(struct kvm *kvm, unsigned long *rmapp,
880 kvmhv_update_nest_rmap_rc(kvm, rmap, clr, set, hpa, mask);
883 static void kvmhv_remove_nest_rmap(struct kvm *kvm, u64 n_rmap,
893 gp = kvmhv_find_nested(kvm, lpid);
898 ptep = find_kvm_nested_guest_pte(kvm, lpid, gpa, &shift);
901 kvmppc_unmap_pte(kvm, ptep, gpa, shift, NULL, gp->shadow_lpid);
904 static void kvmhv_remove_nest_rmap_list(struct kvm *kvm, unsigned long *rmapp,
912 kvmhv_remove_nest_rmap(kvm, rmap, hpa, mask);
917 /* called with kvm->mmu_lock held */
918 void kvmhv_remove_nest_rmap_range(struct kvm *kvm,
936 kvmhv_remove_nest_rmap_list(kvm, rmap, hpa, addr_mask);
959 struct kvm *kvm = vcpu->kvm;
964 spin_lock(&kvm->mmu_lock);
965 ptep = find_kvm_nested_guest_pte(kvm, gp->l1_lpid, gpa, &shift);
969 kvmppc_unmap_pte(kvm, ptep, gpa, shift, NULL, gp->shadow_lpid);
972 spin_unlock(&kvm->mmu_lock);
1017 struct kvm *kvm = vcpu->kvm;
1032 gp = kvmhv_get_nested(kvm, lpid, false);
1053 struct kvm *kvm = vcpu->kvm;
1059 spin_lock(&kvm->mmu_lock);
1060 kvmppc_free_pgtable_radix(kvm, gp->shadow_pgtable,
1063 spin_unlock(&kvm->mmu_lock);
1083 struct kvm *kvm = vcpu->kvm;
1087 spin_lock(&kvm->mmu_lock);
1088 for (i = 0; i <= kvm->arch.max_nested_lpid; i++) {
1089 gp = kvm->arch.nested_guests[i];
1091 spin_unlock(&kvm->mmu_lock);
1093 spin_lock(&kvm->mmu_lock);
1096 spin_unlock(&kvm->mmu_lock);
1102 struct kvm *kvm = vcpu->kvm;
1139 gp = kvmhv_get_nested(kvm, lpid, false);
1242 struct kvm *kvm = vcpu->kvm;
1254 spin_lock(&kvm->mmu_lock);
1256 ret = kvmppc_hv_handle_set_rc(kvm, false, writing,
1257 gpte.raddr, kvm->arch.lpid);
1264 ret = kvmppc_hv_handle_set_rc(kvm, true, writing,
1272 spin_unlock(&kvm->mmu_lock);
1304 struct kvm *kvm = vcpu->kvm;
1375 memslot = gfn_to_memslot(kvm, gfn);
1399 mmu_seq = kvm->mmu_notifier_seq;
1404 spin_lock(&kvm->mmu_lock);
1405 pte_p = find_kvm_secondary_pte(kvm, gpa, &shift);
1410 spin_unlock(&kvm->mmu_lock);
1457 ret = kvmppc_create_pte(kvm, gp->shadow_pgtable, pte, n_gpa, level,
1481 int kvmhv_nested_next_lpid(struct kvm *kvm, int lpid)
1485 spin_lock(&kvm->mmu_lock);
1486 while (++lpid <= kvm->arch.max_nested_lpid) {
1487 if (kvm->arch.nested_guests[lpid]) {
1492 spin_unlock(&kvm->mmu_lock);