Lines matching refs:gp (references to struct kvm_nested_guest *gp in the KVM HV nested-guest code, arch/powerpc/kvm/book3s_hv_nested.c)

27 static void kvmhv_update_ptbl_cache(struct kvm_nested_guest *gp);
516 static void kvmhv_set_nested_ptbl(struct kvm_nested_guest *gp)
521 __pa(gp->shadow_pgtable) | RADIX_PGD_INDEX_SIZE;
522 kvmhv_set_ptbl_entry(gp->shadow_lpid, dw0, gp->process_table);
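The two matches above (lines 516-522) are kvmhv_set_nested_ptbl() packing the physical address of the shadow page table together with the radix root index size into the first doubleword of a partition-table entry, then installing it for the shadow LPID alongside the guest's own process table. Below is a minimal standalone sketch of that packing; the constant value and the helper name are assumptions for illustration, not the kernel's definitions.

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Assumed value: the kernel derives this from the radix PGD level. */
    #define RADIX_PGD_INDEX_SIZE 13

    /* Pack dw0 the way line 521 does: page-table base OR'd with the root
     * index size. The base is page-aligned, so its low bits are zero and
     * the OR cannot corrupt the address. */
    static uint64_t make_patb0(uint64_t shadow_pgtable_pa)
    {
        return shadow_pgtable_pa | RADIX_PGD_INDEX_SIZE;
    }

    int main(void)
    {
        printf("dw0 = 0x%016" PRIx64 "\n", make_patb0(0x1f4000000ULL));
        return 0;
    }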
560 struct kvm_nested_guest *gp;
581 gp = kvmhv_get_nested(vcpu->kvm, l1_lpid, false);
582 if (!gp) {
587 mutex_lock(&gp->tlb_lock);
591 rc = __kvmhv_copy_tofrom_guest_radix(gp->shadow_lpid, pid,
611 rc = __kvmhv_copy_tofrom_guest_radix(gp->shadow_lpid, pid,
618 mutex_unlock(&gp->tlb_lock);
619 kvmhv_put_nested(gp);
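Lines 560-619 show the access discipline used throughout this file: look up the nested guest with a reference held (kvmhv_get_nested), take gp->tlb_lock for the actual operation, then unlock and drop the reference. Here is a userspace model of that get/lock/op/unlock/put shape, with pthread stand-ins for the kernel primitives; the lookup side, with its create race, appears at lines 809-846 further down.

    #include <pthread.h>
    #include <stdlib.h>

    /* Stand-in type: only the fields the pattern needs. */
    struct nested_guest {
        pthread_mutex_t tlb_lock;
        int refcnt;               /* protected by table_lock in this model */
    };

    static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

    static void guest_put(struct nested_guest *gp)
    {
        int ref;

        pthread_mutex_lock(&table_lock);
        ref = --gp->refcnt;       /* cf. line 855 */
        pthread_mutex_unlock(&table_lock);
        if (ref == 0)
            free(gp);             /* cf. kvmhv_release_nested(), line 858 */
    }

    static long copy_tofrom_guest(struct nested_guest *gp)
    {
        long rc;

        if (!gp)                              /* cf. line 582 */
            return -1;
        pthread_mutex_lock(&gp->tlb_lock);    /* cf. line 587 */
        rc = 0;                               /* the copy itself goes here */
        pthread_mutex_unlock(&gp->tlb_lock);  /* cf. line 618 */
        guest_put(gp);                        /* cf. line 619 */
        return rc;
    }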
630 * Caller must hold gp->tlb_lock.
632 static void kvmhv_update_ptbl_cache(struct kvm_nested_guest *gp)
637 struct kvm *kvm = gp->l1_host;
640 ptbl_addr = (kvm->arch.l1_ptcr & PRTB_MASK) + (gp->l1_lpid << 4);
641 if (gp->l1_lpid < (1ul << ((kvm->arch.l1_ptcr & PRTS_MASK) + 12 - 4))) {
648 gp->l1_gr_to_hr = 0;
649 gp->process_table = 0;
651 gp->l1_gr_to_hr = be64_to_cpu(ptbl_entry.patb0);
652 gp->process_table = be64_to_cpu(ptbl_entry.patb1);
654 kvmhv_set_nested_ptbl(gp);
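kvmhv_update_ptbl_cache() (lines 632-654) locates the L1 guest's partition-table entry for this nested LPID at (l1_ptcr & PRTB_MASK) + (l1_lpid << 4), bounds-checks the LPID against the table size encoded in the PRTS field, and caches the two doublewords (byte-swapped from big-endian) in gp->l1_gr_to_hr and gp->process_table, zeroing both when the entry is out of range. A sketch of the address arithmetic; the mask values are assumptions mirroring the ISA layout, not copied from kernel headers.

    #include <stdbool.h>
    #include <stdint.h>

    #define PRTB_MASK 0x0ffffffffffff000ULL  /* assumed: table base bits */
    #define PRTS_MASK 0x1fULL                /* assumed: 5-bit size field */

    /* Bounds check from line 641: the table spans 2^(PRTS+12) bytes of
     * 16-byte entries, hence 2^(PRTS+12-4) entries and lpid << 4 bytes
     * of offset. */
    static bool ptbl_entry_addr(uint64_t l1_ptcr, uint64_t l1_lpid,
                                uint64_t *addr)
    {
        if (l1_lpid >= (1ULL << ((l1_ptcr & PRTS_MASK) + 12 - 4)))
            return false;
        *addr = (l1_ptcr & PRTB_MASK) + (l1_lpid << 4);
        return true;
    }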
675 static void __add_nested(struct kvm *kvm, int lpid, struct kvm_nested_guest *gp)
677 if (idr_replace(&kvm->arch.kvm_nested_guest_idr, gp, lpid))
688 struct kvm_nested_guest *gp;
691 gp = kzalloc(sizeof(*gp), GFP_KERNEL);
692 if (!gp)
694 gp->l1_host = kvm;
695 gp->l1_lpid = lpid;
696 mutex_init(&gp->tlb_lock);
697 gp->shadow_pgtable = pgd_alloc(kvm->mm);
698 if (!gp->shadow_pgtable)
703 gp->shadow_lpid = shadow_lpid;
704 gp->radix = 1;
706 memset(gp->prev_cpu, -1, sizeof(gp->prev_cpu));
708 return gp;
711 pgd_free(kvm->mm, gp->shadow_pgtable);
713 kfree(gp);
720 static void kvmhv_release_nested(struct kvm_nested_guest *gp)
722 struct kvm *kvm = gp->l1_host;
724 if (gp->shadow_pgtable) {
730 kvmppc_free_pgtable_radix(kvm, gp->shadow_pgtable,
731 gp->shadow_lpid);
732 pgd_free(kvm->mm, gp->shadow_pgtable);
734 kvmhv_set_ptbl_entry(gp->shadow_lpid, 0, 0);
735 kvmppc_free_lpid(gp->shadow_lpid);
736 kfree(gp);
739 static void kvmhv_remove_nested(struct kvm_nested_guest *gp)
741 struct kvm *kvm = gp->l1_host;
742 int lpid = gp->l1_lpid;
746 if (gp == __find_nested(kvm, lpid)) {
748 --gp->refcnt;
750 ref = gp->refcnt;
753 kvmhv_release_nested(gp);
765 struct kvm_nested_guest *gp;
771 idr_for_each_entry(&kvm->arch.kvm_nested_guest_idr, gp, lpid) {
773 if (--gp->refcnt == 0) {
774 gp->next = freelist;
775 freelist = gp;
781 while ((gp = freelist) != NULL) {
782 freelist = gp->next;
783 kvmhv_release_nested(gp);
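kvmhv_release_all_nested() (lines 765-783) uses a two-phase teardown: while holding the lock it walks the IDR, drops each guest's reference, and chains the ones that hit zero onto a local freelist; only after unlocking does it release them, keeping the heavyweight page-table teardown out of the critical section. A userspace model with an array standing in for the IDR:

    #include <pthread.h>
    #include <stdlib.h>

    struct nested_guest {
        struct nested_guest *next;
        int refcnt;
    };

    static void release_all(struct nested_guest **table, int n,
                            pthread_mutex_t *lock)
    {
        struct nested_guest *gp, *freelist = NULL;
        int i;

        pthread_mutex_lock(lock);
        for (i = 0; i < n; i++) {
            gp = table[i];
            if (gp && --gp->refcnt == 0) {   /* cf. line 773 */
                table[i] = NULL;
                gp->next = freelist;         /* cf. lines 774-775 */
                freelist = gp;
            }
        }
        pthread_mutex_unlock(lock);

        while ((gp = freelist) != NULL) {    /* cf. lines 781-783 */
            freelist = gp->next;
            free(gp);                        /* kvmhv_release_nested() here */
        }
    }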
792 /* caller must hold gp->tlb_lock */
793 static void kvmhv_flush_nested(struct kvm_nested_guest *gp)
795 struct kvm *kvm = gp->l1_host;
798 kvmppc_free_pgtable_radix(kvm, gp->shadow_pgtable, gp->shadow_lpid);
800 kvmhv_flush_lpid(gp->shadow_lpid);
801 kvmhv_update_ptbl_cache(gp);
802 if (gp->l1_gr_to_hr == 0)
803 kvmhv_remove_nested(gp);
809 struct kvm_nested_guest *gp, *newgp;
815 gp = __find_nested(kvm, l1_lpid);
816 if (gp)
817 ++gp->refcnt;
820 if (gp || !create)
821 return gp;
833 gp = __find_nested(kvm, l1_lpid);
834 if (!gp) {
837 gp = newgp;
840 ++gp->refcnt;
846 return gp;
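kvmhv_get_nested() (lines 809-846) is a lookup-or-create with the allocation done outside the lock: try the lookup and take a reference; if nothing is there and create is set, build a candidate guest, then re-check under the lock and either install the candidate or throw it away because another CPU won the race. A userspace model of that shape (fixed-size table instead of the IDR, names are stand-ins):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdlib.h>

    #define MAX_LPID 64

    struct nested_guest { int lpid; int refcnt; };

    static struct nested_guest *table[MAX_LPID];
    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    static struct nested_guest *get_nested(int lpid, bool create)
    {
        struct nested_guest *gp, *newgp;

        if (lpid < 0 || lpid >= MAX_LPID)
            return NULL;

        pthread_mutex_lock(&lock);
        gp = table[lpid];
        if (gp)
            gp->refcnt++;                 /* cf. line 817 */
        pthread_mutex_unlock(&lock);
        if (gp || !create)
            return gp;                    /* cf. lines 820-821 */

        newgp = calloc(1, sizeof(*newgp));  /* allocate outside the lock */
        if (!newgp)
            return NULL;
        newgp->lpid = lpid;

        pthread_mutex_lock(&lock);
        gp = table[lpid];                 /* re-check under the lock */
        if (!gp) {                        /* cf. line 834 */
            table[lpid] = newgp;          /* cf. line 837 */
            gp = newgp;
            newgp = NULL;
        }
        gp->refcnt++;                     /* cf. line 840 */
        pthread_mutex_unlock(&lock);
        free(newgp);                      /* NULL if we won, loser's copy else */
        return gp;
    }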
849 void kvmhv_put_nested(struct kvm_nested_guest *gp)
851 struct kvm *kvm = gp->l1_host;
855 ref = --gp->refcnt;
858 kvmhv_release_nested(gp);
864 struct kvm_nested_guest *gp;
867 gp = __find_nested(kvm, lpid);
868 if (!gp)
873 pte = __find_linux_pte(gp->shadow_pgtable, ea, NULL, hshift);
966 struct kvm_nested_guest *gp;
973 gp = __find_nested(kvm, lpid);
974 if (!gp)
981 kvmppc_unmap_pte(kvm, ptep, gpa, shift, NULL, gp->shadow_lpid);
1036 struct kvm_nested_guest *gp,
1045 ptep = find_kvm_nested_guest_pte(kvm, gp->l1_lpid, gpa, &shift);
1049 kvmppc_unmap_pte(kvm, ptep, gpa, shift, NULL, gp->shadow_lpid);
1098 struct kvm_nested_guest *gp;
1112 gp = kvmhv_get_nested(kvm, lpid, false);
1113 if (!gp) /* No such guest -> nothing to do */
1115 mutex_lock(&gp->tlb_lock);
1119 kvmhv_invalidate_shadow_pte(vcpu, gp, addr, &shadow_shift);
1125 mutex_unlock(&gp->tlb_lock);
1126 kvmhv_put_nested(gp);
1131 struct kvm_nested_guest *gp, int ric)
1135 mutex_lock(&gp->tlb_lock);
1140 kvmppc_free_pgtable_radix(kvm, gp->shadow_pgtable,
1141 gp->shadow_lpid);
1142 kvmhv_flush_lpid(gp->shadow_lpid);
1153 kvmhv_flush_nested(gp);
1158 mutex_unlock(&gp->tlb_lock);
1164 struct kvm_nested_guest *gp;
1168 idr_for_each_entry(&kvm->arch.kvm_nested_guest_idr, gp, lpid) {
1170 kvmhv_emulate_tlbie_lpid(vcpu, gp, ric);
1180 struct kvm_nested_guest *gp;
1216 gp = kvmhv_get_nested(kvm, lpid, false);
1217 if (gp) {
1218 kvmhv_emulate_tlbie_lpid(vcpu, gp, ric);
1219 kvmhv_put_nested(gp);
1254 struct kvm_nested_guest *gp;
1256 gp = kvmhv_get_nested(kvm, lpid, false);
1257 if (gp) {
1258 kvmhv_emulate_tlbie_lpid(vcpu, gp, ric);
1259 kvmhv_put_nested(gp);
1359 struct kvm_nested_guest *gp,
1366 ret = kvmppc_mmu_walk_radix_tree(vcpu, n_gpa, gpte_p, gp->l1_gr_to_hr,
1421 struct kvm_nested_guest *gp,
1449 n_gpa, gp->l1_lpid);
1484 /* called with gp->tlb_lock held */
1486 struct kvm_nested_guest *gp)
1503 if (!gp->l1_gr_to_hr) {
1504 kvmhv_update_ptbl_cache(gp);
1505 if (!gp->l1_gr_to_hr)
1514 ret = kvmhv_translate_addr_nested(vcpu, gp, n_gpa, dsisr, &gpte);
1530 ret = kvmhv_handle_nested_set_rc(vcpu, gp, n_gpa, gpte, dsisr);
1642 (((unsigned long) gp->l1_lpid) << RMAP_NESTED_LPID_SHIFT);
1644 ret = kvmppc_create_pte(kvm, gp->shadow_pgtable, pte, n_gpa, level,
1645 mmu_seq, gp->shadow_lpid, rmapp, &n_rmap);
1653 kvmhv_invalidate_shadow_pte(vcpu, gp, n_gpa, NULL);
1659 struct kvm_nested_guest *gp = vcpu->arch.nested;
1662 mutex_lock(&gp->tlb_lock);
1663 ret = __kvmhv_nested_page_fault(vcpu, gp);
1664 mutex_unlock(&gp->tlb_lock);
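The final matches (lines 1659-1664) show the convention that organizes the whole file: the plain-named entry point owns gp->tlb_lock and the double-underscore worker assumes it is held, matching the "caller must hold gp->tlb_lock" comments at lines 630, 792 and 1484. A minimal model of that wrapper:

    #include <pthread.h>

    struct nested_guest { pthread_mutex_t tlb_lock; };

    /* Caller must hold gp->tlb_lock (the __-prefix convention). */
    static long __nested_page_fault(struct nested_guest *gp)
    {
        (void)gp;   /* the real work (lines 1486-1653) goes here */
        return 0;
    }

    static long nested_page_fault(struct nested_guest *gp)
    {
        long ret;

        pthread_mutex_lock(&gp->tlb_lock);     /* cf. line 1662 */
        ret = __nested_page_fault(gp);         /* cf. line 1663 */
        pthread_mutex_unlock(&gp->tlb_lock);   /* cf. line 1664 */
        return ret;
    }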