Lines matching references to gp (struct kvm_nested_guest *) in the powerpc nested-HV KVM code, arch/powerpc/kvm/book3s_hv_nested.c; the number at the start of each line is its line number in that file.
25 static void kvmhv_update_ptbl_cache(struct kvm_nested_guest *gp);
445 static void kvmhv_set_nested_ptbl(struct kvm_nested_guest *gp)
450 __pa(gp->shadow_pgtable) | RADIX_PGD_INDEX_SIZE;
451 kvmhv_set_ptbl_entry(gp->shadow_lpid, dw0, gp->process_table);
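Lines 445-451 install the shadow partition-table entry for the nested guest: dword 0 packs the physical address of the shadow page table together with the root index size (the table base is page-aligned, so the low bits are free), and dword 1 carries the guest's process table. Below is a minimal user-space sketch of the dw0 composition; PATB_HR and the constant values are assumptions standing in for the kernel's definitions, and the real function also ORs in the radix tree size.

    #include <stdint.h>
    #include <stdio.h>

    #define PATB_HR               (1ULL << 63)  /* assumed: host-radix flag */
    #define RADIX_PGD_INDEX_SIZE  13            /* assumed root index size */

    /* Compose dw0 as at line 450: the page-aligned table base leaves the
     * low bits free for the index-size encoding. */
    static uint64_t make_dw0(uint64_t shadow_pgtable_pa)
    {
        return PATB_HR | shadow_pgtable_pa | RADIX_PGD_INDEX_SIZE;
    }

    int main(void)
    {
        printf("dw0 = %#llx\n", (unsigned long long)make_dw0(0x1f0000ULL));
        return 0;
    }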
496 struct kvm_nested_guest *gp;
517 gp = kvmhv_get_nested(vcpu->kvm, l1_lpid, false);
518 if (!gp) {
523 mutex_lock(&gp->tlb_lock);
527 rc = __kvmhv_copy_tofrom_guest_radix(gp->shadow_lpid, pid,
547 rc = __kvmhv_copy_tofrom_guest_radix(gp->shadow_lpid, pid,
554 mutex_unlock(&gp->tlb_lock);
555 kvmhv_put_nested(gp);
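Lines 496-555 show the access discipline used throughout this file: kvmhv_get_nested() returns the guest with its reference count raised, gp->tlb_lock is held across the work against the shadow LPID to serialize with flushes and page faults, and kvmhv_put_nested() drops the reference afterwards. A small compilable analog of the same shape, with hypothetical names and pthreads standing in for the kernel primitives:

    #include <pthread.h>
    #include <stdio.h>

    struct guest {                        /* hypothetical stand-in for */
        pthread_mutex_t tlb_lock;         /* struct kvm_nested_guest   */
        int refcnt;
    };

    static struct guest *guest_get(struct guest *g)  /* cf. kvmhv_get_nested */
    {
        __atomic_add_fetch(&g->refcnt, 1, __ATOMIC_SEQ_CST);
        return g;
    }

    static void guest_put(struct guest *g)           /* cf. kvmhv_put_nested */
    {
        __atomic_sub_fetch(&g->refcnt, 1, __ATOMIC_SEQ_CST);
    }

    static void copy_tofrom_guest(struct guest *g)
    {
        guest_get(g);
        pthread_mutex_lock(&g->tlb_lock);    /* held across the copy */
        /* work against the shadow LPID would go here */
        pthread_mutex_unlock(&g->tlb_lock);
        guest_put(g);
    }

    int main(void)
    {
        static struct guest g = { PTHREAD_MUTEX_INITIALIZER, 1 };
        copy_tofrom_guest(&g);
        printf("refcnt back to %d\n", g.refcnt);
        return 0;
    }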
566 * Caller must hold gp->tlb_lock.
568 static void kvmhv_update_ptbl_cache(struct kvm_nested_guest *gp)
573 struct kvm *kvm = gp->l1_host;
576 ptbl_addr = (kvm->arch.l1_ptcr & PRTB_MASK) + (gp->l1_lpid << 4);
577 if (gp->l1_lpid < (1ul << ((kvm->arch.l1_ptcr & PRTS_MASK) + 8))) {
584 gp->l1_gr_to_hr = 0;
585 gp->process_table = 0;
587 gp->l1_gr_to_hr = be64_to_cpu(ptbl_entry.patb0);
588 gp->process_table = be64_to_cpu(ptbl_entry.patb1);
590 kvmhv_set_nested_ptbl(gp);
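kvmhv_update_ptbl_cache() (lines 566-590) re-reads this guest's entry from the L1 guest's own partition table: PRTB_MASK extracts the table base from l1_ptcr, each entry is 16 bytes (hence l1_lpid << 4), and a PRTS value of s means a table of 2^(s+12) bytes, i.e. 2^(s+8) entries, which is exactly the bound checked at line 577. A compilable sketch of that arithmetic, with assumed mask values:

    #include <stdint.h>
    #include <stdio.h>

    #define PRTS_MASK  0x1fULL        /* assumed: size field in the low bits */
    #define PRTB_MASK  (~0xfffULL)    /* assumed: 4 KB-aligned table base */

    /* Mirror of lines 576-577: 16-byte entries, 2^(PRTS+8) of them. */
    static int ptbl_entry_addr(uint64_t ptcr, uint64_t lpid, uint64_t *addr)
    {
        if (lpid >= (1ULL << ((ptcr & PRTS_MASK) + 8)))
            return -1;                       /* lpid out of range */
        *addr = (ptcr & PRTB_MASK) + (lpid << 4);
        return 0;
    }

    int main(void)
    {
        uint64_t addr;
        if (!ptbl_entry_addr(0x10000002ULL, 5, &addr))
            printf("entry at %#llx\n", (unsigned long long)addr);
        return 0;
    }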
595 struct kvm_nested_guest *gp;
598 gp = kzalloc(sizeof(*gp), GFP_KERNEL);
599 if (!gp)
601 gp->l1_host = kvm;
602 gp->l1_lpid = lpid;
603 mutex_init(&gp->tlb_lock);
604 gp->shadow_pgtable = pgd_alloc(kvm->mm);
605 if (!gp->shadow_pgtable)
610 gp->shadow_lpid = shadow_lpid;
611 gp->radix = 1;
613 memset(gp->prev_cpu, -1, sizeof(gp->prev_cpu));
615 return gp;
618 pgd_free(kvm->mm, gp->shadow_pgtable);
620 kfree(gp);
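kvmhv_alloc_nested() (lines 595-620) is a standard two-step construction: zero-allocate the descriptor, then acquire the resources it owns (the shadow PGD at line 604 and, judging by the kvmppc_free_lpid() at line 642, a shadow LPID), unwinding in reverse order on failure, which is what the pgd_free()/kfree() pair at lines 618-620 does. A user-space analog of the shape, with simplified types:

    #include <stdlib.h>
    #include <string.h>

    struct nested {                /* simplified stand-in */
        void *shadow_pgtable;
        int   lpid;
        int   prev_cpu[4];         /* assumed size, for the sketch only */
    };

    /* Analog of lines 598-620: zero-allocate, acquire the page table,
     * initialize, and unwind in reverse order on failure. */
    static struct nested *nested_alloc(int lpid)
    {
        struct nested *gp = calloc(1, sizeof(*gp));
        if (!gp)
            return NULL;
        gp->lpid = lpid;
        gp->shadow_pgtable = malloc(4096);   /* stands in for pgd_alloc */
        if (!gp->shadow_pgtable)
            goto out_free;
        memset(gp->prev_cpu, -1, sizeof(gp->prev_cpu)); /* "no previous CPU" */
        return gp;

    out_free:
        free(gp);
        return NULL;
    }

    int main(void)
    {
        struct nested *gp = nested_alloc(1);
        if (gp) {
            free(gp->shadow_pgtable);
            free(gp);
        }
        return 0;
    }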
627 static void kvmhv_release_nested(struct kvm_nested_guest *gp)
629 struct kvm *kvm = gp->l1_host;
631 if (gp->shadow_pgtable) {
637 kvmppc_free_pgtable_radix(kvm, gp->shadow_pgtable,
638 gp->shadow_lpid);
639 pgd_free(kvm->mm, gp->shadow_pgtable);
641 kvmhv_set_ptbl_entry(gp->shadow_lpid, 0, 0);
642 kvmppc_free_lpid(gp->shadow_lpid);
643 kfree(gp);
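kvmhv_release_nested() (lines 627-643) tears down in the opposite, inside-out order: the mappings hanging off the shadow page table go first, then the root itself, then the hardware partition-table entry is cleared and the shadow LPID returned, and only then is the descriptor freed. A toy analog of that ordering; the commented calls are placeholders marking where the kernel steps happen:

    #include <stdlib.h>

    struct nested {
        void *shadow_pgtable;    /* root; the shadow mappings hang off it */
    };

    /* Analog of lines 627-643: inner state before outer. */
    static void nested_release(struct nested *gp)
    {
        if (gp->shadow_pgtable) {
            /* kvmppc_free_pgtable_radix(...);  -- lines 637-638 */
            free(gp->shadow_pgtable);        /* cf. pgd_free, line 639 */
        }
        /* kvmhv_set_ptbl_entry(shadow_lpid, 0, 0); -- line 641 */
        /* kvmppc_free_lpid(shadow_lpid);           -- line 642 */
        free(gp);                            /* cf. kfree, line 643 */
    }

    int main(void)
    {
        struct nested *gp = calloc(1, sizeof(*gp));
        if (!gp)
            return 1;
        gp->shadow_pgtable = malloc(4096);
        nested_release(gp);
        return 0;
    }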
646 static void kvmhv_remove_nested(struct kvm_nested_guest *gp)
648 struct kvm *kvm = gp->l1_host;
649 int lpid = gp->l1_lpid;
653 if (gp == kvm->arch.nested_guests[lpid]) {
660 --gp->refcnt;
662 ref = gp->refcnt;
665 kvmhv_release_nested(gp);
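kvmhv_remove_nested() (lines 646-665) unhooks the guest from the kvm->arch.nested_guests[] lookup table and drops the table's reference while a lock is held, but reads the resulting count inside the critical section and performs the actual release outside it. kvmhv_put_nested() at lines 762-771 below follows the same drop-then-release shape. A compilable analog:

    #include <pthread.h>
    #include <stdlib.h>

    #define MAXN 16

    struct nested { int refcnt; };

    static struct nested *table[MAXN];
    static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Analog of lines 646-665: unhook and drop the table's reference
     * under the lock; read the final count there, release outside. */
    static void nested_remove(struct nested *gp, int lpid)
    {
        int ref;

        pthread_mutex_lock(&table_lock);
        if (gp == table[lpid]) {
            table[lpid] = NULL;
            --gp->refcnt;            /* the table's reference */
        }
        ref = gp->refcnt;
        pthread_mutex_unlock(&table_lock);

        if (ref == 0)
            free(gp);                /* cf. kvmhv_release_nested */
    }

    int main(void)
    {
        struct nested *gp = calloc(1, sizeof(*gp));
        if (!gp)
            return 1;
        gp->refcnt = 1;
        table[1] = gp;
        nested_remove(gp, 1);
        return 0;
    }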
677 struct kvm_nested_guest *gp;
684 gp = kvm->arch.nested_guests[i];
685 if (!gp)
688 if (--gp->refcnt == 0) {
689 gp->next = freelist;
690 freelist = gp;
695 while ((gp = freelist) != NULL) {
696 freelist = gp->next;
697 kvmhv_release_nested(gp);
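kvmhv_release_all_nested() (lines 677-697) uses the classic freelist idiom: entries whose reference count falls to zero are chained onto a local list under the lock, and the heavyweight release work happens only after the lock is dropped. A compilable analog:

    #include <pthread.h>
    #include <stdlib.h>

    #define MAXN 16

    struct nested {
        int refcnt;
        struct nested *next;
    };

    static struct nested *table[MAXN];
    static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Analog of lines 677-697: collect dead entries on a local list
     * under the lock, free them with the lock dropped. */
    static void release_all(void)
    {
        struct nested *freelist = NULL, *gp;
        int i;

        pthread_mutex_lock(&table_lock);
        for (i = 0; i < MAXN; i++) {
            gp = table[i];
            if (!gp)
                continue;
            table[i] = NULL;
            if (--gp->refcnt == 0) {
                gp->next = freelist;
                freelist = gp;
            }
        }
        pthread_mutex_unlock(&table_lock);

        while ((gp = freelist) != NULL) {
            freelist = gp->next;
            free(gp);    /* stands in for kvmhv_release_nested */
        }
    }

    int main(void)
    {
        table[0] = calloc(1, sizeof(struct nested));
        if (table[0])
            table[0]->refcnt = 1;
        release_all();
        return 0;
    }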
706 /* caller must hold gp->tlb_lock */
707 static void kvmhv_flush_nested(struct kvm_nested_guest *gp)
709 struct kvm *kvm = gp->l1_host;
712 kvmppc_free_pgtable_radix(kvm, gp->shadow_pgtable, gp->shadow_lpid);
714 kvmhv_flush_lpid(gp->shadow_lpid);
715 kvmhv_update_ptbl_cache(gp);
716 if (gp->l1_gr_to_hr == 0)
717 kvmhv_remove_nested(gp);
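kvmhv_flush_nested() (lines 706-717), called with tlb_lock held, discards all shadow state, then re-reads the L1 partition-table entry; if l1_gr_to_hr comes back zero the L1 guest has deleted this nested guest, so it is removed outright. A toy analog of that re-validate-or-remove step (read_l1_entry() is a hypothetical stub):

    #include <stdlib.h>

    struct nested {
        unsigned long l1_gr_to_hr;   /* cached dword 0 of the L1 entry */
    };

    /* Hypothetical stub: the L1 guest has since zeroed its entry. */
    static unsigned long read_l1_entry(void) { return 0; }

    /* Analog of lines 706-717. */
    static void flush_nested(struct nested *gp)
    {
        /* freeing the shadow page table + TLB flush would happen here */
        gp->l1_gr_to_hr = read_l1_entry();
        if (gp->l1_gr_to_hr == 0)
            free(gp);    /* stands in for kvmhv_remove_nested */
    }

    int main(void)
    {
        struct nested *gp = calloc(1, sizeof(*gp));
        if (!gp)
            return 1;
        flush_nested(gp);    /* frees gp, since read_l1_entry() returns 0 */
        return 0;
    }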
723 struct kvm_nested_guest *gp, *newgp;
730 gp = kvm->arch.nested_guests[l1_lpid];
731 if (gp)
732 ++gp->refcnt;
735 if (gp || !create)
736 return gp;
744 gp = kvm->arch.nested_guests[l1_lpid];
748 gp = newgp;
753 ++gp->refcnt;
759 return gp;
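kvmhv_get_nested() (lines 723-759) is a double-checked create: look up and take a reference under the lock; if the entry is missing and create is set, allocate outside the lock, retake it, and re-check the slot so that a racing creator wins and the loser's allocation is discarded. A compilable analog of the same race-handling shape:

    #include <pthread.h>
    #include <stdlib.h>

    #define MAXN 16

    struct nested { int refcnt; };

    static struct nested *table[MAXN];
    static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Analog of lines 723-759: allocate outside the lock, re-check the
     * slot under it, and throw the new allocation away if we lost. */
    static struct nested *nested_get(int lpid, int create)
    {
        struct nested *gp, *newgp;

        pthread_mutex_lock(&table_lock);
        gp = table[lpid];
        if (gp)
            ++gp->refcnt;
        pthread_mutex_unlock(&table_lock);

        if (gp || !create)
            return gp;

        newgp = calloc(1, sizeof(*newgp));   /* cf. kvmhv_alloc_nested */
        if (!newgp)
            return NULL;

        pthread_mutex_lock(&table_lock);
        gp = table[lpid];
        if (!gp) {
            table[lpid] = newgp;
            newgp->refcnt = 1;               /* the table's reference */
            gp = newgp;
            newgp = NULL;
        }
        ++gp->refcnt;                        /* the caller's reference */
        pthread_mutex_unlock(&table_lock);

        free(newgp);                         /* NULL-safe: lost the race */
        return gp;
    }

    int main(void)
    {
        return nested_get(3, 1) ? 0 : 1;
    }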
762 void kvmhv_put_nested(struct kvm_nested_guest *gp)
764 struct kvm *kvm = gp->l1_host;
768 ref = --gp->refcnt;
771 kvmhv_release_nested(gp);
784 struct kvm_nested_guest *gp;
787 gp = kvmhv_find_nested(kvm, lpid);
788 if (!gp)
793 pte = __find_linux_pte(gp->shadow_pgtable, ea, NULL, hshift);
886 struct kvm_nested_guest *gp;
893 gp = kvmhv_find_nested(kvm, lpid);
894 if (!gp)
901 kvmppc_unmap_pte(kvm, ptep, gpa, shift, NULL, gp->shadow_lpid);
956 struct kvm_nested_guest *gp,
965 ptep = find_kvm_nested_guest_pte(kvm, gp->l1_lpid, gpa, &shift);
969 kvmppc_unmap_pte(kvm, ptep, gpa, shift, NULL, gp->shadow_lpid);
1018 struct kvm_nested_guest *gp;
1032 gp = kvmhv_get_nested(kvm, lpid, false);
1033 if (!gp) /* No such guest -> nothing to do */
1035 mutex_lock(&gp->tlb_lock);
1039 kvmhv_invalidate_shadow_pte(vcpu, gp, addr, &shadow_shift);
1045 mutex_unlock(&gp->tlb_lock);
1046 kvmhv_put_nested(gp);
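The address-flavored tlbie emulation (lines 1018-1046) invalidates the shadow PTE backing one guest address under tlb_lock. The &shadow_shift out-parameter at line 1039 reports the page size the shadow table actually used, because a single guest page may be backed by several smaller host pages; in the kernel this sits in a loop that steps the address by that amount. A toy sketch of the stepping (the stub and sizes are assumptions):

    #include <stdio.h>

    /* Hypothetical stand-in: invalidate the shadow PTE covering addr and
     * report the page shift it used (cf. &shadow_shift at line 1039). */
    static void invalidate_shadow_pte(unsigned long addr, unsigned int *shift)
    {
        (void)addr;
        *shift = 16;    /* pretend a 64K host page backed this address */
    }

    /* One 2M guest page may be backed by several smaller host pages, so
     * step by whatever size the shadow table actually used. */
    int main(void)
    {
        unsigned long addr = 0, end = 1ul << 21;
        unsigned int shift;

        while (addr < end) {
            invalidate_shadow_pte(addr, &shift);
            addr += 1ul << shift;
        }
        printf("flushed up to %#lx\n", addr);
        return 0;
    }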
1051 struct kvm_nested_guest *gp, int ric)
1055 mutex_lock(&gp->tlb_lock);
1060 kvmppc_free_pgtable_radix(kvm, gp->shadow_pgtable,
1061 gp->shadow_lpid);
1062 kvmhv_flush_lpid(gp->shadow_lpid);
1073 kvmhv_flush_nested(gp);
1078 mutex_unlock(&gp->tlb_lock);
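kvmhv_emulate_tlbie_lpid() (lines 1051-1078) switches on the tlbie RIC field under tlb_lock: RIC 0 frees the shadow page table and flushes the shadow LPID (lines 1060-1062), while RIC 2 does the full kvmhv_flush_nested() at line 1073, which also drops the cached partition-table entry. A compilable sketch of the decode; the RIC 1 behavior (page-walk cache not shadowed, so nothing to do) is an assumption about the branch not shown in the listing:

    #include <stdio.h>

    /* RIC values as encoded in the tlbie instruction. */
    enum { RIC_FLUSH_TLB = 0, RIC_FLUSH_PWC = 1, RIC_FLUSH_ALL = 2 };

    /* Sketch of the decode at lines 1051-1078 (tlb_lock held around it). */
    static void emulate_tlbie_lpid(int ric)
    {
        switch (ric) {
        case RIC_FLUSH_TLB:
            printf("free shadow page table, flush shadow LPID\n");
            break;
        case RIC_FLUSH_PWC:
            printf("page-walk cache is not shadowed: nothing to do\n");
            break;
        case RIC_FLUSH_ALL:
            printf("full flush incl. cached partition-table entry\n");
            break;
        }
    }

    int main(void)
    {
        for (int ric = RIC_FLUSH_TLB; ric <= RIC_FLUSH_ALL; ric++)
            emulate_tlbie_lpid(ric);
        return 0;
    }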
1084 struct kvm_nested_guest *gp;
1089 gp = kvm->arch.nested_guests[i];
1090 if (gp) {
1092 kvmhv_emulate_tlbie_lpid(vcpu, gp, ric);
1103 struct kvm_nested_guest *gp;
1139 gp = kvmhv_get_nested(kvm, lpid, false);
1140 if (gp) {
1141 kvmhv_emulate_tlbie_lpid(vcpu, gp, ric);
1142 kvmhv_put_nested(gp);
1175 struct kvm_nested_guest *gp,
1182 ret = kvmppc_mmu_walk_radix_tree(vcpu, n_gpa, gpte_p, gp->l1_gr_to_hr,
1237 struct kvm_nested_guest *gp,
1265 n_gpa, gp->l1_lpid);
1300 /* called with gp->tlb_lock held */
1302 struct kvm_nested_guest *gp)
1319 if (!gp->l1_gr_to_hr) {
1320 kvmhv_update_ptbl_cache(gp);
1321 if (!gp->l1_gr_to_hr)
1330 ret = kvmhv_translate_addr_nested(vcpu, gp, n_gpa, dsisr, &gpte);
1346 ret = kvmhv_handle_nested_set_rc(vcpu, gp, n_gpa, gpte, dsisr);
1455 (((unsigned long) gp->l1_lpid) << RMAP_NESTED_LPID_SHIFT);
1457 ret = kvmppc_create_pte(kvm, gp->shadow_pgtable, pte, n_gpa, level,
1458 mmu_seq, gp->shadow_lpid, rmapp, &n_rmap);
1466 kvmhv_invalidate_shadow_pte(vcpu, gp, n_gpa, NULL);
1472 struct kvm_nested_guest *gp = vcpu->arch.nested;
1475 mutex_lock(&gp->tlb_lock);
1476 ret = __kvmhv_nested_page_fault(vcpu, gp);
1477 mutex_unlock(&gp->tlb_lock);
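__kvmhv_nested_page_fault() (lines 1300-1466) runs with tlb_lock held, as the thin wrapper at lines 1472-1477 shows. Its flow: refresh the cached L1 partition-table entry if l1_gr_to_hr is stale (lines 1319-1321), walk the L1 guest's radix tree to translate the nested GPA (line 1330), update referenced/changed bits when that is all the fault requires (line 1346), then install the shadow PTE tagged with the L1 LPID in its rmap entry (lines 1455-1458), with a bail-out path that invalidates the partially set up shadow PTE (line 1466). A control-flow skeleton with hypothetical stubs:

    #include <stdio.h>

    /* Hypothetical stubs for the helpers named in the listing; 0 = success. */
    static int update_ptbl_cache(void)      { return 0; }
    static int translate_addr_nested(void)  { return 0; }
    static int handle_set_rc(void)          { return 0; }
    static int create_shadow_pte(void)      { return 0; }
    static void invalidate_shadow_pte(void) { }

    /* Control-flow skeleton of lines 1300-1466; the caller holds tlb_lock,
     * as in kvmhv_nested_page_fault() at lines 1472-1477. */
    static int nested_page_fault(int ptbl_stale, int rc_only)
    {
        if (ptbl_stale && update_ptbl_cache())  /* lines 1319-1321 */
            return -1;          /* L1 has no partition-table entry */
        if (translate_addr_nested())            /* line 1330 */
            return -1;
        if (rc_only)                            /* line 1346 */
            return handle_set_rc();
        if (create_shadow_pte()) {              /* lines 1457-1458 */
            invalidate_shadow_pte();            /* line 1466: undo */
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        printf("fault handled: %d\n", nested_page_fault(1, 0));
        return 0;
    }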