Lines Matching refs:lpid
70 hr->lpid = swab32(hr->lpid);
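Line 70 is from the helper that byte-swaps the hv_guest_state an L1 hypervisor hands over when its endianness differs from the host's; each field gets the swab helper matching its width. A minimal sketch, assuming the leading fields of struct hv_guest_state (the real helper swaps every field of the structure):

    static void byteswap_hv_regs(struct hv_guest_state *hr)
    {
        /* Width-matched swaps: u64 fields use swab64(), u32 fields swab32(). */
        hr->version    = swab64(hr->version);
        hr->lpid       = swab32(hr->lpid);      /* matched line 70 */
        hr->vcpu_token = swab32(hr->vcpu_token);
        hr->lpcr       = swab64(hr->lpcr);
        /* ... remaining registers are swapped the same way ... */
    }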
343 /* translate lpid */
344 l2 = kvmhv_get_nested(vcpu->kvm, l2_hv.lpid, true);
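Lines 343-344 are the lpid-translation step of H_ENTER_NESTED: the lpid that L1 chose for its guest is mapped to the host's kvm_nested_guest shadow structure, created on first use because the third argument is true. A hedged sketch of the surrounding usage (the error value follows the usual hcall conventions):

    /* translate lpid: find or create the shadow state for L1's guest */
    l2 = kvmhv_get_nested(vcpu->kvm, l2_hv.lpid, true);
    if (!l2)
        return H_PARAMETER;     /* no such guest and could not create one */
    /* ... run the nested guest ... */
    kvmhv_put_nested(l2);       /* drop the reference the lookup took */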
481 static void kvmhv_flush_lpid(unsigned int lpid)
486 radix__flush_all_lpid(lpid);
492 lpid, TLBIEL_INVAL_SET_LPID);
494 rc = pseries_rpt_invalidate(lpid, H_RPTI_TARGET_CMMU,
503 void kvmhv_set_ptbl_entry(unsigned int lpid, u64 dw0, u64 dw1)
506 mmu_partition_table_set_entry(lpid, dw0, dw1, true);
510 pseries_partition_tb[lpid].patb0 = cpu_to_be64(dw0);
511 pseries_partition_tb[lpid].patb1 = cpu_to_be64(dw1);
513 kvmhv_flush_lpid(lpid);
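Lines 503-513 show both arms of kvmhv_set_ptbl_entry: on bare metal the entry goes straight into the hardware partition table, while a hypervisor running nested on pseries writes the L0-shared table in big-endian and then invalidates through kvmhv_flush_lpid (lines 481-494, which likewise uses radix__flush_all_lpid natively or an hcall such as pseries_rpt_invalidate under L0). A condensed sketch with the non-matching branch lines reconstructed, so treat the exact control flow as approximate:

    void kvmhv_set_ptbl_entry(unsigned int lpid, u64 dw0, u64 dw1)
    {
        if (!kvmhv_on_pseries()) {
            /* Bare-metal HV: program the real partition table. */
            mmu_partition_table_set_entry(lpid, dw0, dw1, true);
            return;
        }
        /* Nested HV: the table lives in memory shared with L0 and is
         * always big-endian; L0 performs the required barriers. */
        pseries_partition_tb[lpid].patb0 = cpu_to_be64(dw0);
        pseries_partition_tb[lpid].patb1 = cpu_to_be64(dw1);
        kvmhv_flush_lpid(lpid);
    }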
551 * r4 = L1 lpid of nested guest
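Line 551 comes from the doc comment of the H_COPY_TOFROM_GUEST handler, which reads its arguments out of the guest GPRs. A sketch of the argument unpacking, assuming the register layout that comment block describes (kvmppc_get_gpr() is the standard accessor; the r5/r6 meanings are assumptions, not in the matched lines):

    int l1_lpid = kvmppc_get_gpr(vcpu, 4);  /* r4 = L1 lpid of nested guest */
    int pid     = kvmppc_get_gpr(vcpu, 5);  /* r5 = pid (assumed) */
    gva_t eaddr = kvmppc_get_gpr(vcpu, 6);  /* r6 = address to access (assumed) */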
662 static struct kvm_nested_guest *__find_nested(struct kvm *kvm, int lpid)
664 return idr_find(&kvm->arch.kvm_nested_guest_idr, lpid);
667 static bool __prealloc_nested(struct kvm *kvm, int lpid)
670 NULL, lpid, lpid + 1, GFP_KERNEL) != lpid)
675 static void __add_nested(struct kvm *kvm, int lpid, struct kvm_nested_guest *gp)
677 if (idr_replace(&kvm->arch.kvm_nested_guest_idr, gp, lpid))
681 static void __remove_nested(struct kvm *kvm, int lpid)
683 idr_remove(&kvm->arch.kvm_nested_guest_idr, lpid);
686 static struct kvm_nested_guest *kvmhv_alloc_nested(struct kvm *kvm, unsigned int lpid)
695 gp->l1_lpid = lpid;
742 int lpid = gp->l1_lpid;
746 if (gp == __find_nested(kvm, lpid)) {
747 __remove_nested(kvm, lpid);
764 int lpid;
771 idr_for_each_entry(&kvm->arch.kvm_nested_guest_idr, gp, lpid) {
772 __remove_nested(kvm, lpid);
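Lines 662-772 are the entire IDR-backed lpid-to-guest table. The pattern: __prealloc_nested reserves the slot by asking idr_alloc for exactly lpid in the range [lpid, lpid + 1) while storing NULL, so concurrent creators fail and lookups stay negative until the guest is fully built; __add_nested then publishes the pointer with idr_replace; teardown walks every live entry with idr_for_each_entry. A condensed sketch assembled from the matched lines (locking and freeing abridged; release_all_sketch is a hypothetical name for the release-all path):

    static bool __prealloc_nested(struct kvm *kvm, int lpid)
    {
        /* Succeeds only if idr_alloc() can hand back exactly lpid. */
        return idr_alloc(&kvm->arch.kvm_nested_guest_idr,
                         NULL, lpid, lpid + 1, GFP_KERNEL) == lpid;
    }

    static void __add_nested(struct kvm *kvm, int lpid, struct kvm_nested_guest *gp)
    {
        /* Replace the reserved NULL with the finished guest; a non-NULL
         * return would mean the slot was never ours. */
        if (idr_replace(&kvm->arch.kvm_nested_guest_idr, gp, lpid))
            WARN_ON(1);
    }

    static void release_all_sketch(struct kvm *kvm)
    {
        struct kvm_nested_guest *gp, *freelist = NULL;
        int lpid;

        /* Collect guests under the lock, free them after dropping it. */
        spin_lock(&kvm->mmu_lock);
        idr_for_each_entry(&kvm->arch.kvm_nested_guest_idr, gp, lpid) {
            __remove_nested(kvm, lpid);
            if (--gp->refcnt == 0) {
                gp->next = freelist;
                freelist = gp;
            }
        }
        spin_unlock(&kvm->mmu_lock);
        /* ... free the collected guests here ... */
    }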
861 pte_t *find_kvm_nested_guest_pte(struct kvm *kvm, unsigned long lpid,
867 gp = __find_nested(kvm, lpid);
921 unsigned int shift, lpid;
925 lpid = (n_rmap & RMAP_NESTED_LPID_MASK) >> RMAP_NESTED_LPID_SHIFT;
928 ptep = find_kvm_nested_guest_pte(kvm, lpid, gpa, &shift);
937 kvmppc_radix_tlbie_page(kvm, gpa, shift, lpid);
968 unsigned int shift, lpid;
972 lpid = (n_rmap & RMAP_NESTED_LPID_MASK) >> RMAP_NESTED_LPID_SHIFT;
973 gp = __find_nested(kvm, lpid);
978 ptep = find_kvm_nested_guest_pte(kvm, lpid, gpa, &shift);
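Lines 921-978 decode an rmap entry: the nested lpid and the guest physical address are packed into one u64, recovered by mask-and-shift, and then used to locate the shadow PTE via find_kvm_nested_guest_pte. The field layout below is illustrative (the real RMAP_NESTED_* constants are defined in the KVM headers), but the decode is the same mask-and-shift pattern as the matched lines 925 and 972. A self-contained demonstration:

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed layout for this demo: lpid in the high bits, page-aligned
     * gpa below it. The kernel's actual constants may differ. */
    #define RMAP_NESTED_LPID_SHIFT  52
    #define RMAP_NESTED_LPID_MASK   (0xfffULL << RMAP_NESTED_LPID_SHIFT)
    #define RMAP_NESTED_GPA_MASK    0x000ffffffffff000ULL

    int main(void)
    {
        /* Pack lpid 42 and gpa 0x1f4000 into one rmap word. */
        uint64_t n_rmap = ((uint64_t)42 << RMAP_NESTED_LPID_SHIFT) | 0x1f4000;

        unsigned int lpid = (n_rmap & RMAP_NESTED_LPID_MASK) >> RMAP_NESTED_LPID_SHIFT;
        uint64_t gpa = n_rmap & RMAP_NESTED_GPA_MASK;

        printf("lpid=%u gpa=0x%llx\n", lpid, (unsigned long long)gpa);
        return 0;
    }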
1094 static int kvmhv_emulate_tlbie_tlb_addr(struct kvm_vcpu *vcpu, int lpid,
1112 gp = kvmhv_get_nested(kvm, lpid, false);
1165 int lpid;
1168 idr_for_each_entry(&kvm->arch.kvm_nested_guest_idr, gp, lpid) {
1182 int lpid;
1189 lpid = get_lpid(rsval);
1212 ret = kvmhv_emulate_tlbie_tlb_addr(vcpu, lpid, ap, epn);
1216 gp = kvmhv_get_nested(kvm, lpid, false);
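Lines 1094-1216 are the tlbie emulation path: the lpid is pulled out of the rS register value (line 1189), per-address invalidations go through kvmhv_emulate_tlbie_tlb_addr, and whole-partition ones look up the shadow guest with create == false, because an lpid that was never shadowed has nothing cached to flush. A sketch of that lookup-without-create pattern (the per-lpid flush helper lives in the same file):

    /* Invalidate everything for one nested lpid. */
    gp = kvmhv_get_nested(kvm, lpid, false);    /* do not create */
    if (gp) {
        kvmhv_emulate_tlbie_lpid(vcpu, gp, ric);
        kvmhv_put_nested(gp);
    }
    /* If gp is NULL the lpid has no shadow state, so there is nothing
     * to invalidate and the tlbie can be treated as a no-op. */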
1251 unsigned long lpid, unsigned long ric)
1256 gp = kvmhv_get_nested(kvm, lpid, false);
1271 unsigned long lpid,
1290 return do_tlb_invalidate_nested_all(vcpu, lpid,
1296 ret = kvmhv_emulate_tlbie_tlb_addr(vcpu, lpid, ap,
1310 long do_h_rpt_invalidate_pat(struct kvm_vcpu *vcpu, unsigned long lpid,
1315 * If L2 lpid isn't valid, we need to return H_PARAMETER.
1317 * However, nested KVM issues an L2 lpid flush call when creating
1319 * corresponding shadow lpid is created in HV which happens in
1323 if (!__find_nested(vcpu->kvm, lpid))
1327 * A flush all request can be handled by a full lpid flush only.
1330 return do_tlb_invalidate_nested_all(vcpu, lpid, RIC_FLUSH_ALL);
1342 * ret = do_tlb_invalidate_nested_all(vcpu, lpid, RIC_FLUSH_PWC);
1349 return do_tlb_invalidate_nested_all(vcpu, lpid, RIC_FLUSH_TLB);
1352 return do_tlb_invalidate_nested_tlb(vcpu, lpid, pg_sizes,
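Lines 1310-1352 dispatch a partition-scoped H_RPT_INVALIDATE: an unknown lpid returns success (see the comment at 1315-1319), a NESTED_ALL type or a whole-address-space request degrades to a full-lpid flush, and only a bounded TLB request walks the page sizes. A condensed sketch of that dispatch, with the non-matching lines reconstructed and therefore approximate:

    long do_h_rpt_invalidate_pat(struct kvm_vcpu *vcpu, unsigned long lpid,
                                 unsigned long type, unsigned long pg_sizes,
                                 unsigned long start, unsigned long end)
    {
        /* No shadow lpid yet (L1 flushes while building its partition
         * table, before H_ENTER_NESTED creates ours): report success. */
        if (!__find_nested(vcpu->kvm, lpid))
            return H_SUCCESS;

        /* "Invalidate everything" and full-range requests can only be
         * honoured as a full lpid flush. */
        if ((type & H_RPTI_TYPE_NESTED_ALL) == H_RPTI_TYPE_NESTED_ALL)
            return do_tlb_invalidate_nested_all(vcpu, lpid, RIC_FLUSH_ALL);
        if (start == 0 && end == -1)
            return do_tlb_invalidate_nested_all(vcpu, lpid, RIC_FLUSH_TLB);

        /* Bounded TLB invalidation walks the requested page sizes. */
        if (type & H_RPTI_TYPE_TLB)
            return do_tlb_invalidate_nested_tlb(vcpu, lpid, pg_sizes,
                                                start, end);
        return H_SUCCESS;
    }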
1441 gpte.raddr, kvm->arch.lpid);
1668 int kvmhv_nested_next_lpid(struct kvm *kvm, int lpid)
1670 int ret = lpid + 1;
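Lines 1668-1670 begin the iterator used to walk live nested lpids in ascending order; with the IDR this reduces to idr_get_next(), which advances to the first populated id at or above the one passed in. A sketch of the complete function under that assumption:

    int kvmhv_nested_next_lpid(struct kvm *kvm, int lpid)
    {
        int ret = lpid + 1;     /* matched line 1670: start just past the last one */

        spin_lock(&kvm->mmu_lock);
        /* idr_get_next() updates ret to the next populated id, or
         * returns NULL when none remain. */
        if (!idr_get_next(&kvm->arch.kvm_nested_guest_idr, &ret))
            ret = -1;
        spin_unlock(&kvm->mmu_lock);

        return ret;
    }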