// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corporation, 2018
 * Authors Suraj Jitindar Singh <sjitindarsingh@gmail.com>
 *	   Paul Mackerras <paulus@ozlabs.org>
 *
 * Description: KVM functions specific to running nested KVM-HV guests
 * on Book3S processors (specifically POWER9 and later).
 */

#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/llist.h>
#include <linux/pgtable.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu.h>
#include <asm/pgalloc.h>
#include <asm/pte-walk.h>
#include <asm/reg.h>
#include <asm/plpar_wrappers.h>
#include <asm/firmware.h>

static struct patb_entry *pseries_partition_tb;

static void kvmhv_update_ptbl_cache(struct kvm_nested_guest *gp);
static void kvmhv_free_memslot_nest_rmap(struct kvm_memory_slot *free);

void kvmhv_save_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;

	hr->pcr = vc->pcr | PCR_MASK;
	hr->dpdes = vc->dpdes;
	hr->hfscr = vcpu->arch.hfscr;
	hr->tb_offset = vc->tb_offset;
	hr->dawr0 = vcpu->arch.dawr0;
	hr->dawrx0 = vcpu->arch.dawrx0;
	hr->ciabr = vcpu->arch.ciabr;
	hr->purr = vcpu->arch.purr;
	hr->spurr = vcpu->arch.spurr;
	hr->ic = vcpu->arch.ic;
	hr->vtb = vc->vtb;
	hr->srr0 = vcpu->arch.shregs.srr0;
	hr->srr1 = vcpu->arch.shregs.srr1;
	hr->sprg[0] = vcpu->arch.shregs.sprg0;
	hr->sprg[1] = vcpu->arch.shregs.sprg1;
	hr->sprg[2] = vcpu->arch.shregs.sprg2;
	hr->sprg[3] = vcpu->arch.shregs.sprg3;
	hr->pidr = vcpu->arch.pid;
	hr->cfar = vcpu->arch.cfar;
	hr->ppr = vcpu->arch.ppr;
	hr->dawr1 = vcpu->arch.dawr1;
	hr->dawrx1 = vcpu->arch.dawrx1;
}

/* Use noinline_for_stack due to https://bugs.llvm.org/show_bug.cgi?id=49610 */
static noinline_for_stack void byteswap_pt_regs(struct pt_regs *regs)
{
	unsigned long *addr = (unsigned long *) regs;

	for (; addr < ((unsigned long *) (regs + 1)); addr++)
		*addr = swab64(*addr);
}

static void byteswap_hv_regs(struct hv_guest_state *hr)
{
	hr->version = swab64(hr->version);
	hr->lpid = swab32(hr->lpid);
	hr->vcpu_token = swab32(hr->vcpu_token);
	hr->lpcr = swab64(hr->lpcr);
	hr->pcr = swab64(hr->pcr) | PCR_MASK;
	hr->amor = swab64(hr->amor);
	hr->dpdes = swab64(hr->dpdes);
	hr->hfscr = swab64(hr->hfscr);
	hr->tb_offset = swab64(hr->tb_offset);
	hr->dawr0 = swab64(hr->dawr0);
	hr->dawrx0 = swab64(hr->dawrx0);
	hr->ciabr = swab64(hr->ciabr);
	hr->hdec_expiry = swab64(hr->hdec_expiry);
	hr->purr = swab64(hr->purr);
	hr->spurr = swab64(hr->spurr);
	hr->ic = swab64(hr->ic);
	hr->vtb = swab64(hr->vtb);
	hr->hdar = swab64(hr->hdar);
	hr->hdsisr = swab64(hr->hdsisr);
	hr->heir = swab64(hr->heir);
	hr->asdr = swab64(hr->asdr);
	hr->srr0 = swab64(hr->srr0);
	hr->srr1 = swab64(hr->srr1);
	hr->sprg[0] = swab64(hr->sprg[0]);
	hr->sprg[1] = swab64(hr->sprg[1]);
	hr->sprg[2] = swab64(hr->sprg[2]);
	hr->sprg[3] = swab64(hr->sprg[3]);
	hr->pidr = swab64(hr->pidr);
	hr->cfar = swab64(hr->cfar);
	hr->ppr = swab64(hr->ppr);
	hr->dawr1 = swab64(hr->dawr1);
	hr->dawrx1 = swab64(hr->dawrx1);
}

static void save_hv_return_state(struct kvm_vcpu *vcpu,
				 struct hv_guest_state *hr)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;

	hr->dpdes = vc->dpdes;
	hr->purr = vcpu->arch.purr;
	hr->spurr = vcpu->arch.spurr;
	hr->ic = vcpu->arch.ic;
	hr->vtb = vc->vtb;
	hr->srr0 = vcpu->arch.shregs.srr0;
	hr->srr1 = vcpu->arch.shregs.srr1;
	hr->sprg[0] = vcpu->arch.shregs.sprg0;
	hr->sprg[1] = vcpu->arch.shregs.sprg1;
	hr->sprg[2] = vcpu->arch.shregs.sprg2;
	hr->sprg[3] = vcpu->arch.shregs.sprg3;
	hr->pidr = vcpu->arch.pid;
	hr->cfar = vcpu->arch.cfar;
	hr->ppr = vcpu->arch.ppr;
	switch (vcpu->arch.trap) {
	case BOOK3S_INTERRUPT_H_DATA_STORAGE:
		hr->hdar = vcpu->arch.fault_dar;
		hr->hdsisr = vcpu->arch.fault_dsisr;
		hr->asdr = vcpu->arch.fault_gpa;
		break;
	case BOOK3S_INTERRUPT_H_INST_STORAGE:
		hr->asdr = vcpu->arch.fault_gpa;
		break;
	case BOOK3S_INTERRUPT_H_FAC_UNAVAIL:
		hr->hfscr = ((~HFSCR_INTR_CAUSE & hr->hfscr) |
			     (HFSCR_INTR_CAUSE & vcpu->arch.hfscr));
		break;
	case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
		hr->heir = vcpu->arch.emul_inst;
		break;
	}
}

static void restore_hv_regs(struct kvm_vcpu *vcpu, const struct hv_guest_state *hr)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;

	vc->pcr = hr->pcr | PCR_MASK;
	vc->dpdes = hr->dpdes;
	vcpu->arch.hfscr = hr->hfscr;
	vcpu->arch.dawr0 = hr->dawr0;
	vcpu->arch.dawrx0 = hr->dawrx0;
	vcpu->arch.ciabr = hr->ciabr;
	vcpu->arch.purr = hr->purr;
	vcpu->arch.spurr = hr->spurr;
	vcpu->arch.ic = hr->ic;
	vc->vtb = hr->vtb;
	vcpu->arch.shregs.srr0 = hr->srr0;
	vcpu->arch.shregs.srr1 = hr->srr1;
	vcpu->arch.shregs.sprg0 = hr->sprg[0];
	vcpu->arch.shregs.sprg1 = hr->sprg[1];
	vcpu->arch.shregs.sprg2 = hr->sprg[2];
	vcpu->arch.shregs.sprg3 = hr->sprg[3];
	vcpu->arch.pid = hr->pidr;
	vcpu->arch.cfar = hr->cfar;
	vcpu->arch.ppr = hr->ppr;
	vcpu->arch.dawr1 = hr->dawr1;
	vcpu->arch.dawrx1 = hr->dawrx1;
}

void kvmhv_restore_hv_return_state(struct kvm_vcpu *vcpu,
				   struct hv_guest_state *hr)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;

	vc->dpdes = hr->dpdes;
	vcpu->arch.hfscr = hr->hfscr;
	vcpu->arch.purr = hr->purr;
	vcpu->arch.spurr = hr->spurr;
	vcpu->arch.ic = hr->ic;
	vc->vtb = hr->vtb;
	vcpu->arch.fault_dar = hr->hdar;
	vcpu->arch.fault_dsisr = hr->hdsisr;
	vcpu->arch.fault_gpa = hr->asdr;
	vcpu->arch.emul_inst = hr->heir;
	vcpu->arch.shregs.srr0 = hr->srr0;
	vcpu->arch.shregs.srr1 = hr->srr1;
	vcpu->arch.shregs.sprg0 = hr->sprg[0];
	vcpu->arch.shregs.sprg1 = hr->sprg[1];
	vcpu->arch.shregs.sprg2 = hr->sprg[2];
	vcpu->arch.shregs.sprg3 = hr->sprg[3];
	vcpu->arch.pid = hr->pidr;
	vcpu->arch.cfar = hr->cfar;
	vcpu->arch.ppr = hr->ppr;
}

static void kvmhv_nested_mmio_needed(struct kvm_vcpu *vcpu, u64 regs_ptr)
{
	/* No need to reflect the page fault to L1, we've handled it */
	vcpu->arch.trap = 0;

	/*
	 * Since the L2 gprs have already been written back into L1 memory when
	 * we complete the mmio, store the L1 memory location of the L2 gpr
	 * that the mmio loads into, so that the loaded value can be written
	 * there in kvmppc_complete_mmio_load().
	 */
	if (((vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) == KVM_MMIO_REG_GPR)
	    && (vcpu->mmio_is_write == 0)) {
		vcpu->arch.nested_io_gpr = (gpa_t) regs_ptr +
					   offsetof(struct pt_regs,
						    gpr[vcpu->arch.io_gpr]);
		vcpu->arch.io_gpr = KVM_MMIO_REG_NESTED_GPR;
	}
}

static int kvmhv_read_guest_state_and_regs(struct kvm_vcpu *vcpu,
					   struct hv_guest_state *l2_hv,
					   struct pt_regs *l2_regs,
					   u64 hv_ptr, u64 regs_ptr)
{
	int size;

	if (kvm_vcpu_read_guest(vcpu, hv_ptr, &l2_hv->version,
				sizeof(l2_hv->version)))
		return -1;

	if (kvmppc_need_byteswap(vcpu))
		l2_hv->version = swab64(l2_hv->version);

	size = hv_guest_state_size(l2_hv->version);
	if (size < 0)
		return -1;

	return kvm_vcpu_read_guest(vcpu, hv_ptr, l2_hv, size) ||
		kvm_vcpu_read_guest(vcpu, regs_ptr, l2_regs,
				    sizeof(struct pt_regs));
}

static int kvmhv_write_guest_state_and_regs(struct kvm_vcpu *vcpu,
					    struct hv_guest_state *l2_hv,
					    struct pt_regs *l2_regs,
					    u64 hv_ptr, u64 regs_ptr)
{
	int size;

	size = hv_guest_state_size(l2_hv->version);
	if (size < 0)
		return -1;

	return kvm_vcpu_write_guest(vcpu, hv_ptr, l2_hv, size) ||
		kvm_vcpu_write_guest(vcpu, regs_ptr, l2_regs,
				     sizeof(struct pt_regs));
}

static void load_l2_hv_regs(struct kvm_vcpu *vcpu,
			    const struct hv_guest_state *l2_hv,
			    const struct hv_guest_state *l1_hv, u64 *lpcr)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;
	u64 mask;

	restore_hv_regs(vcpu, l2_hv);

	/*
	 * Don't let L1 change LPCR bits for the L2 except these:
	 */
	mask = LPCR_DPFD | LPCR_ILE | LPCR_TC | LPCR_AIL | LPCR_LD | LPCR_MER;

	/*
	 * Additional filtering is required depending on hardware
	 * and configuration.
	 */
	*lpcr = kvmppc_filter_lpcr_hv(vcpu->kvm,
				      (vc->lpcr & ~mask) | (*lpcr & mask));

	/*
	 * Don't let L1 enable features for L2 which we don't allow for L1,
	 * but preserve the interrupt cause field.
	 */
	vcpu->arch.hfscr = l2_hv->hfscr & (HFSCR_INTR_CAUSE | vcpu->arch.hfscr_permitted);

	/* Don't let data address watchpoint match in hypervisor state */
	vcpu->arch.dawrx0 = l2_hv->dawrx0 & ~DAWRX_HYP;
	vcpu->arch.dawrx1 = l2_hv->dawrx1 & ~DAWRX_HYP;

	/* Don't let completed instruction address breakpt match in HV state */
	if ((l2_hv->ciabr & CIABR_PRIV) == CIABR_PRIV_HYPER)
		vcpu->arch.ciabr = l2_hv->ciabr & ~CIABR_PRIV;
}
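
/*
 * Worked example for the LPCR filtering above (illustrative only): bits
 * outside "mask" always come from the current L1 value in vc->lpcr, so if
 * L1 requests an L2 LPCR that sets, say, LPCR_AIL (in the mask) plus some
 * bit not in the mask, only LPCR_AIL is taken from the request;
 * kvmppc_filter_lpcr_hv() may then clear even that if the hardware or
 * configuration doesn't support it.
 */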

long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
{
	long int err, r;
	struct kvm_nested_guest *l2;
	struct pt_regs l2_regs, saved_l1_regs;
	struct hv_guest_state l2_hv = {0}, saved_l1_hv;
	struct kvmppc_vcore *vc = vcpu->arch.vcore;
	u64 hv_ptr, regs_ptr;
	u64 hdec_exp, lpcr;
	s64 delta_purr, delta_spurr, delta_ic, delta_vtb;

	if (vcpu->kvm->arch.l1_ptcr == 0)
		return H_NOT_AVAILABLE;

	if (MSR_TM_TRANSACTIONAL(vcpu->arch.shregs.msr))
		return H_BAD_MODE;

	/* copy parameters in */
	hv_ptr = kvmppc_get_gpr(vcpu, 4);
	regs_ptr = kvmppc_get_gpr(vcpu, 5);
	kvm_vcpu_srcu_read_lock(vcpu);
	err = kvmhv_read_guest_state_and_regs(vcpu, &l2_hv, &l2_regs,
					      hv_ptr, regs_ptr);
	kvm_vcpu_srcu_read_unlock(vcpu);
	if (err)
		return H_PARAMETER;

	if (kvmppc_need_byteswap(vcpu))
		byteswap_hv_regs(&l2_hv);
	if (l2_hv.version > HV_GUEST_STATE_VERSION)
		return H_P2;

	if (kvmppc_need_byteswap(vcpu))
		byteswap_pt_regs(&l2_regs);
	if (l2_hv.vcpu_token >= NR_CPUS)
		return H_PARAMETER;

	/*
	 * L1 must have set up a suspended state to enter the L2 in a
	 * transactional state, and only in that case. These have to be
	 * filtered out here to prevent causing a TM Bad Thing in the
	 * host HRFID. We could synthesize a TM Bad Thing back to the L1
	 * here but there doesn't seem like much point.
	 */
	if (MSR_TM_SUSPENDED(vcpu->arch.shregs.msr)) {
		if (!MSR_TM_ACTIVE(l2_regs.msr))
			return H_BAD_MODE;
	} else {
		if (l2_regs.msr & MSR_TS_MASK)
			return H_BAD_MODE;
		if (WARN_ON_ONCE(vcpu->arch.shregs.msr & MSR_TS_MASK))
			return H_BAD_MODE;
	}

	/* translate lpid */
	l2 = kvmhv_get_nested(vcpu->kvm, l2_hv.lpid, true);
	if (!l2)
		return H_PARAMETER;
	if (!l2->l1_gr_to_hr) {
		mutex_lock(&l2->tlb_lock);
		kvmhv_update_ptbl_cache(l2);
		mutex_unlock(&l2->tlb_lock);
	}

	/* save the L1 register and HV state */
	vcpu->arch.regs.msr = vcpu->arch.shregs.msr;
	saved_l1_regs = vcpu->arch.regs;
	kvmhv_save_hv_regs(vcpu, &saved_l1_hv);

	/* convert TB values/offsets to host (L0) values */
	hdec_exp = l2_hv.hdec_expiry - vc->tb_offset;
	vc->tb_offset += l2_hv.tb_offset;
	vcpu->arch.dec_expires += l2_hv.tb_offset;

	/* set L1 state to L2 state */
	vcpu->arch.nested = l2;
	vcpu->arch.nested_vcpu_id = l2_hv.vcpu_token;
	vcpu->arch.nested_hfscr = l2_hv.hfscr;
	vcpu->arch.regs = l2_regs;

	/* Guest must always run with ME enabled, HV disabled. */
	vcpu->arch.shregs.msr = (vcpu->arch.regs.msr | MSR_ME) & ~MSR_HV;

	lpcr = l2_hv.lpcr;
	load_l2_hv_regs(vcpu, &l2_hv, &saved_l1_hv, &lpcr);

	vcpu->arch.ret = RESUME_GUEST;
	vcpu->arch.trap = 0;
	do {
		r = kvmhv_run_single_vcpu(vcpu, hdec_exp, lpcr);
	} while (is_kvmppc_resume_guest(r));

	/* save L2 state for return */
	l2_regs = vcpu->arch.regs;
	l2_regs.msr = vcpu->arch.shregs.msr;
	delta_purr = vcpu->arch.purr - l2_hv.purr;
	delta_spurr = vcpu->arch.spurr - l2_hv.spurr;
	delta_ic = vcpu->arch.ic - l2_hv.ic;
	delta_vtb = vc->vtb - l2_hv.vtb;
	save_hv_return_state(vcpu, &l2_hv);

	/* restore L1 state */
	vcpu->arch.nested = NULL;
	vcpu->arch.regs = saved_l1_regs;
	vcpu->arch.shregs.msr = saved_l1_regs.msr & ~MSR_TS_MASK;
	/* set L1 MSR TS field according to L2 transaction state */
	if (l2_regs.msr & MSR_TS_MASK)
		vcpu->arch.shregs.msr |= MSR_TS_S;
	vc->tb_offset = saved_l1_hv.tb_offset;
	/* XXX: is this always the same delta as saved_l1_hv.tb_offset? */
	vcpu->arch.dec_expires -= l2_hv.tb_offset;
	restore_hv_regs(vcpu, &saved_l1_hv);
	vcpu->arch.purr += delta_purr;
	vcpu->arch.spurr += delta_spurr;
	vcpu->arch.ic += delta_ic;
	vc->vtb += delta_vtb;

	kvmhv_put_nested(l2);

	/* copy l2_hv_state and regs back to guest */
	if (kvmppc_need_byteswap(vcpu)) {
		byteswap_hv_regs(&l2_hv);
		byteswap_pt_regs(&l2_regs);
	}
	kvm_vcpu_srcu_read_lock(vcpu);
	err = kvmhv_write_guest_state_and_regs(vcpu, &l2_hv, &l2_regs,
					       hv_ptr, regs_ptr);
	kvm_vcpu_srcu_read_unlock(vcpu);
	if (err)
		return H_AUTHORITY;

	if (r == -EINTR)
		return H_INTERRUPT;

	if (vcpu->mmio_needed) {
		kvmhv_nested_mmio_needed(vcpu, regs_ptr);
		return H_TOO_HARD;
	}

	return vcpu->arch.trap;
}
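
/*
 * Sketch of the L1-side calling convention implied above (illustrative;
 * see the H_ENTER_NESTED caller in book3s_hv.c for the real thing):
 *
 *	struct hv_guest_state hvregs;	// L2 HV state, version filled in
 *	// vcpu->arch.regs holds the L2 register state to run with
 *	trap = plpar_hcall_norets(H_ENTER_NESTED, __pa(&hvregs),
 *				  __pa(&vcpu->arch.regs));
 *
 * On return, both structures have been updated with the L2 exit state,
 * and the return value is either an H_xxx error or the interrupt vector
 * that caused the L2 to exit and that L1 must now handle.
 */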

long kvmhv_nested_init(void)
{
	long int ptb_order;
	unsigned long ptcr;
	long rc;

	if (!kvmhv_on_pseries())
		return 0;
	if (!radix_enabled())
		return -ENODEV;

	/* Partition table entry is 1<<4 bytes in size, hence the 4. */
	ptb_order = KVM_MAX_NESTED_GUESTS_SHIFT + 4;
	/* Minimum partition table size is 1<<12 bytes */
	if (ptb_order < 12)
		ptb_order = 12;
	pseries_partition_tb = kmalloc(sizeof(struct patb_entry) << ptb_order,
				       GFP_KERNEL);
	if (!pseries_partition_tb) {
		pr_err("kvm-hv: failed to allocate nested partition table\n");
		return -ENOMEM;
	}

	ptcr = __pa(pseries_partition_tb) | (ptb_order - 12);
	rc = plpar_hcall_norets(H_SET_PARTITION_TABLE, ptcr);
	if (rc != H_SUCCESS) {
		pr_err("kvm-hv: Parent hypervisor does not support nesting (rc=%ld)\n",
		       rc);
		kfree(pseries_partition_tb);
		pseries_partition_tb = NULL;
		return -ENODEV;
	}

	return 0;
}
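
/*
 * Worked example (illustrative): with the minimum ptb_order of 12 the
 * table holds (1 << 12) / 16 = 256 entries and the size field in the
 * PTCR low bits is 12 - 12 = 0; a table for 1 << 10 guests needs
 * ptb_order = 10 + 4 = 14 and encodes 14 - 12 = 2.
 */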

void kvmhv_nested_exit(void)
{
	/*
	 * N.B. the kvmhv_on_pseries() test is there because it enables
	 * the compiler to remove the call to plpar_hcall_norets()
	 * when CONFIG_PPC_PSERIES=n.
	 */
	if (kvmhv_on_pseries() && pseries_partition_tb) {
		plpar_hcall_norets(H_SET_PARTITION_TABLE, 0);
		kfree(pseries_partition_tb);
		pseries_partition_tb = NULL;
	}
}

static void kvmhv_flush_lpid(unsigned int lpid)
{
	long rc;

	if (!kvmhv_on_pseries()) {
		radix__flush_all_lpid(lpid);
		return;
	}

	if (!firmware_has_feature(FW_FEATURE_RPT_INVALIDATE))
		rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(2, 0, 1),
					lpid, TLBIEL_INVAL_SET_LPID);
	else
		rc = pseries_rpt_invalidate(lpid, H_RPTI_TARGET_CMMU,
					    H_RPTI_TYPE_NESTED |
					    H_RPTI_TYPE_TLB | H_RPTI_TYPE_PWC |
					    H_RPTI_TYPE_PAT,
					    H_RPTI_PAGE_ALL, 0, -1UL);
	if (rc)
		pr_err("KVM: TLB LPID invalidation hcall failed, rc=%ld\n", rc);
}

void kvmhv_set_ptbl_entry(unsigned int lpid, u64 dw0, u64 dw1)
{
	if (!kvmhv_on_pseries()) {
		mmu_partition_table_set_entry(lpid, dw0, dw1, true);
		return;
	}

	pseries_partition_tb[lpid].patb0 = cpu_to_be64(dw0);
	pseries_partition_tb[lpid].patb1 = cpu_to_be64(dw1);
	/* L0 will do the necessary barriers */
	kvmhv_flush_lpid(lpid);
}

static void kvmhv_set_nested_ptbl(struct kvm_nested_guest *gp)
{
	unsigned long dw0;

	dw0 = PATB_HR | radix__get_tree_size() |
		__pa(gp->shadow_pgtable) | RADIX_PGD_INDEX_SIZE;
	kvmhv_set_ptbl_entry(gp->shadow_lpid, dw0, gp->process_table);
}

/*
 * Handle the H_SET_PARTITION_TABLE hcall.
 * r4 = guest real address of partition table + log_2(size) - 12
 * (formatted as for the PTCR).
 */
long kvmhv_set_partition_table(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long ptcr = kvmppc_get_gpr(vcpu, 4);
	int srcu_idx;
	long ret = H_SUCCESS;

	srcu_idx = srcu_read_lock(&kvm->srcu);
	/* Check partition size and base address. */
	if ((ptcr & PRTS_MASK) + 12 - 4 > KVM_MAX_NESTED_GUESTS_SHIFT ||
	    !kvm_is_visible_gfn(vcpu->kvm, (ptcr & PRTB_MASK) >> PAGE_SHIFT))
		ret = H_PARAMETER;
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	if (ret == H_SUCCESS)
		kvm->arch.l1_ptcr = ptcr;

	return ret;
}
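
/*
 * Example of the size check above (illustrative): an L1 PTCR with
 * PRTS = 4 describes a 1 << (4 + 12) = 64kB table, i.e. 4096 entries of
 * 16 bytes each, so it is accepted only if
 * 4 + 12 - 4 = 12 <= KVM_MAX_NESTED_GUESTS_SHIFT.
 */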

/*
 * Handle the H_COPY_TOFROM_GUEST hcall.
 * r4 = L1 lpid of nested guest
 * r5 = pid
 * r6 = eaddr to access
 * r7 = to buffer (L1 gpa)
 * r8 = from buffer (L1 gpa)
 * r9 = n bytes to copy
 */
long kvmhv_copy_tofrom_guest_nested(struct kvm_vcpu *vcpu)
{
	struct kvm_nested_guest *gp;
	int l1_lpid = kvmppc_get_gpr(vcpu, 4);
	int pid = kvmppc_get_gpr(vcpu, 5);
	gva_t eaddr = kvmppc_get_gpr(vcpu, 6);
	gpa_t gp_to = (gpa_t) kvmppc_get_gpr(vcpu, 7);
	gpa_t gp_from = (gpa_t) kvmppc_get_gpr(vcpu, 8);
	void *buf;
	unsigned long n = kvmppc_get_gpr(vcpu, 9);
	bool is_load = !!gp_to;
	long rc;

	if (gp_to && gp_from) /* One must be NULL to determine the direction */
		return H_PARAMETER;

	if (eaddr & (0xFFFUL << 52))
		return H_PARAMETER;

	buf = kzalloc(n, GFP_KERNEL | __GFP_NOWARN);
	if (!buf)
		return H_NO_MEM;

	gp = kvmhv_get_nested(vcpu->kvm, l1_lpid, false);
	if (!gp) {
		rc = H_PARAMETER;
		goto out_free;
	}

	mutex_lock(&gp->tlb_lock);

	if (is_load) {
		/* Load from the nested guest into our buffer */
		rc = __kvmhv_copy_tofrom_guest_radix(gp->shadow_lpid, pid,
						     eaddr, buf, NULL, n);
		if (rc)
			goto not_found;

		/* Write what was loaded into our buffer back to the L1 guest */
		kvm_vcpu_srcu_read_lock(vcpu);
		rc = kvm_vcpu_write_guest(vcpu, gp_to, buf, n);
		kvm_vcpu_srcu_read_unlock(vcpu);
		if (rc)
			goto not_found;
	} else {
		/* Load the data to be stored from the L1 guest into our buf */
		kvm_vcpu_srcu_read_lock(vcpu);
		rc = kvm_vcpu_read_guest(vcpu, gp_from, buf, n);
		kvm_vcpu_srcu_read_unlock(vcpu);
		if (rc)
			goto not_found;

		/* Store from our buffer into the nested guest */
		rc = __kvmhv_copy_tofrom_guest_radix(gp->shadow_lpid, pid,
						     eaddr, NULL, buf, n);
		if (rc)
			goto not_found;
	}

out_unlock:
	mutex_unlock(&gp->tlb_lock);
	kvmhv_put_nested(gp);
out_free:
	kfree(buf);
	return rc;
not_found:
	rc = H_NOT_FOUND;
	goto out_unlock;
}
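
/*
 * Example invocations (illustrative): to read n bytes from the nested
 * guest, L1 passes a destination buffer gpa in r7 and 0 in r8, selecting
 * the is_load path above; to write, it passes 0 in r7 and a source
 * buffer gpa in r8. Passing both as non-zero is rejected with
 * H_PARAMETER because the direction would be ambiguous.
 */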

/*
 * Reload the partition table entry for a guest.
 * Caller must hold gp->tlb_lock.
 */
static void kvmhv_update_ptbl_cache(struct kvm_nested_guest *gp)
{
	int ret;
	struct patb_entry ptbl_entry;
	unsigned long ptbl_addr;
	struct kvm *kvm = gp->l1_host;

	ret = -EFAULT;
	ptbl_addr = (kvm->arch.l1_ptcr & PRTB_MASK) + (gp->l1_lpid << 4);
	if (gp->l1_lpid < (1ul << ((kvm->arch.l1_ptcr & PRTS_MASK) + 12 - 4))) {
		int srcu_idx = srcu_read_lock(&kvm->srcu);
		ret = kvm_read_guest(kvm, ptbl_addr,
				     &ptbl_entry, sizeof(ptbl_entry));
		srcu_read_unlock(&kvm->srcu, srcu_idx);
	}
	if (ret) {
		gp->l1_gr_to_hr = 0;
		gp->process_table = 0;
	} else {
		gp->l1_gr_to_hr = be64_to_cpu(ptbl_entry.patb0);
		gp->process_table = be64_to_cpu(ptbl_entry.patb1);
	}
	kvmhv_set_nested_ptbl(gp);
}

void kvmhv_vm_nested_init(struct kvm *kvm)
{
	idr_init(&kvm->arch.kvm_nested_guest_idr);
}

static struct kvm_nested_guest *__find_nested(struct kvm *kvm, int lpid)
{
	return idr_find(&kvm->arch.kvm_nested_guest_idr, lpid);
}

static bool __prealloc_nested(struct kvm *kvm, int lpid)
{
	if (idr_alloc(&kvm->arch.kvm_nested_guest_idr,
				NULL, lpid, lpid + 1, GFP_KERNEL) != lpid)
		return false;
	return true;
}

static void __add_nested(struct kvm *kvm, int lpid, struct kvm_nested_guest *gp)
{
	if (idr_replace(&kvm->arch.kvm_nested_guest_idr, gp, lpid))
		WARN_ON(1);
}

static void __remove_nested(struct kvm *kvm, int lpid)
{
	idr_remove(&kvm->arch.kvm_nested_guest_idr, lpid);
}

static struct kvm_nested_guest *kvmhv_alloc_nested(struct kvm *kvm, unsigned int lpid)
{
	struct kvm_nested_guest *gp;
	long shadow_lpid;

	gp = kzalloc(sizeof(*gp), GFP_KERNEL);
	if (!gp)
		return NULL;
	gp->l1_host = kvm;
	gp->l1_lpid = lpid;
	mutex_init(&gp->tlb_lock);
	gp->shadow_pgtable = pgd_alloc(kvm->mm);
	if (!gp->shadow_pgtable)
		goto out_free;
	shadow_lpid = kvmppc_alloc_lpid();
	if (shadow_lpid < 0)
		goto out_free2;
	gp->shadow_lpid = shadow_lpid;
	gp->radix = 1;

	memset(gp->prev_cpu, -1, sizeof(gp->prev_cpu));

	return gp;

 out_free2:
	pgd_free(kvm->mm, gp->shadow_pgtable);
 out_free:
	kfree(gp);
	return NULL;
}

/*
 * Free up any resources allocated for a nested guest.
 */
static void kvmhv_release_nested(struct kvm_nested_guest *gp)
{
	struct kvm *kvm = gp->l1_host;

	if (gp->shadow_pgtable) {
		/*
		 * No vcpu is using this struct and no call to
		 * kvmhv_get_nested can find this struct,
		 * so we don't need to hold kvm->mmu_lock.
		 */
		kvmppc_free_pgtable_radix(kvm, gp->shadow_pgtable,
					  gp->shadow_lpid);
		pgd_free(kvm->mm, gp->shadow_pgtable);
	}
	kvmhv_set_ptbl_entry(gp->shadow_lpid, 0, 0);
	kvmppc_free_lpid(gp->shadow_lpid);
	kfree(gp);
}

static void kvmhv_remove_nested(struct kvm_nested_guest *gp)
{
	struct kvm *kvm = gp->l1_host;
	int lpid = gp->l1_lpid;
	long ref;

	spin_lock(&kvm->mmu_lock);
	if (gp == __find_nested(kvm, lpid)) {
		__remove_nested(kvm, lpid);
		--gp->refcnt;
	}
	ref = gp->refcnt;
	spin_unlock(&kvm->mmu_lock);
	if (ref == 0)
		kvmhv_release_nested(gp);
}

/*
 * Free up all nested resources allocated for this guest.
 * This is called with no vcpus of the guest running, when
 * switching the guest to HPT mode or when destroying the
 * guest.
 */
void kvmhv_release_all_nested(struct kvm *kvm)
{
	int lpid;
	struct kvm_nested_guest *gp;
	struct kvm_nested_guest *freelist = NULL;
	struct kvm_memory_slot *memslot;
	int srcu_idx, bkt;

	spin_lock(&kvm->mmu_lock);
	idr_for_each_entry(&kvm->arch.kvm_nested_guest_idr, gp, lpid) {
		__remove_nested(kvm, lpid);
		if (--gp->refcnt == 0) {
			gp->next = freelist;
			freelist = gp;
		}
	}
	idr_destroy(&kvm->arch.kvm_nested_guest_idr);
	/* idr is empty and may be reused at this point */
	spin_unlock(&kvm->mmu_lock);
	while ((gp = freelist) != NULL) {
		freelist = gp->next;
		kvmhv_release_nested(gp);
	}

	srcu_idx = srcu_read_lock(&kvm->srcu);
	kvm_for_each_memslot(memslot, bkt, kvm_memslots(kvm))
		kvmhv_free_memslot_nest_rmap(memslot);
	srcu_read_unlock(&kvm->srcu, srcu_idx);
}

/* caller must hold gp->tlb_lock */
static void kvmhv_flush_nested(struct kvm_nested_guest *gp)
{
	struct kvm *kvm = gp->l1_host;

	spin_lock(&kvm->mmu_lock);
	kvmppc_free_pgtable_radix(kvm, gp->shadow_pgtable, gp->shadow_lpid);
	spin_unlock(&kvm->mmu_lock);
	kvmhv_flush_lpid(gp->shadow_lpid);
	kvmhv_update_ptbl_cache(gp);
	if (gp->l1_gr_to_hr == 0)
		kvmhv_remove_nested(gp);
}

struct kvm_nested_guest *kvmhv_get_nested(struct kvm *kvm, int l1_lpid,
					  bool create)
{
	struct kvm_nested_guest *gp, *newgp;

	if (l1_lpid >= (1ul << ((kvm->arch.l1_ptcr & PRTS_MASK) + 12 - 4)))
		return NULL;

	spin_lock(&kvm->mmu_lock);
	gp = __find_nested(kvm, l1_lpid);
	if (gp)
		++gp->refcnt;
	spin_unlock(&kvm->mmu_lock);

	if (gp || !create)
		return gp;

	newgp = kvmhv_alloc_nested(kvm, l1_lpid);
	if (!newgp)
		return NULL;

	if (!__prealloc_nested(kvm, l1_lpid)) {
		kvmhv_release_nested(newgp);
		return NULL;
	}

	spin_lock(&kvm->mmu_lock);
	gp = __find_nested(kvm, l1_lpid);
	if (!gp) {
		__add_nested(kvm, l1_lpid, newgp);
		++newgp->refcnt;
		gp = newgp;
		newgp = NULL;
	}
	++gp->refcnt;
	spin_unlock(&kvm->mmu_lock);

	if (newgp)
		kvmhv_release_nested(newgp);

	return gp;
}
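
/*
 * Sketch of the typical caller pattern (illustrative): a reference taken
 * by kvmhv_get_nested() pins the structure until the matching
 * kvmhv_put_nested(), so users in this file do
 *
 *	gp = kvmhv_get_nested(kvm, lpid, false);
 *	if (gp) {
 *		mutex_lock(&gp->tlb_lock);
 *		... operate on gp->shadow_pgtable ...
 *		mutex_unlock(&gp->tlb_lock);
 *		kvmhv_put_nested(gp);
 *	}
 *
 * as in kvmhv_copy_tofrom_guest_nested() above.
 */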

void kvmhv_put_nested(struct kvm_nested_guest *gp)
{
	struct kvm *kvm = gp->l1_host;
	long ref;

	spin_lock(&kvm->mmu_lock);
	ref = --gp->refcnt;
	spin_unlock(&kvm->mmu_lock);
	if (ref == 0)
		kvmhv_release_nested(gp);
}

pte_t *find_kvm_nested_guest_pte(struct kvm *kvm, unsigned long lpid,
				 unsigned long ea, unsigned *hshift)
{
	struct kvm_nested_guest *gp;
	pte_t *pte;

	gp = __find_nested(kvm, lpid);
	if (!gp)
		return NULL;

	VM_WARN(!spin_is_locked(&kvm->mmu_lock),
		"%s called with kvm mmu_lock not held\n", __func__);
	pte = __find_linux_pte(gp->shadow_pgtable, ea, NULL, hshift);

	return pte;
}

static inline bool kvmhv_n_rmap_is_equal(u64 rmap_1, u64 rmap_2)
{
	return !((rmap_1 ^ rmap_2) & (RMAP_NESTED_LPID_MASK |
				       RMAP_NESTED_GPA_MASK));
}

void kvmhv_insert_nest_rmap(struct kvm *kvm, unsigned long *rmapp,
			    struct rmap_nested **n_rmap)
{
	struct llist_node *entry = ((struct llist_head *) rmapp)->first;
	struct rmap_nested *cursor;
	u64 rmap, new_rmap = (*n_rmap)->rmap;

	/* Are there any existing entries? */
	if (!(*rmapp)) {
		/* No -> use the rmap as a single entry */
		*rmapp = new_rmap | RMAP_NESTED_IS_SINGLE_ENTRY;
		return;
	}

	/* Do any entries match what we're trying to insert? */
	for_each_nest_rmap_safe(cursor, entry, &rmap) {
		if (kvmhv_n_rmap_is_equal(rmap, new_rmap))
			return;
	}

	/* Do we need to create a list or just add the new entry? */
	rmap = *rmapp;
	if (rmap & RMAP_NESTED_IS_SINGLE_ENTRY) /* Not previously a list */
		*rmapp = 0UL;
	llist_add(&((*n_rmap)->list), (struct llist_head *) rmapp);
	if (rmap & RMAP_NESTED_IS_SINGLE_ENTRY) /* Not previously a list */
		(*n_rmap)->list.next = (struct llist_node *) rmap;

	/* Set NULL so not freed by caller */
	*n_rmap = NULL;
}
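
/*
 * Illustrative decode of the rmap encoding used above: a non-zero *rmapp
 * with RMAP_NESTED_IS_SINGLE_ENTRY set holds one rmap value inline;
 * otherwise *rmapp is the head of an llist of struct rmap_nested. A
 * sketch of a reader (roughly what for_each_nest_rmap_safe() hides,
 * with handle() standing in for the per-entry work):
 *
 *	u64 rmap = *rmapp;
 *
 *	if (rmap & RMAP_NESTED_IS_SINGLE_ENTRY)
 *		handle(rmap);
 *	else
 *		llist_for_each_entry(cursor,
 *				     ((struct llist_head *) rmapp)->first,
 *				     list)
 *			handle(cursor->rmap);
 */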

static void kvmhv_update_nest_rmap_rc(struct kvm *kvm, u64 n_rmap,
				      unsigned long clr, unsigned long set,
				      unsigned long hpa, unsigned long mask)
{
	unsigned long gpa;
	unsigned int shift, lpid;
	pte_t *ptep;

	gpa = n_rmap & RMAP_NESTED_GPA_MASK;
	lpid = (n_rmap & RMAP_NESTED_LPID_MASK) >> RMAP_NESTED_LPID_SHIFT;

	/* Find the pte */
	ptep = find_kvm_nested_guest_pte(kvm, lpid, gpa, &shift);
	/*
	 * If the pte is present and the pfn is still the same, update the pte.
	 * If the pfn has changed then this is a stale rmap entry, the nested
	 * gpa actually points somewhere else now, and there is nothing to do.
	 * XXX A future optimisation would be to remove the rmap entry here.
	 */
	if (ptep && pte_present(*ptep) && ((pte_val(*ptep) & mask) == hpa)) {
		__radix_pte_update(ptep, clr, set);
		kvmppc_radix_tlbie_page(kvm, gpa, shift, lpid);
	}
}

/*
 * For a given list of rmap entries, update the rc bits in all ptes in shadow
 * page tables for nested guests which are referenced by the rmap list.
 */
void kvmhv_update_nest_rmap_rc_list(struct kvm *kvm, unsigned long *rmapp,
				    unsigned long clr, unsigned long set,
				    unsigned long hpa, unsigned long nbytes)
{
	struct llist_node *entry = ((struct llist_head *) rmapp)->first;
	struct rmap_nested *cursor;
	unsigned long rmap, mask;

	if ((clr | set) & ~(_PAGE_DIRTY | _PAGE_ACCESSED))
		return;

	mask = PTE_RPN_MASK & ~(nbytes - 1);
	hpa &= mask;

	for_each_nest_rmap_safe(cursor, entry, &rmap)
		kvmhv_update_nest_rmap_rc(kvm, rmap, clr, set, hpa, mask);
}

static void kvmhv_remove_nest_rmap(struct kvm *kvm, u64 n_rmap,
				   unsigned long hpa, unsigned long mask)
{
	struct kvm_nested_guest *gp;
	unsigned long gpa;
	unsigned int shift, lpid;
	pte_t *ptep;

	gpa = n_rmap & RMAP_NESTED_GPA_MASK;
	lpid = (n_rmap & RMAP_NESTED_LPID_MASK) >> RMAP_NESTED_LPID_SHIFT;
	gp = __find_nested(kvm, lpid);
	if (!gp)
		return;

	/* Find and invalidate the pte */
	ptep = find_kvm_nested_guest_pte(kvm, lpid, gpa, &shift);
	/* Don't spuriously invalidate ptes if the pfn has changed */
	if (ptep && pte_present(*ptep) && ((pte_val(*ptep) & mask) == hpa))
		kvmppc_unmap_pte(kvm, ptep, gpa, shift, NULL, gp->shadow_lpid);
}

static void kvmhv_remove_nest_rmap_list(struct kvm *kvm, unsigned long *rmapp,
					unsigned long hpa, unsigned long mask)
{
	struct llist_node *entry = llist_del_all((struct llist_head *) rmapp);
	struct rmap_nested *cursor;
	unsigned long rmap;

	for_each_nest_rmap_safe(cursor, entry, &rmap) {
		kvmhv_remove_nest_rmap(kvm, rmap, hpa, mask);
		kfree(cursor);
	}
}

/* called with kvm->mmu_lock held */
void kvmhv_remove_nest_rmap_range(struct kvm *kvm,
				  const struct kvm_memory_slot *memslot,
				  unsigned long gpa, unsigned long hpa,
				  unsigned long nbytes)
{
	unsigned long gfn, end_gfn;
	unsigned long addr_mask;

	if (!memslot)
		return;
	gfn = (gpa >> PAGE_SHIFT) - memslot->base_gfn;
	end_gfn = gfn + (nbytes >> PAGE_SHIFT);

	addr_mask = PTE_RPN_MASK & ~(nbytes - 1);
	hpa &= addr_mask;

	for (; gfn < end_gfn; gfn++) {
		unsigned long *rmap = &memslot->arch.rmap[gfn];
		kvmhv_remove_nest_rmap_list(kvm, rmap, hpa, addr_mask);
	}
}

static void kvmhv_free_memslot_nest_rmap(struct kvm_memory_slot *free)
{
	unsigned long page;

	for (page = 0; page < free->npages; page++) {
		unsigned long rmap, *rmapp = &free->arch.rmap[page];
		struct rmap_nested *cursor;
		struct llist_node *entry;

		entry = llist_del_all((struct llist_head *) rmapp);
		for_each_nest_rmap_safe(cursor, entry, &rmap)
			kfree(cursor);
	}
}

static bool kvmhv_invalidate_shadow_pte(struct kvm_vcpu *vcpu,
					struct kvm_nested_guest *gp,
					long gpa, int *shift_ret)
{
	struct kvm *kvm = vcpu->kvm;
	bool ret = false;
	pte_t *ptep;
	int shift;

	spin_lock(&kvm->mmu_lock);
	ptep = find_kvm_nested_guest_pte(kvm, gp->l1_lpid, gpa, &shift);
	if (!shift)
		shift = PAGE_SHIFT;
	if (ptep && pte_present(*ptep)) {
		kvmppc_unmap_pte(kvm, ptep, gpa, shift, NULL, gp->shadow_lpid);
		ret = true;
	}
	spin_unlock(&kvm->mmu_lock);

	if (shift_ret)
		*shift_ret = shift;
	return ret;
}

static inline int get_ric(unsigned int instr)
{
	return (instr >> 18) & 0x3;
}

static inline int get_prs(unsigned int instr)
{
	return (instr >> 17) & 0x1;
}

static inline int get_r(unsigned int instr)
{
	return (instr >> 16) & 0x1;
}

static inline int get_lpid(unsigned long r_val)
{
	return r_val & 0xffffffff;
}

static inline int get_is(unsigned long r_val)
{
	return (r_val >> 10) & 0x3;
}

static inline int get_ap(unsigned long r_val)
{
	return (r_val >> 5) & 0x7;
}

static inline long get_epn(unsigned long r_val)
{
	return r_val >> 12;
}
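
/*
 * Field layout assumed by the accessors above, following the tlbie
 * instruction encoding and its rS/rB operands (bit 0 = least significant
 * here, unlike the ISA's bit numbering):
 *
 *	instr[18:19]	RIC	(get_ric)
 *	instr[17]	PRS	(get_prs)
 *	instr[16]	R	(get_r)
 *	rS[0:31]	LPID	(get_lpid)
 *	rB[10:11]	IS	(get_is)
 *	rB[5:7]		AP	(get_ap)
 *	rB[12:63]	EPN	(get_epn)
 */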

static int kvmhv_emulate_tlbie_tlb_addr(struct kvm_vcpu *vcpu, int lpid,
					int ap, long epn)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_nested_guest *gp;
	long npages;
	int shift, shadow_shift;
	unsigned long addr;

	shift = ap_to_shift(ap);
	addr = epn << 12;
	if (shift < 0)
		/* Invalid ap encoding */
		return -EINVAL;

	addr &= ~((1UL << shift) - 1);
	npages = 1UL << (shift - PAGE_SHIFT);

	gp = kvmhv_get_nested(kvm, lpid, false);
	if (!gp) /* No such guest -> nothing to do */
		return 0;
	mutex_lock(&gp->tlb_lock);

	/* There may be more than one host page backing this single guest pte */
	do {
		kvmhv_invalidate_shadow_pte(vcpu, gp, addr, &shadow_shift);

		npages -= 1UL << (shadow_shift - PAGE_SHIFT);
		addr += 1UL << shadow_shift;
	} while (npages > 0);

	mutex_unlock(&gp->tlb_lock);
	kvmhv_put_nested(gp);
	return 0;
}
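
/*
 * Worked example for the loop above (illustrative): if the guest
 * invalidates a 2MB page (shift = 21) that we have shadowed with 4kB
 * ptes (PAGE_SHIFT = 12), npages starts at 512; each
 * kvmhv_invalidate_shadow_pte() that reports shadow_shift = 12 advances
 * addr by 4kB and consumes one page, so up to 512 iterations. If the
 * shadow mapping turns out to be 2MB as well, shadow_shift = 21 and a
 * single iteration covers the whole range.
 */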

static void kvmhv_emulate_tlbie_lpid(struct kvm_vcpu *vcpu,
				     struct kvm_nested_guest *gp, int ric)
{
	struct kvm *kvm = vcpu->kvm;

	mutex_lock(&gp->tlb_lock);
	switch (ric) {
	case 0:
		/* Invalidate TLB */
		spin_lock(&kvm->mmu_lock);
		kvmppc_free_pgtable_radix(kvm, gp->shadow_pgtable,
					  gp->shadow_lpid);
		kvmhv_flush_lpid(gp->shadow_lpid);
		spin_unlock(&kvm->mmu_lock);
		break;
	case 1:
		/*
		 * Invalidate PWC
		 * We don't cache this -> nothing to do
		 */
		break;
	case 2:
		/* Invalidate TLB, PWC and caching of partition table entries */
		kvmhv_flush_nested(gp);
		break;
	default:
		break;
	}
	mutex_unlock(&gp->tlb_lock);
}

static void kvmhv_emulate_tlbie_all_lpid(struct kvm_vcpu *vcpu, int ric)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_nested_guest *gp;
	int lpid;

	spin_lock(&kvm->mmu_lock);
	idr_for_each_entry(&kvm->arch.kvm_nested_guest_idr, gp, lpid) {
		spin_unlock(&kvm->mmu_lock);
		kvmhv_emulate_tlbie_lpid(vcpu, gp, ric);
		spin_lock(&kvm->mmu_lock);
	}
	spin_unlock(&kvm->mmu_lock);
}

static int kvmhv_emulate_priv_tlbie(struct kvm_vcpu *vcpu, unsigned int instr,
				    unsigned long rsval, unsigned long rbval)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_nested_guest *gp;
	int r, ric, prs, is, ap;
	int lpid;
	long epn;
	int ret = 0;

	ric = get_ric(instr);
	prs = get_prs(instr);
	r = get_r(instr);
	lpid = get_lpid(rsval);
	is = get_is(rbval);

	/*
	 * These cases are invalid and are not handled:
	 * r   != 1 -> Only radix supported
	 * prs == 1 -> Not HV privileged
	 * ric == 3 -> No cluster bombs for radix
	 * is  == 1 -> Partition scoped translations not associated with pid
	 * (!is) && (ric == 1 || ric == 2) -> Not supported by ISA
	 */
	if ((!r) || (prs) || (ric == 3) || (is == 1) ||
	    ((!is) && (ric == 1 || ric == 2)))
		return -EINVAL;

	switch (is) {
	case 0:
		/*
		 * We know ric == 0
		 * Invalidate TLB for a given target address
		 */
		epn = get_epn(rbval);
		ap = get_ap(rbval);
		ret = kvmhv_emulate_tlbie_tlb_addr(vcpu, lpid, ap, epn);
		break;
	case 2:
		/* Invalidate matching LPID */
		gp = kvmhv_get_nested(kvm, lpid, false);
		if (gp) {
			kvmhv_emulate_tlbie_lpid(vcpu, gp, ric);
			kvmhv_put_nested(gp);
		}
		break;
	case 3:
		/* Invalidate ALL LPIDs */
		kvmhv_emulate_tlbie_all_lpid(vcpu, ric);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

/*
 * This handles the H_TLB_INVALIDATE hcall.
 * Parameters are (r4) tlbie instruction code, (r5) rS contents,
 * (r6) rB contents.
 */
long kvmhv_do_nested_tlbie(struct kvm_vcpu *vcpu)
{
	int ret;

	ret = kvmhv_emulate_priv_tlbie(vcpu, kvmppc_get_gpr(vcpu, 4),
			kvmppc_get_gpr(vcpu, 5), kvmppc_get_gpr(vcpu, 6));
	if (ret)
		return H_PARAMETER;
	return H_SUCCESS;
}

static long do_tlb_invalidate_nested_all(struct kvm_vcpu *vcpu,
					 unsigned long lpid, unsigned long ric)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_nested_guest *gp;

	gp = kvmhv_get_nested(kvm, lpid, false);
	if (gp) {
		kvmhv_emulate_tlbie_lpid(vcpu, gp, ric);
		kvmhv_put_nested(gp);
	}
	return H_SUCCESS;
}

/*
 * Number of pages above which we invalidate the entire LPID rather than
 * flush individual pages.
 */
static unsigned long tlb_range_flush_page_ceiling __read_mostly = 33;
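
/*
 * Example (illustrative): a 256kB invalidation of 4kB pages is 64 pages,
 * which exceeds the ceiling of 33, so do_tlb_invalidate_nested_tlb()
 * below degrades it to one full-LPID flush; a 64kB range (16 pages) is
 * flushed page by page instead.
 */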

static long do_tlb_invalidate_nested_tlb(struct kvm_vcpu *vcpu,
					 unsigned long lpid,
					 unsigned long pg_sizes,
					 unsigned long start,
					 unsigned long end)
{
	int ret = H_P4;
	unsigned long addr, nr_pages;
	struct mmu_psize_def *def;
	unsigned long psize, ap, page_size;
	bool flush_lpid;

	for (psize = 0; psize < MMU_PAGE_COUNT; psize++) {
		def = &mmu_psize_defs[psize];
		if (!(pg_sizes & def->h_rpt_pgsize))
			continue;

		nr_pages = (end - start) >> def->shift;
		flush_lpid = nr_pages > tlb_range_flush_page_ceiling;
		if (flush_lpid)
			return do_tlb_invalidate_nested_all(vcpu, lpid,
							RIC_FLUSH_TLB);
		addr = start;
		ap = mmu_get_ap(psize);
		page_size = 1UL << def->shift;
		do {
			ret = kvmhv_emulate_tlbie_tlb_addr(vcpu, lpid, ap,
						   get_epn(addr));
			if (ret)
				return H_P4;
			addr += page_size;
		} while (addr < end);
	}
	return ret;
}

/*
 * Performs partition-scoped invalidations for nested guests
 * as part of H_RPT_INVALIDATE hcall.
 */
long do_h_rpt_invalidate_pat(struct kvm_vcpu *vcpu, unsigned long lpid,
			     unsigned long type, unsigned long pg_sizes,
			     unsigned long start, unsigned long end)
{
	/*
	 * If the L2 lpid isn't valid, we need to return H_PARAMETER.
	 *
	 * However, nested KVM issues an L2 lpid flush call when creating
	 * partition table entries for L2. This happens even before the
	 * corresponding shadow lpid is created in the HV, which happens
	 * in the H_ENTER_NESTED call. Since we can't differentiate this
	 * case from the invalid case, we ignore such flush requests and
	 * return success.
	 */
	if (!__find_nested(vcpu->kvm, lpid))
		return H_SUCCESS;

	/*
	 * A flush all request can be handled by a full lpid flush only.
	 */
	if ((type & H_RPTI_TYPE_NESTED_ALL) == H_RPTI_TYPE_NESTED_ALL)
		return do_tlb_invalidate_nested_all(vcpu, lpid, RIC_FLUSH_ALL);

	/*
	 * We don't need to handle a PWC flush like we do for the process
	 * table, because the intermediate partition-scoped table in the
	 * nested guest doesn't really have a PWC. The only level at which
	 * we have a PWC is in L0, and for a nested invalidate at L0 we
	 * always do kvm_flush_lpid(), which does radix__flush_all_lpid().
	 * For a range invalidate at any level, we are not removing the
	 * higher-level page tables and hence there is no PWC invalidate
	 * needed.
	 *
	 * if (type & H_RPTI_TYPE_PWC) {
	 *	ret = do_tlb_invalidate_nested_all(vcpu, lpid, RIC_FLUSH_PWC);
	 *	if (ret)
	 *		return H_P4;
	 * }
	 */

	if (start == 0 && end == -1)
		return do_tlb_invalidate_nested_all(vcpu, lpid, RIC_FLUSH_TLB);

	if (type & H_RPTI_TYPE_TLB)
		return do_tlb_invalidate_nested_tlb(vcpu, lpid, pg_sizes,
						    start, end);
	return H_SUCCESS;
}

/* Used to convert a nested guest real address to an L1 guest real address */
static int kvmhv_translate_addr_nested(struct kvm_vcpu *vcpu,
				       struct kvm_nested_guest *gp,
				       unsigned long n_gpa, unsigned long dsisr,
				       struct kvmppc_pte *gpte_p)
{
	u64 fault_addr, flags = dsisr & DSISR_ISSTORE;
	int ret;

	ret = kvmppc_mmu_walk_radix_tree(vcpu, n_gpa, gpte_p, gp->l1_gr_to_hr,
					 &fault_addr);

	if (ret) {
		/* We didn't find a pte */
		if (ret == -EINVAL) {
			/* Unsupported mmu config */
			flags |= DSISR_UNSUPP_MMU;
		} else if (ret == -ENOENT) {
			/* No translation found */
			flags |= DSISR_NOHPTE;
		} else if (ret == -EFAULT) {
			/* Couldn't access L1 real address */
			flags |= DSISR_PRTABLE_FAULT;
			vcpu->arch.fault_gpa = fault_addr;
		} else {
			/* Unknown error */
			return ret;
		}
		goto forward_to_l1;
	} else {
		/* We found a pte -> check permissions */
		if (dsisr & DSISR_ISSTORE) {
			/* Can we write? */
			if (!gpte_p->may_write) {
				flags |= DSISR_PROTFAULT;
				goto forward_to_l1;
			}
		} else if (vcpu->arch.trap == BOOK3S_INTERRUPT_H_INST_STORAGE) {
			/* Can we execute? */
			if (!gpte_p->may_execute) {
				flags |= SRR1_ISI_N_G_OR_CIP;
				goto forward_to_l1;
			}
		} else {
			/* Can we read? */
			if (!gpte_p->may_read && !gpte_p->may_write) {
				flags |= DSISR_PROTFAULT;
				goto forward_to_l1;
			}
		}
	}

	return 0;

forward_to_l1:
	vcpu->arch.fault_dsisr = flags;
	if (vcpu->arch.trap == BOOK3S_INTERRUPT_H_INST_STORAGE) {
		vcpu->arch.shregs.msr &= SRR1_MSR_BITS;
		vcpu->arch.shregs.msr |= flags;
	}
	return RESUME_HOST;
}

static long kvmhv_handle_nested_set_rc(struct kvm_vcpu *vcpu,
				       struct kvm_nested_guest *gp,
				       unsigned long n_gpa,
				       struct kvmppc_pte gpte,
				       unsigned long dsisr)
{
	struct kvm *kvm = vcpu->kvm;
	bool writing = !!(dsisr & DSISR_ISSTORE);
	u64 pgflags;
	long ret;

	/* Are the rc bits set in the L1 partition scoped pte? */
	pgflags = _PAGE_ACCESSED;
	if (writing)
		pgflags |= _PAGE_DIRTY;
	if (pgflags & ~gpte.rc)
		return RESUME_HOST;

	spin_lock(&kvm->mmu_lock);
	/* Set the rc bit in the pte of our (L0) pgtable for the L1 guest */
	ret = kvmppc_hv_handle_set_rc(kvm, false, writing,
				      gpte.raddr, kvm->arch.lpid);
	if (!ret) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* Set the rc bit in the pte of the shadow_pgtable for the nested guest */
	ret = kvmppc_hv_handle_set_rc(kvm, true, writing,
				      n_gpa, gp->l1_lpid);
	if (!ret)
		ret = -EINVAL;
	else
		ret = 0;

out_unlock:
	spin_unlock(&kvm->mmu_lock);
	return ret;
}

static inline int kvmppc_radix_level_to_shift(int level)
{
	switch (level) {
	case 2:
		return PUD_SHIFT;
	case 1:
		return PMD_SHIFT;
	default:
		return PAGE_SHIFT;
	}
}

static inline int kvmppc_radix_shift_to_level(int shift)
{
	if (shift == PUD_SHIFT)
		return 2;
	if (shift == PMD_SHIFT)
		return 1;
	if (shift == PAGE_SHIFT)
		return 0;
	WARN_ON_ONCE(1);
	return 0;
}
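
/*
 * Reference table for the level <-> shift mapping above, assuming the
 * usual radix geometry:
 *
 *	level 0		PAGE_SHIFT	base page size (4kB or 64kB)
 *	level 1		PMD_SHIFT	2MB
 *	level 2		PUD_SHIFT	1GB
 */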

/* called with gp->tlb_lock held */
static long int __kvmhv_nested_page_fault(struct kvm_vcpu *vcpu,
					  struct kvm_nested_guest *gp)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_memory_slot *memslot;
	struct rmap_nested *n_rmap;
	struct kvmppc_pte gpte;
	pte_t pte, *pte_p;
	unsigned long mmu_seq;
	unsigned long dsisr = vcpu->arch.fault_dsisr;
	unsigned long ea = vcpu->arch.fault_dar;
	unsigned long *rmapp;
	unsigned long n_gpa, gpa, gfn, perm = 0UL;
	unsigned int shift, l1_shift, level;
	bool writing = !!(dsisr & DSISR_ISSTORE);
	bool kvm_ro = false;
	long int ret;

	if (!gp->l1_gr_to_hr) {
		kvmhv_update_ptbl_cache(gp);
		if (!gp->l1_gr_to_hr)
			return RESUME_HOST;
	}

	/* Convert the nested guest real address into an L1 guest real address */

	n_gpa = vcpu->arch.fault_gpa & ~0xF000000000000FFFULL;
	if (!(dsisr & DSISR_PRTABLE_FAULT))
		n_gpa |= ea & 0xFFF;
	ret = kvmhv_translate_addr_nested(vcpu, gp, n_gpa, dsisr, &gpte);

	/*
	 * If the hardware found a translation but we don't now have a usable
	 * translation in the l1 partition-scoped tree, remove the shadow pte
	 * and let the guest retry.
	 */
	if (ret == RESUME_HOST &&
	    (dsisr & (DSISR_PROTFAULT | DSISR_BADACCESS | DSISR_NOEXEC_OR_G |
		      DSISR_BAD_COPYPASTE)))
		goto inval;
	if (ret)
		return ret;

	/* Failed to set the reference/change bits */
	if (dsisr & DSISR_SET_RC) {
		ret = kvmhv_handle_nested_set_rc(vcpu, gp, n_gpa, gpte, dsisr);
		if (ret == RESUME_HOST)
			return ret;
		if (ret)
			goto inval;
		dsisr &= ~DSISR_SET_RC;
		if (!(dsisr & (DSISR_BAD_FAULT_64S | DSISR_NOHPTE |
			       DSISR_PROTFAULT)))
			return RESUME_GUEST;
	}

	/*
	 * We took an HISI or HDSI while we were running a nested guest, which
	 * means we have no partition scoped translation for that. This means
	 * we need to insert a pte for the mapping into our shadow_pgtable.
	 */

	l1_shift = gpte.page_shift;
	if (l1_shift < PAGE_SHIFT) {
		/* We don't support l1 using a page size smaller than our own */
		pr_err("KVM: L1 guest page shift (%d) less than our own (%d)\n",
			l1_shift, PAGE_SHIFT);
		return -EINVAL;
	}
	gpa = gpte.raddr;
	gfn = gpa >> PAGE_SHIFT;

	/* 1. Get the corresponding host memslot */

	memslot = gfn_to_memslot(kvm, gfn);
	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) {
		if (dsisr & (DSISR_PRTABLE_FAULT | DSISR_BADACCESS)) {
			/* unusual error -> reflect to the guest as a DSI */
			kvmppc_core_queue_data_storage(vcpu,
					kvmppc_get_msr(vcpu) & SRR1_PREFIXED,
					ea, dsisr);
			return RESUME_GUEST;
		}

		/* passthrough of emulated MMIO case */
		return kvmppc_hv_emulate_mmio(vcpu, gpa, ea, writing);
	}
	if (memslot->flags & KVM_MEM_READONLY) {
		if (writing) {
			/* Give the guest a DSI */
			kvmppc_core_queue_data_storage(vcpu,
					kvmppc_get_msr(vcpu) & SRR1_PREFIXED,
					ea, DSISR_ISSTORE | DSISR_PROTFAULT);
			return RESUME_GUEST;
		}
		kvm_ro = true;
	}

	/* 2. Find the host pte for this L1 guest real address */

	/* Used to check for invalidations in progress */
	mmu_seq = kvm->mmu_invalidate_seq;
	smp_rmb();

	/* See if we can find a translation in our partition scoped tables for L1 */
	pte = __pte(0);
	spin_lock(&kvm->mmu_lock);
	pte_p = find_kvm_secondary_pte(kvm, gpa, &shift);
	if (!shift)
		shift = PAGE_SHIFT;
	if (pte_p)
		pte = *pte_p;
	spin_unlock(&kvm->mmu_lock);

	if (!pte_present(pte) || (writing && !(pte_val(pte) & _PAGE_WRITE))) {
		/* No suitable pte found -> try to insert a mapping */
		ret = kvmppc_book3s_instantiate_page(vcpu, gpa, memslot,
					writing, kvm_ro, &pte, &level);
		if (ret == -EAGAIN)
			return RESUME_GUEST;
		else if (ret)
			return ret;
		shift = kvmppc_radix_level_to_shift(level);
	}
	/* Align gfn to the start of the page */
	gfn = (gpa & ~((1UL << shift) - 1)) >> PAGE_SHIFT;

	/* 3. Compute the pte we need to insert for nest_gpa -> host r_addr */

	/* The permissions are the combination of the host and L1 guest ptes */
	perm |= gpte.may_read ? 0UL : _PAGE_READ;
	perm |= gpte.may_write ? 0UL : _PAGE_WRITE;
	perm |= gpte.may_execute ? 0UL : _PAGE_EXEC;
	/* Only set accessed/dirty (rc) bits if set in host and l1 guest ptes */
	perm |= (gpte.rc & _PAGE_ACCESSED) ? 0UL : _PAGE_ACCESSED;
	perm |= ((gpte.rc & _PAGE_DIRTY) && writing) ? 0UL : _PAGE_DIRTY;
	pte = __pte(pte_val(pte) & ~perm);

	/* What size pte can we insert? */
	if (shift > l1_shift) {
		u64 mask;
		unsigned int actual_shift = PAGE_SHIFT;
		if (PMD_SHIFT < l1_shift)
			actual_shift = PMD_SHIFT;
		mask = (1UL << shift) - (1UL << actual_shift);
		pte = __pte(pte_val(pte) | (gpa & mask));
		shift = actual_shift;
	}
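	/*
	 * Worked example for the clamp above (illustrative, with 4kB base
	 * pages): if the host maps this range with a 2MB pte (shift = 21)
	 * but L1 only mapped a 64kB page (l1_shift = 16), PMD_SHIFT is not
	 * below l1_shift, so actual_shift stays at PAGE_SHIFT and we insert
	 * a 4kB pte whose address picks up bits 12-20 of gpa via mask.
	 */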
	level = kvmppc_radix_shift_to_level(shift);
	n_gpa &= ~((1UL << shift) - 1);

	/* 4. Insert the pte into our shadow_pgtable */

	n_rmap = kzalloc(sizeof(*n_rmap), GFP_KERNEL);
	if (!n_rmap)
		return RESUME_GUEST; /* Let the guest try again */
	n_rmap->rmap = (n_gpa & RMAP_NESTED_GPA_MASK) |
		(((unsigned long) gp->l1_lpid) << RMAP_NESTED_LPID_SHIFT);
	rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
	ret = kvmppc_create_pte(kvm, gp->shadow_pgtable, pte, n_gpa, level,
				mmu_seq, gp->shadow_lpid, rmapp, &n_rmap);
	kfree(n_rmap);
	if (ret == -EAGAIN)
		ret = RESUME_GUEST;	/* Let the guest try again */

	return ret;

 inval:
	kvmhv_invalidate_shadow_pte(vcpu, gp, n_gpa, NULL);
	return RESUME_GUEST;
}

long int kvmhv_nested_page_fault(struct kvm_vcpu *vcpu)
{
	struct kvm_nested_guest *gp = vcpu->arch.nested;
	long int ret;

	mutex_lock(&gp->tlb_lock);
	ret = __kvmhv_nested_page_fault(vcpu, gp);
	mutex_unlock(&gp->tlb_lock);
	return ret;
}

int kvmhv_nested_next_lpid(struct kvm *kvm, int lpid)
{
	int ret = lpid + 1;

	spin_lock(&kvm->mmu_lock);
	if (!idr_get_next(&kvm->arch.kvm_nested_guest_idr, &ret))
		ret = -1;
	spin_unlock(&kvm->mmu_lock);

	return ret;
}