/* xref: /kernel/linux/linux-5.10/arch/s390/kvm/vsie.c (revision 8c2ecf20) */
// SPDX-License-Identifier: GPL-2.0
/*
 * kvm nested virtualization support for s390x
 *
 * Copyright IBM Corp. 2016, 2018
 *
 *    Author(s): David Hildenbrand <dahi@linux.vnet.ibm.com>
 */
#include <linux/vmalloc.h>
#include <linux/kvm_host.h>
#include <linux/bug.h>
#include <linux/list.h>
#include <linux/bitmap.h>
#include <linux/sched/signal.h>

#include <asm/gmap.h>
#include <asm/mmu_context.h>
#include <asm/sclp.h>
#include <asm/nmi.h>
#include <asm/dis.h>
#include "kvm-s390.h"
#include "gaccess.h"

struct vsie_page {
	struct kvm_s390_sie_block scb_s;	/* 0x0000 */
	/*
	 * The backup info for machine checks. Ensure it's at
	 * the same offset as in struct sie_page!
	 */
	struct mcck_volatile_info mcck_info;    /* 0x0200 */
	/*
	 * The pinned original scb. Be aware that other VCPUs can modify
	 * it while we read from it. Values that are used for conditions or
	 * are reused conditionally, should be accessed via READ_ONCE.
	 */
	struct kvm_s390_sie_block *scb_o;	/* 0x0218 */
	/* the shadow gmap in use by the vsie_page */
	struct gmap *gmap;			/* 0x0220 */
	/* address of the last reported fault to guest2 */
	unsigned long fault_addr;		/* 0x0228 */
	/* calculated guest addresses of satellite control blocks */
	gpa_t sca_gpa;				/* 0x0230 */
	gpa_t itdba_gpa;			/* 0x0238 */
	gpa_t gvrd_gpa;				/* 0x0240 */
	gpa_t riccbd_gpa;			/* 0x0248 */
	gpa_t sdnx_gpa;				/* 0x0250 */
	__u8 reserved[0x0700 - 0x0258];		/* 0x0258 */
	struct kvm_s390_crypto_cb crycb;	/* 0x0700 */
	__u8 fac[S390_ARCH_FAC_LIST_SIZE_BYTE];	/* 0x0800 */
};
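
/*
 * The layout above must fill exactly one page; kvm_s390_handle_vsie()
 * enforces this with BUILD_BUG_ON(sizeof(struct vsie_page) != PAGE_SIZE).
 * A minimal sketch (not part of the original source) of how the commented
 * offsets could additionally be asserted at compile time:
 *
 *	BUILD_BUG_ON(offsetof(struct vsie_page, mcck_info) != 0x0200);
 *	BUILD_BUG_ON(offsetof(struct vsie_page, crycb) != 0x0700);
 */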

/* trigger a validity icpt for the given scb */
static int set_validity_icpt(struct kvm_s390_sie_block *scb,
			     __u16 reason_code)
{
	scb->ipa = 0x1000;
	scb->ipb = ((__u32) reason_code) << 16;
	scb->icptcode = ICPT_VALIDITY;
	return 1;
}

/* mark the prefix as unmapped, this will block the VSIE */
static void prefix_unmapped(struct vsie_page *vsie_page)
{
	atomic_or(PROG_REQUEST, &vsie_page->scb_s.prog20);
}

/* mark the prefix as unmapped and wait until the VSIE has been left */
static void prefix_unmapped_sync(struct vsie_page *vsie_page)
{
	prefix_unmapped(vsie_page);
	if (vsie_page->scb_s.prog0c & PROG_IN_SIE)
		atomic_or(CPUSTAT_STOP_INT, &vsie_page->scb_s.cpuflags);
	while (vsie_page->scb_s.prog0c & PROG_IN_SIE)
		cpu_relax();
}

/* mark the prefix as mapped, this will allow the VSIE to run */
static void prefix_mapped(struct vsie_page *vsie_page)
{
	atomic_andnot(PROG_REQUEST, &vsie_page->scb_s.prog20);
}

/* test if the prefix is mapped into the gmap shadow */
static int prefix_is_mapped(struct vsie_page *vsie_page)
{
	return !(atomic_read(&vsie_page->scb_s.prog20) & PROG_REQUEST);
}
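
/*
 * Summary of the prefix protocol implemented by the helpers above: the
 * gmap notifier calls prefix_unmapped_sync() when a prefix page is
 * unmapped, map_prefix() re-shadows the pages and calls prefix_mapped()
 * before the next SIE entry, and a set PROG_REQUEST bit blocks the vSIE
 * from (re)entering in between.
 */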

/* copy the updated intervention request bits into the shadow scb */
static void update_intervention_requests(struct vsie_page *vsie_page)
{
	const int bits = CPUSTAT_STOP_INT | CPUSTAT_IO_INT | CPUSTAT_EXT_INT;
	int cpuflags;

	cpuflags = atomic_read(&vsie_page->scb_o->cpuflags);
	atomic_andnot(bits, &vsie_page->scb_s.cpuflags);
	atomic_or(cpuflags & bits, &vsie_page->scb_s.cpuflags);
}

/* shadow (filter and validate) the cpuflags */
static int prepare_cpuflags(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
	int newflags, cpuflags = atomic_read(&scb_o->cpuflags);

	/* we don't allow ESA/390 guests */
	if (!(cpuflags & CPUSTAT_ZARCH))
		return set_validity_icpt(scb_s, 0x0001U);

	if (cpuflags & (CPUSTAT_RRF | CPUSTAT_MCDS))
		return set_validity_icpt(scb_s, 0x0001U);
	else if (cpuflags & (CPUSTAT_SLSV | CPUSTAT_SLSR))
		return set_validity_icpt(scb_s, 0x0007U);

	/* intervention requests will be set later */
	newflags = CPUSTAT_ZARCH;
	if (cpuflags & CPUSTAT_GED && test_kvm_facility(vcpu->kvm, 8))
		newflags |= CPUSTAT_GED;
	if (cpuflags & CPUSTAT_GED2 && test_kvm_facility(vcpu->kvm, 78)) {
		if (cpuflags & CPUSTAT_GED)
			return set_validity_icpt(scb_s, 0x0001U);
		newflags |= CPUSTAT_GED2;
	}
	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_GPERE))
		newflags |= cpuflags & CPUSTAT_P;
	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_GSLS))
		newflags |= cpuflags & CPUSTAT_SM;
	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_IBS))
		newflags |= cpuflags & CPUSTAT_IBS;
	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_KSS))
		newflags |= cpuflags & CPUSTAT_KSS;

	atomic_set(&scb_s->cpuflags, newflags);
	return 0;
}

/* Copy to APCB FORMAT1 from APCB FORMAT0 */
static int setup_apcb10(struct kvm_vcpu *vcpu, struct kvm_s390_apcb1 *apcb_s,
			unsigned long apcb_o, struct kvm_s390_apcb1 *apcb_h)
{
	struct kvm_s390_apcb0 tmp;

	if (read_guest_real(vcpu, apcb_o, &tmp, sizeof(struct kvm_s390_apcb0)))
		return -EFAULT;

	apcb_s->apm[0] = apcb_h->apm[0] & tmp.apm[0];
	apcb_s->aqm[0] = apcb_h->aqm[0] & tmp.aqm[0] & 0xffff000000000000UL;
	apcb_s->adm[0] = apcb_h->adm[0] & tmp.adm[0] & 0xffff000000000000UL;

	return 0;
}

/**
 * setup_apcb00 - Copy to APCB FORMAT0 from APCB FORMAT0
 * @vcpu: pointer to the virtual CPU
 * @apcb_s: pointer to start of apcb in the shadow crycb
 * @apcb_o: pointer to start of original apcb in the guest2
 * @apcb_h: pointer to start of apcb in the guest1
 *
 * Returns 0 on success; -EFAULT on error reading the guest apcb
 */
static int setup_apcb00(struct kvm_vcpu *vcpu, unsigned long *apcb_s,
			unsigned long apcb_o, unsigned long *apcb_h)
{
	if (read_guest_real(vcpu, apcb_o, apcb_s,
			    sizeof(struct kvm_s390_apcb0)))
		return -EFAULT;

	bitmap_and(apcb_s, apcb_s, apcb_h,
		   BITS_PER_BYTE * sizeof(struct kvm_s390_apcb0));

	return 0;
}

/**
 * setup_apcb11 - Copy the FORMAT1 APCB from the guest to the shadow CRYCB
 * @vcpu: pointer to the virtual CPU
 * @apcb_s: pointer to start of apcb in the shadow crycb
 * @apcb_o: pointer to start of original guest apcb
 * @apcb_h: pointer to start of apcb in the host
 *
 * Returns 0 on success; -EFAULT on error reading the guest apcb
 */
static int setup_apcb11(struct kvm_vcpu *vcpu, unsigned long *apcb_s,
			unsigned long apcb_o,
			unsigned long *apcb_h)
{
	if (read_guest_real(vcpu, apcb_o, apcb_s,
			    sizeof(struct kvm_s390_apcb1)))
		return -EFAULT;

	bitmap_and(apcb_s, apcb_s, apcb_h,
		   BITS_PER_BYTE * sizeof(struct kvm_s390_apcb1));

	return 0;
}

/**
 * setup_apcb - Create a shadow copy of the apcb.
 * @vcpu: pointer to the virtual CPU
 * @crycb_s: pointer to shadow crycb
 * @crycb_o: pointer to original guest crycb
 * @crycb_h: pointer to the host crycb
 * @fmt_o: format of the original guest crycb.
 * @fmt_h: format of the host crycb.
 *
 * Checks the compatibility between the guest and host crycb and calls the
 * appropriate copy function.
 *
 * Returns 0 on success or an error number if the guest and host crycb
 * are incompatible.
 */
static int setup_apcb(struct kvm_vcpu *vcpu, struct kvm_s390_crypto_cb *crycb_s,
	       const u32 crycb_o,
	       struct kvm_s390_crypto_cb *crycb_h,
	       int fmt_o, int fmt_h)
{
	struct kvm_s390_crypto_cb *crycb;

	crycb = (struct kvm_s390_crypto_cb *) (unsigned long)crycb_o;

	switch (fmt_o) {
	case CRYCB_FORMAT2:
		if ((crycb_o & PAGE_MASK) != ((crycb_o + 256) & PAGE_MASK))
			return -EACCES;
		if (fmt_h != CRYCB_FORMAT2)
			return -EINVAL;
		return setup_apcb11(vcpu, (unsigned long *)&crycb_s->apcb1,
				    (unsigned long) &crycb->apcb1,
				    (unsigned long *)&crycb_h->apcb1);
	case CRYCB_FORMAT1:
		switch (fmt_h) {
		case CRYCB_FORMAT2:
			return setup_apcb10(vcpu, &crycb_s->apcb1,
					    (unsigned long) &crycb->apcb0,
					    &crycb_h->apcb1);
		case CRYCB_FORMAT1:
			return setup_apcb00(vcpu,
					    (unsigned long *) &crycb_s->apcb0,
					    (unsigned long) &crycb->apcb0,
					    (unsigned long *) &crycb_h->apcb0);
		}
		break;
	case CRYCB_FORMAT0:
		if ((crycb_o & PAGE_MASK) != ((crycb_o + 32) & PAGE_MASK))
			return -EACCES;

		switch (fmt_h) {
		case CRYCB_FORMAT2:
			return setup_apcb10(vcpu, &crycb_s->apcb1,
					    (unsigned long) &crycb->apcb0,
					    &crycb_h->apcb1);
		case CRYCB_FORMAT1:
		case CRYCB_FORMAT0:
			return setup_apcb00(vcpu,
					    (unsigned long *) &crycb_s->apcb0,
					    (unsigned long) &crycb->apcb0,
					    (unsigned long *) &crycb_h->apcb0);
		}
	}
	return -EINVAL;
}
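
/*
 * Summary of the format combinations handled by setup_apcb():
 *   guest FORMAT2 (host must be FORMAT2)      -> setup_apcb11()
 *   guest FORMAT1, host FORMAT2               -> setup_apcb10()
 *   guest FORMAT1, host FORMAT1               -> setup_apcb00()
 *   guest FORMAT0, host FORMAT2               -> setup_apcb10()
 *   guest FORMAT0, host FORMAT1 or FORMAT0    -> setup_apcb00()
 * Every other combination is rejected with -EINVAL.
 */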

/**
 * shadow_crycb - Create a shadow copy of the crycb block
 * @vcpu: a pointer to the virtual CPU
 * @vsie_page: a pointer to internal data used for the vSIE
 *
 * Create a shadow copy of the crycb block and setup key wrapping, if
 * requested for guest 3 and enabled for guest 2.
 *
 * We accept format-1 or format-2, but we convert format-1 into format-2
 * in the shadow CRYCB.
 * Using format-2 enables the firmware to choose the right format when
 * scheduling the SIE.
 * There is nothing to do for format-0.
 *
 * This function centralizes the issuing of set_validity_icpt() for all
 * the subfunctions working on the crycb.
 *
 * Returns: - 0 if shadowed or nothing to do
 *          - > 0 if control has to be given to guest 2
 */
static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
	const uint32_t crycbd_o = READ_ONCE(scb_o->crycbd);
	const u32 crycb_addr = crycbd_o & 0x7ffffff8U;
	unsigned long *b1, *b2;
	u8 ecb3_flags;
	u32 ecd_flags;
	int apie_h;
	int apie_s;
	int key_msk = test_kvm_facility(vcpu->kvm, 76);
	int fmt_o = crycbd_o & CRYCB_FORMAT_MASK;
	int fmt_h = vcpu->arch.sie_block->crycbd & CRYCB_FORMAT_MASK;
	int ret = 0;

	scb_s->crycbd = 0;

	apie_h = vcpu->arch.sie_block->eca & ECA_APIE;
	apie_s = apie_h & scb_o->eca;
	if (!apie_s && (!key_msk || (fmt_o == CRYCB_FORMAT0)))
		return 0;

	if (!crycb_addr)
		return set_validity_icpt(scb_s, 0x0039U);

	if (fmt_o == CRYCB_FORMAT1)
		if ((crycb_addr & PAGE_MASK) !=
		    ((crycb_addr + 128) & PAGE_MASK))
			return set_validity_icpt(scb_s, 0x003CU);

	if (apie_s) {
		ret = setup_apcb(vcpu, &vsie_page->crycb, crycb_addr,
				 vcpu->kvm->arch.crypto.crycb,
				 fmt_o, fmt_h);
		if (ret)
			goto end;
		scb_s->eca |= scb_o->eca & ECA_APIE;
	}

	/* we may only allow it if enabled for guest 2 */
	ecb3_flags = scb_o->ecb3 & vcpu->arch.sie_block->ecb3 &
		     (ECB3_AES | ECB3_DEA);
	ecd_flags = scb_o->ecd & vcpu->arch.sie_block->ecd & ECD_ECC;
	if (!ecb3_flags && !ecd_flags)
		goto end;

	/* copy only the wrapping keys */
	if (read_guest_real(vcpu, crycb_addr + 72,
			    vsie_page->crycb.dea_wrapping_key_mask, 56))
		return set_validity_icpt(scb_s, 0x0035U);

	scb_s->ecb3 |= ecb3_flags;
	scb_s->ecd |= ecd_flags;

	/* xor both blocks in one run */
	b1 = (unsigned long *) vsie_page->crycb.dea_wrapping_key_mask;
	b2 = (unsigned long *)
			    vcpu->kvm->arch.crypto.crycb->dea_wrapping_key_mask;
	/* as 56%8 == 0, bitmap_xor won't overwrite any data */
	bitmap_xor(b1, b1, b2, BITS_PER_BYTE * 56);
end:
	switch (ret) {
	case -EINVAL:
		return set_validity_icpt(scb_s, 0x0022U);
	case -EFAULT:
		return set_validity_icpt(scb_s, 0x0035U);
	case -EACCES:
		return set_validity_icpt(scb_s, 0x003CU);
	}
	scb_s->crycbd = ((__u32)(__u64) &vsie_page->crycb) | CRYCB_FORMAT2;
	return 0;
}

/* shadow (round up/down) the ibc to avoid validity icpt */
static void prepare_ibc(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
	/* READ_ONCE does not work on bitfields - use a temporary variable */
	const uint32_t __new_ibc = scb_o->ibc;
	const uint32_t new_ibc = READ_ONCE(__new_ibc) & 0x0fffU;
	__u64 min_ibc = (sclp.ibc >> 16) & 0x0fffU;

	scb_s->ibc = 0;
	/* ibc installed in g2 and requested for g3 */
	if (vcpu->kvm->arch.model.ibc && new_ibc) {
		scb_s->ibc = new_ibc;
		/* take care of the minimum ibc level of the machine */
		if (scb_s->ibc < min_ibc)
			scb_s->ibc = min_ibc;
		/* take care of the maximum ibc level set for the guest */
		if (scb_s->ibc > vcpu->kvm->arch.model.ibc)
			scb_s->ibc = vcpu->kvm->arch.model.ibc;
	}
}
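
/*
 * Illustrative example for prepare_ibc() (the values are made up): with
 * a machine minimum IBC of 0x901 and a guest 2 model IBC of 0xb02, a
 * guest 3 request of 0x801 is raised to 0x901 and a request of 0xc01 is
 * lowered to 0xb02.
 */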

/* unshadow the scb, copying parameters back to the real scb */
static void unshadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;

	/* interception */
	scb_o->icptcode = scb_s->icptcode;
	scb_o->icptstatus = scb_s->icptstatus;
	scb_o->ipa = scb_s->ipa;
	scb_o->ipb = scb_s->ipb;
	scb_o->gbea = scb_s->gbea;

	/* timer */
	scb_o->cputm = scb_s->cputm;
	scb_o->ckc = scb_s->ckc;
	scb_o->todpr = scb_s->todpr;

	/* guest state */
	scb_o->gpsw = scb_s->gpsw;
	scb_o->gg14 = scb_s->gg14;
	scb_o->gg15 = scb_s->gg15;
	memcpy(scb_o->gcr, scb_s->gcr, 128);
	scb_o->pp = scb_s->pp;

	/* branch prediction */
	if (test_kvm_facility(vcpu->kvm, 82)) {
		scb_o->fpf &= ~FPF_BPBC;
		scb_o->fpf |= scb_s->fpf & FPF_BPBC;
	}

	/* interrupt intercept */
	switch (scb_s->icptcode) {
	case ICPT_PROGI:
	case ICPT_INSTPROGI:
	case ICPT_EXTINT:
		memcpy((void *)((u64)scb_o + 0xc0),
		       (void *)((u64)scb_s + 0xc0), 0xf0 - 0xc0);
		break;
	}

	if (scb_s->ihcpu != 0xffffU)
		scb_o->ihcpu = scb_s->ihcpu;
}

/*
 * Setup the shadow scb by copying and checking the relevant parts of the g2
 * provided scb.
 *
 * Returns: - 0 if the scb has been shadowed
 *          - > 0 if control has to be given to guest 2
 */
static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	/* READ_ONCE does not work on bitfields - use a temporary variable */
	const uint32_t __new_prefix = scb_o->prefix;
	const uint32_t new_prefix = READ_ONCE(__new_prefix);
	const bool wants_tx = READ_ONCE(scb_o->ecb) & ECB_TE;
	bool had_tx = scb_s->ecb & ECB_TE;
	unsigned long new_mso = 0;
	int rc;

	/* make sure we don't have any leftovers when reusing the scb */
	scb_s->icptcode = 0;
	scb_s->eca = 0;
	scb_s->ecb = 0;
	scb_s->ecb2 = 0;
	scb_s->ecb3 = 0;
	scb_s->ecd = 0;
	scb_s->fac = 0;
	scb_s->fpf = 0;

	rc = prepare_cpuflags(vcpu, vsie_page);
	if (rc)
		goto out;

	/* timer */
	scb_s->cputm = scb_o->cputm;
	scb_s->ckc = scb_o->ckc;
	scb_s->todpr = scb_o->todpr;
	scb_s->epoch = scb_o->epoch;

	/* guest state */
	scb_s->gpsw = scb_o->gpsw;
	scb_s->gg14 = scb_o->gg14;
	scb_s->gg15 = scb_o->gg15;
	memcpy(scb_s->gcr, scb_o->gcr, 128);
	scb_s->pp = scb_o->pp;

	/* interception / execution handling */
	scb_s->gbea = scb_o->gbea;
	scb_s->lctl = scb_o->lctl;
	scb_s->svcc = scb_o->svcc;
	scb_s->ictl = scb_o->ictl;
	/*
	 * SKEY handling functions can't deal with false setting of PTE invalid
	 * bits. Therefore we cannot provide interpretation and would later
	 * have to provide own emulation handlers.
	 */
	if (!(atomic_read(&scb_s->cpuflags) & CPUSTAT_KSS))
		scb_s->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;

	scb_s->icpua = scb_o->icpua;

	if (!(atomic_read(&scb_s->cpuflags) & CPUSTAT_SM))
		new_mso = READ_ONCE(scb_o->mso) & 0xfffffffffff00000UL;
	/* if the hva of the prefix changes, we have to remap the prefix */
	if (scb_s->mso != new_mso || scb_s->prefix != new_prefix)
		prefix_unmapped(vsie_page);
	/* SIE will do mso/msl validity and exception checks for us */
	scb_s->msl = scb_o->msl & 0xfffffffffff00000UL;
	scb_s->mso = new_mso;
	scb_s->prefix = new_prefix;

	/* We definitely have to flush the TLB if this scb never ran */
	if (scb_s->ihcpu != 0xffffU)
		scb_s->ihcpu = scb_o->ihcpu;

	/* MVPG and Protection Exception Interpretation are always available */
	scb_s->eca |= scb_o->eca & (ECA_MVPGI | ECA_PROTEXCI);
	/* Host-protection-interruption introduced with ESOP */
	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_ESOP))
		scb_s->ecb |= scb_o->ecb & ECB_HOSTPROTINT;
	/* transactional execution */
	if (test_kvm_facility(vcpu->kvm, 73) && wants_tx) {
		/* remap the prefix if tx is toggled on */
		if (!had_tx)
			prefix_unmapped(vsie_page);
		scb_s->ecb |= ECB_TE;
	}
	/* branch prediction */
	if (test_kvm_facility(vcpu->kvm, 82))
		scb_s->fpf |= scb_o->fpf & FPF_BPBC;
	/* SIMD */
	if (test_kvm_facility(vcpu->kvm, 129)) {
		scb_s->eca |= scb_o->eca & ECA_VX;
		scb_s->ecd |= scb_o->ecd & ECD_HOSTREGMGMT;
	}
	/* Run-time-Instrumentation */
	if (test_kvm_facility(vcpu->kvm, 64))
		scb_s->ecb3 |= scb_o->ecb3 & ECB3_RI;
	/* Instruction Execution Prevention */
	if (test_kvm_facility(vcpu->kvm, 130))
		scb_s->ecb2 |= scb_o->ecb2 & ECB2_IEP;
	/* Guarded Storage */
	if (test_kvm_facility(vcpu->kvm, 133)) {
		scb_s->ecb |= scb_o->ecb & ECB_GS;
		scb_s->ecd |= scb_o->ecd & ECD_HOSTREGMGMT;
	}
	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_SIIF))
		scb_s->eca |= scb_o->eca & ECA_SII;
	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_IB))
		scb_s->eca |= scb_o->eca & ECA_IB;
	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_CEI))
		scb_s->eca |= scb_o->eca & ECA_CEI;
	/* Epoch Extension */
	if (test_kvm_facility(vcpu->kvm, 139)) {
		scb_s->ecd |= scb_o->ecd & ECD_MEF;
		scb_s->epdx = scb_o->epdx;
	}

	/* etoken */
	if (test_kvm_facility(vcpu->kvm, 156))
		scb_s->ecd |= scb_o->ecd & ECD_ETOKENF;

	scb_s->hpid = HPID_VSIE;
	scb_s->cpnc = scb_o->cpnc;

	prepare_ibc(vcpu, vsie_page);
	rc = shadow_crycb(vcpu, vsie_page);
out:
	if (rc)
		unshadow_scb(vcpu, vsie_page);
	return rc;
}

void kvm_s390_vsie_gmap_notifier(struct gmap *gmap, unsigned long start,
				 unsigned long end)
{
	struct kvm *kvm = gmap->private;
	struct vsie_page *cur;
	unsigned long prefix;
	struct page *page;
	int i;

	if (!gmap_is_shadow(gmap))
		return;
	if (start >= 1UL << 31)
		/* We are only interested in prefix pages */
		return;

	/*
	 * Only new shadow blocks are added to the list during runtime,
	 * therefore we can safely reference them all the time.
	 */
	for (i = 0; i < kvm->arch.vsie.page_count; i++) {
		page = READ_ONCE(kvm->arch.vsie.pages[i]);
		if (!page)
			continue;
		cur = page_to_virt(page);
		if (READ_ONCE(cur->gmap) != gmap)
			continue;
		prefix = cur->scb_s.prefix << GUEST_PREFIX_SHIFT;
		/* with mso/msl, the prefix lies at an offset */
		prefix += cur->scb_s.mso;
		if (prefix <= end && start <= prefix + 2 * PAGE_SIZE - 1)
			prefix_unmapped_sync(cur);
	}
}
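
/*
 * Note: the notifier above checks for an overlap with the range
 * prefix ... prefix + 2 * PAGE_SIZE - 1 because map_prefix() shadows a
 * second page when transactional execution (ECB_TE) is enabled.
 */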

/*
 * Map the first prefix page and if tx is enabled also the second prefix page.
 *
 * The prefix will be protected, a gmap notifier will inform about unmaps.
 * The shadow scb must not be executed until the prefix is remapped, this is
 * guaranteed by properly handling PROG_REQUEST.
 *
 * Returns: - 0 if successfully mapped or already mapped
 *          - > 0 if control has to be given to guest 2
 *          - -EAGAIN if the caller can retry immediately
 *          - -ENOMEM if out of memory
 */
static int map_prefix(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	u64 prefix = scb_s->prefix << GUEST_PREFIX_SHIFT;
	int rc;

	if (prefix_is_mapped(vsie_page))
		return 0;

	/* mark it as mapped so we can catch any concurrent unmappers */
	prefix_mapped(vsie_page);

	/* with mso/msl, the prefix lies at offset *mso* */
	prefix += scb_s->mso;

	rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, prefix, NULL);
	if (!rc && (scb_s->ecb & ECB_TE))
		rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
					   prefix + PAGE_SIZE, NULL);
	/*
	 * We don't have to mprotect, we will be called for all unshadows.
	 * SIE will detect if protection applies and trigger a validity.
	 */
	if (rc)
		prefix_unmapped(vsie_page);
	if (rc > 0 || rc == -EFAULT)
		rc = set_validity_icpt(scb_s, 0x0037U);
	return rc;
}

/*
 * Pin the guest page given by gpa and set hpa to the pinned host address.
 * Will always be pinned writable.
 *
 * Returns: - 0 on success
 *          - -EINVAL if the gpa is not valid guest storage
 */
static int pin_guest_page(struct kvm *kvm, gpa_t gpa, hpa_t *hpa)
{
	struct page *page;

	page = gfn_to_page(kvm, gpa_to_gfn(gpa));
	if (is_error_page(page))
		return -EINVAL;
	*hpa = (hpa_t) page_to_virt(page) + (gpa & ~PAGE_MASK);
	return 0;
}

/* Unpins a page previously pinned via pin_guest_page, marking it as dirty. */
static void unpin_guest_page(struct kvm *kvm, gpa_t gpa, hpa_t hpa)
{
	kvm_release_pfn_dirty(hpa >> PAGE_SHIFT);
	/* mark the page always as dirty for migration */
	mark_page_dirty(kvm, gpa_to_gfn(gpa));
}

/* unpin all blocks previously pinned by pin_blocks(), marking them dirty */
static void unpin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	hpa_t hpa;

	hpa = (u64) scb_s->scaoh << 32 | scb_s->scaol;
	if (hpa) {
		unpin_guest_page(vcpu->kvm, vsie_page->sca_gpa, hpa);
		vsie_page->sca_gpa = 0;
		scb_s->scaol = 0;
		scb_s->scaoh = 0;
	}

	hpa = scb_s->itdba;
	if (hpa) {
		unpin_guest_page(vcpu->kvm, vsie_page->itdba_gpa, hpa);
		vsie_page->itdba_gpa = 0;
		scb_s->itdba = 0;
	}

	hpa = scb_s->gvrd;
	if (hpa) {
		unpin_guest_page(vcpu->kvm, vsie_page->gvrd_gpa, hpa);
		vsie_page->gvrd_gpa = 0;
		scb_s->gvrd = 0;
	}

	hpa = scb_s->riccbd;
	if (hpa) {
		unpin_guest_page(vcpu->kvm, vsie_page->riccbd_gpa, hpa);
		vsie_page->riccbd_gpa = 0;
		scb_s->riccbd = 0;
	}

	hpa = scb_s->sdnxo;
	if (hpa) {
		unpin_guest_page(vcpu->kvm, vsie_page->sdnx_gpa, hpa);
		vsie_page->sdnx_gpa = 0;
		scb_s->sdnxo = 0;
	}
}

/*
 * Instead of shadowing some blocks, we can simply forward them because the
 * addresses in the scb are 64 bit long.
 *
 * This works as long as the data lies in one page. If blocks ever exceed one
 * page, we have to fall back to shadowing.
 *
 * As we reuse the sca, the vcpu pointers contained in it are invalid. We must
 * therefore not enable any facilities that access these pointers (e.g. SIGPIF).
 *
 * Returns: - 0 if all blocks were pinned.
 *          - > 0 if control has to be given to guest 2
 *          - -ENOMEM if out of memory
 */
static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	hpa_t hpa;
	gpa_t gpa;
	int rc = 0;

	gpa = READ_ONCE(scb_o->scaol) & ~0xfUL;
	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_64BSCAO))
		gpa |= (u64) READ_ONCE(scb_o->scaoh) << 32;
	if (gpa) {
		if (gpa < 2 * PAGE_SIZE)
			rc = set_validity_icpt(scb_s, 0x0038U);
		else if ((gpa & ~0x1fffUL) == kvm_s390_get_prefix(vcpu))
			rc = set_validity_icpt(scb_s, 0x0011U);
		else if ((gpa & PAGE_MASK) !=
			 ((gpa + sizeof(struct bsca_block) - 1) & PAGE_MASK))
			rc = set_validity_icpt(scb_s, 0x003bU);
		if (!rc) {
			rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
			if (rc)
				rc = set_validity_icpt(scb_s, 0x0034U);
		}
		if (rc)
			goto unpin;
		vsie_page->sca_gpa = gpa;
		scb_s->scaoh = (u32)((u64)hpa >> 32);
		scb_s->scaol = (u32)(u64)hpa;
	}

	gpa = READ_ONCE(scb_o->itdba) & ~0xffUL;
	if (gpa && (scb_s->ecb & ECB_TE)) {
		if (gpa < 2 * PAGE_SIZE) {
			rc = set_validity_icpt(scb_s, 0x0080U);
			goto unpin;
		}
		/* 256 bytes cannot cross page boundaries */
		rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
		if (rc) {
			rc = set_validity_icpt(scb_s, 0x0080U);
			goto unpin;
		}
		vsie_page->itdba_gpa = gpa;
		scb_s->itdba = hpa;
	}

	gpa = READ_ONCE(scb_o->gvrd) & ~0x1ffUL;
	if (gpa && (scb_s->eca & ECA_VX) && !(scb_s->ecd & ECD_HOSTREGMGMT)) {
		if (gpa < 2 * PAGE_SIZE) {
			rc = set_validity_icpt(scb_s, 0x1310U);
			goto unpin;
		}
		/*
		 * 512 bytes vector registers cannot cross page boundaries;
		 * if this block gets bigger, we have to shadow it.
		 */
		rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
		if (rc) {
			rc = set_validity_icpt(scb_s, 0x1310U);
			goto unpin;
		}
		vsie_page->gvrd_gpa = gpa;
		scb_s->gvrd = hpa;
	}

	gpa = READ_ONCE(scb_o->riccbd) & ~0x3fUL;
	if (gpa && (scb_s->ecb3 & ECB3_RI)) {
		if (gpa < 2 * PAGE_SIZE) {
			rc = set_validity_icpt(scb_s, 0x0043U);
			goto unpin;
		}
		/* 64 bytes cannot cross page boundaries */
		rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
		if (rc) {
			rc = set_validity_icpt(scb_s, 0x0043U);
			goto unpin;
		}
		/* Validity 0x0044 will be checked by SIE */
		vsie_page->riccbd_gpa = gpa;
		scb_s->riccbd = hpa;
	}
	if (((scb_s->ecb & ECB_GS) && !(scb_s->ecd & ECD_HOSTREGMGMT)) ||
	    (scb_s->ecd & ECD_ETOKENF)) {
		unsigned long sdnxc;

		gpa = READ_ONCE(scb_o->sdnxo) & ~0xfUL;
		sdnxc = READ_ONCE(scb_o->sdnxo) & 0xfUL;
		if (!gpa || gpa < 2 * PAGE_SIZE) {
			rc = set_validity_icpt(scb_s, 0x10b0U);
			goto unpin;
		}
		if (sdnxc < 6 || sdnxc > 12) {
			rc = set_validity_icpt(scb_s, 0x10b1U);
			goto unpin;
		}
		if (gpa & ((1 << sdnxc) - 1)) {
			rc = set_validity_icpt(scb_s, 0x10b2U);
			goto unpin;
		}
		/*
		 * Due to alignment rules (checked above) this cannot
		 * cross page boundaries.
		 */
		rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
		if (rc) {
			rc = set_validity_icpt(scb_s, 0x10b0U);
			goto unpin;
		}
		vsie_page->sdnx_gpa = gpa;
		scb_s->sdnxo = hpa | sdnxc;
	}
	return 0;
unpin:
	unpin_blocks(vcpu, vsie_page);
	return rc;
}
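
/*
 * Overview of the satellite blocks forwarded above (sizes as noted in
 * the checks): the SCA (sizeof(struct bsca_block)), the 256 byte ITDB,
 * the 512 byte vector register save area, the 64 byte RICCB and the
 * SDNX (2^sdnxc bytes, with sdnxc between 6 and 12). Each is pinned and
 * passed on by host address instead of being shadowed.
 */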

/* unpin the scb provided by guest 2, marking it as dirty */
static void unpin_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page,
		      gpa_t gpa)
{
	hpa_t hpa = (hpa_t) vsie_page->scb_o;

	if (hpa)
		unpin_guest_page(vcpu->kvm, gpa, hpa);
	vsie_page->scb_o = NULL;
}

/*
 * Pin the scb at gpa provided by guest 2 at vsie_page->scb_o.
 *
 * Returns: - 0 if the scb was pinned.
 *          - > 0 if control has to be given to guest 2
 */
static int pin_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page,
		   gpa_t gpa)
{
	hpa_t hpa;
	int rc;

	rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
	if (rc) {
		rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		WARN_ON_ONCE(rc);
		return 1;
	}
	vsie_page->scb_o = (struct kvm_s390_sie_block *) hpa;
	return 0;
}

/*
 * Inject a fault into guest 2.
 *
 * Returns: - > 0 if control has to be given to guest 2
 *            < 0 if an error occurred during injection.
 */
static int inject_fault(struct kvm_vcpu *vcpu, __u16 code, __u64 vaddr,
			bool write_flag)
{
	struct kvm_s390_pgm_info pgm = {
		.code = code,
		.trans_exc_code =
			/* 0-51: virtual address */
			(vaddr & 0xfffffffffffff000UL) |
			/* 52-53: store / fetch */
			(((unsigned int) !write_flag) + 1) << 10,
			/* 62-63: asce id (always primary == 0) */
		.exc_access_id = 0, /* always primary */
		.op_access_id = 0, /* not MVPG */
	};
	int rc;

	if (code == PGM_PROTECTION)
		pgm.trans_exc_code |= 0x4UL;

	rc = kvm_s390_inject_prog_irq(vcpu, &pgm);
	return rc ? rc : 1;
}
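
/*
 * Worked example for inject_fault() (illustrative values): a write
 * protection fault at virtual address 0x20000 yields trans_exc_code =
 * 0x20000 | (1 << 10) | 0x4 = 0x20404 - bits 0-51 carry the page
 * address, bits 52-53 distinguish store from fetch, and 0x4 flags the
 * protection exception.
 */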

/*
 * Handle a fault during vsie execution on a gmap shadow.
 *
 * Returns: - 0 if the fault was resolved
 *          - > 0 if control has to be given to guest 2
 *          - < 0 if an error occurred
 */
static int handle_fault(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	int rc;

	if (current->thread.gmap_int_code == PGM_PROTECTION)
		/* we can directly forward all protection exceptions */
		return inject_fault(vcpu, PGM_PROTECTION,
				    current->thread.gmap_addr, 1);

	rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
				   current->thread.gmap_addr, NULL);
	if (rc > 0) {
		rc = inject_fault(vcpu, rc,
				  current->thread.gmap_addr,
				  current->thread.gmap_write_flag);
		if (rc >= 0)
			vsie_page->fault_addr = current->thread.gmap_addr;
	}
	return rc;
}

/*
 * Retry the previous fault that required guest 2 intervention. This avoids
 * one superfluous SIE re-entry and direct exit.
 *
 * Will ignore any errors. The next SIE fault will do proper fault handling.
 */
static void handle_last_fault(struct kvm_vcpu *vcpu,
			      struct vsie_page *vsie_page)
{
	if (vsie_page->fault_addr)
		kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
				      vsie_page->fault_addr, NULL);
	vsie_page->fault_addr = 0;
}

static inline void clear_vsie_icpt(struct vsie_page *vsie_page)
{
	vsie_page->scb_s.icptcode = 0;
}

/* rewind the psw and clear the vsie icpt, so we can retry execution */
static void retry_vsie_icpt(struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	int ilen = insn_length(scb_s->ipa >> 8);

	/* take care of EXECUTE instructions */
	if (scb_s->icptstatus & 1) {
		ilen = (scb_s->icptstatus >> 4) & 0x6;
		if (!ilen)
			ilen = 4;
	}
	scb_s->gpsw.addr = __rewind_psw(scb_s->gpsw, ilen);
	clear_vsie_icpt(vsie_page);
}

/*
 * Try to shadow + enable the guest 2 provided facility list.
 * Retry instruction execution if enabled for and provided by guest 2.
 *
 * Returns: - 0 if handled (retry or guest 2 icpt)
 *          - > 0 if control has to be given to guest 2
 */
static int handle_stfle(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	__u32 fac = READ_ONCE(vsie_page->scb_o->fac) & 0x7ffffff8U;

	if (fac && test_kvm_facility(vcpu->kvm, 7)) {
		retry_vsie_icpt(vsie_page);
		if (read_guest_real(vcpu, fac, &vsie_page->fac,
				    sizeof(vsie_page->fac)))
			return set_validity_icpt(scb_s, 0x1090U);
		scb_s->fac = (__u32)(__u64) &vsie_page->fac;
	}
	return 0;
}

/*
 * Get a register for a nested guest.
 * @vcpu: the vcpu of the guest
 * @vsie_page: the vsie_page for the nested guest
 * @reg: the register number, the upper 4 bits are ignored.
 * returns: the value of the register.
 */
static u64 vsie_get_register(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page, u8 reg)
{
	/* no need to validate the parameter and/or perform error handling */
	reg &= 0xf;
	switch (reg) {
	case 15:
		return vsie_page->scb_s.gg15;
	case 14:
		return vsie_page->scb_s.gg14;
	default:
		return vcpu->run->s.regs.gprs[reg];
	}
}

static int vsie_handle_mvpg(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	unsigned long pei_dest, pei_src, src, dest, mask, prefix;
	u64 *pei_block = &vsie_page->scb_o->mcic;
	int edat, rc_dest, rc_src;
	union ctlreg0 cr0;

	cr0.val = vcpu->arch.sie_block->gcr[0];
	edat = cr0.edat && test_kvm_facility(vcpu->kvm, 8);
	mask = _kvm_s390_logical_to_effective(&scb_s->gpsw, PAGE_MASK);
	prefix = scb_s->prefix << GUEST_PREFIX_SHIFT;

	dest = vsie_get_register(vcpu, vsie_page, scb_s->ipb >> 20) & mask;
	dest = _kvm_s390_real_to_abs(prefix, dest) + scb_s->mso;
	src = vsie_get_register(vcpu, vsie_page, scb_s->ipb >> 16) & mask;
	src = _kvm_s390_real_to_abs(prefix, src) + scb_s->mso;

	rc_dest = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, dest, &pei_dest);
	rc_src = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, src, &pei_src);
	/*
	 * Either everything went well, or something non-critical went wrong
	 * e.g. because of a race. In either case, simply retry.
	 */
	if (rc_dest == -EAGAIN || rc_src == -EAGAIN || (!rc_dest && !rc_src)) {
		retry_vsie_icpt(vsie_page);
		return -EAGAIN;
	}
	/* Something more serious went wrong, propagate the error */
	if (rc_dest < 0)
		return rc_dest;
	if (rc_src < 0)
		return rc_src;

	/* The only possible suppressing exception: just deliver it */
	if (rc_dest == PGM_TRANSLATION_SPEC || rc_src == PGM_TRANSLATION_SPEC) {
		clear_vsie_icpt(vsie_page);
		rc_dest = kvm_s390_inject_program_int(vcpu, PGM_TRANSLATION_SPEC);
		WARN_ON_ONCE(rc_dest);
		return 1;
	}

	/*
	 * Forward the PEI intercept to the guest if it was a page fault, or
	 * also for segment and region table faults if EDAT applies.
	 */
	if (edat) {
		rc_dest = rc_dest == PGM_ASCE_TYPE ? rc_dest : 0;
		rc_src = rc_src == PGM_ASCE_TYPE ? rc_src : 0;
	} else {
		rc_dest = rc_dest != PGM_PAGE_TRANSLATION ? rc_dest : 0;
		rc_src = rc_src != PGM_PAGE_TRANSLATION ? rc_src : 0;
	}
	if (!rc_dest && !rc_src) {
		pei_block[0] = pei_dest;
		pei_block[1] = pei_src;
		return 1;
	}

	retry_vsie_icpt(vsie_page);

	/*
	 * The host has edat, and the guest does not, or it was an ASCE type
	 * exception. The host needs to inject the appropriate DAT interrupts
	 * into the guest.
	 */
	if (rc_dest)
		return inject_fault(vcpu, rc_dest, dest, 1);
	return inject_fault(vcpu, rc_src, src, 0);
}

/*
 * Run the vsie on a shadow scb and a shadow gmap, without any further
 * sanity checks, handling SIE faults.
 *
 * Returns: - 0 everything went fine
 *          - > 0 if control has to be given to guest 2
 *          - < 0 if an error occurred
 */
static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
	__releases(vcpu->kvm->srcu)
	__acquires(vcpu->kvm->srcu)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
	int guest_bp_isolation;
	int rc = 0;

	handle_last_fault(vcpu, vsie_page);

	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);

	/* save current guest state of bp isolation override */
	guest_bp_isolation = test_thread_flag(TIF_ISOLATE_BP_GUEST);

	/*
	 * The guest is running with BPBC, so we have to force it on for our
	 * nested guest. This is done by enabling BPBC globally, so the BPBC
	 * control in the SCB (which the nested guest can modify) is simply
	 * ignored.
	 */
	if (test_kvm_facility(vcpu->kvm, 82) &&
	    vcpu->arch.sie_block->fpf & FPF_BPBC)
		set_thread_flag(TIF_ISOLATE_BP_GUEST);

	local_irq_disable();
	guest_enter_irqoff();
	local_irq_enable();

	/*
	 * Simulate a SIE entry of the VCPU (see sie64a), so VCPU blocking
	 * and VCPU requests also hinder the vSIE from running and lead
	 * to an immediate exit. kvm_s390_vsie_kick() has to be used to
	 * also kick the vSIE.
	 */
	vcpu->arch.sie_block->prog0c |= PROG_IN_SIE;
	barrier();
	if (!kvm_s390_vcpu_sie_inhibited(vcpu))
		rc = sie64a(scb_s, vcpu->run->s.regs.gprs);
	barrier();
	vcpu->arch.sie_block->prog0c &= ~PROG_IN_SIE;

	local_irq_disable();
	guest_exit_irqoff();
	local_irq_enable();

	/* restore guest state for bp isolation override */
	if (!guest_bp_isolation)
		clear_thread_flag(TIF_ISOLATE_BP_GUEST);

	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	if (rc == -EINTR) {
		VCPU_EVENT(vcpu, 3, "%s", "machine check");
		kvm_s390_reinject_machine_check(vcpu, &vsie_page->mcck_info);
		return 0;
	}

	if (rc > 0)
		rc = 0; /* we could still have an icpt */
	else if (rc == -EFAULT)
		return handle_fault(vcpu, vsie_page);

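	/* ipa 0xb2b0 is the STFLE opcode, ipa 0xb254 the MVPG opcode */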
	switch (scb_s->icptcode) {
	case ICPT_INST:
		if (scb_s->ipa == 0xb2b0)
			rc = handle_stfle(vcpu, vsie_page);
		break;
	case ICPT_STOP:
		/* stop not requested by g2 - must have been a kick */
		if (!(atomic_read(&scb_o->cpuflags) & CPUSTAT_STOP_INT))
			clear_vsie_icpt(vsie_page);
		break;
	case ICPT_VALIDITY:
		if ((scb_s->ipa & 0xf000) != 0xf000)
			scb_s->ipa += 0x1000;
		break;
	case ICPT_PARTEXEC:
		if (scb_s->ipa == 0xb254)
			rc = vsie_handle_mvpg(vcpu, vsie_page);
		break;
	}
	return rc;
}

static void release_gmap_shadow(struct vsie_page *vsie_page)
{
	if (vsie_page->gmap)
		gmap_put(vsie_page->gmap);
	WRITE_ONCE(vsie_page->gmap, NULL);
	prefix_unmapped(vsie_page);
}

static int acquire_gmap_shadow(struct kvm_vcpu *vcpu,
			       struct vsie_page *vsie_page)
{
	unsigned long asce;
	union ctlreg0 cr0;
	struct gmap *gmap;
	int edat;

	asce = vcpu->arch.sie_block->gcr[1];
	cr0.val = vcpu->arch.sie_block->gcr[0];
	edat = cr0.edat && test_kvm_facility(vcpu->kvm, 8);
	edat += edat && test_kvm_facility(vcpu->kvm, 78);

	/*
	 * ASCE or EDAT could have changed since last icpt, or the gmap
	 * we're holding has been unshadowed. If the gmap is still valid,
	 * we can safely reuse it.
	 */
	if (vsie_page->gmap && gmap_shadow_valid(vsie_page->gmap, asce, edat))
		return 0;

	/* release the old shadow - if any, and mark the prefix as unmapped */
	release_gmap_shadow(vsie_page);
	gmap = gmap_shadow(vcpu->arch.gmap, asce, edat);
	if (IS_ERR(gmap))
		return PTR_ERR(gmap);
	gmap->private = vcpu->kvm;
	WRITE_ONCE(vsie_page->gmap, gmap);
	return 0;
}

/*
 * Register the shadow scb at the VCPU, e.g. for kicking out of vsie.
 */
static void register_shadow_scb(struct kvm_vcpu *vcpu,
				struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;

	WRITE_ONCE(vcpu->arch.vsie_block, &vsie_page->scb_s);
	/*
	 * External calls have to lead to a kick of the vcpu and
	 * therefore the vsie -> Simulate Wait state.
	 */
	kvm_s390_set_cpuflags(vcpu, CPUSTAT_WAIT);
	/*
	 * We have to adjust the g3 epoch by the g2 epoch. The epoch will
	 * automatically be adjusted on tod clock changes via kvm_sync_clock.
	 */
	preempt_disable();
	scb_s->epoch += vcpu->kvm->arch.epoch;
1229
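	/*
	 * If the 64-bit epoch addition above wrapped around, carry one
	 * into the epoch extension (epdx).
	 */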
	if (scb_s->ecd & ECD_MEF) {
		scb_s->epdx += vcpu->kvm->arch.epdx;
		if (scb_s->epoch < vcpu->kvm->arch.epoch)
			scb_s->epdx += 1;
	}

	preempt_enable();
}

/*
 * Unregister a shadow scb from a VCPU.
 */
static void unregister_shadow_scb(struct kvm_vcpu *vcpu)
{
	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_WAIT);
	WRITE_ONCE(vcpu->arch.vsie_block, NULL);
}

/*
 * Run the vsie on a shadowed scb, managing the gmap shadow, handling
 * prefix pages and faults.
 *
 * Returns: - 0 if no errors occurred
 *          - > 0 if control has to be given to guest 2
 *          - -ENOMEM if out of memory
 */
static int vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	int rc = 0;

	while (1) {
		rc = acquire_gmap_shadow(vcpu, vsie_page);
		if (!rc)
			rc = map_prefix(vcpu, vsie_page);
		if (!rc) {
			gmap_enable(vsie_page->gmap);
			update_intervention_requests(vsie_page);
			rc = do_vsie_run(vcpu, vsie_page);
			gmap_enable(vcpu->arch.gmap);
		}
		atomic_andnot(PROG_BLOCK_SIE, &scb_s->prog20);

		if (rc == -EAGAIN)
			rc = 0;
		if (rc || scb_s->icptcode || signal_pending(current) ||
		    kvm_s390_vcpu_has_irq(vcpu, 0) ||
		    kvm_s390_vcpu_sie_inhibited(vcpu))
			break;
		cond_resched();
	}

	if (rc == -EFAULT) {
		/*
		 * Addressing exceptions are always presented as intercepts.
		 * As addressing exceptions are suppressing and our guest 3 PSW
		 * points at the responsible instruction, we have to
		 * forward the PSW and set the ilc. If we can't read guest 3
		 * instruction, we can use an arbitrary ilc. Let's always use
		 * ilen = 4 for now, so we can avoid reading in guest 3 virtual
		 * memory. (we could also fake the shadow so the hardware
		 * handles it).
		 */
		scb_s->icptcode = ICPT_PROGI;
		scb_s->iprcc = PGM_ADDRESSING;
		scb_s->pgmilc = 4;
		scb_s->gpsw.addr = __rewind_psw(scb_s->gpsw, 4);
		rc = 1;
	}
	return rc;
}

/*
 * Get or create a vsie page for a scb address.
 *
 * Returns: - address of a vsie page (cached or new one)
 *          - NULL if the same scb address is already used by another VCPU
 *          - ERR_PTR(-ENOMEM) if out of memory
 */
static struct vsie_page *get_vsie_page(struct kvm *kvm, unsigned long addr)
{
	struct vsie_page *vsie_page;
	struct page *page;
	int nr_vcpus;

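	/* the scb address is 512-byte aligned, so addr >> 9 is the tree index */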
	rcu_read_lock();
	page = radix_tree_lookup(&kvm->arch.vsie.addr_to_page, addr >> 9);
	rcu_read_unlock();
	if (page) {
		if (page_ref_inc_return(page) == 2)
			return page_to_virt(page);
		page_ref_dec(page);
	}

	/*
	 * We want at least #online_vcpus shadows, so every VCPU can execute
	 * the VSIE in parallel.
	 */
	nr_vcpus = atomic_read(&kvm->online_vcpus);

	mutex_lock(&kvm->arch.vsie.mutex);
	if (kvm->arch.vsie.page_count < nr_vcpus) {
		page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA);
		if (!page) {
			mutex_unlock(&kvm->arch.vsie.mutex);
			return ERR_PTR(-ENOMEM);
		}
		page_ref_inc(page);
		kvm->arch.vsie.pages[kvm->arch.vsie.page_count] = page;
		kvm->arch.vsie.page_count++;
	} else {
		/* reuse an existing entry that belongs to nobody */
		while (true) {
			page = kvm->arch.vsie.pages[kvm->arch.vsie.next];
			if (page_ref_inc_return(page) == 2)
				break;
			page_ref_dec(page);
			kvm->arch.vsie.next++;
			kvm->arch.vsie.next %= nr_vcpus;
		}
		radix_tree_delete(&kvm->arch.vsie.addr_to_page, page->index >> 9);
	}
	page->index = addr;
	/* double use of the same address */
	if (radix_tree_insert(&kvm->arch.vsie.addr_to_page, addr >> 9, page)) {
		page_ref_dec(page);
		mutex_unlock(&kvm->arch.vsie.mutex);
		return NULL;
	}
	mutex_unlock(&kvm->arch.vsie.mutex);

	vsie_page = page_to_virt(page);
	memset(&vsie_page->scb_s, 0, sizeof(struct kvm_s390_sie_block));
	release_gmap_shadow(vsie_page);
	vsie_page->fault_addr = 0;
	vsie_page->scb_s.ihcpu = 0xffffU;
	return vsie_page;
}

/* put a vsie page acquired via get_vsie_page */
static void put_vsie_page(struct kvm *kvm, struct vsie_page *vsie_page)
{
	struct page *page = pfn_to_page(__pa(vsie_page) >> PAGE_SHIFT);

	page_ref_dec(page);
}

int kvm_s390_handle_vsie(struct kvm_vcpu *vcpu)
{
	struct vsie_page *vsie_page;
	unsigned long scb_addr;
	int rc;

	vcpu->stat.instruction_sie++;
	if (!test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_SIEF2))
		return -EOPNOTSUPP;
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	BUILD_BUG_ON(sizeof(struct vsie_page) != PAGE_SIZE);
	scb_addr = kvm_s390_get_base_disp_s(vcpu, NULL);

	/* 512 byte alignment */
	if (unlikely(scb_addr & 0x1ffUL))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	if (signal_pending(current) || kvm_s390_vcpu_has_irq(vcpu, 0) ||
	    kvm_s390_vcpu_sie_inhibited(vcpu))
		return 0;

	vsie_page = get_vsie_page(vcpu->kvm, scb_addr);
	if (IS_ERR(vsie_page))
		return PTR_ERR(vsie_page);
	else if (!vsie_page)
		/* double use of sie control block - simply do nothing */
		return 0;

	rc = pin_scb(vcpu, vsie_page, scb_addr);
	if (rc)
		goto out_put;
	rc = shadow_scb(vcpu, vsie_page);
	if (rc)
		goto out_unpin_scb;
	rc = pin_blocks(vcpu, vsie_page);
	if (rc)
		goto out_unshadow;
	register_shadow_scb(vcpu, vsie_page);
	rc = vsie_run(vcpu, vsie_page);
	unregister_shadow_scb(vcpu);
	unpin_blocks(vcpu, vsie_page);
out_unshadow:
	unshadow_scb(vcpu, vsie_page);
out_unpin_scb:
	unpin_scb(vcpu, vsie_page, scb_addr);
out_put:
	put_vsie_page(vcpu->kvm, vsie_page);

	return rc < 0 ? rc : 0;
}

/* Init the vsie data structures. To be called when a vm is initialized. */
void kvm_s390_vsie_init(struct kvm *kvm)
{
	mutex_init(&kvm->arch.vsie.mutex);
	INIT_RADIX_TREE(&kvm->arch.vsie.addr_to_page, GFP_KERNEL);
}

/* Destroy the vsie data structures. To be called when a vm is destroyed. */
void kvm_s390_vsie_destroy(struct kvm *kvm)
{
	struct vsie_page *vsie_page;
	struct page *page;
	int i;

	mutex_lock(&kvm->arch.vsie.mutex);
	for (i = 0; i < kvm->arch.vsie.page_count; i++) {
		page = kvm->arch.vsie.pages[i];
		kvm->arch.vsie.pages[i] = NULL;
		vsie_page = page_to_virt(page);
		release_gmap_shadow(vsie_page);
		/* free the radix tree entry */
		radix_tree_delete(&kvm->arch.vsie.addr_to_page, page->index >> 9);
		__free_page(page);
	}
	kvm->arch.vsie.page_count = 0;
	mutex_unlock(&kvm->arch.vsie.mutex);
}

void kvm_s390_vsie_kick(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_sie_block *scb = READ_ONCE(vcpu->arch.vsie_block);

	/*
	 * Even if the VCPU lets go of the shadow sie block reference, it is
	 * still valid in the cache. So we can safely kick it.
	 */
	if (scb) {
		atomic_or(PROG_BLOCK_SIE, &scb->prog20);
		if (scb->prog0c & PROG_IN_SIE)
			atomic_or(CPUSTAT_STOP_INT, &scb->cpuflags);
	}
}