// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include <asm/kvm_book3s.h>
#include <asm/reg.h>
#include <asm/switch_to.h>
#include <asm/time.h>
#include <asm/tm.h>
#include "book3s.h"
#include <asm/asm-prototypes.h>

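/*
 * Extended opcode (XO, bits 21-30) values for primary opcodes 19 and 31,
 * matched against get_xop() below once get_op() has picked the primary
 * opcode. For example, mfmsr encodes as primary opcode 31 with XO 83.
 */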
#define OP_19_XOP_RFID		18
#define OP_19_XOP_RFI		50

#define OP_31_XOP_MFMSR		83
#define OP_31_XOP_MTMSR		146
#define OP_31_XOP_MTMSRD	178
#define OP_31_XOP_MTSR		210
#define OP_31_XOP_MTSRIN	242
#define OP_31_XOP_TLBIEL	274
/* Opcode is officially reserved, reuse it as sc 1 when sc 1 doesn't trap */
#define OP_31_XOP_FAKE_SC1	308
#define OP_31_XOP_SLBMTE	402
#define OP_31_XOP_SLBIE		434
#define OP_31_XOP_SLBIA		498
#define OP_31_XOP_MFSR		595
#define OP_31_XOP_MFSRIN	659
#define OP_31_XOP_DCBA		758
#define OP_31_XOP_SLBMFEV	851
#define OP_31_XOP_EIOIO		854
#define OP_31_XOP_SLBMFEE	915
#define OP_31_XOP_SLBFEE	979

#define OP_31_XOP_TBEGIN	654
#define OP_31_XOP_TABORT	910

#define OP_31_XOP_TRECLAIM	942
#define OP_31_XOP_TRCHKPT	1006

/* DCBZ is actually 1014, but we patch it to 1010 so we get a trap */
#define OP_31_XOP_DCBZ		1010

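/* Primary opcodes of the FP loads/stores decoded in kvmppc_alignment_dar() */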
#define OP_LFS			48
#define OP_LFD			50
#define OP_STFS			52
#define OP_STFD			54

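/* Graphics Quantization Registers, found on Gekko/Broadway style cores */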
#define SPRN_GQR0		912
#define SPRN_GQR1		913
#define SPRN_GQR2		914
#define SPRN_GQR3		915
#define SPRN_GQR4		916
#define SPRN_GQR5		917
#define SPRN_GQR6		918
#define SPRN_GQR7		919

/* Book3S_32 defines mfsrin(v) - but that messes up our abstract
 * function pointers, so let's just disable the define. */
#undef mfsrin

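/* Architected privilege levels: problem state (user), supervisor, hypervisor */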
enum priv_level {
	PRIV_PROBLEM = 0,
	PRIV_SUPER = 1,
	PRIV_HYPER = 2,
};

static bool spr_allowed(struct kvm_vcpu *vcpu, enum priv_level level)
{
	/* PAPR VMs only access supervisor SPRs */
	if (vcpu->arch.papr_enabled && (level > PRIV_SUPER))
		return false;

	/* Limit user space to its own small SPR set */
	if ((kvmppc_get_msr(vcpu) & MSR_PR) && level > PRIV_PROBLEM)
		return false;

	return true;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static inline void kvmppc_copyto_vcpu_tm(struct kvm_vcpu *vcpu)
{
	memcpy(&vcpu->arch.gpr_tm[0], &vcpu->arch.regs.gpr[0],
			sizeof(vcpu->arch.gpr_tm));
	memcpy(&vcpu->arch.fp_tm, &vcpu->arch.fp,
			sizeof(struct thread_fp_state));
	memcpy(&vcpu->arch.vr_tm, &vcpu->arch.vr,
			sizeof(struct thread_vr_state));
	vcpu->arch.ppr_tm = vcpu->arch.ppr;
	vcpu->arch.dscr_tm = vcpu->arch.dscr;
	vcpu->arch.amr_tm = vcpu->arch.amr;
	vcpu->arch.ctr_tm = vcpu->arch.regs.ctr;
	vcpu->arch.tar_tm = vcpu->arch.tar;
	vcpu->arch.lr_tm = vcpu->arch.regs.link;
	vcpu->arch.cr_tm = vcpu->arch.regs.ccr;
	vcpu->arch.xer_tm = vcpu->arch.regs.xer;
	vcpu->arch.vrsave_tm = vcpu->arch.vrsave;
}

static inline void kvmppc_copyfrom_vcpu_tm(struct kvm_vcpu *vcpu)
{
	memcpy(&vcpu->arch.regs.gpr[0], &vcpu->arch.gpr_tm[0],
			sizeof(vcpu->arch.regs.gpr));
	memcpy(&vcpu->arch.fp, &vcpu->arch.fp_tm,
			sizeof(struct thread_fp_state));
	memcpy(&vcpu->arch.vr, &vcpu->arch.vr_tm,
			sizeof(struct thread_vr_state));
	vcpu->arch.ppr = vcpu->arch.ppr_tm;
	vcpu->arch.dscr = vcpu->arch.dscr_tm;
	vcpu->arch.amr = vcpu->arch.amr_tm;
	vcpu->arch.regs.ctr = vcpu->arch.ctr_tm;
	vcpu->arch.tar = vcpu->arch.tar_tm;
	vcpu->arch.regs.link = vcpu->arch.lr_tm;
	vcpu->arch.regs.ccr = vcpu->arch.cr_tm;
	vcpu->arch.regs.xer = vcpu->arch.xer_tm;
	vcpu->arch.vrsave = vcpu->arch.vrsave_tm;
}

static void kvmppc_emulate_treclaim(struct kvm_vcpu *vcpu, int ra_val)
{
	unsigned long guest_msr = kvmppc_get_msr(vcpu);
	int fc_val = ra_val ? ra_val : 1;
	uint64_t texasr;

	/* CR0 = 0 | MSR[TS] | 0 */
	vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & ~(CR0_MASK << CR0_SHIFT)) |
		(((guest_msr & MSR_TS_MASK) >> (MSR_TS_S_LG - 1))
		 << CR0_SHIFT);
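	/*
	 * The shift by (MSR_TS_S_LG - 1) drops the two MSR[TS] bits into
	 * the middle of the four-bit CR0 field, giving the 0 | TS | 0
	 * pattern described above.
	 */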

	preempt_disable();
	tm_enable();
	texasr = mfspr(SPRN_TEXASR);
	kvmppc_save_tm_pr(vcpu);
	kvmppc_copyfrom_vcpu_tm(vcpu);

	/* failure recording depends on Failure Summary bit */
	if (!(texasr & TEXASR_FS)) {
		texasr &= ~TEXASR_FC;
		texasr |= ((u64)fc_val << TEXASR_FC_LG) | TEXASR_FS;

		texasr &= ~(TEXASR_PR | TEXASR_HV);
		if (kvmppc_get_msr(vcpu) & MSR_PR)
			texasr |= TEXASR_PR;

		if (kvmppc_get_msr(vcpu) & MSR_HV)
			texasr |= TEXASR_HV;

		vcpu->arch.texasr = texasr;
		vcpu->arch.tfiar = kvmppc_get_pc(vcpu);
		mtspr(SPRN_TEXASR, texasr);
		mtspr(SPRN_TFIAR, vcpu->arch.tfiar);
	}
	tm_disable();
	/*
	 * treclaim needs to leave us in non-transactional state.
	 */
	guest_msr &= ~(MSR_TS_MASK);
	kvmppc_set_msr(vcpu, guest_msr);
	preempt_enable();

	if (vcpu->arch.shadow_fscr & FSCR_TAR)
		mtspr(SPRN_TAR, vcpu->arch.tar);
}

static void kvmppc_emulate_trchkpt(struct kvm_vcpu *vcpu)
{
	unsigned long guest_msr = kvmppc_get_msr(vcpu);

	preempt_disable();
	/*
	 * We need to flush the FP/VEC/VSX state to the vcpu save area
	 * before copying it.
	 */
	kvmppc_giveup_ext(vcpu, MSR_VSX);
	kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
	kvmppc_copyto_vcpu_tm(vcpu);
	kvmppc_save_tm_sprs(vcpu);

	/*
	 * As a result of trecheckpoint, set TS to suspended.
	 */
	guest_msr &= ~(MSR_TS_MASK);
	guest_msr |= MSR_TS_S;
	kvmppc_set_msr(vcpu, guest_msr);
	kvmppc_restore_tm_pr(vcpu);
	preempt_enable();
}

/* Emulate tabort. in guest privileged state */
void kvmppc_emulate_tabort(struct kvm_vcpu *vcpu, int ra_val)
{
	/* We currently only emulate tabort., not the other tabort
	 * variants, since the kernel has no use for them at present.
	 */
	unsigned long guest_msr = kvmppc_get_msr(vcpu);
	uint64_t org_texasr;

	preempt_disable();
	tm_enable();
	org_texasr = mfspr(SPRN_TEXASR);
	tm_abort(ra_val);

	/* CR0 = 0 | MSR[TS] | 0 */
	vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & ~(CR0_MASK << CR0_SHIFT)) |
		(((guest_msr & MSR_TS_MASK) >> (MSR_TS_S_LG - 1))
		 << CR0_SHIFT);

	vcpu->arch.texasr = mfspr(SPRN_TEXASR);
	/* Failure recording depends on the Failure Summary bit, and
	 * tabort is treated as a nop in non-transactional state.
	 */
	if (!(org_texasr & TEXASR_FS) &&
			MSR_TM_ACTIVE(guest_msr)) {
		vcpu->arch.texasr &= ~(TEXASR_PR | TEXASR_HV);
		if (guest_msr & MSR_PR)
			vcpu->arch.texasr |= TEXASR_PR;

		if (guest_msr & MSR_HV)
			vcpu->arch.texasr |= TEXASR_HV;

		vcpu->arch.tfiar = kvmppc_get_pc(vcpu);
	}
	tm_disable();
	preempt_enable();
}

#endif

int kvmppc_core_emulate_op_pr(struct kvm_vcpu *vcpu,
			      unsigned int inst, int *advance)
{
	int emulated = EMULATE_DONE;
	int rt = get_rt(inst);
	int rs = get_rs(inst);
	int ra = get_ra(inst);
	int rb = get_rb(inst);
	u32 inst_sc = 0x44000002;	/* the encoding of "sc" */

	switch (get_op(inst)) {
	case 0:
		emulated = EMULATE_FAIL;
		if ((kvmppc_get_msr(vcpu) & MSR_LE) &&
		    (inst == swab32(inst_sc))) {
			/*
			 * This is the byte-reversed syscall instruction of our
			 * hypercall handler. Early versions of LE Linux didn't
			 * swap the instructions correctly and ended up
			 * executing illegal instructions.
			 * Just always fail hypercalls on these broken systems.
			 */
			kvmppc_set_gpr(vcpu, 3, EV_UNIMPLEMENTED);
			kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);
			emulated = EMULATE_DONE;
		}
		break;
	case 19:
		switch (get_xop(inst)) {
		case OP_19_XOP_RFID:
		case OP_19_XOP_RFI: {
			unsigned long srr1 = kvmppc_get_srr1(vcpu);
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
			unsigned long cur_msr = kvmppc_get_msr(vcpu);

			/*
			 * Follow the ISA rules for TM state transitions with
			 * TM disabled or in suspended state: a transition
			 * whose target TM state is TM inactive (00) must be
			 * suppressed, so keep TS suspended in that case.
			 */
			if (((cur_msr & MSR_TM) == 0) &&
				((srr1 & MSR_TM) == 0) &&
				MSR_TM_SUSPENDED(cur_msr) &&
				!MSR_TM_ACTIVE(srr1))
				srr1 |= MSR_TS_S;
#endif
			kvmppc_set_pc(vcpu, kvmppc_get_srr0(vcpu));
			kvmppc_set_msr(vcpu, srr1);
			*advance = 0;
			break;
		}

		default:
			emulated = EMULATE_FAIL;
			break;
		}
		break;
	case 31:
		switch (get_xop(inst)) {
		case OP_31_XOP_MFMSR:
			kvmppc_set_gpr(vcpu, rt, kvmppc_get_msr(vcpu));
			break;
		case OP_31_XOP_MTMSRD:
		{
			ulong rs_val = kvmppc_get_gpr(vcpu, rs);
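			/* mtmsrd with L=1 only updates MSR[EE] and MSR[RI] */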
			if (inst & 0x10000) {
				ulong new_msr = kvmppc_get_msr(vcpu);
				new_msr &= ~(MSR_RI | MSR_EE);
				new_msr |= rs_val & (MSR_RI | MSR_EE);
				kvmppc_set_msr_fast(vcpu, new_msr);
			} else
				kvmppc_set_msr(vcpu, rs_val);
			break;
		}
		case OP_31_XOP_MTMSR:
			kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, rs));
			break;
		case OP_31_XOP_MFSR:
		{
			int srnum;

			srnum = kvmppc_get_field(inst, 12 + 32, 15 + 32);
			if (vcpu->arch.mmu.mfsrin) {
				u32 sr;
				sr = vcpu->arch.mmu.mfsrin(vcpu, srnum);
				kvmppc_set_gpr(vcpu, rt, sr);
			}
			break;
		}
		case OP_31_XOP_MFSRIN:
		{
			int srnum;

			srnum = (kvmppc_get_gpr(vcpu, rb) >> 28) & 0xf;
			if (vcpu->arch.mmu.mfsrin) {
				u32 sr;
				sr = vcpu->arch.mmu.mfsrin(vcpu, srnum);
				kvmppc_set_gpr(vcpu, rt, sr);
			}
			break;
		}
		case OP_31_XOP_MTSR:
			vcpu->arch.mmu.mtsrin(vcpu,
				(inst >> 16) & 0xf,
				kvmppc_get_gpr(vcpu, rs));
			break;
		case OP_31_XOP_MTSRIN:
			vcpu->arch.mmu.mtsrin(vcpu,
				(kvmppc_get_gpr(vcpu, rb) >> 28) & 0xf,
				kvmppc_get_gpr(vcpu, rs));
			break;
		case OP_31_XOP_TLBIE:
		case OP_31_XOP_TLBIEL:
		{
			bool large = (inst & 0x00200000) ? true : false;
			ulong addr = kvmppc_get_gpr(vcpu, rb);
			vcpu->arch.mmu.tlbie(vcpu, addr, large);
			break;
		}
#ifdef CONFIG_PPC_BOOK3S_64
		case OP_31_XOP_FAKE_SC1:
		{
			/* SC 1 papr hypercalls */
			ulong cmd = kvmppc_get_gpr(vcpu, 3);
			int i;
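			/* PAPR hypercalls take the hcall number in r3 and up
			 * to nine arguments in r4-r12, copied out below. */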

			if ((kvmppc_get_msr(vcpu) & MSR_PR) ||
			    !vcpu->arch.papr_enabled) {
				emulated = EMULATE_FAIL;
				break;
			}

			if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE)
				break;

			vcpu->run->papr_hcall.nr = cmd;
			for (i = 0; i < 9; ++i) {
				ulong gpr = kvmppc_get_gpr(vcpu, 4 + i);
				vcpu->run->papr_hcall.args[i] = gpr;
			}

			vcpu->run->exit_reason = KVM_EXIT_PAPR_HCALL;
			vcpu->arch.hcall_needed = 1;
			emulated = EMULATE_EXIT_USER;
			break;
		}
#endif
		case OP_31_XOP_EIOIO:
			break;
		case OP_31_XOP_SLBMTE:
			if (!vcpu->arch.mmu.slbmte)
				return EMULATE_FAIL;

			vcpu->arch.mmu.slbmte(vcpu,
					kvmppc_get_gpr(vcpu, rs),
					kvmppc_get_gpr(vcpu, rb));
			break;
		case OP_31_XOP_SLBIE:
			if (!vcpu->arch.mmu.slbie)
				return EMULATE_FAIL;

			vcpu->arch.mmu.slbie(vcpu,
					kvmppc_get_gpr(vcpu, rb));
			break;
		case OP_31_XOP_SLBIA:
			if (!vcpu->arch.mmu.slbia)
				return EMULATE_FAIL;

			vcpu->arch.mmu.slbia(vcpu);
			break;
		case OP_31_XOP_SLBFEE:
			if (!(inst & 1) || !vcpu->arch.mmu.slbfee) {
				return EMULATE_FAIL;
			} else {
				ulong b, t;
				ulong cr = kvmppc_get_cr(vcpu) & ~CR0_MASK;

				b = kvmppc_get_gpr(vcpu, rb);
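				/* slbfee. sets CR0[EQ] on a match (assuming
				 * mmu.slbfee returns 0 when an entry is found) */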
				if (!vcpu->arch.mmu.slbfee(vcpu, b, &t))
					cr |= 2 << CR0_SHIFT;
				kvmppc_set_gpr(vcpu, rt, t);
				/* copy XER[SO] bit to CR0[SO] */
				cr |= (vcpu->arch.regs.xer & 0x80000000) >>
					(31 - CR0_SHIFT);
				kvmppc_set_cr(vcpu, cr);
			}
			break;
		case OP_31_XOP_SLBMFEE:
			if (!vcpu->arch.mmu.slbmfee) {
				emulated = EMULATE_FAIL;
			} else {
				ulong t, rb_val;

				rb_val = kvmppc_get_gpr(vcpu, rb);
				t = vcpu->arch.mmu.slbmfee(vcpu, rb_val);
				kvmppc_set_gpr(vcpu, rt, t);
			}
			break;
		case OP_31_XOP_SLBMFEV:
			if (!vcpu->arch.mmu.slbmfev) {
				emulated = EMULATE_FAIL;
			} else {
				ulong t, rb_val;

				rb_val = kvmppc_get_gpr(vcpu, rb);
				t = vcpu->arch.mmu.slbmfev(vcpu, rb_val);
				kvmppc_set_gpr(vcpu, rt, t);
			}
			break;
		case OP_31_XOP_DCBA:
			/* Gets treated as NOP */
			break;
		case OP_31_XOP_DCBZ:
		{
			ulong rb_val = kvmppc_get_gpr(vcpu, rb);
			ulong ra_val = 0;
			ulong addr, vaddr;
			u32 zeros[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
			u32 dsisr;
			int r;

			if (ra)
				ra_val = kvmppc_get_gpr(vcpu, ra);

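			/* Zero one 32-byte line: dcbz is patched to trap here
			 * (see OP_31_XOP_DCBZ above), and we provide 32-byte
			 * dcbz semantics to the guest. */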
			addr = (ra_val + rb_val) & ~31ULL;
			if (!(kvmppc_get_msr(vcpu) & MSR_SF))
				addr &= 0xffffffff;
			vaddr = addr;

			r = kvmppc_st(vcpu, &addr, 32, zeros, true);
			if ((r == -ENOENT) || (r == -EPERM)) {
				*advance = 0;
				kvmppc_set_dar(vcpu, vaddr);
				vcpu->arch.fault_dar = vaddr;

				dsisr = DSISR_ISSTORE;
				if (r == -ENOENT)
					dsisr |= DSISR_NOHPTE;
				else if (r == -EPERM)
					dsisr |= DSISR_PROTFAULT;

				kvmppc_set_dsisr(vcpu, dsisr);
				vcpu->arch.fault_dsisr = dsisr;

				kvmppc_book3s_queue_irqprio(vcpu,
					BOOK3S_INTERRUPT_DATA_STORAGE);
			}

			break;
		}
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
		case OP_31_XOP_TBEGIN:
		{
			if (!cpu_has_feature(CPU_FTR_TM))
				break;

			if (!(kvmppc_get_msr(vcpu) & MSR_TM)) {
				kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG);
				emulated = EMULATE_AGAIN;
				break;
			}

			if (!(kvmppc_get_msr(vcpu) & MSR_PR)) {
				preempt_disable();
				vcpu->arch.regs.ccr = (CR0_TBEGIN_FAILURE |
				  (vcpu->arch.regs.ccr & ~(CR0_MASK << CR0_SHIFT)));

				vcpu->arch.texasr = (TEXASR_FS | TEXASR_EXACT |
					(((u64)(TM_CAUSE_EMULATE | TM_CAUSE_PERSISTENT))
						 << TEXASR_FC_LG));

				if ((inst >> 21) & 0x1)
					vcpu->arch.texasr |= TEXASR_ROT;

				if (kvmppc_get_msr(vcpu) & MSR_HV)
					vcpu->arch.texasr |= TEXASR_HV;

				vcpu->arch.tfhar = kvmppc_get_pc(vcpu) + 4;
				vcpu->arch.tfiar = kvmppc_get_pc(vcpu);

				kvmppc_restore_tm_sprs(vcpu);
				preempt_enable();
			} else
				emulated = EMULATE_FAIL;
			break;
		}
		case OP_31_XOP_TABORT:
		{
			ulong guest_msr = kvmppc_get_msr(vcpu);
			unsigned long ra_val = 0;

			if (!cpu_has_feature(CPU_FTR_TM))
				break;

			if (!(kvmppc_get_msr(vcpu) & MSR_TM)) {
				kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG);
				emulated = EMULATE_AGAIN;
				break;
			}

			/* We only emulate this for a privileged guest: a
			 * problem state guest can run with TM enabled, so we
			 * don't expect to trap here in that case.
			 */
			WARN_ON(guest_msr & MSR_PR);

			if (ra)
				ra_val = kvmppc_get_gpr(vcpu, ra);

			kvmppc_emulate_tabort(vcpu, ra_val);
			break;
		}
		case OP_31_XOP_TRECLAIM:
		{
			ulong guest_msr = kvmppc_get_msr(vcpu);
			unsigned long ra_val = 0;

			if (!cpu_has_feature(CPU_FTR_TM))
				break;

			if (!(kvmppc_get_msr(vcpu) & MSR_TM)) {
				kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG);
				emulated = EMULATE_AGAIN;
				break;
			}

			/* generate interrupts based on priorities */
			if (guest_msr & MSR_PR) {
				/* Privileged Instruction type Program Interrupt */
				kvmppc_core_queue_program(vcpu, SRR1_PROGPRIV);
				emulated = EMULATE_AGAIN;
				break;
			}

			if (!MSR_TM_ACTIVE(guest_msr)) {
				/* TM bad thing interrupt */
				kvmppc_core_queue_program(vcpu, SRR1_PROGTM);
				emulated = EMULATE_AGAIN;
				break;
			}

			if (ra)
				ra_val = kvmppc_get_gpr(vcpu, ra);
			kvmppc_emulate_treclaim(vcpu, ra_val);
			break;
		}
		case OP_31_XOP_TRCHKPT:
		{
			ulong guest_msr = kvmppc_get_msr(vcpu);
			unsigned long texasr;

			if (!cpu_has_feature(CPU_FTR_TM))
				break;

			if (!(kvmppc_get_msr(vcpu) & MSR_TM)) {
				kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG);
				emulated = EMULATE_AGAIN;
				break;
			}

			/* generate interrupt based on priorities */
			if (guest_msr & MSR_PR) {
				/* Privileged Instruction type Program Interrupt */
				kvmppc_core_queue_program(vcpu, SRR1_PROGPRIV);
				emulated = EMULATE_AGAIN;
				break;
			}

			tm_enable();
			texasr = mfspr(SPRN_TEXASR);
			tm_disable();

			if (MSR_TM_ACTIVE(guest_msr) ||
				!(texasr & (TEXASR_FS))) {
				/* TM bad thing interrupt */
				kvmppc_core_queue_program(vcpu, SRR1_PROGTM);
				emulated = EMULATE_AGAIN;
				break;
			}

			kvmppc_emulate_trchkpt(vcpu);
			break;
		}
#endif
		default:
			emulated = EMULATE_FAIL;
		}
		break;
	default:
		emulated = EMULATE_FAIL;
	}

	if (emulated == EMULATE_FAIL)
		emulated = kvmppc_emulate_paired_single(vcpu);

	return emulated;
}

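/*
 * Update the shadow state for one half of a BAT pair: the upper BAT
 * register holds BEPI/BL/Vs/Vp, the lower one BRPN/WIMG/PP.
 */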
void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat, bool upper,
                    u32 val)
{
	if (upper) {
		/* Upper BAT */
		u32 bl = (val >> 2) & 0x7ff;
		bat->bepi_mask = (~bl << 17);
		bat->bepi = val & 0xfffe0000;
		bat->vs = (val & 2) ? 1 : 0;
		bat->vp = (val & 1) ? 1 : 0;
		bat->raw = (bat->raw & 0xffffffff00000000ULL) | val;
	} else {
		/* Lower BAT */
		bat->brpn = val & 0xfffe0000;
		bat->wimg = (val >> 3) & 0xf;
		bat->pp = val & 3;
		bat->raw = (bat->raw & 0x00000000ffffffffULL) | ((u64)val << 32);
	}
}

static struct kvmppc_bat *kvmppc_find_bat(struct kvm_vcpu *vcpu, int sprn)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
	struct kvmppc_bat *bat;

	switch (sprn) {
	case SPRN_IBAT0U ... SPRN_IBAT3L:
		bat = &vcpu_book3s->ibat[(sprn - SPRN_IBAT0U) / 2];
		break;
	case SPRN_IBAT4U ... SPRN_IBAT7L:
		bat = &vcpu_book3s->ibat[4 + ((sprn - SPRN_IBAT4U) / 2)];
		break;
	case SPRN_DBAT0U ... SPRN_DBAT3L:
		bat = &vcpu_book3s->dbat[(sprn - SPRN_DBAT0U) / 2];
		break;
	case SPRN_DBAT4U ... SPRN_DBAT7L:
		bat = &vcpu_book3s->dbat[4 + ((sprn - SPRN_DBAT4U) / 2)];
		break;
	default:
		BUG();
	}

	return bat;
}

int kvmppc_core_emulate_mtspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
{
	int emulated = EMULATE_DONE;

	switch (sprn) {
	case SPRN_SDR1:
		if (!spr_allowed(vcpu, PRIV_HYPER))
			goto unprivileged;
		to_book3s(vcpu)->sdr1 = spr_val;
		break;
	case SPRN_DSISR:
		kvmppc_set_dsisr(vcpu, spr_val);
		break;
	case SPRN_DAR:
		kvmppc_set_dar(vcpu, spr_val);
		break;
	case SPRN_HIOR:
		to_book3s(vcpu)->hior = spr_val;
		break;
	case SPRN_IBAT0U ... SPRN_IBAT3L:
	case SPRN_IBAT4U ... SPRN_IBAT7L:
	case SPRN_DBAT0U ... SPRN_DBAT3L:
	case SPRN_DBAT4U ... SPRN_DBAT7L:
	{
		struct kvmppc_bat *bat = kvmppc_find_bat(vcpu, sprn);

		kvmppc_set_bat(vcpu, bat, !(sprn % 2), (u32)spr_val);
		/* BAT writes happen so rarely that we're ok to flush
		 * everything here */
		kvmppc_mmu_pte_flush(vcpu, 0, 0);
		kvmppc_mmu_flush_segments(vcpu);
		break;
	}
	case SPRN_HID0:
		to_book3s(vcpu)->hid[0] = spr_val;
		break;
	case SPRN_HID1:
		to_book3s(vcpu)->hid[1] = spr_val;
		break;
	case SPRN_HID2:
		to_book3s(vcpu)->hid[2] = spr_val;
		break;
	case SPRN_HID2_GEKKO:
		to_book3s(vcpu)->hid[2] = spr_val;
		/* HID2.PSE controls paired single on gekko */
		switch (vcpu->arch.pvr) {
		case 0x00080200:	/* lonestar 2.0 */
		case 0x00088202:	/* lonestar 2.2 */
		case 0x70000100:	/* gekko 1.0 */
		case 0x00080100:	/* gekko 2.0 */
		case 0x00083203:	/* gekko 2.3a */
		case 0x00083213:	/* gekko 2.3b */
		case 0x00083204:	/* gekko 2.4 */
		case 0x00083214:	/* gekko 2.4e (8SE) - retail HW2 */
		case 0x00087200:	/* broadway */
			if (vcpu->arch.hflags & BOOK3S_HFLAG_NATIVE_PS) {
				/* Native paired singles */
			} else if (spr_val & (1 << 29)) { /* HID2.PSE */
				vcpu->arch.hflags |= BOOK3S_HFLAG_PAIRED_SINGLE;
				kvmppc_giveup_ext(vcpu, MSR_FP);
			} else {
				vcpu->arch.hflags &= ~BOOK3S_HFLAG_PAIRED_SINGLE;
			}
			break;
		}
		break;
	case SPRN_HID4:
	case SPRN_HID4_GEKKO:
		to_book3s(vcpu)->hid[4] = spr_val;
		break;
	case SPRN_HID5:
		to_book3s(vcpu)->hid[5] = spr_val;
		/* a guest write to HID5 can change is_dcbz32 */
		if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
		    (mfmsr() & MSR_HV))
			vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
		break;
	case SPRN_GQR0:
	case SPRN_GQR1:
	case SPRN_GQR2:
	case SPRN_GQR3:
	case SPRN_GQR4:
	case SPRN_GQR5:
	case SPRN_GQR6:
	case SPRN_GQR7:
		to_book3s(vcpu)->gqr[sprn - SPRN_GQR0] = spr_val;
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case SPRN_FSCR:
		kvmppc_set_fscr(vcpu, spr_val);
		break;
	case SPRN_BESCR:
		vcpu->arch.bescr = spr_val;
		break;
	case SPRN_EBBHR:
		vcpu->arch.ebbhr = spr_val;
		break;
	case SPRN_EBBRR:
		vcpu->arch.ebbrr = spr_val;
		break;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	case SPRN_TFHAR:
	case SPRN_TEXASR:
	case SPRN_TFIAR:
		if (!cpu_has_feature(CPU_FTR_TM))
			break;

		if (!(kvmppc_get_msr(vcpu) & MSR_TM)) {
			kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG);
			emulated = EMULATE_AGAIN;
			break;
		}

		if (MSR_TM_ACTIVE(kvmppc_get_msr(vcpu)) &&
			!((MSR_TM_SUSPENDED(kvmppc_get_msr(vcpu))) &&
					(sprn == SPRN_TFHAR))) {
			/* It is illegal to mtspr() TM regs in any state other
			 * than non-transactional, with the exception of
			 * TFHAR in suspended state.
			 */
			kvmppc_core_queue_program(vcpu, SRR1_PROGTM);
			emulated = EMULATE_AGAIN;
			break;
		}

		tm_enable();
		if (sprn == SPRN_TFHAR)
			mtspr(SPRN_TFHAR, spr_val);
		else if (sprn == SPRN_TEXASR)
			mtspr(SPRN_TEXASR, spr_val);
		else
			mtspr(SPRN_TFIAR, spr_val);
		tm_disable();

		break;
#endif
#endif
	case SPRN_ICTC:
	case SPRN_THRM1:
	case SPRN_THRM2:
	case SPRN_THRM3:
	case SPRN_CTRLF:
	case SPRN_CTRLT:
	case SPRN_L2CR:
	case SPRN_DSCR:
	case SPRN_MMCR0_GEKKO:
	case SPRN_MMCR1_GEKKO:
	case SPRN_PMC1_GEKKO:
	case SPRN_PMC2_GEKKO:
	case SPRN_PMC3_GEKKO:
	case SPRN_PMC4_GEKKO:
	case SPRN_WPAR_GEKKO:
	case SPRN_MSSSR0:
	case SPRN_DABR:
#ifdef CONFIG_PPC_BOOK3S_64
	case SPRN_MMCRS:
	case SPRN_MMCRA:
	case SPRN_MMCR0:
	case SPRN_MMCR1:
	case SPRN_MMCR2:
	case SPRN_UMMCR2:
#endif
		break;
unprivileged:
	default:
		pr_info_ratelimited("KVM: invalid SPR write: %d\n", sprn);
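		/* SPR numbers with 0x10 set are privileged per the usual SPR
		 * numbering convention; others are treated as illegal. */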
		if (sprn & 0x10) {
			if (kvmppc_get_msr(vcpu) & MSR_PR) {
				kvmppc_core_queue_program(vcpu, SRR1_PROGPRIV);
				emulated = EMULATE_AGAIN;
			}
		} else {
			if ((kvmppc_get_msr(vcpu) & MSR_PR) || sprn == 0) {
				kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
				emulated = EMULATE_AGAIN;
			}
		}
		break;
	}

	return emulated;
}

int kvmppc_core_emulate_mfspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
{
	int emulated = EMULATE_DONE;

	switch (sprn) {
	case SPRN_IBAT0U ... SPRN_IBAT3L:
	case SPRN_IBAT4U ... SPRN_IBAT7L:
	case SPRN_DBAT0U ... SPRN_DBAT3L:
	case SPRN_DBAT4U ... SPRN_DBAT7L:
	{
		struct kvmppc_bat *bat = kvmppc_find_bat(vcpu, sprn);

		if (sprn % 2)
			*spr_val = bat->raw >> 32;
		else
			*spr_val = bat->raw;

		break;
	}
	case SPRN_SDR1:
		if (!spr_allowed(vcpu, PRIV_HYPER))
			goto unprivileged;
		*spr_val = to_book3s(vcpu)->sdr1;
		break;
	case SPRN_DSISR:
		*spr_val = kvmppc_get_dsisr(vcpu);
		break;
	case SPRN_DAR:
		*spr_val = kvmppc_get_dar(vcpu);
		break;
	case SPRN_HIOR:
		*spr_val = to_book3s(vcpu)->hior;
		break;
	case SPRN_HID0:
		*spr_val = to_book3s(vcpu)->hid[0];
		break;
	case SPRN_HID1:
		*spr_val = to_book3s(vcpu)->hid[1];
		break;
	case SPRN_HID2:
	case SPRN_HID2_GEKKO:
		*spr_val = to_book3s(vcpu)->hid[2];
		break;
	case SPRN_HID4:
	case SPRN_HID4_GEKKO:
		*spr_val = to_book3s(vcpu)->hid[4];
		break;
	case SPRN_HID5:
		*spr_val = to_book3s(vcpu)->hid[5];
		break;
	case SPRN_CFAR:
	case SPRN_DSCR:
		*spr_val = 0;
		break;
	case SPRN_PURR:
		/*
		 * On exit we would have updated purr, so the cached
		 * value is current.
		 */
		*spr_val = vcpu->arch.purr;
		break;
	case SPRN_SPURR:
		/*
		 * On exit we would have updated spurr, so the cached
		 * value is current.
		 */
		*spr_val = vcpu->arch.spurr;
		break;
	case SPRN_VTB:
		*spr_val = to_book3s(vcpu)->vtb;
		break;
	case SPRN_IC:
		*spr_val = vcpu->arch.ic;
		break;
	case SPRN_GQR0:
	case SPRN_GQR1:
	case SPRN_GQR2:
	case SPRN_GQR3:
	case SPRN_GQR4:
	case SPRN_GQR5:
	case SPRN_GQR6:
	case SPRN_GQR7:
		*spr_val = to_book3s(vcpu)->gqr[sprn - SPRN_GQR0];
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case SPRN_FSCR:
		*spr_val = vcpu->arch.fscr;
		break;
	case SPRN_BESCR:
		*spr_val = vcpu->arch.bescr;
		break;
	case SPRN_EBBHR:
		*spr_val = vcpu->arch.ebbhr;
		break;
	case SPRN_EBBRR:
		*spr_val = vcpu->arch.ebbrr;
		break;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	case SPRN_TFHAR:
	case SPRN_TEXASR:
	case SPRN_TFIAR:
		if (!cpu_has_feature(CPU_FTR_TM))
			break;

		if (!(kvmppc_get_msr(vcpu) & MSR_TM)) {
			kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG);
			emulated = EMULATE_AGAIN;
			break;
		}

		tm_enable();
		if (sprn == SPRN_TFHAR)
			*spr_val = mfspr(SPRN_TFHAR);
		else if (sprn == SPRN_TEXASR)
			*spr_val = mfspr(SPRN_TEXASR);
		else if (sprn == SPRN_TFIAR)
			*spr_val = mfspr(SPRN_TFIAR);
		tm_disable();
		break;
#endif
#endif
	case SPRN_THRM1:
	case SPRN_THRM2:
	case SPRN_THRM3:
	case SPRN_CTRLF:
	case SPRN_CTRLT:
	case SPRN_L2CR:
	case SPRN_MMCR0_GEKKO:
	case SPRN_MMCR1_GEKKO:
	case SPRN_PMC1_GEKKO:
	case SPRN_PMC2_GEKKO:
	case SPRN_PMC3_GEKKO:
	case SPRN_PMC4_GEKKO:
	case SPRN_WPAR_GEKKO:
	case SPRN_MSSSR0:
	case SPRN_DABR:
#ifdef CONFIG_PPC_BOOK3S_64
	case SPRN_MMCRS:
	case SPRN_MMCRA:
	case SPRN_MMCR0:
	case SPRN_MMCR1:
	case SPRN_MMCR2:
	case SPRN_UMMCR2:
	case SPRN_TIR:
#endif
		*spr_val = 0;
		break;
	default:
unprivileged:
		pr_info_ratelimited("KVM: invalid SPR read: %d\n", sprn);
		if (sprn & 0x10) {
			if (kvmppc_get_msr(vcpu) & MSR_PR) {
				kvmppc_core_queue_program(vcpu, SRR1_PROGPRIV);
				emulated = EMULATE_AGAIN;
			}
		} else {
			if ((kvmppc_get_msr(vcpu) & MSR_PR) || sprn == 0 ||
			    sprn == 4 || sprn == 5 || sprn == 6) {
				kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
				emulated = EMULATE_AGAIN;
			}
		}

		break;
	}

	return emulated;
}

u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst)
{
	return make_dsisr(inst);
}

ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst)
{
#ifdef CONFIG_PPC_BOOK3S_64
	/*
	 * Linux's fix_alignment() assumes that DAR is valid, so we can
	 * too.
	 */
	return vcpu->arch.fault_dar;
#else
	ulong dar = 0;
	ulong ra = get_ra(inst);
	ulong rb = get_rb(inst);

	switch (get_op(inst)) {
	case OP_LFS:
	case OP_LFD:
	case OP_STFD:
	case OP_STFS:
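		/* D-form: EA = (ra ? GPR[ra] : 0) + EXTS(d) */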
		if (ra)
			dar = kvmppc_get_gpr(vcpu, ra);
		dar += (s32)((s16)inst);
		break;
	case 31:
		/* X-form: EA = (ra ? GPR[ra] : 0) + GPR[rb] */
		if (ra)
			dar = kvmppc_get_gpr(vcpu, ra);
		dar += kvmppc_get_gpr(vcpu, rb);
		break;
	default:
		printk(KERN_INFO "KVM: Unaligned instruction 0x%x\n", inst);
		break;
	}

	return dar;
#endif
}