// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 * Copyright (c) 2022 Ventana Micro Systems Inc.
 */

#include <linux/bitops.h>
#include <linux/kvm_host.h>

#define INSN_OPCODE_MASK	0x007c
#define INSN_OPCODE_SHIFT	2
#define INSN_OPCODE_SYSTEM	28

#define INSN_MASK_WFI		0xffffffff
#define INSN_MATCH_WFI		0x10500073

#define INSN_MATCH_CSRRW	0x1073
#define INSN_MASK_CSRRW		0x707f
#define INSN_MATCH_CSRRS	0x2073
#define INSN_MASK_CSRRS		0x707f
#define INSN_MATCH_CSRRC	0x3073
#define INSN_MASK_CSRRC		0x707f
#define INSN_MATCH_CSRRWI	0x5073
#define INSN_MASK_CSRRWI	0x707f
#define INSN_MATCH_CSRRSI	0x6073
#define INSN_MASK_CSRRSI	0x707f
#define INSN_MATCH_CSRRCI	0x7073
#define INSN_MASK_CSRRCI	0x707f

#define INSN_MATCH_LB		0x3
#define INSN_MASK_LB		0x707f
#define INSN_MATCH_LH		0x1003
#define INSN_MASK_LH		0x707f
#define INSN_MATCH_LW		0x2003
#define INSN_MASK_LW		0x707f
#define INSN_MATCH_LD		0x3003
#define INSN_MASK_LD		0x707f
#define INSN_MATCH_LBU		0x4003
#define INSN_MASK_LBU		0x707f
#define INSN_MATCH_LHU		0x5003
#define INSN_MASK_LHU		0x707f
#define INSN_MATCH_LWU		0x6003
#define INSN_MASK_LWU		0x707f
#define INSN_MATCH_SB		0x23
#define INSN_MASK_SB		0x707f
#define INSN_MATCH_SH		0x1023
#define INSN_MASK_SH		0x707f
#define INSN_MATCH_SW		0x2023
#define INSN_MASK_SW		0x707f
#define INSN_MATCH_SD		0x3023
#define INSN_MASK_SD		0x707f

#define INSN_MATCH_C_LD		0x6000
#define INSN_MASK_C_LD		0xe003
#define INSN_MATCH_C_SD		0xe000
#define INSN_MASK_C_SD		0xe003
#define INSN_MATCH_C_LW		0x4000
#define INSN_MASK_C_LW		0xe003
#define INSN_MATCH_C_SW		0xc000
#define INSN_MASK_C_SW		0xe003
#define INSN_MATCH_C_LDSP	0x6002
#define INSN_MASK_C_LDSP	0xe003
#define INSN_MATCH_C_SDSP	0xe002
#define INSN_MASK_C_SDSP	0xe003
#define INSN_MATCH_C_LWSP	0x4002
#define INSN_MASK_C_LWSP	0xe003
#define INSN_MATCH_C_SWSP	0xc002
#define INSN_MASK_C_SWSP	0xe003

#define INSN_16BIT_MASK		0x3

#define INSN_IS_16BIT(insn)	(((insn) & INSN_16BIT_MASK) != INSN_16BIT_MASK)

#define INSN_LEN(insn)		(INSN_IS_16BIT(insn) ? 2 : 4)
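
/*
 * An instruction whose two lowest bits are not both 1 is a 16-bit
 * compressed instruction; e.g. wfi (0x10500073) ends in 0b11 and is
 * 4 bytes, while a c.lw encoding such as 0x4588 ends in 0b00 and is 2.
 */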

#ifdef CONFIG_64BIT
#define LOG_REGBYTES		3
#else
#define LOG_REGBYTES		2
#endif
#define REGBYTES		(1 << LOG_REGBYTES)

#define SH_RD			7
#define SH_RS1			15
#define SH_RS2			20
#define SH_RS2C			2
#define MASK_RX			0x1f

#define RV_X(x, s, n)		(((x) >> (s)) & ((1 << (n)) - 1))
#define RVC_LW_IMM(x)		((RV_X(x, 6, 1) << 2) | \
				 (RV_X(x, 10, 3) << 3) | \
				 (RV_X(x, 5, 1) << 6))
#define RVC_LD_IMM(x)		((RV_X(x, 10, 3) << 3) | \
				 (RV_X(x, 5, 2) << 6))
#define RVC_LWSP_IMM(x)		((RV_X(x, 4, 3) << 2) | \
				 (RV_X(x, 12, 1) << 5) | \
				 (RV_X(x, 2, 2) << 6))
#define RVC_LDSP_IMM(x)		((RV_X(x, 5, 2) << 3) | \
				 (RV_X(x, 12, 1) << 5) | \
				 (RV_X(x, 2, 3) << 6))
#define RVC_SWSP_IMM(x)		((RV_X(x, 9, 4) << 2) | \
				 (RV_X(x, 7, 2) << 6))
#define RVC_SDSP_IMM(x)		((RV_X(x, 10, 3) << 3) | \
				 (RV_X(x, 7, 3) << 6))
#define RVC_RS1S(insn)		(8 + RV_X(insn, SH_RD, 3))
#define RVC_RS2S(insn)		(8 + RV_X(insn, SH_RS2C, 3))
#define RVC_RS2(insn)		RV_X(insn, SH_RS2C, 5)
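
/*
 * Worked example (not from this file): "c.lw a0, 8(a1)" encodes as
 * 0x4588. RVC_LW_IMM(0x4588) gathers imm[5:3] from bits [12:10] and
 * yields 8, RVC_RS1S() gives 8 + 3 = 11 (a1), and RVC_RS2S() gives
 * 8 + 2 = 10 (a0), since the 3-bit compressed register fields name
 * x8-x15.
 */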

#define SHIFT_RIGHT(x, y)		\
	((y) < 0 ? ((x) << -(y)) : ((x) >> (y)))

#define REG_MASK			\
	((1 << (5 + LOG_REGBYTES)) - (1 << LOG_REGBYTES))

#define REG_OFFSET(insn, pos)		\
	(SHIFT_RIGHT((insn), (pos) - LOG_REGBYTES) & REG_MASK)

#define REG_PTR(insn, pos, regs)	\
	((ulong *)((ulong)(regs) + REG_OFFSET(insn, pos)))

#define GET_FUNCT3(insn)	(((insn) >> 12) & 7)

#define GET_RS1(insn, regs)	(*REG_PTR(insn, SH_RS1, regs))
#define GET_RS2(insn, regs)	(*REG_PTR(insn, SH_RS2, regs))
#define GET_RS1S(insn, regs)	(*REG_PTR(RVC_RS1S(insn), 0, regs))
#define GET_RS2S(insn, regs)	(*REG_PTR(RVC_RS2S(insn), 0, regs))
#define GET_RS2C(insn, regs)	(*REG_PTR(insn, SH_RS2C, regs))
#define GET_SP(regs)		(*REG_PTR(2, 0, regs))
#define SET_RD(insn, regs, val)	(*REG_PTR(insn, SH_RD, regs) = (val))
#define IMM_I(insn)		((s32)(insn) >> 20)
#define IMM_S(insn)		(((s32)(insn) >> 25 << 5) | \
				 (s32)(((insn) >> 7) & 0x1f))
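
/*
 * Example (assuming the GPRs sit at the start of struct kvm_cpu_context
 * in x0..x31 order, as REG_PTR() requires): for an instruction with
 * rs1 = x11, REG_OFFSET(insn, SH_RS1) evaluates to 11 * REGBYTES, so
 * GET_RS1() reads the ulong at byte offset 88 (RV64) of the saved
 * register file. SHIFT_RIGHT() turns a negative shift count into a left
 * shift, which covers callers that pass a register number with pos = 0
 * (GET_RS1S(), GET_SP()).
 */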

struct insn_func {
	unsigned long mask;
	unsigned long match;
	/*
	 * Possible return values are as follows:
	 * 1) Returns < 0 for error case
	 * 2) Returns 0 for exit to user-space
	 * 3) Returns 1 to continue with next sepc
	 * 4) Returns 2 to continue with same sepc
	 * 5) Returns 3 to inject illegal instruction trap and continue
	 * 6) Returns 4 to inject virtual instruction trap and continue
	 *
	 * Use enum kvm_insn_return for return values
	 */
	int (*func)(struct kvm_vcpu *vcpu, struct kvm_run *run, ulong insn);
};

static int truly_illegal_insn(struct kvm_vcpu *vcpu, struct kvm_run *run,
			      ulong insn)
{
	struct kvm_cpu_trap utrap = { 0 };

	/* Redirect trap to Guest VCPU */
	utrap.sepc = vcpu->arch.guest_context.sepc;
	utrap.scause = EXC_INST_ILLEGAL;
	utrap.stval = insn;
	utrap.htval = 0;
	utrap.htinst = 0;
	kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);

	return 1;
}

static int truly_virtual_insn(struct kvm_vcpu *vcpu, struct kvm_run *run,
			      ulong insn)
{
	struct kvm_cpu_trap utrap = { 0 };

	/* Redirect trap to Guest VCPU */
	utrap.sepc = vcpu->arch.guest_context.sepc;
	utrap.scause = EXC_VIRTUAL_INST_FAULT;
	utrap.stval = insn;
	utrap.htval = 0;
	utrap.htinst = 0;
	kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);

	return 1;
}

/**
 * kvm_riscv_vcpu_wfi -- Emulate wait for interrupt (WFI) behaviour
 *
 * @vcpu: The VCPU pointer
 */
void kvm_riscv_vcpu_wfi(struct kvm_vcpu *vcpu)
{
	if (!kvm_arch_vcpu_runnable(vcpu)) {
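		/*
		 * Blocking in kvm_vcpu_halt() can take arbitrarily long;
		 * temporarily drop the SRCU read lock held across the
		 * VCPU run loop.
		 */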
		kvm_vcpu_srcu_read_unlock(vcpu);
		kvm_vcpu_halt(vcpu);
		kvm_vcpu_srcu_read_lock(vcpu);
	}
}

static int wfi_insn(struct kvm_vcpu *vcpu, struct kvm_run *run, ulong insn)
{
	vcpu->stat.wfi_exit_stat++;
	kvm_riscv_vcpu_wfi(vcpu);
	return KVM_INSN_CONTINUE_NEXT_SEPC;
}

struct csr_func {
	unsigned int base;
	unsigned int count;
	/*
	 * Possible return values are the same as those of the "func"
	 * callback in "struct insn_func".
	 */
	int (*func)(struct kvm_vcpu *vcpu, unsigned int csr_num,
		    unsigned long *val, unsigned long new_val,
		    unsigned long wr_mask);
};

static const struct csr_func csr_funcs[] = {
	KVM_RISCV_VCPU_AIA_CSR_FUNCS
	KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS
};
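
/*
 * The two macros above are expected to expand to { .base, .count, .func }
 * initializers covering the AIA and HPM-counter CSR ranges, letting a
 * single entry service a whole block of CSR numbers.
 */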

/**
 * kvm_riscv_vcpu_csr_return -- Handle CSR read/write after user space
 *				emulation or in-kernel emulation
 *
 * @vcpu: The VCPU pointer
 * @run:  The VCPU run struct containing the CSR data
 *
 * Returns > 0 upon failure and 0 upon success
 */
int kvm_riscv_vcpu_csr_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	ulong insn;

	if (vcpu->arch.csr_decode.return_handled)
		return 0;
	vcpu->arch.csr_decode.return_handled = 1;

	/* Update destination register for CSR reads */
	insn = vcpu->arch.csr_decode.insn;
	if ((insn >> SH_RD) & MASK_RX)
		SET_RD(insn, &vcpu->arch.guest_context,
		       run->riscv_csr.ret_value);

	/* Move to next instruction */
	vcpu->arch.guest_context.sepc += INSN_LEN(insn);

	return 0;
}

static int csr_insn(struct kvm_vcpu *vcpu, struct kvm_run *run, ulong insn)
{
	int i, rc = KVM_INSN_ILLEGAL_TRAP;
	unsigned int csr_num = insn >> SH_RS2;
	unsigned int rs1_num = (insn >> SH_RS1) & MASK_RX;
	ulong rs1_val = GET_RS1(insn, &vcpu->arch.guest_context);
	const struct csr_func *tcfn, *cfn = NULL;
	ulong val = 0, wr_mask = 0, new_val = 0;

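	/*
	 * Each CSR instruction is lowered to a (new_val, wr_mask) pair;
	 * the emulated write is effectively
	 *
	 *	csr = (csr & ~wr_mask) | (new_val & wr_mask);
	 *
	 * so csrrw replaces every bit, csrrs sets the bits given in rs1,
	 * and csrrc clears them. A zero wr_mask requests a pure read.
	 */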
	/* Decode the CSR instruction */
	switch (GET_FUNCT3(insn)) {
	case GET_FUNCT3(INSN_MATCH_CSRRW):
		wr_mask = -1UL;
		new_val = rs1_val;
		break;
	case GET_FUNCT3(INSN_MATCH_CSRRS):
		wr_mask = rs1_val;
		new_val = -1UL;
		break;
	case GET_FUNCT3(INSN_MATCH_CSRRC):
		wr_mask = rs1_val;
		new_val = 0;
		break;
	case GET_FUNCT3(INSN_MATCH_CSRRWI):
		wr_mask = -1UL;
		new_val = rs1_num;
		break;
	case GET_FUNCT3(INSN_MATCH_CSRRSI):
		wr_mask = rs1_num;
		new_val = -1UL;
		break;
	case GET_FUNCT3(INSN_MATCH_CSRRCI):
		wr_mask = rs1_num;
		new_val = 0;
		break;
	default:
		return rc;
	}

	/* Save instruction decode info */
	vcpu->arch.csr_decode.insn = insn;
	vcpu->arch.csr_decode.return_handled = 0;

	/* Update CSR details in kvm_run struct */
	run->riscv_csr.csr_num = csr_num;
	run->riscv_csr.new_value = new_val;
	run->riscv_csr.write_mask = wr_mask;
	run->riscv_csr.ret_value = 0;

	/* Find in-kernel CSR function */
	for (i = 0; i < ARRAY_SIZE(csr_funcs); i++) {
		tcfn = &csr_funcs[i];
		if ((tcfn->base <= csr_num) &&
		    (csr_num < (tcfn->base + tcfn->count))) {
			cfn = tcfn;
			break;
		}
	}

	/* First try in-kernel CSR emulation */
	if (cfn && cfn->func) {
		rc = cfn->func(vcpu, csr_num, &val, new_val, wr_mask);
		if (rc > KVM_INSN_EXIT_TO_USER_SPACE) {
			if (rc == KVM_INSN_CONTINUE_NEXT_SEPC) {
				run->riscv_csr.ret_value = val;
				vcpu->stat.csr_exit_kernel++;
				kvm_riscv_vcpu_csr_return(vcpu, run);
				rc = KVM_INSN_CONTINUE_SAME_SEPC;
			}
			return rc;
		}
	}

	/* Exit to user-space for CSR emulation */
	if (rc <= KVM_INSN_EXIT_TO_USER_SPACE) {
		vcpu->stat.csr_exit_user++;
		run->exit_reason = KVM_EXIT_RISCV_CSR;
	}

	return rc;
}

static const struct insn_func system_opcode_funcs[] = {
	{
		.mask  = INSN_MASK_CSRRW,
		.match = INSN_MATCH_CSRRW,
		.func  = csr_insn,
	},
	{
		.mask  = INSN_MASK_CSRRS,
		.match = INSN_MATCH_CSRRS,
		.func  = csr_insn,
	},
	{
		.mask  = INSN_MASK_CSRRC,
		.match = INSN_MATCH_CSRRC,
		.func  = csr_insn,
	},
	{
		.mask  = INSN_MASK_CSRRWI,
		.match = INSN_MATCH_CSRRWI,
		.func  = csr_insn,
	},
	{
		.mask  = INSN_MASK_CSRRSI,
		.match = INSN_MATCH_CSRRSI,
		.func  = csr_insn,
	},
	{
		.mask  = INSN_MASK_CSRRCI,
		.match = INSN_MATCH_CSRRCI,
		.func  = csr_insn,
	},
	{
		.mask  = INSN_MASK_WFI,
		.match = INSN_MATCH_WFI,
		.func  = wfi_insn,
	},
};

static int system_opcode_insn(struct kvm_vcpu *vcpu, struct kvm_run *run,
			      ulong insn)
{
	int i, rc = KVM_INSN_ILLEGAL_TRAP;
	const struct insn_func *ifn;

	for (i = 0; i < ARRAY_SIZE(system_opcode_funcs); i++) {
		ifn = &system_opcode_funcs[i];
		if ((insn & ifn->mask) == ifn->match) {
			rc = ifn->func(vcpu, run, insn);
			break;
		}
	}

	switch (rc) {
	case KVM_INSN_ILLEGAL_TRAP:
		return truly_illegal_insn(vcpu, run, insn);
	case KVM_INSN_VIRTUAL_TRAP:
		return truly_virtual_insn(vcpu, run, insn);
	case KVM_INSN_CONTINUE_NEXT_SEPC:
		vcpu->arch.guest_context.sepc += INSN_LEN(insn);
		break;
	default:
		break;
	}

	return (rc <= 0) ? rc : 1;
}

/**
 * kvm_riscv_vcpu_virtual_insn -- Handle virtual instruction trap
 *
 * @vcpu: The VCPU pointer
 * @run:  The VCPU run struct containing the mmio data
 * @trap: Trap details
 *
 * Returns > 0 to continue run-loop
 * Returns   0 to exit run-loop and handle in user-space.
 * Returns < 0 to report failure and exit run-loop
 */
int kvm_riscv_vcpu_virtual_insn(struct kvm_vcpu *vcpu, struct kvm_run *run,
				struct kvm_cpu_trap *trap)
{
	unsigned long insn = trap->stval;
	struct kvm_cpu_trap utrap = { 0 };
	struct kvm_cpu_context *ct;

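	/*
	 * For virtual-instruction exceptions, stval normally holds the
	 * trapping instruction; a zero value means the hardware did not
	 * capture it, so read the instruction from guest memory at sepc.
	 */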
	if (unlikely(INSN_IS_16BIT(insn))) {
		if (insn == 0) {
			ct = &vcpu->arch.guest_context;
			insn = kvm_riscv_vcpu_unpriv_read(vcpu, true,
							  ct->sepc,
							  &utrap);
			if (utrap.scause) {
				utrap.sepc = ct->sepc;
				kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);
				return 1;
			}
		}
		if (INSN_IS_16BIT(insn))
			return truly_illegal_insn(vcpu, run, insn);
	}

	switch ((insn & INSN_OPCODE_MASK) >> INSN_OPCODE_SHIFT) {
	case INSN_OPCODE_SYSTEM:
		return system_opcode_insn(vcpu, run, insn);
	default:
		return truly_illegal_insn(vcpu, run, insn);
	}
}

/**
 * kvm_riscv_vcpu_mmio_load -- Emulate MMIO load instruction
 *
 * @vcpu: The VCPU pointer
 * @run:  The VCPU run struct containing the mmio data
 * @fault_addr: Guest physical address to load
 * @htinst: Transformed encoding of the load instruction
 *
 * Returns > 0 to continue run-loop
 * Returns   0 to exit run-loop and handle in user-space.
 * Returns < 0 to report failure and exit run-loop
 */
int kvm_riscv_vcpu_mmio_load(struct kvm_vcpu *vcpu, struct kvm_run *run,
			     unsigned long fault_addr,
			     unsigned long htinst)
{
	u8 data_buf[8];
	unsigned long insn;
	int shift = 0, len = 0, insn_len = 0;
	struct kvm_cpu_trap utrap = { 0 };
	struct kvm_cpu_context *ct = &vcpu->arch.guest_context;

	/* Determine trapped instruction */
	if (htinst & 0x1) {
		/*
		 * Bit[0] == 1 implies the trapped instruction value is a
		 * transformed instruction or a custom instruction; per the
		 * H-extension transformation, bit[1] is then clear when
		 * the original instruction was 16-bit (compressed).
		 */
		insn = htinst | INSN_16BIT_MASK;
		insn_len = (htinst & BIT(1)) ? INSN_LEN(insn) : 2;
	} else {
		/*
		 * Bit[0] == 0 implies the trapped instruction value is
		 * zero or a special value.
		 */
		insn = kvm_riscv_vcpu_unpriv_read(vcpu, true, ct->sepc,
						  &utrap);
		if (utrap.scause) {
			/* Redirect trap if we failed to read instruction */
			utrap.sepc = ct->sepc;
			kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);
			return 1;
		}
		insn_len = INSN_LEN(insn);
	}

	/* Decode length of MMIO and shift */
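	/*
	 * For loads narrower than XLEN, shift is chosen so that
	 * kvm_riscv_vcpu_mmio_return() can reduce the returned value to
	 * "len" bytes via "<< shift >> shift"; lhu and lwu leave shift
	 * at 0 and take the zero-extended value as-is.
	 */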
	if ((insn & INSN_MASK_LW) == INSN_MATCH_LW) {
		len = 4;
		shift = 8 * (sizeof(ulong) - len);
	} else if ((insn & INSN_MASK_LB) == INSN_MATCH_LB) {
		len = 1;
		shift = 8 * (sizeof(ulong) - len);
	} else if ((insn & INSN_MASK_LBU) == INSN_MATCH_LBU) {
		len = 1;
		shift = 8 * (sizeof(ulong) - len);
#ifdef CONFIG_64BIT
	} else if ((insn & INSN_MASK_LD) == INSN_MATCH_LD) {
		len = 8;
		shift = 8 * (sizeof(ulong) - len);
	} else if ((insn & INSN_MASK_LWU) == INSN_MATCH_LWU) {
		len = 4;
#endif
	} else if ((insn & INSN_MASK_LH) == INSN_MATCH_LH) {
		len = 2;
		shift = 8 * (sizeof(ulong) - len);
	} else if ((insn & INSN_MASK_LHU) == INSN_MATCH_LHU) {
		len = 2;
#ifdef CONFIG_64BIT
	} else if ((insn & INSN_MASK_C_LD) == INSN_MATCH_C_LD) {
		len = 8;
		shift = 8 * (sizeof(ulong) - len);
		insn = RVC_RS2S(insn) << SH_RD;
	} else if ((insn & INSN_MASK_C_LDSP) == INSN_MATCH_C_LDSP &&
		   ((insn >> SH_RD) & 0x1f)) {
		len = 8;
		shift = 8 * (sizeof(ulong) - len);
#endif
	} else if ((insn & INSN_MASK_C_LW) == INSN_MATCH_C_LW) {
		len = 4;
		shift = 8 * (sizeof(ulong) - len);
		insn = RVC_RS2S(insn) << SH_RD;
	} else if ((insn & INSN_MASK_C_LWSP) == INSN_MATCH_C_LWSP &&
		   ((insn >> SH_RD) & 0x1f)) {
		len = 4;
		shift = 8 * (sizeof(ulong) - len);
	} else {
		return -EOPNOTSUPP;
	}

	/* Fault address should be aligned to length of MMIO */
	if (fault_addr & (len - 1))
		return -EIO;

	/* Save instruction decode info */
	vcpu->arch.mmio_decode.insn = insn;
	vcpu->arch.mmio_decode.insn_len = insn_len;
	vcpu->arch.mmio_decode.shift = shift;
	vcpu->arch.mmio_decode.len = len;
	vcpu->arch.mmio_decode.return_handled = 0;

	/* Update MMIO details in kvm_run struct */
	run->mmio.is_write = false;
	run->mmio.phys_addr = fault_addr;
	run->mmio.len = len;

	/* Try to handle MMIO access in the kernel */
	if (!kvm_io_bus_read(vcpu, KVM_MMIO_BUS, fault_addr, len, data_buf)) {
		/* Successfully handled MMIO access in the kernel so resume */
		memcpy(run->mmio.data, data_buf, len);
		vcpu->stat.mmio_exit_kernel++;
		kvm_riscv_vcpu_mmio_return(vcpu, run);
		return 1;
	}

	/* Exit to userspace for MMIO emulation */
	vcpu->stat.mmio_exit_user++;
	run->exit_reason = KVM_EXIT_MMIO;

	return 0;
}

/**
 * kvm_riscv_vcpu_mmio_store -- Emulate MMIO store instruction
 *
 * @vcpu: The VCPU pointer
 * @run:  The VCPU run struct containing the mmio data
 * @fault_addr: Guest physical address to store
 * @htinst: Transformed encoding of the store instruction
 *
 * Returns > 0 to continue run-loop
 * Returns   0 to exit run-loop and handle in user-space.
 * Returns < 0 to report failure and exit run-loop
 */
int kvm_riscv_vcpu_mmio_store(struct kvm_vcpu *vcpu, struct kvm_run *run,
			      unsigned long fault_addr,
			      unsigned long htinst)
{
	u8 data8;
	u16 data16;
	u32 data32;
	u64 data64;
	ulong data;
	unsigned long insn;
	int len = 0, insn_len = 0;
	struct kvm_cpu_trap utrap = { 0 };
	struct kvm_cpu_context *ct = &vcpu->arch.guest_context;

	/* Determine trapped instruction */
	if (htinst & 0x1) {
		/*
		 * Bit[0] == 1 implies the trapped instruction value is a
		 * transformed instruction or a custom instruction; per the
		 * H-extension transformation, bit[1] is then clear when
		 * the original instruction was 16-bit (compressed).
		 */
		insn = htinst | INSN_16BIT_MASK;
		insn_len = (htinst & BIT(1)) ? INSN_LEN(insn) : 2;
	} else {
		/*
		 * Bit[0] == 0 implies the trapped instruction value is
		 * zero or a special value.
		 */
		insn = kvm_riscv_vcpu_unpriv_read(vcpu, true, ct->sepc,
						  &utrap);
		if (utrap.scause) {
			/* Redirect trap if we failed to read instruction */
			utrap.sepc = ct->sepc;
			kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);
			return 1;
		}
		insn_len = INSN_LEN(insn);
	}

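	/*
	 * Fetch the store value from rs2 and keep narrower copies around;
	 * the compressed forms below override data32/data64 from rs2' or
	 * the stack-pointer-relative register field instead.
	 */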
	data = GET_RS2(insn, &vcpu->arch.guest_context);
	data8 = data16 = data32 = data64 = data;

	if ((insn & INSN_MASK_SW) == INSN_MATCH_SW) {
		len = 4;
	} else if ((insn & INSN_MASK_SB) == INSN_MATCH_SB) {
		len = 1;
#ifdef CONFIG_64BIT
	} else if ((insn & INSN_MASK_SD) == INSN_MATCH_SD) {
		len = 8;
#endif
	} else if ((insn & INSN_MASK_SH) == INSN_MATCH_SH) {
		len = 2;
#ifdef CONFIG_64BIT
	} else if ((insn & INSN_MASK_C_SD) == INSN_MATCH_C_SD) {
		len = 8;
		data64 = GET_RS2S(insn, &vcpu->arch.guest_context);
	} else if ((insn & INSN_MASK_C_SDSP) == INSN_MATCH_C_SDSP &&
		   ((insn >> SH_RD) & 0x1f)) {
		len = 8;
		data64 = GET_RS2C(insn, &vcpu->arch.guest_context);
#endif
	} else if ((insn & INSN_MASK_C_SW) == INSN_MATCH_C_SW) {
		len = 4;
		data32 = GET_RS2S(insn, &vcpu->arch.guest_context);
	} else if ((insn & INSN_MASK_C_SWSP) == INSN_MATCH_C_SWSP &&
		   ((insn >> SH_RD) & 0x1f)) {
		len = 4;
		data32 = GET_RS2C(insn, &vcpu->arch.guest_context);
	} else {
		return -EOPNOTSUPP;
	}

	/* Fault address should be aligned to length of MMIO */
	if (fault_addr & (len - 1))
		return -EIO;

	/* Save instruction decode info */
	vcpu->arch.mmio_decode.insn = insn;
	vcpu->arch.mmio_decode.insn_len = insn_len;
	vcpu->arch.mmio_decode.shift = 0;
	vcpu->arch.mmio_decode.len = len;
	vcpu->arch.mmio_decode.return_handled = 0;

	/* Copy data to kvm_run instance */
	switch (len) {
	case 1:
		*((u8 *)run->mmio.data) = data8;
		break;
	case 2:
		*((u16 *)run->mmio.data) = data16;
		break;
	case 4:
		*((u32 *)run->mmio.data) = data32;
		break;
	case 8:
		*((u64 *)run->mmio.data) = data64;
		break;
	default:
		return -EOPNOTSUPP;
	}

	/* Update MMIO details in kvm_run struct */
	run->mmio.is_write = true;
	run->mmio.phys_addr = fault_addr;
	run->mmio.len = len;

	/* Try to handle MMIO access in the kernel */
	if (!kvm_io_bus_write(vcpu, KVM_MMIO_BUS,
			      fault_addr, len, run->mmio.data)) {
		/* Successfully handled MMIO access in the kernel so resume */
		vcpu->stat.mmio_exit_kernel++;
		kvm_riscv_vcpu_mmio_return(vcpu, run);
		return 1;
	}

	/* Exit to userspace for MMIO emulation */
	vcpu->stat.mmio_exit_user++;
	run->exit_reason = KVM_EXIT_MMIO;

	return 0;
}

/**
 * kvm_riscv_vcpu_mmio_return -- Handle MMIO loads after user space emulation
 *			     or in-kernel IO emulation
 *
 * @vcpu: The VCPU pointer
 * @run:  The VCPU run struct containing the mmio data
 */
int kvm_riscv_vcpu_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	u8 data8;
	u16 data16;
	u32 data32;
	u64 data64;
	ulong insn;
	int len, shift;

	if (vcpu->arch.mmio_decode.return_handled)
		return 0;

	vcpu->arch.mmio_decode.return_handled = 1;
	insn = vcpu->arch.mmio_decode.insn;

	if (run->mmio.is_write)
		goto done;

	len = vcpu->arch.mmio_decode.len;
	shift = vcpu->arch.mmio_decode.shift;

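	/*
	 * Write the loaded data back to rd. The "<< shift >> shift" pair
	 * uses the shift chosen in kvm_riscv_vcpu_mmio_load() to reduce
	 * the value to the access width before it lands in the register.
	 */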
	switch (len) {
	case 1:
		data8 = *((u8 *)run->mmio.data);
		SET_RD(insn, &vcpu->arch.guest_context,
			(ulong)data8 << shift >> shift);
		break;
	case 2:
		data16 = *((u16 *)run->mmio.data);
		SET_RD(insn, &vcpu->arch.guest_context,
			(ulong)data16 << shift >> shift);
		break;
	case 4:
		data32 = *((u32 *)run->mmio.data);
		SET_RD(insn, &vcpu->arch.guest_context,
			(ulong)data32 << shift >> shift);
		break;
	case 8:
		data64 = *((u64 *)run->mmio.data);
		SET_RD(insn, &vcpu->arch.guest_context,
			(ulong)data64 << shift >> shift);
		break;
	default:
		return -EOPNOTSUPP;
	}

done:
	/* Move to next instruction */
	vcpu->arch.guest_context.sepc += vcpu->arch.mmio_decode.insn_len;

	return 0;
}