// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/ktime.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/random.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/cacheops.h>
#include <asm/cpu-info.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/inst.h>
#include "kvmcpu.h"
#include "trace.h"

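/*
 * Emulate the guest executing an idle instruction: account the exit,
 * block the vCPU until an interrupt is pending, and report an open IRQ
 * window back to userspace when the halt is broken by a request.
 */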
int _kvm_emu_idle(struct kvm_vcpu *vcpu)
{
	++vcpu->stat.idle_exits;
	trace_kvm_exit(vcpu, KVM_TRACE_EXIT_IDLE);
	if (!vcpu->arch.irq_pending) {
		kvm_save_timer(vcpu);
		kvm_vcpu_block(vcpu);

		/*
		 * If we are runnable, then definitely go off to user space to
		 * check if any I/O interrupts are pending.
		 */
		if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
			kvm_clear_request(KVM_REQ_UNHALT, vcpu);
			vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
		}
	}

	if (kvm_check_request(KVM_REQ_EVENT, vcpu))
		vcpu->arch.pv.pv_unhalted = false;

	return EMULATE_DONE;
}

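/*
 * Decode and emulate a trapped MMIO store. The data to be written is
 * copied into run->mmio and offered to the in-kernel I/O bus first;
 * only if no kernel device claims the address do we exit to userspace
 * with EMULATE_DO_MMIO.
 */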
int _kvm_emu_mmio_write(struct kvm_vcpu *vcpu, larch_inst inst)
{
	struct kvm_run *run = vcpu->run;
	unsigned int rd, op8, opcode;
	unsigned long rd_val = 0;
	void *data = run->mmio.data;
	unsigned long curr_pc;
	int ret = 0;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to roll back the PC
	 */
	curr_pc = vcpu->arch.pc;
	update_pc(&vcpu->arch);

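	/*
	 * Bits 31:24 of the instruction word select the store family:
	 * below 0x28 is stptr.w/d, below 0x30 is st.b/h/w/d, and 0x38
	 * covers the register-indexed stx.b/h/w/d forms.
	 */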
	op8 = (inst.word >> 24) & 0xff;
	run->mmio.phys_addr = vcpu->arch.badv;
	if (run->mmio.phys_addr == KVM_INVALID_ADDR)
		goto out_fail;

	if (op8 < 0x28) {
		/* stptr.w/d process */
		rd = inst.reg2i14_format.rd;
		opcode = inst.reg2i14_format.opcode;

		switch (opcode) {
		case stptrd_op:
			run->mmio.len = 8;
			*(unsigned long *)data = vcpu->arch.gprs[rd];
			break;
		case stptrw_op:
			run->mmio.len = 4;
			*(unsigned int *)data = vcpu->arch.gprs[rd];
			break;
		default:
			break;
		}
	} else if (op8 < 0x30) {
		/* st.b/h/w/d process */
		rd = inst.reg2i12_format.rd;
		opcode = inst.reg2i12_format.opcode;
		rd_val = vcpu->arch.gprs[rd];

		switch (opcode) {
		case std_op:
			run->mmio.len = 8;
			*(unsigned long *)data = rd_val;
			break;
		case stw_op:
			run->mmio.len = 4;
			*(unsigned int *)data = rd_val;
			break;
		case sth_op:
			run->mmio.len = 2;
			*(unsigned short *)data = rd_val;
			break;
		case stb_op:
			run->mmio.len = 1;
			*(unsigned char *)data = rd_val;
			break;
		default:
			kvm_err("Store not yet supported (inst=0x%08x)\n",
				inst.word);
			kvm_arch_vcpu_dump_regs(vcpu);
			goto out_fail;
		}
	} else if (op8 == 0x38) {
		/* stx.b/h/w/d process */
		rd = inst.reg3_format.rd;
		opcode = inst.reg3_format.opcode;

		switch (opcode) {
		case stxb_op:
			run->mmio.len = 1;
			*(unsigned char *)data = vcpu->arch.gprs[rd];
			break;
		case stxh_op:
			run->mmio.len = 2;
			*(unsigned short *)data = vcpu->arch.gprs[rd];
			break;
		case stxw_op:
			run->mmio.len = 4;
			*(unsigned int *)data = vcpu->arch.gprs[rd];
			break;
		case stxd_op:
			run->mmio.len = 8;
			*(unsigned long *)data = vcpu->arch.gprs[rd];
			break;
		default:
			kvm_err("Store not yet supported (inst=0x%08x)\n",
				inst.word);
			kvm_arch_vcpu_dump_regs(vcpu);
			goto out_fail;
		}
	} else {
		kvm_err("Store not yet supported (inst=0x%08x)\n",
			inst.word);
		kvm_arch_vcpu_dump_regs(vcpu);
		goto out_fail;
	}

	/* All in-kernel MMIO emulation goes through the common interface */
	ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
			       run->mmio.len, data);
	if (!ret) {
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

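	/* Not handled in kernel: exit to userspace to complete the write */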
	run->mmio.is_write = 1;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 1;

	return EMULATE_DO_MMIO;

out_fail:
	/* Roll back PC if emulation was unsuccessful */
	vcpu->arch.pc = curr_pc;
	return EMULATE_FAIL;
}

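/*
 * Decode and emulate a trapped MMIO load. Only the access size and
 * signedness are recorded here; the loaded value is written back to the
 * destination GPR by _kvm_complete_mmio_read() once the data is available.
 */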
int _kvm_emu_mmio_read(struct kvm_vcpu *vcpu, larch_inst inst)
{
	unsigned int op8, opcode, rd;
	int ret = 0;
	struct kvm_run *run = vcpu->run;

	run->mmio.phys_addr = vcpu->arch.badv;
	if (run->mmio.phys_addr == KVM_INVALID_ADDR)
		return EMULATE_FAIL;

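	/*
	 * mmio_needed doubles as the extension flag for the completion
	 * path: 2 means sign-extend the result, 1 means zero-extend.
	 */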
	vcpu->mmio_needed = 2;	/* signed */
	op8 = (inst.word >> 24) & 0xff;

	if (op8 < 0x28) {
		/* ldptr.w/d process */
		rd = inst.reg2i14_format.rd;
		opcode = inst.reg2i14_format.opcode;

		switch (opcode) {
		case ldptrd_op:
			run->mmio.len = 8;
			break;
		case ldptrw_op:
			run->mmio.len = 4;
			break;
		default:
			break;
		}
	} else if (op8 < 0x2f) {
		/* ld.b/h/w/d, ld.bu/hu/wu process */
		rd = inst.reg2i12_format.rd;
		opcode = inst.reg2i12_format.opcode;

		switch (opcode) {
		case ldd_op:
			run->mmio.len = 8;
			break;
		case ldwu_op:
			vcpu->mmio_needed = 1;	/* unsigned */
			run->mmio.len = 4;
			break;
		case ldw_op:
			run->mmio.len = 4;
			break;
		case ldhu_op:
			vcpu->mmio_needed = 1;	/* unsigned */
			run->mmio.len = 2;
			break;
		case ldh_op:
			run->mmio.len = 2;
			break;
		case ldbu_op:
			vcpu->mmio_needed = 1;	/* unsigned */
			run->mmio.len = 1;
			break;
		case ldb_op:
			run->mmio.len = 1;
			break;
		default:
			kvm_err("Load not yet supported (inst=0x%08x)\n",
				inst.word);
			kvm_arch_vcpu_dump_regs(vcpu);
			vcpu->mmio_needed = 0;
			return EMULATE_FAIL;
		}
	} else if (op8 == 0x38) {
		/* ldx.b/h/w/d, ldx.bu/hu/wu, ldgt.b/h/w/d, ldle.b/h/w/d process */
		rd = inst.reg3_format.rd;
		opcode = inst.reg3_format.opcode;

		switch (opcode) {
		case ldxb_op:
			run->mmio.len = 1;
			break;
		case ldxbu_op:
			run->mmio.len = 1;
			vcpu->mmio_needed = 1;	/* unsigned */
			break;
		case ldxh_op:
			run->mmio.len = 2;
			break;
		case ldxhu_op:
			run->mmio.len = 2;
			vcpu->mmio_needed = 1;	/* unsigned */
			break;
		case ldxw_op:
			run->mmio.len = 4;
			break;
		case ldxwu_op:
			run->mmio.len = 4;
			vcpu->mmio_needed = 1;	/* unsigned */
			break;
		case ldxd_op:
			run->mmio.len = 8;
			break;
		default:
			kvm_err("Load not yet supported (inst=0x%08x)\n",
				inst.word);
			kvm_arch_vcpu_dump_regs(vcpu);
			vcpu->mmio_needed = 0;
			return EMULATE_FAIL;
		}
	} else {
		kvm_err("Load not yet supported (inst=0x%08x) @ %lx\n",
			inst.word, vcpu->arch.pc);
		vcpu->mmio_needed = 0;
		return EMULATE_FAIL;
	}

	/* Set for _kvm_complete_mmio_read use */
	vcpu->arch.io_gpr = rd;
	ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
			      run->mmio.len, run->mmio.data);
	run->mmio.is_write = 0;
	vcpu->mmio_is_write = 0;

	if (!ret) {
		_kvm_complete_mmio_read(vcpu, run);
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}
	return EMULATE_DO_MMIO;
}

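/*
 * Finish a guest MMIO load: advance the PC past the faulting instruction
 * and move the data returned in run->mmio into the saved destination GPR,
 * extending it according to the access width and signedness.
 */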
int _kvm_complete_mmio_read(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];
	enum emulation_result er = EMULATE_DONE;

	/* update with new PC */
	update_pc(&vcpu->arch);
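	/* mmio_needed == 2 requests sign extension; otherwise zero-extend */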
	switch (run->mmio.len) {
	case 8:
		*gpr = *(s64 *)run->mmio.data;
		break;
	case 4:
		if (vcpu->mmio_needed == 2)
			*gpr = *(int *)run->mmio.data;
		else
			*gpr = *(unsigned int *)run->mmio.data;
		break;
	case 2:
		if (vcpu->mmio_needed == 2)
			*gpr = *(short *)run->mmio.data;
		else
			*gpr = *(unsigned short *)run->mmio.data;
		break;
	case 1:
		if (vcpu->mmio_needed == 2)
			*gpr = *(char *)run->mmio.data;
		else
			*gpr = *(unsigned char *)run->mmio.data;
		break;
	default:
		kvm_err("Bad MMIO length: %d, addr is 0x%lx\n",
			run->mmio.len, vcpu->arch.badv);
		er = EMULATE_FAIL;
		break;
	}

	return er;
}
