// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/preempt.h>
#include <linux/vmalloc.h>
#include <asm/cacheflush.h>
#include <asm/cacheops.h>
#include <asm/cmpxchg.h>
#include <asm/fpu.h>
#include <asm/inst.h>
#include <asm/mmu_context.h>
#include <asm/numa.h>
#include <asm/time.h>
#include <asm/tlb.h>
#include <asm/watch.h>
#include "kvmcpu.h"
#include <linux/kvm_host.h>

#include "trace.h"
#include "kvm_compat.h"
#include "kvmcsr.h"
#include "intc/ls3a_ext_irq.h"

/*
 * LoongArch KVM callback handling for guest exits that are not implemented
 */
static int _kvm_fault_ni(struct kvm_vcpu *vcpu)
{
	unsigned long estat, badv;
	unsigned int exccode, inst;

	/*
	 * Fetch the instruction.
	 */
	badv = vcpu->arch.badv;
	estat = vcpu->arch.host_estat;
	exccode = (estat & KVM_ESTAT_EXC) >> KVM_ESTAT_EXC_SHIFT;
	inst = vcpu->arch.badi;
	kvm_err("Exccode: %d PC=%#lx inst=0x%08x BadVaddr=%#lx estat=%#llx\n",
		exccode, vcpu->arch.pc, inst, badv, kvm_read_gcsr_estat());
	kvm_arch_vcpu_dump_regs(vcpu);
	vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
	return RESUME_HOST;
}

static int _kvm_handle_csr(struct kvm_vcpu *vcpu, larch_inst inst)
{
	enum emulation_result er = EMULATE_DONE;
	unsigned int rd, rj, csrid;
	unsigned long csr_mask;
	unsigned long val = 0;

	/*
	 * The rj field selects the CSR operation:
	 * rj == 0 means csrrd
	 * rj == 1 means csrwr
	 * rj != 0,1 means csrxchg, with the mask taken from gpr[rj]
	 */
	rd = inst.reg2csr_format.rd;
	rj = inst.reg2csr_format.rj;
	csrid = inst.reg2csr_format.csr;

	/* Process CSR ops */
	if (rj == 0) {
		/* process csrrd */
		val = _kvm_emu_read_csr(vcpu, csrid);
		if (er != EMULATE_FAIL)
			vcpu->arch.gprs[rd] = val;
	} else if (rj == 1) {
		/* process csrwr */
		val = vcpu->arch.gprs[rd];
		_kvm_emu_write_csr(vcpu, csrid, val);
	} else {
		/* process csrxchg */
		val = vcpu->arch.gprs[rd];
		csr_mask = vcpu->arch.gprs[rj];
		_kvm_emu_xchg_csr(vcpu, csrid, csr_mask, val);
	}

	return er;
}

static int _kvm_emu_cache(struct kvm_vcpu *vcpu, larch_inst inst)
{
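	/*
	 * A guest CACOP that traps here is treated as a no-op: cache
	 * maintenance for guest memory is assumed to be handled by the
	 * root context, so there is nothing to emulate.
	 */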
	return EMULATE_DONE;
}

static int _kvm_trap_handle_gspr(struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DONE;
	struct kvm_run *run = vcpu->run;
	larch_inst inst;
	unsigned long curr_pc;
	int rd, rj;
	unsigned int index;

	/*
	 * Fetch the instruction.
	 */
	inst.word = vcpu->arch.badi;
	curr_pc = vcpu->arch.pc;
	update_pc(&vcpu->arch);

	er = EMULATE_FAIL;
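	/* Dispatch on the major opcode, bits [31:24] of the instruction word */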
	switch (((inst.word >> 24) & 0xff)) {
	case 0x0:
		/* cpucfg GSPR */
		if (inst.reg2_format.opcode == 0x1B) {
			rd = inst.reg2_format.rd;
			rj = inst.reg2_format.rj;
			++vcpu->stat.cpucfg_exits;
			index = vcpu->arch.gprs[rj];
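			/*
			 * Note: index comes straight from a guest register;
			 * this assumes cpucfgs.cpucfg[] covers the whole
			 * cpucfg index space the guest can request.
			 */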
			vcpu->arch.gprs[rd] = vcpu->kvm->arch.cpucfgs.cpucfg[index];
			if ((index == 2) || (vcpu->arch.gprs[rd] == 0))
				/*
				 * Fall back to the host cpucfg info; this is
				 * just for compatibility with older QEMU.
				 */
				vcpu->arch.gprs[rd] = read_cpucfg(index);
			if (index == 2)
				/* do not support nested virtualization */
				vcpu->arch.gprs[rd] &= ~CPUCFG2_LVZP;
			er = EMULATE_DONE;
		}
		break;
	case 0x4:
		/* csr GSPR */
		er = _kvm_handle_csr(vcpu, inst);
		break;
	case 0x6:
		/* iocsr, cacop, idle GSPR */
		switch (((inst.word >> 22) & 0x3ff)) {
		case 0x18:
			/* cache GSPR */
			er = _kvm_emu_cache(vcpu, inst);
			trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
			break;
		case 0x19:
			/* iocsr/idle GSPR */
			switch (((inst.word >> 15) & 0x1ffff)) {
			case 0xc90:
				/* iocsr GSPR */
				er = _kvm_emu_iocsr(inst, run, vcpu);
				break;
			case idle_op:
				/* idle GSPR */
				er = _kvm_emu_idle(vcpu);
				break;
			default:
				er = EMULATE_FAIL;
				break;
			}
			break;
		default:
			er = EMULATE_FAIL;
			break;
		}
		break;
	default:
		er = EMULATE_FAIL;
		break;
	}

	/* Roll back the PC only if emulation was unsuccessful */
	if (er == EMULATE_FAIL) {
		kvm_err("[%#lx]%s: unsupported gspr instruction 0x%08x\n",
			curr_pc, __func__, inst.word);

		kvm_arch_vcpu_dump_regs(vcpu);
		vcpu->arch.pc = curr_pc;
	}
	return er;
}

static int _kvm_check_hypcall(struct kvm_vcpu *vcpu)
{
	enum emulation_result ret;
	larch_inst inst;
	unsigned long curr_pc;
	unsigned int code;

	/*
	 * Update the PC and hold onto the current PC in case there is
	 * an error and we want to roll it back.
	 */
	inst.word = vcpu->arch.badi;
	code = inst.reg0i15_format.simmediate;
	curr_pc = vcpu->arch.pc;
	update_pc(&vcpu->arch);

	ret = EMULATE_DONE;
	switch (code) {
	case KVM_HC_CODE_SERIVCE:
		ret = EMULATE_PV_HYPERCALL;
		break;
	case KVM_HC_CODE_SWDBG:
		/*
		 * Only SWDBG (SoftWare DeBug) can stop the VM;
		 * any other code is ignored.
		 */
		ret = EMULATE_DEBUG;
		break;
	default:
		kvm_info("[%#lx] HYPCALL %#03x unsupported\n", vcpu->arch.pc, code);
		break;
	}

	if (ret == EMULATE_DEBUG)
		vcpu->arch.pc = curr_pc;

	return ret;
}

/*
 * Executing the cpucfg instruction triggers a GSPR exception.
 * So do accesses to the unimplemented CSRs 0x15, 0x16, 0x50~0x53,
 * 0x80, 0x81, 0x90~0x95, 0x98, 0xc0~0xff, 0x100~0x109, 0x500~0x502,
 * as well as the cacop_op, idle_op and iocsr ops.
 */
static int _kvm_handle_gspr(struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	vcpu->arch.is_hypcall = 0;

	er = _kvm_trap_handle_gspr(vcpu);

	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else if (er == EMULATE_DO_MMIO) {
		vcpu->run->exit_reason = KVM_EXIT_MMIO;
		ret = RESUME_HOST;
	} else if (er == EMULATE_DO_IOCSR) {
		vcpu->run->exit_reason = KVM_EXIT_LOONGARCH_IOCSR;
		ret = RESUME_HOST;
	} else {
		kvm_err("%s internal error\n", __func__);
		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

static int _kvm_handle_hypcall(struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	vcpu->arch.is_hypcall = 0;
	er = _kvm_check_hypcall(vcpu);

	if (er == EMULATE_PV_HYPERCALL) {
		ret = _kvm_handle_pv_hcall(vcpu);
	} else if (er == EMULATE_DEBUG) {
		vcpu->run->exit_reason = KVM_EXIT_DEBUG;
		ret = RESUME_HOST;
	} else {
		ret = RESUME_GUEST;
	}

	return ret;
}

static int _kvm_handle_gcm(struct kvm_vcpu *vcpu)
{
	int ret, subcode;

	vcpu->arch.is_hypcall = 0;
	ret = RESUME_GUEST;
	subcode = (vcpu->arch.host_estat & KVM_ESTAT_ESUBCODE) >> KVM_ESTAT_ESUBCODE_SHIFT;
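	/*
	 * GCSC (software-changed) and GCHC (hardware-changed) guest CSR
	 * modifications are expected and need no action; any other
	 * subcode indicates an internal error.
	 */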
	if ((subcode != EXCSUBCODE_GCSC) && (subcode != EXCSUBCODE_GCHC)) {
		kvm_err("%s internal error\n", __func__);
		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}

	return ret;
}

/**
 * _kvm_handle_fpu_disabled() - Guest used FPU while it is disabled at host
 * @vcpu: Virtual CPU context.
 *
 * Handle when the guest attempts to use the FPU when it hasn't been allowed
 * by the root context.
 */
static int _kvm_handle_fpu_disabled(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;

	/*
	 * If guest FPU not present, the FPU operation should have been
	 * treated as a reserved instruction!
	 * If FPU already in use, we shouldn't get this at all.
	 */
	if (WARN_ON(!_kvm_guest_has_fpu(&vcpu->arch) ||
		    vcpu->arch.aux_inuse & KVM_LARCH_FPU)) {
		kvm_err("%s internal error\n", __func__);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return RESUME_HOST;
	}

	kvm_own_fpu(vcpu);
	return RESUME_GUEST;
}

/**
 * _kvm_handle_lsx_disabled() - Guest used LSX while disabled in root.
 * @vcpu: Virtual CPU context.
 *
 * Handle when the guest attempts to use LSX when it is disabled in the root
 * context.
 */
static int _kvm_handle_lsx_disabled(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;

	/*
	 * If LSX not present or not exposed to guest, the LSX operation
	 * should have been treated as a reserved instruction!
	 * If LSX already in use, we shouldn't get this at all.
	 */
	if (!_kvm_guest_has_lsx(&vcpu->arch) ||
	    !(kvm_read_gcsr_euen() & KVM_EUEN_LSXEN) ||
	    vcpu->arch.aux_inuse & KVM_LARCH_LSX) {
		kvm_err("%s internal error, lsx %d guest euen %llx aux %x\n",
			__func__, _kvm_guest_has_lsx(&vcpu->arch),
			kvm_read_gcsr_euen(), vcpu->arch.aux_inuse);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return RESUME_HOST;
	}

	kvm_own_lsx(vcpu);
	return RESUME_GUEST;
}

bool _kvm_guest_has_lasx(struct kvm_vcpu *vcpu)
{
	return cpu_has_lasx && vcpu->arch.lsx_enabled && vcpu->kvm->arch.cpucfg_lasx;
}

/**
 * _kvm_handle_lasx_disabled() - Guest used LASX while disabled in root.
 * @vcpu: Virtual CPU context.
 *
 * Handle when the guest attempts to use LASX when it is disabled in the root
 * context.
 */
static int _kvm_handle_lasx_disabled(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;

	/*
	 * If LASX not present or not exposed to guest, the LASX operation
	 * should have been treated as a reserved instruction!
	 * If LASX already in use, we shouldn't get this at all.
	 */
	if (!_kvm_guest_has_lasx(vcpu) ||
	    !(kvm_read_gcsr_euen() & KVM_EUEN_LSXEN) ||
	    !(kvm_read_gcsr_euen() & KVM_EUEN_LASXEN) ||
	    vcpu->arch.aux_inuse & KVM_LARCH_LASX) {
		kvm_err("%s internal error, lasx %d guest euen %llx aux %x\n",
			__func__, _kvm_guest_has_lasx(vcpu),
			kvm_read_gcsr_euen(), vcpu->arch.aux_inuse);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return RESUME_HOST;
	}

	kvm_own_lasx(vcpu);

	return RESUME_GUEST;
}

/**
 * _kvm_handle_lbt_disabled() - Guest used LBT while it is disabled at host
 * @vcpu: Virtual CPU context.
 *
 * Handle when the guest attempts to use LBT when it hasn't been allowed
 * by the root context.
 */
static int _kvm_handle_lbt_disabled(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;

	/*
	 * If guest LBT not present, the LBT operation should have been
	 * treated as a reserved instruction!
	 * If LBT already in use, we shouldn't get this at all.
	 */
	if (vcpu->arch.aux_inuse & KVM_LARCH_LBT) {
		kvm_err("%s internal error\n", __func__);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return RESUME_HOST;
	}

	kvm_own_lbt(vcpu);
	return RESUME_GUEST;
}

static int _kvm_handle_read_fault(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	ulong badv = vcpu->arch.badv;
	larch_inst inst;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

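	/*
	 * A non-zero return from kvm_handle_mm_fault() is taken to mean
	 * the faulting guest address has no memory backing, so the
	 * access is emulated as MMIO instead.
	 */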
	if (kvm_handle_mm_fault(vcpu, badv, false)) {
		/* A code fetch fault doesn't count as an MMIO */
		if (kvm_is_ifetch_fault(&vcpu->arch)) {
			kvm_err("%s ifetch error addr:%lx\n", __func__, badv);
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			return RESUME_HOST;
		}

		/* Treat as MMIO */
		inst.word = vcpu->arch.badi;
		er = _kvm_emu_mmio_read(vcpu, inst);
		if (er == EMULATE_FAIL) {
			kvm_err("Guest Emulate Load failed: PC: %#lx, BadVaddr: %#lx\n",
				vcpu->arch.pc, badv);
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		}
	}

	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else if (er == EMULATE_DO_MMIO) {
		run->exit_reason = KVM_EXIT_MMIO;
		ret = RESUME_HOST;
	} else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

static int _kvm_handle_write_fault(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	ulong badv = vcpu->arch.badv;
	larch_inst inst;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	if (kvm_handle_mm_fault(vcpu, badv, true)) {
		/* Treat as MMIO */
		inst.word = vcpu->arch.badi;
		er = _kvm_emu_mmio_write(vcpu, inst);
		if (er == EMULATE_FAIL) {
			kvm_err("Guest Emulate Store failed: PC: %#lx, BadVaddr: %#lx\n",
				vcpu->arch.pc, badv);
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		}
	}

	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else if (er == EMULATE_DO_MMIO) {
		run->exit_reason = KVM_EXIT_MMIO;
		ret = RESUME_HOST;
	} else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

static int _kvm_handle_debug(struct kvm_vcpu *vcpu)
{
	uint32_t fwps, mwps;

	fwps = kvm_csr_readq(KVM_CSR_FWPS);
	mwps = kvm_csr_readq(KVM_CSR_MWPS);
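	/*
	 * Writing the set status bits back is assumed to acknowledge
	 * (write-1-to-clear) the fetch/memory watchpoint hits before
	 * the exit is reported to userspace.
	 */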
	if (fwps & 0xff)
		kvm_csr_writeq(fwps, KVM_CSR_FWPS);
	if (mwps & 0xff)
		kvm_csr_writeq(mwps, KVM_CSR_MWPS);
	vcpu->run->debug.arch.exception = EXCCODE_WATCH;
	vcpu->run->debug.arch.fwps = fwps;
	vcpu->run->debug.arch.mwps = mwps;
	vcpu->run->exit_reason = KVM_EXIT_DEBUG;
	return RESUME_HOST;
}

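/*
 * Exit dispatch table indexed by exception code. Entries left NULL here
 * are pointed at _kvm_fault_ni() by _kvm_init_fault() below.
 */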
static exit_handle_fn _kvm_fault_tables[EXCCODE_INT_START] = {
	[EXCCODE_TLBL]		= _kvm_handle_read_fault,
	[EXCCODE_TLBS]		= _kvm_handle_write_fault,
	[EXCCODE_TLBI]		= _kvm_handle_read_fault,
	[EXCCODE_TLBM]		= _kvm_handle_write_fault,
	[EXCCODE_TLBNR]		= _kvm_handle_read_fault,
	[EXCCODE_TLBNX]		= _kvm_handle_read_fault,
	[EXCCODE_FPDIS]		= _kvm_handle_fpu_disabled,
	[EXCCODE_LSXDIS]	= _kvm_handle_lsx_disabled,
	[EXCCODE_LASXDIS]	= _kvm_handle_lasx_disabled,
	[EXCCODE_WATCH]		= _kvm_handle_debug,
	[EXCCODE_GSPR]		= _kvm_handle_gspr,
	[EXCCODE_HVC]		= _kvm_handle_hypcall,
	[EXCCODE_GCM]		= _kvm_handle_gcm,
	[EXCCODE_BTDIS]		= _kvm_handle_lbt_disabled,
};

int _kvm_handle_fault(struct kvm_vcpu *vcpu, int fault)
{
	return _kvm_fault_tables[fault](vcpu);
}

void _kvm_init_fault(void)
{
	int i;

	for (i = 0; i < EXCCODE_INT_START; i++)
		if (!_kvm_fault_tables[i])
			_kvm_fault_tables[i] = _kvm_fault_ni;
}