1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
4  */
5 
6 #include <linux/bitops.h>
7 #include <linux/errno.h>
8 #include <linux/err.h>
9 #include <linux/kdebug.h>
10 #include <linux/module.h>
11 #include <linux/uaccess.h>
12 #include <linux/vmalloc.h>
13 #include <linux/sched/signal.h>
14 #include <linux/fs.h>
15 #include <linux/mod_devicetable.h>
16 #include <linux/kvm.h>
17 #include <linux/kvm_host.h>
18 #include <linux/debugfs.h>
19 #include <linux/sched/stat.h>
20 #include <asm/fpu.h>
21 #include <asm/lbt.h>
22 #include <asm/watch.h>
23 #include <asm/page.h>
24 #include <asm/cacheflush.h>
25 #include <asm/mmu_context.h>
26 #include <asm/pgalloc.h>
27 #include <asm/pgtable.h>
28 #include <asm/cpufeature.h>
29 #include "kvmcpu.h"
30 #include <asm/setup.h>
31 #include <asm/time.h>
32 #include <asm/paravirt.h>
33 
34 #include "intc/ls3a_ipi.h"
35 #include "intc/ls7a_irq.h"
36 #include "intc/ls3a_ext_irq.h"
37 #include "kvm_compat.h"
38 #include "kvmcsr.h"
39 #include "ls_irq.h"
40 
41 /*
42  * Define the LoongArch KVM version.
43  * Bump the version number whenever the qemu/kvm interface changes.
44  */
45 #define KVM_LOONGARCH_VERSION 1
46 #define CREATE_TRACE_POINTS
47 #include "trace.h"
48 
49 struct kvm_stats_debugfs_item vcpu_debugfs_entries[] = {
50 	VCPU_STAT("idle", idle_exits),
51 	VCPU_STAT("signal", signal_exits),
52 	VCPU_STAT("interrupt", int_exits),
53 	VCPU_STAT("tlbmiss_ld", excep_exits[EXCCODE_TLBL]),
54 	VCPU_STAT("tlbmiss_st", excep_exits[EXCCODE_TLBS]),
55 	VCPU_STAT("tlb_ifetch", excep_exits[EXCCODE_TLBI]),
56 	VCPU_STAT("tlbmod", excep_exits[EXCCODE_TLBM]),
57 	VCPU_STAT("tlbri", excep_exits[EXCCODE_TLBNR]),
58 	VCPU_STAT("tlbxi", excep_exits[EXCCODE_TLBNX]),
59 	VCPU_STAT("fp_disabled", excep_exits[EXCCODE_FPDIS]),
60 	VCPU_STAT("lsx_disabled", excep_exits[EXCCODE_LSXDIS]),
61 	VCPU_STAT("lasx_disabled", excep_exits[EXCCODE_LASXDIS]),
62 	VCPU_STAT("fpe", excep_exits[EXCCODE_FPE]),
63 	VCPU_STAT("watch", excep_exits[EXCCODE_WATCH]),
64 	VCPU_STAT("gspr", excep_exits[EXCCODE_GSPR]),
65 	VCPU_STAT("vz_gsfc", excep_exits[EXCCODE_GCM]),
66 	VCPU_STAT("vz_hc", excep_exits[EXCCODE_HVC]),
67 
68 	VCPU_STAT("rdcsr_cpu_feature", rdcsr_cpu_feature_exits),
69 	VCPU_STAT("rdcsr_misc_func", rdcsr_misc_func_exits),
70 	VCPU_STAT("rdcsr_ipi_access", rdcsr_ipi_access_exits),
71 	VCPU_STAT("cpucfg", cpucfg_exits),
72 	VCPU_STAT("huge_dec", huge_dec_exits),
73 	VCPU_STAT("huge_thp", huge_thp_exits),
74 	VCPU_STAT("huge_adj", huge_adjust_exits),
75 	VCPU_STAT("huge_set", huge_set_exits),
76 	VCPU_STAT("huge_merg", huge_merge_exits),
77 
78 	VCPU_STAT("halt_successful_poll", halt_successful_poll),
79 	VCPU_STAT("halt_attempted_poll", halt_attempted_poll),
80 	VCPU_STAT("halt_poll_invalid", halt_poll_invalid),
81 	VCPU_STAT("halt_wakeup", halt_wakeup),
82 	VCPU_STAT("halt_poll_success_ns", halt_poll_success_ns),
83 	VCPU_STAT("halt_poll_fail_ns", halt_poll_fail_ns),
84 	{NULL}
85 };
86 
87 struct kvm_stats_debugfs_item debugfs_entries[] = {
88 	VM_STAT("remote_tlb_flush", remote_tlb_flush),
89 	VM_STAT("pip_read_exits", pip_read_exits),
90 	VM_STAT("pip_write_exits", pip_write_exits),
91 	VM_STAT("vm_ioctl_irq_line", vm_ioctl_irq_line),
92 	VM_STAT("ls7a_ioapic_update", ls7a_ioapic_update),
93 	VM_STAT("ls7a_ioapic_set_irq", ls7a_ioapic_set_irq),
94 	VM_STAT("ls7a_msi_irq", ls7a_msi_irq),
95 	VM_STAT("ioapic_reg_write", ioapic_reg_write),
96 	VM_STAT("ioapic_reg_read", ioapic_reg_read),
97 	VM_STAT("set_ls7a_ioapic", set_ls7a_ioapic),
98 	VM_STAT("get_ls7a_ioapic", get_ls7a_ioapic),
99 	VM_STAT("set_ls3a_ext_irq", set_ls3a_ext_irq),
100 	VM_STAT("get_ls3a_ext_irq", get_ls3a_ext_irq),
101 	VM_STAT("ls3a_ext_irq", trigger_ls3a_ext_irq),
102 	{NULL}
103 };
104 
105 static int lvcpu_stat_get(void *address, u64 *val)
106 {
107 	*val = *(u64 *)address;
108 	return 0;
109 }
110 DEFINE_SIMPLE_ATTRIBUTE(lvcpu_stat_fops, lvcpu_stat_get, NULL, "%llu\n");
111 
112 static int vcpu_pid_get(void *arg, u64 *val)
113 {
114 	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)arg;
115 	if (vcpu)
116 		*val = pid_vnr(vcpu->pid);
117 	return 0;
118 }
119 DEFINE_SIMPLE_ATTRIBUTE(vcpu_pid_fops, vcpu_pid_get, NULL, "%llu\n");
120 
121 void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu, struct dentry *debugfs_dentry)
122 {
123 	struct kvm_stats_debugfs_item *p;
124 	debugfs_create_file("pid", 0444, debugfs_dentry, vcpu, &vcpu_pid_fops);
125 	for (p = vcpu_debugfs_entries; p->name && p->kind == KVM_STAT_VCPU; ++p) {
126 		debugfs_create_file(p->name, 0444, debugfs_dentry,
127 				(void *)vcpu + p->offset, &lvcpu_stat_fops);
128 	}
129 }
130 
131 bool kvm_trace_guest_mode_change;
132 static struct kvm_context __percpu *vmcs;
133 
134 int kvm_guest_mode_change_trace_reg(void)
135 {
136 	kvm_trace_guest_mode_change = 1;
137 	return 0;
138 }
139 
140 void kvm_guest_mode_change_trace_unreg(void)
141 {
142 	kvm_trace_guest_mode_change = 0;
143 }
144 
145 static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
146 {
147 	if (vcpu->arch.pv.pv_unhalted)
148 		return true;
149 
150 	return false;
151 }
152 
153 /*
154  * XXXKYMA: We are simulating a processor that has the WII bit set in
155  * Config7, so we are "runnable" if interrupts are pending.
156  */
157 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
158 {
159 	return !!(vcpu->arch.irq_pending) || kvm_vcpu_has_events(vcpu);
160 }
161 
162 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
163 {
164 	return false;
165 }
166 
167 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
168 {
169 	return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
170 }
171 
172 #ifdef CONFIG_PARAVIRT
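/*
 * Update the guest's steal time record. The guest registers a
 * kvm_steal_time page through the PV time attribute; here it is mapped,
 * the version field is bumped to an odd value while run_delay is
 * accumulated into st->steal, then bumped again so the guest only ever
 * observes a consistent (even) snapshot.
 */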
173 void kvm_update_stolen_time(struct kvm_vcpu *vcpu)
174 {
175 	struct kvm_host_map map;
176 	struct kvm_steal_time *st;
177 	int ret = 0;
178 
179 	if (vcpu->arch.st.guest_addr == 0)
180 		return;
181 
182 	ret = kvm_map_gfn(vcpu, vcpu->arch.st.guest_addr >> PAGE_SHIFT,
183 				&map, &vcpu->arch.st.cache, false);
184 	if (ret) {
185 		kvm_info("%s ret:%d\n", __func__, ret);
186 		return;
187 	}
188 	st = map.hva + offset_in_page(vcpu->arch.st.guest_addr);
189 	if (st->version & 1)
190 		st->version += 1; /* first time write, random junk */
191 	st->version += 1;
192 	smp_wmb();
193 	st->steal += current->sched_info.run_delay -
194 		vcpu->arch.st.last_steal;
195 	vcpu->arch.st.last_steal = current->sched_info.run_delay;
196 	smp_wmb();
197 	st->version += 1;
198 
199 	kvm_unmap_gfn(vcpu, &map, &vcpu->arch.st.cache, true, false);
200 }
201 
202 bool _kvm_pvtime_supported(void)
203 {
204 	return !!sched_info_on();
205 }
206 
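/*
 * Set the guest physical address of the steal time structure for this
 * vcpu. The address is copied from userspace via attr->addr, must be
 * 64-byte aligned and must be covered by a valid memslot.
 */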
207 int _kvm_pvtime_set_attr(struct kvm_vcpu *vcpu,
208 				struct kvm_device_attr *attr)
209 {
210 	u64 __user *user = (u64 __user *)attr->addr;
211 	struct kvm *kvm = vcpu->kvm;
212 	u64 ipa;
213 	int ret = 0;
214 	int idx;
215 
216 	if (!_kvm_pvtime_supported() ||
217 		attr->attr != KVM_LARCH_VCPU_PVTIME_IPA)
218 		return -ENXIO;
219 
220 	if (get_user(ipa, user))
221 		return -EFAULT;
222 	if (!IS_ALIGNED(ipa, 64))
223 		return -EINVAL;
224 
225 	/* Check the address is in a valid memslot */
226 	idx = srcu_read_lock(&kvm->srcu);
227 	if (kvm_is_error_hva(gfn_to_hva(kvm, ipa >> PAGE_SHIFT)))
228 		ret = -EINVAL;
229 	srcu_read_unlock(&kvm->srcu, idx);
230 
231 	if (!ret)
232 		vcpu->arch.st.guest_addr = ipa;
233 
234 	return ret;
235 }
236 
237 int _kvm_pvtime_get_attr(struct kvm_vcpu *vcpu,
238 				struct kvm_device_attr *attr)
239 {
240 	u64 __user *user = (u64 __user *)attr->addr;
241 	u64 ipa;
242 
243 	if (!_kvm_pvtime_supported() ||
244 		attr->attr != KVM_LARCH_VCPU_PVTIME_IPA)
245 		return -ENXIO;
246 
247 	ipa = vcpu->arch.st.guest_addr;
248 
249 	if (put_user(ipa, user))
250 		return -EFAULT;
251 
252 	return 0;
253 }
254 
255 int _kvm_pvtime_has_attr(struct kvm_vcpu *vcpu,
256 				struct kvm_device_attr *attr)
257 {
258 	switch (attr->attr) {
259 	case KVM_LARCH_VCPU_PVTIME_IPA:
260 		if (_kvm_pvtime_supported())
261 			return 0;
262 	}
263 
264 	return -ENXIO;
265 }
266 
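/*
 * Mark the vcpu as preempted in its steal time record so the guest can
 * see it has been scheduled out; kvm_steal_time_clear_preempted() clears
 * the flag again when the vcpu is loaded.
 */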
267 static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
268 {
269 	struct kvm_host_map map;
270 	struct kvm_steal_time *st;
271 	int ret = 0;
272 
273 	if (vcpu->arch.st.guest_addr == 0)
274 		return;
275 
276 	ret = kvm_map_gfn(vcpu, vcpu->arch.st.guest_addr >> PAGE_SHIFT,
277 				&map, &vcpu->arch.st.cache, false);
278 	if (ret) {
279 		kvm_info("%s ret:%d\n", __func__, ret);
280 		return;
281 	}
282 	st = map.hva + offset_in_page(vcpu->arch.st.guest_addr);
283 	if (st->version & 1)
284 		st->version += 1; /* first time write, random junk */
285 	st->version += 1;
286 	smp_wmb();
287 	st->preempted = KVM_VCPU_PREEMPTED;
288 	smp_wmb();
289 	st->version += 1;
290 
291 	kvm_unmap_gfn(vcpu, &map, &vcpu->arch.st.cache, true, false);
292 }
293 
294 static void kvm_steal_time_clear_preempted(struct kvm_vcpu *vcpu)
295 {
296 	struct kvm_host_map map;
297 	struct kvm_steal_time *st;
298 	int ret = 0;
299 
300 	if (vcpu->arch.st.guest_addr == 0)
301 		return;
302 
303 	ret = kvm_map_gfn(vcpu, vcpu->arch.st.guest_addr >> PAGE_SHIFT,
304 				&map, &vcpu->arch.st.cache, false);
305 	if (ret) {
306 		kvm_info("%s ret:%d\n", __func__, ret);
307 		return;
308 	}
309 	st = map.hva + offset_in_page(vcpu->arch.st.guest_addr);
310 	if (st->version & 1)
311 		st->version += 1; /* first time write, random junk */
312 	st->version += 1;
313 	smp_wmb();
314 	st->preempted = 0;
315 	smp_wmb();
316 	st->version += 1;
317 
318 	kvm_unmap_gfn(vcpu, &map, &vcpu->arch.st.cache, true, false);
319 }
320 #endif
321 
322 int kvm_arch_hardware_enable(void)
323 {
324 	unsigned long gcfg = 0;
325 
326 	/* First init gtlbc, gcfg, gstat and gintc. All guests use the same config */
327 	kvm_clear_csr_gtlbc(KVM_GTLBC_USETGID | KVM_GTLBC_TOTI);
328 	kvm_write_csr_gcfg(0);
329 	kvm_write_csr_gstat(0);
330 	kvm_write_csr_gintc(0);
331 
332 	/*
333 	 * Enable virtualization features granting guest direct control of
334 	 * certain features:
335 	 * GCI=2:       Trap on init or unimplemented cache instructions.
336 	 * TORU=0:      Trap on Root Unimplemented.
337 	 * CACTRL=1:    Root controls the cache.
338 	 * TOP=0:       Trap on Privilege.
339 	 * TOE=0:       Trap on Exception.
340 	 * TIT=0:       Trap on Timer.
341 	 */
342 	if (cpu_has_gcip_all)
343 		gcfg |= KVM_GCFG_GCI_SECURE;
344 	if (cpu_has_matc_root)
345 		gcfg |= KVM_GCFG_MATC_ROOT;
346 
347 	gcfg |= KVM_GCFG_TIT;
348 	kvm_write_csr_gcfg(gcfg);
349 
350 	kvm_flush_tlb_all();
351 
352 	/* Enable using TGID */
353 	kvm_set_csr_gtlbc(KVM_GTLBC_USETGID);
354 	kvm_debug("gtlbc:%llx gintc:%llx gstat:%llx gcfg:%llx",
355 			kvm_read_csr_gtlbc(), kvm_read_csr_gintc(),
356 			kvm_read_csr_gstat(), kvm_read_csr_gcfg());
357 	return 0;
358 }
359 
360 void kvm_arch_hardware_disable(void)
361 {
362 	kvm_clear_csr_gtlbc(KVM_GTLBC_USETGID | KVM_GTLBC_TOTI);
363 	kvm_write_csr_gcfg(0);
364 	kvm_write_csr_gstat(0);
365 	kvm_write_csr_gintc(0);
366 
367 	/* Flush any remaining guest TLB entries */
368 	kvm_flush_tlb_all();
369 }
370 
371 int kvm_arch_hardware_setup(void *opaque)
372 {
373 	return 0;
374 }
375 
376 int kvm_arch_check_processor_compat(void *rtn)
377 {
378 	return 0;
379 }
380 
381 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
382 {
383 	/* Allocate page table to map GPA -> RPA */
384 	kvm->arch.gpa_mm.pgd = kvm_pgd_alloc();
385 	if (!kvm->arch.gpa_mm.pgd)
386 		return -ENOMEM;
387 
388 	kvm->arch.cpucfg_lasx = (read_cpucfg(LOONGARCH_CPUCFG2) &
389 					  CPUCFG2_LASX);
390 
391 	_kvm_init_iocsr(kvm);
392 	kvm->arch.vmcs = vmcs;
393 
394 	return 0;
395 }
396 
397 static void kvm_free_vcpus(struct kvm *kvm)
398 {
399 	unsigned int i;
400 	struct kvm_vcpu *vcpu;
401 
402 	kvm_for_each_vcpu(i, vcpu, kvm) {
403 		kvm_vcpu_destroy(vcpu);
404 	}
405 
406 	mutex_lock(&kvm->lock);
407 
408 	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
409 		kvm->vcpus[i] = NULL;
410 
411 	atomic_set(&kvm->online_vcpus, 0);
412 
413 	mutex_unlock(&kvm->lock);
414 }
415 
416 void kvm_arch_destroy_vm(struct kvm *kvm)
417 {
418 	kvm_destroy_ls3a_ipi(kvm);
419 	kvm_destroy_ls7a_ioapic(kvm);
420 	kvm_destroy_ls3a_ext_irq(kvm);
421 	kvm_free_vcpus(kvm);
422 	_kvm_destroy_mm(kvm);
423 }
424 
425 long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl,
426 			unsigned long arg)
427 {
428 	return -ENOIOCTLCMD;
429 }
430 
431 int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
432 			    unsigned long npages)
433 {
434 	return 0;
435 }
436 
437 int kvm_arch_prepare_memory_region(struct kvm *kvm,
438 				   struct kvm_memory_slot *memslot,
439 				   const struct kvm_userspace_memory_region *mem,
440 				   enum kvm_mr_change change)
441 {
442 	return 0;
443 }
444 
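/*
 * Allocate a fresh vpid for this vcpu on the given physical cpu. When
 * the counter wraps around the hardware GID field, the version bits are
 * advanced and the whole TLB is flushed to start a new vpid cycle;
 * vpid 0 stays reserved for the root.
 */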
445 static void _kvm_new_vpid(unsigned long cpu, struct kvm_vcpu *vcpu)
446 {
447 	struct kvm_context *context;
448 	unsigned long vpid;
449 
450 	context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu);
451 	vpid = context->vpid_cache;
452 	if (!(++vpid & context->gid_mask)) {
453 		if (!vpid)              /* fix version if needed */
454 			vpid = context->gid_fisrt_ver;
455 
456 		++vpid;         /* vpid 0 reserved for root */
457 
458 		/* start new vpid cycle */
459 		kvm_flush_tlb_all();
460 	}
461 
462 	context->vpid_cache = vpid;
463 	vcpu->arch.vpid[cpu] = vpid;
464 }
465 
466 /* Returns 1 if the guest TLB may be clobbered */
467 static int _kvm_check_requests(struct kvm_vcpu *vcpu, int cpu)
468 {
469 	int ret = 0;
470 	int i;
471 
472 	if (!kvm_request_pending(vcpu))
473 		return 0;
474 
475 	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
476 		/* Drop all vpids for this VCPU */
477 		for_each_possible_cpu(i)
478 			vcpu->arch.vpid[i] = 0;
479 		/* This will clobber guest TLB contents too */
480 		ret = 1;
481 	}
482 
483 	return ret;
484 }
485 
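/*
 * Make sure this vcpu has a valid vpid on the current cpu before
 * entering guest mode: allocate a new one if the vcpu has migrated or
 * its cached vpid belongs to an old version, then program the GID
 * field of CSR.GSTAT with it.
 */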
486 static void _kvm_update_vmid(struct kvm_vcpu *vcpu, int cpu)
487 {
488 	struct kvm_context *context;
489 	bool migrated;
490 	unsigned int gstinfo_gidmask, gstinfo_gid = 0;
491 
492 	/*
493 	 * Are we entering guest context on a different CPU to last time?
494 	 * If so, the VCPU's guest TLB state on this CPU may be stale.
495 	 */
496 	context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu);
497 	migrated = (vcpu->arch.last_exec_cpu != cpu);
498 	vcpu->arch.last_exec_cpu = cpu;
499 
500 	/*
501 	 * Check if our vpid is of an older version and thus invalid.
502 	 *
503 	 * We also discard the stored vpid if we've executed on
504 	 * another CPU, as the guest mappings may have changed without
505 	 * hypervisor knowledge.
506 	 */
507 	gstinfo_gidmask = context->gid_mask << KVM_GSTAT_GID_SHIFT;
508 	if (migrated ||
509 			(vcpu->arch.vpid[cpu] ^ context->vpid_cache) &
510 			context->gid_ver_mask) {
511 		_kvm_new_vpid(cpu, vcpu);
512 		trace_kvm_vpid_change(vcpu, vcpu->arch.vpid[cpu]);
513 	}
514 	gstinfo_gid = (vcpu->arch.vpid[cpu] & context->gid_mask) <<
515 		KVM_GSTAT_GID_SHIFT;
516 
517 	/* Restore GSTAT(0x50).vpid */
518 	kvm_change_csr_gstat(gstinfo_gidmask, gstinfo_gid);
519 }
520 
521 /*
522  * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
523  */
524 static int _kvm_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
525 {
526 	unsigned long exst = vcpu->arch.host_estat;
527 	u32 intr = exst & 0x1fff; /* ignore NMI */
528 	u32 exccode = (exst & KVM_ESTAT_EXC) >> KVM_ESTAT_EXC_SHIFT;
529 	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
530 	int ret = RESUME_GUEST, cpu;
531 
532 	vcpu->mode = OUTSIDE_GUEST_MODE;
533 
534 	/* Set a default exit reason */
535 	run->exit_reason = KVM_EXIT_UNKNOWN;
536 	run->ready_for_interrupt_injection = 1;
537 
538 	/*
539 	 * Set the appropriate status bits based on host CPU features,
540 	 * before we hit the scheduler
541 	 */
542 
543 	local_irq_enable();
544 
545 	kvm_debug("%s: exst: %lx, PC: %p, kvm_run: %p, kvm_vcpu: %p\n",
546 			__func__, exst, opc, run, vcpu);
547 	trace_kvm_exit(vcpu, exccode);
548 	if (exccode) {
549 		vcpu->stat.excep_exits[exccode]++;
550 		ret = _kvm_handle_fault(vcpu, exccode);
551 	} else {
552 		WARN(!intr, "suspicious vm exiting");
553 		++vcpu->stat.int_exits;
554 
555 		if (need_resched())
556 			cond_resched();
557 
558 		ret = RESUME_GUEST;
559 	}
560 
561 #ifdef CONFIG_PARAVIRT
562 	if (kvm_check_request(KVM_REQ_RECORD_STEAL, vcpu))
563 		kvm_update_stolen_time(vcpu);
564 #endif
565 
566 	cond_resched();
567 
568 	local_irq_disable();
569 
570 	if (ret == RESUME_GUEST) {
571 		/* Only check for signals if not already exiting to userspace */
572 		if (signal_pending(current)) {
573 			run->exit_reason = KVM_EXIT_INTR;
574 			ret = (-EINTR << 2) | RESUME_HOST;
575 			++vcpu->stat.signal_exits;
576 			trace_kvm_exit(vcpu, KVM_TRACE_EXIT_SIGNAL);
577 			return ret;
578 		}
579 
580 		trace_kvm_reenter(vcpu);
581 
582 		kvm_acquire_timer(vcpu);
583 		_kvm_deliver_intr(vcpu);
584 
585 		/*
586 		 * Make sure the read of VCPU requests in vcpu_reenter()
587 		 * callback is not reordered ahead of the write to vcpu->mode,
588 		 * or we could miss a TLB flush request while the requester sees
589 		 * the VCPU as outside of guest mode and not needing an IPI.
590 		 */
591 		smp_store_mb(vcpu->mode, IN_GUEST_MODE);
592 
593 		cpu = smp_processor_id();
594 		_kvm_check_requests(vcpu, cpu);
595 		_kvm_update_vmid(vcpu, cpu);
596 	}
597 
598 	return ret;
599 }
600 
601 /* low level hrtimer wake routine */
602 static enum hrtimer_restart kvm_swtimer_wakeup(struct hrtimer *timer)
603 {
604 	struct kvm_vcpu *vcpu;
605 
606 	vcpu = container_of(timer, struct kvm_vcpu, arch.swtimer);
607 
608 	_kvm_queue_irq(vcpu, LARCH_INT_TIMER);
609 
610 	rcuwait_wake_up(&vcpu->wait);
611 
612 	return kvm_count_timeout(vcpu);
613 }
614 
615 int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
616 {
617 	return 0;
618 }
619 
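/*
 * Architecture specific vcpu init: set up the wakeup hrtimer, the
 * guest/host exception entries, the software CSR array and the constant
 * timer frequency, and put the guest into its architectural reset state.
 */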
620 int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
621 {
622 	int i;
623 	unsigned long timer_hz;
624 	struct loongarch_csrs *csr = vcpu->arch.csr;
625 
626 	for_each_possible_cpu(i)
627 		vcpu->arch.vpid[i] = 0;
628 
629 	hrtimer_init(&vcpu->arch.swtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
630 	vcpu->arch.swtimer.function = kvm_swtimer_wakeup;
631 	vcpu->arch.fpu_enabled = true;
632 	vcpu->arch.lsx_enabled = true;
633 
634 	vcpu->kvm->arch.online_vcpus = vcpu->vcpu_id + 1;
635 
636 	vcpu->arch.host_eentry = kvm_csr_readq(KVM_CSR_EENTRY);
637 	vcpu->arch.guest_eentry = (unsigned long)kvm_exception_entry;
638 	vcpu->arch.vcpu_run = kvm_enter_guest;
639 	vcpu->arch.handle_exit = _kvm_handle_exit;
640 	vcpu->arch.csr = kzalloc(sizeof(struct loongarch_csrs), GFP_KERNEL);
641 	/*
642 	 * All KVM exceptions share one exception entry, and the host <-> guest
643 	 * switch also switches the excfg.VS field, so keep the host excfg.VS info here.
644 	 */
645 	vcpu->arch.host_ecfg = (kvm_read_csr_ecfg() & KVM_ECFG_VS);
646 
647 	if (!vcpu->arch.csr)
648 		return -ENOMEM;
649 
650 	/* Init */
651 	vcpu->arch.last_sched_cpu = -1;
652 	vcpu->arch.last_exec_cpu = -1;
653 
654 	/*
655 	 * Initialize guest register state to valid architectural reset state.
656 	 */
657 	timer_hz = calc_const_freq();
658 	kvm_init_timer(vcpu, timer_hz);
659 
660 	/* Set the guest's initial mode: direct address (DA) translation */
661 	kvm_write_sw_gcsr(csr, KVM_CSR_CRMD, KVM_CRMD_DA);
662 
663 	/* Set cpuid */
664 	kvm_write_sw_gcsr(csr, KVM_CSR_TMID, vcpu->vcpu_id);
665 
666 	/* start with no pending virtual guest interrupts */
667 	csr->csrs[KVM_CSR_GINTC] = 0;
668 
669 	return 0;
670 }
671 
672 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
673 {
674 	int cpu;
675 	struct kvm_context *context;
676 	struct gfn_to_pfn_cache *cache = &vcpu->arch.st.cache;
677 
678 	hrtimer_cancel(&vcpu->arch.swtimer);
679 	kvm_mmu_free_memory_caches(vcpu);
680 	if (vcpu->arch.st.guest_addr)
681 		kvm_release_pfn(cache->pfn, cache->dirty, cache);
682 	kfree(vcpu->arch.csr);
683 
684 	/*
685 	 * If the VCPU is freed and reused as another VCPU, we don't want the
686 	 * matching pointer wrongly hanging around in last_vcpu.
687 	 */
688 	for_each_possible_cpu(cpu) {
689 		context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu);
690 		if (context->last_vcpu == vcpu)
691 			context->last_vcpu = NULL;
692 	}
693 }
694 
695 #define KVM_GUESTDBG_VALID_MASK (KVM_GUESTDBG_ENABLE | \
696 		KVM_GUESTDBG_USE_SW_BP | KVM_GUESTDBG_SINGLESTEP)
697 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
698 					struct kvm_guest_debug *dbg)
699 {
700 	int ret = 0;
701 
702 	if (dbg->control & ~KVM_GUESTDBG_VALID_MASK) {
703 		ret = -EINVAL;
704 		goto out;
705 	}
706 	if (dbg->control & KVM_GUESTDBG_ENABLE) {
707 		vcpu->guest_debug = dbg->control;
708 		/* No hardware breakpoint */
709 	} else {
710 		vcpu->guest_debug = 0;
711 	}
712 out:
713 	return ret;
714 }
715 
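/*
 * Main entry for KVM_RUN. Complete any pending MMIO/IOCSR read or
 * hypercall return left over from the previous exit, drop the host FPU
 * and LBT context, then enter the guest with interrupts disabled after
 * delivering pending interrupts and refreshing the vpid.
 */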
716 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
717 {
718 	int r = -EINTR;
719 	int cpu;
720 
721 	vcpu_load(vcpu);
722 
723 	kvm_sigset_activate(vcpu);
724 
725 	if (vcpu->mmio_needed) {
726 		if (!vcpu->mmio_is_write)
727 			_kvm_complete_mmio_read(vcpu, vcpu->run);
728 		vcpu->mmio_needed = 0;
729 	} else if (vcpu->arch.is_hypcall) {
730 		/* set the hypercall return value in register a0 */
731 		vcpu->arch.gprs[REG_A0] = vcpu->run->hypercall.ret;
732 		vcpu->arch.is_hypcall = 0;
733 	}
734 
735 	if (vcpu->run->exit_reason == KVM_EXIT_LOONGARCH_IOCSR) {
736 		if (!vcpu->run->iocsr_io.is_write)
737 			_kvm_complete_iocsr_read(vcpu, vcpu->run);
738 	}
739 
740 	/* clear exit_reason */
741 	vcpu->run->exit_reason = KVM_EXIT_UNKNOWN;
742 	if (vcpu->run->immediate_exit)
743 		goto out;
744 
745 	lose_fpu(1);
746 	lose_lbt(1);
747 
748 #ifdef CONFIG_PARAVIRT
749 	if (kvm_check_request(KVM_REQ_RECORD_STEAL, vcpu))
750 		kvm_update_stolen_time(vcpu);
751 #endif
752 	local_irq_disable();
753 	guest_enter_irqoff();
754 	trace_kvm_enter(vcpu);
755 
756 	/*
757 	 * Make sure the read of VCPU requests in vcpu_run() callback is not
758 	 * reordered ahead of the write to vcpu->mode, or we could miss a TLB
759 	 * flush request while the requester sees the VCPU as outside of guest
760 	 * mode and not needing an IPI.
761 	 */
762 	smp_store_mb(vcpu->mode, IN_GUEST_MODE);
763 
764 	cpu = smp_processor_id();
765 	kvm_acquire_timer(vcpu);
766 	/* Check if we have any exceptions/interrupts pending */
767 	_kvm_deliver_intr(vcpu);
768 
769 	_kvm_check_requests(vcpu, cpu);
770 	_kvm_update_vmid(vcpu, cpu);
771 	r = kvm_enter_guest(vcpu->run, vcpu);
772 
773 	trace_kvm_out(vcpu);
774 	guest_exit_irqoff();
775 	local_irq_enable();
776 
777 out:
778 	kvm_sigset_deactivate(vcpu);
779 
780 	vcpu_put(vcpu);
781 	return r;
782 }
783 
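/*
 * Inject or clear a vcpu interrupt from userspace: a negative irq number
 * dequeues the interrupt, a non-negative one queues it and kicks the vcpu.
 */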
784 int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
785 			     struct kvm_loongarch_interrupt *irq)
786 {
787 	int intr = (int)irq->irq;
788 
789 	if (intr < 0) {
790 		_kvm_dequeue_irq(vcpu, -intr);
791 		return 0;
792 	}
793 
794 	_kvm_queue_irq(vcpu, intr);
795 	kvm_vcpu_kick(vcpu);
796 	return 0;
797 }
798 
799 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
800 				    struct kvm_mp_state *mp_state)
801 {
802 	return -ENOIOCTLCMD;
803 }
804 
805 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
806 				    struct kvm_mp_state *mp_state)
807 {
808 	return -ENOIOCTLCMD;
809 }
810 
811 /**
812  * kvm_migrate_count() - Migrate timer.
813  * @vcpu:       Virtual CPU.
814  *
815  * Migrate hrtimer to the current CPU by cancelling and restarting it
816  * if it was running prior to being cancelled.
817  *
818  * Must be called when the VCPU is migrated to a different CPU to ensure that
819  * timer expiry during guest execution interrupts the guest and causes the
820  * interrupt to be delivered in a timely manner.
821  */
822 static void kvm_migrate_count(struct kvm_vcpu *vcpu)
823 {
824 	if (hrtimer_cancel(&vcpu->arch.swtimer))
825 		hrtimer_restart(&vcpu->arch.swtimer);
826 }
827 
828 static int _kvm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
829 {
830 	struct kvm_context *context;
831 	struct loongarch_csrs *csr = vcpu->arch.csr;
832 	bool migrated, all;
833 
834 	/*
835 	 * Have we migrated to a different CPU?
836 	 * If so, any old guest TLB state may be stale.
837 	 */
838 	migrated = (vcpu->arch.last_sched_cpu != cpu);
839 
840 	/*
841 	 * Was this the last VCPU to run on this CPU?
842 	 * If not, any old guest state from this VCPU will have been clobbered.
843 	 */
844 	context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu);
845 	all = migrated || (context->last_vcpu != vcpu);
846 	context->last_vcpu = vcpu;
847 
848 	/*
849 	 * Restore timer state regardless
850 	 */
851 	kvm_restore_timer(vcpu);
852 
853 	/* Control guest page CCA attribute */
854 	kvm_change_csr_gcfg(KVM_GCFG_MATC_MASK, KVM_GCFG_MATC_ROOT);
855 	/* Restore hardware perf csr */
856 	kvm_restore_hw_perf(vcpu);
857 
858 #ifdef CONFIG_PARAVIRT
859 	kvm_make_request(KVM_REQ_RECORD_STEAL, vcpu);
860 #endif
861 	/* Don't bother restoring registers multiple times unless necessary */
862 	if (!all)
863 		return 0;
864 
865 	kvm_write_csr_gcntc((ulong)vcpu->kvm->arch.stablecounter_gftoffset);
866 	/*
867 	 * Restore guest CSR registers
868 	 */
869 	kvm_restore_hw_gcsr(csr, KVM_CSR_CRMD);
870 	kvm_restore_hw_gcsr(csr, KVM_CSR_PRMD);
871 	kvm_restore_hw_gcsr(csr, KVM_CSR_EUEN);
872 	kvm_restore_hw_gcsr(csr, KVM_CSR_MISC);
873 	kvm_restore_hw_gcsr(csr, KVM_CSR_ECFG);
874 	kvm_restore_hw_gcsr(csr, KVM_CSR_ERA);
875 	kvm_restore_hw_gcsr(csr, KVM_CSR_BADV);
876 	kvm_restore_hw_gcsr(csr, KVM_CSR_BADI);
877 	kvm_restore_hw_gcsr(csr, KVM_CSR_EENTRY);
878 	kvm_restore_hw_gcsr(csr, KVM_CSR_TLBIDX);
879 	kvm_restore_hw_gcsr(csr, KVM_CSR_TLBEHI);
880 	kvm_restore_hw_gcsr(csr, KVM_CSR_TLBELO0);
881 	kvm_restore_hw_gcsr(csr, KVM_CSR_TLBELO1);
882 	kvm_restore_hw_gcsr(csr, KVM_CSR_ASID);
883 	kvm_restore_hw_gcsr(csr, KVM_CSR_PGDL);
884 	kvm_restore_hw_gcsr(csr, KVM_CSR_PGDH);
885 	kvm_restore_hw_gcsr(csr, KVM_CSR_PWCTL0);
886 	kvm_restore_hw_gcsr(csr, KVM_CSR_PWCTL1);
887 	kvm_restore_hw_gcsr(csr, KVM_CSR_STLBPGSIZE);
888 	kvm_restore_hw_gcsr(csr, KVM_CSR_RVACFG);
889 	kvm_restore_hw_gcsr(csr, KVM_CSR_CPUID);
890 	kvm_restore_hw_gcsr(csr, KVM_CSR_KS0);
891 	kvm_restore_hw_gcsr(csr, KVM_CSR_KS1);
892 	kvm_restore_hw_gcsr(csr, KVM_CSR_KS2);
893 	kvm_restore_hw_gcsr(csr, KVM_CSR_KS3);
894 	kvm_restore_hw_gcsr(csr, KVM_CSR_KS4);
895 	kvm_restore_hw_gcsr(csr, KVM_CSR_KS5);
896 	kvm_restore_hw_gcsr(csr, KVM_CSR_KS6);
897 	kvm_restore_hw_gcsr(csr, KVM_CSR_KS7);
898 	kvm_restore_hw_gcsr(csr, KVM_CSR_TMID);
899 	kvm_restore_hw_gcsr(csr, KVM_CSR_CNTC);
900 	kvm_restore_hw_gcsr(csr, KVM_CSR_TLBRENTRY);
901 	kvm_restore_hw_gcsr(csr, KVM_CSR_TLBRBADV);
902 	kvm_restore_hw_gcsr(csr, KVM_CSR_TLBRERA);
903 	kvm_restore_hw_gcsr(csr, KVM_CSR_TLBRSAVE);
904 	kvm_restore_hw_gcsr(csr, KVM_CSR_TLBRELO0);
905 	kvm_restore_hw_gcsr(csr, KVM_CSR_TLBRELO1);
906 	kvm_restore_hw_gcsr(csr, KVM_CSR_TLBREHI);
907 	kvm_restore_hw_gcsr(csr, KVM_CSR_TLBRPRMD);
908 	kvm_restore_hw_gcsr(csr, KVM_CSR_DMWIN0);
909 	kvm_restore_hw_gcsr(csr, KVM_CSR_DMWIN1);
910 	kvm_restore_hw_gcsr(csr, KVM_CSR_DMWIN2);
911 	kvm_restore_hw_gcsr(csr, KVM_CSR_DMWIN3);
912 	kvm_restore_hw_gcsr(csr, KVM_CSR_LLBCTL);
913 
914 	/* restore Root.Guestexcept from unused Guest guestexcept register */
915 	kvm_write_csr_gintc(csr->csrs[KVM_CSR_GINTC]);
916 
917 	/*
918 	 * We should clear linked load bit to break interrupted atomics. This
919 	 * prevents a SC on the next VCPU from succeeding by matching a LL on
920 	 * the previous VCPU.
921 	 */
922 	if (vcpu->kvm->created_vcpus > 1)
923 		kvm_set_gcsr_llbctl(KVM_LLBCTL_WCLLB);
924 
925 	return 0;
926 }
927 
928 /* Restore ASID once we are scheduled back after preemption */
929 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
930 {
931 	unsigned long flags;
932 
933 	local_irq_save(flags);
934 	vcpu->cpu = cpu;
935 	if (vcpu->arch.last_sched_cpu != cpu) {
936 		kvm_debug("[%d->%d]KVM VCPU[%d] switch\n",
937 				vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
938 		/*
939 		 * Migrate the timer interrupt to the current CPU so that it
940 		 * always interrupts the guest and synchronously triggers a
941 		 * guest timer interrupt.
942 		 */
943 		kvm_migrate_count(vcpu);
944 	}
945 
946 	/* restore guest state to registers */
947 	_kvm_vcpu_load(vcpu, cpu);
948 	kvm_steal_time_clear_preempted(vcpu);
949 	local_irq_restore(flags);
950 }
951 
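/*
 * Save the guest context before the vcpu is scheduled out: drop FPU and
 * hardware perf state, copy the hardware guest CSRs back into the
 * software csr array, and stash Root.GINTC alongside the timer state.
 */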
952 static int _kvm_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
953 {
954 	struct loongarch_csrs *csr = vcpu->arch.csr;
955 
956 	kvm_lose_fpu(vcpu);
957 	kvm_lose_hw_perf(vcpu);
958 
959 	kvm_save_hw_gcsr(csr, KVM_CSR_CRMD);
960 	kvm_save_hw_gcsr(csr, KVM_CSR_PRMD);
961 	kvm_save_hw_gcsr(csr, KVM_CSR_EUEN);
962 	kvm_save_hw_gcsr(csr, KVM_CSR_MISC);
963 	kvm_save_hw_gcsr(csr, KVM_CSR_ECFG);
964 	kvm_save_hw_gcsr(csr, KVM_CSR_ERA);
965 	kvm_save_hw_gcsr(csr, KVM_CSR_BADV);
966 	kvm_save_hw_gcsr(csr, KVM_CSR_BADI);
967 	kvm_save_hw_gcsr(csr, KVM_CSR_EENTRY);
968 	kvm_save_hw_gcsr(csr, KVM_CSR_TLBIDX);
969 	kvm_save_hw_gcsr(csr, KVM_CSR_TLBEHI);
970 	kvm_save_hw_gcsr(csr, KVM_CSR_TLBELO0);
971 	kvm_save_hw_gcsr(csr, KVM_CSR_TLBELO1);
972 	kvm_save_hw_gcsr(csr, KVM_CSR_ASID);
973 	kvm_save_hw_gcsr(csr, KVM_CSR_PGDL);
974 	kvm_save_hw_gcsr(csr, KVM_CSR_PGDH);
975 	kvm_save_hw_gcsr(csr, KVM_CSR_PGD);
976 	kvm_save_hw_gcsr(csr, KVM_CSR_PWCTL0);
977 	kvm_save_hw_gcsr(csr, KVM_CSR_PWCTL1);
978 	kvm_save_hw_gcsr(csr, KVM_CSR_STLBPGSIZE);
979 	kvm_save_hw_gcsr(csr, KVM_CSR_RVACFG);
980 	kvm_save_hw_gcsr(csr, KVM_CSR_CPUID);
981 	kvm_save_hw_gcsr(csr, KVM_CSR_PRCFG1);
982 	kvm_save_hw_gcsr(csr, KVM_CSR_PRCFG2);
983 	kvm_save_hw_gcsr(csr, KVM_CSR_PRCFG3);
984 	kvm_save_hw_gcsr(csr, KVM_CSR_KS0);
985 	kvm_save_hw_gcsr(csr, KVM_CSR_KS1);
986 	kvm_save_hw_gcsr(csr, KVM_CSR_KS2);
987 	kvm_save_hw_gcsr(csr, KVM_CSR_KS3);
988 	kvm_save_hw_gcsr(csr, KVM_CSR_KS4);
989 	kvm_save_hw_gcsr(csr, KVM_CSR_KS5);
990 	kvm_save_hw_gcsr(csr, KVM_CSR_KS6);
991 	kvm_save_hw_gcsr(csr, KVM_CSR_KS7);
992 	kvm_save_hw_gcsr(csr, KVM_CSR_TMID);
993 	kvm_save_hw_gcsr(csr, KVM_CSR_CNTC);
994 	kvm_save_hw_gcsr(csr, KVM_CSR_LLBCTL);
995 	kvm_save_hw_gcsr(csr, KVM_CSR_TLBRENTRY);
996 	kvm_save_hw_gcsr(csr, KVM_CSR_TLBRBADV);
997 	kvm_save_hw_gcsr(csr, KVM_CSR_TLBRERA);
998 	kvm_save_hw_gcsr(csr, KVM_CSR_TLBRSAVE);
999 	kvm_save_hw_gcsr(csr, KVM_CSR_TLBRELO0);
1000 	kvm_save_hw_gcsr(csr, KVM_CSR_TLBRELO1);
1001 	kvm_save_hw_gcsr(csr, KVM_CSR_TLBREHI);
1002 	kvm_save_hw_gcsr(csr, KVM_CSR_TLBRPRMD);
1003 	kvm_save_hw_gcsr(csr, KVM_CSR_DMWIN0);
1004 	kvm_save_hw_gcsr(csr, KVM_CSR_DMWIN1);
1005 	kvm_save_hw_gcsr(csr, KVM_CSR_DMWIN2);
1006 	kvm_save_hw_gcsr(csr, KVM_CSR_DMWIN3);
1007 
1008 	/* save Root.Guestexcept in unused Guest guestexcept register */
1009 	kvm_save_timer(vcpu);
1010 	csr->csrs[KVM_CSR_GINTC] = kvm_read_csr_gintc();
1011 	return 0;
1012 }
1013 
1014 /* ASID can change if another task is scheduled during preemption */
1015 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
1016 {
1017 	unsigned long flags;
1018 	int cpu;
1019 
1020 	local_irq_save(flags);
1021 	cpu = smp_processor_id();
1022 	vcpu->arch.last_sched_cpu = cpu;
1023 	vcpu->cpu = -1;
1024 
1025 	/* save guest state in registers */
1026 	_kvm_vcpu_put(vcpu, cpu);
1027 	kvm_steal_time_set_preempted(vcpu);
1028 	local_irq_restore(flags);
1029 }
1030 
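/*
 * Read a single register for KVM_GET_ONE_REG: CSR ids are handled by
 * _kvm_getcsr() first, then the LBT scratch registers and the stable
 * counter, with the software CSR array as the fallback.
 */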
1031 static int _kvm_get_one_reg(struct kvm_vcpu *vcpu,
1032 		const struct kvm_one_reg *reg, s64 *v)
1033 {
1034 	struct loongarch_csrs *csr = vcpu->arch.csr;
1035 	int reg_idx, ret;
1036 
1037 	if ((reg->id & KVM_IOC_CSRID(0)) == KVM_IOC_CSRID(0)) {
1038 		reg_idx = KVM_GET_IOC_CSRIDX(reg->id);
1039 		ret = _kvm_getcsr(vcpu, reg_idx, v, 0);
1040 		if (ret == 0)
1041 			return ret;
1042 	}
1043 
1044 	switch (reg->id) {
1045 	case KVM_REG_LBT_SCR0:
1046 		*v = vcpu->arch.lbt.scr0;
1047 		break;
1048 	case KVM_REG_LBT_SCR1:
1049 		*v = vcpu->arch.lbt.scr1;
1050 		break;
1051 	case KVM_REG_LBT_SCR2:
1052 		*v = vcpu->arch.lbt.scr2;
1053 		break;
1054 	case KVM_REG_LBT_SCR3:
1055 		*v = vcpu->arch.lbt.scr3;
1056 		break;
1057 	case KVM_REG_LBT_FLAGS:
1058 		*v = vcpu->arch.lbt.eflags;
1059 		break;
1060 	case KVM_REG_LBT_FTOP:
1061 		*v = vcpu->arch.fpu.ftop;
1062 		break;
1063 
1064 	case KVM_REG_LOONGARCH_COUNTER:
1065 		*v = drdtime() + vcpu->kvm->arch.stablecounter_gftoffset;
1066 		break;
1067 	default:
1068 		if ((reg->id & KVM_REG_LOONGARCH_MASK) != KVM_REG_LOONGARCH_CSR)
1069 			return -EINVAL;
1070 
1071 		reg_idx = KVM_GET_IOC_CSRIDX(reg->id);
1072 		if (reg_idx < CSR_ALL_SIZE)
1073 			*v = kvm_read_sw_gcsr(csr, reg_idx);
1074 		else
1075 			return -EINVAL;
1076 	}
1077 	return 0;
1078 }
1079 
1080 static int _kvm_set_one_reg(struct kvm_vcpu *vcpu,
1081 		const struct kvm_one_reg *reg,
1082 		s64 v)
1083 {
1084 	struct loongarch_csrs *csr = vcpu->arch.csr;
1085 	struct gfn_to_pfn_cache *cache;
1086 	int ret = 0;
1087 	unsigned long flags;
1088 	u64 val;
1089 	int reg_idx;
1090 
1091 	val = v;
1092 	if ((reg->id & KVM_IOC_CSRID(0)) == KVM_IOC_CSRID(0)) {
1093 		reg_idx = KVM_GET_IOC_CSRIDX(reg->id);
1094 		ret = _kvm_setcsr(vcpu, reg_idx, &val, 0);
1095 		if (ret == 0)
1096 			return ret;
1097 	}
1098 
1099 	switch (reg->id) {
1100 	case KVM_REG_LBT_SCR0:
1101 		vcpu->arch.lbt.scr0 = val;
1102 		break;
1103 	case KVM_REG_LBT_SCR1:
1104 		vcpu->arch.lbt.scr1 = val;
1105 		break;
1106 	case KVM_REG_LBT_SCR2:
1107 		vcpu->arch.lbt.scr2 = val;
1108 		break;
1109 	case KVM_REG_LBT_SCR3:
1110 		vcpu->arch.lbt.scr3 = val;
1111 		break;
1112 	case KVM_REG_LBT_FLAGS:
1113 		vcpu->arch.lbt.eflags = val;
1114 		break;
1115 	case KVM_REG_LBT_FTOP:
1116 		vcpu->arch.fpu.ftop = val;
1117 		break;
1118 
1119 	case KVM_REG_LOONGARCH_COUNTER:
1120 		local_irq_save(flags);
1121 		/*
1122 		 * gftoffset is relative to the board, not the vcpu;
1123 		 * on an SMP system only the first write sets it
1124 		 */
1125 		if (!vcpu->kvm->arch.stablecounter_gftoffset)
1126 			vcpu->kvm->arch.stablecounter_gftoffset = (signed long)(v - drdtime());
1127 		kvm_write_csr_gcntc((ulong)vcpu->kvm->arch.stablecounter_gftoffset);
1128 		local_irq_restore(flags);
1129 		break;
1130 	case KVM_REG_LOONGARCH_VCPU_RESET:
1131 		cache = &vcpu->arch.st.cache;
1132 		kvm_reset_timer(vcpu);
1133 		if (vcpu->vcpu_id == 0)
1134 			kvm_setup_ls3a_extirq(vcpu->kvm);
1135 		memset(&vcpu->arch.irq_pending, 0, sizeof(vcpu->arch.irq_pending));
1136 		memset(&vcpu->arch.irq_clear, 0, sizeof(vcpu->arch.irq_clear));
1137 
1138 		if (vcpu->arch.st.guest_addr) {
1139 			kvm_release_pfn(cache->pfn, cache->dirty, cache);
1140 			/* disable pv steal time while the cpu is resetting */
1141 			vcpu->arch.st.guest_addr = 0;
1142 		}
1143 		vcpu->kvm->arch.stablecounter_gftoffset = 0;
1144 		break;
1145 	default:
1146 		if ((reg->id & KVM_REG_LOONGARCH_MASK) != KVM_REG_LOONGARCH_CSR)
1147 			return -EINVAL;
1148 
1149 		reg_idx = KVM_GET_IOC_CSRIDX(reg->id);
1150 		if (reg_idx < CSR_ALL_SIZE)
1151 			kvm_write_sw_gcsr(csr, reg_idx, v);
1152 		else
1153 			return -EINVAL;
1154 	}
1155 	return ret;
1156 }
1157 
1158 static int _kvm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
1159 {
1160 	int ret;
1161 	s64 v;
1162 
1163 	ret = _kvm_get_one_reg(vcpu, reg, &v);
1164 	if (ret)
1165 		return ret;
1166 
1167 	ret = -EINVAL;
1168 	if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
1169 		u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;
1170 
1171 		ret = put_user(v, uaddr64);
1172 	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
1173 		u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
1174 		u32 v32 = (u32)v;
1175 
1176 		ret = put_user(v32, uaddr32);
1177 	}
1178 
1179 	return ret;
1180 }
1181 
1182 static int _kvm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
1183 {
1184 	s64 v;
1185 	int ret;
1186 
1187 	ret = -EINVAL;
1188 	if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
1189 		u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;
1190 		ret = get_user(v, uaddr64);
1191 	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
1192 		u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
1193 		s32 v32;
1194 
1195 		ret = get_user(v32, uaddr32);
1196 		v = (s64)v32;
1197 	}
1198 
1199 	if (ret)
1200 		return -EFAULT;
1201 
1202 	return _kvm_set_one_reg(vcpu, reg, v);
1203 }
1204 
1205 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
1206 				     struct kvm_enable_cap *cap)
1207 {
1208 	int r = 0;
1209 
1210 	if (!kvm_vm_ioctl_check_extension(vcpu->kvm, cap->cap))
1211 		return -EINVAL;
1212 	if (cap->flags)
1213 		return -EINVAL;
1214 	if (cap->args[0])
1215 		return -EINVAL;
1216 
1217 	switch (cap->cap) {
1218 	case KVM_CAP_LOONGARCH_FPU:
1219 	case KVM_CAP_LOONGARCH_LSX:
1220 		break;
1221 	default:
1222 		r = -EINVAL;
1223 		break;
1224 	}
1225 
1226 	return r;
1227 }
1228 
1229 long kvm_arch_vcpu_async_ioctl(struct file *filp, unsigned int ioctl,
1230 			       unsigned long arg)
1231 {
1232 	struct kvm_vcpu *vcpu = filp->private_data;
1233 	void __user *argp = (void __user *)arg;
1234 
1235 	if (ioctl == KVM_INTERRUPT) {
1236 		struct kvm_loongarch_interrupt irq;
1237 
1238 		if (copy_from_user(&irq, argp, sizeof(irq)))
1239 			return -EFAULT;
1240 		kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__,
1241 			  irq.irq);
1242 
1243 		return kvm_vcpu_ioctl_interrupt(vcpu, &irq);
1244 	}
1245 
1246 	return -ENOIOCTLCMD;
1247 }
1248 
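/*
 * KVM_IRQ_LINE handler. The irq field encodes the irqchip type, the
 * target vcpu and the irq number; only the LS7A ioapic type is routed
 * here, under the ioapic lock.
 */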
1249 int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
1250 			  bool line_status)
1251 {
1252 	u32 irq = irq_level->irq;
1253 	unsigned int irq_type, vcpu_idx, irq_num, ret;
1254 	int nrcpus = atomic_read(&kvm->online_vcpus);
1255 	bool level = irq_level->level;
1256 	unsigned long flags;
1257 
1258 	irq_type = (irq >> KVM_LOONGSON_IRQ_TYPE_SHIFT) & KVM_LOONGSON_IRQ_TYPE_MASK;
1259 	vcpu_idx = (irq >> KVM_LOONGSON_IRQ_VCPU_SHIFT) & KVM_LOONGSON_IRQ_VCPU_MASK;
1260 	irq_num = (irq >> KVM_LOONGSON_IRQ_NUM_SHIFT) & KVM_LOONGSON_IRQ_NUM_MASK;
1261 
1262 	switch (irq_type) {
1263 	case KVM_LOONGSON_IRQ_TYPE_IOAPIC:
1264 		if (!ls7a_ioapic_in_kernel(kvm))
1265 			return -ENXIO;
1266 
1267 		if (vcpu_idx >= nrcpus)
1268 			return -EINVAL;
1269 
1270 		ls7a_ioapic_lock(ls7a_ioapic_irqchip(kvm), &flags);
1271 		ret = kvm_ls7a_ioapic_set_irq(kvm, irq_num, level);
1272 		ls7a_ioapic_unlock(ls7a_ioapic_irqchip(kvm), &flags);
1273 		return ret;
1274 	}
1275 	kvm->stat.vm_ioctl_irq_line++;
1276 
1277 	return -EINVAL;
1278 }
1279 
1280 static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct loongarch_kvm_irqchip *chip)
1281 {
1282 	int r, dlen;
1283 
1284 	r = 0;
1285 	dlen = chip->len - sizeof(struct loongarch_kvm_irqchip);
1286 	switch (chip->chip_id) {
1287 	case KVM_IRQCHIP_LS7A_IOAPIC:
1288 		if (dlen != sizeof(struct kvm_ls7a_ioapic_state)) {
1289 			kvm_err("get ls7a state err dlen:%d\n", dlen);
1290 			goto dlen_err;
1291 		}
1292 		r = kvm_get_ls7a_ioapic(kvm, (void *)chip->data);
1293 		break;
1294 	case KVM_IRQCHIP_LS3A_GIPI:
1295 		if (dlen != sizeof(gipiState)) {
1296 			kvm_err("get gipi state err dlen:%d\n", dlen);
1297 			goto dlen_err;
1298 		}
1299 		r = kvm_get_ls3a_ipi(kvm, (void *)chip->data);
1300 		break;
1301 	case KVM_IRQCHIP_LS3A_HT_IRQ:
1302 	case KVM_IRQCHIP_LS3A_ROUTE:
1303 		break;
1304 	case KVM_IRQCHIP_LS3A_EXTIRQ:
1305 		if (dlen != sizeof(struct kvm_loongarch_ls3a_extirq_state)) {
1306 			kvm_err("get extioi state err dlen:%d\n", dlen);
1307 			goto dlen_err;
1308 		}
1309 		r = kvm_get_ls3a_extirq(kvm, (void *)chip->data);
1310 		break;
1311 	case KVM_IRQCHIP_LS3A_IPMASK:
1312 		break;
1313 	default:
1314 		r = -EINVAL;
1315 		break;
1316 	}
1317 	return r;
1318 dlen_err:
1319 	r = -EINVAL;
1320 	return r;
1321 }
1322 
1323 static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct loongarch_kvm_irqchip *chip)
1324 {
1325 	int r, dlen;
1326 
1327 	r = 0;
1328 	dlen = chip->len - sizeof(struct loongarch_kvm_irqchip);
1329 	switch (chip->chip_id) {
1330 	case KVM_IRQCHIP_LS7A_IOAPIC:
1331 		if (dlen != sizeof(struct kvm_ls7a_ioapic_state)) {
1332 			kvm_err("set ls7a state err dlen:%d\n", dlen);
1333 			goto dlen_err;
1334 		}
1335 		r = kvm_set_ls7a_ioapic(kvm, (void *)chip->data);
1336 		break;
1337 	case KVM_IRQCHIP_LS3A_GIPI:
1338 		if (dlen != sizeof(gipiState)) {
1339 			kvm_err("set gipi state err dlen:%d\n", dlen);
1340 			goto dlen_err;
1341 		}
1342 		r = kvm_set_ls3a_ipi(kvm, (void *)chip->data);
1343 		break;
1344 	case KVM_IRQCHIP_LS3A_HT_IRQ:
1345 	case KVM_IRQCHIP_LS3A_ROUTE:
1346 		break;
1347 	case KVM_IRQCHIP_LS3A_EXTIRQ:
1348 		if (dlen != sizeof(struct kvm_loongarch_ls3a_extirq_state)) {
1349 			kvm_err("set extioi state err dlen:%d\n", dlen);
1350 			goto dlen_err;
1351 		}
1352 		r = kvm_set_ls3a_extirq(kvm, (void *)chip->data);
1353 		break;
1354 	case KVM_IRQCHIP_LS3A_IPMASK:
1355 		break;
1356 	default:
1357 		r = -EINVAL;
1358 		break;
1359 	}
1360 	return r;
1361 dlen_err:
1362 	r = -EINVAL;
1363 	return r;
1364 }
1365 
1366 /*
1367  * Read or write a bunch of CSRs. All parameters are kernel addresses.
1368  *
1369  * @return number of CSRs processed successfully.
1370  */
1371 static int _kvm_csr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
1372 		struct kvm_csr_entry *entries,
1373 		int (*do_csr)(struct kvm_vcpu *vcpu,
1374 			unsigned index, u64 *data, int force))
1375 {
1376 	int i;
1377 
1378 	for (i = 0; i < msrs->ncsrs; ++i)
1379 		if (do_csr(vcpu, entries[i].index, &entries[i].data, 1))
1380 			break;
1381 
1382 	return i;
1383 }
1384 
1385 static int kvm_csr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
1386 		int (*do_csr)(struct kvm_vcpu *vcpu,
1387 			unsigned index, u64 *data, int force))
1388 {
1389 	struct kvm_msrs msrs;
1390 	struct kvm_csr_entry *entries;
1391 	int r, n;
1392 	unsigned size;
1393 
1394 	r = -EFAULT;
1395 	if (copy_from_user(&msrs, user_msrs, sizeof msrs))
1396 		goto out;
1397 
1398 	r = -E2BIG;
1399 	if (msrs.ncsrs >= CSR_ALL_SIZE)
1400 		goto out;
1401 
1402 	size = sizeof(struct kvm_csr_entry) * msrs.ncsrs;
1403 	entries = memdup_user(user_msrs->entries, size);
1404 	if (IS_ERR(entries)) {
1405 		r = PTR_ERR(entries);
1406 		goto out;
1407 	}
1408 
1409 	r = n = _kvm_csr_io(vcpu, &msrs, entries, do_csr);
1410 	if (r < 0)
1411 		goto out_free;
1412 
1413 	r = -EFAULT;
1414 	if (copy_to_user(user_msrs->entries, entries, size))
1415 		goto out_free;
1416 
1417 	r = n;
1418 
1419 out_free:
1420 	kfree(entries);
1421 out:
1422 	return r;
1423 }
1424 
1425 static int _kvm_vcpu_set_attr(struct kvm_vcpu *vcpu,
1426 				struct kvm_device_attr *attr)
1427 {
1428 	int ret = -ENXIO;
1429 
1430 	switch (attr->group) {
1431 #ifdef CONFIG_PARAVIRT
1432 	case KVM_LARCH_VCPU_PVTIME_CTRL:
1433 		ret = _kvm_pvtime_set_attr(vcpu, attr);
1434 		break;
1435 #endif
1436 	default:
1437 		ret = -ENXIO;
1438 		break;
1439 	}
1440 
1441 	return ret;
1442 }
1443 
1444 static int _kvm_vcpu_get_attr(struct kvm_vcpu *vcpu,
1445 				struct kvm_device_attr *attr)
1446 {
1447 	int ret = -ENXIO;
1448 
1449 	switch (attr->group) {
1450 #ifdef CONFIG_PARAVIRT
1451 	case KVM_LARCH_VCPU_PVTIME_CTRL:
1452 		ret = _kvm_pvtime_get_attr(vcpu, attr);
1453 		break;
1454 #endif
1455 	default:
1456 		ret = -ENXIO;
1457 		break;
1458 	}
1459 
1460 	return ret;
1461 }
1462 
1463 static int _kvm_vcpu_has_attr(struct kvm_vcpu *vcpu,
1464 				struct kvm_device_attr *attr)
1465 {
1466 	int ret = -ENXIO;
1467 
1468 	switch (attr->group) {
1469 #ifdef CONFIG_PARAVIRT
1470 	case KVM_LARCH_VCPU_PVTIME_CTRL:
1471 		ret = _kvm_pvtime_has_attr(vcpu, attr);
1472 		break;
1473 #endif
1474 	default:
1475 		ret = -ENXIO;
1476 		break;
1477 	}
1478 
1479 	return ret;
1480 }
1481 
1482 long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl,
1483 			 unsigned long arg)
1484 {
1485 	struct kvm_vcpu *vcpu = filp->private_data;
1486 	void __user *argp = (void __user *)arg;
1487 	struct kvm_device_attr attr;
1488 	long r;
1489 
1490 	vcpu_load(vcpu);
1491 
1492 	switch (ioctl) {
1493 	case KVM_SET_ONE_REG:
1494 	case KVM_GET_ONE_REG: {
1495 		struct kvm_one_reg reg;
1496 
1497 		r = -EFAULT;
1498 		if (copy_from_user(&reg, argp, sizeof(reg)))
1499 			break;
1500 		if (ioctl == KVM_SET_ONE_REG)
1501 			r = _kvm_set_reg(vcpu, &reg);
1502 		else
1503 			r = _kvm_get_reg(vcpu, &reg);
1504 		break;
1505 	}
1506 	case KVM_ENABLE_CAP: {
1507 		struct kvm_enable_cap cap;
1508 
1509 		r = -EFAULT;
1510 		if (copy_from_user(&cap, argp, sizeof(cap)))
1511 			break;
1512 		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
1513 		break;
1514 	}
1515 	case KVM_CHECK_EXTENSION: {
1516 		unsigned int ext;
1517 		if (copy_from_user(&ext, argp, sizeof(ext)))
1518 			return -EFAULT;
1519 		switch (ext) {
1520 		case KVM_CAP_LOONGARCH_FPU:
1521 			r = !!cpu_has_fpu;
1522 			break;
1523 		case KVM_CAP_LOONGARCH_LSX:
1524 			r = !!cpu_has_lsx;
1525 			break;
1526 		default:
1527 			break;
1528 		}
1529 		break;
1530 	}
1531 
1532 	case KVM_LOONGARCH_GET_VCPU_STATE:
1533 	{
1534 		int i;
1535 		struct kvm_loongarch_vcpu_state vcpu_state;
1536 		r = -EFAULT;
1537 
1538 		vcpu_state.online_vcpus = vcpu->kvm->arch.online_vcpus;
1539 		vcpu_state.is_migrate = 1;
1540 		for (i = 0; i < 4; i++)
1541 			vcpu_state.core_ext_ioisr[i] = vcpu->arch.core_ext_ioisr[i];
1542 
1543 		vcpu_state.irq_pending = vcpu->arch.irq_pending;
1544 		vcpu_state.irq_clear = vcpu->arch.irq_clear;
1545 
1546 		if (copy_to_user(argp, &vcpu_state, sizeof(struct kvm_loongarch_vcpu_state)))
1547 			break;
1548 		r = 0;
1549 		break;
1550 	}
1551 
1552 	case KVM_LOONGARCH_SET_VCPU_STATE:
1553 	{
1554 		int i;
1555 		struct kvm_loongarch_vcpu_state vcpu_state;
1556 		r = -EFAULT;
1557 
1558 		if (copy_from_user(&vcpu_state, argp, sizeof(struct kvm_loongarch_vcpu_state)))
1559 			return -EFAULT;
1560 
1561 		vcpu->kvm->arch.online_vcpus = vcpu_state.online_vcpus;
1562 		vcpu->kvm->arch.is_migrate = vcpu_state.is_migrate;
1563 		for (i = 0; i < 4; i++)
1564 			vcpu->arch.core_ext_ioisr[i] = vcpu_state.core_ext_ioisr[i];
1565 
1566 		vcpu->arch.irq_pending = vcpu_state.irq_pending;
1567 		vcpu->arch.irq_clear = vcpu_state.irq_clear;
1568 		r = 0;
1569 		break;
1570 	}
1571 	case KVM_GET_MSRS: {
1572 		r = kvm_csr_io(vcpu, argp, _kvm_getcsr);
1573 		break;
1574 	}
1575 	case KVM_SET_MSRS: {
1576 		r = kvm_csr_io(vcpu, argp, _kvm_setcsr);
1577 		break;
1578 	}
1579 	case KVM_SET_DEVICE_ATTR: {
1580 		r = -EFAULT;
1581 		if (copy_from_user(&attr, argp, sizeof(attr)))
1582 			break;
1583 		r = _kvm_vcpu_set_attr(vcpu, &attr);
1584 		break;
1585 	}
1586 	case KVM_GET_DEVICE_ATTR: {
1587 		r = -EFAULT;
1588 		if (copy_from_user(&attr, argp, sizeof(attr)))
1589 			break;
1590 		r = _kvm_vcpu_get_attr(vcpu, &attr);
1591 		break;
1592 	}
1593 	case KVM_HAS_DEVICE_ATTR: {
1594 		r = -EFAULT;
1595 		if (copy_from_user(&attr, argp, sizeof(attr)))
1596 			break;
1597 		r = _kvm_vcpu_has_attr(vcpu, &attr);
1598 		break;
1599 	}
1600 	default:
1601 		r = -ENOIOCTLCMD;
1602 	}
1603 
1604 	vcpu_put(vcpu);
1605 	return r;
1606 }
1607 
1608 void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
1609 {
1610 
1611 }
1612 
1613 void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
1614 					struct kvm_memory_slot *memslot)
1615 {
1616 	/*
1617 	 * FIXME: THP is disabled to improve the VM migration success ratio;
1618 	 * it is unclear how to detect a failed migration so THP can be re-enabled.
1619 	 */
1620 	memslot->arch.flags |= KVM_MEMSLOT_DISABLE_THP;
1621 
1622 	/* Let implementation handle TLB/GVA invalidation */
1623 	kvm_flush_remote_tlbs(kvm);
1624 }
1625 
1626 long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
1627 {
1628 	struct kvm *kvm = filp->private_data;
1629 	void __user *argp = (void __user *)arg;
1630 	long r;
1631 
1632 	switch (ioctl) {
1633 	case KVM_CREATE_IRQCHIP:
1634 	{
1635 		mutex_lock(&kvm->lock);
1636 		r = -EEXIST;
1637 		if (kvm->arch.v_ioapic)
1638 			goto create_irqchip_unlock;
1639 
1640 		r = kvm_create_ls7a_ioapic(kvm);
1641 		if (r < 0)
1642 			goto create_irqchip_unlock;
1643 		r = kvm_create_ls3a_ipi(kvm);
1644 		if (r < 0) {
1645 			mutex_lock(&kvm->slots_lock);
1646 			kvm_destroy_ls7a_ioapic(kvm);
1647 			mutex_unlock(&kvm->slots_lock);
1648 			goto create_irqchip_unlock;
1649 		}
1650 		r = kvm_create_ls3a_ext_irq(kvm);
1651 		if (r < 0) {
1652 			mutex_lock(&kvm->slots_lock);
1653 			kvm_destroy_ls3a_ipi(kvm);
1654 			kvm_destroy_ls7a_ioapic(kvm);
1655 			mutex_unlock(&kvm->slots_lock);
1656 		}
1657 		kvm_ls7a_setup_default_irq_routing(kvm);
1658 		irqchip_debug_init(kvm);
1659 		/* Write kvm->irq_routing before kvm->arch.v_ioapic. */
1660 		smp_wmb();
1661 create_irqchip_unlock:
1662 		mutex_unlock(&kvm->lock);
1663 		break;
1664 	}
1665 	case KVM_GET_IRQCHIP: {
1666 		struct loongarch_kvm_irqchip *kchip;
1667 		struct loongarch_kvm_irqchip uchip;
1668 		if (copy_from_user(&uchip, argp, sizeof(struct loongarch_kvm_irqchip)))
1669 			goto out;
1670 		kchip = memdup_user(argp, uchip.len);
1671 		if (IS_ERR(kchip)) {
1672 			r = PTR_ERR(kchip);
1673 			goto out;
1674 		}
1675 
1676 		r = -ENXIO;
1677 		if (!ls7a_ioapic_in_kernel(kvm))
1678 			goto get_irqchip_out;
1679 		r = kvm_vm_ioctl_get_irqchip(kvm, kchip);
1680 		if (r)
1681 			goto get_irqchip_out;
1682 		if (copy_to_user(argp, kchip, kchip->len))
1683 			goto get_irqchip_out;
1684 		r = 0;
1685 get_irqchip_out:
1686 		kfree(kchip);
1687 		break;
1688 	}
1689 	case KVM_SET_IRQCHIP: {
1690 		struct loongarch_kvm_irqchip *kchip;
1691 		struct loongarch_kvm_irqchip uchip;
1692 		if (copy_from_user(&uchip, argp, sizeof(struct loongarch_kvm_irqchip)))
1693 			goto out;
1694 
1695 		kchip = memdup_user(argp, uchip.len);
1696 		if (IS_ERR(kchip)) {
1697 			r = PTR_ERR(kchip);
1698 			goto out;
1699 		}
1700 
1701 		r = -ENXIO;
1702 		if (!ls7a_ioapic_in_kernel(kvm))
1703 			goto set_irqchip_out;
1704 		r = kvm_vm_ioctl_set_irqchip(kvm, kchip);
1705 		if (r)
1706 			goto set_irqchip_out;
1707 		r = 0;
1708 set_irqchip_out:
1709 		kfree(kchip);
1710 		break;
1711 	}
1712 	case KVM_LOONGARCH_GET_IOCSR:
1713 	{
1714 		r = _kvm_get_iocsr(kvm, argp);
1715 		break;
1716 	}
1717 	case KVM_LOONGARCH_SET_IOCSR:
1718 	{
1719 		r = _kvm_set_iocsr(kvm, argp);
1720 		break;
1721 	}
1722 	case KVM_LOONGARCH_SET_CPUCFG:
1723 	{
1724 		r = 0;
1725 		if (copy_from_user(&kvm->arch.cpucfgs, argp, sizeof(struct kvm_cpucfg)))
1726 			r = -EFAULT;
1727 		break;
1728 	}
1729 	case KVM_LOONGARCH_GET_CPUCFG:
1730 	{
1731 		r = 0;
1732 		if (copy_to_user(argp, &kvm->arch.cpucfgs, sizeof(struct kvm_cpucfg)))
1733 			r = -EFAULT;
1734 		break;
1735 	}
1736 	default:
1737 		r = -ENOIOCTLCMD;
1738 	}
1739 out:
1740 
1741 	return r;
1742 }
1743 
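/*
 * Module init: allocate the percpu kvm_context (vmcs), derive the
 * vpid/GID mask from the GIDBIT field of CSR.GSTAT for every possible
 * cpu, and initialize fault handling via _kvm_init_fault().
 */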
1744 int kvm_arch_init(void *opaque)
1745 {
1746 	struct kvm_context *context;
1747 	unsigned long vpid_mask;
1748 	int cpu;
1749 
1750 	vmcs = alloc_percpu(struct kvm_context);
1751 	if (!vmcs) {
1752 		printk(KERN_ERR "kvm: failed to allocate percpu kvm_context\n");
1753 		return -ENOMEM;
1754 	}
1755 
1756 	vpid_mask = kvm_read_csr_gstat();
1757 	vpid_mask = (vpid_mask & KVM_GSTAT_GIDBIT) >> KVM_GSTAT_GIDBIT_SHIFT;
1758 	if (vpid_mask)
1759 		vpid_mask = GENMASK(vpid_mask - 1, 0);
1760 
1761 	for_each_possible_cpu(cpu) {
1762 		context = per_cpu_ptr(vmcs, cpu);
1763 		context->gid_mask = vpid_mask;
1764 		context->gid_ver_mask = ~context->gid_mask;
1765 		context->gid_fisrt_ver = context->gid_mask + 1;
1766 		context->vpid_cache = context->gid_mask + 1;
1767 		context->last_vcpu = NULL;
1768 	}
1769 
1770 	_kvm_init_fault();
1771 	return 0;
1772 }
1773 
1774 void kvm_arch_exit(void)
1775 {
1776 	free_percpu(vmcs);
1777 }
1778 
1779 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
1780 				  struct kvm_sregs *sregs)
1781 {
1782 	return -ENOIOCTLCMD;
1783 }
1784 
1785 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
1786 				  struct kvm_sregs *sregs)
1787 {
1788 	return -ENOIOCTLCMD;
1789 }
1790 
1791 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
1792 {
1793 }
1794 
1795 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1796 {
1797 	int i = 0;
1798 
1799 	/* no need for vcpu_load and vcpu_put */
1800 	fpu->fcsr = vcpu->arch.fpu.fcsr;
1801 	fpu->fcc = vcpu->arch.fpu.fcc;
1802 	for (i = 0; i < NUM_FPU_REGS; i++)
1803 		memcpy(&fpu->fpr[i], &vcpu->arch.fpu.fpr[i], FPU_REG_WIDTH / 64);
1804 
1805 	return 0;
1806 }
1807 
1808 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1809 {
1810 	int i = 0;
1811 
1812 	/* no need for vcpu_load and vcpu_put */
1813 	vcpu->arch.fpu.fcsr = fpu->fcsr;
1814 	vcpu->arch.fpu.fcc = fpu->fcc;
1815 	for (i = 0; i < NUM_FPU_REGS; i++)
1816 		memcpy(&vcpu->arch.fpu.fpr[i], &fpu->fpr[i], FPU_REG_WIDTH / 64);
1817 
1818 	return 0;
1819 }
1820 
1821 vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
1822 {
1823 	return VM_FAULT_SIGBUS;
1824 }
1825 
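/*
 * Report which generic and LoongArch-specific capabilities this host
 * supports; KVM_CAP_LOONGARCH_VZ doubles as the interface version query.
 */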
1826 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
1827 {
1828 	int r;
1829 
1830 	switch (ext) {
1831 	case KVM_CAP_ONE_REG:
1832 	case KVM_CAP_ENABLE_CAP:
1833 	case KVM_CAP_READONLY_MEM:
1834 	case KVM_CAP_SYNC_MMU:
1835 #ifdef CONFIG_HAVE_LS_KVM_MSI
1836 	case KVM_CAP_SIGNAL_MSI:
1837 #endif
1838 	case KVM_CAP_IMMEDIATE_EXIT:
1839 		r = 1;
1840 		break;
1841 	case KVM_CAP_NR_VCPUS:
1842 		r = num_online_cpus();
1843 		break;
1844 	case KVM_CAP_MAX_VCPUS:
1845 		r = KVM_MAX_VCPUS;
1846 		break;
1847 	case KVM_CAP_MAX_VCPU_ID:
1848 		r = KVM_MAX_VCPU_ID;
1849 		break;
1850 	case KVM_CAP_LOONGARCH_FPU:
1851 		/* We don't handle systems with inconsistent cpu_has_fpu */
1852 		r = !!cpu_has_fpu;
1853 		break;
1854 	case KVM_CAP_LOONGARCH_LSX:
1855 		/*
1856 		 * We don't support LSX vector partitioning yet:
1857 		 * 1) It would require explicit support which can't be tested
1858 		 *    yet due to lack of support in current hardware.
1859 		 * 2) It extends the state that would need to be saved/restored
1860 		 *    by e.g. QEMU for migration.
1861 		 *
1862 		 * When vector partitioning hardware becomes available, support
1863 		 * could be added by requiring a flag when enabling
1864 		 * KVM_CAP_LOONGARCH_LSX capability to indicate that userland knows
1865 		 * to save/restore the appropriate extra state.
1866 		 */
1867 		r = cpu_has_lsx;
1868 		break;
1869 	case KVM_CAP_IRQCHIP:
1870 	case KVM_CAP_IOEVENTFD:
1871 		/* we wouldn't be here unless cpu_has_lvz */
1872 		r = 1;
1873 		break;
1874 	case KVM_CAP_LOONGARCH_VZ:
1875 		/* get user defined kvm version */
1876 		r = KVM_LOONGARCH_VERSION;
1877 		break;
1878 	default:
1879 		r = 0;
1880 		break;
1881 	}
1882 	return r;
1883 }
1884 
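/*
 * A timer interrupt is pending if either the software timer state says so
 * or the guest ESTAT CSR already has the timer interrupt bit set.
 */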
1885 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
1886 {
1887 	return _kvm_pending_timer(vcpu) ||
1888 		kvm_read_hw_gcsr(KVM_CSR_ESTAT) & (1 << INT_TI);
1889 }
1890 
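/* Dump guest PC, pending exceptions, GPRs and a few CSRs for debugging */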
1891 int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
1892 {
1893 	int i;
1894 	struct loongarch_csrs *csr;
1895 
1896 	if (!vcpu)
1897 		return -1;
1898 
1899 	kvm_debug("VCPU Register Dump:\n");
1900 	kvm_debug("\tpc = 0x%08lx\n", vcpu->arch.pc);
1901 	kvm_debug("\texceptions: %08lx\n", vcpu->arch.irq_pending);
1902 
1903 	for (i = 0; i < 32; i += 4) {
1904 		kvm_debug("\tgpr%02d: %08lx %08lx %08lx %08lx\n", i,
1905 		       vcpu->arch.gprs[i],
1906 		       vcpu->arch.gprs[i + 1],
1907 		       vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]);
1908 	}
1909 
1910 	csr = vcpu->arch.csr;
1911 	kvm_debug("\tCRMD: 0x%08lx, ESTAT: 0x%08lx\n",
1912 		  kvm_read_hw_gcsr(KVM_CSR_CRMD),
1913 		  kvm_read_hw_gcsr(KVM_CSR_ESTAT));
1914 
1915 	kvm_debug("\tERA: 0x%08lx\n", kvm_read_hw_gcsr(KVM_CSR_ERA));
1916 
1917 	return 0;
1918 }
1919 
1920 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1921 {
1922 	int i;
1923 
1924 	vcpu_load(vcpu);
1925 
1926 	for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
1927 		vcpu->arch.gprs[i] = regs->gpr[i];
1928 	vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */
1929 	vcpu->arch.pc = regs->pc;
1930 
1931 	vcpu_put(vcpu);
1932 	return 0;
1933 }
1934 
1935 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1936 {
1937 	int i;
1938 
1939 	vcpu_load(vcpu);
1940 
1941 	for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
1942 		regs->gpr[i] = vcpu->arch.gprs[i];
1943 
1944 	regs->pc = vcpu->arch.pc;
1945 
1946 	vcpu_put(vcpu);
1947 	return 0;
1948 }
1949 
1950 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
1951 				  struct kvm_translation *tr)
1952 {
1953 	return 0;
1954 }
1955 
1956 #ifdef CONFIG_CPU_HAS_LBT
1957 /* Enable LBT for guest and restore context */
1958 void kvm_own_lbt(struct kvm_vcpu *vcpu)
1959 {
1960 	preempt_disable();
1961 	if (!(vcpu->arch.aux_inuse & KVM_LARCH_LBT)) {
1962 		kvm_set_csr_euen(KVM_EUEN_LBTEN);
1963 
1964 		/* If guest lbt state not active, restore it now */
1965 		kvm_restore_lbt(vcpu);
1966 		vcpu->arch.aux_inuse |= KVM_LARCH_LBT;
1967 	}
1968 	preempt_enable();
1969 }
1970 
1971 static void kvm_enable_lbt_fpu(struct kvm_vcpu *vcpu, unsigned long fcsr)
1972 {
1973 	/*
1974 	 * If TM is enabled, saving/restoring the top registers will
1975 	 * trigger an LBT exception, so enable LBT in advance here.
1976 	 */
1977 	if (fcsr & FPU_CSR_TM)
1978 		kvm_own_lbt(vcpu);
1979 }
1980 
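/* Save the guest LBT context and disable LBT access if it was in use */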
1981 static void kvm_lose_lbt(struct kvm_vcpu *vcpu)
1982 {
1983 	preempt_disable();
1984 	if (vcpu->arch.aux_inuse & KVM_LARCH_LBT) {
1985 		kvm_save_lbt(vcpu);
1986 		kvm_clear_csr_euen(KVM_EUEN_LBTEN);
1987 		vcpu->arch.aux_inuse &= ~KVM_LARCH_LBT;
1988 	}
1989 	preempt_enable();
1990 }
1991 
1992 #else
1993 void kvm_own_lbt(struct kvm_vcpu *vcpu) { }
1994 static void kvm_enable_lbt_fpu(struct kvm_vcpu *vcpu, unsigned long fcsr) { }
1995 static void kvm_lose_lbt(struct kvm_vcpu *vcpu) { }
1996 #endif
1997 
1998 /* Enable FPU for guest and restore context */
1999 void kvm_own_fpu(struct kvm_vcpu *vcpu)
2000 {
2001 
2002 	preempt_disable();
2003 
2004 	/*
2005 	 * Enable FPU for guest and
2006 	 * restore the guest FPU context below
2007 	 */
2008 	kvm_enable_lbt_fpu(vcpu, vcpu->arch.fpu.fcsr);
2009 	kvm_set_csr_euen(KVM_EUEN_FPEN);
2010 
2011 	/* If guest FPU state not active, restore it now */
2012 	kvm_restore_fpu(vcpu);
2013 	vcpu->arch.aux_inuse |= KVM_LARCH_FPU;
2014 	trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_FPU);
2015 
2016 	preempt_enable();
2017 }
2018 
2019 #ifdef CONFIG_CPU_HAS_LSX
2020 /* Enable LSX for guest and restore context */
2021 void kvm_own_lsx(struct kvm_vcpu *vcpu)
2022 {
2023 	preempt_disable();
2024 
2025 	/* Enable LSX for guest */
2026 	kvm_enable_lbt_fpu(vcpu, vcpu->arch.fpu.fcsr);
2027 	kvm_set_csr_euen(KVM_EUEN_LSXEN | KVM_EUEN_FPEN);
2028 	switch (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
2029 	case KVM_LARCH_FPU:
2030 		/*
2031 		 * Guest FPU state already loaded,
2032 		 * only restore upper LSX state
2033 		 */
2034 		kvm_restore_lsx_upper(vcpu);
2035 		trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE,
2036 					KVM_TRACE_AUX_LSX);
2037 		break;
2038 	default:
2039 		/* Neither FPU nor LSX already active,
2040 		 * restore the full LSX state
2041 		 */
2042 		kvm_restore_lsx(vcpu);
2043 		trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE,
2044 				KVM_TRACE_AUX_FPU_LSX);
2045 		break;
2046 	}
2047 
2048 	vcpu->arch.aux_inuse |= KVM_LARCH_LSX | KVM_LARCH_FPU;
2049 	preempt_enable();
2050 }
2051 #endif
2052 
2053 #ifdef CONFIG_CPU_HAS_LASX
2054 /* Enable LASX for guest and restore context */
2055 void kvm_own_lasx(struct kvm_vcpu *vcpu)
2056 {
2057 	preempt_disable();
2058 
2059 	kvm_enable_lbt_fpu(vcpu, vcpu->arch.fpu.fcsr);
2060 	kvm_set_csr_euen(KVM_EUEN_FPEN | KVM_EUEN_LSXEN | KVM_EUEN_LASXEN);
2061 	switch (vcpu->arch.aux_inuse & (KVM_LARCH_FPU | KVM_LARCH_LSX)) {
2062 	case KVM_LARCH_LSX | KVM_LARCH_FPU:
2063 	case KVM_LARCH_LSX:
2064 		/*
2065 		 * Guest LSX state already loaded, only restore upper LASX state
2066 		 */
2067 		kvm_restore_lasx_upper(vcpu);
2068 		trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_LASX);
2069 		break;
2070 	case KVM_LARCH_FPU:
2071 		/*
2072 		 * Guest FPU state already loaded, only restore the upper LSX/LASX state
2073 		 */
2074 		kvm_restore_lsx_upper(vcpu);
2075 		kvm_restore_lasx_upper(vcpu);
2076 		trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_LASX);
2077 		break;
2078 	default:
2079 		/* Neither FPU nor LSX already active, restore the full LASX state */
2080 		kvm_restore_lasx(vcpu);
2081 		trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE,
2082 			      KVM_TRACE_AUX_FPU_LSX_LASX);
2083 		break;
2084 	}
2085 
2086 	vcpu->arch.aux_inuse |= KVM_LARCH_LASX | KVM_LARCH_LSX | KVM_LARCH_FPU;
2087 	preempt_enable();
2088 }
2089 #endif
2090 
2091 /* Save and disable FPU & LSX & LASX */
2092 void kvm_lose_fpu(struct kvm_vcpu *vcpu)
2093 {
2094 #ifdef CONFIG_CPU_HAS_LBT
2095 	unsigned long fcsr;
2096 #endif
2097 
2098 	preempt_disable();
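	/*
	 * If the guest used FP/SIMD but LBT was never faulted in, the live
	 * FCSR may still have TM set; enable LBT first so the top registers
	 * are saved below instead of faulting during the FPU save.
	 */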
2099 #ifdef CONFIG_CPU_HAS_LBT
2100 	if (vcpu->arch.aux_inuse & KVM_LARCH_FP_ALL) {
2101 		if (!(vcpu->arch.aux_inuse & KVM_LARCH_LBT)) {
2102 			fcsr = read_fcsr(LOONGARCH_FCSR0);
2103 			kvm_enable_lbt_fpu(vcpu, fcsr);
2104 		}
2105 	}
2106 #endif
2107 	if (vcpu->arch.aux_inuse & KVM_LARCH_LASX) {
2108 		kvm_save_lasx(vcpu);
2109 		trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU_LSX_LASX);
2110 		/* Disable LASX & LSX & FPU */
2111 		kvm_clear_csr_euen(KVM_EUEN_FPEN | KVM_EUEN_LSXEN | KVM_EUEN_LASXEN);
2112 	} else if (vcpu->arch.aux_inuse & KVM_LARCH_LSX) {
2113 		kvm_save_lsx(vcpu);
2114 		trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU_LSX);
2115 		/* Disable LSX & FPU */
2116 		kvm_clear_csr_euen(KVM_EUEN_FPEN | KVM_EUEN_LSXEN);
2117 	} else if (vcpu->arch.aux_inuse & KVM_LARCH_FPU) {
2118 		kvm_save_fpu(vcpu);
2119 		trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU);
2120 		/* Disable FPU */
2121 		kvm_clear_csr_euen(KVM_EUEN_FPEN);
2122 	}
2123 	vcpu->arch.aux_inuse &= ~KVM_LARCH_FP_ALL;
2124 
2125 	kvm_lose_lbt(vcpu);
2126 	preempt_enable();
2127 }
2128 
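/*
 * Save the guest PMU CSRs and hand the counters back to the host: if none
 * of the saved PERFCTRLs still has a PLV-enable bit set, drop the
 * KVM_LARCH_PERF flag, then clear GCFG.GPERF and zero the host counters.
 */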
2129 void kvm_lose_hw_perf(struct kvm_vcpu *vcpu)
2130 {
2131 	if (vcpu->arch.aux_inuse & KVM_LARCH_PERF) {
2132 		struct loongarch_csrs *csr = vcpu->arch.csr;
2133 		/* save guest pmu csr */
2134 		kvm_save_hw_gcsr(csr, KVM_CSR_PERFCTRL0);
2135 		kvm_save_hw_gcsr(csr, KVM_CSR_PERFCNTR0);
2136 		kvm_save_hw_gcsr(csr, KVM_CSR_PERFCTRL1);
2137 		kvm_save_hw_gcsr(csr, KVM_CSR_PERFCNTR1);
2138 		kvm_save_hw_gcsr(csr, KVM_CSR_PERFCTRL2);
2139 		kvm_save_hw_gcsr(csr, KVM_CSR_PERFCNTR2);
2140 		kvm_save_hw_gcsr(csr, KVM_CSR_PERFCTRL3);
2141 		kvm_save_hw_gcsr(csr, KVM_CSR_PERFCNTR3);
2142 		if (((kvm_read_sw_gcsr(csr, KVM_CSR_PERFCTRL0) |
2143 			kvm_read_sw_gcsr(csr, KVM_CSR_PERFCTRL1) |
2144 			kvm_read_sw_gcsr(csr, KVM_CSR_PERFCTRL2) |
2145 			kvm_read_sw_gcsr(csr, KVM_CSR_PERFCTRL3))
2146 			& KVM_PMU_PLV_ENABLE) == 0)
2147 			vcpu->arch.aux_inuse &= ~KVM_LARCH_PERF;
2148 		/* config host pmu csr */
2149 		kvm_write_csr_gcfg(kvm_read_csr_gcfg() & ~KVM_GCFG_GPERF);
2150 		/* TODO: handle PMU CSRs shared by host and guest at the same time */
2151 		kvm_write_csr_perfctrl0(0);
2152 		kvm_write_csr_perfcntr0(0);
2153 		kvm_write_csr_perfctrl1(0);
2154 		kvm_write_csr_perfcntr1(0);
2155 		kvm_write_csr_perfctrl2(0);
2156 		kvm_write_csr_perfcntr2(0);
2157 		kvm_write_csr_perfctrl3(0);
2158 		kvm_write_csr_perfcntr3(0);
2159 	}
2160 }
2161 
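/*
 * Re-enable guest access to the PMU (GCFG.GPERF) and reload the guest
 * PMU CSRs saved by kvm_lose_hw_perf(), if the vCPU was using them.
 */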
2162 void kvm_restore_hw_perf(struct kvm_vcpu *vcpu)
2163 {
2164 	if (vcpu->arch.aux_inuse & KVM_LARCH_PERF) {
2165 		struct loongarch_csrs *csr = vcpu->arch.csr;
2166 		/* enable guest pmu */
2167 		kvm_write_csr_gcfg(kvm_read_csr_gcfg() | KVM_GCFG_GPERF);
2168 		kvm_restore_hw_gcsr(csr, KVM_CSR_PERFCTRL0);
2169 		kvm_restore_hw_gcsr(csr, KVM_CSR_PERFCNTR0);
2170 		kvm_restore_hw_gcsr(csr, KVM_CSR_PERFCTRL1);
2171 		kvm_restore_hw_gcsr(csr, KVM_CSR_PERFCNTR1);
2172 		kvm_restore_hw_gcsr(csr, KVM_CSR_PERFCTRL2);
2173 		kvm_restore_hw_gcsr(csr, KVM_CSR_PERFCNTR2);
2174 		kvm_restore_hw_gcsr(csr, KVM_CSR_PERFCTRL3);
2175 		kvm_restore_hw_gcsr(csr, KVM_CSR_PERFCNTR3);
2176 	}
2177 }
2178 
2179 static int __init kvm_loongarch_init(void)
2180 {
2181 	int ret;
2182 
2183 	if (!cpu_has_lvz)
2184 		return 0;
2185 
2186 	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
2187 
2188 	if (ret)
2189 		return ret;
2190 
2191 	return 0;
2192 }
2193 
2194 static void __exit kvm_loongarch_exit(void)
2195 {
2196 	kvm_exit();
2197 }
2198 
2199 module_init(kvm_loongarch_init);
2200 module_exit(kvm_loongarch_exit);
2201 
2202 static const struct cpu_feature loongarch_kvm_feature[] = {
2203 	{ .feature = cpu_feature(LOONGARCH_LVZ) },
2204 	{},
2205 };
2206 MODULE_DEVICE_TABLE(cpu, loongarch_kvm_feature);
2207 
2208 EXPORT_TRACEPOINT_SYMBOL(kvm_exit);
2209