/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */

#ifndef __LOONGARCH_KVM_HOST_H__
#define __LOONGARCH_KVM_HOST_H__

#include <linux/cpumask.h>
#include <linux/err.h>		/* IS_ERR_VALUE() */
#include <linux/mutex.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/kvm.h>
#include <linux/kvm_types.h>
#include <linux/threads.h>
#include <linux/spinlock.h>

#include <asm/inst.h>
#include <asm/loongarchregs.h>

#define __KVM_HAVE_ARCH_VCPU_DEBUGFS

/* LoongArch KVM register IDs */
#define LOONGARCH_CSR_32(_R, _S)					\
	(KVM_REG_LOONGARCH_CSR | KVM_REG_SIZE_U32 | (8 * (_R) + (_S)))

#define LOONGARCH_CSR_64(_R, _S)					\
	(KVM_REG_LOONGARCH_CSR | KVM_REG_SIZE_U64 | (8 * (_R) + (_S)))

#define KVM_IOC_CSRID(id)	LOONGARCH_CSR_64(id, 0)
#define KVM_GET_IOC_CSRIDX(id)	(((id) & KVM_CSR_IDX_MASK) >> 3)
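
/*
 * Each CSR register id packs the CSR number and a sub-register index as
 * 8 * reg + set, tagged with the register class and size bits.  For
 * example (illustrative only), KVM_IOC_CSRID(LOONGARCH_CSR_CRMD) builds
 * the 64-bit id for CRMD with set 0, and KVM_GET_IOC_CSRIDX() recovers
 * the CSR number by masking off the class/size bits and shifting right
 * by 3.
 */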

#define LOONGSON_VIRT_REG_BASE	0x1f000000
#define KVM_MAX_VCPUS		256
#define KVM_USER_MEM_SLOTS	256
/* Memory slots that are not exposed to userspace */
#define KVM_PRIVATE_MEM_SLOTS	0

#define KVM_HALT_POLL_NS_DEFAULT 500000

#define KVM_REQ_RECORD_STEAL	KVM_ARCH_REQ(1)
#define KVM_REQ_EVENT		KVM_ARCH_REQ(4)

#define KVM_INVALID_ADDR		0xdeadbeef
#define KVM_HVA_ERR_BAD			(-1UL)
#define KVM_HVA_ERR_RO_BAD		(-2UL)
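
/*
 * The error HVAs above sit in the top page of the address space, the
 * same range IS_ERR_VALUE() treats as an error encoding, so one range
 * check classifies both of them (and any ERR_PTR-style value) as bad.
 */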
static inline bool kvm_is_error_hva(unsigned long addr)
{
	return IS_ERR_VALUE(addr);
}

struct kvm_vm_stat {
	ulong remote_tlb_flush;
	u64 vm_ioctl_irq_line;
	u64 ls7a_ioapic_update;
	u64 ls7a_ioapic_set_irq;
	u64 ioapic_reg_write;
	u64 ioapic_reg_read;
	u64 set_ls7a_ioapic;
	u64 get_ls7a_ioapic;
	u64 set_ls3a_ext_irq;
	u64 get_ls3a_ext_irq;
	u64 trigger_ls3a_ext_irq;
	u64 pip_read_exits;
	u64 pip_write_exits;
	u64 ls7a_msi_irq;
};

struct kvm_vcpu_stat {
	/* Exit counters indexed by guest exception code (Ecode) */
	u64 excep_exits[EXCCODE_INT_START];
	u64 idle_exits;
	u64 signal_exits;
	u64 int_exits;
	u64 rdcsr_cpu_feature_exits;
	u64 rdcsr_misc_func_exits;
	u64 rdcsr_ipi_access_exits;
	u64 cpucfg_exits;
	u64 huge_dec_exits;
	u64 huge_thp_exits;
	u64 huge_adjust_exits;
	u64 huge_set_exits;
	u64 huge_merge_exits;
	u64 halt_successful_poll;
	u64 halt_attempted_poll;
	u64 halt_poll_success_ns;
	u64 halt_poll_fail_ns;
	u64 halt_poll_invalid;
	u64 halt_wakeup;
};

#define KVM_MEMSLOT_DISABLE_THP		(1UL << 17)
struct kvm_arch_memory_slot {
	unsigned int flags;
};

enum {
	IOCSR_FEATURES,
	IOCSR_VENDOR,
	IOCSR_CPUNAME,
	IOCSR_NODECNT,
	IOCSR_MISC_FUNC,
	IOCSR_MAX
};

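/*
 * Per-host-CPU VPID (guest ID) allocation context.  The layout suggests
 * a version-tagged allocator in the style of the host ASID code:
 * vpid_cache holds the next ID to hand out on this CPU, gid_mask
 * selects the ID bits, and gid_ver_mask/gid_first_ver track the
 * generation so a vcpu's stale VPID from an earlier generation can be
 * detected and reallocated (an inference from the field names).
 */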
struct kvm_context {
	unsigned long gid_mask;
	unsigned long gid_ver_mask;
	unsigned long gid_first_ver;
	unsigned long vpid_cache;
	struct kvm_vcpu *last_vcpu;
};

struct kvm_arch {
	/* Guest physical mm */
	struct mm_struct gpa_mm;
	/* Mask of CPUs needing GPA ASID flush */
	cpumask_t asid_flush_mask;

	unsigned char online_vcpus;
	unsigned char is_migrate;
	s64 stablecounter_gftoffset;
	u32 cpucfg_lasx;
	struct ls7a_kvm_ioapic *v_ioapic;
	struct ls3a_kvm_ipi *v_gipi;
	struct ls3a_kvm_routerirq *v_routerirq;
	struct ls3a_kvm_extirq *v_extirq;
	spinlock_t iocsr_lock;
	struct kvm_iocsr_entry iocsr[IOCSR_MAX];
	struct kvm_cpucfg cpucfgs;
	struct kvm_context __percpu *vmcs;
};

#define LOONGARCH_CSRS	0x100
#define CSR_UCWIN_BASE	0x100
#define CSR_UCWIN_SIZE	0x10
#define CSR_DMWIN_BASE	0x180
#define CSR_DMWIN_SIZE	0x4
#define CSR_PERF_BASE	0x200
#define CSR_PERF_SIZE	0x8
#define CSR_DEBUG_BASE	0x500
#define CSR_DEBUG_SIZE	0x3
#define CSR_ALL_SIZE	0x800
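
/*
 * The base/size pairs above carve sub-ranges (uncached windows, direct
 * map windows, performance and debug CSRs) out of the 0x800-entry
 * software CSR image kept in struct loongarch_csrs below.
 */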
struct loongarch_csrs {
	unsigned long csrs[CSR_ALL_SIZE];
};

/* Resume Flags */
#define RESUME_FLAG_DR		(1<<0)	/* Reload guest nonvolatile state? */
#define RESUME_FLAG_HOST	(1<<1)	/* Resume host? */

#define RESUME_GUEST		0
#define RESUME_GUEST_DR		RESUME_FLAG_DR
#define RESUME_HOST		RESUME_FLAG_HOST

enum emulation_result {
	EMULATE_DONE,		/* no further processing */
	EMULATE_DO_MMIO,	/* kvm_run filled with MMIO request */
	EMULATE_FAIL,		/* can't emulate this instruction */
	EMULATE_WAIT,		/* IDLE instruction */
	EMULATE_PRIV_FAIL,	/* privilege fault during emulation */
	EMULATE_EXCEPT,		/* A guest exception has been generated */
	EMULATE_PV_HYPERCALL,	/* HYPCALL instruction */
	EMULATE_DEBUG,		/* Emulate guest kernel debug */
	EMULATE_DO_IOCSR,	/* handle IOCSR request */
};

#define KVM_LARCH_FPU		(0x1 << 0)
#define KVM_LARCH_LSX		(0x1 << 1)
#define KVM_LARCH_LASX		(0x1 << 2)
#define KVM_LARCH_FP_ALL	(KVM_LARCH_FPU | KVM_LARCH_LSX | KVM_LARCH_LASX)
#define KVM_LARCH_DATA_HWBP	(0x1 << 3)
#define KVM_LARCH_INST_HWBP	(0x1 << 4)
#define KVM_LARCH_HWBP		(KVM_LARCH_DATA_HWBP | KVM_LARCH_INST_HWBP)
#define KVM_LARCH_RESET		(0x1 << 5)
#define KVM_LARCH_PERF		(0x1 << 6)
#define KVM_LARCH_LBT		(0x1 << 7)
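
/*
 * The KVM_LARCH_* bits above appear to be tracked in
 * kvm_vcpu_arch::aux_inuse to record which auxiliary guest state (FPU,
 * LSX, LASX, hardware breakpoints, PMU, LBT) is currently live in
 * hardware and thus needs saving or restoring on context switch.
 */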

struct kvm_vcpu_arch {
	unsigned long guest_eentry;
	unsigned long host_eentry;
	int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
	int (*handle_exit)(struct kvm_run *run, struct kvm_vcpu *vcpu);

	/* Host registers preserved across guest mode execution */
	unsigned long host_stack;
	unsigned long host_gp;
	unsigned long host_pgd;
	unsigned long host_pgdhi;
	unsigned long host_entryhi;

	/* Host CSR registers used when handling exits from guest */
	unsigned long badv;
	unsigned long host_estat;
	unsigned long badi;
	unsigned long host_ecfg;
	unsigned long host_percpu;

	u32 is_hypcall;
	/* GPRs */
	unsigned long gprs[32];
	unsigned long pc;

	/* FPU State */
	struct loongarch_fpu fpu FPU_ALIGN;
	struct loongarch_lbt lbt;
	/* Which auxiliary state is loaded (KVM_LARCH_*) */
	unsigned int aux_inuse;

	/* CSR State */
	struct loongarch_csrs *csr;

	/* GPR used as I/O source/target */
	u32 io_gpr;

	struct hrtimer swtimer;
	/* Count timer control KVM register */
	u32 count_ctl;

	/* Bitmask of exceptions that are pending */
	unsigned long irq_pending;
	/* Bitmask of pending exceptions to be cleared */
	unsigned long irq_clear;

	/* Cache some mmu pages needed inside spinlock regions */
	struct kvm_mmu_memory_cache mmu_page_cache;

	/* vcpu's VPID differs on each host CPU in an SMP system */
	u64 vpid[NR_CPUS];

	/* Period of stable timer tick in ns */
	u64 timer_period;
	/* Frequency of stable timer in MHz */
	u64 timer_mhz;
	/* Stable timer bias from the raw time */
	u64 timer_bias;
	/* Dynamic nanosecond bias (multiple of timer_period) to avoid overflow */
	s64 timer_dyn_bias;
	/* Saved ktime */
	ktime_t stable_ktime_saved;

	u64 core_ext_ioisr[4];

	/* Last CPU the VCPU state was loaded on */
	int last_sched_cpu;
	/* Last CPU the VCPU actually executed guest code on */
	int last_exec_cpu;

	u8 fpu_enabled;
	u8 lsx_enabled;
	/* Paravirt steal time */
	struct {
		u64 guest_addr;
		u64 last_steal;
		struct gfn_to_pfn_cache cache;
	} st;

	/* PV related host specific info */
	struct {
		bool pv_unhalted;
	} pv;

	struct kvm_guest_debug_arch guest_debug;
	/* Saved host PMU CSRs */
	u64 perf_ctrl[4];
	u64 perf_cntr[4];
};

static inline unsigned long readl_sw_gcsr(struct loongarch_csrs *csr, int reg)
{
	return csr->csrs[reg];
}

static inline void writel_sw_gcsr(struct loongarch_csrs *csr, int reg,
		unsigned long val)
{
	csr->csrs[reg] = val;
}
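
/*
 * The accessors above touch only the in-memory CSR image, never the
 * hardware CSRs.  A minimal usage sketch (illustrative; it assumes the
 * usual LOONGARCH_CSR_CRMD and CSR_CRMD_IE definitions from
 * loongarchregs.h):
 *
 *	unsigned long crmd;
 *
 *	crmd = readl_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_CRMD);
 *	writel_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_CRMD,
 *		       crmd | CSR_CRMD_IE);
 */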

/* Helpers */
static inline bool _kvm_guest_has_fpu(struct kvm_vcpu_arch *arch)
{
	return cpu_has_fpu && arch->fpu_enabled;
}

static inline bool _kvm_guest_has_lsx(struct kvm_vcpu_arch *arch)
{
	return cpu_has_lsx && arch->lsx_enabled;
}

bool _kvm_guest_has_lasx(struct kvm_vcpu *vcpu);
void _kvm_init_fault(void);

/* Debug: dump vcpu state */
int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);

/* MMU handling */
int kvm_handle_mm_fault(struct kvm_vcpu *vcpu, unsigned long badv, bool write);
void kvm_flush_tlb_all(void);
void _kvm_destroy_mm(struct kvm *kvm);
pgd_t *kvm_pgd_alloc(void);
void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);

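/*
 * Fault classification results.  Going by the names: MAPPED means the
 * access is already resolved, GVA/GPA name the address-space level at
 * which a fault still has to be handled, and the TLB* values cover the
 * refill, invalid and modified cases (an inference from the
 * identifiers; the fault handlers define the exact semantics).
 */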
enum _kvm_fault_result {
	KVM_LOONGARCH_MAPPED = 0,
	KVM_LOONGARCH_GVA,
	KVM_LOONGARCH_GPA,
	KVM_LOONGARCH_TLB,
	KVM_LOONGARCH_TLBINV,
	KVM_LOONGARCH_TLBMOD,
};

#define KVM_ARCH_WANT_MMU_NOTIFIER
int kvm_unmap_hva_range(struct kvm *kvm,
			unsigned long start, unsigned long end, bool blockable);
int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);

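/*
 * All LoongArch instructions are 32 bits wide, so emulation advances
 * the guest PC by a fixed 4 bytes once an instruction has been handled.
 * Callers should skip this when they instead queue an exception for the
 * guest, otherwise the faulting instruction would be stepped over.
 */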
static inline void update_pc(struct kvm_vcpu_arch *arch)
{
	arch->pc += 4;
}

/**
 * kvm_is_ifetch_fault() - Find whether a TLBL exception is due to ifetch fault.
 * @arch:	VCPU architecture state.
 *
 * Returns:	Whether the TLBL exception was likely due to an instruction
 *		fetch fault rather than a data load fault.
 */
static inline bool kvm_is_ifetch_fault(struct kvm_vcpu_arch *arch)
{
	return arch->pc == arch->badv;
}

/* Misc */
static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_free_memslot(struct kvm *kvm,
					 struct kvm_memory_slot *slot) {}
static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}

extern int kvm_enter_guest(struct kvm_run *run, struct kvm_vcpu *vcpu);
extern void kvm_exception_entry(void);
#endif /* __LOONGARCH_KVM_HOST_H__ */