// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */

#include <linux/linkage.h>
#include <asm/stackframe.h>
#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/regdef.h>
#include "kvm_compat.h"

#define RESUME_HOST	(1 << 1)
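/*
 * The exit handler's return value carries RESUME_HOST in its low bits;
 * ret_to_host below is taken when it is set, otherwise we re-enter the guest.
 */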

#define GGPR_OFFSET(x)	(KVM_ARCH_GGPR + 8*x)
#define PT_GPR_OFFSET(x)	(PT_R0 + 8*x)

	.text

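/*
 * Guest GPR save/restore covers every GPR except $r0 (hardwired zero) and
 * $r6/a2: a2 holds the kvm_vcpu_arch pointer across these macros, and its
 * guest value is saved/loaded separately (see kvm_exception_entry and the
 * tail of kvm_switch_to_guest).
 */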
.macro kvm_save_guest_gprs base
	.irp n,1,2,3,4,5,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
	KVM_LONG_S	$r\n, \base, GGPR_OFFSET(\n)
	.endr
.endm

.macro kvm_restore_guest_gprs base
	.irp n,1,2,3,4,5,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
	KVM_LONG_L	$r\n, \base, GGPR_OFFSET(\n)
	.endr
.endm

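/*
 * On the host side only ra, tp, sp and the static (callee-saved) registers
 * need to be preserved across kvm_enter_guest(); $r21 (the per-CPU base) is
 * handled separately via KVM_ARCH_HPERCPU.
 */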
.macro kvm_save_host_gpr base
	.irp n,1,2,3,22,23,24,25,26,27,28,29,30,31
	KVM_LONG_S	$r\n, \base, PT_GPR_OFFSET(\n)
	.endr
.endm

.macro kvm_restore_host_gpr base
	.irp n,1,2,3,22,23,24,25,26,27,28,29,30,31
	KVM_LONG_L	$r\n, \base, PT_GPR_OFFSET(\n)
	.endr
.endm

/*
 * Prepare to switch to the guest.
 * @param:
 *  KVM_ARCH: kvm_vcpu_arch pointer, must not be clobbered until 'ertn'
 *  GPRNUM:   GPR number that holds KVM_ARCH (its guest value is loaded last)
 *  tmp, tmp1: temporary registers
 */
.macro kvm_switch_to_guest KVM_ARCH GPRNUM tmp tmp1
	/* Set host ECFG.VS = 0, so that all exceptions share one exception entry */
	csrrd	\tmp, KVM_CSR_ECFG
	bstrins.w	\tmp, zero, (KVM_ECFG_VS_SHIFT + KVM_ECFG_VS_WIDTH - 1), KVM_ECFG_VS_SHIFT
	csrwr	\tmp, KVM_CSR_ECFG

	/* Load up the new EENTRY */
	KVM_LONG_L	\tmp, \KVM_ARCH, KVM_ARCH_GEENTRY
	csrwr		\tmp, KVM_CSR_EENTRY

	/* Set Guest ERA */
	KVM_LONG_L	\tmp, \KVM_ARCH, KVM_ARCH_GPC
	csrwr		\tmp, KVM_CSR_ERA

	/* Save host PGDL */
	csrrd	\tmp, KVM_CSR_PGDL
	KVM_LONG_S	\tmp, \KVM_ARCH, KVM_ARCH_HPGD

	/* Get the struct kvm pointer: vcpu->kvm */
	KVM_LONG_L	\tmp1, \KVM_ARCH, KVM_VCPU_KVM - KVM_VCPU_ARCH

	/* Load guest PGDL */
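	/*
	 * KVM_GPGD is an offset into struct kvm and may not fit a 12-bit load
	 * immediate, so materialize it with lu12i.w/srli.w and use an indexed
	 * load (ldx.d) off the struct kvm pointer.
	 */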
	lu12i.w	\tmp, KVM_GPGD
	srli.w	\tmp, \tmp, 12
	ldx.d	\tmp, \tmp1, \tmp
	csrwr	\tmp, KVM_CSR_PGDL

	/* Mix GID and RID: copy GSTAT.GID into GTLBC.TGID */
	csrrd	\tmp1, KVM_CSR_GSTAT
	bstrpick.w	\tmp1, \tmp1, (KVM_GSTAT_GID_SHIFT + KVM_GSTAT_GID_WIDTH - 1), KVM_GSTAT_GID_SHIFT
	csrrd	\tmp, KVM_CSR_GTLBC
	bstrins.w	\tmp, \tmp1, (KVM_GTLBC_TGID_SHIFT + KVM_GTLBC_TGID_WIDTH - 1), KVM_GTLBC_TGID_SHIFT
	csrwr	\tmp, KVM_CSR_GTLBC

	/*
	 * Switch to guest:
	 *  GSTAT.PGM = 1, ERRCTL.ISERR = 0, TLBRPRMD.ISTLBR = 0
	 *  ertn
	 */

	/*
	 * Enable interrupts in root mode with the coming ertn, so that host
	 * interrupts can be serviced while the VM runs; the guest CRMD comes
	 * from the separate gcsr_CRMD register.
	 */
	ori	\tmp, zero, KVM_PRMD_PIE
	csrxchg	\tmp, \tmp, KVM_CSR_PRMD

	/* Set the PVM bit so that ertn enters guest context */
	ori	\tmp, zero, KVM_GSTAT_PVM
	csrxchg	\tmp, \tmp, KVM_CSR_GSTAT

	/* Load guest GPRs */
	kvm_restore_guest_gprs	\KVM_ARCH

	/* Finally load the guest value of the KVM_ARCH register itself */
	KVM_LONG_L	\KVM_ARCH, \KVM_ARCH, GGPR_OFFSET(\GPRNUM)

	ertn
.endm

#ifndef	EXCPTION_ENTRY
#define EXCPTION_ENTRY(name)	\
	.globl name ASM_NL	\
	.p2align	12;	\
	name:			\
	.cfi_startproc;
#endif
#ifndef	EXCPTION_ENDPROC
#define EXCPTION_ENDPROC(name)	\
	.cfi_endproc;		\
	SYM_END(name, SYM_T_FUNC)
#endif

/* Load kvm_vcpu into a2; guest a2 is first stashed in KVM_TEMP_KS */
EXCPTION_ENTRY(kvm_exception_entry)
	csrwr	a2, KVM_TEMP_KS
	csrrd	a2, KVM_VCPU_KS
	KVM_LONG_ADDI	a2, a2, KVM_VCPU_ARCH

	/* After saving the guest GPRs, any GPR is free to use */
	kvm_save_guest_gprs	a2
	/* Save guest a2 */
	csrrd	t0, KVM_TEMP_KS
	KVM_LONG_S	t0, a2, GGPR_OFFSET(REG_A2)

	b	kvm_exit_entry
EXCPTION_ENDPROC(kvm_exception_entry)

/* a2: kvm_vcpu_arch, a1 is free to use */
SYM_FUNC_START(kvm_exit_entry)
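	/*
	 * s0 = kvm_run, s1 = kvm_vcpu; both are callee-saved, so they survive
	 * the call into the exit handler below.
	 */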
	csrrd	s1, KVM_VCPU_KS
	KVM_LONG_L	s0, s1, KVM_VCPU_RUN

	csrrd		t0, KVM_CSR_ESTAT
	KVM_LONG_S	t0, a2, KVM_ARCH_HESTAT
	csrrd		t0, KVM_CSR_ERA
	KVM_LONG_S	t0, a2, KVM_ARCH_GPC
	csrrd		t0, KVM_CSR_BADV
	KVM_LONG_S	t0, a2, KVM_ARCH_HBADV
	csrrd		t0, KVM_CSR_BADI
	KVM_LONG_S	t0, a2, KVM_ARCH_HBADI

	/* Restore host ECFG.VS */
	csrrd		t0, KVM_CSR_ECFG
	KVM_LONG_L	t1, a2, KVM_ARCH_HECFG
	or		t0, t0, t1
	csrwr		t0, KVM_CSR_ECFG

	/* Restore host EENTRY */
	KVM_LONG_L	t0, a2, KVM_ARCH_HEENTRY
	csrwr		t0, KVM_CSR_EENTRY

	/* Restore host PGD table */
	KVM_LONG_L	t0, a2, KVM_ARCH_HPGD
	csrwr	t0, KVM_CSR_PGDL

	/*
	 * Clear the PVM (GSTAT.PGM) bit so that the next ertn returns to
	 * root mode by default
	 */
	ori	t0, zero, KVM_GSTAT_PVM
	csrxchg	zero, t0, KVM_CSR_GSTAT

	/*
	 * Clear the GTLBC.TGID field:
	 *       0: future TLB instructions update root TLB entries
	 *  others: future TLB instructions update guest (GPA->HPA) entries
	 */
	csrrd	t0, KVM_CSR_GTLBC
	bstrins.w	t0, zero, KVM_GTLBC_TGID_SHIFT + KVM_GTLBC_TGID_WIDTH - 1, KVM_GTLBC_TGID_SHIFT
	csrwr	t0, KVM_CSR_GTLBC

	KVM_LONG_L	tp, a2, KVM_ARCH_HGP
	KVM_LONG_L	sp, a2, KVM_ARCH_HSTACK
	/* Restore the per-CPU base register */
	KVM_LONG_L	$r21, a2, KVM_ARCH_HPERCPU

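	/*
	 * Point sp at the pt_regs frame that kvm_enter_guest carved out below
	 * the saved host sp; it also serves as the stack for the call into
	 * the exit handler.
	 */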
	KVM_LONG_ADDI	sp, sp, -PT_SIZE

	/* Call the exit handler: a0 = kvm_run, a1 = kvm_vcpu */
	or		a0, s0, zero
	or		a1, s1, zero
	KVM_LONG_L	t8, a2, KVM_ARCH_HANDLE_EXIT
	jirl		ra, t8, 0

	or	a2, s1, zero
	KVM_LONG_ADDI	a2, a2, KVM_VCPU_ARCH

	andi	t0, a0, RESUME_HOST
	bnez	t0, ret_to_host
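	/* Staying in the guest: clear the vcpu's ISHYPCALL flag before re-entry */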
	INT_S	zero, a2, KVM_ARCH_ISHYPCALL

ret_to_guest:
	/* Save the per-CPU base register again; we may have migrated to another CPU */
	KVM_LONG_S	$r21, a2, KVM_ARCH_HPERCPU

	/* Save kvm_vcpu to kscratch */
	csrwr	s1, KVM_VCPU_KS
	kvm_switch_to_guest a2 REG_A2 t0 t1

ret_to_host:
	KVM_LONG_L	a2, a2, KVM_ARCH_HSTACK
	addi.d	a2, a2, -PT_SIZE
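	/*
	 * The handler's return value keeps resume flags (e.g. RESUME_HOST) in
	 * its low two bits; shift them out to form the value returned to the
	 * caller of kvm_enter_guest().
	 */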
	srai.w	a3, a0, 2
	or	a0, a3, zero
	kvm_restore_host_gpr	a2
	jirl	zero, ra, 0
SYM_FUNC_END(kvm_exit_entry)

/*
 * int kvm_enter_guest(struct kvm_run *run, struct kvm_vcpu *vcpu)
 *
 * @register_param:
 *  a0: kvm_run* run
 *  a1: kvm_vcpu* vcpu
 */
SYM_FUNC_START(kvm_enter_guest)
	/* Allocate a pt_regs frame below the current stack pointer */
	KVM_LONG_ADDI	a2, sp, -PT_SIZE

	/* Save host GPRs */
	kvm_save_host_gpr a2

	/* Save host CRMD and PRMD CSRs to the stack frame */
	csrrd		a3, KVM_CSR_CRMD
	KVM_LONG_S	a3, a2, PT_CRMD
	csrrd		a3, KVM_CSR_PRMD
	KVM_LONG_S	a3, a2, PT_PRMD

	KVM_LONG_ADDI	a2, a1, KVM_VCPU_ARCH
	KVM_LONG_S	sp, a2, KVM_ARCH_HSTACK
	KVM_LONG_S	tp, a2, KVM_ARCH_HGP
	/* Save the per-CPU base register */
	KVM_LONG_S	$r21, a2, KVM_ARCH_HPERCPU

	/* Save kvm_vcpu to kscratch */
	csrwr	a1, KVM_VCPU_KS

	kvm_switch_to_guest	a2 REG_A2 t0 t1
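	/*
	 * kvm_switch_to_guest ends with ertn, so we never fall through here;
	 * control comes back via kvm_exception_entry and ret_to_host above.
	 */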

SYM_FUNC_END(kvm_enter_guest)

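/*
 * FPU/SIMD context helpers: a0 holds the context pointer consumed by the
 * fpu, lsx and lasx save/restore macros from asm/asmmacro.h; t1 and t2 are
 * scratch registers.
 */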
SYM_FUNC_START(__kvm_save_fpu)
	fpu_save_csr    a0 t1
	fpu_save_double a0 t1
	fpu_save_cc     a0 t1 t2
	jirl    zero, ra, 0
SYM_FUNC_END(__kvm_save_fpu)

SYM_FUNC_START(__kvm_restore_fpu)
	fpu_restore_double a0 t1                # clobbers t1
	fpu_restore_cc     a0 t1 t2             # clobbers t1, t2
	fpu_restore_csr    a0 t1
	jirl    zero, ra, 0
SYM_FUNC_END(__kvm_restore_fpu)

#ifdef CONFIG_CPU_HAS_LSX
SYM_FUNC_START(__kvm_save_lsx)
	fpu_save_csr    a0 t1
	fpu_save_cc     a0 t1 t2
	lsx_save_data   a0 t1
	jirl    zero, ra, 0
SYM_FUNC_END(__kvm_save_lsx)

SYM_FUNC_START(__kvm_restore_lsx)
	lsx_restore_data a0 t1
	fpu_restore_cc   a0 t1 t2                # clobbers t1, t2
	fpu_restore_csr  a0 t1
	jirl    zero, ra, 0
SYM_FUNC_END(__kvm_restore_lsx)

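/*
 * Restore only the upper halves of the LSX registers, for the case where
 * the low (FP) halves are already live.
 */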
SYM_FUNC_START(__kvm_restore_lsx_upper)
	lsx_restore_all_upper a0 t0 t1

	jirl    zero, ra, 0
SYM_FUNC_END(__kvm_restore_lsx_upper)
#endif

#ifdef CONFIG_CPU_HAS_LASX
SYM_FUNC_START(__kvm_save_lasx)
	fpu_save_csr    a0 t1
	fpu_save_cc     a0 t1 t2
	lasx_save_data  a0 t1

	jirl    zero, ra, 0
SYM_FUNC_END(__kvm_save_lasx)

SYM_FUNC_START(__kvm_restore_lasx)
	lasx_restore_data a0 t1
	fpu_restore_cc    a0 t1 t2                # clobbers t1, t2
	fpu_restore_csr   a0 t1
	jirl    zero, ra, 0
SYM_FUNC_END(__kvm_restore_lasx)
#endif