/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/arm-smccc.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/el2_setup.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
#include <asm/pgtable-hwdef.h>
#include <asm/sysreg.h>
#include <asm/virt.h>

	.text
	.pushsection	.idmap.text, "ax"

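	/* The vector table must be 2kB-aligned, as VBAR_EL2 bits [10:0] are RES0 */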
	.align	11

SYM_CODE_START(__kvm_hyp_init)
	ventry	__invalid		// Synchronous EL2t
	ventry	__invalid		// IRQ EL2t
	ventry	__invalid		// FIQ EL2t
	ventry	__invalid		// Error EL2t

	ventry	__invalid		// Synchronous EL2h
	ventry	__invalid		// IRQ EL2h
	ventry	__invalid		// FIQ EL2h
	ventry	__invalid		// Error EL2h

	ventry	__do_hyp_init		// Synchronous 64-bit EL1
	ventry	__invalid		// IRQ 64-bit EL1
	ventry	__invalid		// FIQ 64-bit EL1
	ventry	__invalid		// Error 64-bit EL1

	ventry	__invalid		// Synchronous 32-bit EL1
	ventry	__invalid		// IRQ 32-bit EL1
	ventry	__invalid		// FIQ 32-bit EL1
	ventry	__invalid		// Error 32-bit EL1

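	/* Park the CPU: any exception other than the init HVC is fatal here */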
__invalid:
	b	.

	/*
	 * Only uses x0..x3 so as to not clobber callee-saved SMCCC registers.
	 *
	 * x0: SMCCC function ID
	 * x1: struct kvm_nvhe_init_params PA
	 */
__do_hyp_init:
	/* Check for a stub HVC call */
	cmp	x0, #HVC_STUB_HCALL_NR
	b.lo	__kvm_handle_stub_hvc

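	/* Clear the SMCCC hint bits so a hinted function ID still matches */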
	bic	x0, x0, #ARM_SMCCC_CALL_HINTS
	mov	x3, #KVM_HOST_SMCCC_FUNC(__kvm_hyp_init)
	cmp	x0, x3
	b.eq	1f

	mov	x0, #SMCCC_RET_NOT_SUPPORTED
	eret

1:	mov	x0, x1
	mov	x3, lr
	bl	___kvm_hyp_init			// Clobbers x0..x2
	mov	lr, x3

	/* Hello, World! Report success to the caller. */
	mov	x0, #SMCCC_RET_SUCCESS
	eret
SYM_CODE_END(__kvm_hyp_init)

/*
 * Initialize the hypervisor in EL2.
 *
 * Only uses x0..x2 so as to not clobber callee-saved SMCCC registers
 * and leave x3 for the caller.
 *
 * x0: struct kvm_nvhe_init_params PA
 */
SYM_CODE_START_LOCAL(___kvm_hyp_init)
	ldr	x1, [x0, #NVHE_INIT_STACK_HYP_VA]
	mov	sp, x1

	ldr	x1, [x0, #NVHE_INIT_MAIR_EL2]
	msr	mair_el2, x1

	ldr	x1, [x0, #NVHE_INIT_HCR_EL2]
	msr	hcr_el2, x1

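	// Check whether HCR_EL2.E2H was set, i.e. whether we are running in hVHE mode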
	mov	x2, #HCR_E2H
	and	x2, x1, x2
	cbz	x2, 1f

	// hVHE: Replay the EL2 setup to account for the E2H bit
	// TPIDR_EL2 is used to preserve x0 across the macro maze...
	isb
	msr	tpidr_el2, x0
	init_el2_state
	finalise_el2_state
	mrs	x0, tpidr_el2

1:
	ldr	x1, [x0, #NVHE_INIT_TPIDR_EL2]
	msr	tpidr_el2, x1

	ldr	x1, [x0, #NVHE_INIT_VTTBR]
	msr	vttbr_el2, x1

	ldr	x1, [x0, #NVHE_INIT_VTCR]
	msr	vtcr_el2, x1

	ldr	x1, [x0, #NVHE_INIT_PGD_PA]
	phys_to_ttbr x2, x1
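	// Mark the page tables as Common-not-Private if the CPU supports it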
alternative_if ARM64_HAS_CNP
	orr	x2, x2, #TTBR_CNP_BIT
alternative_else_nop_endif
	msr	ttbr0_el2, x2

	/*
	 * Set the PS bits in TCR_EL2 to match the PA range supported by
	 * the CPU.
	 */
	ldr	x0, [x0, #NVHE_INIT_TCR_EL2]
	tcr_compute_pa_size x0, #TCR_EL2_PS_SHIFT, x1, x2
	msr	tcr_el2, x0

	isb

	/* Invalidate any stale TLB entries left behind by the bootloader */
	tlbi	alle2
	tlbi	vmalls12e1
	dsb	sy
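	// The DSB ensures the invalidation has completed before the MMU is enabled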

	mov_q	x0, INIT_SCTLR_EL2_MMU_ON
alternative_if ARM64_HAS_ADDRESS_AUTH
	mov_q	x1, (SCTLR_ELx_ENIA | SCTLR_ELx_ENIB | \
		     SCTLR_ELx_ENDA | SCTLR_ELx_ENDB)
	orr	x0, x0, x1
alternative_else_nop_endif

#ifdef CONFIG_ARM64_BTI_KERNEL
alternative_if ARM64_BTI
	orr	x0, x0, #SCTLR_EL2_BT
alternative_else_nop_endif
#endif /* CONFIG_ARM64_BTI_KERNEL */

	msr	sctlr_el2, x0
	isb
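	// The MMU is now on; we keep executing from the idmap until told otherwise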

	/* Set the host vector */
	ldr	x0, =__kvm_hyp_host_vector
	msr	vbar_el2, x0

	ret
SYM_CODE_END(___kvm_hyp_init)

/*
 * PSCI CPU_ON entry point
 *
 * x0: struct kvm_nvhe_init_params PA
 */
SYM_CODE_START(kvm_hyp_cpu_entry)
	mov	x1, #1				// is_cpu_on = true
	b	__kvm_hyp_init_cpu
SYM_CODE_END(kvm_hyp_cpu_entry)

/*
 * PSCI CPU_SUSPEND / SYSTEM_SUSPEND entry point
 *
 * x0: struct kvm_nvhe_init_params PA
 */
SYM_CODE_START(kvm_hyp_cpu_resume)
	mov	x1, #0				// is_cpu_on = false
	b	__kvm_hyp_init_cpu
SYM_CODE_END(kvm_hyp_cpu_resume)

/*
 * Common code for CPU entry points. Initializes EL2 state and
 * installs the hypervisor before handing over to a C handler.
 *
 * x0: struct kvm_nvhe_init_params PA
 * x1: bool is_cpu_on
 */
SYM_CODE_START_LOCAL(__kvm_hyp_init_cpu)
	mov	x28, x0				// Stash arguments
	mov	x29, x1

	/* Check that the core was booted in EL2. */
	mrs	x0, CurrentEL
	cmp	x0, #CurrentEL_EL2
	b.eq	2f

	/* The core booted in EL1. KVM cannot be initialized on it. */
1:	wfe
	wfi
	b	1b

2:	msr	SPsel, #1			// We want to use SP_EL{1,2}

	/* Initialize EL2 CPU state to sane values. */
	init_el2_state				// Clobbers x0..x2
	finalise_el2_state
	__init_el2_nvhe_prepare_eret
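	// Sets up SPSR_EL2 for the eventual eret that drops this CPU back to EL1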

	/* Enable MMU, set vectors and stack. */
	mov	x0, x28
	bl	___kvm_hyp_init			// Clobbers x0..x2

	/* Leave the idmap by branching to the hyp VA of the C handler. */
	mov	x0, x29
	ldr	x1, =kvm_host_psci_cpu_entry
	br	x1
SYM_CODE_END(__kvm_hyp_init_cpu)

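/*
 * Stub hypercall dispatch (HVC_SOFT_RESTART / HVC_RESET_VECTORS).
 *
 * x0: stub hypercall number
 * x1..x4: HVC_SOFT_RESTART arguments (new PC in x1, its x0..x2 in x2..x4)
 */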
SYM_CODE_START(__kvm_handle_stub_hvc)
	/*
	 * __kvm_handle_stub_hvc is reached from __host_hvc via an indirect
	 * branch (br), so it must start with a "bti j" landing pad.
	 */
	bti j
	cmp	x0, #HVC_SOFT_RESTART
	b.ne	1f

	/* This is where we're about to jump, staying at EL2 */
	msr	elr_el2, x1
	mov	x0, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT | PSR_MODE_EL2h)
	msr	spsr_el2, x0

	/* Shuffle the arguments, and don't come back */
	mov	x0, x2
	mov	x1, x3
	mov	x2, x4
	b	reset

1:	cmp	x0, #HVC_RESET_VECTORS
	b.ne	1f

	/*
	 * Set the HVC_RESET_VECTORS return code before entering the common
	 * path so that we do not clobber x0-x2 in case we are coming via
	 * HVC_SOFT_RESTART.
	 */
	mov	x0, xzr
reset:
	/* Reset KVM back to the hyp stub. */
	mov_q	x5, INIT_SCTLR_EL2_MMU_OFF
	pre_disable_mmu_workaround
	msr	sctlr_el2, x5
	isb

alternative_if ARM64_KVM_PROTECTED_MODE
	mov_q	x5, HCR_HOST_NVHE_FLAGS
	msr	hcr_el2, x5
alternative_else_nop_endif

	/* Install stub vectors */
	adr_l	x5, __hyp_stub_vectors
	msr	vbar_el2, x5
	eret

1:	/* Bad stub call */
	mov_q	x0, HVC_STUB_ERR
	eret

SYM_CODE_END(__kvm_handle_stub_hvc)

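/*
 * Switch to the pKVM page tables and stack, then tail-call a finalisation
 * function.
 *
 * x0: struct kvm_nvhe_init_params PA
 * x1: finalisation function address, branched to via ret
 */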
SYM_FUNC_START(__pkvm_init_switch_pgd)
	/* Turn the MMU off */
	pre_disable_mmu_workaround
	mrs	x2, sctlr_el2
	bic	x3, x2, #SCTLR_ELx_M
	msr	sctlr_el2, x3
	isb

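	/* Flush any stale TLB entries for the old page tables */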
	tlbi	alle2

	/* Install the new pgtables */
	ldr	x3, [x0, #NVHE_INIT_PGD_PA]
	phys_to_ttbr x4, x3
alternative_if ARM64_HAS_CNP
	orr	x4, x4, #TTBR_CNP_BIT
alternative_else_nop_endif
	msr	ttbr0_el2, x4

	/* Set the new stack pointer */
	ldr	x0, [x0, #NVHE_INIT_STACK_HYP_VA]
	mov	sp, x0

	/* And turn the MMU back on! */
	set_sctlr_el2	x2
	ret	x1
SYM_FUNC_END(__pkvm_init_switch_pgd)

	.popsection