/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/arm-smccc.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
#include <asm/pgtable-hwdef.h>
#include <asm/sysreg.h>
#include <asm/virt.h>

	.text
	.pushsection	.hyp.idmap.text, "ax"

	.align	11
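	/*
	 * The vector table that follows must be 2kB-aligned: VBAR_EL2 only
	 * holds bits [63:11] of the base address, and the table itself is
	 * 16 entries of 128 bytes, i.e. 2^11 = 2048 bytes.
	 */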

SYM_CODE_START(__kvm_hyp_init)
	ventry	__invalid		// Synchronous EL2t
	ventry	__invalid		// IRQ EL2t
	ventry	__invalid		// FIQ EL2t
	ventry	__invalid		// Error EL2t

	ventry	__invalid		// Synchronous EL2h
	ventry	__invalid		// IRQ EL2h
	ventry	__invalid		// FIQ EL2h
	ventry	__invalid		// Error EL2h

	ventry	__do_hyp_init		// Synchronous 64-bit EL1
	ventry	__invalid		// IRQ 64-bit EL1
	ventry	__invalid		// FIQ 64-bit EL1
	ventry	__invalid		// Error 64-bit EL1

	ventry	__invalid		// Synchronous 32-bit EL1
	ventry	__invalid		// IRQ 32-bit EL1
	ventry	__invalid		// FIQ 32-bit EL1
	ventry	__invalid		// Error 32-bit EL1

__invalid:
	b	.

	/*
	 * x0: SMCCC function ID
	 * x1: HYP pgd
	 * x2: per-CPU offset
	 * x3: HYP stack
	 * x4: HYP vectors
	 */
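	/*
	 * This is the register state the host sets up before issuing the
	 * init hypercall: the SMCCC function ID and the four arguments are
	 * packed into x0-x4, typically via an arm_smccc_1_1_hvc() style
	 * call from the host's EL2 init path (the exact call site is an
	 * assumption and is not shown here).
	 */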
__do_hyp_init:
	/* Check for a stub HVC call */
	cmp	x0, #HVC_STUB_HCALL_NR
	b.lo	__kvm_handle_stub_hvc

	// We only actively check bits [31:24] of the function ID, and
	// everything else has to be zero, which we check at build time.
#if (KVM_HOST_SMCCC_FUNC(__kvm_hyp_init) & 0xFFFFFFFF00FFFFFF)
#error Unexpected __KVM_HOST_SMCCC_FUNC___kvm_hyp_init value
#endif
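	/*
	 * The sequence below checks x0 against the expected function ID
	 * using immediates only. Since the build-time check guarantees that
	 * only bits [31:24] of the ID can be set: the first ror brings bits
	 * [31:24] of x0 down to bits [7:0]; the first eor clears bits [3:0]
	 * if they match bits [27:24] of the expected ID; the second ror
	 * brings the remaining nibble (originally bits [31:28]) down to
	 * bits [3:0], where the second eor clears it against bits [31:28]
	 * of the expected ID. x0 is therefore zero iff the caller passed
	 * exactly KVM_HOST_SMCCC_FUNC(__kvm_hyp_init).
	 */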

	ror	x0, x0, #24
	eor	x0, x0, #((KVM_HOST_SMCCC_FUNC(__kvm_hyp_init) >> 24) & 0xF)
	ror	x0, x0, #4
	eor	x0, x0, #((KVM_HOST_SMCCC_FUNC(__kvm_hyp_init) >> 28) & 0xF)
	cbz	x0, 1f
	mov	x0, #SMCCC_RET_NOT_SUPPORTED
	eret

1:
	/* Set tpidr_el2 for use by HYP to free a register */
	msr	tpidr_el2, x2
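	/*
	 * With the per-CPU offset in tpidr_el2, hyp code can later locate
	 * its per-CPU data without reserving a base register. A sketch of
	 * the usual pattern (the symbol name is hypothetical):
	 *
	 *	adr_l	x0, some_percpu_symbol
	 *	mrs	x1, tpidr_el2
	 *	add	x0, x0, x1		// this CPU's copy
	 */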

	phys_to_ttbr x0, x1
alternative_if ARM64_HAS_CNP
	orr	x0, x0, #TTBR_CNP_BIT
alternative_else_nop_endif
	msr	ttbr0_el2, x0
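	/*
	 * With FEAT_TTCNP, setting the CnP bit in TTBR0_EL2 tells the CPU
	 * that every PE setting it uses identical translation tables, so
	 * TLB entries may be shared between them.
	 */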

	mrs	x0, tcr_el1
	mov_q	x1, TCR_EL2_MASK
	and	x0, x0, x1
	mov	x1, #TCR_EL2_RES1
	orr	x0, x0, x1

	/*
	 * The ID map may be configured to use an extended virtual address
	 * range. This is only the case if system RAM is out of range for the
	 * currently configured page size and VA_BITS, in which case we will
	 * also need the extended virtual range for the HYP ID map, or we won't
	 * be able to enable the EL2 MMU.
	 *
	 * However, at EL2, there is only one TTBR register, and we can't switch
	 * between translation tables *and* update TCR_EL2.T0SZ at the same
	 * time. Bottom line: we need to use the extended range with *both* our
	 * translation tables.
	 *
	 * So use the same T0SZ value we use for the ID map.
	 */
	ldr_l	x1, idmap_t0sz
	bfi	x0, x1, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH
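	/*
	 * Worked example for the field insert above (VA widths assumed for
	 * illustration): T0SZ is 64 minus the VA width, so a 48-bit VA
	 * configuration has idmap_t0sz = 16, and an ID map extended to
	 * 52 bits uses 12. The bfi copies that value into TCR_EL2[5:0]
	 * (TCR_T0SZ_OFFSET is 0, TCR_TxSZ_WIDTH is 6) and leaves the rest
	 * of x0 untouched.
	 */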

	/*
	 * Set the PS bits in TCR_EL2.
	 */
	tcr_compute_pa_size x0, #TCR_EL2_PS_SHIFT, x1, x2

	msr	tcr_el2, x0
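	/*
	 * tcr_compute_pa_size derives PS from ID_AA64MMFR0_EL1.PARange,
	 * capped at what the kernel supports. For illustration, a CPU
	 * reporting PARange = 0b0101 (48-bit PA) ends up with PS = 0b101,
	 * i.e. a 48-bit output address size for the EL2 regime.
	 */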

	mrs	x0, mair_el1
	msr	mair_el2, x0
	isb

	/* Invalidate the stale TLBs from Bootloader */
	tlbi	alle2
	dsb	sy

	/*
	 * Preserve all the RES1 bits while setting the default flags,
	 * as well as the EE bit on BE. Drop the A flag since the compiler
	 * is allowed to generate unaligned accesses.
	 */
	mov_q	x0, (SCTLR_EL2_RES1 | (SCTLR_ELx_FLAGS & ~SCTLR_ELx_A))
CPU_BE(	orr	x0, x0, #SCTLR_ELx_EE)
alternative_if ARM64_HAS_ADDRESS_AUTH
	mov_q	x1, (SCTLR_ELx_ENIA | SCTLR_ELx_ENIB | \
		     SCTLR_ELx_ENDA | SCTLR_ELx_ENDB)
	orr	x0, x0, x1
alternative_else_nop_endif
	msr	sctlr_el2, x0
	isb
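	/*
	 * The EL2 MMU is live from here on. For reference (the exact
	 * SCTLR_ELx_FLAGS composition comes from the headers and is an
	 * assumption): M (bit 0) enables the MMU, C (bit 2) and I (bit 12)
	 * enable data and instruction cacheability, SA (bit 3) enables SP
	 * alignment checking, while A (bit 1) was deliberately left clear
	 * above. ENIA/ENIB/ENDA/ENDB enable the four pointer authentication
	 * keys at EL2 when address authentication is implemented.
	 */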

	/* Set the stack and new vectors */
	mov	sp, x3
	msr	vbar_el2, x4

	/* Hello, World! */
	mov	x0, #SMCCC_RET_SUCCESS
	eret
SYM_CODE_END(__kvm_hyp_init)

SYM_CODE_START(__kvm_handle_stub_hvc)
	cmp	x0, #HVC_SOFT_RESTART
	b.ne	1f

	/* This is where we're about to jump, staying at EL2 */
	msr	elr_el2, x1
	mov	x0, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT | PSR_MODE_EL2h)
	msr	spsr_el2, x0
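	/*
	 * Worked out with the generic PSR bit values (an assumption about
	 * the headers): D|A|I|F = 0x200|0x100|0x80|0x40 and PSR_MODE_EL2h
	 * = 0x9, so SPSR_EL2 becomes 0x3c9: all DAIF exceptions masked,
	 * returning to the restart address at EL2h once the shared reset
	 * path below executes its eret.
	 */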

	/* Shuffle the arguments, and don't come back */
	mov	x0, x2
	mov	x1, x3
	mov	x2, x4
	b	reset

1:	cmp	x0, #HVC_RESET_VECTORS
	b.ne	1f

	/*
	 * Set the HVC_RESET_VECTORS return code before entering the common
	 * path so that we do not clobber x0-x2 in case we are coming via
	 * HVC_SOFT_RESTART.
	 */
	mov	x0, xzr
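	/*
	 * Two ways into the common path below: HVC_SOFT_RESTART arrives
	 * with x0-x2 holding the arguments for the restart target, while
	 * HVC_RESET_VECTORS arrives with x0 = 0 set just above. Either
	 * way, x0-x2 must survive untouched until the final eret.
	 */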
reset:
	/* Reset KVM back to the hyp stub. */
	mrs	x5, sctlr_el2
	mov_q	x6, SCTLR_ELx_FLAGS
	bic	x5, x5, x6		// Clear SCTLR_ELx_M, etc.
	pre_disable_mmu_workaround
	msr	sctlr_el2, x5
	isb

	/* Install stub vectors */
	adr_l	x5, __hyp_stub_vectors
	msr	vbar_el2, x5
	eret

1:	/* Bad stub call */
	mov_q	x0, HVC_STUB_ERR
	eret

SYM_CODE_END(__kvm_handle_stub_hvc)

	.popsection