/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/fpsimdmacros.h>
#include <asm/kvm.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_mte.h>
#include <asm/kvm_ptrauth.h>

	.text

/*
 * u64 __guest_enter(struct kvm_vcpu *vcpu);
 */
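/*
 * A minimal sketch of the caller, assuming the usual __kvm_vcpu_run()
 * structure on the C side of the world switch (names shown for
 * illustration only):
 *
 *	do {
 *		exit_code = __guest_enter(vcpu);
 *	} while (fixup_guest_exit(vcpu, &exit_code));
 */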
SYM_FUNC_START(__guest_enter)
	// x0: vcpu
	// x1-x17: clobbered by macros
	// x29: guest context

	// Get a pointer to this CPU's hyp context (x2 is a scratch register)
	adr_this_cpu x1, kvm_hyp_ctxt, x2

	// Store the hyp regs
	save_callee_saved_regs x1
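	// Roughly, and assuming the usual macro definition in
	// <asm/kvm_asm.h>, save_callee_saved_regs expands to:
	//
	//	str	x18,      [x1, #CPU_XREG_OFFSET(18)]
	//	stp	x19, x20, [x1, #CPU_XREG_OFFSET(19)]
	//	...
	//	stp	x29, lr,  [x1, #CPU_XREG_OFFSET(29)]
	//
	// x0-x17 are caller-saved under AAPCS64 and thus dead across the
	// __guest_enter() call (x18 is kept in case the host uses it as a
	// platform register), so these plus sp_el0 are all of hyp's GPR
	// state.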

	// Save hyp's sp_el0
	save_sp_el0	x1, x2

	// Now the hyp state is stored. If we have a pending RAS SError it
	// must affect the host or hyp. If any asynchronous exception is
	// pending we defer the guest entry. The DSB isn't necessary before
	// v8.2 as any SError would be fatal.
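	// Conceptually, the check below is:
	//
	//	if (read_sysreg(isr_el1))	/* IRQ, FIQ or SError pending? */
	//		return ARM_EXCEPTION_IRQ;	/* let the host handle it first */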
alternative_if ARM64_HAS_RAS_EXTN
	dsb	nshst
	isb
alternative_else_nop_endif
	mrs	x1, isr_el1
	cbz	x1, 1f
	mov	x0, #ARM_EXCEPTION_IRQ
	ret

1:
	// Record this vcpu as loaded on this CPU: __guest_exit_panic uses
	// this to tell whether the hyp context needs restoring first.
	set_loaded_vcpu x0, x1, x2

	add	x29, x0, #VCPU_CONTEXT

	// mte_switch_to_guest(g_ctxt, h_ctxt, tmp1)
	mte_switch_to_guest x29, x1, x2
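	// A sketch of what that does, assuming the usual definition in
	// <asm/kvm_mte.h>: behind an ARM64_MTE alternative, and only when
	// HCR_EL2.ATA is set, save hyp's RGSR_EL1/GCR_EL1 into h_ctxt and
	// load the guest's values from g_ctxt.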

	// Macro ptrauth_switch_to_guest format:
	// 	ptrauth_switch_to_guest(guest ctxt, tmp1, tmp2, tmp3)
	// Restoring the guest keys is done here rather than in C because,
	// with pointer authentication enabled for kernel code, switching
	// keys in the middle of a C function would make its PAC signing and
	// authentication mismatch and fault.
	ptrauth_switch_to_guest x29, x0, x1, x2
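	// A sketch of what that does, assuming the usual definition in
	// <asm/kvm_ptrauth.h>: behind an ARM64_HAS_ADDRESS_AUTH alternative,
	// and only when HCR_EL2.API/APK are set, load the guest's key pairs
	// (APIA/APIB/APDA/APDB/APGA) from the context into the key
	// registers.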

	// Restore the guest's sp_el0
	restore_sp_el0 x29, x0

	// Restore guest regs x0-x17
	ldp	x0, x1,   [x29, #CPU_XREG_OFFSET(0)]
	ldp	x2, x3,   [x29, #CPU_XREG_OFFSET(2)]
	ldp	x4, x5,   [x29, #CPU_XREG_OFFSET(4)]
	ldp	x6, x7,   [x29, #CPU_XREG_OFFSET(6)]
	ldp	x8, x9,   [x29, #CPU_XREG_OFFSET(8)]
	ldp	x10, x11, [x29, #CPU_XREG_OFFSET(10)]
	ldp	x12, x13, [x29, #CPU_XREG_OFFSET(12)]
	ldp	x14, x15, [x29, #CPU_XREG_OFFSET(14)]
	ldp	x16, x17, [x29, #CPU_XREG_OFFSET(16)]

	// Restore guest regs x18-x29, lr
	restore_callee_saved_regs x29

	// Do not touch any register after this!
	eret
	sb
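	// The sb above blocks straight-line speculation past the eret, so
	// the panic path below cannot be executed even speculatively.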

SYM_INNER_LABEL(__guest_exit_panic, SYM_L_GLOBAL)
	// x2-x29,lr: vcpu regs
	// vcpu x0-x1 on the stack

	// If the hyp context is loaded, go straight to hyp_panic
	get_loaded_vcpu x0, x1
	cbnz	x0, 1f
	b	hyp_panic

1:
	// The hyp context is saved so make sure it is restored to allow
	// hyp_panic to run at hyp and, subsequently, panic to run in the host.
	// This makes use of __guest_exit to avoid duplication but sets the
	// return address to tail call into hyp_panic. As a side effect, the
	// current state is saved to the guest context but it will only be
	// accurate if the guest had been completely restored.
	adr_this_cpu x0, kvm_hyp_ctxt, x1
	adr_l	x1, hyp_panic
	str	x1, [x0, #CPU_XREG_OFFSET(30)]
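	// When __guest_exit restores the hyp context below,
	// restore_callee_saved_regs reloads lr from the slot we just wrote,
	// so its final ret lands in hyp_panic.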

	// __guest_exit expects the vcpu pointer in x1
	get_vcpu_ptr	x1, x0

SYM_INNER_LABEL(__guest_exit, SYM_L_GLOBAL)
	// x0: return code
	// x1: vcpu
	// x2-x29,lr: vcpu regs
	// vcpu x0-x1 on the stack

	add	x1, x1, #VCPU_CONTEXT

	// With ARM64_HAS_PAN, re-set PSTATE.PAN for hyp: the guest controls
	// its own PAN bit while it runs, so it may be clear at this point.
	ALTERNATIVE(nop, SET_PSTATE_PAN(1), ARM64_HAS_PAN, CONFIG_ARM64_PAN)

	// Store the guest regs x2 and x3
	stp	x2, x3,   [x1, #CPU_XREG_OFFSET(2)]

	// Retrieve the guest regs x0-x1 from the stack
	ldp	x2, x3, [sp], #16	// x0, x1

	// Store the guest regs x0-x1 and x4-x17
	stp	x2, x3,   [x1, #CPU_XREG_OFFSET(0)]
	stp	x4, x5,   [x1, #CPU_XREG_OFFSET(4)]
	stp	x6, x7,   [x1, #CPU_XREG_OFFSET(6)]
	stp	x8, x9,   [x1, #CPU_XREG_OFFSET(8)]
	stp	x10, x11, [x1, #CPU_XREG_OFFSET(10)]
	stp	x12, x13, [x1, #CPU_XREG_OFFSET(12)]
	stp	x14, x15, [x1, #CPU_XREG_OFFSET(14)]
	stp	x16, x17, [x1, #CPU_XREG_OFFSET(16)]

	// Store the guest regs x18-x29, lr
	save_callee_saved_regs x1

	// Store the guest's sp_el0
	save_sp_el0	x1, x2

	adr_this_cpu x2, kvm_hyp_ctxt, x3

	// Macro ptrauth_switch_to_hyp format:
	// 	ptrauth_switch_to_hyp(guest ctxt, host ctxt, tmp1, tmp2, tmp3)
	// As on entry, the key save/restore is done here rather than in C
	// because, with pointer authentication enabled for kernel code,
	// switching keys in the middle of a C function would make its PAC
	// signing and authentication mismatch and fault.
	ptrauth_switch_to_hyp x1, x2, x3, x4, x5

	// mte_switch_to_hyp(g_ctxt, h_ctxt, reg1)
	mte_switch_to_hyp x1, x2, x3

	// Restore hyp's sp_el0
	restore_sp_el0 x2, x3

	// Now restore the hyp regs
	restore_callee_saved_regs x2

	// No vcpu is loaded any more, so a panic from this point on is
	// handled directly at hyp (see __guest_exit_panic)
	set_loaded_vcpu xzr, x2, x3

alternative_if ARM64_HAS_RAS_EXTN
	// If we have the RAS extensions we can consume a pending error
	// without an unmask-SError and isb. The ESB-instruction consumed any
	// pending guest error when we took the exception from the guest.
	mrs_s	x2, SYS_DISR_EL1
	str	x2, [x1, #(VCPU_FAULT_DISR - VCPU_CONTEXT)]
	cbz	x2, 1f
	msr_s	SYS_DISR_EL1, xzr
	orr	x0, x0, #(1<<ARM_EXIT_WITH_SERROR_BIT)
1:	ret
alternative_else
	dsb	sy		// Synchronize against in-flight ld/st
	isb			// Prevent an early read of side-effect free ISR
	mrs	x2, isr_el1
	tbnz	x2, #ISR_EL1_A_SHIFT, 2f
	ret
	nop			// Pad: both alternative sequences must be the same size
2:
alternative_endif
	// We know we have a pending asynchronous abort, now is the
	// time to flush it out. From your VAXorcist book, page 666:
	// "Threaten me not, oh Evil one!  For I speak with
	// the power of VAX, and I command thee to show thyself!"
	// Stash the EL2 exception context and the exit code: deliberately
	// taking the SError below will clobber ELR/ESR/SPSR_EL2.
	mrs	x2, elr_el2
	mrs	x3, esr_el2
	mrs	x4, spsr_el2
	mov	x5, x0

	msr	daifclr, #4	// Unmask aborts

	// This is our single instruction exception window. A pending
	// SError is guaranteed to occur at the earliest when we unmask
	// it, and at the latest just after the ISB.
abort_guest_exit_start:

	isb

abort_guest_exit_end:

	msr	daifset, #4	// Mask aborts
	ret
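	// If the SError fires in the window above, ELR_EL2 points at one of
	// the two labels, and the _kvm_extable entries below redirect the
	// hyp exception handler to the 9997 fixup rather than panicking.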

	_kvm_extable	abort_guest_exit_start, 9997f
	_kvm_extable	abort_guest_exit_end, 9997f
9997:
	msr	daifset, #4	// Mask aborts
	mov	x0, #(1 << ARM_EXIT_WITH_SERROR_BIT)

	// Restore the EL2 exception context (ELR/ESR/SPSR_EL2, clobbered by
	// the SError we just took) so that we can report some information.
	// Merge the exception code with the SError pending bit.
	msr	elr_el2, x2
	msr	esr_el2, x3
	msr	spsr_el2, x4
	orr	x0, x0, x5
1:	ret
SYM_FUNC_END(__guest_enter)