/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/include/kvm_emulate.h
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#ifndef __ARM64_KVM_EMULATE_H__
#define __ARM64_KVM_EMULATE_H__

#include <linux/kvm_host.h>

#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_hyp.h>
#include <asm/ptrace.h>
#include <asm/cputype.h>
#include <asm/virt.h>

unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num);
unsigned long vcpu_read_spsr32(const struct kvm_vcpu *vcpu);
void vcpu_write_spsr32(struct kvm_vcpu *vcpu, unsigned long v);

bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr);

void kvm_inject_undefined(struct kvm_vcpu *vcpu);
void kvm_inject_vabt(struct kvm_vcpu *vcpu);
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_undef32(struct kvm_vcpu *vcpu);
void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt32(struct kvm_vcpu *vcpu, unsigned long addr);

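/* EL1 runs AArch32 iff HCR_EL2.RW is clear for this vcpu */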
static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.hcr_el2 & HCR_RW);
}

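/*
 * Compute the vcpu's initial HCR_EL2 value from the baseline guest
 * flags, the host's CPU capabilities and the configured vcpu features.
 */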
static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
	if (is_kernel_in_hyp_mode())
		vcpu->arch.hcr_el2 |= HCR_E2H;
	if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN)) {
		/* route synchronous external abort exceptions to EL2 */
		vcpu->arch.hcr_el2 |= HCR_TEA;
		/* trap error record accesses */
		vcpu->arch.hcr_el2 |= HCR_TERR;
	}

	if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB)) {
		vcpu->arch.hcr_el2 |= HCR_FWB;
	} else {
		/*
		 * For non-FWB CPUs, we trap VM ops (HCR_EL2.TVM) until M+C
		 * get set in SCTLR_EL1 such that we can detect when the guest
		 * MMU gets turned on and do the necessary cache maintenance
		 * then.
		 */
		vcpu->arch.hcr_el2 |= HCR_TVM;
	}

	if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features))
		vcpu->arch.hcr_el2 &= ~HCR_RW;

	/*
	 * TID3: trap feature register accesses that we virtualise.
	 * For now this is conditional, since no AArch32 feature regs
	 * are currently virtualised.
	 */
	if (!vcpu_el1_is_32bit(vcpu))
		vcpu->arch.hcr_el2 |= HCR_TID3;

	if (cpus_have_const_cap(ARM64_MISMATCHED_CACHE_TYPE) ||
	    vcpu_el1_is_32bit(vcpu))
		vcpu->arch.hcr_el2 |= HCR_TID2;
}

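/* Direct access to the vcpu's shadow copy of HCR_EL2 */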
static inline unsigned long *vcpu_hcr(struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu->arch.hcr_el2;
}

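/*
 * Relax the WFx traps for this vcpu: WFE is never trapped, and WFI is
 * only kept trapped when the vcpu has neither mapped vLPIs nor active
 * vSGIs.
 */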
static inline void vcpu_clear_wfx_traps(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 &= ~HCR_TWE;
	if (atomic_read(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count) ||
	    vcpu->kvm->arch.vgic.nassgireq)
		vcpu->arch.hcr_el2 &= ~HCR_TWI;
	else
		vcpu->arch.hcr_el2 |= HCR_TWI;
}

static inline void vcpu_set_wfx_traps(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 |= HCR_TWE;
	vcpu->arch.hcr_el2 |= HCR_TWI;
}

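/*
 * Setting HCR_EL2.{API,APK} disables the EL2 traps on pointer
 * authentication instructions and key registers, making ptrauth
 * usable by the guest.
 */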
static inline void vcpu_ptrauth_enable(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 |= (HCR_API | HCR_APK);
}

static inline void vcpu_ptrauth_disable(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 &= ~(HCR_API | HCR_APK);
}

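/* VSESR_EL2 provides the syndrome value reported when a virtual SError is taken */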
static inline unsigned long vcpu_get_vsesr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.vsesr_el2;
}

static inline void vcpu_set_vsesr(struct kvm_vcpu *vcpu, u64 vsesr)
{
	vcpu->arch.vsesr_el2 = vsesr;
}

static __always_inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->pc;
}

static __always_inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->pstate;
}

static __always_inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
{
	return !!(*vcpu_cpsr(vcpu) & PSR_MODE32_BIT);
}

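/*
 * Trapped AArch64 instructions are always unconditional; only AArch32
 * needs an explicit condition-code check.
 */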
static __always_inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return kvm_condition_valid32(vcpu);

	return true;
}

static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
{
	*vcpu_cpsr(vcpu) |= PSR_AA32_T_BIT;
}

/*
 * vcpu_get_reg and vcpu_set_reg should always be passed a register number
 * coming from a read of ESR_EL2. Otherwise, it may give the wrong result on
 * AArch32 with banked registers.
 */
static __always_inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
					 u8 reg_num)
{
	return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs[reg_num];
}

static __always_inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
				unsigned long val)
{
	if (reg_num != 31)
		vcpu_gp_regs(vcpu)->regs[reg_num] = val;
}

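/*
 * SPSR_EL1 may either be live on the CPU or held in the in-memory
 * context, depending on whether the sysregs are currently loaded.
 */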
static inline unsigned long vcpu_read_spsr(const struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return vcpu_read_spsr32(vcpu);

	if (vcpu->arch.sysregs_loaded_on_cpu)
		return read_sysreg_el1(SYS_SPSR);
	else
		return __vcpu_sys_reg(vcpu, SPSR_EL1);
}

static inline void vcpu_write_spsr(struct kvm_vcpu *vcpu, unsigned long v)
{
	if (vcpu_mode_is_32bit(vcpu)) {
		vcpu_write_spsr32(vcpu, v);
		return;
	}

	if (vcpu->arch.sysregs_loaded_on_cpu)
		write_sysreg_el1(v, SYS_SPSR);
	else
		__vcpu_sys_reg(vcpu, SPSR_EL1) = v;
}

/*
 * The layout of SPSR for an AArch32 state is different when observed from an
 * AArch64 SPSR_ELx or an AArch32 SPSR_*. This function generates the AArch32
 * view given an AArch64 view.
 *
 * In ARM DDI 0487E.a see:
 *
 * - The AArch64 view (SPSR_EL2) in section C5.2.18, page C5-426
 * - The AArch32 view (SPSR_abt) in section G8.2.126, page G8-6256
 * - The AArch32 view (SPSR_und) in section G8.2.132, page G8-6280
 *
 * Which show the following differences:
 *
 * | Bit | AA64 | AA32 | Notes                       |
 * +-----+------+------+-----------------------------|
 * | 24  | DIT  | J    | J is RES0 in ARMv8          |
 * | 21  | SS   | DIT  | SS doesn't exist in AArch32 |
 *
 * ... and all other bits are (currently) common.
 */
static inline unsigned long host_spsr_to_spsr32(unsigned long spsr)
{
	const unsigned long overlap = BIT(24) | BIT(21);
	/* In the AArch64 view, DIT lives at bit 24 (PSR_DIT_BIT) */
	unsigned long dit = !!(spsr & PSR_DIT_BIT);

	spsr &= ~overlap;

	spsr |= dit << 21;

	return spsr;
}

static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
{
	u32 mode;

	if (vcpu_mode_is_32bit(vcpu)) {
		mode = *vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK;
		return mode > PSR_AA32_MODE_USR;
	}

	mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK;

	return mode != PSR_MODE_EL0t;
}

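/* ESR_EL2 as snapshotted on the last exit from this vcpu */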
static __always_inline u32 kvm_vcpu_get_esr(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.esr_el2;
}

static __always_inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
{
	u32 esr = kvm_vcpu_get_esr(vcpu);

	if (esr & ESR_ELx_CV)
		return (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;

	return -1;
}

static __always_inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.far_el2;
}

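/*
 * HPFAR_EL2 holds IPA[47:12] of the faulting address in its FIPA field
 * (bits [39:4]); shifting the masked value left by 8 rebuilds the IPA.
 */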
static __always_inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
{
	return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8;
}

static inline u64 kvm_vcpu_get_disr(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.disr_el1;
}

static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_xVC_IMM_MASK;
}

static __always_inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_ISV);
}

static inline unsigned long kvm_vcpu_dabt_iss_nisv_sanitized(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & (ESR_ELx_CM | ESR_ELx_WNR | ESR_ELx_FSC);
}

static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SSE);
}

static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SF);
}

static __always_inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
{
	return (kvm_vcpu_get_esr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
}

static __always_inline bool kvm_vcpu_abt_iss1tw(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_S1PTW);
}

/* Always check for S1PTW *before* using this. */
static __always_inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_WNR;
}

static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_CM);
}

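/* Decode the access size in bytes: ESR_ELx.SAS encodes log2(bytes) */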
static __always_inline unsigned int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
{
	return 1 << ((kvm_vcpu_get_esr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
}

/* This one is not specific to Data Abort */
static __always_inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_IL);
}

static __always_inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
{
	return ESR_ELx_EC(kvm_vcpu_get_esr(vcpu));
}

static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
}

static inline bool kvm_vcpu_trap_is_exec_fault(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_trap_is_iabt(vcpu) && !kvm_vcpu_abt_iss1tw(vcpu);
}

static __always_inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC;
}

static __always_inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC_TYPE;
}

static __always_inline u8 kvm_vcpu_trap_get_fault_level(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC_LEVEL;
}

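/*
 * True for any synchronous external abort (including parity/ECC errors),
 * whether on the access itself or on a translation table walk.
 */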
static __always_inline bool kvm_vcpu_abt_issea(const struct kvm_vcpu *vcpu)
{
	switch (kvm_vcpu_trap_get_fault(vcpu)) {
	case FSC_SEA:
	case FSC_SEA_TTW0:
	case FSC_SEA_TTW1:
	case FSC_SEA_TTW2:
	case FSC_SEA_TTW3:
	case FSC_SECC:
	case FSC_SECC_TTW0:
	case FSC_SECC_TTW1:
	case FSC_SECC_TTW2:
	case FSC_SECC_TTW3:
		return true;
	default:
		return false;
	}
}

static __always_inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
{
	u32 esr = kvm_vcpu_get_esr(vcpu);

	return ESR_ELx_SYS64_ISS_RT(esr);
}

static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
{
	if (kvm_vcpu_abt_iss1tw(vcpu)) {
		/*
		 * Only a permission fault on a S1PTW should be
		 * considered as a write. Otherwise, page tables baked
		 * in a read-only memslot will result in an exception
		 * being delivered in the guest.
		 *
		 * The drawback is that we end-up faulting twice if the
		 * guest is using any of HW AF/DB: a translation fault
		 * to map the page containing the PT (read only at
		 * first), then a permission fault to allow the flags
		 * to be set.
		 */
		switch (kvm_vcpu_trap_get_fault_type(vcpu)) {
		case ESR_ELx_FSC_PERM:
			return true;
		default:
			return false;
		}
	}

	if (kvm_vcpu_trap_is_iabt(vcpu))
		return false;

	return kvm_vcpu_dabt_iswrite(vcpu);
}

static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
{
	return vcpu_read_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
}

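/*
 * Force the guest big-endian: the SPSR E bit for AArch32, SCTLR_EL1.EE
 * for AArch64.
 */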
static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu)) {
		*vcpu_cpsr(vcpu) |= PSR_AA32_E_BIT;
	} else {
		u64 sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);

		sctlr |= SCTLR_ELx_EE;
		vcpu_write_sys_reg(vcpu, sctlr, SCTLR_EL1);
	}
}

static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return !!(*vcpu_cpsr(vcpu) & PSR_AA32_E_BIT);

	return !!(vcpu_read_sys_reg(vcpu, SCTLR_EL1) & SCTLR_ELx_EE);
}

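/* Massage MMIO data from guest to host byte order for the given access length */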
static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return be16_to_cpu(data & 0xffff);
		case 4:
			return be32_to_cpu(data & 0xffffffff);
		default:
			return be64_to_cpu(data);
		}
	} else {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return le16_to_cpu(data & 0xffff);
		case 4:
			return le32_to_cpu(data & 0xffffffff);
		default:
			return le64_to_cpu(data);
		}
	}

	return data;		/* Leave LE untouched */
}

static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_be16(data & 0xffff);
		case 4:
			return cpu_to_be32(data & 0xffffffff);
		default:
			return cpu_to_be64(data);
		}
	} else {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_le16(data & 0xffff);
		case 4:
			return cpu_to_le32(data & 0xffffffff);
		default:
			return cpu_to_le64(data);
		}
	}

	return data;		/* Leave LE untouched */
}

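/*
 * Skip the instruction that trapped. AArch32 needs the IT-state aware
 * helper; AArch64 simply advances PC by 4 and clears PSTATE.BTYPE.
 */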
static __always_inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
{
	if (vcpu_mode_is_32bit(vcpu)) {
		kvm_skip_instr32(vcpu, is_wide_instr);
	} else {
		*vcpu_pc(vcpu) += 4;
		*vcpu_cpsr(vcpu) &= ~PSR_BTYPE_MASK;
	}

	/* advance the singlestep state machine */
	*vcpu_cpsr(vcpu) &= ~DBG_SPSR_SS;
}

/*
 * Skip an instruction which has been emulated at hyp while most guest sysregs
 * are live.
 */
static __always_inline void __kvm_skip_instr(struct kvm_vcpu *vcpu)
{
	*vcpu_pc(vcpu) = read_sysreg_el2(SYS_ELR);
	vcpu_gp_regs(vcpu)->pstate = read_sysreg_el2(SYS_SPSR);

	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));

	write_sysreg_el2(vcpu_gp_regs(vcpu)->pstate, SYS_SPSR);
	write_sysreg_el2(*vcpu_pc(vcpu), SYS_ELR);
}

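/* Test a KVM_ARM_VCPU_* feature bit requested for this vcpu */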
static inline bool vcpu_has_feature(struct kvm_vcpu *vcpu, int feature)
{
	return test_bit(feature, vcpu->arch.features);
}

#endif /* __ARM64_KVM_EMULATE_H__ */