/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/include/kvm_emulate.h
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#ifndef __ARM64_KVM_EMULATE_H__
#define __ARM64_KVM_EMULATE_H__

#include <linux/kvm_host.h>

#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_hyp.h>
#include <asm/ptrace.h>
#include <asm/cputype.h>
#include <asm/virt.h>

#define CURRENT_EL_SP_EL0_VECTOR	0x0
#define CURRENT_EL_SP_ELx_VECTOR	0x200
#define LOWER_EL_AArch64_VECTOR		0x400
#define LOWER_EL_AArch32_VECTOR		0x600

enum exception_type {
	except_type_sync	= 0,
	except_type_irq		= 0x80,
	except_type_fiq		= 0x100,
	except_type_serror	= 0x180,
};

#define kvm_exception_type_names		\
	{ except_type_sync,	"SYNC"   },	\
	{ except_type_irq,	"IRQ"    },	\
	{ except_type_fiq,	"FIQ"    },	\
	{ except_type_serror,	"SERROR" }
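
/*
 * Illustrative arithmetic (not used directly below): the ARMv8 vector table
 * groups vectors by the source context (current EL with SP_EL0, current EL
 * with SP_ELx, lower EL AArch64, lower EL AArch32), and each group holds the
 * four exception types at fixed offsets. The vector used is VBAR_ELx plus the
 * group offset plus the type offset, e.g. an IRQ taken from a lower EL
 * running AArch64 vectors to
 * VBAR_ELx + LOWER_EL_AArch64_VECTOR + except_type_irq = VBAR_ELx + 0x480.
 */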

bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
void kvm_skip_instr32(struct kvm_vcpu *vcpu);

void kvm_inject_undefined(struct kvm_vcpu *vcpu);
void kvm_inject_vabt(struct kvm_vcpu *vcpu);
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_size_fault(struct kvm_vcpu *vcpu);

void kvm_vcpu_wfi(struct kvm_vcpu *vcpu);

void kvm_emulate_nested_eret(struct kvm_vcpu *vcpu);
int kvm_inject_nested_sync(struct kvm_vcpu *vcpu, u64 esr_el2);
int kvm_inject_nested_irq(struct kvm_vcpu *vcpu);

#if defined(__KVM_VHE_HYPERVISOR__) || defined(__KVM_NVHE_HYPERVISOR__)
static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.hcr_el2 & HCR_RW);
}
#else
static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
{
	return test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features);
}
#endif

static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
	if (has_vhe() || has_hvhe())
		vcpu->arch.hcr_el2 |= HCR_E2H;
	if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN)) {
		/* route synchronous external abort exceptions to EL2 */
		vcpu->arch.hcr_el2 |= HCR_TEA;
		/* trap error record accesses */
		vcpu->arch.hcr_el2 |= HCR_TERR;
	}

	if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB)) {
		vcpu->arch.hcr_el2 |= HCR_FWB;
	} else {
		/*
		 * For non-FWB CPUs, we trap VM ops (HCR_EL2.TVM) until M+C
		 * get set in SCTLR_EL1 such that we can detect when the guest
		 * MMU gets turned on and do the necessary cache maintenance
		 * then.
		 */
		vcpu->arch.hcr_el2 |= HCR_TVM;
	}

	if (cpus_have_final_cap(ARM64_HAS_EVT) &&
	    !cpus_have_final_cap(ARM64_MISMATCHED_CACHE_TYPE))
		vcpu->arch.hcr_el2 |= HCR_TID4;
	else
		vcpu->arch.hcr_el2 |= HCR_TID2;

	if (vcpu_el1_is_32bit(vcpu))
		vcpu->arch.hcr_el2 &= ~HCR_RW;

	if (kvm_has_mte(vcpu->kvm))
		vcpu->arch.hcr_el2 |= HCR_ATA;
}
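
/*
 * Worked example of the logic above (derived from this function, not an
 * exhaustive list): on a host without FWB but with the RAS extension, a
 * 64-bit, non-MTE guest starts with
 * HCR_GUEST_FLAGS | HCR_TEA | HCR_TERR | HCR_TVM, plus either HCR_TID4
 * (FEAT_EVT present, no mismatched cache type) or HCR_TID2, with HCR_E2H
 * added on VHE/hVHE hosts.
 */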

static inline unsigned long *vcpu_hcr(struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu->arch.hcr_el2;
}

static inline void vcpu_clear_wfx_traps(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 &= ~HCR_TWE;
	if (atomic_read(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count) ||
	    vcpu->kvm->arch.vgic.nassgireq)
		vcpu->arch.hcr_el2 &= ~HCR_TWI;
	else
		vcpu->arch.hcr_el2 |= HCR_TWI;
}

static inline void vcpu_set_wfx_traps(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 |= HCR_TWE;
	vcpu->arch.hcr_el2 |= HCR_TWI;
}

static inline void vcpu_ptrauth_enable(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 |= (HCR_API | HCR_APK);
}

static inline void vcpu_ptrauth_disable(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 &= ~(HCR_API | HCR_APK);
}

static inline unsigned long vcpu_get_vsesr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.vsesr_el2;
}

static inline void vcpu_set_vsesr(struct kvm_vcpu *vcpu, u64 vsesr)
{
	vcpu->arch.vsesr_el2 = vsesr;
}

static __always_inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->pc;
}

static __always_inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->pstate;
}

static __always_inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
{
	return !!(*vcpu_cpsr(vcpu) & PSR_MODE32_BIT);
}

static __always_inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return kvm_condition_valid32(vcpu);

	return true;
}

static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
{
	*vcpu_cpsr(vcpu) |= PSR_AA32_T_BIT;
}

/*
 * vcpu_get_reg and vcpu_set_reg should always be passed a register number
 * coming from a read of ESR_EL2. Otherwise, it may give the wrong result on
 * AArch32 with banked registers.
 */
static __always_inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
					 u8 reg_num)
{
	return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs[reg_num];
}

static __always_inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
				unsigned long val)
{
	if (reg_num != 31)
		vcpu_gp_regs(vcpu)->regs[reg_num] = val;
}
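
/*
 * Illustrative sketch, not a kernel helper: completing an emulated read into
 * a transfer register decoded from ESR_EL2 (e.g. via the Rt/SRT accessors
 * further down in this file). Register number 31 encodes XZR/WZR here, so
 * vcpu_get_reg() reads it as zero and vcpu_set_reg() silently discards the
 * write.
 */
static inline void example_complete_reg_read(struct kvm_vcpu *vcpu,
					     u8 esr_reg_num, unsigned long val)
{
	/* Harmless even for reg 31: the store is simply dropped */
	vcpu_set_reg(vcpu, esr_reg_num, val);
}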

static inline bool vcpu_is_el2_ctxt(const struct kvm_cpu_context *ctxt)
{
	switch (ctxt->regs.pstate & (PSR_MODE32_BIT | PSR_MODE_MASK)) {
	case PSR_MODE_EL2h:
	case PSR_MODE_EL2t:
		return true;
	default:
		return false;
	}
}

static inline bool vcpu_is_el2(const struct kvm_vcpu *vcpu)
{
	return vcpu_is_el2_ctxt(&vcpu->arch.ctxt);
}

static inline bool __vcpu_el2_e2h_is_set(const struct kvm_cpu_context *ctxt)
{
	return ctxt_sys_reg(ctxt, HCR_EL2) & HCR_E2H;
}

static inline bool vcpu_el2_e2h_is_set(const struct kvm_vcpu *vcpu)
{
	return __vcpu_el2_e2h_is_set(&vcpu->arch.ctxt);
}

static inline bool __vcpu_el2_tge_is_set(const struct kvm_cpu_context *ctxt)
{
	return ctxt_sys_reg(ctxt, HCR_EL2) & HCR_TGE;
}

static inline bool vcpu_el2_tge_is_set(const struct kvm_vcpu *vcpu)
{
	return __vcpu_el2_tge_is_set(&vcpu->arch.ctxt);
}

static inline bool __is_hyp_ctxt(const struct kvm_cpu_context *ctxt)
{
	/*
	 * We are in a hypervisor context if the vcpu mode is EL2 or
	 * E2H and TGE bits are set. The latter means we are in the user space
	 * of the VHE kernel. ARMv8.1 ARM describes this as 'InHost'
	 *
	 * Note that the HCR_EL2.{E2H,TGE}={0,1} isn't really handled in the
	 * rest of the KVM code, and will result in a misbehaving guest.
	 */
	return vcpu_is_el2_ctxt(ctxt) ||
		(__vcpu_el2_e2h_is_set(ctxt) && __vcpu_el2_tge_is_set(ctxt)) ||
		__vcpu_el2_tge_is_set(ctxt);
}

static inline bool is_hyp_ctxt(const struct kvm_vcpu *vcpu)
{
	return __is_hyp_ctxt(&vcpu->arch.ctxt);
}

/*
 * The layout of SPSR for an AArch32 state is different when observed from an
 * AArch64 SPSR_ELx or an AArch32 SPSR_*. This function generates the AArch32
 * view given an AArch64 view.
 *
 * In ARM DDI 0487E.a see:
 *
 * - The AArch64 view (SPSR_EL2) in section C5.2.18, page C5-426
 * - The AArch32 view (SPSR_abt) in section G8.2.126, page G8-6256
 * - The AArch32 view (SPSR_und) in section G8.2.132, page G8-6280
 *
 * Which show the following differences:
 *
 * | Bit | AA64 | AA32 | Notes                       |
 * +-----+------+------+-----------------------------|
 * | 24  | DIT  | J    | J is RES0 in ARMv8          |
 * | 21  | SS   | DIT  | SS doesn't exist in AArch32 |
 *
 * ... and all other bits are (currently) common.
 */
static inline unsigned long host_spsr_to_spsr32(unsigned long spsr)
{
	const unsigned long overlap = BIT(24) | BIT(21);
	unsigned long dit = !!(spsr & PSR_AA32_DIT_BIT);

	spsr &= ~overlap;

	spsr |= dit << 21;

	return spsr;
}
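
/*
 * Worked example of the remapping above: an SPSR_ELx value with
 * PSR_AA32_DIT_BIT set comes back with bits 24 and 21 cleared and bit 21
 * (the AArch32 DIT position) set again, i.e. DIT is preserved while the
 * AArch64-only SS bit is dropped from the AArch32 view.
 */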

static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
{
	u32 mode;

	if (vcpu_mode_is_32bit(vcpu)) {
		mode = *vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK;
		return mode > PSR_AA32_MODE_USR;
	}

	mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK;

	return mode != PSR_MODE_EL0t;
}

static __always_inline u64 kvm_vcpu_get_esr(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.esr_el2;
}

static __always_inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
{
	u64 esr = kvm_vcpu_get_esr(vcpu);

	if (esr & ESR_ELx_CV)
		return (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;

	return -1;
}

static __always_inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.far_el2;
}

static __always_inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
{
	return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8;
}

static inline u64 kvm_vcpu_get_disr(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.disr_el1;
}

static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_xVC_IMM_MASK;
}

static __always_inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_ISV);
}

static inline unsigned long kvm_vcpu_dabt_iss_nisv_sanitized(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & (ESR_ELx_CM | ESR_ELx_WNR | ESR_ELx_FSC);
}

static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SSE);
}

static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SF);
}

static __always_inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
{
	return (kvm_vcpu_get_esr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
}

static __always_inline bool kvm_vcpu_abt_iss1tw(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_S1PTW);
}

/* Always check for S1PTW *before* using this. */
static __always_inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_WNR;
}

static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_CM);
}

static __always_inline unsigned int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
{
	return 1 << ((kvm_vcpu_get_esr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
}
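
/*
 * Illustrative sketch, not a kernel helper: how the ESR-based accessors above
 * combine when decoding a data abort that carries a valid syndrome
 * (ESR_ELx.ISV set), roughly mirroring what the MMIO emulation code does.
 * The struct and function are hypothetical and exist only to tie the
 * accessors together; note the S1PTW check before trusting WnR, as warned
 * above.
 */
struct example_dabt_decode {
	bool		is_write;
	bool		sign_extend;
	bool		sixty_four;
	unsigned int	len;
	int		rt;
};

static inline bool example_decode_dabt(const struct kvm_vcpu *vcpu,
				       struct example_dabt_decode *d)
{
	if (!kvm_vcpu_dabt_isvalid(vcpu) || kvm_vcpu_abt_iss1tw(vcpu))
		return false;				/* no usable syndrome */

	d->is_write	= kvm_vcpu_dabt_iswrite(vcpu);
	d->sign_extend	= kvm_vcpu_dabt_issext(vcpu);
	d->sixty_four	= kvm_vcpu_dabt_issf(vcpu);
	d->len		= kvm_vcpu_dabt_get_as(vcpu);	/* access size in bytes */
	d->rt		= kvm_vcpu_dabt_get_rd(vcpu);	/* transfer register */

	return true;
}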

/* This one is not specific to Data Abort */
static __always_inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_IL);
}

static __always_inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
{
	return ESR_ELx_EC(kvm_vcpu_get_esr(vcpu));
}

static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
}

static inline bool kvm_vcpu_trap_is_exec_fault(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_trap_is_iabt(vcpu) && !kvm_vcpu_abt_iss1tw(vcpu);
}

static __always_inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC;
}

static __always_inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC_TYPE;
}

static __always_inline u8 kvm_vcpu_trap_get_fault_level(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC_LEVEL;
}

static __always_inline bool kvm_vcpu_abt_issea(const struct kvm_vcpu *vcpu)
{
	switch (kvm_vcpu_trap_get_fault(vcpu)) {
	case ESR_ELx_FSC_EXTABT:
	case ESR_ELx_FSC_SEA_TTW0:
	case ESR_ELx_FSC_SEA_TTW1:
	case ESR_ELx_FSC_SEA_TTW2:
	case ESR_ELx_FSC_SEA_TTW3:
	case ESR_ELx_FSC_SECC:
	case ESR_ELx_FSC_SECC_TTW0:
	case ESR_ELx_FSC_SECC_TTW1:
	case ESR_ELx_FSC_SECC_TTW2:
	case ESR_ELx_FSC_SECC_TTW3:
		return true;
	default:
		return false;
	}
}

static __always_inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
{
	u64 esr = kvm_vcpu_get_esr(vcpu);
	return ESR_ELx_SYS64_ISS_RT(esr);
}

static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
{
	if (kvm_vcpu_abt_iss1tw(vcpu)) {
		/*
		 * Only a permission fault on a S1PTW should be
		 * considered as a write. Otherwise, page tables baked
		 * in a read-only memslot will result in an exception
		 * being delivered in the guest.
		 *
		 * The drawback is that we end-up faulting twice if the
		 * guest is using any of HW AF/DB: a translation fault
		 * to map the page containing the PT (read only at
		 * first), then a permission fault to allow the flags
		 * to be set.
		 */
		switch (kvm_vcpu_trap_get_fault_type(vcpu)) {
		case ESR_ELx_FSC_PERM:
			return true;
		default:
			return false;
		}
	}

	if (kvm_vcpu_trap_is_iabt(vcpu))
		return false;

	return kvm_vcpu_dabt_iswrite(vcpu);
}

static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
{
	return vcpu_read_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
}

static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu)) {
		*vcpu_cpsr(vcpu) |= PSR_AA32_E_BIT;
	} else {
		u64 sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);
		sctlr |= SCTLR_ELx_EE;
		vcpu_write_sys_reg(vcpu, sctlr, SCTLR_EL1);
	}
}

static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return !!(*vcpu_cpsr(vcpu) & PSR_AA32_E_BIT);

	if (vcpu_mode_priv(vcpu))
		return !!(vcpu_read_sys_reg(vcpu, SCTLR_EL1) & SCTLR_ELx_EE);
	else
		return !!(vcpu_read_sys_reg(vcpu, SCTLR_EL1) & SCTLR_EL1_E0E);
}

static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return be16_to_cpu(data & 0xffff);
		case 4:
			return be32_to_cpu(data & 0xffffffff);
		default:
			return be64_to_cpu(data);
		}
	} else {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return le16_to_cpu(data & 0xffff);
		case 4:
			return le32_to_cpu(data & 0xffffffff);
		default:
			return le64_to_cpu(data);
		}
	}

	return data;		/* Leave LE untouched */
}

static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_be16(data & 0xffff);
		case 4:
			return cpu_to_be32(data & 0xffffffff);
		default:
			return cpu_to_be64(data);
		}
	} else {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_le16(data & 0xffff);
		case 4:
			return cpu_to_le32(data & 0xffffffff);
		default:
			return cpu_to_le64(data);
		}
	}

	return data;		/* Leave LE untouched */
}
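
/*
 * Illustrative sketch, not a kernel helper: the usual pairing of the two
 * conversions above with the register accessors when emulating MMIO. On a
 * store, the guest register value is converted to the host's view; on a
 * load, the value read by the host is converted back before being written to
 * the guest register (sign extension omitted here for brevity).
 */
static inline void example_mmio_store_data(struct kvm_vcpu *vcpu, int rt,
					   unsigned int len, u64 *out)
{
	*out = vcpu_data_guest_to_host(vcpu, vcpu_get_reg(vcpu, rt), len);
}

static inline void example_mmio_load_complete(struct kvm_vcpu *vcpu, int rt,
					      unsigned int len, u64 data)
{
	vcpu_set_reg(vcpu, rt, vcpu_data_host_to_guest(vcpu, data, len));
}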

static __always_inline void kvm_incr_pc(struct kvm_vcpu *vcpu)
{
	WARN_ON(vcpu_get_flag(vcpu, PENDING_EXCEPTION));
	vcpu_set_flag(vcpu, INCREMENT_PC);
}

#define kvm_pend_exception(v, e)					\
	do {								\
		WARN_ON(vcpu_get_flag((v), INCREMENT_PC));		\
		vcpu_set_flag((v), PENDING_EXCEPTION);			\
		vcpu_set_flag((v), e);					\
	} while (0)
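
/*
 * The WARN_ON()s above capture the invariant: for a given exit, either the
 * trapped instruction is retired (kvm_incr_pc() after successful emulation)
 * or an exception is made pending for the guest (kvm_pend_exception() with
 * one of the EXCEPT_* flags from kvm_host.h), never both. The pending flag
 * is consumed and the PC/PSTATE update applied when the vcpu next enters
 * the guest.
 */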

static inline bool vcpu_has_feature(struct kvm_vcpu *vcpu, int feature)
{
	return test_bit(feature, vcpu->arch.features);
}

static __always_inline void kvm_write_cptr_el2(u64 val)
{
	if (has_vhe() || has_hvhe())
		write_sysreg(val, cpacr_el1);
	else
		write_sysreg(val, cptr_el2);
}

static __always_inline u64 kvm_get_reset_cptr_el2(struct kvm_vcpu *vcpu)
{
	u64 val;

	if (has_vhe()) {
		val = (CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN |
		       CPACR_EL1_ZEN_EL1EN);
		if (cpus_have_final_cap(ARM64_SME))
			val |= CPACR_EL1_SMEN_EL1EN;
	} else if (has_hvhe()) {
		val = (CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN);

		if (!vcpu_has_sve(vcpu) ||
		    (vcpu->arch.fp_state != FP_STATE_GUEST_OWNED))
			val |= CPACR_EL1_ZEN_EL1EN | CPACR_EL1_ZEN_EL0EN;
		if (cpus_have_final_cap(ARM64_SME))
			val |= CPACR_EL1_SMEN_EL1EN | CPACR_EL1_SMEN_EL0EN;
	} else {
		val = CPTR_NVHE_EL2_RES1;

		if (vcpu_has_sve(vcpu) &&
		    (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED))
			val |= CPTR_EL2_TZ;
		if (cpus_have_final_cap(ARM64_SME))
			val &= ~CPTR_EL2_TSM;
	}

	return val;
}

static __always_inline void kvm_reset_cptr_el2(struct kvm_vcpu *vcpu)
{
	u64 val = kvm_get_reset_cptr_el2(vcpu);

	kvm_write_cptr_el2(val);
}
#endif /* __ARM64_KVM_EMULATE_H__ */