/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#ifndef __ARM64_KVM_HYP_H__
#define __ARM64_KVM_HYP_H__

#include <linux/compiler.h>
#include <linux/kvm_host.h>
#include <asm/alternative.h>
#include <asm/sysreg.h>

DECLARE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
DECLARE_PER_CPU(unsigned long, kvm_hyp_vector);

#define read_sysreg_elx(r,nvh,vh)					\
	({								\
		u64 reg;						\
		asm volatile(ALTERNATIVE(__mrs_s("%0", r##nvh),		\
					 __mrs_s("%0", r##vh),		\
					 ARM64_HAS_VIRT_HOST_EXTN)	\
			     : "=r" (reg));				\
		reg;							\
	})

#define write_sysreg_elx(v,r,nvh,vh)					\
	do {								\
		u64 __val = (u64)(v);					\
		asm volatile(ALTERNATIVE(__msr_s(r##nvh, "%x0"),	\
					 __msr_s(r##vh, "%x0"),		\
					 ARM64_HAS_VIRT_HOST_EXTN)	\
					 : : "rZ" (__val));		\
	} while (0)

/*
 * Unified accessors for registers that have a different encoding
 * between VHE and non-VHE. They must be specified without their "ELx"
 * encoding, but with the SYS_ prefix, as defined in asm/sysreg.h.
 */

#define read_sysreg_el0(r)	read_sysreg_elx(r, _EL0, _EL02)
#define write_sysreg_el0(v,r)	write_sysreg_elx(v, r, _EL0, _EL02)
#define read_sysreg_el1(r)	read_sysreg_elx(r, _EL1, _EL12)
#define write_sysreg_el1(v,r)	write_sysreg_elx(v, r, _EL1, _EL12)
#define read_sysreg_el2(r)	read_sysreg_elx(r, _EL2, _EL1)
#define write_sysreg_el2(v,r)	write_sysreg_elx(v, r, _EL2, _EL1)
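
/*
 * Illustrative usage, a sketch assuming a struct kvm_cpu_context *ctxt:
 * the EL1 context switch code accesses guest state through these
 * wrappers so that the same source serves both modes, e.g.
 *
 *	ctxt_sys_reg(ctxt, SCTLR_EL1) = read_sysreg_el1(SYS_SCTLR);
 *	write_sysreg_el1(ctxt_sys_reg(ctxt, SCTLR_EL1), SYS_SCTLR);
 *
 * On nVHE this accesses SCTLR_EL1 directly; on VHE the ALTERNATIVE is
 * patched so that the same instructions access SCTLR_EL12 instead.
 */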

/*
 * Without an __arch_swab32(), we fall back to ___constant_swab32(), but the
 * static inline wrapper may still be emitted out of line by the compiler.
 * KVM always wants the macro version, as it is always inlined.
 */
#define __kvm_swab32(x)	___constant_swab32(x)
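
/*
 * Illustrative usage, a sketch modelled on the vGICv2 CPU interface proxy
 * ("is_be" below stands for its big-endian guest check): the trap handler
 * swabs 32-bit MMIO data for a big-endian guest without risking an
 * out-of-line call at EL2, e.g.
 *
 *	u32 data = readl_relaxed(addr);
 *	if (is_be)
 *		data = __kvm_swab32(data);
 *	vcpu_set_reg(vcpu, rd, data);
 */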

/* Handle a trapped GICv2 CPU interface access on behalf of the guest. */
int __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu);

/* GICv3 CPU interface save/restore, trap management and trapped access emulation. */
void __vgic_v3_save_state(struct vgic_v3_cpu_if *cpu_if);
void __vgic_v3_restore_state(struct vgic_v3_cpu_if *cpu_if);
void __vgic_v3_activate_traps(struct vgic_v3_cpu_if *cpu_if);
void __vgic_v3_deactivate_traps(struct vgic_v3_cpu_if *cpu_if);
void __vgic_v3_save_aprs(struct vgic_v3_cpu_if *cpu_if);
void __vgic_v3_restore_aprs(struct vgic_v3_cpu_if *cpu_if);
int __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu);

#ifdef __KVM_NVHE_HYPERVISOR__
/* Arch timer trap configuration around guest entry/exit (nVHE only). */
void __timer_enable_traps(struct kvm_vcpu *vcpu);
void __timer_disable_traps(struct kvm_vcpu *vcpu);
#endif

/* System register context save/restore, in nVHE and VHE flavours. */
#ifdef __KVM_NVHE_HYPERVISOR__
void __sysreg_save_state_nvhe(struct kvm_cpu_context *ctxt);
void __sysreg_restore_state_nvhe(struct kvm_cpu_context *ctxt);
#else
void sysreg_save_host_state_vhe(struct kvm_cpu_context *ctxt);
void sysreg_restore_host_state_vhe(struct kvm_cpu_context *ctxt);
void sysreg_save_guest_state_vhe(struct kvm_cpu_context *ctxt);
void sysreg_restore_guest_state_vhe(struct kvm_cpu_context *ctxt);
#endif

/* Debug register context switch, plus host debug buffer handling on nVHE. */
void __debug_switch_to_guest(struct kvm_vcpu *vcpu);
void __debug_switch_to_host(struct kvm_vcpu *vcpu);

#ifdef __KVM_NVHE_HYPERVISOR__
void __debug_save_host_buffers_nvhe(struct kvm_vcpu *vcpu);
void __debug_restore_host_buffers_nvhe(struct kvm_vcpu *vcpu);
#endif

/* FP/SIMD register save/restore, implemented in assembly. */
void __fpsimd_save_state(struct user_fpsimd_state *fp_regs);
void __fpsimd_restore_state(struct user_fpsimd_state *fp_regs);

#ifndef __KVM_NVHE_HYPERVISOR__
/* VHE: adjust trap controls at vcpu load/put time. */
void activate_traps_vhe_load(struct kvm_vcpu *vcpu);
void deactivate_traps_vhe_put(void);
#endif

/* Enter the guest; returns an ARM_EXCEPTION_* exit code. */
u64 __guest_enter(struct kvm_vcpu *vcpu);

void __noreturn hyp_panic(void);
#ifdef __KVM_NVHE_HYPERVISOR__
void __noreturn __hyp_do_panic(struct kvm_cpu_context *host_ctxt, u64 spsr,
			       u64 elr, u64 par);
#endif

#endif /* __ARM64_KVM_HYP_H__ */