xref: /kernel/linux/linux-5.10/arch/arm64/kvm/regmap.c (revision 8c2ecf20)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/emulate.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/mm.h>
#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/ptrace.h>

#define VCPU_NR_MODES 6
#define REG_OFFSET(_reg) \
	(offsetof(struct user_pt_regs, _reg) / sizeof(unsigned long))

#define USR_REG_OFFSET(R) REG_OFFSET(compat_usr(R))

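/*
 * One row per AArch32 mode, in the order USR, FIQ, IRQ, SVC, ABT, UND
 * (matching the row index computed by vcpu_reg32() below). Each entry
 * is the offset, in units of unsigned long, of the copy of r0-r15 that
 * the mode sees within struct user_pt_regs.
 */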
static const unsigned long vcpu_reg_offsets[VCPU_NR_MODES][16] = {
	/* USR Registers */
	{
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12), USR_REG_OFFSET(13), USR_REG_OFFSET(14),
		REG_OFFSET(pc)
	},

	/* FIQ Registers */
	{
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7),
		REG_OFFSET(compat_r8_fiq),  /* r8 */
		REG_OFFSET(compat_r9_fiq),  /* r9 */
		REG_OFFSET(compat_r10_fiq), /* r10 */
		REG_OFFSET(compat_r11_fiq), /* r11 */
		REG_OFFSET(compat_r12_fiq), /* r12 */
		REG_OFFSET(compat_sp_fiq),  /* r13 */
		REG_OFFSET(compat_lr_fiq),  /* r14 */
		REG_OFFSET(pc)
	},

	/* IRQ Registers */
	{
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12),
		REG_OFFSET(compat_sp_irq), /* r13 */
		REG_OFFSET(compat_lr_irq), /* r14 */
		REG_OFFSET(pc)
	},

	/* SVC Registers */
	{
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12),
		REG_OFFSET(compat_sp_svc), /* r13 */
		REG_OFFSET(compat_lr_svc), /* r14 */
		REG_OFFSET(pc)
	},

	/* ABT Registers */
	{
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12),
		REG_OFFSET(compat_sp_abt), /* r13 */
		REG_OFFSET(compat_lr_abt), /* r14 */
		REG_OFFSET(pc)
	},

	/* UND Registers */
	{
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12),
		REG_OFFSET(compat_sp_und), /* r13 */
		REG_OFFSET(compat_lr_und), /* r14 */
		REG_OFFSET(pc)
	},
};

/*
 * Return a pointer to the copy of register reg_num that is visible in
 * the current AArch32 mode of the virtual CPU.
 */
unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num)
{
	unsigned long *reg_array = (unsigned long *)&vcpu->arch.ctxt.regs;
	unsigned long mode = *vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK;

	switch (mode) {
	case PSR_AA32_MODE_USR ... PSR_AA32_MODE_SVC:
		mode &= ~PSR_MODE32_BIT; /* 0 ... 3 */
		break;

	case PSR_AA32_MODE_ABT:
		mode = 4;
		break;

	case PSR_AA32_MODE_UND:
		mode = 5;
		break;

	case PSR_AA32_MODE_SYS:
		mode = 0;	/* SYS maps to USR */
		break;

	default:
		BUG();
	}

	return reg_array + vcpu_reg_offsets[mode][reg_num];
}
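
/*
 * Illustrative sketch (hypothetical, not a call site in this file): an
 * AArch32 emulation path that needs the banked link register could do
 *
 *	unsigned long *lr = vcpu_reg32(vcpu, 14);
 *	unsigned long ret_addr = *lr;	// read r14 for the current mode
 *	*lr = new_value;		// and/or update it in place
 *
 * The returned pointer refers to the copy banked for the mode at the
 * time of the call, so it must not be cached across a mode change.
 */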

/*
 * Return the KVM_SPSR_* index identifying the SPSR banked by the
 * current mode of the virtual CPU.
 */
static int vcpu_spsr32_mode(const struct kvm_vcpu *vcpu)
{
	unsigned long mode = *vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK;
	switch (mode) {
	case PSR_AA32_MODE_SVC: return KVM_SPSR_SVC;
	case PSR_AA32_MODE_ABT: return KVM_SPSR_ABT;
	case PSR_AA32_MODE_UND: return KVM_SPSR_UND;
	case PSR_AA32_MODE_IRQ: return KVM_SPSR_IRQ;
	case PSR_AA32_MODE_FIQ: return KVM_SPSR_FIQ;
	default: BUG();
	}
}
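
/*
 * USR and SYS are deliberately absent above: neither mode has a banked
 * SPSR in the AArch32 architecture, so there is nothing that could be
 * returned for them and falling into the default case is a bug.
 */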

unsigned long vcpu_read_spsr32(const struct kvm_vcpu *vcpu)
{
	int spsr_idx = vcpu_spsr32_mode(vcpu);

	if (!vcpu->arch.sysregs_loaded_on_cpu) {
		switch (spsr_idx) {
		case KVM_SPSR_SVC:
			return __vcpu_sys_reg(vcpu, SPSR_EL1);
		case KVM_SPSR_ABT:
			return vcpu->arch.ctxt.spsr_abt;
		case KVM_SPSR_UND:
			return vcpu->arch.ctxt.spsr_und;
		case KVM_SPSR_IRQ:
			return vcpu->arch.ctxt.spsr_irq;
		case KVM_SPSR_FIQ:
			return vcpu->arch.ctxt.spsr_fiq;
		}
	}

	switch (spsr_idx) {
	case KVM_SPSR_SVC:
		return read_sysreg_el1(SYS_SPSR);
	case KVM_SPSR_ABT:
		return read_sysreg(spsr_abt);
	case KVM_SPSR_UND:
		return read_sysreg(spsr_und);
	case KVM_SPSR_IRQ:
		return read_sysreg(spsr_irq);
	case KVM_SPSR_FIQ:
		return read_sysreg(spsr_fiq);
	default:
		BUG();
	}
}
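
/*
 * Both the read path above and the write path below follow the same
 * rule: while the vCPU's system registers are loaded on the physical
 * CPU (sysregs_loaded_on_cpu), the banked SPSRs are accessed directly
 * in hardware; otherwise the in-memory vcpu context holds the
 * authoritative copy.
 */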

void vcpu_write_spsr32(struct kvm_vcpu *vcpu, unsigned long v)
{
	int spsr_idx = vcpu_spsr32_mode(vcpu);

	if (!vcpu->arch.sysregs_loaded_on_cpu) {
		switch (spsr_idx) {
		case KVM_SPSR_SVC:
			__vcpu_sys_reg(vcpu, SPSR_EL1) = v;
			break;
		case KVM_SPSR_ABT:
			vcpu->arch.ctxt.spsr_abt = v;
			break;
		case KVM_SPSR_UND:
			vcpu->arch.ctxt.spsr_und = v;
			break;
		case KVM_SPSR_IRQ:
			vcpu->arch.ctxt.spsr_irq = v;
			break;
		case KVM_SPSR_FIQ:
			vcpu->arch.ctxt.spsr_fiq = v;
			break;
		}

		return;
	}

	switch (spsr_idx) {
	case KVM_SPSR_SVC:
		write_sysreg_el1(v, SYS_SPSR);
		break;
	case KVM_SPSR_ABT:
		write_sysreg(v, spsr_abt);
		break;
	case KVM_SPSR_UND:
		write_sysreg(v, spsr_und);
		break;
	case KVM_SPSR_IRQ:
		write_sysreg(v, spsr_irq);
		break;
	case KVM_SPSR_FIQ:
		write_sysreg(v, spsr_fiq);
		break;
	}
}
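
/*
 * Illustrative sketch (hypothetical, not code from this file): AArch32
 * exception entry saves the pre-exception CPSR into the SPSR of the
 * mode being entered, which with the helpers above could look like
 *
 *	unsigned long old_cpsr = *vcpu_cpsr(vcpu);
 *
 *	*vcpu_cpsr(vcpu) = new_mode_cpsr;	// switch to ABT/UND/...
 *	vcpu_write_spsr32(vcpu, old_cpsr);	// SPSR_<mode> = old CPSR
 *
 * The mode switch must happen first, since vcpu_write_spsr32() selects
 * the target SPSR from the mode that is current at the time of the call.
 */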