// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */

#include <linux/kvm_host.h>
#include <asm/inst.h>
#include <asm/numa.h>
#include "kvmcpu.h"
#include "intc/ls3a_ipi.h"
#include "intc/ls3a_ext_irq.h"
#include "ls_irq.h"
#include "kvm_compat.h"
#include "kvmcsr.h"

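/*
 * CASE_READ_SW_GCSR (and the CASE_WRITE_SW_GCSR/CASE_CHANGE_SW_GCSR helpers
 * further below) are early-return dispatch macros: each expands to "if the
 * trapped CSR id matches, access the software-shadow copy and return", so
 * the emulation routines can list the software-maintained CSRs one per line.
 */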
#define CASE_READ_SW_GCSR(csr, regid, csrid)		\
	do {						\
		if (regid == csrid) {			\
			return kvm_read_sw_gcsr(csr, csrid);	\
		}					\
	} while (0)

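/*
 * Emulate a trapped guest CSR read.  The machine-error (MERR*) CSRs and,
 * when the PMU is not handed to the guest, the PERF* CSRs are served from
 * the software-shadow copy; any other id below 4096 is also read from the
 * shadow array, and ids outside that range only trigger a one-time warning.
 */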
unsigned long _kvm_emu_read_csr(struct kvm_vcpu *vcpu, int csrid)
{
	struct loongarch_csrs *csr = vcpu->arch.csr;
	unsigned long val = 0;

	CASE_READ_SW_GCSR(csr, csrid, KVM_CSR_MERRCTL);
	CASE_READ_SW_GCSR(csr, csrid, KVM_CSR_MERRINFO1);
	CASE_READ_SW_GCSR(csr, csrid, KVM_CSR_MERRINFO2);
	CASE_READ_SW_GCSR(csr, csrid, KVM_CSR_MERRENTRY);
	CASE_READ_SW_GCSR(csr, csrid, KVM_CSR_MERRERA);
	CASE_READ_SW_GCSR(csr, csrid, KVM_CSR_MERRSAVE);
	/* Read the SW shadow CSR when the PMU is not handed to the guest */
	CASE_READ_SW_GCSR(csr, csrid, KVM_CSR_PERFCTRL0);
	CASE_READ_SW_GCSR(csr, csrid, KVM_CSR_PERFCTRL1);
	CASE_READ_SW_GCSR(csr, csrid, KVM_CSR_PERFCTRL2);
	CASE_READ_SW_GCSR(csr, csrid, KVM_CSR_PERFCTRL3);
	CASE_READ_SW_GCSR(csr, csrid, KVM_CSR_PERFCNTR0);
	CASE_READ_SW_GCSR(csr, csrid, KVM_CSR_PERFCNTR1);
	CASE_READ_SW_GCSR(csr, csrid, KVM_CSR_PERFCNTR2);
	CASE_READ_SW_GCSR(csr, csrid, KVM_CSR_PERFCNTR3);

	if (csrid < 4096)
		val = kvm_read_sw_gcsr(csr, csrid);
	else
		pr_warn_once("Unsupported csrread 0x%x with pc %lx\n",
				csrid, vcpu->arch.pc);

	return val;
}

#define CASE_WRITE_SW_GCSR(csr, regid, csrid, val)	\
	do {						\
		if (regid == csrid) {			\
			kvm_write_sw_gcsr(csr, csrid, val);	\
			return;				\
		}					\
	} while (0)

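/*
 * Emulate a trapped guest CSR write.  MERR* CSRs go to the software shadow;
 * a PERFCTRL write goes through CASE_WRITE_HW_PMU, which can pass the
 * hardware PMU register on to the guest, while plain PERFCNTR writes stay
 * in the shadow copy.  Everything else below id 4096 is written to the
 * shadow array.
 */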
void _kvm_emu_write_csr(struct kvm_vcpu *vcpu, int csrid,
		unsigned long val)
{
	struct loongarch_csrs *csr = vcpu->arch.csr;

	CASE_WRITE_SW_GCSR(csr, csrid, KVM_CSR_MERRCTL, val);
	CASE_WRITE_SW_GCSR(csr, csrid, KVM_CSR_MERRINFO1, val);
	CASE_WRITE_SW_GCSR(csr, csrid, KVM_CSR_MERRINFO2, val);
	CASE_WRITE_SW_GCSR(csr, csrid, KVM_CSR_MERRENTRY, val);
	CASE_WRITE_SW_GCSR(csr, csrid, KVM_CSR_MERRERA, val);
	CASE_WRITE_SW_GCSR(csr, csrid, KVM_CSR_MERRSAVE, val);

	/* Hand the PMU registers to the guest when a PERFCTRL CSR is configured */
	CASE_WRITE_HW_PMU(vcpu, csr, csrid, KVM_CSR_PERFCTRL0, val);
	CASE_WRITE_HW_PMU(vcpu, csr, csrid, KVM_CSR_PERFCTRL1, val);
	CASE_WRITE_HW_PMU(vcpu, csr, csrid, KVM_CSR_PERFCTRL2, val);
	CASE_WRITE_HW_PMU(vcpu, csr, csrid, KVM_CSR_PERFCTRL3, val);
	/* Write the SW shadow PMU CSR when no PERFCTRL is configured */
	CASE_WRITE_SW_GCSR(csr, csrid, KVM_CSR_PERFCNTR0, val);
	CASE_WRITE_SW_GCSR(csr, csrid, KVM_CSR_PERFCNTR1, val);
	CASE_WRITE_SW_GCSR(csr, csrid, KVM_CSR_PERFCNTR2, val);
	CASE_WRITE_SW_GCSR(csr, csrid, KVM_CSR_PERFCNTR3, val);

	if (csrid < 4096)
		kvm_write_sw_gcsr(csr, csrid, val);
	else
		pr_warn_once("Unsupported csrwrite 0x%x with pc %lx\n",
				csrid, vcpu->arch.pc);
}

#define CASE_CHANGE_SW_GCSR(csr, regid, csrid, mask, val)	\
	do {							\
		if (regid == csrid) {				\
			kvm_change_sw_gcsr(csr, csrid, mask, val);	\
			return;					\
		}						\
	} while (0)

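/*
 * Emulate a trapped guest csrxchg: only the bits set in csr_mask are
 * replaced with the corresponding bits of val, i.e.
 *	new = (old & ~csr_mask) | (val & csr_mask);
 * IMPCTL1 and the MERR* CSRs are handled by the helper macro; other ids
 * below 4096 get an open-coded read-modify-write on the shadow copy.
 */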
void _kvm_emu_xchg_csr(struct kvm_vcpu *vcpu, int csrid,
		unsigned long csr_mask, unsigned long val)
{
	struct loongarch_csrs *csr = vcpu->arch.csr;

	CASE_CHANGE_SW_GCSR(csr, csrid, KVM_CSR_IMPCTL1, csr_mask, val);
	CASE_CHANGE_SW_GCSR(csr, csrid, KVM_CSR_MERRCTL, csr_mask, val);
	CASE_CHANGE_SW_GCSR(csr, csrid, KVM_CSR_MERRINFO1, csr_mask, val);
	CASE_CHANGE_SW_GCSR(csr, csrid, KVM_CSR_MERRINFO2, csr_mask, val);
	CASE_CHANGE_SW_GCSR(csr, csrid, KVM_CSR_MERRENTRY, csr_mask, val);
	CASE_CHANGE_SW_GCSR(csr, csrid, KVM_CSR_MERRERA, csr_mask, val);
	CASE_CHANGE_SW_GCSR(csr, csrid, KVM_CSR_MERRSAVE, csr_mask, val);

	if (csrid < 4096) {
		unsigned long orig;

		orig = kvm_read_sw_gcsr(csr, csrid);
		orig &= ~csr_mask;
		orig |= val & csr_mask;
		kvm_write_sw_gcsr(csr, csrid, orig);
	} else {
		pr_warn_once("Unsupported csrxchg 0x%x with pc %lx\n",
				csrid, vcpu->arch.pc);
	}
}

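/*
 * Read a guest CSR on behalf of userspace.  CSRs that live in hardware
 * while the vCPU runs are fetched with GET_HW_GCSR, purely software-defined
 * ones with GET_SW_GCSR.  With 'force' set, any id inside CSR_ALL_SIZE is
 * read straight from the shadow array.  Returns 0 on success and -1 for an
 * unknown id.
 */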
int _kvm_getcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 *v, int force)
{
	struct loongarch_csrs *csr = vcpu->arch.csr;

	GET_HW_GCSR(id, KVM_CSR_CRMD, v);
	GET_HW_GCSR(id, KVM_CSR_PRMD, v);
	GET_HW_GCSR(id, KVM_CSR_EUEN, v);
	GET_HW_GCSR(id, KVM_CSR_MISC, v);
	GET_HW_GCSR(id, KVM_CSR_ECFG, v);
	GET_HW_GCSR(id, KVM_CSR_ESTAT, v);
	GET_HW_GCSR(id, KVM_CSR_ERA, v);
	GET_HW_GCSR(id, KVM_CSR_BADV, v);
	GET_HW_GCSR(id, KVM_CSR_BADI, v);
	GET_HW_GCSR(id, KVM_CSR_EENTRY, v);
	GET_HW_GCSR(id, KVM_CSR_TLBIDX, v);
	GET_HW_GCSR(id, KVM_CSR_TLBEHI, v);
	GET_HW_GCSR(id, KVM_CSR_TLBELO0, v);
	GET_HW_GCSR(id, KVM_CSR_TLBELO1, v);
	GET_HW_GCSR(id, KVM_CSR_ASID, v);
	GET_HW_GCSR(id, KVM_CSR_PGDL, v);
	GET_HW_GCSR(id, KVM_CSR_PGDH, v);
	GET_HW_GCSR(id, KVM_CSR_PWCTL0, v);
	GET_HW_GCSR(id, KVM_CSR_PWCTL1, v);
	GET_HW_GCSR(id, KVM_CSR_STLBPGSIZE, v);
	GET_HW_GCSR(id, KVM_CSR_RVACFG, v);
	GET_HW_GCSR(id, KVM_CSR_CPUID, v);
	GET_HW_GCSR(id, KVM_CSR_PRCFG1, v);
	GET_HW_GCSR(id, KVM_CSR_PRCFG2, v);
	GET_HW_GCSR(id, KVM_CSR_PRCFG3, v);
	GET_HW_GCSR(id, KVM_CSR_KS0, v);
	GET_HW_GCSR(id, KVM_CSR_KS1, v);
	GET_HW_GCSR(id, KVM_CSR_KS2, v);
	GET_HW_GCSR(id, KVM_CSR_KS3, v);
	GET_HW_GCSR(id, KVM_CSR_KS4, v);
	GET_HW_GCSR(id, KVM_CSR_KS5, v);
	GET_HW_GCSR(id, KVM_CSR_KS6, v);
	GET_HW_GCSR(id, KVM_CSR_KS7, v);
	GET_HW_GCSR(id, KVM_CSR_TMID, v);
	GET_HW_GCSR(id, KVM_CSR_TCFG, v);
	GET_HW_GCSR(id, KVM_CSR_TVAL, v);
	GET_HW_GCSR(id, KVM_CSR_CNTC, v);
	GET_HW_GCSR(id, KVM_CSR_LLBCTL, v);
	GET_HW_GCSR(id, KVM_CSR_TLBRENTRY, v);
	GET_HW_GCSR(id, KVM_CSR_TLBRBADV, v);
	GET_HW_GCSR(id, KVM_CSR_TLBRERA, v);
	GET_HW_GCSR(id, KVM_CSR_TLBRSAVE, v);
	GET_HW_GCSR(id, KVM_CSR_TLBRELO0, v);
	GET_HW_GCSR(id, KVM_CSR_TLBRELO1, v);
	GET_HW_GCSR(id, KVM_CSR_TLBREHI, v);
	GET_HW_GCSR(id, KVM_CSR_TLBRPRMD, v);
	GET_HW_GCSR(id, KVM_CSR_DMWIN0, v);
	GET_HW_GCSR(id, KVM_CSR_DMWIN1, v);
	GET_HW_GCSR(id, KVM_CSR_DMWIN2, v);
	GET_HW_GCSR(id, KVM_CSR_DMWIN3, v);
	GET_HW_GCSR(id, KVM_CSR_MWPS, v);
	GET_HW_GCSR(id, KVM_CSR_FWPS, v);

	GET_SW_GCSR(csr, id, KVM_CSR_IMPCTL1, v);
	GET_SW_GCSR(csr, id, KVM_CSR_IMPCTL2, v);
	GET_SW_GCSR(csr, id, KVM_CSR_MERRCTL, v);
	GET_SW_GCSR(csr, id, KVM_CSR_MERRINFO1, v);
	GET_SW_GCSR(csr, id, KVM_CSR_MERRINFO2, v);
	GET_SW_GCSR(csr, id, KVM_CSR_MERRENTRY, v);
	GET_SW_GCSR(csr, id, KVM_CSR_MERRERA, v);
	GET_SW_GCSR(csr, id, KVM_CSR_MERRSAVE, v);
	GET_SW_GCSR(csr, id, KVM_CSR_CTAG, v);
	GET_SW_GCSR(csr, id, KVM_CSR_DEBUG, v);
	GET_SW_GCSR(csr, id, KVM_CSR_DERA, v);
	GET_SW_GCSR(csr, id, KVM_CSR_DESAVE, v);

	GET_SW_GCSR(csr, id, KVM_CSR_TINTCLR, v);

	if (force && (id < CSR_ALL_SIZE)) {
		*v = kvm_read_sw_gcsr(csr, id);
		return 0;
	}

	return -1;
}

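/*
 * Write a guest CSR on behalf of userspace, mirroring _kvm_getcsr() above.
 * ESTAT needs special handling: the value is written to the guest ESTAT and
 * its IP0~IP7 bits are propagated to GINTC for interrupt injection.  With
 * 'force' set, any other id inside CSR_ALL_SIZE is stored to the software
 * shadow.
 */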
int _kvm_setcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 *v, int force)
{
	struct loongarch_csrs *csr = vcpu->arch.csr;
	int ret;

	SET_HW_GCSR(csr, id, KVM_CSR_CRMD, v);
	SET_HW_GCSR(csr, id, KVM_CSR_PRMD, v);
	SET_HW_GCSR(csr, id, KVM_CSR_EUEN, v);
	SET_HW_GCSR(csr, id, KVM_CSR_MISC, v);
	SET_HW_GCSR(csr, id, KVM_CSR_ECFG, v);
	SET_HW_GCSR(csr, id, KVM_CSR_ERA, v);
	SET_HW_GCSR(csr, id, KVM_CSR_BADV, v);
	SET_HW_GCSR(csr, id, KVM_CSR_BADI, v);
	SET_HW_GCSR(csr, id, KVM_CSR_EENTRY, v);
	SET_HW_GCSR(csr, id, KVM_CSR_TLBIDX, v);
	SET_HW_GCSR(csr, id, KVM_CSR_TLBEHI, v);
	SET_HW_GCSR(csr, id, KVM_CSR_TLBELO0, v);
	SET_HW_GCSR(csr, id, KVM_CSR_TLBELO1, v);
	SET_HW_GCSR(csr, id, KVM_CSR_ASID, v);
	SET_HW_GCSR(csr, id, KVM_CSR_PGDL, v);
	SET_HW_GCSR(csr, id, KVM_CSR_PGDH, v);
	SET_HW_GCSR(csr, id, KVM_CSR_PWCTL0, v);
	SET_HW_GCSR(csr, id, KVM_CSR_PWCTL1, v);
	SET_HW_GCSR(csr, id, KVM_CSR_STLBPGSIZE, v);
	SET_HW_GCSR(csr, id, KVM_CSR_RVACFG, v);
	SET_HW_GCSR(csr, id, KVM_CSR_CPUID, v);
	SET_HW_GCSR(csr, id, KVM_CSR_KS0, v);
	SET_HW_GCSR(csr, id, KVM_CSR_KS1, v);
	SET_HW_GCSR(csr, id, KVM_CSR_KS2, v);
	SET_HW_GCSR(csr, id, KVM_CSR_KS3, v);
	SET_HW_GCSR(csr, id, KVM_CSR_KS4, v);
	SET_HW_GCSR(csr, id, KVM_CSR_KS5, v);
	SET_HW_GCSR(csr, id, KVM_CSR_KS6, v);
	SET_HW_GCSR(csr, id, KVM_CSR_KS7, v);
	SET_HW_GCSR(csr, id, KVM_CSR_TMID, v);
	SET_HW_GCSR(csr, id, KVM_CSR_TCFG, v);
	SET_HW_GCSR(csr, id, KVM_CSR_TVAL, v);
	SET_HW_GCSR(csr, id, KVM_CSR_CNTC, v);
	SET_HW_GCSR(csr, id, KVM_CSR_LLBCTL, v);
	SET_HW_GCSR(csr, id, KVM_CSR_TLBRENTRY, v);
	SET_HW_GCSR(csr, id, KVM_CSR_TLBRBADV, v);
	SET_HW_GCSR(csr, id, KVM_CSR_TLBRERA, v);
	SET_HW_GCSR(csr, id, KVM_CSR_TLBRSAVE, v);
	SET_HW_GCSR(csr, id, KVM_CSR_TLBRELO0, v);
	SET_HW_GCSR(csr, id, KVM_CSR_TLBRELO1, v);
	SET_HW_GCSR(csr, id, KVM_CSR_TLBREHI, v);
	SET_HW_GCSR(csr, id, KVM_CSR_TLBRPRMD, v);
	SET_HW_GCSR(csr, id, KVM_CSR_DMWIN0, v);
	SET_HW_GCSR(csr, id, KVM_CSR_DMWIN1, v);
	SET_HW_GCSR(csr, id, KVM_CSR_DMWIN2, v);
	SET_HW_GCSR(csr, id, KVM_CSR_DMWIN3, v);
	SET_HW_GCSR(csr, id, KVM_CSR_MWPS, v);
	SET_HW_GCSR(csr, id, KVM_CSR_FWPS, v);

	SET_SW_GCSR(csr, id, KVM_CSR_IMPCTL1, v);
	SET_SW_GCSR(csr, id, KVM_CSR_IMPCTL2, v);
	SET_SW_GCSR(csr, id, KVM_CSR_MERRCTL, v);
	SET_SW_GCSR(csr, id, KVM_CSR_MERRINFO1, v);
	SET_SW_GCSR(csr, id, KVM_CSR_MERRINFO2, v);
	SET_SW_GCSR(csr, id, KVM_CSR_MERRENTRY, v);
	SET_SW_GCSR(csr, id, KVM_CSR_MERRERA, v);
	SET_SW_GCSR(csr, id, KVM_CSR_MERRSAVE, v);
	SET_SW_GCSR(csr, id, KVM_CSR_CTAG, v);
	SET_SW_GCSR(csr, id, KVM_CSR_DEBUG, v);
	SET_SW_GCSR(csr, id, KVM_CSR_DERA, v);
	SET_SW_GCSR(csr, id, KVM_CSR_DESAVE, v);
	SET_SW_GCSR(csr, id, KVM_CSR_PRCFG1, v);
	SET_SW_GCSR(csr, id, KVM_CSR_PRCFG2, v);
	SET_SW_GCSR(csr, id, KVM_CSR_PRCFG3, v);

	SET_SW_GCSR(csr, id, KVM_CSR_PGD, v);
	SET_SW_GCSR(csr, id, KVM_CSR_TINTCLR, v);

	ret = -1;
	switch (id) {
	case KVM_CSR_ESTAT:
		kvm_write_gcsr_estat(*v);
		/* ESTAT IP0~IP7 are injected through the GINTC CSR */
		kvm_write_csr_gintc(((*v) >> 2) & 0xff);
		ret = 0;
		break;
	default:
		if (force && (id < CSR_ALL_SIZE)) {
			kvm_set_sw_gcsr(csr, id, *v);
			ret = 0;
		}
		break;
	}

	return ret;
}

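/*
 * IOCSR emulation: a struct kvm_iocsr describes one emulated IOCSR address
 * range [start, end) together with its get/set handlers; a NULL handler
 * means that direction is not emulated here and the access stays on the
 * EMULATE_FAIL path.
 */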
struct kvm_iocsr {
	u32 start, end;
	int (*get)(struct kvm_run *run, struct kvm_vcpu *vcpu, u32 addr, u64 *res);
	int (*set)(struct kvm_run *run, struct kvm_vcpu *vcpu, u32 addr, u64 val);
};

static struct kvm_iocsr_entry *_kvm_find_iocsr(struct kvm *kvm, u32 addr)
{
	int i = 0;

	for (i = 0; i < IOCSR_MAX; i++) {
		if (addr == kvm->arch.iocsr[i].addr)
			return &kvm->arch.iocsr[i];
	}

	return NULL;
}

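/*
 * The "common" IOCSRs (features, vendor, cpuname, nodecnt, misc) are backed
 * by the per-VM kvm->arch.iocsr[] table that _kvm_init_iocsr() fills in;
 * reads and writes simply copy entry->data under iocsr_lock.
 */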
static int kvm_iocsr_common_get(struct kvm_run *run, struct kvm_vcpu *vcpu,
		u32 addr, u64 *res)
{
	int r = EMULATE_FAIL;
	struct kvm_iocsr_entry *entry;

	spin_lock(&vcpu->kvm->arch.iocsr_lock);
	entry = _kvm_find_iocsr(vcpu->kvm, addr);
	if (entry) {
		r = EMULATE_DONE;
		*res = entry->data;
	}
	spin_unlock(&vcpu->kvm->arch.iocsr_lock);
	return r;
}

static int kvm_iocsr_common_set(struct kvm_run *run, struct kvm_vcpu *vcpu,
		u32 addr, u64 val)
{
	int r = EMULATE_FAIL;
	struct kvm_iocsr_entry *entry;

	spin_lock(&vcpu->kvm->arch.iocsr_lock);
	entry = _kvm_find_iocsr(vcpu->kvm, addr);
	if (entry) {
		r = EMULATE_DONE;
		entry->data = val;
	}
	spin_unlock(&vcpu->kvm->arch.iocsr_lock);
	return r;
}

static int kvm_misc_set(struct kvm_run *run, struct kvm_vcpu *vcpu, u32 addr,
		u64 val)
{
	return kvm_iocsr_common_set(run, vcpu, addr, val);
}

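/*
 * IPI and extended-IOI registers are not stored here; the access is
 * translated to the MMIO address of the in-kernel irqchip device and
 * forwarded over KVM_MMIO_BUS.  When the bus access fails, the mmio state
 * is set up so the access can be completed outside the kernel.
 */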
static int kvm_ipi_get(struct kvm_run *run, struct kvm_vcpu *vcpu, u32 addr,
		u64 *res)
{
	int ret;

	++vcpu->stat.rdcsr_ipi_access_exits;
	run->mmio.phys_addr = KVM_IPI_REG_ADDRESS(vcpu->vcpu_id, (addr & 0xff));
	ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
			run->mmio.len, res);
	if (ret) {
		run->mmio.is_write = 0;
		vcpu->mmio_needed = 1;
		vcpu->mmio_is_write = 0;
		return EMULATE_DO_MMIO;
	}
	return EMULATE_DONE;
}

static int kvm_extioi_isr_get(struct kvm_run *run, struct kvm_vcpu *vcpu,
		u32 addr, u64 *res)
{
	int ret;

	run->mmio.phys_addr = EXTIOI_PERCORE_ADDR(vcpu->vcpu_id, (addr & 0xff));
	ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
			run->mmio.len, res);
	if (ret) {
		run->mmio.is_write = 0;
		vcpu->mmio_needed = 1;
		vcpu->mmio_is_write = 0;
		return EMULATE_FAIL;
	}

	return EMULATE_DONE;
}

static int kvm_ipi_set(struct kvm_run *run, struct kvm_vcpu *vcpu, u32 addr,
		u64 val)
{
	int ret;

	run->mmio.phys_addr = KVM_IPI_REG_ADDRESS(vcpu->vcpu_id, (addr & 0xff));
	ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
			run->mmio.len, &val);
	if (ret < 0) {
		run->mmio.is_write = 1;
		vcpu->mmio_needed = 1;
		vcpu->mmio_is_write = 1;
		return EMULATE_DO_MMIO;
	}

	return EMULATE_DONE;
}

static int kvm_extioi_set(struct kvm_run *run, struct kvm_vcpu *vcpu, u32 addr,
		u64 val)
{
	int ret;

	if ((addr & 0x1f00) == KVM_IOCSR_EXTIOI_ISR_BASE)
		run->mmio.phys_addr = EXTIOI_PERCORE_ADDR(vcpu->vcpu_id, (addr & 0xff));
	else
		run->mmio.phys_addr = EXTIOI_ADDR((addr & 0x1fff));

	ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
			run->mmio.len, &val);
	if (ret < 0) {
		memcpy(run->mmio.data, &val, run->mmio.len);
		run->mmio.is_write = 1;
		vcpu->mmio_needed = 1;
		vcpu->mmio_is_write = 1;
		return EMULATE_DO_MMIO;
	}

	return EMULATE_DONE;
}

static int kvm_nop_set(struct kvm_run *run, struct kvm_vcpu *vcpu, u32 addr,
		u64 val)
{
	return EMULATE_DONE;
}

/* These IOCSRs are ordered by access frequency, from high to low */
static struct kvm_iocsr kvm_iocsrs[] = {
	/* extioi iocsr */
	{KVM_IOCSR_EXTIOI_EN_BASE, KVM_IOCSR_EXTIOI_EN_BASE + 0x100,
		NULL, kvm_extioi_set},
	{KVM_IOCSR_EXTIOI_NODEMAP_BASE, KVM_IOCSR_EXTIOI_NODEMAP_BASE + 0x28,
		NULL, kvm_extioi_set},
	{KVM_IOCSR_EXTIOI_ROUTE_BASE, KVM_IOCSR_EXTIOI_ROUTE_BASE + 0x100,
		NULL, kvm_extioi_set},
	{KVM_IOCSR_EXTIOI_ISR_BASE, KVM_IOCSR_EXTIOI_ISR_BASE + 0x1c,
		kvm_extioi_isr_get, kvm_extioi_set},

	{KVM_IOCSR_IPI_STATUS, KVM_IOCSR_IPI_STATUS + 0x40,
		kvm_ipi_get, kvm_ipi_set},
	{KVM_IOCSR_IPI_SEND, KVM_IOCSR_IPI_SEND + 0x1,
		NULL, kvm_ipi_set},
	{KVM_IOCSR_MBUF_SEND, KVM_IOCSR_MBUF_SEND + 0x1,
		NULL, kvm_ipi_set},

	{KVM_IOCSR_FEATURES, KVM_IOCSR_FEATURES + 0x1,
		kvm_iocsr_common_get, kvm_nop_set},
	{KVM_IOCSR_VENDOR, KVM_IOCSR_VENDOR + 0x1,
		kvm_iocsr_common_get, kvm_nop_set},
	{KVM_IOCSR_CPUNAME, KVM_IOCSR_CPUNAME + 0x1,
		kvm_iocsr_common_get, kvm_nop_set},
	{KVM_IOCSR_NODECNT, KVM_IOCSR_NODECNT + 0x1,
		kvm_iocsr_common_get, kvm_nop_set},
	{KVM_IOCSR_MISC_FUNC, KVM_IOCSR_MISC_FUNC + 0x1,
		kvm_iocsr_common_get, kvm_misc_set},
};

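/*
 * Dispatch an emulated IOCSR read.  Without an in-kernel irqchip every
 * access is punted to userspace via an EMULATE_DO_IOCSR exit; otherwise the
 * kvm_iocsrs[] table above is scanned and the matching ->get handler is
 * invoked.  Anything unmatched stays EMULATE_FAIL.
 */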
static int _kvm_emu_iocsr_read(struct kvm_run *run, struct kvm_vcpu *vcpu,
		u32 addr, u64 *res)
{
	enum emulation_result er = EMULATE_FAIL;
	int i = 0;
	struct kvm_iocsr *iocsr = NULL;

	if (!irqchip_in_kernel(vcpu->kvm)) {
		run->iocsr_io.len = run->mmio.len;
		run->iocsr_io.phys_addr = addr;
		run->iocsr_io.is_write = 0;
		return EMULATE_DO_IOCSR;
	}
	for (i = 0; i < ARRAY_SIZE(kvm_iocsrs); i++) {
		iocsr = &kvm_iocsrs[i];
		if (addr >= iocsr->start && addr < iocsr->end) {
			if (iocsr->get)
				er = iocsr->get(run, vcpu, addr, res);
		}
	}

	if (er != EMULATE_DONE)
		kvm_debug("%s iocsr 0x%x not supported in kvm\n", __func__, addr);

	return er;
}

static int _kvm_emu_iocsr_write(struct kvm_run *run, struct kvm_vcpu *vcpu,
		u32 addr, u64 val)
{
	enum emulation_result er = EMULATE_FAIL;
	int i = 0;
	struct kvm_iocsr *iocsr = NULL;

	if (!irqchip_in_kernel(vcpu->kvm)) {
		run->iocsr_io.len = run->mmio.len;
		memcpy(run->iocsr_io.data, &val, run->iocsr_io.len);
		run->iocsr_io.phys_addr = addr;
		run->iocsr_io.is_write = 1;
		return EMULATE_DO_IOCSR;
	}
	for (i = 0; i < ARRAY_SIZE(kvm_iocsrs); i++) {
		iocsr = &kvm_iocsrs[i];
		if (addr >= iocsr->start && addr < iocsr->end) {
			if (iocsr->set)
				er = iocsr->set(run, vcpu, addr, val);
		}
	}
	if (er != EMULATE_DONE)
		kvm_debug("%s iocsr 0x%x not supported in kvm\n", __func__, addr);

	return er;
}

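/*
 * Decode and emulate an iocsrrd.b/h/w/d or iocsrwr.b/h/w/d instruction:
 * rj supplies the IOCSR address, rd supplies (writes) or receives (reads)
 * the data, and the opcode selects the access width.  When emulation has
 * to continue in userspace, the destination GPR is remembered in
 * vcpu->arch.io_gpr so _kvm_complete_iocsr_read() can finish the load.
 */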
/* All IOCSR operations should be handled here in KVM, not as generic MMIO */
int _kvm_emu_iocsr(larch_inst inst,
		struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	u32 rd, rj, opcode;
	u32 val;
	u64 res = 0;
	int ret;

	/*
	 * Each IOCSR access width is encoded with a different opcode
	 */
	rd = inst.reg2_format.rd;
	rj = inst.reg2_format.rj;
	opcode = inst.reg2_format.opcode;
	val = vcpu->arch.gprs[rj];
	res = vcpu->arch.gprs[rd];
	/* LoongArch is little endian */
	switch (opcode) {
	case iocsrrdb_op:
		run->mmio.len = 1;
		ret = _kvm_emu_iocsr_read(run, vcpu, val, &res);
		vcpu->arch.gprs[rd] = (u8) res;
		break;
	case iocsrrdh_op:
		run->mmio.len = 2;
		ret = _kvm_emu_iocsr_read(run, vcpu, val, &res);
		vcpu->arch.gprs[rd] = (u16) res;
		break;
	case iocsrrdw_op:
		run->mmio.len = 4;
		ret = _kvm_emu_iocsr_read(run, vcpu, val, &res);
		vcpu->arch.gprs[rd] = (u32) res;
		break;
	case iocsrrdd_op:
		run->mmio.len = 8;
		ret = _kvm_emu_iocsr_read(run, vcpu, val, &res);
		vcpu->arch.gprs[rd] = res;
		break;
	case iocsrwrb_op:
		run->mmio.len = 1;
		ret = _kvm_emu_iocsr_write(run, vcpu, val, (u8)res);
		break;
	case iocsrwrh_op:
		run->mmio.len = 2;
		ret = _kvm_emu_iocsr_write(run, vcpu, val, (u16)res);
		break;
	case iocsrwrw_op:
		run->mmio.len = 4;
		ret = _kvm_emu_iocsr_write(run, vcpu, val, (u32)res);
		break;
	case iocsrwrd_op:
		run->mmio.len = 8;
		ret = _kvm_emu_iocsr_write(run, vcpu, val, res);
		break;
	default:
		ret = EMULATE_FAIL;
		break;
	}

	if (ret == EMULATE_DO_IOCSR)
		vcpu->arch.io_gpr = rd;

	return ret;
}

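/*
 * Called after userspace has completed an IOCSR read exit: the value left
 * in run->iocsr_io.data is copied into the GPR recorded by
 * _kvm_emu_iocsr(), widened according to the access length.
 */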
int _kvm_complete_iocsr_read(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];
	enum emulation_result er = EMULATE_DONE;

	switch (run->iocsr_io.len) {
	case 8:
		*gpr = *(s64 *)run->iocsr_io.data;
		break;
	case 4:
		*gpr = *(int *)run->iocsr_io.data;
		break;
	case 2:
		*gpr = *(short *)run->iocsr_io.data;
		break;
	case 1:
		*gpr = *(char *)run->iocsr_io.data;
		break;
	default:
		kvm_err("Bad IOCSR length: %d, addr is 0x%lx\n",
				run->iocsr_io.len, vcpu->arch.badv);
		er = EMULATE_FAIL;
		break;
	}

	return er;
}

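/*
 * VM-ioctl helpers that let userspace read back or override an entry of the
 * emulated IOCSR table (tmp.addr selects the entry, tmp.data carries the
 * value).
 */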
int _kvm_get_iocsr(struct kvm *kvm, struct kvm_iocsr_entry *__user argp)
{
	struct kvm_iocsr_entry *entry, tmp;
	int r = -EFAULT;

	if (copy_from_user(&tmp, argp, sizeof(tmp)))
		goto out;

	spin_lock(&kvm->arch.iocsr_lock);
	entry = _kvm_find_iocsr(kvm, tmp.addr);
	if (entry != NULL)
		tmp.data = entry->data;
	spin_unlock(&kvm->arch.iocsr_lock);

	if (entry)
		r = copy_to_user(argp, &tmp, sizeof(tmp)) ? -EFAULT : 0;

out:
	return r;
}

int _kvm_set_iocsr(struct kvm *kvm, struct kvm_iocsr_entry *__user argp)
{
	struct kvm_iocsr_entry *entry, tmp;
	int r = -EFAULT;

	if (copy_from_user(&tmp, argp, sizeof(tmp)))
		goto out;

	spin_lock(&kvm->arch.iocsr_lock);
	entry = _kvm_find_iocsr(kvm, tmp.addr);
	if (entry != NULL) {
		r = 0;
		entry->data = tmp.data;
	}
	spin_unlock(&kvm->arch.iocsr_lock);

out:
	return r;
}

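/*
 * Default contents of the emulated IOCSR table.  VENDOR and CPUNAME hold
 * the ASCII strings "Loongson" and "3A5000" stored as little-endian
 * integers; FEATURES advertises the NODECNT/MSI/EXTIOI/CSRIPI/VM feature
 * bits to the guest.
 */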
static struct kvm_iocsr_entry iocsr_array[IOCSR_MAX] = {
	{KVM_IOCSR_FEATURES, .data = KVM_IOCSRF_NODECNT | KVM_IOCSRF_MSI
		| KVM_IOCSRF_EXTIOI | KVM_IOCSRF_CSRIPI | KVM_IOCSRF_VM},
	{KVM_IOCSR_VENDOR, .data = 0x6e6f73676e6f6f4c},	/* Loongson */
	{KVM_IOCSR_CPUNAME, .data = 0x303030354133},	/* 3A5000 */
	{KVM_IOCSR_NODECNT, .data = 0x4},
	{KVM_IOCSR_MISC_FUNC, .data = 0x0},
};

int _kvm_init_iocsr(struct kvm *kvm)
{
	int i = 0;

	spin_lock_init(&kvm->arch.iocsr_lock);
	for (i = 0; i < IOCSR_MAX; i++) {
		kvm->arch.iocsr[i].addr = iocsr_array[i].addr;
		kvm->arch.iocsr[i].data = iocsr_array[i].data;
	}
	return 0;
}