// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */

#include <linux/random.h>
#include "kvmcpu.h"
#include "kvm_compat.h"
#include "ls3a_ipi.h"
#include "ls7a_irq.h"
#include "ls3a_ext_irq.h"

#define ls3a_ext_irq_lock(s, flags)	spin_lock_irqsave(&s->lock, flags)
#define ls3a_ext_irq_unlock(s, flags)	spin_unlock_irqrestore(&s->lock, flags)

extern int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
				struct kvm_loongarch_interrupt *irq);
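
/**
 * ext_deactive_core_isr()
 * @kvm: KVM structure pointer
 * @irq_num: 0~255 ext irq num
 * @vcpu_id: id of the vCPU the irq is routed to
 *
 * Clear the pending bits of @irq_num for the given core and, when no other
 * irq routed to the same CPU interrupt line remains pending, lower that
 * line (IP2~IP5).
 */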
void ext_deactive_core_isr(struct kvm *kvm, int irq_num, int vcpu_id)
{
	int ipnum;
	unsigned long found1;
	struct kvm_loongarch_interrupt irq;
	struct ls3a_kvm_extirq *s = ls3a_ext_irqchip(kvm);
	struct kvm_ls3a_extirq_state *state = &(s->ls3a_ext_irq);

	ipnum = state->ext_sw_ipmap[irq_num];

	bitmap_clear((void *)state->ext_isr.reg_u8, irq_num, 1);
	bitmap_clear((void *)state->ext_core_isr.reg_u8[vcpu_id], irq_num, 1);

	bitmap_clear((void *)state->ext_sw_ipisr[vcpu_id][ipnum + 2], irq_num, 1);
	found1 = find_next_bit((void *)state->ext_sw_ipisr[vcpu_id][ipnum + 2], EXTIOI_IRQS, 0);
	kvm_debug("vcpu_id %d irqnum %d found:0x%lx ipnum %d down\n", vcpu_id, irq_num, found1, ipnum);
	if (found1 == EXTIOI_IRQS) {
		irq.cpu = vcpu_id;
		irq.irq = -(ipnum + 2); /* IP2~IP5 */
		if (likely(kvm->vcpus[vcpu_id]))
			kvm_vcpu_ioctl_interrupt(kvm->vcpus[vcpu_id], &irq);
		kvm->stat.trigger_ls3a_ext_irq++;
	}
}

/**
 * ext_irq_update_core()
 * @kvm: KVM structure pointer
 * @irq_num: 0~255 ext irq num
 * @level: 1 to raise the irq, 0 to lower it
 *
 * Route an extended interrupt to the target vCPU core and raise or lower
 * the corresponding CPU interrupt line (IP2~IP5).
 */
void ext_irq_update_core(struct kvm *kvm, int irq_num, int level)
{
	int nrcpus, ipnum, vcpu_id;
	unsigned long found1;
	struct kvm_loongarch_interrupt irq;
	struct ls3a_kvm_extirq *s = ls3a_ext_irqchip(kvm);
	struct kvm_ls3a_extirq_state *state = &(s->ls3a_ext_irq);

	nrcpus = atomic_read(&kvm->online_vcpus);
	vcpu_id = state->ext_sw_coremap[irq_num];
	ipnum = state->ext_sw_ipmap[irq_num];

	if (vcpu_id > (nrcpus - 1))
		vcpu_id = 0;

	if (level == 1) {
		if (!test_bit(irq_num, (void *)state->ext_en.reg_u8))
			return;
		if (!test_bit(irq_num, (void *)state->ext_isr.reg_u8))
			return;
		bitmap_set((void *)state->ext_core_isr.reg_u8[vcpu_id], irq_num, 1);

		found1 = find_next_bit((void *)state->ext_sw_ipisr[vcpu_id][ipnum + 2], EXTIOI_IRQS, 0);
		bitmap_set((void *)state->ext_sw_ipisr[vcpu_id][ipnum + 2], irq_num, 1);
		kvm_debug("%s:%d --- vcpu_id %d irqnum %d found1 0x%lx ipnum %d\n",
			__FUNCTION__, __LINE__, vcpu_id, irq_num, found1, ipnum);
		if (found1 == EXTIOI_IRQS) {
			irq.cpu = vcpu_id;
			irq.irq = ipnum + 2; /* IP2~IP5 */
			kvm_debug("%s:%d --- vcpu_id %d ipnum %d raise\n",
				__FUNCTION__, __LINE__, vcpu_id, ipnum);
			if (likely(kvm->vcpus[vcpu_id]))
				kvm_vcpu_ioctl_interrupt(kvm->vcpus[vcpu_id], &irq);
			kvm->stat.trigger_ls3a_ext_irq++;
		}
	} else {
		bitmap_clear((void *)state->ext_isr.reg_u8, irq_num, 1);
		bitmap_clear((void *)state->ext_core_isr.reg_u8[vcpu_id], irq_num, 1);

		bitmap_clear((void *)state->ext_sw_ipisr[vcpu_id][ipnum + 2], irq_num, 1);
		found1 = find_next_bit((void *)state->ext_sw_ipisr[vcpu_id][ipnum + 2], EXTIOI_IRQS, 0);
		if (found1 == EXTIOI_IRQS) {
			irq.cpu = vcpu_id;
			irq.irq = -(ipnum + 2); /* IP2~IP5 */
			if (likely(kvm->vcpus[vcpu_id]))
				kvm_vcpu_ioctl_interrupt(kvm->vcpus[vcpu_id], &irq);
			kvm->stat.trigger_ls3a_ext_irq++;
		}
	}
}

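/**
 * msi_irq_handler()
 * @kvm: KVM structure pointer
 * @irq: 0~255 ext irq num
 * @level: 1 to assert the irq, 0 to deassert it
 *
 * Latch an MSI-triggered extended interrupt in ext_isr and, when its
 * state actually changes, forward it to the target vCPU core.
 */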
void msi_irq_handler(struct kvm *kvm, int irq, int level)
{
	unsigned long flags;
	struct ls3a_kvm_extirq *s = ls3a_ext_irqchip(kvm);
	struct kvm_ls3a_extirq_state *state = &(s->ls3a_ext_irq);

	kvm_debug("%s: irq = %d, level = %d\n", __FUNCTION__, irq, level);

	ls3a_ext_irq_lock(s, flags);
	if (level == 1) {
		if (test_bit(irq, (void *)&state->ext_isr))
			goto out;
		__set_bit(irq, (void *)&state->ext_isr);
	} else {
		if (!test_bit(irq, (void *)&state->ext_isr))
			goto out;
		__clear_bit(irq, (void *)&state->ext_isr);
	}

	ext_irq_update_core(kvm, irq, level);
out:
	ls3a_ext_irq_unlock(s, flags);
}

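/**
 * ls3a_ext_intctl_readb()
 * @vcpu: vCPU issuing the MMIO access
 * @dev: registered KVM MMIO device
 * @addr: register address, already rebased by the dispatch routine
 * @val: pointer to the byte-wide result
 *
 * Emulate a byte-wide read of the extended interrupt controller registers.
 */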
static int ls3a_ext_intctl_readb(struct kvm_vcpu *vcpu,
				struct kvm_io_device *dev,
				gpa_t addr, void *val)
{
	uint64_t offset, reg_count;
	struct ls3a_kvm_extirq *s = NULL;
	struct kvm_ls3a_extirq_state *state = NULL;
	int vcpu_id;

	s = container_of(dev, struct ls3a_kvm_extirq, device);

	state = &(s->ls3a_ext_irq);

	offset = addr & 0xfffff;

	if ((offset >= EXTIOI_ENABLE_START) && (offset < EXTIOI_ENABLE_END)) {
		reg_count = (offset - EXTIOI_ENABLE_START);
		*(uint8_t *)val = state->ext_en.reg_u8[reg_count];
	} else if ((offset >= EXTIOI_BOUNCE_START) && (offset < EXTIOI_BOUNCE_END)) {
		reg_count = (offset - EXTIOI_BOUNCE_START);
		*(uint8_t *)val = state->bounce.reg_u8[reg_count];
	} else if ((offset >= EXTIOI_ISR_START) && (offset < EXTIOI_ISR_END)) {
		reg_count = (offset - EXTIOI_ISR_START);
		*(uint8_t *)val = state->ext_isr.reg_u8[reg_count];
	} else if ((offset >= EXTIOI_COREISR_START) && (offset < EXTIOI_COREISR_END)) {
		/* percpu(32 bytes) coreisr reg_count is 0~31 */
		vcpu_id = (offset >> 8) & 0xff;
		reg_count = offset & 0xff;
		*(uint8_t *)val = state->ext_core_isr.reg_u8[vcpu_id][reg_count];
	} else if ((offset >= EXTIOI_IPMAP_START) && (offset < EXTIOI_IPMAP_END)) {
		reg_count = (offset - EXTIOI_IPMAP_START);
		*(uint8_t *)val = state->ip_map.reg_u8[reg_count];
	} else if ((offset >= EXTIOI_COREMAP_START) && (offset < EXTIOI_COREMAP_END)) {
		reg_count = (offset - EXTIOI_COREMAP_START);
		*(uint8_t *)val = state->core_map.reg_u8[reg_count];
	} else if ((offset >= EXTIOI_NODETYPE_START) && (offset < EXTIOI_NODETYPE_END)) {
		reg_count = (offset - EXTIOI_NODETYPE_START);
		*(uint8_t *)val = state->node_type.reg_u8[reg_count];
	}
	kvm_debug("%s: addr=0x%llx,val=0x%x\n",
		__FUNCTION__, addr, *(uint8_t *)val);
	return 0;
}

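/**
 * ls3a_ext_intctl_readw()
 * @vcpu: vCPU issuing the MMIO access
 * @dev: registered KVM MMIO device
 * @addr: register address, already rebased by the dispatch routine
 * @val: pointer to the 4-byte result
 *
 * Emulate a 4-byte read of the extended interrupt controller registers.
 */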
static int ls3a_ext_intctl_readw(struct kvm_vcpu *vcpu,
				struct kvm_io_device *dev,
				gpa_t addr, void *val)
{
	uint64_t offset, reg_count;
	struct ls3a_kvm_extirq *s = NULL;
	struct kvm_ls3a_extirq_state *state = NULL;
	int vcpu_id;

	s = container_of(dev, struct ls3a_kvm_extirq, device);

	state = &(s->ls3a_ext_irq);

	offset = addr & 0xfffff;

	if ((offset >= EXTIOI_ENABLE_START) && (offset < EXTIOI_ENABLE_END)) {
		reg_count = (offset - EXTIOI_ENABLE_START) / 4;
		*(uint32_t *)val = state->ext_en.reg_u32[reg_count];
	} else if ((offset >= EXTIOI_BOUNCE_START) && (offset < EXTIOI_BOUNCE_END)) {
		reg_count = (offset - EXTIOI_BOUNCE_START) / 4;
		*(uint32_t *)val = state->bounce.reg_u32[reg_count];
	} else if ((offset >= EXTIOI_ISR_START) && (offset < EXTIOI_ISR_END)) {
		reg_count = (offset - EXTIOI_ISR_START) / 4;
		*(uint32_t *)val = state->ext_isr.reg_u32[reg_count];
	} else if ((offset >= EXTIOI_COREISR_START) && (offset < EXTIOI_COREISR_END)) {
		/* percpu(32 bytes) coreisr reg_count is 0~7 */
		vcpu_id = (offset >> 8) & 0xff;
		reg_count = (offset & 0xff) / 4;
		*(uint32_t *)val = state->ext_core_isr.reg_u32[vcpu_id][reg_count];
	} else if ((offset >= EXTIOI_IPMAP_START) && (offset < EXTIOI_IPMAP_END)) {
		reg_count = (offset - EXTIOI_IPMAP_START) / 4;
		*(uint32_t *)val = state->ip_map.reg_u32[reg_count];
	} else if ((offset >= EXTIOI_COREMAP_START) && (offset < EXTIOI_COREMAP_END)) {
		reg_count = (offset - EXTIOI_COREMAP_START) / 4;
		*(uint32_t *)val = state->core_map.reg_u32[reg_count];
	} else if ((offset >= EXTIOI_NODETYPE_START) && (offset < EXTIOI_NODETYPE_END)) {
		reg_count = (offset - EXTIOI_NODETYPE_START) / 4;
		*(uint32_t *)val = state->node_type.reg_u32[reg_count];
	}
	kvm_debug("%s: addr=0x%llx,val=0x%x\n",
		__FUNCTION__, addr, *(uint32_t *)val);

	return 0;
}

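/**
 * ls3a_ext_intctl_readl()
 * @vcpu: vCPU issuing the MMIO access
 * @dev: registered KVM MMIO device
 * @addr: register address, already rebased by the dispatch routine
 * @val: pointer to the 8-byte result
 *
 * Emulate an 8-byte read of the extended interrupt controller registers.
 */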
static int ls3a_ext_intctl_readl(struct kvm_vcpu *vcpu,
				struct kvm_io_device *dev,
				gpa_t addr, void *val)
{
	uint64_t offset, reg_count;
	struct ls3a_kvm_extirq *s = NULL;
	struct kvm_ls3a_extirq_state *state = NULL;
	int vcpu_id;

	s = container_of(dev, struct ls3a_kvm_extirq, device);

	state = &(s->ls3a_ext_irq);

	offset = addr & 0xfffff;

	if ((offset >= EXTIOI_ENABLE_START) && (offset < EXTIOI_ENABLE_END)) {
		reg_count = (offset - EXTIOI_ENABLE_START) / 8;
		*(uint64_t *)val = state->ext_en.reg_u64[reg_count];
	} else if ((offset >= EXTIOI_BOUNCE_START) && (offset < EXTIOI_BOUNCE_END)) {
		reg_count = (offset - EXTIOI_BOUNCE_START) / 8;
		*(uint64_t *)val = state->bounce.reg_u64[reg_count];
	} else if ((offset >= EXTIOI_ISR_START) && (offset < EXTIOI_ISR_END)) {
		reg_count = (offset - EXTIOI_ISR_START) / 8;
		*(uint64_t *)val = state->ext_isr.reg_u64[reg_count];
	} else if ((offset >= EXTIOI_COREISR_START) && (offset < EXTIOI_COREISR_END)) {
		/* percpu(32 bytes) coreisr reg_count is 0~3 */
		vcpu_id = (offset >> 8) & 0xff;
		reg_count = (offset & 0xff) / 8;

		*(uint64_t *)val = state->ext_core_isr.reg_u64[vcpu_id][reg_count];
	} else if ((offset >= EXTIOI_IPMAP_START) && (offset < EXTIOI_IPMAP_END)) {
		*(uint64_t *)val = state->ip_map.reg_u64;
	} else if ((offset >= EXTIOI_COREMAP_START) && (offset < EXTIOI_COREMAP_END)) {
		reg_count = (offset - EXTIOI_COREMAP_START) / 8;
		*(uint64_t *)val = state->core_map.reg_u64[reg_count];
	} else if ((offset >= EXTIOI_NODETYPE_START) && (offset < EXTIOI_NODETYPE_END)) {
		reg_count = (offset - EXTIOI_NODETYPE_START) / 8;
		*(uint64_t *)val = state->node_type.reg_u64[reg_count];
	}
	kvm_debug("%s: addr=0x%llx,val=0x%llx\n",
		__FUNCTION__, addr, *(uint64_t *)val);
	return 0;
}

/**
 * ls3a_ext_intctl_read()
 * @vcpu: vCPU issuing the MMIO access
 * @dev: registered KVM MMIO device
 * @addr: register address
 * @size: width of the access in bytes
 * @val: pointer to the read result
 *
 * Emulate reads of the extended interrupt controller registers,
 * dispatching on the access width.
 */
static int ls3a_ext_intctl_read(struct kvm_vcpu *vcpu,
				struct kvm_io_device *dev,
				gpa_t addr, int size, void *val)
{
	struct ls3a_kvm_extirq *s = NULL;
	unsigned long flags;
	uint64_t offset;

	s = container_of(dev, struct ls3a_kvm_extirq, device);

	offset = addr & 0xfffff;
	if (offset & (size - 1)) {
		printk("%s:unaligned address access %llx size %d\n",
			__FUNCTION__, addr, size);
		return 0;
	}
	addr = (addr & 0xfffff) - EXTIOI_ADDR_OFF;
	ls3a_ext_irq_lock(s, flags);

	switch (size) {
	case 1:
		ls3a_ext_intctl_readb(vcpu, dev, addr, val);
		break;
	case 4:
		ls3a_ext_intctl_readw(vcpu, dev, addr, val);
		break;
	case 8:
		ls3a_ext_intctl_readl(vcpu, dev, addr, val);
		break;
	default:
		WARN_ONCE(1, "%s: Abnormal address access:addr 0x%llx, size %d\n",
			__FUNCTION__, addr, size);
	}
	ls3a_ext_irq_unlock(s, flags);
	kvm_debug("%s(%d):address access %llx size %d\n",
		__FUNCTION__, __LINE__, offset, size);

	return 0;
}

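/**
 * ls3a_ext_intctl_writeb()
 * @vcpu: vCPU issuing the MMIO access
 * @dev: registered KVM MMIO device
 * @addr: register address, already rebased by the dispatch routine
 * @__val: pointer to the byte to write
 *
 * Emulate a byte-wide write of the extended interrupt controller
 * registers and propagate any resulting irq state changes.
 */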
static int ls3a_ext_intctl_writeb(struct kvm_vcpu *vcpu,
				struct kvm_io_device *dev,
				gpa_t addr, const void *__val)
{
	uint64_t offset, reg_count;
	uint8_t val_data_u8, old_data_u8;
	struct ls3a_kvm_extirq *s = NULL;
	struct kvm_ls3a_extirq_state *state = NULL;
	struct kvm *kvm = NULL;
	int mask, level, i, irqnum, ipnum;
	int vcpu_id;

	/*
	 * Only a single byte is valid here; reading a full unsigned long
	 * would overrun the on-stack buffers passed in by the wider
	 * write handlers.
	 */
	unsigned long val = *(uint8_t *)__val;

	s = container_of(dev, struct ls3a_kvm_extirq, device);

	state = &(s->ls3a_ext_irq);
	kvm = s->kvm;

	offset = addr & 0xfffff;
	val_data_u8 = val & 0xffUL;

	kvm_debug("%s: addr=0x%llx,val=0x%lx\n", __FUNCTION__, addr, val);

	if ((offset >= EXTIOI_ENABLE_START) && (offset < EXTIOI_ENABLE_END)) {
		reg_count = (offset - EXTIOI_ENABLE_START);
		old_data_u8 = state->ext_en.reg_u8[reg_count];
		if (old_data_u8 != val_data_u8) {
			state->ext_en.reg_u8[reg_count] = val_data_u8;
			old_data_u8 = old_data_u8 ^ val_data_u8;
			mask = 0x1;
			for (i = 0; i < 8; i++) {
				if (old_data_u8 & mask) {
					level = !!(val_data_u8 & (0x1 << i));
					if (level)
						ext_irq_update_core(kvm, i + reg_count * 8, level);
				}
				mask = mask << 1;
			}
		}
	} else if ((offset >= EXTIOI_BOUNCE_START) && (offset < EXTIOI_BOUNCE_END)) {
		reg_count = (offset - EXTIOI_BOUNCE_START);
		state->bounce.reg_u8[reg_count] = val_data_u8;
	} else if ((offset >= EXTIOI_ISR_START) && (offset < EXTIOI_ISR_END)) {
		/* ISR cannot be written directly; writing 1 clears the bit */
		reg_count = (offset - EXTIOI_ISR_START) & 0x1f;
		old_data_u8 = state->ext_isr.reg_u8[reg_count];
		state->ext_isr.reg_u8[reg_count] = old_data_u8 & (~val_data_u8);

		mask = 0x1;
		for (i = 0; i < 8; i++) {
			if ((old_data_u8 & mask) && (val_data_u8 & mask))
				ext_irq_update_core(kvm, i + reg_count * 8, 0);
			mask = mask << 1;
		}
	} else if ((offset >= EXTIOI_COREISR_START) && (offset < EXTIOI_COREISR_END)) {
		int bits;
		/* percpu(32 bytes) coreisr reg_count is 0~31 */
		vcpu_id = (offset >> 8) & 0xff;
		reg_count = offset & 0xff;

		state->ext_core_isr.reg_u8[vcpu_id][reg_count] &= ~val_data_u8;

		bits = sizeof(val_data_u8) * 8;
		i = find_first_bit((void *)&val_data_u8, bits);
		while (i < bits) {
			ext_deactive_core_isr(kvm, i + reg_count * bits, vcpu_id);
			bitmap_clear((void *)&val_data_u8, i, 1);
			i = find_first_bit((void *)&val_data_u8, bits);
		}
	} else if ((offset >= EXTIOI_IPMAP_START) && (offset < EXTIOI_IPMAP_END)) {
		/* drop arch.core_ip_mask, use state->ip_map instead */
		reg_count = (offset - EXTIOI_IPMAP_START);
		state->ip_map.reg_u8[reg_count] = val_data_u8;

		ipnum = 0;

		for (i = 0; i < 4; i++) {
			if (val_data_u8 & (0x1 << i)) {
				ipnum = i;
				break;
			}
		}

		if (val_data_u8) {
			for (i = 0; i < 32; i++) {
				irqnum = reg_count * 32 + i;
				state->ext_sw_ipmap[irqnum] = ipnum;
			}
		} else {
			for (i = 0; i < 32; i++) {
				irqnum = reg_count * 32 + i;
				state->ext_sw_ipmap[irqnum] = 0;
			}
		}
	} else if ((offset >= EXTIOI_COREMAP_START) && (offset < EXTIOI_COREMAP_END)) {
		reg_count = (offset - EXTIOI_COREMAP_START);
		state->core_map.reg_u8[reg_count] = val_data_u8;
		state->ext_sw_coremap[reg_count] = val_data_u8;
	} else if ((offset >= EXTIOI_NODETYPE_START) && (offset < EXTIOI_NODETYPE_END)) {
		reg_count = (offset - EXTIOI_NODETYPE_START);
		state->node_type.reg_u8[reg_count] = val_data_u8;
	} else {
		WARN_ONCE(1, "%s: Abnormal address access:addr 0x%llx\n",
			__FUNCTION__, addr);
	}

	return 0;
}

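/**
 * ls3a_ext_intctl_writew()
 * @vcpu: vCPU issuing the MMIO access
 * @dev: registered KVM MMIO device
 * @addr: register address, already rebased by the dispatch routine
 * @__val: pointer to the 4-byte value to write
 *
 * Emulate a 4-byte write of the extended interrupt controller registers;
 * IPMAP and COREMAP accesses are split into byte-wide writes.
 */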
static int ls3a_ext_intctl_writew(struct kvm_vcpu *vcpu,
				struct kvm_io_device *dev,
				gpa_t addr, const void *__val)
{
	uint64_t offset, reg_count;
	uint32_t val_data_u32, old_data_u32, mask;
	struct ls3a_kvm_extirq *s = NULL;
	struct kvm_ls3a_extirq_state *state = NULL;
	struct kvm *kvm = NULL;
	uint8_t tmp_data_u8;
	int i, level, vcpu_id;
	unsigned long val;

	val = *(unsigned long *)__val;

	s = container_of(dev, struct ls3a_kvm_extirq, device);

	state = &(s->ls3a_ext_irq);
	kvm = s->kvm;

	offset = addr & 0xfffff;
	val_data_u32 = val & 0xffffffffUL;

	kvm_debug("%s: addr=0x%llx,val=0x%lx\n", __FUNCTION__, addr, val);

	if ((offset >= EXTIOI_ENABLE_START) && (offset < EXTIOI_ENABLE_END)) {
		reg_count = (offset - EXTIOI_ENABLE_START) / 4;
		old_data_u32 = state->ext_en.reg_u32[reg_count];
		if (old_data_u32 != val_data_u32) {
			state->ext_en.reg_u32[reg_count] = val_data_u32;
			old_data_u32 = old_data_u32 ^ val_data_u32;

			mask = 0x1;
			for (i = 0; i < 8 * sizeof(old_data_u32); i++) {
				if (old_data_u32 & mask) {
					level = !!(val_data_u32 & (0x1 << i));
					if (level)
						ext_irq_update_core(kvm, i + reg_count * 32, level);
				}
				mask = mask << 1;
			}
		}
	} else if ((offset >= EXTIOI_BOUNCE_START) && (offset < EXTIOI_BOUNCE_END)) {
		reg_count = (offset - EXTIOI_BOUNCE_START) / 4;
		state->bounce.reg_u32[reg_count] = val_data_u32;
	} else if ((offset >= EXTIOI_ISR_START) && (offset < EXTIOI_ISR_END)) {
		/* ISR cannot be written directly; writing 1 clears the bit */
		reg_count = (offset - EXTIOI_ISR_START) / 4;
		old_data_u32 = state->ext_isr.reg_u32[reg_count];
		state->ext_isr.reg_u32[reg_count] = old_data_u32 & (~val_data_u32);

		mask = 0x1;
		for (i = 0; i < 8 * sizeof(old_data_u32); i++) {
			if ((old_data_u32 & mask) && (val_data_u32 & mask))
				ext_irq_update_core(kvm, i + reg_count * 32, 0);
			mask = mask << 1;
		}
	} else if ((offset >= EXTIOI_COREISR_START) && (offset < EXTIOI_COREISR_END)) {
		int bits;
		/* percpu(32 bytes) coreisr reg_count is 0~7 */
		vcpu_id = (offset >> 8) & 0xff;
		reg_count = (offset & 0xff) / 4;

		/* ext_core_isr */
		state->ext_core_isr.reg_u32[vcpu_id][reg_count] &= ~val_data_u32;

		bits = sizeof(val_data_u32) * 8;
		i = find_first_bit((void *)&val_data_u32, bits);
		while (i < bits) {
			ext_deactive_core_isr(kvm, i + reg_count * bits, vcpu_id);
			bitmap_clear((void *)&val_data_u32, i, 1);
			i = find_first_bit((void *)&val_data_u32, bits);
		}
	} else if ((offset >= EXTIOI_IPMAP_START) && (offset < EXTIOI_IPMAP_END)) {
		tmp_data_u8 = val_data_u32 & 0xff;
		ls3a_ext_intctl_writeb(vcpu, dev, addr, &tmp_data_u8);
		tmp_data_u8 = (val_data_u32 >> 8) & 0xff;
		ls3a_ext_intctl_writeb(vcpu, dev, addr + 1, &tmp_data_u8);
		tmp_data_u8 = (val_data_u32 >> 16) & 0xff;
		ls3a_ext_intctl_writeb(vcpu, dev, addr + 2, &tmp_data_u8);
		tmp_data_u8 = (val_data_u32 >> 24) & 0xff;
		ls3a_ext_intctl_writeb(vcpu, dev, addr + 3, &tmp_data_u8);
	} else if ((offset >= EXTIOI_COREMAP_START) && (offset < EXTIOI_COREMAP_END)) {
		tmp_data_u8 = val_data_u32 & 0xff;
		ls3a_ext_intctl_writeb(vcpu, dev, addr, &tmp_data_u8);
		tmp_data_u8 = (val_data_u32 >> 8) & 0xff;
		ls3a_ext_intctl_writeb(vcpu, dev, addr + 1, &tmp_data_u8);
		tmp_data_u8 = (val_data_u32 >> 16) & 0xff;
		ls3a_ext_intctl_writeb(vcpu, dev, addr + 2, &tmp_data_u8);
		tmp_data_u8 = (val_data_u32 >> 24) & 0xff;
		ls3a_ext_intctl_writeb(vcpu, dev, addr + 3, &tmp_data_u8);
		kvm_debug("%s:id:%d addr=0x%llx, offset 0x%llx val 0x%x\n",
			__FUNCTION__, vcpu->vcpu_id, addr, offset, val_data_u32);
	} else if ((offset >= EXTIOI_NODETYPE_START) && (offset < EXTIOI_NODETYPE_END)) {
		reg_count = (offset - EXTIOI_NODETYPE_START) / 4;
		state->node_type.reg_u32[reg_count] = val_data_u32;
	} else {
		WARN_ONCE(1, "%s:%d Abnormal address access:addr 0x%llx\n",
			__FUNCTION__, __LINE__, addr);
	}

	return 0;
}

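/**
 * ls3a_ext_intctl_writel()
 * @vcpu: vCPU issuing the MMIO access
 * @dev: registered KVM MMIO device
 * @addr: register address, already rebased by the dispatch routine
 * @__val: pointer to the 8-byte value to write
 *
 * Emulate an 8-byte write of the extended interrupt controller registers;
 * IPMAP and COREMAP accesses are split into byte-wide writes.
 */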
static int ls3a_ext_intctl_writel(struct kvm_vcpu *vcpu,
				struct kvm_io_device *dev,
				gpa_t addr, const void *__val)
{
	uint64_t offset, val_data_u64, old_data_u64, reg_count, mask, i;
	struct ls3a_kvm_extirq *s = NULL;
	struct kvm_ls3a_extirq_state *state = NULL;
	struct kvm *kvm = NULL;
	uint8_t tmp_data_u8;
	int level, vcpu_id;

	unsigned long val = *(unsigned long *)__val;

	s = container_of(dev, struct ls3a_kvm_extirq, device);

	state = &(s->ls3a_ext_irq);
	kvm = s->kvm;

	offset = addr & 0xfffff;
	val_data_u64 = val;

	kvm_debug("%s: addr=0x%llx,val=0x%lx\n", __FUNCTION__, addr, val);

	if ((offset >= EXTIOI_ENABLE_START) && (offset < EXTIOI_ENABLE_END)) {
		reg_count = (offset - EXTIOI_ENABLE_START) / 8;
		old_data_u64 = state->ext_en.reg_u64[reg_count];
		if (old_data_u64 != val_data_u64) {
			state->ext_en.reg_u64[reg_count] = val_data_u64;
			old_data_u64 = old_data_u64 ^ val_data_u64;

			mask = 0x1;
			for (i = 0; i < 8 * sizeof(old_data_u64); i++) {
				if (old_data_u64 & mask) {
					/* 1ULL: a plain int shift is undefined for i >= 32 */
					level = !!(val_data_u64 & (1ULL << i));
					if (level)
						ext_irq_update_core(kvm, i + reg_count * 64, level);
				}
				mask = mask << 1;
			}
		}
	} else if ((offset >= EXTIOI_BOUNCE_START) && (offset < EXTIOI_BOUNCE_END)) {
		reg_count = (offset - EXTIOI_BOUNCE_START) / 8;
		state->bounce.reg_u64[reg_count] = val_data_u64;
	} else if ((offset >= EXTIOI_ISR_START) && (offset < EXTIOI_ISR_END)) {
		/* ISR cannot be written directly; writing 1 clears the bit */
		reg_count = (offset - EXTIOI_ISR_START) / 8;
		old_data_u64 = state->ext_isr.reg_u64[reg_count];
		state->ext_isr.reg_u64[reg_count] = old_data_u64 & (~val_data_u64);

		mask = 0x1;
		for (i = 0; i < 8 * sizeof(old_data_u64); i++) {
			if ((old_data_u64 & mask) && (val_data_u64 & mask))
				ext_irq_update_core(kvm, i + reg_count * 64, 0);
			mask = mask << 1;
		}
	} else if ((offset >= EXTIOI_COREISR_START) && (offset < EXTIOI_COREISR_END)) {
		int bits;
		vcpu_id = (offset >> 8) & 0xff;
		reg_count = (offset & 0x1f) / 8;

		/* ext_core_isr */
		state->ext_core_isr.reg_u64[vcpu_id][reg_count] &= ~val_data_u64;

		bits = sizeof(val_data_u64) * 8;
		i = find_first_bit((void *)&val_data_u64, bits);
		while (i < bits) {
			ext_deactive_core_isr(kvm, i + reg_count * bits, vcpu_id);
			bitmap_clear((void *)&val_data_u64, i, 1);
			i = find_first_bit((void *)&val_data_u64, bits);
		}
	} else if ((offset >= EXTIOI_IPMAP_START) && (offset < EXTIOI_IPMAP_END)) {
		tmp_data_u8 = val_data_u64 & 0xff;
		ls3a_ext_intctl_writeb(vcpu, dev, addr, &tmp_data_u8);
		tmp_data_u8 = (val_data_u64 >> 8) & 0xff;
		ls3a_ext_intctl_writeb(vcpu, dev, addr + 1, &tmp_data_u8);
		tmp_data_u8 = (val_data_u64 >> 16) & 0xff;
		ls3a_ext_intctl_writeb(vcpu, dev, addr + 2, &tmp_data_u8);
		tmp_data_u8 = (val_data_u64 >> 24) & 0xff;
		ls3a_ext_intctl_writeb(vcpu, dev, addr + 3, &tmp_data_u8);

		tmp_data_u8 = (val_data_u64 >> 32) & 0xff;
		ls3a_ext_intctl_writeb(vcpu, dev, addr + 4, &tmp_data_u8);
		tmp_data_u8 = (val_data_u64 >> 40) & 0xff;
		ls3a_ext_intctl_writeb(vcpu, dev, addr + 5, &tmp_data_u8);
		tmp_data_u8 = (val_data_u64 >> 48) & 0xff;
		ls3a_ext_intctl_writeb(vcpu, dev, addr + 6, &tmp_data_u8);
		tmp_data_u8 = (val_data_u64 >> 56) & 0xff;
		ls3a_ext_intctl_writeb(vcpu, dev, addr + 7, &tmp_data_u8);
	} else if ((offset >= EXTIOI_COREMAP_START) && (offset < EXTIOI_COREMAP_END)) {
		tmp_data_u8 = val_data_u64 & 0xff;
		ls3a_ext_intctl_writeb(vcpu, dev, addr, &tmp_data_u8);
		tmp_data_u8 = (val_data_u64 >> 8) & 0xff;
		ls3a_ext_intctl_writeb(vcpu, dev, addr + 1, &tmp_data_u8);
		tmp_data_u8 = (val_data_u64 >> 16) & 0xff;
		ls3a_ext_intctl_writeb(vcpu, dev, addr + 2, &tmp_data_u8);
		tmp_data_u8 = (val_data_u64 >> 24) & 0xff;
		ls3a_ext_intctl_writeb(vcpu, dev, addr + 3, &tmp_data_u8);

		tmp_data_u8 = (val_data_u64 >> 32) & 0xff;
		ls3a_ext_intctl_writeb(vcpu, dev, addr + 4, &tmp_data_u8);
		tmp_data_u8 = (val_data_u64 >> 40) & 0xff;
		ls3a_ext_intctl_writeb(vcpu, dev, addr + 5, &tmp_data_u8);
		tmp_data_u8 = (val_data_u64 >> 48) & 0xff;
		ls3a_ext_intctl_writeb(vcpu, dev, addr + 6, &tmp_data_u8);
		tmp_data_u8 = (val_data_u64 >> 56) & 0xff;
		ls3a_ext_intctl_writeb(vcpu, dev, addr + 7, &tmp_data_u8);
	} else if ((offset >= EXTIOI_NODETYPE_START) && (offset < EXTIOI_NODETYPE_END)) {
		reg_count = (offset - EXTIOI_NODETYPE_START) / 8;
		state->node_type.reg_u64[reg_count] = val_data_u64;
	} else {
		WARN_ONCE(1, "%s:%d Abnormal address access:addr 0x%llx\n",
			__FUNCTION__, __LINE__, addr);
	}
	return 0;
}

/**
 * ls3a_ext_intctl_write()
 * @vcpu: vCPU issuing the MMIO access
 * @dev: registered KVM MMIO device
 * @addr: register address
 * @size: width of the access in bytes
 * @__val: value to be written
 *
 * Emulate writes to the extended interrupt controller registers,
 * dispatching on the access width.
 */
static int ls3a_ext_intctl_write(struct kvm_vcpu *vcpu,
				struct kvm_io_device *dev,
				gpa_t addr, int size, const void *__val)
{
	struct ls3a_kvm_extirq *s = NULL;
	unsigned long flags;
	uint64_t offset;

	s = container_of(dev, struct ls3a_kvm_extirq, device);

	offset = addr & 0xfffff;
	if (offset & (size - 1)) {
		printk("%s(%d):unaligned address access %llx size %d\n",
			__FUNCTION__, __LINE__, addr, size);
		return 0;
	}

	addr = (addr & 0xfffff) - EXTIOI_ADDR_OFF;
	ls3a_ext_irq_lock(s, flags);

	switch (size) {
	case 1:
		ls3a_ext_intctl_writeb(vcpu, dev, addr, __val);
		break;
	case 4:
		ls3a_ext_intctl_writew(vcpu, dev, addr, __val);
		break;
	case 8:
		ls3a_ext_intctl_writel(vcpu, dev, addr, __val);
		break;
	default:
		WARN_ONCE(1, "%s: Abnormal address access:addr 0x%llx,size %d\n",
			__FUNCTION__, addr, size);
	}

	ls3a_ext_irq_unlock(s, flags);

	kvm_debug("%s(%d):address access %llx size %d\n",
		__FUNCTION__, __LINE__, offset, size);
	return 0;
}

static const struct kvm_io_device_ops kvm_ls3a_ext_irq_ops = {
	.read	= ls3a_ext_intctl_read,
	.write	= ls3a_ext_intctl_write,
};

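/**
 * kvm_destroy_ls3a_ext_irq()
 * @kvm: KVM structure pointer
 *
 * Unregister the extended interrupt MMIO device and free its state.
 */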
void kvm_destroy_ls3a_ext_irq(struct kvm *kvm)
{
	struct ls3a_kvm_extirq *s = kvm->arch.v_extirq;

	if (!s)
		return;
	mutex_lock(&kvm->slots_lock);
	kvm_io_bus_unregister_dev(s->kvm, KVM_MMIO_BUS, &s->device);
	mutex_unlock(&kvm->slots_lock);
	kfree(s);
}

/*
 * kvm_create_ls3a_ext_irq()
 * @kvm: KVM structure pointer
 *
 * Create the extended interrupt resource instance for a virtual machine.
 * Returns: 0 on success, a negative error code on failure.
 */
int kvm_create_ls3a_ext_irq(struct kvm *kvm)
{
	struct ls3a_kvm_extirq *s;
	int ret;

	/* kzalloc() already zeroes the state, no extra memset needed */
	s = kzalloc(sizeof(struct ls3a_kvm_extirq), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	spin_lock_init(&s->lock);
	s->kvm = kvm;

	/*
	 * Initialize MMIO device
	 */
	kvm_iodevice_init(&s->device, &kvm_ls3a_ext_irq_ops);
	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS,
			EXTIOI_REG_BASE, EXTIOI_ADDR_SIZE, &s->device);
	mutex_unlock(&kvm->slots_lock);
	if (ret < 0) {
		printk("%s dev_ls3a_ext_irq register error ret %d\n", __FUNCTION__, ret);
		goto err_register;
	}

	kvm->arch.v_extirq = s;

	return 0;

err_register:
	kfree(s);
	return ret;
}

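/**
 * kvm_set_ext_sw_ipmap()
 * @state: extended interrupt controller state
 *
 * Rebuild the software irq-to-IP routing table (ext_sw_ipmap) from the
 * ip_map registers, e.g. after the register state has been restored.
 */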
static int kvm_set_ext_sw_ipmap(struct kvm_ls3a_extirq_state *state)
{
	uint8_t val_data_u8;
	int i, j, base_irq, irqnum, ipnum;

	ipnum = 0;
	for (i = 0; i < EXTIOI_IRQS_IPMAP_SIZE; i++) {
		val_data_u8 = state->ip_map.reg_u8[i];
		for (j = 0; j < 4; j++) {
			if (val_data_u8 & (0x1 << j)) {
				ipnum = j;
				break;
			}
		}
		kvm_debug("%s:%d ipnum:%d i:%d val_data_u8:0x%x\n", __FUNCTION__, __LINE__,
			ipnum, i, val_data_u8);

		if (val_data_u8) {
			for (base_irq = 0; base_irq < EXTIOI_IRQS_PER_GROUP; base_irq++) {
				irqnum = i * EXTIOI_IRQS_PER_GROUP + base_irq;
				state->ext_sw_ipmap[irqnum] = ipnum;
			}
		} else {
			for (base_irq = 0; base_irq < EXTIOI_IRQS_PER_GROUP; base_irq++) {
				irqnum = i * EXTIOI_IRQS_PER_GROUP + base_irq;
				state->ext_sw_ipmap[irqnum] = 0;
			}
		}
	}

	return 0;
}

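/**
 * kvm_set_ext_sw_coremap()
 * @kvm: KVM structure pointer
 * @state: extended interrupt controller state
 *
 * Rebuild the software irq-to-core routing table (ext_sw_coremap) from
 * the core_map registers.
 */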
static int kvm_set_ext_sw_coremap(struct kvm *kvm, struct kvm_ls3a_extirq_state *state)
{
	int reg_count;

	for (reg_count = 0; reg_count < EXTIOI_IRQS; reg_count++) {
		state->ext_sw_coremap[reg_count] = state->core_map.reg_u8[reg_count];

		kvm_debug("%s:%d -- reg_count:%d vcpu %d\n",
			__FUNCTION__, __LINE__, reg_count, state->core_map.reg_u8[reg_count]);
	}

	return 0;
}

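/**
 * kvm_set_ext_sw_ipisr()
 * @kvm: KVM structure pointer
 * @state: extended interrupt controller state
 *
 * Rebuild the per-core, per-IP pending bitmaps (ext_sw_ipisr) from
 * ext_core_isr and the software routing tables.
 */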
static int kvm_set_ext_sw_ipisr(struct kvm *kvm, struct kvm_ls3a_extirq_state *state)
{
	int ipnum, core, irq_num;

	for (irq_num = 0; irq_num < EXTIOI_IRQS; irq_num++) {
		core = state->ext_sw_coremap[irq_num];
		ipnum = state->ext_sw_ipmap[irq_num];

		if (!test_bit(irq_num, (void *)state->ext_core_isr.reg_u8[core]))
			bitmap_clear((void *)state->ext_sw_ipisr[core][ipnum + 2], irq_num, 1);
		else
			bitmap_set((void *)state->ext_sw_ipisr[core][ipnum + 2], irq_num, 1);
	}
	return 0;
}

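/**
 * kvm_get_ls3a_extirq()
 * @kvm: KVM structure pointer
 * @state: buffer receiving a snapshot of the controller state
 *
 * Copy the extended interrupt controller state into @state under the
 * irqchip lock.
 */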
int kvm_get_ls3a_extirq(struct kvm *kvm, struct kvm_loongarch_ls3a_extirq_state *state)
{
	struct ls3a_kvm_extirq *v_extirq = ls3a_ext_irqchip(kvm);
	struct kvm_ls3a_extirq_state *extirq_state;
	unsigned long flags;

	if (!v_extirq)
		return -EINVAL;

	extirq_state = &(v_extirq->ls3a_ext_irq);
	ls3a_ext_irq_lock(v_extirq, flags);
	memcpy(state, extirq_state,
		sizeof(struct kvm_loongarch_ls3a_extirq_state));
	ls3a_ext_irq_unlock(v_extirq, flags);
	kvm->stat.get_ls3a_ext_irq++;

	return 0;
}

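/**
 * kvm_set_ls3a_extirq()
 * @kvm: KVM structure pointer
 * @state: controller state to restore
 *
 * Restore the extended interrupt controller state from @state and rebuild
 * the derived software routing tables (ipmap, coremap, ipisr).
 */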
int kvm_set_ls3a_extirq(struct kvm *kvm, struct kvm_loongarch_ls3a_extirq_state *state)
{
	struct ls3a_kvm_extirq *v_extirq = ls3a_ext_irqchip(kvm);
	struct kvm_ls3a_extirq_state *extirq_state;
	unsigned long flags;

	if (!v_extirq)
		return -EINVAL;

	extirq_state = &(v_extirq->ls3a_ext_irq);
	ls3a_ext_irq_lock(v_extirq, flags);
	memcpy(extirq_state, state,
		sizeof(struct kvm_loongarch_ls3a_extirq_state));
	kvm_set_ext_sw_ipmap(extirq_state);
	kvm_set_ext_sw_coremap(kvm, extirq_state);
	kvm_set_ext_sw_ipisr(kvm, extirq_state);

	ls3a_ext_irq_unlock(v_extirq, flags);
	kvm->stat.set_ls3a_ext_irq++;

	return 0;
}

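/**
 * kvm_setup_ls3a_extirq()
 * @kvm: KVM structure pointer
 *
 * Reset the extended interrupt controller state to all zeroes.
 */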
int kvm_setup_ls3a_extirq(struct kvm *kvm)
{
	struct ls3a_kvm_extirq *v_extirq = ls3a_ext_irqchip(kvm);
	struct kvm_ls3a_extirq_state *extirq_state;
	unsigned long flags;

	if (!v_extirq)
		return -EINVAL;

	extirq_state = &(v_extirq->ls3a_ext_irq);
	ls3a_ext_irq_lock(v_extirq, flags);
	memset(extirq_state, 0, sizeof(struct kvm_ls3a_extirq_state));
	ls3a_ext_irq_unlock(v_extirq, flags);

	return 0;
}

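/**
 * kvm_dump_ls3a_extirq_state()
 * @s: seq_file to print into
 * @irqchip: extended interrupt controller instance
 *
 * Dump the extended irqchip state (enable, bounce, ISR, per-core ISR and
 * routing registers) for debugfs.
 */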
void kvm_dump_ls3a_extirq_state(struct seq_file *s,
				struct ls3a_kvm_extirq *irqchip)
{
	struct kvm_ls3a_extirq_state *extirq;
	int i, j = 0;
	unsigned long flags;

	seq_puts(s, "LS3A ext irqchip state:\n");

	if (!irqchip)
		return;

	extirq = &(irqchip->ls3a_ext_irq);
	ls3a_ext_irq_lock(irqchip, flags);
	seq_puts(s, "ext irq enabled");
	seq_puts(s, "\nenabled:(Not Enabled)");
	for (i = 0; i < EXTIOI_IRQS; i++) {
		if (!test_bit(i, (void *)&extirq->ext_en))
			seq_printf(s, "%d ", i);
	}
	seq_puts(s, "\nbounce:(Not bounce)");
	for (i = 0; i < EXTIOI_IRQS; i++) {
		if (!test_bit(i, (void *)&extirq->bounce))
			seq_printf(s, "%d ", i);
	}
	seq_puts(s, "\next_isr:");
	for (i = 0; i < EXTIOI_IRQS; i++) {
		if (test_bit(i, (void *)&extirq->ext_isr))
			seq_printf(s, "%d ", i);
	}

	seq_puts(s, "\ncore_isr:");
	for (i = 0; i < KVM_MAX_VCPUS && kvm_get_vcpu_by_id(irqchip->kvm, i); i++) {
		seq_printf(s, "\n\t CPU%d:", i);
		for (j = 0; j < EXTIOI_IRQS; j++) {
			if (test_bit(j, (void *)&extirq->ext_core_isr.reg_u8[i]))
				seq_printf(s, "%d ", j);
		}
	}
	seq_printf(s, "\nip_map:%llx", extirq->ip_map.reg_u64);
	seq_puts(s, "\ncore_map: (only display router to slave cpu)\n");
	for (i = 0; i < EXTIOI_IRQS_COREMAP_SIZE; i++)
		if (extirq->core_map.reg_u8[i])
			seq_printf(s, "\tirq:%d -> cpu:%d\n", i,
				extirq->core_map.reg_u8[i]);
	ls3a_ext_irq_unlock(irqchip, flags);
}
