// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */

#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/bitops.h>
#include "ls3a_ipi.h"
#include "ls7a_irq.h"
#include "ls3a_ext_irq.h"

void ls7a_ioapic_lock(struct ls7a_kvm_ioapic *s, unsigned long *flags)
{
	unsigned long tmp;

	spin_lock_irqsave(&s->lock, tmp);
	*flags = tmp;
}

void ls7a_ioapic_unlock(struct ls7a_kvm_ioapic *s, unsigned long *flags)
{
	unsigned long tmp;

	tmp = *flags;
	spin_unlock_irqrestore(&s->lock, tmp);
}

/*
 * Forward newly pending, unmasked pins in @mask to the extended irq
 * controller as HT MSI vectors and mark them in-service.
 */
static void kvm_ls7a_ioapic_raise(struct kvm *kvm, unsigned long mask)
{
	unsigned long irqnum, val;
	struct ls7a_kvm_ioapic *s = ls7a_ioapic_irqchip(kvm);
	struct kvm_ls7a_ioapic_state *state;
	int i;

	state = &s->ls7a_ioapic;
	val = mask & state->intirr & (~state->int_mask);
	val &= ~state->intisr;
	for_each_set_bit(i, &val, 64) {
		state->intisr |= 0x1ULL << i;
		irqnum = state->htmsi_vector[i];
		kvm_debug("msi_irq_handler,%ld,up\n", irqnum);
		msi_irq_handler(kvm, irqnum, 1);
	}

	kvm->stat.ls7a_ioapic_update++;
}

/*
 * Clear the in-service state of the pins in @mask and lower the
 * corresponding HT MSI vectors.
 */
static void kvm_ls7a_ioapic_lower(struct kvm *kvm, unsigned long mask)
{
	unsigned long irqnum, val;
	struct ls7a_kvm_ioapic *s = ls7a_ioapic_irqchip(kvm);
	struct kvm_ls7a_ioapic_state *state;
	int i;

	state = &s->ls7a_ioapic;
	val = mask & state->intisr;
	for_each_set_bit(i, &val, 64) {
		state->intisr &= ~(0x1ULL << i);
		irqnum = state->htmsi_vector[i];
		kvm_debug("msi_irq_handler,%ld,down\n", irqnum);
		msi_irq_handler(kvm, irqnum, 0);
	}

	kvm->stat.ls7a_ioapic_update++;
}

int kvm_ls7a_set_msi(struct kvm_kernel_irq_routing_entry *e,
		struct kvm *kvm, int irq_source_id, int level, bool line_status)
{
	if (!level)
		return -1;

	kvm_debug("msi data is 0x%x\n", e->msi.data);
	msi_irq_handler(kvm, e->msi.data, 1);
	return 0;
}

int kvm_ls7a_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi)
{
	struct kvm_kernel_irq_routing_entry route;

	if (msi->flags != 0)
		return -EINVAL;

	kvm->stat.ls7a_msi_irq++;
	route.msi.address_lo = msi->address_lo;
	route.msi.address_hi = msi->address_hi;
	route.msi.data = msi->data;

	kvm_debug("msi data is 0x%x\n", route.msi.data);
	return kvm_ls7a_set_msi(&route, kvm,
			KVM_USERSPACE_IRQ_SOURCE_ID, 1, false);
}

int kvm_ls7a_ioapic_set_irq(struct kvm *kvm, int irq, int level)
{
	struct ls7a_kvm_ioapic *s;
	struct kvm_ls7a_ioapic_state *state;
	uint64_t mask = 1ULL << irq;

	BUG_ON(irq < 0 || irq >= LS7A_IOAPIC_NUM_PINS);

	s = ls7a_ioapic_irqchip(kvm);
	state = &s->ls7a_ioapic;

	if (state->intedge & mask) {
		/* edge triggered */
		if (level) {
			if ((state->last_intirr & mask) == 0) {
				state->intirr |= mask;
				kvm_ls7a_ioapic_raise(kvm, mask);
			}
			state->last_intirr |= mask;
		} else {
			state->last_intirr &= ~mask;
		}
	} else {
		/* level triggered */
		if (level) {
			if ((state->intirr & mask) == 0) {
				state->intirr |= mask;
				kvm_ls7a_ioapic_raise(kvm, mask);
			}
		} else {
			if (state->intirr & mask) {
				state->intirr &= ~mask;
				kvm_ls7a_ioapic_lower(kvm, mask);
			}
		}
	}
	kvm->stat.ls7a_ioapic_set_irq++;
	return 0;
}
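
/*
 * Emulate a guest store to the IOAPIC register window.  The 64-bit
 * interrupt registers may be accessed as a whole (len == 8), as 32-bit
 * halves (len == 4, the "+ 4" offsets select the high word), or, for the
 * per-pin HT MSI vector and route entry tables, one byte at a time.
 * Called with the ioapic lock held.
 */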
static int ls7a_ioapic_reg_write(struct ls7a_kvm_ioapic *s,
				 gpa_t addr, int len, const void *val)
{
	struct kvm *kvm;
	struct kvm_ls7a_ioapic_state *state;
	int64_t offset_tmp;
	uint64_t offset;
	uint64_t old, himask, lowmask;
	unsigned long data, flags;

	offset = addr & 0xfff;
	kvm = s->kvm;
	state = &(s->ls7a_ioapic);
	lowmask = 0xFFFFFFFFUL;
	himask = lowmask << 32;

	if (offset & (len - 1)) {
		pr_warn("%s(%d): unaligned address access %llx size %d\n",
			__func__, __LINE__, addr, len);
		return 0;
	}

	if (len == 8) {
		data = *(uint64_t *)val;
		switch (offset) {
		case LS7A_INT_MASK_OFFSET:
			old = state->int_mask;
			state->int_mask = data;
			if (old & ~data)
				kvm_ls7a_ioapic_raise(kvm, old & ~data);
			if (~old & data)
				kvm_ls7a_ioapic_lower(kvm, ~old & data);
			break;
		case LS7A_INT_STATUS_OFFSET:
			state->intisr = data;
			break;
		case LS7A_INT_EDGE_OFFSET:
			state->intedge = data;
			break;
		case LS7A_INT_CLEAR_OFFSET:
			/*
			 * For an emulated device, writing INTCLR only clears
			 * edge-triggered irqs and has no effect on
			 * level-triggered irqs.  For a pass-through device
			 * with level-triggered INTx, however, the interrupt
			 * must be cleared (acked) here.
			 */
			old = data;
			data = data & state->intedge;
			state->intirr &= ~data;
			kvm_ls7a_ioapic_lower(kvm, data);
			state->intisr &= ~data;

			data = old & ~state->intedge;
			ls7a_ioapic_unlock(kvm->arch.v_ioapic, &flags);
			for_each_set_bit(offset_tmp, &data, 64)
				kvm_notify_acked_irq(kvm, 0, offset_tmp);
			ls7a_ioapic_lock(kvm->arch.v_ioapic, &flags);
			break;
		case LS7A_INT_POL_OFFSET:
			state->int_polarity = data;
			break;
		case LS7A_HTMSI_EN_OFFSET:
			state->htmsi_en = data;
			break;
		case LS7A_AUTO_CTRL0_OFFSET:
		case LS7A_AUTO_CTRL1_OFFSET:
			break;
		default:
			WARN_ONCE(1, "Abnormal address access:addr 0x%llx,len %d\n",
				  addr, len);
			break;
		}
	} else if (len == 4) {
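		/*
		 * A 32-bit access touches one half of a 64-bit register:
		 * the base offset addresses the low word and the "+ 4"
		 * offset the high word, so data written to the high word
		 * is shifted up by 32 before being merged.
		 */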
		data = *(uint32_t *)val;
		switch (offset) {
		case LS7A_INT_MASK_OFFSET:
			old = state->int_mask & lowmask;
			state->int_mask = (state->int_mask & himask) | data;
			if (old & ~data)
				kvm_ls7a_ioapic_raise(kvm, old & ~data);
			if (~old & data)
				kvm_ls7a_ioapic_lower(kvm, ~old & data);
			break;
		case LS7A_INT_MASK_OFFSET + 4:
			data = data << 32;
			old = state->int_mask & himask;
			state->int_mask = (state->int_mask & lowmask) | data;
			if (old & ~data)
				kvm_ls7a_ioapic_raise(kvm, old & ~data);
			if (~old & data)
				kvm_ls7a_ioapic_lower(kvm, ~old & data);
			break;
		case LS7A_INT_STATUS_OFFSET:
			state->intisr = (state->intisr & himask) | data;
			break;
		case LS7A_INT_STATUS_OFFSET + 4:
			data = data << 32;
			state->intisr = (state->intisr & lowmask) | data;
			break;
		case LS7A_INT_EDGE_OFFSET:
			state->intedge = (state->intedge & himask) | data;
			break;
		case LS7A_INT_EDGE_OFFSET + 4:
			data = data << 32;
			state->intedge = (state->intedge & lowmask) | data;
			break;
		case LS7A_INT_CLEAR_OFFSET + 4:
			data = data << 32;
			fallthrough;
		case LS7A_INT_CLEAR_OFFSET:
			old = data;
			data = data & state->intedge;
			state->intirr &= ~data;
			kvm_ls7a_ioapic_lower(kvm, data);
			state->intisr &= ~data;

			data = old & ~state->intedge;
			ls7a_ioapic_unlock(kvm->arch.v_ioapic, &flags);
			for_each_set_bit(offset_tmp, &data, 64)
				kvm_notify_acked_irq(kvm, 0, offset_tmp);
			ls7a_ioapic_lock(kvm->arch.v_ioapic, &flags);
			break;
		case LS7A_INT_POL_OFFSET:
			state->int_polarity = (state->int_polarity & himask) | data;
			break;
		case LS7A_INT_POL_OFFSET + 4:
			data = data << 32;
			state->int_polarity = (state->int_polarity & lowmask) | data;
			break;
		case LS7A_HTMSI_EN_OFFSET:
			state->htmsi_en = (state->htmsi_en & himask) | data;
			break;
		case LS7A_HTMSI_EN_OFFSET + 4:
			data = data << 32;
			state->htmsi_en = (state->htmsi_en & lowmask) | data;
			break;
		case LS7A_AUTO_CTRL0_OFFSET:
		case LS7A_AUTO_CTRL0_OFFSET + 4:
		case LS7A_AUTO_CTRL1_OFFSET:
		case LS7A_AUTO_CTRL1_OFFSET + 4:
			break;
		default:
			WARN_ONCE(1, "Abnormal address access:addr 0x%llx,len %d\n",
				  addr, len);
			break;
		}
	} else if (len == 1) {
		data = *(unsigned char *)val;
		if (offset >= LS7A_HTMSI_VEC_OFFSET) {
			offset_tmp = offset - LS7A_HTMSI_VEC_OFFSET;
			if (offset_tmp >= 0 && offset_tmp < 64) {
				state->htmsi_vector[offset_tmp] =
						(uint8_t)(data & 0xff);
			}
		} else if (offset >= LS7A_ROUTE_ENTRY_OFFSET) {
			offset_tmp = offset - LS7A_ROUTE_ENTRY_OFFSET;
			if (offset_tmp >= 0 && offset_tmp < 64) {
				state->route_entry[offset_tmp] =
						(uint8_t)(data & 0xff);
			}
		} else {
			WARN_ONCE(1, "Abnormal address access:addr 0x%llx,len %d\n",
				  addr, len);
		}
	} else {
		WARN_ONCE(1, "Abnormal address access:addr 0x%llx,len %d\n",
			  addr, len);
	}
	kvm->stat.ioapic_reg_write++;
	return 0;
}

static inline struct ls7a_kvm_ioapic *to_ioapic(struct kvm_io_device *dev)
{
	return container_of(dev, struct ls7a_kvm_ioapic, dev_ls7a_ioapic);
}

static int kvm_ls7a_ioapic_write(struct kvm_vcpu *vcpu,
				 struct kvm_io_device *this,
				 gpa_t addr, int len, const void *val)
{
	struct ls7a_kvm_ioapic *s = to_ioapic(this);
	unsigned long flags;

	ls7a_ioapic_lock(s->kvm->arch.v_ioapic, &flags);
	ls7a_ioapic_reg_write(s, addr, len, val);
	ls7a_ioapic_unlock(s->kvm->arch.v_ioapic, &flags);

	return 0;
}
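
/*
 * Emulate a guest load from the IOAPIC register window.  Mirrors
 * ls7a_ioapic_reg_write(): 8-, 4- and 1-byte accesses are supported, and
 * reads of the interrupt status register return only pending bits that
 * are not masked.  Called with the ioapic lock held.
 */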
static int ls7a_ioapic_reg_read(struct ls7a_kvm_ioapic *s,
				gpa_t addr, int len, void *val)
{
	uint64_t offset, offset_tmp;
	struct kvm *kvm;
	struct kvm_ls7a_ioapic_state *state;
	uint64_t result = 0, lowmask, himask;

	state = &(s->ls7a_ioapic);
	kvm = s->kvm;
	offset = addr & 0xfff;
	lowmask = 0xFFFFFFFFUL;
	himask = lowmask << 32;
	if (offset & (len - 1)) {
		pr_warn("%s(%d): unaligned address access %llx size %d\n",
			__func__, __LINE__, addr, len);
		return 0;
	}

	if (len == 8) {
		switch (offset) {
		case LS7A_INT_MASK_OFFSET:
			result = state->int_mask;
			break;
		case LS7A_INT_STATUS_OFFSET:
			result = state->intisr & (~state->int_mask);
			break;
		case LS7A_INT_EDGE_OFFSET:
			result = state->intedge;
			break;
		case LS7A_INT_POL_OFFSET:
			result = state->int_polarity;
			break;
		case LS7A_HTMSI_EN_OFFSET:
			result = state->htmsi_en;
			break;
		case LS7A_AUTO_CTRL0_OFFSET:
		case LS7A_AUTO_CTRL1_OFFSET:
			break;
		case LS7A_INT_ID_OFFSET:
			result = LS7A_INT_ID_VER;
			result = (result << 32) + LS7A_INT_ID_VAL;
			break;
		default:
			WARN_ONCE(1, "Abnormal address access:addr 0x%llx,len %d\n",
				  addr, len);
			break;
		}
		if (val != NULL)
			*(uint64_t *)val = result;
	} else if (len == 4) {
		switch (offset) {
		case LS7A_INT_MASK_OFFSET:
			result = state->int_mask & lowmask;
			break;
		case LS7A_INT_MASK_OFFSET + 4:
			result = state->int_mask & himask;
			result = result >> 32;
			break;
		case LS7A_INT_STATUS_OFFSET:
			result = state->intisr & (~state->int_mask) & lowmask;
			break;
		case LS7A_INT_STATUS_OFFSET + 4:
			result = state->intisr & (~state->int_mask) & himask;
			result = result >> 32;
			break;
		case LS7A_INT_EDGE_OFFSET:
			result = state->intedge & lowmask;
			break;
		case LS7A_INT_EDGE_OFFSET + 4:
			result = state->intedge & himask;
			result = result >> 32;
			break;
		case LS7A_INT_POL_OFFSET:
			result = state->int_polarity & lowmask;
			break;
		case LS7A_INT_POL_OFFSET + 4:
			result = state->int_polarity & himask;
			result = result >> 32;
			break;
		case LS7A_HTMSI_EN_OFFSET:
			result = state->htmsi_en & lowmask;
			break;
		case LS7A_HTMSI_EN_OFFSET + 4:
			result = state->htmsi_en & himask;
			result = result >> 32;
			break;
		case LS7A_AUTO_CTRL0_OFFSET:
		case LS7A_AUTO_CTRL0_OFFSET + 4:
		case LS7A_AUTO_CTRL1_OFFSET:
		case LS7A_AUTO_CTRL1_OFFSET + 4:
			break;
		case LS7A_INT_ID_OFFSET:
			result = LS7A_INT_ID_VAL;
			break;
		case LS7A_INT_ID_OFFSET + 4:
			result = LS7A_INT_ID_VER;
			break;
		default:
			WARN_ONCE(1, "Abnormal address access:addr 0x%llx,len %d\n",
				  addr, len);
			break;
		}
		if (val != NULL)
			*(uint32_t *)val = result;
	} else if (len == 1) {
		if (offset >= LS7A_HTMSI_VEC_OFFSET) {
			offset_tmp = offset - LS7A_HTMSI_VEC_OFFSET;
			if (offset_tmp < 64)
				result = state->htmsi_vector[offset_tmp];
		} else if (offset >= LS7A_ROUTE_ENTRY_OFFSET) {
			offset_tmp = offset - LS7A_ROUTE_ENTRY_OFFSET;
			if (offset_tmp < 64)
				result = state->route_entry[offset_tmp];
		} else {
			WARN_ONCE(1, "Abnormal address access:addr 0x%llx,len %d\n",
				  addr, len);
		}
		if (val != NULL)
			*(unsigned char *)val = result;
	} else {
		WARN_ONCE(1, "Abnormal address access:addr 0x%llx,len %d\n",
			  addr, len);
	}
	kvm->stat.ioapic_reg_read++;
	return result;
}

static int kvm_ls7a_ioapic_read(struct kvm_vcpu *vcpu,
				struct kvm_io_device *this,
				gpa_t addr, int len, void *val)
{
	struct ls7a_kvm_ioapic *s = to_ioapic(this);
	unsigned long flags;

	ls7a_ioapic_lock(s->kvm->arch.v_ioapic, &flags);
	ls7a_ioapic_reg_read(s, addr, len, val);
	ls7a_ioapic_unlock(s->kvm->arch.v_ioapic, &flags);
	return 0;
}

static const struct kvm_io_device_ops kvm_ls7a_ioapic_ops = {
	.read	= kvm_ls7a_ioapic_read,
	.write	= kvm_ls7a_ioapic_write,
};

static int kvm_ls7a_ioapic_alias_read(struct kvm_vcpu *vcpu,
		struct kvm_io_device *this, gpa_t addr, int len, void *val)
{
	struct ls7a_kvm_ioapic *s;
	unsigned long flags;

	s = container_of(this, struct ls7a_kvm_ioapic, ls7a_ioapic_alias);
	ls7a_ioapic_lock(s->kvm->arch.v_ioapic, &flags);
	ls7a_ioapic_reg_read(s, addr, len, val);
	ls7a_ioapic_unlock(s->kvm->arch.v_ioapic, &flags);
	return 0;
}

static int kvm_ls7a_ioapic_alias_write(struct kvm_vcpu *vcpu,
		struct kvm_io_device *this, gpa_t addr, int len, const void *val)
{
	struct ls7a_kvm_ioapic *s;
	unsigned long flags;

	s = container_of(this, struct ls7a_kvm_ioapic, ls7a_ioapic_alias);
	ls7a_ioapic_lock(s->kvm->arch.v_ioapic, &flags);
	ls7a_ioapic_reg_write(s, addr, len, val);
	ls7a_ioapic_unlock(s->kvm->arch.v_ioapic, &flags);

	return 0;
}

static const struct kvm_io_device_ops kvm_ls7a_ioapic_ops_alias = {
	.read	= kvm_ls7a_ioapic_alias_read,
	.write	= kvm_ls7a_ioapic_alias_write,
};
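
/*
 * Create the virtual LS7A IOAPIC and register its 4KB register window on
 * the KVM MMIO bus twice: once at the primary guest base address and once
 * at the alias base, so accesses through either window reach the same
 * emulated state.
 */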
int kvm_create_ls7a_ioapic(struct kvm *kvm)
{
	struct ls7a_kvm_ioapic *s;
	int ret;
	unsigned long ls7a_ioapic_reg_base;

	s = kzalloc(sizeof(struct ls7a_kvm_ioapic), GFP_KERNEL);
	if (!s)
		return -ENOMEM;
	spin_lock_init(&s->lock);
	s->kvm = kvm;

	ls7a_ioapic_reg_base = LS7A_IOAPIC_GUEST_REG_BASE;

	/*
	 * Initialize the MMIO device
	 */
	kvm_iodevice_init(&s->dev_ls7a_ioapic, &kvm_ls7a_ioapic_ops);
	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, ls7a_ioapic_reg_base,
				      0x1000, &s->dev_ls7a_ioapic);
	if (ret < 0) {
		kvm_err("Failed to register ioapic, err: %d\n", ret);
		goto fail_unlock;
	}

	ls7a_ioapic_reg_base = LS7A_IOAPIC_GUEST_REG_BASE_ALIAS;
	kvm_iodevice_init(&s->ls7a_ioapic_alias, &kvm_ls7a_ioapic_ops_alias);
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, ls7a_ioapic_reg_base,
				      0x1000, &s->ls7a_ioapic_alias);
	if (ret < 0) {
		kvm_err("Failed to register alias ioapic, err: %d\n", ret);
		kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS,
					  &s->dev_ls7a_ioapic);
		goto fail_unlock;
	}
	mutex_unlock(&kvm->slots_lock);

	kvm->arch.v_ioapic = s;

	return 0;

fail_unlock:
	mutex_unlock(&kvm->slots_lock);
	kfree(s);

	return ret;
}

int kvm_get_ls7a_ioapic(struct kvm *kvm, struct ls7a_ioapic_state *state)
{
	struct ls7a_kvm_ioapic *ls7a_ioapic = ls7a_ioapic_irqchip(kvm);
	struct kvm_ls7a_ioapic_state *ioapic_state =
					&(ls7a_ioapic->ls7a_ioapic);
	unsigned long flags;

	ls7a_ioapic_lock(ls7a_ioapic, &flags);
	memcpy(state, ioapic_state, sizeof(struct kvm_ls7a_ioapic_state));
	ls7a_ioapic_unlock(ls7a_ioapic, &flags);
	kvm->stat.get_ls7a_ioapic++;
	return 0;
}

int kvm_set_ls7a_ioapic(struct kvm *kvm, struct ls7a_ioapic_state *state)
{
	struct ls7a_kvm_ioapic *ls7a_ioapic = ls7a_ioapic_irqchip(kvm);
	struct kvm_ls7a_ioapic_state *ioapic_state =
					&(ls7a_ioapic->ls7a_ioapic);
	unsigned long flags;

	if (!ls7a_ioapic)
		return -EINVAL;

	ls7a_ioapic_lock(ls7a_ioapic, &flags);
	memcpy(ioapic_state, state, sizeof(struct kvm_ls7a_ioapic_state));
	ls7a_ioapic_unlock(ls7a_ioapic, &flags);
	kvm->stat.set_ls7a_ioapic++;
	return 0;
}

void kvm_destroy_ls7a_ioapic(struct kvm *kvm)
{
	struct ls7a_kvm_ioapic *vpic = kvm->arch.v_ioapic;

	if (!vpic)
		return;
	mutex_lock(&kvm->slots_lock);
	kvm_io_bus_unregister_dev(vpic->kvm, KVM_MMIO_BUS,
				  &vpic->ls7a_ioapic_alias);
	kvm_io_bus_unregister_dev(vpic->kvm, KVM_MMIO_BUS,
				  &vpic->dev_ls7a_ioapic);
	mutex_unlock(&kvm->slots_lock);
	kfree(vpic);
}
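
/*
 * Dump the emulated IOAPIC state (unmasked pins, HT MSI enable and vector
 * tables, route entries, pending and in-service bits) to a seq_file for
 * debugging.
 */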
void kvm_dump_ls7a_ioapic_state(struct seq_file *m,
				struct ls7a_kvm_ioapic *ioapic)
{
	struct kvm_ls7a_ioapic_state *ioapic_state;
	unsigned long flags;
	int i = 0;

	if (!ioapic)
		return;

	seq_puts(m, "\nIOAPIC state:\n");
	ioapic_state = &(ioapic->ls7a_ioapic);

	ls7a_ioapic_lock(ioapic, &flags);
	seq_puts(m, "unmasked irq: ");
	for (i = 0; i < 64; i++) {
		if (!test_bit(i, (void *)&ioapic_state->int_mask))
			seq_printf(m, "%d ", i);
	}
	seq_printf(m, "\nhtmsi_en:0x%016llx\n"
		   "intedge:0x%016llx",
		   ioapic_state->htmsi_en,
		   ioapic_state->intedge);

	seq_puts(m, "\nroute_entry: ");
	for (i = 0; i < 64; i++)
		seq_printf(m, "%d ", ioapic_state->route_entry[i]);

	seq_puts(m, "\nhtmsi_vector: ");
	for (i = 0; i < 64; i++)
		seq_printf(m, "%d ", ioapic_state->htmsi_vector[i]);

	seq_printf(m, "\nintirr:%016llx\n"
		   "intisr:%016llx\n",
		   ioapic_state->intirr,
		   ioapic_state->intisr);
	ls7a_ioapic_unlock(ioapic, &flags);
}