// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Filename: core.c
 *
 * Core of the rsxx driver: PCI probe/remove, interrupt handling, card
 * event/state management, EEH (PCI error) recovery, and debugfs hooks
 * for the IBM Flash Adapter 900GB Full Height PCIe SSD.
 *
 * Authors: Joshua Morris <josh.h.morris@us.ibm.com>
 *	Philip Kelleher <pjk1939@linux.vnet.ibm.com>
 *
 * (C) Copyright 2013 IBM Corporation
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>

#include <linux/genhd.h>
#include <linux/idr.h>

#include "rsxx_priv.h"
#include "rsxx_cfg.h"

#define NO_LEGACY 0
/* Max seconds to wait for the card to leave CARD_STATE_STARTING at probe. */
#define SYNC_START_TIMEOUT (10 * 60) /* 10 minutes */

MODULE_DESCRIPTION("IBM Flash Adapter 900GB Full Height Device Driver");
MODULE_AUTHOR("Joshua Morris/Philip Kelleher, IBM");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRIVER_VERSION);

/* Module option: skip MSI and fall back to legacy PCI interrupts. */
static unsigned int force_legacy = NO_LEGACY;
module_param(force_legacy, uint, 0444);
MODULE_PARM_DESC(force_legacy, "Force the use of legacy type PCI interrupts");

/* Module option: block probe() until card startup completes (default on). */
static unsigned int sync_start = 1;
module_param(sync_start, uint, 0444);
MODULE_PARM_DESC(sync_start, "On by Default: Driver load will not complete "
			     "until the card startup has completed.");

/* Allocates the per-card disk id used to name the block device. */
static DEFINE_IDA(rsxx_disk_ida);

/* --------------------Debugfs Setup ------------------- */

/*
 * debugfs "pci_regs" show routine: dump a snapshot of the card's BAR0
 * register window, one register per line. Purely diagnostic reads.
 */
static int rsxx_attr_pci_regs_show(struct seq_file *m, void *p)
{
	struct rsxx_cardinfo *card = m->private;

	seq_printf(m, "HWID 0x%08x\n",
					ioread32(card->regmap + HWID));
	seq_printf(m, "SCRATCH 0x%08x\n",
					ioread32(card->regmap + SCRATCH));
	seq_printf(m, "IER 0x%08x\n",
					ioread32(card->regmap + IER));
	seq_printf(m, "IPR 0x%08x\n",
					ioread32(card->regmap + IPR));
	seq_printf(m, "CREG_CMD 0x%08x\n",
					ioread32(card->regmap + CREG_CMD));
	seq_printf(m, "CREG_ADD 0x%08x\n",
					ioread32(card->regmap + CREG_ADD));
	seq_printf(m, "CREG_CNT 0x%08x\n",
					ioread32(card->regmap + CREG_CNT));
	seq_printf(m, "CREG_STAT 0x%08x\n",
					ioread32(card->regmap + CREG_STAT));
	seq_printf(m, "CREG_DATA0 0x%08x\n",
					ioread32(card->regmap + CREG_DATA0));
	seq_printf(m, "CREG_DATA1 0x%08x\n",
					ioread32(card->regmap + CREG_DATA1));
	seq_printf(m, "CREG_DATA2 0x%08x\n",
					ioread32(card->regmap + CREG_DATA2));
	seq_printf(m, "CREG_DATA3 0x%08x\n",
					ioread32(card->regmap + CREG_DATA3));
	seq_printf(m, "CREG_DATA4 0x%08x\n",
					ioread32(card->regmap + CREG_DATA4));
	seq_printf(m, "CREG_DATA5 0x%08x\n",
					ioread32(card->regmap + CREG_DATA5));
	seq_printf(m, "CREG_DATA6 0x%08x\n",
					ioread32(card->regmap + CREG_DATA6));
	seq_printf(m, "CREG_DATA7 0x%08x\n",
					ioread32(card->regmap + CREG_DATA7));
	seq_printf(m, "INTR_COAL 0x%08x\n",
					ioread32(card->regmap + INTR_COAL));
	seq_printf(m, "HW_ERROR 0x%08x\n",
					ioread32(card->regmap + HW_ERROR));
	seq_printf(m, "DEBUG0 0x%08x\n",
					ioread32(card->regmap + PCI_DEBUG0));
	seq_printf(m, "DEBUG1 0x%08x\n",
					ioread32(card->regmap + PCI_DEBUG1));
	seq_printf(m, "DEBUG2 0x%08x\n",
					ioread32(card->regmap + PCI_DEBUG2));
	seq_printf(m, "DEBUG3 0x%08x\n",
					ioread32(card->regmap + PCI_DEBUG3));
	seq_printf(m, "DEBUG4 0x%08x\n",
					ioread32(card->regmap + PCI_DEBUG4));
	seq_printf(m, "DEBUG5 0x%08x\n",
					ioread32(card->regmap + PCI_DEBUG5));
	seq_printf(m, "DEBUG6 0x%08x\n",
					ioread32(card->regmap + PCI_DEBUG6));
	seq_printf(m, "DEBUG7 0x%08x\n",
					ioread32(card->regmap + PCI_DEBUG7));
	seq_printf(m, "RECONFIG 0x%08x\n",
					ioread32(card->regmap + PCI_RECONFIG));

	return 0;
}

/*
 * debugfs "stats" show routine: print the per-DMA-controller software
 * counters for each of the card's n_targets channels.
 */
static int rsxx_attr_stats_show(struct seq_file *m, void *p)
{
	struct rsxx_cardinfo *card = m->private;
	int i;

	for (i = 0; i < card->n_targets; i++) {
		seq_printf(m, "Ctrl %d CRC Errors = %d\n",
				i, card->ctrl[i].stats.crc_errors);
		seq_printf(m, "Ctrl %d Hard Errors = %d\n",
				i, card->ctrl[i].stats.hard_errors);
		seq_printf(m, "Ctrl %d Soft Errors = %d\n",
				i, card->ctrl[i].stats.soft_errors);
		seq_printf(m, "Ctrl %d Writes Issued = %d\n",
				i, card->ctrl[i].stats.writes_issued);
		seq_printf(m, "Ctrl %d Writes Failed = %d\n",
				i, card->ctrl[i].stats.writes_failed);
		seq_printf(m, "Ctrl %d Reads Issued = %d\n",
				i, card->ctrl[i].stats.reads_issued);
		seq_printf(m, "Ctrl %d Reads Failed = %d\n",
				i, card->ctrl[i].stats.reads_failed);
		seq_printf(m, "Ctrl %d Reads Retried = %d\n",
				i, card->ctrl[i].stats.reads_retried);
		seq_printf(m, "Ctrl %d Discards Issued = %d\n",
				i, card->ctrl[i].stats.discards_issued);
		seq_printf(m, "Ctrl %d Discards Failed = %d\n",
				i, card->ctrl[i].stats.discards_failed);
		seq_printf(m, "Ctrl %d DMA SW Errors = %d\n",
				i, card->ctrl[i].stats.dma_sw_err);
		seq_printf(m, "Ctrl %d DMA HW Faults = %d\n",
				i, card->ctrl[i].stats.dma_hw_fault);
		seq_printf(m, "Ctrl %d DMAs Cancelled = %d\n",
				i, card->ctrl[i].stats.dma_cancelled);
		seq_printf(m, "Ctrl %d SW Queue Depth = %d\n",
				i, card->ctrl[i].stats.sw_q_depth);
		/* hw_q_depth is updated from irq context, hence atomic_read. */
		seq_printf(m, "Ctrl %d HW Queue Depth = %d\n",
				i, atomic_read(&card->ctrl[i].stats.hw_q_depth));
	}

	return 0;
}

/* single_open() adapter for the "stats" debugfs file. */
static int rsxx_attr_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, rsxx_attr_stats_show, inode->i_private);
}

/* single_open() adapter for the "pci_regs" debugfs file. */
static int rsxx_attr_pci_regs_open(struct inode *inode, struct file *file)
{
	return single_open(file, rsxx_attr_pci_regs_show, inode->i_private);
}

/*
 * debugfs "cram" read: copy @cnt bytes from the card's CRAM region
 * (starting at CREG_ADD_CRAM + *ppos) out to userspace via a bounce
 * buffer filled by a creg command.
 *
 * NOTE(review): @cnt and *ppos come straight from userspace with no
 * visible bounds check against the CRAM window — presumably
 * rsxx_creg_read() validates/limits the range; confirm before relying
 * on this for anything but debugging.
 */
static ssize_t rsxx_cram_read(struct file *fp, char __user *ubuf,
			      size_t cnt, loff_t *ppos)
{
	struct rsxx_cardinfo *card = file_inode(fp)->i_private;
	char *buf;
	int st;

	buf = kzalloc(cnt, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	st = rsxx_creg_read(card, CREG_ADD_CRAM + (u32)*ppos, cnt, buf, 1);
	if (!st) {
		if (copy_to_user(ubuf, buf, cnt))
			st = -EFAULT;
	}
	kfree(buf);
	if (st)
		return st;
	*ppos += cnt;
	return cnt;
}

/*
 * debugfs "cram" write: mirror of rsxx_cram_read() — copy the user
 * buffer in and issue a creg write to CRAM at CREG_ADD_CRAM + *ppos.
 * Same review note as the read path regarding unchecked cnt/*ppos.
 */
static ssize_t rsxx_cram_write(struct file *fp, const char __user *ubuf,
			       size_t cnt, loff_t *ppos)
{
	struct rsxx_cardinfo *card = file_inode(fp)->i_private;
	char *buf;
	ssize_t st;

	buf = memdup_user(ubuf, cnt);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	st = rsxx_creg_write(card, CREG_ADD_CRAM + (u32)*ppos, cnt, buf, 1);
	kfree(buf);
	if (st)
		return st;
	*ppos += cnt;
	return cnt;
}

static const struct file_operations debugfs_cram_fops = {
	.owner		= THIS_MODULE,
	.read		= rsxx_cram_read,
	.write		= rsxx_cram_write,
};

static const struct file_operations debugfs_stats_fops = {
	.owner		= THIS_MODULE,
	.open		= rsxx_attr_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static const struct file_operations debugfs_pci_regs_fops = {
	.owner		= THIS_MODULE,
	.open		= rsxx_attr_pci_regs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/*
 * Create the per-card debugfs directory (named after the gendisk) with
 * "stats", "pci_regs" and "cram" files. Failures are non-fatal: on any
 * error the partially-created entries are removed and debugfs_dir is
 * left NULL so removal can detect that nothing exists.
 */
static void rsxx_debugfs_dev_new(struct rsxx_cardinfo *card)
{
	struct dentry *debugfs_stats;
	struct dentry *debugfs_pci_regs;
	struct dentry *debugfs_cram;

	card->debugfs_dir = debugfs_create_dir(card->gendisk->disk_name, NULL);
	if (IS_ERR_OR_NULL(card->debugfs_dir))
		goto failed_debugfs_dir;

	debugfs_stats = debugfs_create_file("stats", 0444,
					    card->debugfs_dir, card,
					    &debugfs_stats_fops);
	if (IS_ERR_OR_NULL(debugfs_stats))
		goto failed_debugfs_stats;

	debugfs_pci_regs = debugfs_create_file("pci_regs", 0444,
					       card->debugfs_dir, card,
					       &debugfs_pci_regs_fops);
	if (IS_ERR_OR_NULL(debugfs_pci_regs))
		goto failed_debugfs_pci_regs;

	debugfs_cram = debugfs_create_file("cram", 0644,
					   card->debugfs_dir, card,
					   &debugfs_cram_fops);
	if (IS_ERR_OR_NULL(debugfs_cram))
		goto failed_debugfs_cram;

	return;
failed_debugfs_cram:
	debugfs_remove(debugfs_pci_regs);
failed_debugfs_pci_regs:
	debugfs_remove(debugfs_stats);
failed_debugfs_stats:
	debugfs_remove(card->debugfs_dir);
failed_debugfs_dir:
	card->debugfs_dir = NULL;
}

/*----------------- Interrupt Control & Handling -------------------*/

/*
 * Clear both software masks so rsxx_isr() ignores everything. Used when
 * entering EEH-frozen state; does not touch the hardware IER register
 * (the device may be inaccessible at that point).
 */
static void rsxx_mask_interrupts(struct rsxx_cardinfo *card)
{
	card->isr_mask = 0;
	card->ier_mask = 0;
}

static void __enable_intr(unsigned int *mask, unsigned int intr)
{
	*mask |= intr;
}

static void __disable_intr(unsigned int *mask, unsigned int intr)
{
	*mask &= ~intr;
}

/*
 * NOTE: Disabling the IER will disable the hardware interrupt.
 * Disabling the ISR will disable the software handling of the ISR bit.
 *
 * Enable/Disable interrupt functions assume the card->irq_lock
 * is held by the caller.
 */
void rsxx_enable_ier(struct rsxx_cardinfo *card, unsigned int intr)
{
	/* Never re-enable hw interrupts on a halted or EEH-frozen card. */
	if (unlikely(card->halt) ||
	    unlikely(card->eeh_state))
		return;

	__enable_intr(&card->ier_mask, intr);
	iowrite32(card->ier_mask, card->regmap + IER);
}

void rsxx_disable_ier(struct rsxx_cardinfo *card, unsigned int intr)
{
	/* MMIO is off-limits during EEH recovery; mask update would be lost. */
	if (unlikely(card->eeh_state))
		return;

	__disable_intr(&card->ier_mask, intr);
	iowrite32(card->ier_mask, card->regmap + IER);
}

void rsxx_enable_ier_and_isr(struct rsxx_cardinfo *card,
			     unsigned int intr)
{
	if (unlikely(card->halt) ||
	    unlikely(card->eeh_state))
		return;

	__enable_intr(&card->isr_mask, intr);
	__enable_intr(&card->ier_mask, intr);
	iowrite32(card->ier_mask, card->regmap + IER);
}

void rsxx_disable_ier_and_isr(struct rsxx_cardinfo *card,
			      unsigned int intr)
{
	if (unlikely(card->eeh_state))
		return;

	__disable_intr(&card->isr_mask, intr);
	__disable_intr(&card->ier_mask, intr);
	iowrite32(card->ier_mask, card->regmap + IER);
}

/*
 * Main interrupt handler. Reads ISR, dispatches DMA-done, creg-done and
 * event work to their workqueues, and loops (reread_isr) whenever a DMA
 * interrupt was masked during the pass, since masking can race with a
 * newly asserted bit.
 */
static irqreturn_t rsxx_isr(int irq, void *pdata)
{
	struct rsxx_cardinfo *card = pdata;
	unsigned int isr;
	int handled = 0;
	int reread_isr;
	int i;

	spin_lock(&card->irq_lock);

	do {
		reread_isr = 0;

		if (unlikely(card->eeh_state))
			break;

		isr = ioread32(card->regmap + ISR);
		if (isr == 0xffffffff) {
			/*
			 * A few systems seem to have an intermittent issue
			 * where PCI reads return all Fs, but retrying the read
			 * a little later will return as expected.
			 */
			dev_info(CARD_TO_DEV(card),
				"ISR = 0xFFFFFFFF, retrying later\n");
			break;
		}

		/* Only honor bits the software mask says we care about. */
		isr &= card->isr_mask;
		if (!isr)
			break;

		for (i = 0; i < card->n_targets; i++) {
			if (isr & CR_INTR_DMA(i)) {
				if (card->ier_mask & CR_INTR_DMA(i)) {
					rsxx_disable_ier(card, CR_INTR_DMA(i));
					reread_isr = 1;
				}
				queue_work(card->ctrl[i].done_wq,
					   &card->ctrl[i].dma_done_work);
				handled++;
			}
		}

		if (isr & CR_INTR_CREG) {
			queue_work(card->creg_ctrl.creg_wq,
				   &card->creg_ctrl.done_work);
			handled++;
		}

		if (isr & CR_INTR_EVENT) {
			queue_work(card->event_wq, &card->event_work);
			/* Re-enabled by card_event_handler() once it runs. */
			rsxx_disable_ier_and_isr(card, CR_INTR_EVENT);
			handled++;
		}
	} while (reread_isr);

	spin_unlock(&card->irq_lock);

	return handled ? IRQ_HANDLED : IRQ_NONE;
}

/*----------------- Card Event Handler -------------------*/
/*
 * Map a CARD_STATE_* value to a human-readable name. States are one-hot
 * bit flags, so ffs() yields a 1-based index; a state of 0 maps to
 * index 0, "Unknown".
 *
 * NOTE(review): an unexpected state bit above the highest known one
 * would index past the table — presumably the hardware only ever
 * reports the defined states; confirm against the creg spec.
 */
static const char * const rsxx_card_state_to_str(unsigned int state)
{
	static const char * const state_strings[] = {
		"Unknown", "Shutdown", "Starting", "Formatting",
		"Uninitialized", "Good", "Shutting Down",
		"Fault", "Read Only Fault", "dStroying"
	};

	return state_strings[ffs(state)];
}

/*
 * React to a hardware-reported state transition: update card->state and
 * grow/shrink the gendisk capacity accordingly. Skipped entirely when
 * the card config is invalid, so no DMA interfaces get attached.
 */
static void card_state_change(struct rsxx_cardinfo *card,
			      unsigned int new_state)
{
	int st;

	dev_info(CARD_TO_DEV(card),
		"card state change detected.(%s -> %s)\n",
		rsxx_card_state_to_str(card->state),
		rsxx_card_state_to_str(new_state));

	card->state = new_state;

	/* Don't attach DMA interfaces if the card has an invalid config */
	if (!card->config_valid)
		return;

	switch (new_state) {
	case CARD_STATE_RD_ONLY_FAULT:
		dev_crit(CARD_TO_DEV(card),
			"Hardware has entered read-only mode!\n");
		/*
		 * Fall through so the DMA devices can be attached and
		 * the user can attempt to pull off their data.
		 */
		fallthrough;
	case CARD_STATE_GOOD:
		st = rsxx_get_card_size8(card, &card->size8);
		if (st)
			dev_err(CARD_TO_DEV(card),
				"Failed attaching DMA devices\n");

		if (card->config_valid)
			set_capacity(card->gendisk, card->size8 >> 9);
		break;

	case CARD_STATE_FAULT:
		dev_crit(CARD_TO_DEV(card),
			"Hardware Fault reported!\n");
		fallthrough;

	/* Everything else, detach DMA interface if it's attached. */
	case CARD_STATE_SHUTDOWN:
	case CARD_STATE_STARTING:
	case CARD_STATE_FORMATTING:
	case CARD_STATE_UNINITIALIZED:
	case CARD_STATE_SHUTTING_DOWN:
	/*
	 * dStroy is a term coined by marketing to represent the low level
	 * secure erase.
	 */
	case CARD_STATE_DSTROYING:
		set_capacity(card->gendisk, 0);
		break;
	}
}

/*
 * Workqueue handler kicked by rsxx_isr() on CR_INTR_EVENT: re-arm the
 * event interrupt, query the new card state, apply any state change,
 * and drain the hardware log if one is pending.
 */
static void card_event_handler(struct work_struct *work)
{
	struct rsxx_cardinfo *card;
	unsigned int state;
	unsigned long flags;
	int st;

	card = container_of(work, struct rsxx_cardinfo, event_work);

	if (unlikely(card->halt))
		return;

	/*
	 * Enable the interrupt now to avoid any weird race conditions where a
	 * state change might occur while rsxx_get_card_state() is
	 * processing a returned creg cmd.
	 */
	spin_lock_irqsave(&card->irq_lock, flags);
	rsxx_enable_ier_and_isr(card, CR_INTR_EVENT);
	spin_unlock_irqrestore(&card->irq_lock, flags);

	st = rsxx_get_card_state(card, &state);
	if (st) {
		dev_info(CARD_TO_DEV(card),
			"Failed reading state after event.\n");
		return;
	}

	if (card->state != state)
		card_state_change(card, state);

	if (card->creg_ctrl.creg_stats.stat & CREG_STAT_LOG_PENDING)
		rsxx_read_hw_log(card);
}

/*----------------- Card Operations -------------------*/
/*
 * Issue a clean shutdown to the card and wait (up to 120s per phase)
 * for it to first leave STARTING and then reach SHUTDOWN. Returns 0 on
 * success, -ETIMEDOUT if either wait expires, or a creg error code.
 *
 * NOTE(review): the waits use raw "jiffies - start < timeout" rather
 * than time_before(); equivalent for these spans but worth confirming
 * against the kernel's jiffies-wrap guidance.
 */
static int card_shutdown(struct rsxx_cardinfo *card)
{
	unsigned int state;
	signed long start;
	const int timeout = msecs_to_jiffies(120000);
	int st;

	/* We can't issue a shutdown if the card is in a transition state */
	start = jiffies;
	do {
		st = rsxx_get_card_state(card, &state);
		if (st)
			return st;
	} while (state == CARD_STATE_STARTING &&
		 (jiffies - start < timeout));

	if (state == CARD_STATE_STARTING)
		return -ETIMEDOUT;

	/* Only issue a shutdown if we need to */
	if ((state != CARD_STATE_SHUTTING_DOWN) &&
	    (state != CARD_STATE_SHUTDOWN)) {
		st = rsxx_issue_card_cmd(card, CARD_CMD_SHUTDOWN);
		if (st)
			return st;
	}

	start = jiffies;
	do {
		st = rsxx_get_card_state(card, &state);
		if (st)
			return st;
	} while (state != CARD_STATE_SHUTDOWN &&
		 (jiffies - start < timeout));

	if (state != CARD_STATE_SHUTDOWN)
		return -ETIMEDOUT;

	return 0;
}

/*
 * EEH: the slot is frozen and about to be reset. Mask interrupts, save
 * all in-flight DMAs and creg commands for replay after reset, and free
 * the per-channel status/command buffers (they are re-allocated in
 * rsxx_slot_reset()).
 */
static int rsxx_eeh_frozen(struct pci_dev *dev)
{
	struct rsxx_cardinfo *card = pci_get_drvdata(dev);
	int i;
	int st;

	dev_warn(&dev->dev, "IBM Flash Adapter PCI: preparing for slot reset.\n");

	card->eeh_state = 1;
	rsxx_mask_interrupts(card);

	/*
	 * We need to guarantee that the write for eeh_state and masking
	 * interrupts does not become reordered. This will prevent a possible
	 * race condition with the EEH code.
	 */
	wmb();

	pci_disable_device(dev);

	st = rsxx_eeh_save_issued_dmas(card);
	if (st)
		return st;

	rsxx_eeh_save_issued_creg(card);

	for (i = 0; i < card->n_targets; i++) {
		if (card->ctrl[i].status.buf)
			dma_free_coherent(&card->dev->dev,
					  STATUS_BUFFER_SIZE8,
					  card->ctrl[i].status.buf,
					  card->ctrl[i].status.dma_addr);
		if (card->ctrl[i].cmd.buf)
			dma_free_coherent(&card->dev->dev,
					  COMMAND_BUFFER_SIZE8,
					  card->ctrl[i].cmd.buf,
					  card->ctrl[i].cmd.dma_addr);
	}

	return 0;
}

/*
 * EEH: recovery is impossible. Halt the card and complete/cancel every
 * queued or issued DMA so upper layers see their I/O fail instead of
 * hanging forever.
 */
static void rsxx_eeh_failure(struct pci_dev *dev)
{
	struct rsxx_cardinfo *card = pci_get_drvdata(dev);
	int i;
	int cnt = 0;

	dev_err(&dev->dev, "IBM Flash Adapter PCI: disabling failed card.\n");

	card->eeh_state = 1;
	card->halt = 1;

	for (i = 0; i < card->n_targets; i++) {
		spin_lock_bh(&card->ctrl[i].queue_lock);
		cnt = rsxx_cleanup_dma_queue(&card->ctrl[i],
					     &card->ctrl[i].queue,
					     COMPLETE_DMA);
		spin_unlock_bh(&card->ctrl[i].queue_lock);

		cnt += rsxx_dma_cancel(&card->ctrl[i]);

		if (cnt)
			dev_info(CARD_TO_DEV(card),
				"Freed %d queued DMAs on channel %d\n",
				cnt, card->ctrl[i].id);
	}
}

/*
 * Poll (up to ~10s) for the hardware to finish its internal FIFO flush
 * after a slot reset. Returns 0 once RSXX_FLUSH_BUSY clears, -1 if the
 * hardware never finishes resetting.
 */
static int rsxx_eeh_fifo_flush_poll(struct rsxx_cardinfo *card)
{
	unsigned int status;
	int iter = 0;

	/* We need to wait for the hardware to reset */
	while (iter++ < 10) {
		status = ioread32(card->regmap + PCI_RECONFIG);

		if (status & RSXX_FLUSH_BUSY) {
			ssleep(1);
			continue;
		}

		if (status & RSXX_FLUSH_TIMEOUT)
			dev_warn(CARD_TO_DEV(card), "HW: flash controller timeout\n");
		return 0;
	}

	/* Hardware failed resetting itself. */
	return -1;
}

/*
 * pci_error_handlers.error_detected: decide how to react to a PCI
 * channel error. Cards older than RSXX_EEH_SUPPORT can't recover;
 * permanent failures and failed freeze-prep disconnect the device;
 * otherwise request a slot reset.
 */
static pci_ers_result_t rsxx_error_detected(struct pci_dev *dev,
					    pci_channel_state_t error)
{
	int st;

	if (dev->revision < RSXX_EEH_SUPPORT)
		return PCI_ERS_RESULT_NONE;

	if (error == pci_channel_io_perm_failure) {
		rsxx_eeh_failure(dev);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	st = rsxx_eeh_frozen(dev);
	if (st) {
		dev_err(&dev->dev, "Slot reset setup failed\n");
		rsxx_eeh_failure(dev);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_NEED_RESET;
}

/*
 * pci_error_handlers.slot_reset: bring the card back after an EEH slot
 * reset — re-enable the device, wait for the hardware flush, rebuild
 * the DMA rings, re-enable interrupts, and re-kick the creg and DMA
 * queues that were saved by rsxx_eeh_frozen().
 */
static pci_ers_result_t rsxx_slot_reset(struct pci_dev *dev)
{
	struct rsxx_cardinfo *card = pci_get_drvdata(dev);
	unsigned long flags;
	int i;
	int st;

	dev_warn(&dev->dev,
		"IBM Flash Adapter PCI: recovering from slot reset.\n");

	st = pci_enable_device(dev);
	if (st)
		goto failed_hw_setup;

	pci_set_master(dev);

	st = rsxx_eeh_fifo_flush_poll(card);
	if (st)
		goto failed_hw_setup;

	rsxx_dma_queue_reset(card);

	for (i = 0; i < card->n_targets; i++) {
		st = rsxx_hw_buffers_init(dev, &card->ctrl[i]);
		if (st)
			goto failed_hw_buffers_init;
	}

	if (card->config_valid)
		rsxx_dma_configure(card);

	/* Clears the ISR register from spurious interrupts */
	st = ioread32(card->regmap + ISR);

	card->eeh_state = 0;

	spin_lock_irqsave(&card->irq_lock, flags);
	/*
	 * NOTE(review): bitwise AND with RSXX_MAX_TARGETS looks odd for a
	 * "which interrupt set" decision — presumably it distinguishes the
	 * 8-target ("G") from the 4-target ("C") card layout; confirm the
	 * intended test (== vs &) against rsxx_priv.h.
	 */
	if (card->n_targets & RSXX_MAX_TARGETS)
		rsxx_enable_ier_and_isr(card, CR_INTR_ALL_G);
	else
		rsxx_enable_ier_and_isr(card, CR_INTR_ALL_C);
	spin_unlock_irqrestore(&card->irq_lock, flags);

	rsxx_kick_creg_queue(card);

	for (i = 0; i < card->n_targets; i++) {
		spin_lock(&card->ctrl[i].queue_lock);
		if (list_empty(&card->ctrl[i].queue)) {
			spin_unlock(&card->ctrl[i].queue_lock);
			continue;
		}
		spin_unlock(&card->ctrl[i].queue_lock);

		queue_work(card->ctrl[i].issue_wq,
			   &card->ctrl[i].issue_dma_work);
	}

	dev_info(&dev->dev, "IBM Flash Adapter PCI: recovery complete.\n");

	return PCI_ERS_RESULT_RECOVERED;

failed_hw_buffers_init:
	for (i = 0; i < card->n_targets; i++) {
		if (card->ctrl[i].status.buf)
			dma_free_coherent(&card->dev->dev,
					  STATUS_BUFFER_SIZE8,
					  card->ctrl[i].status.buf,
					  card->ctrl[i].status.dma_addr);
		if (card->ctrl[i].cmd.buf)
			dma_free_coherent(&card->dev->dev,
					  COMMAND_BUFFER_SIZE8,
					  card->ctrl[i].cmd.buf,
					  card->ctrl[i].cmd.dma_addr);
	}
failed_hw_setup:
	rsxx_eeh_failure(dev);
	return PCI_ERS_RESULT_DISCONNECT;

}

/*----------------- Driver Initialization & Setup -------------------*/
/* Returns: 0 if the driver is compatible with the device
	   -1 if the driver is NOT compatible with the device */
static int rsxx_compatibility_check(struct rsxx_cardinfo *card)
{
	unsigned char pci_rev;

	pci_read_config_byte(card->dev, PCI_REVISION_ID, &pci_rev);

	if (pci_rev > RS70_PCI_REV_SUPPORTED)
		return -1;
	return 0;
}

/*
 * PCI probe: full bring-up of a discovered card — PCI/DMA/BAR setup,
 * interrupt registration, creg interface, DMA engine, event handler,
 * block device, optional synchronous startup wait, and debugfs.
 * Teardown on error unwinds in reverse via the failed_* labels.
 */
static int rsxx_pci_probe(struct pci_dev *dev,
			  const struct pci_device_id *id)
{
	struct rsxx_cardinfo *card;
	int st;
	unsigned int sync_timeout;

	dev_info(&dev->dev, "PCI-Flash SSD discovered\n");

	card = kzalloc(sizeof(*card), GFP_KERNEL);
	if (!card)
		return -ENOMEM;

	card->dev = dev;
	pci_set_drvdata(dev, card);

	st = ida_alloc(&rsxx_disk_ida, GFP_KERNEL);
	if (st < 0)
		goto failed_ida_get;
	card->disk_id = st;

	st = pci_enable_device(dev);
	if (st)
		goto failed_enable;

	pci_set_master(dev);

	st = dma_set_mask(&dev->dev, DMA_BIT_MASK(64));
	if (st) {
		dev_err(CARD_TO_DEV(card),
			"No usable DMA configuration,aborting\n");
		goto failed_dma_mask;
	}

	st = pci_request_regions(dev, DRIVER_NAME);
	if (st) {
		dev_err(CARD_TO_DEV(card),
			"Failed to request memory region\n");
		goto failed_request_regions;
	}

	if (pci_resource_len(dev, 0) == 0) {
		dev_err(CARD_TO_DEV(card), "BAR0 has length 0!\n");
		st = -ENOMEM;
		goto failed_iomap;
	}

	card->regmap = pci_iomap(dev, 0, 0);
	if (!card->regmap) {
		dev_err(CARD_TO_DEV(card), "Failed to map BAR0\n");
		st = -ENOMEM;
		goto failed_iomap;
	}

	spin_lock_init(&card->irq_lock);
	card->halt = 0;
	card->eeh_state = 0;

	/* Start with everything masked until the handlers are in place. */
	spin_lock_irq(&card->irq_lock);
	rsxx_disable_ier_and_isr(card, CR_INTR_ALL);
	spin_unlock_irq(&card->irq_lock);

	if (!force_legacy) {
		st = pci_enable_msi(dev);
		if (st)
			dev_warn(CARD_TO_DEV(card),
				"Failed to enable MSI\n");
	}

	st = request_irq(dev->irq, rsxx_isr, IRQF_SHARED,
			 DRIVER_NAME, card);
	if (st) {
		dev_err(CARD_TO_DEV(card),
			"Failed requesting IRQ%d\n", dev->irq);
		goto failed_irq;
	}

	/************* Setup Processor Command Interface *************/
	st = rsxx_creg_setup(card);
	if (st) {
		dev_err(CARD_TO_DEV(card), "Failed to setup creg interface.\n");
		goto failed_creg_setup;
	}

	spin_lock_irq(&card->irq_lock);
	rsxx_enable_ier_and_isr(card, CR_INTR_CREG);
	spin_unlock_irq(&card->irq_lock);

	st = rsxx_compatibility_check(card);
	if (st) {
		dev_warn(CARD_TO_DEV(card),
			"Incompatible driver detected. Please update the driver.\n");
		st = -EINVAL;
		goto failed_compatiblity_check;
	}

	/************* Load Card Config *************/
	/* Config load failure is tolerated; DMA attach is skipped later. */
	st = rsxx_load_config(card);
	if (st)
		dev_err(CARD_TO_DEV(card),
			"Failed loading card config\n");

	/************* Setup DMA Engine *************/
	st = rsxx_get_num_targets(card, &card->n_targets);
	if (st)
		dev_info(CARD_TO_DEV(card),
			"Failed reading the number of DMA targets\n");

	card->ctrl = kcalloc(card->n_targets, sizeof(*card->ctrl),
			     GFP_KERNEL);
	if (!card->ctrl) {
		st = -ENOMEM;
		goto failed_dma_setup;
	}

	st = rsxx_dma_setup(card);
	if (st) {
		dev_info(CARD_TO_DEV(card),
			"Failed to setup DMA engine\n");
		goto failed_dma_setup;
	}

	/************* Setup Card Event Handler *************/
	card->event_wq = create_singlethread_workqueue(DRIVER_NAME"_event");
	if (!card->event_wq) {
		dev_err(CARD_TO_DEV(card), "Failed card event setup.\n");
		st = -ENOMEM;
		goto failed_event_handler;
	}

	INIT_WORK(&card->event_work, card_event_handler);

	st = rsxx_setup_dev(card);
	if (st)
		goto failed_create_dev;

	rsxx_get_card_state(card, &card->state);

	dev_info(CARD_TO_DEV(card),
		"card state: %s\n",
		rsxx_card_state_to_str(card->state));

	/*
	 * Now that the DMA Engine and devices have been setup,
	 * we can enable the event interrupt(it kicks off actions in
	 * those layers so we couldn't enable it right away.)
	 */
	spin_lock_irq(&card->irq_lock);
	rsxx_enable_ier_and_isr(card, CR_INTR_EVENT);
	spin_unlock_irq(&card->irq_lock);

	if (card->state == CARD_STATE_SHUTDOWN) {
		st = rsxx_issue_card_cmd(card, CARD_CMD_STARTUP);
		if (st)
			dev_crit(CARD_TO_DEV(card),
				"Failed issuing card startup\n");
		if (sync_start) {
			sync_timeout = SYNC_START_TIMEOUT;

			dev_info(CARD_TO_DEV(card),
				 "Waiting for card to startup\n");

			do {
				ssleep(1);
				sync_timeout--;

				rsxx_get_card_state(card, &card->state);
			} while (sync_timeout &&
				(card->state == CARD_STATE_STARTING));

			if (card->state == CARD_STATE_STARTING) {
				dev_warn(CARD_TO_DEV(card),
					 "Card startup timed out\n");
				card->size8 = 0;
			} else {
				dev_info(CARD_TO_DEV(card),
					"card state: %s\n",
					rsxx_card_state_to_str(card->state));
				st = rsxx_get_card_size8(card, &card->size8);
				if (st)
					card->size8 = 0;
			}
		}
	} else if (card->state == CARD_STATE_GOOD ||
		   card->state == CARD_STATE_RD_ONLY_FAULT) {
		st = rsxx_get_card_size8(card, &card->size8);
		if (st)
			card->size8 = 0;
	}

	rsxx_attach_dev(card);

	/************* Setup Debugfs *************/
	rsxx_debugfs_dev_new(card);

	return 0;

failed_create_dev:
	destroy_workqueue(card->event_wq);
	card->event_wq = NULL;
failed_event_handler:
	rsxx_dma_destroy(card);
failed_dma_setup:
failed_compatiblity_check:
	destroy_workqueue(card->creg_ctrl.creg_wq);
	card->creg_ctrl.creg_wq = NULL;
failed_creg_setup:
	spin_lock_irq(&card->irq_lock);
	rsxx_disable_ier_and_isr(card, CR_INTR_ALL);
	spin_unlock_irq(&card->irq_lock);
	free_irq(dev->irq, card);
	if (!force_legacy)
		pci_disable_msi(dev);
failed_irq:
	pci_iounmap(dev, card->regmap);
failed_iomap:
	pci_release_regions(dev);
failed_request_regions:
failed_dma_mask:
	pci_disable_device(dev);
failed_enable:
	ida_free(&rsxx_disk_ida, card->disk_id);
failed_ida_get:
	kfree(card);

	return st;
}

/*
 * PCI remove: mirror of probe. Detach the block device, quiesce per-
 * channel DMA interrupts, issue a card shutdown, sync outstanding event
 * work, then tear everything down in reverse order of creation.
 */
static void rsxx_pci_remove(struct pci_dev *dev)
{
	struct rsxx_cardinfo *card = pci_get_drvdata(dev);
	unsigned long flags;
	int st;
	int i;

	if (!card)
		return;

	dev_info(CARD_TO_DEV(card),
		"Removing PCI-Flash SSD.\n");

	rsxx_detach_dev(card);

	for (i = 0; i < card->n_targets; i++) {
		spin_lock_irqsave(&card->irq_lock, flags);
		rsxx_disable_ier_and_isr(card, CR_INTR_DMA(i));
		spin_unlock_irqrestore(&card->irq_lock, flags);
	}

	st = card_shutdown(card);
	if (st)
		dev_crit(CARD_TO_DEV(card), "Shutdown failed!\n");

	/* Sync outstanding event handlers. */
	spin_lock_irqsave(&card->irq_lock, flags);
	rsxx_disable_ier_and_isr(card, CR_INTR_EVENT);
	spin_unlock_irqrestore(&card->irq_lock, flags);

	cancel_work_sync(&card->event_work);

	destroy_workqueue(card->event_wq);
	rsxx_destroy_dev(card);
	rsxx_dma_destroy(card);
	destroy_workqueue(card->creg_ctrl.creg_wq);

	spin_lock_irqsave(&card->irq_lock, flags);
	rsxx_disable_ier_and_isr(card, CR_INTR_ALL);
	spin_unlock_irqrestore(&card->irq_lock, flags);

	/* Prevent work_structs from re-queuing themselves. */
	card->halt = 1;

	debugfs_remove_recursive(card->debugfs_dir);

	free_irq(dev->irq, card);

	if (!force_legacy)
		pci_disable_msi(dev);

	rsxx_creg_destroy(card);

	pci_iounmap(dev, card->regmap);

	pci_disable_device(dev);
	pci_release_regions(dev);

	ida_free(&rsxx_disk_ida, card->disk_id);
	kfree(card);
}

static int rsxx_pci_suspend(struct pci_dev *dev, pm_message_t state)
{
	/* We don't support suspend at this time. */
	return -ENOSYS;
}

/*
 * PCI shutdown callback (reboot/poweroff): best-effort quiesce — detach
 * the block device, mask DMA interrupts, and ask the card to shut down.
 * Unlike remove, no resources are freed and errors are ignored.
 */
static void rsxx_pci_shutdown(struct pci_dev *dev)
{
	struct rsxx_cardinfo *card = pci_get_drvdata(dev);
	unsigned long flags;
	int i;

	if (!card)
		return;

	dev_info(CARD_TO_DEV(card), "Shutting down PCI-Flash SSD.\n");

	rsxx_detach_dev(card);

	for (i = 0; i < card->n_targets; i++) {
		spin_lock_irqsave(&card->irq_lock, flags);
		rsxx_disable_ier_and_isr(card, CR_INTR_DMA(i));
		spin_unlock_irqrestore(&card->irq_lock, flags);
	}

	card_shutdown(card);
}

static const struct pci_error_handlers rsxx_err_handler = {
	.error_detected = rsxx_error_detected,
	.slot_reset	= rsxx_slot_reset,
};

static const struct pci_device_id rsxx_pci_ids[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_FS70_FLASH)},
	{PCI_DEVICE(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_FS80_FLASH)},
	{0,},
};

MODULE_DEVICE_TABLE(pci, rsxx_pci_ids);

static struct pci_driver rsxx_pci_driver = {
	.name		= DRIVER_NAME,
	.id_table	= rsxx_pci_ids,
	.probe		= rsxx_pci_probe,
	.remove		= rsxx_pci_remove,
	.suspend	= rsxx_pci_suspend,
	.shutdown	= rsxx_pci_shutdown,
	.err_handler	= &rsxx_err_handler,
};

/*
 * Module init: bring up the dev, dma and creg sub-modules in order,
 * then register the PCI driver. Each failure unwinds the layers that
 * succeeded before it.
 */
static int __init rsxx_core_init(void)
{
	int st;

	st = rsxx_dev_init();
	if (st)
		return st;

	st = rsxx_dma_init();
	if (st)
		goto dma_init_failed;

	st = rsxx_creg_init();
	if (st)
		goto creg_init_failed;

	return pci_register_driver(&rsxx_pci_driver);

creg_init_failed:
	rsxx_dma_cleanup();
dma_init_failed:
	rsxx_dev_cleanup();

	return st;
}

/* Module exit: unregister the driver, then tear sub-modules down in
 * reverse init order. */
static void __exit rsxx_core_cleanup(void)
{
	pci_unregister_driver(&rsxx_pci_driver);
	rsxx_creg_cleanup();
	rsxx_dma_cleanup();
	rsxx_dev_cleanup();
}

module_init(rsxx_core_init);
module_exit(rsxx_core_cleanup);