/*
 * IDE I/O functions
 *
 * Basic PIO and command management functionality.
 *
 * This code was split off from ide.c. See ide.c for history and original
 * copyrights.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * For the avoidance of doubt the "preferred form" of this code is one which
 * is in an open non patent encumbered format. Where cryptographic key signing
 * forms part of the process of creating an executable the information
 * including keys needed to generate an equivalently functional executable
 * are deemed to be part of the source code.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/major.h>
#include <linux/errno.h>
#include <linux/genhd.h>
#include <linux/blkpg.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/ide.h>
#include <linux/completion.h>
#include <linux/reboot.h>
#include <linux/cdrom.h>
#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/kmod.h>
#include <linux/scatterlist.h>
#include <linux/bitops.h>

#include <asm/byteorder.h>
#include <asm/irq.h>
#include <linux/uaccess.h>
#include <asm/io.h>

int ide_end_rq(ide_drive_t *drive, struct request *rq, blk_status_t error,
	       unsigned int nr_bytes)
{
	/*
	 * decide whether to reenable DMA -- 3 is a random magic for now,
	 * if we DMA timeout more than 3 times, just stay in PIO
	 */
	if ((drive->dev_flags & IDE_DFLAG_DMA_PIO_RETRY) &&
	    drive->retry_pio <= 3) {
		drive->dev_flags &= ~IDE_DFLAG_DMA_PIO_RETRY;
		ide_dma_on(drive);
	}

	if (!blk_update_request(rq, error, nr_bytes)) {
		if (rq == drive->sense_rq) {
			drive->sense_rq = NULL;
			drive->sense_rq_active = false;
		}

		__blk_mq_end_request(rq, error);
		return 0;
	}

	return 1;
}
EXPORT_SYMBOL_GPL(ide_end_rq);

void ide_complete_cmd(ide_drive_t *drive, struct ide_cmd *cmd, u8 stat, u8 err)
{
	const struct ide_tp_ops *tp_ops = drive->hwif->tp_ops;
	struct ide_taskfile *tf = &cmd->tf;
	struct request *rq = cmd->rq;
	u8 tf_cmd = tf->command;

	tf->error = err;
	tf->status = stat;

	if (cmd->ftf_flags & IDE_FTFLAG_IN_DATA) {
		u8 data[2];

		tp_ops->input_data(drive, cmd, data, 2);

		cmd->tf.data = data[0];
		cmd->hob.data = data[1];
	}

	ide_tf_readback(drive, cmd);

	if ((cmd->tf_flags & IDE_TFLAG_CUSTOM_HANDLER) &&
	    tf_cmd == ATA_CMD_IDLEIMMEDIATE) {
		if (tf->lbal != 0xc4) {
			printk(KERN_ERR "%s: head unload failed!\n",
			       drive->name);
			ide_tf_dump(drive->name, cmd);
		} else
			drive->dev_flags |= IDE_DFLAG_PARKED;
	}

	if (rq && ata_taskfile_request(rq)) {
		struct ide_cmd *orig_cmd = ide_req(rq)->special;

		if (cmd->tf_flags & IDE_TFLAG_DYN)
			kfree(orig_cmd);
		else if (cmd != orig_cmd)
			memcpy(orig_cmd, cmd, sizeof(*cmd));
	}
}

int ide_complete_rq(ide_drive_t *drive, blk_status_t error,
		    unsigned int nr_bytes)
{
	ide_hwif_t *hwif = drive->hwif;
	struct request *rq = hwif->rq;
	int rc;

	/*
	 * if failfast is set on a request, override number of sectors
	 * and complete the whole request right now
	 */
	if (blk_noretry_request(rq) && error)
		nr_bytes = blk_rq_sectors(rq) << 9;

	rc = ide_end_rq(drive, rq, error, nr_bytes);
	if (rc == 0)
		hwif->rq = NULL;

	return rc;
}
EXPORT_SYMBOL(ide_complete_rq);

void ide_kill_rq(ide_drive_t *drive, struct request *rq)
{
	u8 drv_req = ata_misc_request(rq) && rq->rq_disk;
	u8 media = drive->media;

	drive->failed_pc = NULL;

	if ((media == ide_floppy || media == ide_tape) && drv_req) {
		scsi_req(rq)->result = 0;
	} else {
		if (media == ide_tape)
			scsi_req(rq)->result = IDE_DRV_ERROR_GENERAL;
		else if (blk_rq_is_passthrough(rq) && scsi_req(rq)->result == 0)
			scsi_req(rq)->result = -EIO;
	}

	ide_complete_rq(drive, BLK_STS_IOERR, blk_rq_bytes(rq));
}

static void ide_tf_set_specify_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
{
	tf->nsect = drive->sect;
	tf->lbal = drive->sect;
	tf->lbam = drive->cyl;
	tf->lbah = drive->cyl >> 8;
	tf->device = (drive->head - 1) | drive->select;
	tf->command = ATA_CMD_INIT_DEV_PARAMS;
}

static void ide_tf_set_restore_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
{
	tf->nsect = drive->sect;
	tf->command = ATA_CMD_RESTORE;
}

static void ide_tf_set_setmult_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
{
	tf->nsect = drive->mult_req;
	tf->command = ATA_CMD_SET_MULTI;
}

/**
 * do_special - issue some special commands
 * @drive: drive the command is for
 *
 * do_special() is used to issue ATA_CMD_INIT_DEV_PARAMS,
 * ATA_CMD_RESTORE and ATA_CMD_SET_MULTI commands to a drive.
 */

static ide_startstop_t do_special(ide_drive_t *drive)
{
	struct ide_cmd cmd;

#ifdef DEBUG
	printk(KERN_DEBUG "%s: %s: 0x%02x\n", drive->name, __func__,
	       drive->special_flags);
#endif
	if (drive->media != ide_disk) {
		drive->special_flags = 0;
		drive->mult_req = 0;
		return ide_stopped;
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.protocol = ATA_PROT_NODATA;

	if (drive->special_flags & IDE_SFLAG_SET_GEOMETRY) {
		drive->special_flags &= ~IDE_SFLAG_SET_GEOMETRY;
		ide_tf_set_specify_cmd(drive, &cmd.tf);
	} else if (drive->special_flags & IDE_SFLAG_RECALIBRATE) {
		drive->special_flags &= ~IDE_SFLAG_RECALIBRATE;
		ide_tf_set_restore_cmd(drive, &cmd.tf);
	} else if (drive->special_flags & IDE_SFLAG_SET_MULTMODE) {
		drive->special_flags &= ~IDE_SFLAG_SET_MULTMODE;
		ide_tf_set_setmult_cmd(drive, &cmd.tf);
	} else
		BUG();

	cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
	cmd.valid.in.tf  = IDE_VALID_IN_TF  | IDE_VALID_DEVICE;
	cmd.tf_flags = IDE_TFLAG_CUSTOM_HANDLER;

	do_rw_taskfile(drive, &cmd);

	return ide_started;
}

void ide_map_sg(ide_drive_t *drive, struct ide_cmd *cmd)
{
	ide_hwif_t *hwif = drive->hwif;
	struct scatterlist *sg = hwif->sg_table, *last_sg = NULL;
	struct request *rq = cmd->rq;

	cmd->sg_nents = __blk_rq_map_sg(drive->queue, rq, sg, &last_sg);
	if (blk_rq_bytes(rq) && (blk_rq_bytes(rq) & rq->q->dma_pad_mask))
		last_sg->length +=
			(rq->q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;
}
EXPORT_SYMBOL_GPL(ide_map_sg);

void ide_init_sg_cmd(struct ide_cmd *cmd, unsigned int nr_bytes)
{
	cmd->nbytes = cmd->nleft = nr_bytes;
	cmd->cursg_ofs = 0;
	cmd->cursg = NULL;
}
EXPORT_SYMBOL_GPL(ide_init_sg_cmd);

/**
 * execute_drive_cmd - issue special drive command
 * @drive: the drive to issue the command on
 * @rq: the request structure holding the command
 *
 * execute_drive_cmd() issues a special drive command, usually
 * initiated by ioctl() from the external hdparm program. The
 * command can be a drive command, drive task or taskfile
 * operation. Weirdly you can call it with NULL to wait for
 * all commands to finish. Don't do this as that is due to change.
 */

static ide_startstop_t execute_drive_cmd(ide_drive_t *drive,
					 struct request *rq)
{
	struct ide_cmd *cmd = ide_req(rq)->special;

	if (cmd) {
		if (cmd->protocol == ATA_PROT_PIO) {
			ide_init_sg_cmd(cmd, blk_rq_sectors(rq) << 9);
			ide_map_sg(drive, cmd);
		}

		return do_rw_taskfile(drive, cmd);
	}

	/*
	 * NULL is actually a valid way of waiting for
	 * all current requests to be flushed from the queue.
	 */
#ifdef DEBUG
	printk("%s: DRIVE_CMD (null)\n", drive->name);
#endif
	scsi_req(rq)->result = 0;
	ide_complete_rq(drive, BLK_STS_OK, blk_rq_bytes(rq));

	return ide_stopped;
}

static ide_startstop_t ide_special_rq(ide_drive_t *drive, struct request *rq)
{
	u8 cmd = scsi_req(rq)->cmd[0];

	switch (cmd) {
	case REQ_PARK_HEADS:
	case REQ_UNPARK_HEADS:
		return ide_do_park_unpark(drive, rq);
	case REQ_DEVSET_EXEC:
		return ide_do_devset(drive, rq);
	case REQ_DRIVE_RESET:
		return ide_do_reset(drive);
	default:
		BUG();
	}
}

/**
 * start_request - start of I/O and command issuing for IDE
 *
 * start_request() initiates handling of a new I/O request. It
 * accepts commands and I/O (read/write) requests.
 *
 * FIXME: this function needs a rename
 */

static ide_startstop_t start_request(ide_drive_t *drive, struct request *rq)
{
	ide_startstop_t startstop;

#ifdef DEBUG
	printk("%s: start_request: current=0x%08lx\n",
	       drive->hwif->name, (unsigned long) rq);
#endif

	/* bail early if we've exceeded max_failures */
	if (drive->max_failures && (drive->failures > drive->max_failures)) {
		rq->rq_flags |= RQF_FAILED;
		goto kill_rq;
	}

	if (drive->prep_rq && !drive->prep_rq(drive, rq))
		return ide_stopped;

	if (ata_pm_request(rq))
		ide_check_pm_state(drive, rq);

	drive->hwif->tp_ops->dev_select(drive);
	if (ide_wait_stat(&startstop, drive, drive->ready_stat,
			  ATA_BUSY | ATA_DRQ, WAIT_READY)) {
		printk(KERN_ERR "%s: drive not ready for command\n", drive->name);
		return startstop;
	}

	if (drive->special_flags == 0) {
		struct ide_driver *drv;

		/*
		 * We reset the drive so we need to issue a SETFEATURES.
		 * Do it _after_ do_special() restored device parameters.
		 */
		if (drive->current_speed == 0xff)
			ide_config_drive_speed(drive, drive->desired_speed);

		if (ata_taskfile_request(rq))
			return execute_drive_cmd(drive, rq);
		else if (ata_pm_request(rq)) {
			struct ide_pm_state *pm = ide_req(rq)->special;
#ifdef DEBUG_PM
			printk("%s: start_power_step(step: %d)\n",
			       drive->name, pm->pm_step);
#endif
			startstop = ide_start_power_step(drive, rq);
			if (startstop == ide_stopped &&
			    pm->pm_step == IDE_PM_COMPLETED)
				ide_complete_pm_rq(drive, rq);
			return startstop;
		} else if (!rq->rq_disk && ata_misc_request(rq))
			/*
			 * TODO: Once all ULDs have been modified to
			 * check for specific op codes rather than
			 * blindly accepting any special request, the
			 * check for ->rq_disk above may be replaced
			 * by a more suitable mechanism or even
			 * dropped entirely.
			 */
			return ide_special_rq(drive, rq);

		drv = *(struct ide_driver **)rq->rq_disk->private_data;

		return drv->do_request(drive, rq, blk_rq_pos(rq));
	}
	return do_special(drive);
kill_rq:
	ide_kill_rq(drive, rq);
	return ide_stopped;
}

/**
 * ide_stall_queue - pause an IDE device
 * @drive: drive to stall
 * @timeout: time to stall for (jiffies)
 *
 * ide_stall_queue() can be used by a drive to give excess bandwidth back
 * to the port by sleeping for timeout jiffies.
 */

void ide_stall_queue(ide_drive_t *drive, unsigned long timeout)
{
	if (timeout > WAIT_WORSTCASE)
		timeout = WAIT_WORSTCASE;
	drive->sleep = timeout + jiffies;
	drive->dev_flags |= IDE_DFLAG_SLEEPING;
}
EXPORT_SYMBOL(ide_stall_queue);

static inline int ide_lock_port(ide_hwif_t *hwif)
{
	if (hwif->busy)
		return 1;

	hwif->busy = 1;

	return 0;
}

static inline void ide_unlock_port(ide_hwif_t *hwif)
{
	hwif->busy = 0;
}

static inline int ide_lock_host(struct ide_host *host, ide_hwif_t *hwif)
{
	int rc = 0;

	if (host->host_flags & IDE_HFLAG_SERIALIZE) {
		rc = test_and_set_bit_lock(IDE_HOST_BUSY, &host->host_busy);
		if (rc == 0) {
			if (host->get_lock)
				host->get_lock(ide_intr, hwif);
		}
	}
	return rc;
}

static inline void ide_unlock_host(struct ide_host *host)
{
	if (host->host_flags & IDE_HFLAG_SERIALIZE) {
		if (host->release_lock)
			host->release_lock();
		clear_bit_unlock(IDE_HOST_BUSY, &host->host_busy);
	}
}

void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq)
{
	struct request_queue *q = drive->queue;

	/* Use 3ms as that was the old plug delay */
	if (rq) {
		blk_mq_requeue_request(rq, false);
		blk_mq_delay_kick_requeue_list(q, 3);
	} else
		blk_mq_delay_run_hw_queue(q->queue_hw_ctx[0], 3);
}

blk_status_t ide_issue_rq(ide_drive_t *drive, struct request *rq,
			  bool local_requeue)
{
	ide_hwif_t *hwif = drive->hwif;
	struct ide_host *host = hwif->host;
	ide_startstop_t startstop;

	if (!blk_rq_is_passthrough(rq) && !(rq->rq_flags & RQF_DONTPREP)) {
		rq->rq_flags |= RQF_DONTPREP;
		ide_req(rq)->special = NULL;
	}

	/* HLD do_request() callback might sleep, make sure it's okay */
	might_sleep();

	if (ide_lock_host(host, hwif))
		return BLK_STS_DEV_RESOURCE;

	spin_lock_irq(&hwif->lock);

	if (!ide_lock_port(hwif)) {
		ide_hwif_t *prev_port;

		WARN_ON_ONCE(hwif->rq);
repeat:
		prev_port = hwif->host->cur_port;
		if (drive->dev_flags & IDE_DFLAG_SLEEPING &&
		    time_after(drive->sleep, jiffies)) {
			ide_unlock_port(hwif);
			goto plug_device;
		}

		if ((hwif->host->host_flags & IDE_HFLAG_SERIALIZE) &&
		    hwif != prev_port) {
			ide_drive_t *cur_dev =
				prev_port ? prev_port->cur_dev : NULL;

			/*
			 * set nIEN for previous port, drives in the
			 * quirk list may not like intr setups/cleanups
			 */
			if (cur_dev &&
			    (cur_dev->dev_flags & IDE_DFLAG_NIEN_QUIRK) == 0)
				prev_port->tp_ops->write_devctl(prev_port,
								ATA_NIEN |
								ATA_DEVCTL_OBS);

			hwif->host->cur_port = hwif;
		}
		hwif->cur_dev = drive;
		drive->dev_flags &= ~(IDE_DFLAG_SLEEPING | IDE_DFLAG_PARKED);

		/*
		 * Sanity: don't accept a request that isn't a PM request
		 * if we are currently power managed. This is very important as
		 * blk_stop_queue() doesn't prevent the blk_fetch_request()
		 * above from returning us whatever is in the queue. Since we
		 * call ide_do_request() ourselves, we end up taking requests
		 * while the queue is blocked...
		 */
		if ((drive->dev_flags & IDE_DFLAG_BLOCKED) &&
		    ata_pm_request(rq) == 0 &&
		    (rq->rq_flags & RQF_PM) == 0) {
			/* there should be no pending command at this point */
			ide_unlock_port(hwif);
			goto plug_device;
		}

		scsi_req(rq)->resid_len = blk_rq_bytes(rq);
		hwif->rq = rq;

		spin_unlock_irq(&hwif->lock);
		startstop = start_request(drive, rq);
		spin_lock_irq(&hwif->lock);

		if (startstop == ide_stopped) {
			rq = hwif->rq;
			hwif->rq = NULL;
			if (rq)
				goto repeat;
			ide_unlock_port(hwif);
			goto out;
		}
	} else {
plug_device:
		if (local_requeue)
			list_add(&rq->queuelist, &drive->rq_list);
		spin_unlock_irq(&hwif->lock);
		ide_unlock_host(host);
		if (!local_requeue)
			ide_requeue_and_plug(drive, rq);
		return BLK_STS_OK;
	}

out:
	spin_unlock_irq(&hwif->lock);
	if (rq == NULL)
		ide_unlock_host(host);
	return BLK_STS_OK;
}

/*
 * Issue a new request to a device.
 */
blk_status_t ide_queue_rq(struct blk_mq_hw_ctx *hctx,
			  const struct blk_mq_queue_data *bd)
{
	ide_drive_t *drive = hctx->queue->queuedata;
	ide_hwif_t *hwif = drive->hwif;

	spin_lock_irq(&hwif->lock);
	if (drive->sense_rq_active) {
		spin_unlock_irq(&hwif->lock);
		return BLK_STS_DEV_RESOURCE;
	}
	spin_unlock_irq(&hwif->lock);

	blk_mq_start_request(bd->rq);
	return ide_issue_rq(drive, bd->rq, false);
}

static int drive_is_ready(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	u8 stat = 0;

	if (drive->waiting_for_dma)
		return hwif->dma_ops->dma_test_irq(drive);

	if (hwif->io_ports.ctl_addr &&
	    (hwif->host_flags & IDE_HFLAG_BROKEN_ALTSTATUS) == 0)
		stat = hwif->tp_ops->read_altstatus(hwif);
	else
		/* Note: this may clear a pending IRQ!! */
		stat = hwif->tp_ops->read_status(hwif);

	if (stat & ATA_BUSY)
		/* drive busy: definitely not interrupting */
		return 0;

	/* drive ready: *might* be interrupting */
	return 1;
}

/**
 * ide_timer_expiry - handle lack of an IDE interrupt
 * @t: timer embedded in the port (hwif) whose command timed out
 *
 * An IDE command has timed out before the expected drive return
 * occurred. At this point we attempt to clean up the current
 * mess. If the current handler includes an expiry handler then
 * we invoke the expiry handler, and providing it is happy the
 * work is done. If that fails we apply generic recovery rules
 * invoking the handler and checking the drive DMA status. We
 * have an excessively incestuous relationship with the DMA
 * logic that wants cleaning up.
 */

void ide_timer_expiry(struct timer_list *t)
{
	ide_hwif_t *hwif = from_timer(hwif, t, timer);
	ide_drive_t *drive;
	ide_handler_t *handler;
	unsigned long flags;
	int wait = -1;
	int plug_device = 0;
	struct request *rq_in_flight;

	spin_lock_irqsave(&hwif->lock, flags);

	handler = hwif->handler;

	if (handler == NULL || hwif->req_gen != hwif->req_gen_timer) {
		/*
		 * Either a marginal timeout occurred
		 * (got the interrupt just as timer expired),
		 * or we were "sleeping" to give other devices a chance.
		 * Either way, we don't really want to complain about anything.
		 */
	} else {
		ide_expiry_t *expiry = hwif->expiry;
		ide_startstop_t startstop = ide_stopped;

		drive = hwif->cur_dev;

		if (expiry) {
			wait = expiry(drive);
			if (wait > 0) { /* continue */
				/* reset timer */
				hwif->timer.expires = jiffies + wait;
				hwif->req_gen_timer = hwif->req_gen;
				add_timer(&hwif->timer);
				spin_unlock_irqrestore(&hwif->lock, flags);
				return;
			}
		}
		hwif->handler = NULL;
		hwif->expiry = NULL;
		/*
		 * We need to simulate a real interrupt when invoking
		 * the handler() function, which means we need to
		 * globally mask the specific IRQ:
		 */
		spin_unlock(&hwif->lock);
		/* disable_irq_nosync ?? */
		disable_irq(hwif->irq);

		if (hwif->polling) {
			startstop = handler(drive);
		} else if (drive_is_ready(drive)) {
			if (drive->waiting_for_dma)
				hwif->dma_ops->dma_lost_irq(drive);
			if (hwif->port_ops && hwif->port_ops->clear_irq)
				hwif->port_ops->clear_irq(drive);

			printk(KERN_WARNING "%s: lost interrupt\n",
			       drive->name);
			startstop = handler(drive);
		} else {
			if (drive->waiting_for_dma)
				startstop = ide_dma_timeout_retry(drive, wait);
			else
				startstop = ide_error(drive, "irq timeout",
					hwif->tp_ops->read_status(hwif));
		}
		/* Disable interrupts again, `handler' might have enabled it */
		spin_lock_irq(&hwif->lock);
		enable_irq(hwif->irq);
		if (startstop == ide_stopped && hwif->polling == 0) {
			rq_in_flight = hwif->rq;
			hwif->rq = NULL;
			ide_unlock_port(hwif);
			plug_device = 1;
		}
	}
	spin_unlock_irqrestore(&hwif->lock, flags);

	if (plug_device) {
		ide_unlock_host(hwif->host);
		ide_requeue_and_plug(drive, rq_in_flight);
	}
}

/**
 * unexpected_intr - handle an unexpected IDE interrupt
 * @irq: interrupt line
 * @hwif: port being processed
 *
 * There's nothing really useful we can do with an unexpected interrupt,
 * other than reading the status register (to clear it), and logging it.
 * There should be no way that an irq can happen before we're ready for it,
 * so we needn't worry much about losing an "important" interrupt here.
 *
 * On laptops (and "green" PCs), an unexpected interrupt occurs whenever
 * the drive enters "idle", "standby", or "sleep" mode, so if the status
 * looks "good", we just ignore the interrupt completely.
 *
 * This routine assumes __cli() is in effect when called.
 *
 * If an unexpected interrupt happens on irq15 while we are handling irq14
 * and if the two interfaces are "serialized" (CMD640), then it looks like
 * we could screw up by interfering with a new request being set up for
 * irq15.
 *
 * In reality, this is a non-issue. The new command is not sent unless
 * the drive is ready to accept one, in which case we know the drive is
 * not trying to interrupt us. And ide_set_handler() is always invoked
 * before completing the issuance of any new drive command, so we will not
 * be accidentally invoked as a result of any valid command completion
 * interrupt.
 */

static void unexpected_intr(int irq, ide_hwif_t *hwif)
{
	u8 stat = hwif->tp_ops->read_status(hwif);

	if (!OK_STAT(stat, ATA_DRDY, BAD_STAT)) {
		/* Try to not flood the console with msgs */
		static unsigned long last_msgtime, count;
		++count;

		if (time_after(jiffies, last_msgtime + HZ)) {
			last_msgtime = jiffies;
			printk(KERN_ERR "%s: unexpected interrupt, "
				"status=0x%02x, count=%ld\n",
				hwif->name, stat, count);
		}
	}
}

/**
 * ide_intr - default IDE interrupt handler
 * @irq: interrupt number
 * @dev_id: hwif
 *
 * This is the default IRQ handler for the IDE layer. You should
 * not need to override it. If you do be aware it is subtle in
 * places.
 *
 * hwif is the interface in the group currently performing
 * a command. hwif->cur_dev is the drive and hwif->handler is
 * the IRQ handler to call. As we issue a command the handlers
 * step through multiple states, reassigning the handler to the
 * next step in the process. Unlike a smart SCSI controller IDE
 * expects the main processor to sequence the various transfer
 * stages. We also manage a poll timer to catch up with most
 * timeout situations. There are still a few where the handlers
 * don't ever decide to give up.
 *
 * The handler eventually returns ide_stopped to indicate the
 * request completed. At this point we issue the next request
 * on the port and the process begins again.
 */

irqreturn_t ide_intr(int irq, void *dev_id)
{
	ide_hwif_t *hwif = (ide_hwif_t *)dev_id;
	struct ide_host *host = hwif->host;
	ide_drive_t *drive;
	ide_handler_t *handler;
	unsigned long flags;
	ide_startstop_t startstop;
	irqreturn_t irq_ret = IRQ_NONE;
	int plug_device = 0;
	struct request *rq_in_flight;

	if (host->host_flags & IDE_HFLAG_SERIALIZE) {
		if (hwif != host->cur_port)
			goto out_early;
	}

	spin_lock_irqsave(&hwif->lock, flags);

	if (hwif->port_ops && hwif->port_ops->test_irq &&
	    hwif->port_ops->test_irq(hwif) == 0)
		goto out;

	handler = hwif->handler;

	if (handler == NULL || hwif->polling) {
		/*
		 * Not expecting an interrupt from this drive.
		 * That means this could be:
		 * (1) an interrupt from another PCI device
		 *     sharing the same PCI INT# as us.
		 * or (2) a drive just entered sleep or standby mode,
		 *     and is interrupting to let us know.
		 * or (3) a spurious interrupt of unknown origin.
		 *
		 * For PCI, we cannot tell the difference,
		 * so in that case we just ignore it and hope it goes away.
		 */
		if ((host->irq_flags & IRQF_SHARED) == 0) {
			/*
			 * Probably not a shared PCI interrupt,
			 * so we can safely try to do something about it:
			 */
			unexpected_intr(irq, hwif);
		} else {
			/*
			 * Whack the status register, just in case
			 * we have a leftover pending IRQ.
			 */
			(void)hwif->tp_ops->read_status(hwif);
		}
		goto out;
	}

	drive = hwif->cur_dev;

	if (!drive_is_ready(drive))
		/*
		 * This happens regularly when we share a PCI IRQ with
		 * another device. Unfortunately, it can also happen
		 * with some buggy drives that trigger the IRQ before
		 * their status register is up to date. Hopefully we have
		 * enough advance overhead that the latter isn't a problem.
		 */
		goto out;

	hwif->handler = NULL;
	hwif->expiry = NULL;
	hwif->req_gen++;
	del_timer(&hwif->timer);
	spin_unlock(&hwif->lock);

	if (hwif->port_ops && hwif->port_ops->clear_irq)
		hwif->port_ops->clear_irq(drive);

	if (drive->dev_flags & IDE_DFLAG_UNMASK)
		local_irq_enable_in_hardirq();

	/* service this interrupt, may set handler for next interrupt */
	startstop = handler(drive);

	spin_lock_irq(&hwif->lock);
	/*
	 * Note that handler() may have set things up for another
	 * interrupt to occur soon, but it cannot happen until
	 * we exit from this routine, because it will be the
	 * same irq as is currently being serviced here, and Linux
	 * won't allow another of the same (on any CPU) until we return.
	 */
	if (startstop == ide_stopped && hwif->polling == 0) {
		BUG_ON(hwif->handler);
		rq_in_flight = hwif->rq;
		hwif->rq = NULL;
		ide_unlock_port(hwif);
		plug_device = 1;
	}
	irq_ret = IRQ_HANDLED;
out:
	spin_unlock_irqrestore(&hwif->lock, flags);
out_early:
	if (plug_device) {
		ide_unlock_host(hwif->host);
		ide_requeue_and_plug(drive, rq_in_flight);
	}

	return irq_ret;
}
EXPORT_SYMBOL_GPL(ide_intr);

void ide_pad_transfer(ide_drive_t *drive, int write, int len)
{
	ide_hwif_t *hwif = drive->hwif;
	u8 buf[4] = { 0 };

	while (len > 0) {
		if (write)
			hwif->tp_ops->output_data(drive, NULL, buf, min(4, len));
		else
			hwif->tp_ops->input_data(drive, NULL, buf, min(4, len));
		len -= 4;
	}
}
EXPORT_SYMBOL_GPL(ide_pad_transfer);

void ide_insert_request_head(ide_drive_t *drive, struct request *rq)
{
	drive->sense_rq_active = true;
	list_add_tail(&rq->queuelist, &drive->rq_list);
	kblockd_schedule_work(&drive->rq_work);
}
EXPORT_SYMBOL_GPL(ide_insert_request_head);