Lines matching refs:esp (Linux ESP SCSI driver core, drivers/scsi/esp_scsi.c)
33 #define DRV_MODULE_NAME "esp"
58 shost_printk(KERN_DEBUG, esp->host, f, ## a); \
63 shost_printk(KERN_DEBUG, esp->host, f, ## a); \
68 shost_printk(KERN_DEBUG, esp->host, f, ## a); \
73 shost_printk(KERN_DEBUG, esp->host, f, ## a); \
78 shost_printk(KERN_DEBUG, esp->host, f, ## a); \
83 shost_printk(KERN_DEBUG, esp->host, f, ## a); \
88 shost_printk(KERN_DEBUG, esp->host, f, ## a); \
93 shost_printk(KERN_DEBUG, esp->host, f, ## a); \
98 shost_printk(KERN_DEBUG, esp->host, f, ## a); \
103 shost_printk(KERN_DEBUG, esp->host, f, ## a); \
108 shost_printk(KERN_DEBUG, esp->host, f, ## a); \
113 shost_printk(KERN_DEBUG, esp->host, f, ## a); \
116 #define esp_read8(REG) esp->ops->esp_read8(esp, REG)
117 #define esp_write8(VAL,REG) esp->ops->esp_write8(esp, VAL, REG)
119 static void esp_log_fill_regs(struct esp *esp,
122 p->sreg = esp->sreg;
123 p->seqreg = esp->seqreg;
124 p->sreg2 = esp->sreg2;
125 p->ireg = esp->ireg;
126 p->select_state = esp->select_state;
127 p->event = esp->event;
130 void scsi_esp_cmd(struct esp *esp, u8 val)
133 int idx = esp->esp_event_cur;
135 p = &esp->esp_event_log[idx];
138 esp_log_fill_regs(esp, p);
140 esp->esp_event_cur = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);
147 static void esp_send_dma_cmd(struct esp *esp, int len, int max_len, int cmd)
149 if (esp->flags & ESP_FLAG_USE_FIFO) {
152 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
154 esp_write8(esp->command_block[i], ESP_FDATA);
155 scsi_esp_cmd(esp, cmd);
157 if (esp->rev == FASHME)
158 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
160 esp->ops->send_dma_cmd(esp, esp->command_block_dma,
165 static void esp_event(struct esp *esp, u8 val)
168 int idx = esp->esp_event_cur;
170 p = &esp->esp_event_log[idx];
173 esp_log_fill_regs(esp, p);
175 esp->esp_event_cur = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);
177 esp->event = val;
180 static void esp_dump_cmd_log(struct esp *esp)
182 int idx = esp->esp_event_cur;
185 shost_printk(KERN_INFO, esp->host, "Dumping command log\n");
187 struct esp_event_ent *p = &esp->esp_event_log[idx];
189 shost_printk(KERN_INFO, esp->host,
201 static void esp_flush_fifo(struct esp *esp)
203 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
204 if (esp->rev == ESP236) {
209 shost_printk(KERN_ALERT, esp->host,
218 static void hme_read_fifo(struct esp *esp)
224 esp->fifo[idx++] = esp_read8(ESP_FDATA);
225 esp->fifo[idx++] = esp_read8(ESP_FDATA);
227 if (esp->sreg2 & ESP_STAT2_F1BYTE) {
229 esp->fifo[idx++] = esp_read8(ESP_FDATA);
230 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
232 esp->fifo_cnt = idx;
235 static void esp_set_all_config3(struct esp *esp, u8 val)
240 esp->target[i].esp_config3 = val;
244 static void esp_reset_esp(struct esp *esp)
247 scsi_esp_cmd(esp, ESP_CMD_RC);
248 scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);
249 if (esp->rev == FAST)
251 scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);
256 esp->max_period = ((35 * esp->ccycle) / 1000);
257 if (esp->rev == FAST) {
261 esp->rev = FAS236;
263 esp->rev = FASHME; /* Version is usually '5'. */
265 esp->rev = FSC;
269 esp->rev = FAS100A;
271 esp->min_period = ((4 * esp->ccycle) / 1000);
273 esp->min_period = ((5 * esp->ccycle) / 1000);
275 if (esp->rev == FAS236) {
284 esp->rev = PCSCSI;
285 esp_write8(esp->config4, ESP_CFG4);
288 esp->max_period = (esp->max_period + 3)>>2;
289 esp->min_period = (esp->min_period + 3)>>2;
291 esp_write8(esp->config1, ESP_CFG1);
292 switch (esp->rev) {
298 esp_write8(esp->config2, ESP_CFG2);
303 esp_write8(esp->config2, ESP_CFG2);
304 esp->prev_cfg3 = esp->target[0].esp_config3;
305 esp_write8(esp->prev_cfg3, ESP_CFG3);
309 esp->config2 |= (ESP_CONFIG2_HME32 | ESP_CONFIG2_HMEFENAB);
315 esp_write8(esp->config2, ESP_CFG2);
316 if (esp->rev == FASHME) {
317 u8 cfg3 = esp->target[0].esp_config3;
320 if (esp->scsi_id >= 8)
322 esp_set_all_config3(esp, cfg3);
324 u32 cfg3 = esp->target[0].esp_config3;
327 esp_set_all_config3(esp, cfg3);
329 esp->prev_cfg3 = esp->target[0].esp_config3;
330 esp_write8(esp->prev_cfg3, ESP_CFG3);
331 if (esp->rev == FASHME) {
332 esp->radelay = 80;
334 if (esp->flags & ESP_FLAG_DIFFERENTIAL)
335 esp->radelay = 0;
337 esp->radelay = 96;
343 esp_write8(esp->config2, ESP_CFG2);
344 esp_set_all_config3(esp,
345 (esp->target[0].esp_config3 |
347 esp->prev_cfg3 = esp->target[0].esp_config3;
348 esp_write8(esp->prev_cfg3, ESP_CFG3);
349 esp->radelay = 32;
357 esp_write8(esp->cfact, ESP_CFACT);
359 esp->prev_stp = 0;
360 esp_write8(esp->prev_stp, ESP_STP);
362 esp->prev_soff = 0;
363 esp_write8(esp->prev_soff, ESP_SOFF);
365 esp_write8(esp->neg_defp, ESP_TIMEO);
372 static void esp_map_dma(struct esp *esp, struct scsi_cmnd *cmd)
382 if (esp->flags & ESP_FLAG_NO_DMA_MAP) {
431 static void esp_advance_dma(struct esp *esp, struct esp_cmd_entry *ent,
444 shost_printk(KERN_ERR, esp->host,
446 shost_printk(KERN_ERR, esp->host,
459 static void esp_unmap_dma(struct esp *esp, struct scsi_cmnd *cmd)
461 if (!(esp->flags & ESP_FLAG_NO_DMA_MAP))
465 static void esp_save_pointers(struct esp *esp, struct esp_cmd_entry *ent)
480 static void esp_restore_pointers(struct esp *esp, struct esp_cmd_entry *ent)
495 static void esp_write_tgt_config3(struct esp *esp, int tgt)
497 if (esp->rev > ESP100A) {
498 u8 val = esp->target[tgt].esp_config3;
500 if (val != esp->prev_cfg3) {
501 esp->prev_cfg3 = val;
507 static void esp_write_tgt_sync(struct esp *esp, int tgt)
509 u8 off = esp->target[tgt].esp_offset;
510 u8 per = esp->target[tgt].esp_period;
512 if (off != esp->prev_soff) {
513 esp->prev_soff = off;
516 if (per != esp->prev_stp) {
517 esp->prev_stp = per;
522 static u32 esp_dma_length_limit(struct esp *esp, u32 dma_addr, u32 dma_len)
524 if (esp->rev == FASHME) {
630 static void esp_map_sense(struct esp *esp, struct esp_cmd_entry *ent)
633 if (esp->flags & ESP_FLAG_NO_DMA_MAP) {
638 ent->sense_dma = dma_map_single(esp->dev, ent->sense_ptr,
642 static void esp_unmap_sense(struct esp *esp, struct esp_cmd_entry *ent)
644 if (!(esp->flags & ESP_FLAG_NO_DMA_MAP))
645 dma_unmap_single(esp->dev, ent->sense_dma,
657 static void esp_autosense(struct esp *esp, struct esp_cmd_entry *ent)
671 esp_map_sense(esp, ent);
675 esp->active_cmd = ent;
677 p = esp->command_block;
678 esp->msg_out_len = 0;
689 esp->select_state = ESP_SELECT_BASIC;
692 if (esp->rev == FASHME)
696 esp_write_tgt_sync(esp, tgt);
697 esp_write_tgt_config3(esp, tgt);
699 val = (p - esp->command_block);
701 esp_send_dma_cmd(esp, val, 16, ESP_CMD_SELA);
704 static struct esp_cmd_entry *find_and_prep_issuable_command(struct esp *esp)
708 list_for_each_entry(ent, &esp->queued_cmds, list) {
735 static void esp_maybe_execute_command(struct esp *esp)
746 if (esp->active_cmd ||
747 (esp->flags & ESP_FLAG_RESETTING))
750 ent = find_and_prep_issuable_command(esp);
755 esp_autosense(esp, ent);
763 tp = &esp->target[tgt];
765 list_move(&ent->list, &esp->active_cmds);
767 esp->active_cmd = ent;
769 esp_map_dma(esp, cmd);
770 esp_save_pointers(esp, ent);
775 p = esp->command_block;
777 esp->msg_out_len = 0;
798 if (esp->rev == FASHME && esp_need_to_nego_wide(tp)) {
799 esp->msg_out_len =
800 spi_populate_width_msg(&esp->msg_out[0],
805 esp->msg_out_len =
806 spi_populate_sync_msg(&esp->msg_out[0],
815 if (esp->msg_out_len)
822 if (ent->tag[0] && esp->rev == ESP100) {
830 esp->cmd_bytes_left = cmd->cmd_len;
831 esp->cmd_bytes_ptr = &cmd->cmnd[0];
834 for (i = esp->msg_out_len - 1;
836 esp->msg_out[i + 2] = esp->msg_out[i];
837 esp->msg_out[0] = ent->tag[0];
838 esp->msg_out[1] = ent->tag[1];
839 esp->msg_out_len += 2;
843 esp->select_state = ESP_SELECT_MSGOUT;
856 esp->select_state = ESP_SELECT_BASIC;
859 if (esp->rev == FASHME)
863 esp_write_tgt_sync(esp, tgt);
864 esp_write_tgt_config3(esp, tgt);
866 val = (p - esp->command_block);
875 esp_send_dma_cmd(esp, val, 16, start_cmd);
878 static struct esp_cmd_entry *esp_get_ent(struct esp *esp)
880 struct list_head *head = &esp->esp_cmd_pool;
893 static void esp_put_ent(struct esp *esp, struct esp_cmd_entry *ent)
895 list_add(&ent->list, &esp->esp_cmd_pool);
898 static void esp_cmd_is_done(struct esp *esp, struct esp_cmd_entry *ent,
905 esp->active_cmd = NULL;
906 esp_unmap_dma(esp, cmd);
916 esp_unmap_sense(esp, ent);
931 printk("esp%d: tgt[%d] lun[%d] AUTO SENSE[ ",
932 esp->host->unique_id, tgt, lun);
942 esp_put_ent(esp, ent);
944 esp_maybe_execute_command(esp);
953 static void esp_event_queue_full(struct esp *esp, struct esp_cmd_entry *ent)
964 struct esp *esp = shost_priv(dev->host);
968 ent = esp_get_ent(esp);
979 list_add_tail(&ent->list, &esp->queued_cmds);
981 esp_maybe_execute_command(esp);
988 static int esp_check_gross_error(struct esp *esp)
990 if (esp->sreg & ESP_STAT_SPAM) {
997 shost_printk(KERN_ERR, esp->host,
998 "Gross error sreg[%02x]\n", esp->sreg);
1005 static int esp_check_spur_intr(struct esp *esp)
1007 switch (esp->rev) {
1013 esp->sreg &= ~ESP_STAT_INTR;
1017 if (!(esp->sreg & ESP_STAT_INTR)) {
1018 if (esp->ireg & ESP_INTR_SR)
1024 if (!esp->ops->dma_error(esp)) {
1025 shost_printk(KERN_ERR, esp->host,
1027 esp->sreg);
1031 shost_printk(KERN_ERR, esp->host, "DMA error\n");
1042 static void esp_schedule_reset(struct esp *esp)
1046 esp->flags |= ESP_FLAG_RESETTING;
1047 esp_event(esp, ESP_EVENT_RESET);
1054 static struct esp_cmd_entry *esp_reconnect_with_tag(struct esp *esp,
1061 shost_printk(KERN_ERR, esp->host,
1069 if (esp->ops->irq_pending(esp))
1073 shost_printk(KERN_ERR, esp->host,
1078 esp->sreg = esp_read8(ESP_STATUS);
1079 esp->ireg = esp_read8(ESP_INTRPT);
1082 i, esp->ireg, esp->sreg);
1084 if (esp->ireg & ESP_INTR_DC) {
1085 shost_printk(KERN_ERR, esp->host,
1090 if ((esp->sreg & ESP_STAT_PMASK) != ESP_MIP) {
1091 shost_printk(KERN_ERR, esp->host,
1092 "Reconnect, not MIP sreg[%02x].\n", esp->sreg);
1097 esp->command_block[0] = 0xff;
1098 esp->command_block[1] = 0xff;
1099 esp->ops->send_dma_cmd(esp, esp->command_block_dma,
1103 scsi_esp_cmd(esp, ESP_CMD_MOK);
1106 if (esp->ops->irq_pending(esp)) {
1107 esp->sreg = esp_read8(ESP_STATUS);
1108 esp->ireg = esp_read8(ESP_INTRPT);
1109 if (esp->ireg & ESP_INTR_FDONE)
1115 shost_printk(KERN_ERR, esp->host, "Reconnect IRQ2 timeout\n");
1118 esp->ops->dma_drain(esp);
1119 esp->ops->dma_invalidate(esp);
1122 i, esp->ireg, esp->sreg,
1123 esp->command_block[0],
1124 esp->command_block[1]);
1126 if (esp->command_block[0] < SIMPLE_QUEUE_TAG ||
1127 esp->command_block[0] > ORDERED_QUEUE_TAG) {
1128 shost_printk(KERN_ERR, esp->host,
1130 esp->command_block[0]);
1134 ent = lp->tagged_cmds[esp->command_block[1]];
1136 shost_printk(KERN_ERR, esp->host,
1138 esp->command_block[1]);
1145 static int esp_reconnect(struct esp *esp)
1153 BUG_ON(esp->active_cmd);
1154 if (esp->rev == FASHME) {
1158 target = esp->fifo[0];
1159 lun = esp->fifo[1] & 0x7;
1169 if (!(bits & esp->scsi_id_mask))
1171 bits &= ~esp->scsi_id_mask;
1178 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1179 if (esp->rev == ESP100) {
1189 scsi_esp_cmd(esp, ESP_CMD_NULL);
1192 esp_write_tgt_sync(esp, target);
1193 esp_write_tgt_config3(esp, target);
1195 scsi_esp_cmd(esp, ESP_CMD_MOK);
1197 if (esp->rev == FASHME)
1201 tp = &esp->target[target];
1204 shost_printk(KERN_ERR, esp->host,
1213 ent = esp_reconnect_with_tag(esp, lp);
1218 esp->active_cmd = ent;
1220 esp_event(esp, ESP_EVENT_CHECK_PHASE);
1221 esp_restore_pointers(esp, ent);
1222 esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1226 esp_schedule_reset(esp);
1230 static int esp_finish_select(struct esp *esp)
1236 esp->select_state = ESP_SELECT_NONE;
1238 esp->seqreg = esp_read8(ESP_SSTEP) & ESP_STEP_VBITS;
1239 ent = esp->active_cmd;
1242 if (esp->ops->dma_error(esp)) {
1246 esp_schedule_reset(esp);
1247 esp_cmd_is_done(esp, ent, cmd, (DID_ERROR << 16));
1251 esp->ops->dma_invalidate(esp);
1253 if (esp->ireg == (ESP_INTR_RSEL | ESP_INTR_FDONE)) {
1254 struct esp_target_data *tp = &esp->target[cmd->device->id];
1261 esp_unmap_dma(esp, cmd);
1264 esp->cmd_bytes_ptr = NULL;
1265 esp->cmd_bytes_left = 0;
1267 esp_unmap_sense(esp, ent);
1273 list_move(&ent->list, &esp->queued_cmds);
1274 esp->active_cmd = NULL;
1282 if (esp->ireg == ESP_INTR_DC) {
1289 esp->target[dev->id].flags |= ESP_TGT_CHECK_NEGO;
1291 scsi_esp_cmd(esp, ESP_CMD_ESEL);
1292 esp_cmd_is_done(esp, ent, cmd, (DID_BAD_TARGET << 16));
1296 if (esp->ireg == (ESP_INTR_FDONE | ESP_INTR_BSERV)) {
1300 if (esp->rev <= ESP236) {
1303 scsi_esp_cmd(esp, ESP_CMD_NULL);
1306 (!esp->prev_soff ||
1307 ((esp->sreg & ESP_STAT_PMASK) != ESP_DIP)))
1308 esp_flush_fifo(esp);
1314 esp_event(esp, ESP_EVENT_CHECK_PHASE);
1318 shost_printk(KERN_INFO, esp->host,
1319 "Unexpected selection completion ireg[%x]\n", esp->ireg);
1320 esp_schedule_reset(esp);
1324 static int esp_data_bytes_sent(struct esp *esp, struct esp_cmd_entry *ent,
1330 if (esp->prev_cfg3 & ESP_CONFIG3_EWIDE)
1334 if (!(esp->sreg & ESP_STAT_TCNT)) {
1337 if (esp->rev == FASHME)
1339 if (esp->rev == PCSCSI && (esp->config2 & ESP_CONFIG2_FENAB))
1343 bytes_sent = esp->data_dma_len;
1345 bytes_sent -= esp->send_cmd_residual;
1380 if (!esp->prev_soff) {
1384 if (esp->rev == ESP100) {
1398 esp->sreg = esp_read8(ESP_STATUS);
1399 phase = esp->sreg & ESP_STAT_PMASK;
1413 esp_flush_fifo(esp);
1418 static void esp_setsync(struct esp *esp, struct esp_target_data *tp,
1428 esp_soff |= esp->radelay;
1429 if (esp->rev >= FAS236) {
1431 if (esp->rev >= FAS100A)
1435 if (esp->rev == FASHME)
1436 esp_soff &= ~esp->radelay;
1441 esp->prev_cfg3 = tp->esp_config3;
1442 esp_write8(esp->prev_cfg3, ESP_CFG3);
1446 tp->esp_period = esp->prev_stp = esp_stp;
1447 tp->esp_offset = esp->prev_soff = esp_soff;
1457 static void esp_msgin_reject(struct esp *esp)
1459 struct esp_cmd_entry *ent = esp->active_cmd;
1465 tp = &esp->target[tgt];
1472 scsi_esp_cmd(esp, ESP_CMD_RATN);
1474 esp->msg_out_len =
1475 spi_populate_sync_msg(&esp->msg_out[0],
1479 scsi_esp_cmd(esp, ESP_CMD_SATN);
1488 esp_setsync(esp, tp, 0, 0, 0, 0);
1489 scsi_esp_cmd(esp, ESP_CMD_RATN);
1493 shost_printk(KERN_INFO, esp->host, "Unexpected MESSAGE REJECT\n");
1494 esp_schedule_reset(esp);
1497 static void esp_msgin_sdtr(struct esp *esp, struct esp_target_data *tp)
1499 u8 period = esp->msg_in[3];
1500 u8 offset = esp->msg_in[4];
1512 if (period > esp->max_period) {
1516 if (period < esp->min_period)
1519 one_clock = esp->ccycle / 1000;
1521 if (stp && esp->rev >= FAS236) {
1529 esp_setsync(esp, tp, period, offset, stp, offset);
1533 esp->msg_out[0] = MESSAGE_REJECT;
1534 esp->msg_out_len = 1;
1535 scsi_esp_cmd(esp, ESP_CMD_SATN);
1541 esp->msg_out_len =
1542 spi_populate_sync_msg(&esp->msg_out[0],
1545 scsi_esp_cmd(esp, ESP_CMD_SATN);
1548 static void esp_msgin_wdtr(struct esp *esp, struct esp_target_data *tp)
1550 int size = 8 << esp->msg_in[3];
1553 if (esp->rev != FASHME)
1571 esp->prev_cfg3 = cfg3;
1580 scsi_esp_cmd(esp, ESP_CMD_RATN);
1582 esp->msg_out_len =
1583 spi_populate_sync_msg(&esp->msg_out[0],
1587 scsi_esp_cmd(esp, ESP_CMD_SATN);
1592 esp->msg_out[0] = MESSAGE_REJECT;
1593 esp->msg_out_len = 1;
1594 scsi_esp_cmd(esp, ESP_CMD_SATN);
1597 static void esp_msgin_extended(struct esp *esp)
1599 struct esp_cmd_entry *ent = esp->active_cmd;
1604 tp = &esp->target[tgt];
1605 if (esp->msg_in[2] == EXTENDED_SDTR) {
1606 esp_msgin_sdtr(esp, tp);
1609 if (esp->msg_in[2] == EXTENDED_WDTR) {
1610 esp_msgin_wdtr(esp, tp);
1614 shost_printk(KERN_INFO, esp->host,
1615 "Unexpected extended msg type %x\n", esp->msg_in[2]);
1617 esp->msg_out[0] = MESSAGE_REJECT;
1618 esp->msg_out_len = 1;
1619 scsi_esp_cmd(esp, ESP_CMD_SATN);
1625 static int esp_msgin_process(struct esp *esp)
1627 u8 msg0 = esp->msg_in[0];
1628 int len = esp->msg_in_len;
1632 shost_printk(KERN_INFO, esp->host,
1641 if (len < esp->msg_in[1] + 2)
1643 esp_msgin_extended(esp);
1652 if (esp->msg_in[1] != 1)
1655 ent = esp->active_cmd;
1669 esp_restore_pointers(esp, esp->active_cmd);
1672 esp_save_pointers(esp, esp->active_cmd);
1677 struct esp_cmd_entry *ent = esp->active_cmd;
1680 esp_event(esp, ESP_EVENT_FREE_BUS);
1681 esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1685 esp_msgin_reject(esp);
1690 esp->msg_out[0] = MESSAGE_REJECT;
1691 esp->msg_out_len = 1;
1692 scsi_esp_cmd(esp, ESP_CMD_SATN);
1697 static int esp_process_event(struct esp *esp)
1704 esp->event, esp->sreg & ESP_STAT_PMASK);
1705 switch (esp->event) {
1707 switch (esp->sreg & ESP_STAT_PMASK) {
1709 esp_event(esp, ESP_EVENT_DATA_OUT);
1712 esp_event(esp, ESP_EVENT_DATA_IN);
1715 esp_flush_fifo(esp);
1716 scsi_esp_cmd(esp, ESP_CMD_ICCSEQ);
1717 esp_event(esp, ESP_EVENT_STATUS);
1718 esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1722 esp_event(esp, ESP_EVENT_MSGOUT);
1726 esp_event(esp, ESP_EVENT_MSGIN);
1730 esp_event(esp, ESP_EVENT_CMD_START);
1734 shost_printk(KERN_INFO, esp->host,
1736 esp->sreg);
1737 esp_schedule_reset(esp);
1747 struct esp_cmd_entry *ent = esp->active_cmd;
1752 if (esp->rev == ESP100)
1753 scsi_esp_cmd(esp, ESP_CMD_NULL);
1760 if (esp->ops->dma_length_limit)
1761 dma_len = esp->ops->dma_length_limit(esp, dma_addr,
1764 dma_len = esp_dma_length_limit(esp, dma_addr, dma_len);
1766 esp->data_dma_len = dma_len;
1769 shost_printk(KERN_ERR, esp->host,
1771 shost_printk(KERN_ERR, esp->host,
1775 esp_schedule_reset(esp);
1782 esp->ops->send_dma_cmd(esp, dma_addr, dma_len, dma_len,
1784 esp_event(esp, ESP_EVENT_DATA_DONE);
1788 struct esp_cmd_entry *ent = esp->active_cmd;
1792 if (esp->ops->dma_error(esp)) {
1793 shost_printk(KERN_INFO, esp->host,
1795 esp_schedule_reset(esp);
1802 esp->ops->dma_drain(esp);
1804 esp->ops->dma_invalidate(esp);
1806 if (esp->ireg != ESP_INTR_BSERV) {
1810 shost_printk(KERN_INFO, esp->host,
1812 esp_schedule_reset(esp);
1816 bytes_sent = esp_data_bytes_sent(esp, ent, cmd);
1823 esp_schedule_reset(esp);
1827 esp_advance_dma(esp, ent, cmd, bytes_sent);
1828 esp_event(esp, ESP_EVENT_CHECK_PHASE);
1833 struct esp_cmd_entry *ent = esp->active_cmd;
1835 if (esp->ireg & ESP_INTR_FDONE) {
1838 scsi_esp_cmd(esp, ESP_CMD_MOK);
1839 } else if (esp->ireg == ESP_INTR_BSERV) {
1842 esp_event(esp, ESP_EVENT_MSGIN);
1847 shost_printk(KERN_INFO, esp->host,
1850 esp_schedule_reset(esp);
1854 esp_event(esp, ESP_EVENT_FREE_BUS);
1855 esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1859 struct esp_cmd_entry *ent = esp->active_cmd;
1864 scsi_esp_cmd(esp, ESP_CMD_ESEL);
1870 esp_event_queue_full(esp, ent);
1875 esp_autosense(esp, ent);
1877 esp_cmd_is_done(esp, ent, cmd,
1887 esp->active_cmd = NULL;
1888 esp_maybe_execute_command(esp);
1890 shost_printk(KERN_INFO, esp->host,
1893 esp_schedule_reset(esp);
1896 if (esp->active_cmd)
1897 esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1901 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1906 for (i = 0; i < esp->msg_out_len; i++)
1907 printk("%02x ", esp->msg_out[i]);
1911 if (esp->rev == FASHME) {
1915 for (i = 0; i < esp->msg_out_len; i++) {
1916 esp_write8(esp->msg_out[i], ESP_FDATA);
1919 scsi_esp_cmd(esp, ESP_CMD_TI);
1921 if (esp->msg_out_len == 1) {
1922 esp_write8(esp->msg_out[0], ESP_FDATA);
1923 scsi_esp_cmd(esp, ESP_CMD_TI);
1924 } else if (esp->flags & ESP_FLAG_USE_FIFO) {
1925 for (i = 0; i < esp->msg_out_len; i++)
1926 esp_write8(esp->msg_out[i], ESP_FDATA);
1927 scsi_esp_cmd(esp, ESP_CMD_TI);
1930 memcpy(esp->command_block,
1931 esp->msg_out,
1932 esp->msg_out_len);
1934 esp->ops->send_dma_cmd(esp,
1935 esp->command_block_dma,
1936 esp->msg_out_len,
1937 esp->msg_out_len,
1942 esp_event(esp, ESP_EVENT_MSGOUT_DONE);
1946 if (esp->rev == FASHME) {
1947 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1949 if (esp->msg_out_len > 1)
1950 esp->ops->dma_invalidate(esp);
1955 if (!(esp->ireg & ESP_INTR_DC))
1956 scsi_esp_cmd(esp, ESP_CMD_NULL);
1959 esp->msg_out_len = 0;
1961 esp_event(esp, ESP_EVENT_CHECK_PHASE);
1964 if (esp->ireg & ESP_INTR_BSERV) {
1965 if (esp->rev == FASHME) {
1968 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1970 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1971 if (esp->rev == ESP100)
1972 scsi_esp_cmd(esp, ESP_CMD_NULL);
1974 scsi_esp_cmd(esp, ESP_CMD_TI);
1975 esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1978 if (esp->ireg & ESP_INTR_FDONE) {
1981 if (esp->rev == FASHME)
1982 val = esp->fifo[0];
1985 esp->msg_in[esp->msg_in_len++] = val;
1989 if (!esp_msgin_process(esp))
1990 esp->msg_in_len = 0;
1992 if (esp->rev == FASHME)
1993 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1995 scsi_esp_cmd(esp, ESP_CMD_MOK);
1998 if (esp->event == ESP_EVENT_RESET)
2001 if (esp->event != ESP_EVENT_FREE_BUS)
2002 esp_event(esp, ESP_EVENT_CHECK_PHASE);
2004 shost_printk(KERN_INFO, esp->host,
2006 esp_schedule_reset(esp);
2011 memcpy(esp->command_block, esp->cmd_bytes_ptr,
2012 esp->cmd_bytes_left);
2013 esp_send_dma_cmd(esp, esp->cmd_bytes_left, 16, ESP_CMD_TI);
2014 esp_event(esp, ESP_EVENT_CMD_DONE);
2015 esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
2018 esp->ops->dma_invalidate(esp);
2019 if (esp->ireg & ESP_INTR_BSERV) {
2020 esp_event(esp, ESP_EVENT_CHECK_PHASE);
2023 esp_schedule_reset(esp);
2027 scsi_esp_cmd(esp, ESP_CMD_RS);
2031 shost_printk(KERN_INFO, esp->host,
2032 "Unexpected event %x, resetting\n", esp->event);
2033 esp_schedule_reset(esp);
2039 static void esp_reset_cleanup_one(struct esp *esp, struct esp_cmd_entry *ent)
2043 esp_unmap_dma(esp, cmd);
2048 esp_unmap_sense(esp, ent);
2052 esp_put_ent(esp, ent);
2063 static void esp_reset_cleanup(struct esp *esp)
2068 list_for_each_entry_safe(ent, tmp, &esp->queued_cmds, list) {
2074 esp_put_ent(esp, ent);
2077 list_for_each_entry_safe(ent, tmp, &esp->active_cmds, list) {
2078 if (ent == esp->active_cmd)
2079 esp->active_cmd = NULL;
2080 esp_reset_cleanup_one(esp, ent);
2083 BUG_ON(esp->active_cmd != NULL);
2087 struct esp_target_data *tp = &esp->target[i];
2101 esp->flags &= ~ESP_FLAG_RESETTING;
2105 static void __esp_interrupt(struct esp *esp)
2113 esp->sreg = esp_read8(ESP_STATUS);
2114 esp->seqreg = esp_read8(ESP_SSTEP);
2115 esp->ireg = esp_read8(ESP_INTRPT);
2117 if (esp->flags & ESP_FLAG_RESETTING) {
2120 if (esp_check_gross_error(esp))
2123 finish_reset = esp_check_spur_intr(esp);
2128 if (esp->ireg & ESP_INTR_SR)
2132 esp_reset_cleanup(esp);
2133 if (esp->eh_reset) {
2134 complete(esp->eh_reset);
2135 esp->eh_reset = NULL;
2140 phase = (esp->sreg & ESP_STAT_PMASK);
2141 if (esp->rev == FASHME) {
2143 esp->select_state == ESP_SELECT_NONE &&
2144 esp->event != ESP_EVENT_STATUS &&
2145 esp->event != ESP_EVENT_DATA_DONE) ||
2146 (esp->ireg & ESP_INTR_RSEL)) {
2147 esp->sreg2 = esp_read8(ESP_STATUS2);
2148 if (!(esp->sreg2 & ESP_STAT2_FEMPTY) ||
2149 (esp->sreg2 & ESP_STAT2_F1BYTE))
2150 hme_read_fifo(esp);
2156 esp->sreg, esp->seqreg, esp->sreg2, esp->ireg);
2160 if (esp->ireg & (ESP_INTR_S | ESP_INTR_SATN | ESP_INTR_IC)) {
2161 shost_printk(KERN_INFO, esp->host,
2162 "unexpected IREG %02x\n", esp->ireg);
2163 if (esp->ireg & ESP_INTR_IC)
2164 esp_dump_cmd_log(esp);
2166 esp_schedule_reset(esp);
2168 if (esp->ireg & ESP_INTR_RSEL) {
2169 if (esp->active_cmd)
2170 (void) esp_finish_select(esp);
2171 intr_done = esp_reconnect(esp);
2174 if (esp->select_state != ESP_SELECT_NONE)
2175 intr_done = esp_finish_select(esp);
2179 intr_done = esp_process_event(esp);
2184 struct esp *esp = dev_id;
2188 spin_lock_irqsave(esp->host->host_lock, flags);
2190 if (esp->ops->irq_pending(esp)) {
2195 __esp_interrupt(esp);
2196 if (!(esp->flags & ESP_FLAG_QUICKIRQ_CHECK))
2198 esp->flags &= ~ESP_FLAG_QUICKIRQ_CHECK;
2201 if (esp->ops->irq_pending(esp))
2208 spin_unlock_irqrestore(esp->host->host_lock, flags);
2214 static void esp_get_revision(struct esp *esp)
2218 esp->config1 = (ESP_CONFIG1_PENABLE | (esp->scsi_id & 7));
2219 if (esp->config2 == 0) {
2220 esp->config2 = (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY);
2221 esp_write8(esp->config2, ESP_CFG2);
2226 esp->config2 = 0;
2233 esp->rev = ESP100;
2238 esp_set_all_config3(esp, 5);
2239 esp->prev_cfg3 = 5;
2240 esp_write8(esp->config2, ESP_CFG2);
2242 esp_write8(esp->prev_cfg3, ESP_CFG3);
2249 esp->rev = ESP100A;
2251 esp_set_all_config3(esp, 0);
2252 esp->prev_cfg3 = 0;
2253 esp_write8(esp->prev_cfg3, ESP_CFG3);
2258 if (esp->cfact == 0 || esp->cfact > ESP_CCF_F5) {
2259 esp->rev = FAST;
2260 esp->sync_defp = SYNC_DEFP_FAST;
2262 esp->rev = ESP236;
2267 static void esp_init_swstate(struct esp *esp)
2271 INIT_LIST_HEAD(&esp->queued_cmds);
2272 INIT_LIST_HEAD(&esp->active_cmds);
2273 INIT_LIST_HEAD(&esp->esp_cmd_pool);
2280 esp->target[i].flags = 0;
2281 esp->target[i].nego_goal_period = 0;
2282 esp->target[i].nego_goal_offset = 0;
2283 esp->target[i].nego_goal_width = 0;
2284 esp->target[i].nego_goal_tags = 0;
2289 static void esp_bootup_reset(struct esp *esp)
2294 esp->ops->reset_dma(esp);
2297 esp_reset_esp(esp);
2304 scsi_esp_cmd(esp, ESP_CMD_RS);
2307 esp_write8(esp->config1, ESP_CFG1);
2313 static void esp_set_clock_params(struct esp *esp)
2350 fhz = esp->cfreq;
2366 esp->cfact = (ccf == 8 ? 0 : ccf);
2367 esp->cfreq = fhz;
2368 esp->ccycle = ESP_HZ_TO_CYCLE(fhz);
2369 esp->ctick = ESP_TICK(ccf, esp->ccycle);
2370 esp->neg_defp = ESP_NEG_DEFP(fhz, ccf);
2371 esp->sync_defp = SYNC_DEFP_SLOW;
2388 int scsi_esp_register(struct esp *esp)
2393 if (!esp->num_tags)
2394 esp->num_tags = ESP_DEFAULT_TAGS;
2395 esp->host->transportt = esp_transport_template;
2396 esp->host->max_lun = ESP_MAX_LUN;
2397 esp->host->cmd_per_lun = 2;
2398 esp->host->unique_id = instance;
2400 esp_set_clock_params(esp);
2402 esp_get_revision(esp);
2404 esp_init_swstate(esp);
2406 esp_bootup_reset(esp);
2408 dev_printk(KERN_INFO, esp->dev, "esp%u: regs[%1p:%1p] irq[%u]\n",
2409 esp->host->unique_id, esp->regs, esp->dma_regs,
2410 esp->host->irq);
2411 dev_printk(KERN_INFO, esp->dev,
2412 "esp%u: is a %s, %u MHz (ccf=%u), SCSI ID %u\n",
2413 esp->host->unique_id, esp_chip_names[esp->rev],
2414 esp->cfreq / 1000000, esp->cfact, esp->scsi_id);
2419 err = scsi_add_host(esp->host, esp->dev);
2425 scsi_scan_host(esp->host);
2431 void scsi_esp_unregister(struct esp *esp)
2433 scsi_remove_host(esp->host);
2439 struct esp *esp = shost_priv(dev_to_shost(&starget->dev));
2440 struct esp_target_data *tp = &esp->target[starget->id];
2449 struct esp *esp = shost_priv(dev_to_shost(&starget->dev));
2450 struct esp_target_data *tp = &esp->target[starget->id];
2457 struct esp *esp = shost_priv(dev->host);
2458 struct esp_target_data *tp = &esp->target[dev->id];
2466 spi_min_period(tp->starget) = esp->min_period;
2469 if (esp->flags & ESP_FLAG_WIDE_CAPABLE)
2479 struct esp *esp = shost_priv(dev->host);
2480 struct esp_target_data *tp = &esp->target[dev->id];
2483 scsi_change_queue_depth(dev, esp->num_tags);
2503 struct esp *esp = shost_priv(cmd->device->host);
2511 spin_lock_irqsave(esp->host->host_lock, flags);
2512 shost_printk(KERN_ERR, esp->host, "Aborting command [%p:%02x]\n",
2514 ent = esp->active_cmd;
2516 shost_printk(KERN_ERR, esp->host,
2519 list_for_each_entry(ent, &esp->queued_cmds, list) {
2520 shost_printk(KERN_ERR, esp->host, "Queued command [%p:%02x]\n",
2523 list_for_each_entry(ent, &esp->active_cmds, list) {
2524 shost_printk(KERN_ERR, esp->host, " Active command [%p:%02x]\n",
2527 esp_dump_cmd_log(esp);
2528 spin_unlock_irqrestore(esp->host->host_lock, flags);
2530 spin_lock_irqsave(esp->host->host_lock, flags);
2533 list_for_each_entry(tmp, &esp->queued_cmds, list) {
2549 esp_put_ent(esp, ent);
2556 ent = esp->active_cmd;
2562 if (esp->msg_out_len)
2568 esp->msg_out[0] = ABORT_TASK_SET;
2569 esp->msg_out_len = 1;
2572 scsi_esp_cmd(esp, ESP_CMD_SATN);
2593 spin_unlock_irqrestore(esp->host->host_lock, flags);
2596 spin_lock_irqsave(esp->host->host_lock, flags);
2598 spin_unlock_irqrestore(esp->host->host_lock, flags);
2606 spin_unlock_irqrestore(esp->host->host_lock, flags);
2614 spin_unlock_irqrestore(esp->host->host_lock, flags);
2620 struct esp *esp = shost_priv(cmd->device->host);
2626 spin_lock_irqsave(esp->host->host_lock, flags);
2628 esp->eh_reset = &eh_reset;
2635 esp->flags |= ESP_FLAG_RESETTING;
2636 scsi_esp_cmd(esp, ESP_CMD_RS);
2638 spin_unlock_irqrestore(esp->host->host_lock, flags);
2643 spin_lock_irqsave(esp->host->host_lock, flags);
2644 esp->eh_reset = NULL;
2645 spin_unlock_irqrestore(esp->host->host_lock, flags);
2656 struct esp *esp = shost_priv(cmd->device->host);
2659 spin_lock_irqsave(esp->host->host_lock, flags);
2660 esp_bootup_reset(esp);
2661 esp_reset_cleanup(esp);
2662 spin_unlock_irqrestore(esp->host->host_lock, flags);
2671 return "esp";
2676 .name = "esp",
2697 struct esp *esp = shost_priv(host);
2700 if (esp->flags & ESP_FLAG_DIFFERENTIAL)
2711 struct esp *esp = shost_priv(host);
2712 struct esp_target_data *tp = &esp->target[target->id];
2714 if (esp->flags & ESP_FLAG_DISABLE_SYNC)
2724 struct esp *esp = shost_priv(host);
2725 struct esp_target_data *tp = &esp->target[target->id];
2734 struct esp *esp = shost_priv(host);
2735 struct esp_target_data *tp = &esp->target[target->id];
2797 static inline unsigned int esp_wait_for_fifo(struct esp *esp)
2810 shost_printk(KERN_ERR, esp->host, "FIFO is empty. sreg [%02x]\n",
2815 static inline int esp_wait_for_intr(struct esp *esp)
2820 esp->sreg = esp_read8(ESP_STATUS);
2821 if (esp->sreg & ESP_STAT_INTR)
2827 shost_printk(KERN_ERR, esp->host, "IRQ timeout. sreg [%02x]\n",
2828 esp->sreg);
2834 void esp_send_pio_cmd(struct esp *esp, u32 addr, u32 esp_count,
2837 u8 phase = esp->sreg & ESP_STAT_PMASK;
2840 esp->send_cmd_error = 0;
2846 scsi_esp_cmd(esp, cmd);
2849 if (!esp_wait_for_fifo(esp))
2852 *dst++ = readb(esp->fifo_reg);
2858 if (esp_wait_for_intr(esp)) {
2859 esp->send_cmd_error = 1;
2863 if ((esp->sreg & ESP_STAT_PMASK) != phase)
2866 esp->ireg = esp_read8(ESP_INTRPT);
2867 if (esp->ireg & mask) {
2868 esp->send_cmd_error = 1;
2881 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
2885 writesb(esp->fifo_reg, src, n);
2889 scsi_esp_cmd(esp, cmd);
2892 if (esp_wait_for_intr(esp)) {
2893 esp->send_cmd_error = 1;
2897 if ((esp->sreg & ESP_STAT_PMASK) != phase)
2900 esp->ireg = esp_read8(ESP_INTRPT);
2901 if (esp->ireg & ~ESP_INTR_BSERV) {
2902 esp->send_cmd_error = 1;
2911 writesb(esp->fifo_reg, src, n);
2919 esp->send_cmd_residual = esp_count;
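
Side note on an idiom that recurs in the listing: scsi_esp_cmd() and esp_event() (source lines 140 and 175 above) both advance the driver's event log with (idx + 1) & (ESP_EVENT_LOG_SZ - 1), which wraps correctly only because the log size is a power of two, so the AND acts as a cheap modulo. Below is a minimal standalone sketch of that power-of-two ring-buffer pattern; LOG_SZ, struct log_ent and log_put() are illustrative stand-ins, not the driver's real esp_event_log definitions.

#include <stdio.h>

#define LOG_SZ 32                          /* must be a power of two for the mask trick */

struct log_ent {
        unsigned char type;                /* e.g. command vs. event */
        unsigned char val;
};

static struct log_ent event_log[LOG_SZ];
static int event_cur;

static void log_put(unsigned char type, unsigned char val)
{
        struct log_ent *p = &event_log[event_cur];

        p->type = type;
        p->val  = val;

        /* AND with (size - 1) wraps the index without a divide;
         * this mirrors the (idx + 1) & (ESP_EVENT_LOG_SZ - 1) step above. */
        event_cur = (event_cur + 1) & (LOG_SZ - 1);
}

int main(void)
{
        for (int i = 0; i < 40; i++)       /* more entries than slots: oldest are overwritten */
                log_put(0, (unsigned char)i);

        printf("next slot after 40 entries: %d\n", event_cur);   /* prints 8 (40 & 31) */
        return 0;
}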