1// SPDX-License-Identifier: GPL-2.0-or-later
2/* -*- mode: c; c-basic-offset: 8 -*- */
3
4/* NCR (or Symbios) 53c700 and 53c700-66 Driver
5 *
6 * Copyright (C) 2001 by James.Bottomley@HansenPartnership.com
7**-----------------------------------------------------------------------------
8**
9**
10**-----------------------------------------------------------------------------
11 */
12
13/* Notes:
14 *
15 * This driver is designed exclusively for these chips (virtually the
16 * earliest of the scripts engine chips).  They need their own drivers
17 * because they are missing so many of the scripts and snazzy register
18 * features of their elder brothers (the 710, 720 and 770).
19 *
20 * The 700 is the lowliest of the line; it can only do async SCSI.
21 * The 700-66 can at least do synchronous SCSI up to 10MHz.
22 *
23 * The 700 chip has no host bus interface logic of its own.  However,
24 * it is usually mapped to a location with well defined register
25 * offsets.  Therefore, if you can determine the base address and the
26 * irq your board incorporating this chip uses, you can probably use
27 * this driver to run it (although you'll probably have to write a
28 * minimal wrapper for the purpose---see the NCR_D700 driver for
29 * details about how to do this).
30 *
31 *
32 * TODO List:
33 *
34 * 1. Better statistics in the proc fs
35 *
36 * 2. Implement message queue (queues SCSI messages like commands) and make
37 *    the abort and device reset functions use them.
38 * */
39
40/* CHANGELOG
41 *
42 * Version 2.8
43 *
44 * Fixed a bad bug affecting tag starvation processing (previously the
45 * driver would hang the system if too many tags starved).  Also fixed
46 * a bad bug having to do with 10 byte command processing and REQUEST
47 * SENSE (the command would loop forever getting a transfer length
48 * mismatch in the CMD phase).
49 *
50 * Version 2.7
51 *
52 * Fixed scripts problem which caused certain devices (notably CDRWs)
53 * to hang on initial INQUIRY.  Updated NCR_700_readl/writel to use
54 * __raw_readl/writel for parisc compatibility (Thomas
55 * Bogendoerfer). Added missing SCp->request_bufflen initialisation
56 * for sense requests (Ryan Bradetich).
57 *
58 * Version 2.6
59 *
60 * Following test of the 64 bit parisc kernel by Richard Hirst,
61 * several problems have now been corrected.  Also adds support for
62 * consistent memory allocation.
63 *
64 * Version 2.5
65 *
66 * More compatibility changes for 710 (now actually works).  Enhanced
67 * support for odd clock speeds which constrain SDTR negotiations.
68 * Correct cacheline separation for scsi messages and status for
69 * incoherent architectures.  Use of the pci mapping functions on
70 * buffers to begin support for 64 bit drivers.
71 *
72 * Version 2.4
73 *
74 * Added support for the 53c710 chip (in 53c700 emulation mode only---no
75 * special 53c710 instructions or registers are used).
76 *
77 * Version 2.3
78 *
79 * More endianness/cache coherency changes.
80 *
81 * Better bad device handling (handles devices lying about tag
82 * queueing support and devices which fail to provide sense data on
83 * contingent allegiance conditions)
84 *
85 * Many thanks to Richard Hirst <rhirst@linuxcare.com> for patiently
86 * debugging this driver on the parisc architecture and suggesting
87 * many improvements and bug fixes.
88 *
89 * Thanks also go to Linuxcare Inc. for providing several PARISC
90 * machines for me to debug the driver on.
91 *
92 * Version 2.2
93 *
94 * Made the driver mem or io mapped; added endian invariance; added
95 * dma cache flushing operations for architectures which need it;
96 * added support for more varied clocking speeds.
97 *
98 * Version 2.1
99 *
100 * Initial modularisation from the D700.  See NCR_D700.c for the rest of
101 * the changelog.
102 * */
103#define NCR_700_VERSION "2.8"
104
105#include <linux/kernel.h>
106#include <linux/types.h>
107#include <linux/string.h>
108#include <linux/slab.h>
109#include <linux/ioport.h>
110#include <linux/delay.h>
111#include <linux/spinlock.h>
112#include <linux/completion.h>
113#include <linux/init.h>
114#include <linux/proc_fs.h>
115#include <linux/blkdev.h>
116#include <linux/module.h>
117#include <linux/interrupt.h>
118#include <linux/device.h>
119#include <linux/pgtable.h>
120#include <asm/dma.h>
121#include <asm/io.h>
122#include <asm/byteorder.h>
123
124#include <scsi/scsi.h>
125#include <scsi/scsi_cmnd.h>
126#include <scsi/scsi_dbg.h>
127#include <scsi/scsi_eh.h>
128#include <scsi/scsi_host.h>
129#include <scsi/scsi_tcq.h>
130#include <scsi/scsi_transport.h>
131#include <scsi/scsi_transport_spi.h>
132
133#include "53c700.h"
134
135/* NOTE: For 64 bit drivers there are points in the code where we use
136 * a non-dereferenceable pointer to point to a structure in dma-able
137 * memory (which is 32 bits) so that we can use all of the structure
138 * operations but take the address at the end.  This macro allows us
139 * to truncate the 64 bit pointer down to 32 bits without the compiler
140 * complaining */
141#define to32bit(x)	((__u32)((unsigned long)(x)))
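/* For example, to32bit(&slot->pSG[0].ins) later in this file yields the
 * 32 bit bus address of a slot's first SG instruction: slot->pSG is
 * deliberately initialised in NCR_700_detect() to a bus address rather
 * than a dereferenceable kernel pointer, so truncating it loses nothing. */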
142
143#ifdef NCR_700_DEBUG
144#define STATIC
145#else
146#define STATIC static
147#endif
148
149MODULE_AUTHOR("James Bottomley");
150MODULE_DESCRIPTION("53c700 and 53c700-66 Driver");
151MODULE_LICENSE("GPL");
152
153/* This is the script */
154#include "53c700_d.h"
155
156
157STATIC int NCR_700_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *);
158STATIC int NCR_700_abort(struct scsi_cmnd * SCpnt);
159STATIC int NCR_700_host_reset(struct scsi_cmnd * SCpnt);
160STATIC void NCR_700_chip_setup(struct Scsi_Host *host);
161STATIC void NCR_700_chip_reset(struct Scsi_Host *host);
162STATIC int NCR_700_slave_alloc(struct scsi_device *SDpnt);
163STATIC int NCR_700_slave_configure(struct scsi_device *SDpnt);
164STATIC void NCR_700_slave_destroy(struct scsi_device *SDpnt);
165static int NCR_700_change_queue_depth(struct scsi_device *SDpnt, int depth);
166
167STATIC struct device_attribute *NCR_700_dev_attrs[];
168
169STATIC struct scsi_transport_template *NCR_700_transport_template = NULL;
170
171static char *NCR_700_phase[] = {
172	"",
173	"after selection",
174	"before command phase",
175	"after command phase",
176	"after status phase",
177	"after data in phase",
178	"after data out phase",
179	"during data phase",
180};
181
182static char *NCR_700_condition[] = {
183	"",
184	"NOT MSG_OUT",
185	"UNEXPECTED PHASE",
186	"NOT MSG_IN",
187	"UNEXPECTED MSG",
188	"MSG_IN",
189	"SDTR_MSG RECEIVED",
190	"REJECT_MSG RECEIVED",
191	"DISCONNECT_MSG RECEIVED",
192	"MSG_OUT",
193	"DATA_IN",
194
195};
196
197static char *NCR_700_fatal_messages[] = {
198	"unexpected message after reselection",
199	"still MSG_OUT after message injection",
200	"not MSG_IN after selection",
201	"Illegal message length received",
202};
203
204static char *NCR_700_SBCL_bits[] = {
205	"IO ",
206	"CD ",
207	"MSG ",
208	"ATN ",
209	"SEL ",
210	"BSY ",
211	"ACK ",
212	"REQ ",
213};
214
215static char *NCR_700_SBCL_to_phase[] = {
216	"DATA_OUT",
217	"DATA_IN",
218	"CMD_OUT",
219	"STATE",
220	"ILLEGAL PHASE",
221	"ILLEGAL PHASE",
222	"MSG OUT",
223	"MSG IN",
224};
225
226/* This translates the SDTR message offset and period to a value
227 * which can be loaded into the SXFER_REG.
228 *
229 * NOTE: According to SCSI-2, the true transfer period (in ns) is
230 *       actually four times this period value */
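/* Worked example (illustrative values, not taken from any particular
 * board): with a 50MHz sync_clock, an SDTR period of 25 (i.e. 100ns) and
 * an offset of 8, XFERP = (100 * 50)/1000 - 4 = 1, so the function below
 * returns (8 & 0x0f) | (1 & 0x07) << 4 == 0x18 (assuming 1 >= min_xferp). */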
231static inline __u8
232NCR_700_offset_period_to_sxfer(struct NCR_700_Host_Parameters *hostdata,
233			       __u8 offset, __u8 period)
234{
235	int XFERP;
236
237	__u8 min_xferp = (hostdata->chip710
238			  ? NCR_710_MIN_XFERP : NCR_700_MIN_XFERP);
239	__u8 max_offset = (hostdata->chip710
240			   ? NCR_710_MAX_OFFSET : NCR_700_MAX_OFFSET);
241
242	if(offset == 0)
243		return 0;
244
245	if(period < hostdata->min_period) {
246		printk(KERN_WARNING "53c700: Period %dns is less than this chip's minimum, setting to %dns\n", period*4, hostdata->min_period*4);
247		period = hostdata->min_period;
248	}
249	XFERP = (period*4 * hostdata->sync_clock)/1000 - 4;
250	if(offset > max_offset) {
251		printk(KERN_WARNING "53c700: Offset %d exceeds chip maximum, setting to %d\n",
252		       offset, max_offset);
253		offset = max_offset;
254	}
255	if(XFERP < min_xferp) {
256		XFERP =  min_xferp;
257	}
258	return (offset & 0x0f) | (XFERP & 0x07)<<4;
259}
260
261static inline __u8
262NCR_700_get_SXFER(struct scsi_device *SDp)
263{
264	struct NCR_700_Host_Parameters *hostdata =
265		(struct NCR_700_Host_Parameters *)SDp->host->hostdata[0];
266
267	return NCR_700_offset_period_to_sxfer(hostdata,
268					      spi_offset(SDp->sdev_target),
269					      spi_period(SDp->sdev_target));
270}
271
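/* Translate a CPU pointer within the coherent script/slot block allocated
 * in NCR_700_detect() into the corresponding bus address, using pScript
 * as the base of the block. */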
272static inline dma_addr_t virt_to_dma(struct NCR_700_Host_Parameters *h, void *p)
273{
274	return h->pScript + ((uintptr_t)p - (uintptr_t)h->script);
275}
276
277static inline void dma_sync_to_dev(struct NCR_700_Host_Parameters *h,
278		void *addr, size_t size)
279{
280	if (h->noncoherent)
281		dma_sync_single_for_device(h->dev, virt_to_dma(h, addr),
282					   size, DMA_BIDIRECTIONAL);
283}
284
285static inline void dma_sync_from_dev(struct NCR_700_Host_Parameters *h,
286		void *addr, size_t size)
287{
288	if (h->noncoherent)
289		dma_sync_single_for_cpu(h->dev, virt_to_dma(h, addr), size,
290					DMA_BIDIRECTIONAL);
291}
292
293struct Scsi_Host *
294NCR_700_detect(struct scsi_host_template *tpnt,
295	       struct NCR_700_Host_Parameters *hostdata, struct device *dev)
296{
297	dma_addr_t pScript, pSlots;
298	__u8 *memory;
299	__u32 *script;
300	struct Scsi_Host *host;
301	static int banner = 0;
302	int j;
303
304	if(tpnt->sdev_attrs == NULL)
305		tpnt->sdev_attrs = NCR_700_dev_attrs;
306
307	memory = dma_alloc_coherent(dev, TOTAL_MEM_SIZE, &pScript, GFP_KERNEL);
308	if (!memory) {
309		hostdata->noncoherent = 1;
310		memory = dma_alloc_noncoherent(dev, TOTAL_MEM_SIZE, &pScript,
311					 DMA_BIDIRECTIONAL, GFP_KERNEL);
312	}
313	if (!memory) {
314		printk(KERN_ERR "53c700: Failed to allocate memory for driver, detaching\n");
315		return NULL;
316	}
317
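	/* The single DMA-able allocation is carved up using the *_OFFSET
	 * constants from 53c700.h: the script itself at the start, then the
	 * message and status buffers, then the command slots. */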
318	script = (__u32 *)memory;
319	hostdata->msgin = memory + MSGIN_OFFSET;
320	hostdata->msgout = memory + MSGOUT_OFFSET;
321	hostdata->status = memory + STATUS_OFFSET;
322	hostdata->slots = (struct NCR_700_command_slot *)(memory + SLOTS_OFFSET);
323	hostdata->dev = dev;
324
325	pSlots = pScript + SLOTS_OFFSET;
326
327	/* Fill in the missing routines from the host template */
328	tpnt->queuecommand = NCR_700_queuecommand;
329	tpnt->eh_abort_handler = NCR_700_abort;
330	tpnt->eh_host_reset_handler = NCR_700_host_reset;
331	tpnt->can_queue = NCR_700_COMMAND_SLOTS_PER_HOST;
332	tpnt->sg_tablesize = NCR_700_SG_SEGMENTS;
333	tpnt->cmd_per_lun = NCR_700_CMD_PER_LUN;
334	tpnt->slave_configure = NCR_700_slave_configure;
335	tpnt->slave_destroy = NCR_700_slave_destroy;
336	tpnt->slave_alloc = NCR_700_slave_alloc;
337	tpnt->change_queue_depth = NCR_700_change_queue_depth;
338
339	if(tpnt->name == NULL)
340		tpnt->name = "53c700";
341	if(tpnt->proc_name == NULL)
342		tpnt->proc_name = "53c700";
343
344	host = scsi_host_alloc(tpnt, 4);
345	if (!host)
346		return NULL;
347	memset(hostdata->slots, 0, sizeof(struct NCR_700_command_slot)
348	       * NCR_700_COMMAND_SLOTS_PER_HOST);
349	for (j = 0; j < NCR_700_COMMAND_SLOTS_PER_HOST; j++) {
350		dma_addr_t offset = (dma_addr_t)((unsigned long)&hostdata->slots[j].SG[0]
351					  - (unsigned long)&hostdata->slots[0].SG[0]);
352		hostdata->slots[j].pSG = (struct NCR_700_SG_List *)((unsigned long)(pSlots + offset));
353		if(j == 0)
354			hostdata->free_list = &hostdata->slots[j];
355		else
356			hostdata->slots[j-1].ITL_forw = &hostdata->slots[j];
357		hostdata->slots[j].state = NCR_700_SLOT_FREE;
358	}
359
360	for (j = 0; j < ARRAY_SIZE(SCRIPT); j++)
361		script[j] = bS_to_host(SCRIPT[j]);
362
363	/* adjust all labels to be bus physical */
364	for (j = 0; j < PATCHES; j++)
365		script[LABELPATCHES[j]] = bS_to_host(pScript + SCRIPT[LABELPATCHES[j]]);
366	/* now patch up fixed addresses. */
367	script_patch_32(hostdata, script, MessageLocation,
368			pScript + MSGOUT_OFFSET);
369	script_patch_32(hostdata, script, StatusAddress,
370			pScript + STATUS_OFFSET);
371	script_patch_32(hostdata, script, ReceiveMsgAddress,
372			pScript + MSGIN_OFFSET);
373
374	hostdata->script = script;
375	hostdata->pScript = pScript;
376	dma_sync_single_for_device(hostdata->dev, pScript, sizeof(SCRIPT), DMA_TO_DEVICE);
377	hostdata->state = NCR_700_HOST_FREE;
378	hostdata->cmd = NULL;
379	host->max_id = 8;
380	host->max_lun = NCR_700_MAX_LUNS;
381	BUG_ON(NCR_700_transport_template == NULL);
382	host->transportt = NCR_700_transport_template;
383	host->unique_id = (unsigned long)hostdata->base;
384	hostdata->eh_complete = NULL;
385	host->hostdata[0] = (unsigned long)hostdata;
386	/* kick the chip */
387	NCR_700_writeb(0xff, host, CTEST9_REG);
388	if (hostdata->chip710)
389		hostdata->rev = (NCR_700_readb(host, CTEST8_REG)>>4) & 0x0f;
390	else
391		hostdata->rev = (NCR_700_readb(host, CTEST7_REG)>>4) & 0x0f;
392	hostdata->fast = (NCR_700_readb(host, CTEST9_REG) == 0);
393	if (banner == 0) {
394		printk(KERN_NOTICE "53c700: Version " NCR_700_VERSION " By James.Bottomley@HansenPartnership.com\n");
395		banner = 1;
396	}
397	printk(KERN_NOTICE "scsi%d: %s rev %d %s\n", host->host_no,
398	       hostdata->chip710 ? "53c710" :
399	       (hostdata->fast ? "53c700-66" : "53c700"),
400	       hostdata->rev, hostdata->differential ?
401	       "(Differential)" : "");
402	/* reset the chip */
403	NCR_700_chip_reset(host);
404
405	if (scsi_add_host(host, dev)) {
406		dev_printk(KERN_ERR, dev, "53c700: scsi_add_host failed\n");
407		scsi_host_put(host);
408		return NULL;
409	}
410
411	spi_signalling(host) = hostdata->differential ? SPI_SIGNAL_HVD :
412		SPI_SIGNAL_SE;
413
414	return host;
415}
416
417int
418NCR_700_release(struct Scsi_Host *host)
419{
420	struct NCR_700_Host_Parameters *hostdata =
421		(struct NCR_700_Host_Parameters *)host->hostdata[0];
422
423	if (hostdata->noncoherent)
424		dma_free_noncoherent(hostdata->dev, TOTAL_MEM_SIZE,
425				hostdata->script, hostdata->pScript,
426				DMA_BIDIRECTIONAL);
427	else
428		dma_free_coherent(hostdata->dev, TOTAL_MEM_SIZE,
429				  hostdata->script, hostdata->pScript);
430	return 1;
431}
432
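/* Build the one byte IDENTIFY message: IDENTIFY_BASE (0x80), the
 * disconnect-allowed bit (0x40) if requested, and the LUN in the low
 * bits.  For instance NCR_700_identify(1, 2) yields 0xc2 (LUN 2,
 * disconnects allowed). */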
433static inline __u8
434NCR_700_identify(int can_disconnect, __u8 lun)
435{
436	return IDENTIFY_BASE |
437		((can_disconnect) ? 0x40 : 0) |
438		(lun & NCR_700_LUN_MASK);
439}
440
441/*
442 * Function : static int data_residual (Scsi_Host *host)
443 *
444 * Purpose : return residual data count of what's in the chip.  If you
445 * really want to know what this function is doing, it's almost a
446 * direct transcription of the algorithm described in the 53c710
447 * guide, except that the DBC and DFIFO registers are only 6 bits
448 * wide on a 53c700.
449 *
450 * Inputs : host - SCSI host */
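/* The count returned is the number of bytes still in flight inside the
 * chip: bytes fetched by DMA but not yet sent on the bus (for a send), or
 * bytes taken off the bus but not yet stored to memory (for a receive). */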
451static inline int
452NCR_700_data_residual (struct Scsi_Host *host) {
453	struct NCR_700_Host_Parameters *hostdata =
454		(struct NCR_700_Host_Parameters *)host->hostdata[0];
455	int count, synchronous = 0;
456	unsigned int ddir;
457
458	if(hostdata->chip710) {
459		count = ((NCR_700_readb(host, DFIFO_REG) & 0x7f) -
460			 (NCR_700_readl(host, DBC_REG) & 0x7f)) & 0x7f;
461	} else {
462		count = ((NCR_700_readb(host, DFIFO_REG) & 0x3f) -
463			 (NCR_700_readl(host, DBC_REG) & 0x3f)) & 0x3f;
464	}
465
466	if(hostdata->fast)
467		synchronous = NCR_700_readb(host, SXFER_REG) & 0x0f;
468
469	/* get the data direction */
470	ddir = NCR_700_readb(host, CTEST0_REG) & 0x01;
471
472	if (ddir) {
473		/* Receive */
474		if (synchronous)
475			count += (NCR_700_readb(host, SSTAT2_REG) & 0xf0) >> 4;
476		else
477			if (NCR_700_readb(host, SSTAT1_REG) & SIDL_REG_FULL)
478				++count;
479	} else {
480		/* Send */
481		__u8 sstat = NCR_700_readb(host, SSTAT1_REG);
482		if (sstat & SODL_REG_FULL)
483			++count;
484		if (synchronous && (sstat & SODR_REG_FULL))
485			++count;
486	}
487#ifdef NCR_700_DEBUG
488	if(count)
489		printk("RESIDUAL IS %d (ddir %d)\n", count, ddir);
490#endif
491	return count;
492}
493
494/* print out the SCSI wires and corresponding phase from the SBCL register
495 * in the chip */
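/* NOTE: this returns a pointer to a single static buffer, so it is not
 * re-entrant; it is only used from the diagnostic printks in the
 * interrupt path, which run under the host lock. */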
496static inline char *
497sbcl_to_string(__u8 sbcl)
498{
499	int i;
500	static char ret[256];
501
502	ret[0]='\0';
503	for(i=0; i<8; i++) {
504		if((1<<i) & sbcl)
505			strcat(ret, NCR_700_SBCL_bits[i]);
506	}
507	strcat(ret, NCR_700_SBCL_to_phase[sbcl & 0x07]);
508	return ret;
509}
510
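/* Convert a one-hot SCSI ID bitmap (as latched from the bus on
 * reselection) into the numeric ID of the lowest bit set; returns 8 if
 * no bit is set at all. */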
511static inline __u8
512bitmap_to_number(__u8 bitmap)
513{
514	__u8 i;
515
516	for(i=0; i<8 && !(bitmap &(1<<i)); i++)
517		;
518	return i;
519}
520
521/* Pull a slot off the free list */
522STATIC struct NCR_700_command_slot *
523find_empty_slot(struct NCR_700_Host_Parameters *hostdata)
524{
525	struct NCR_700_command_slot *slot = hostdata->free_list;
526
527	if(slot == NULL) {
528		/* sanity check */
529		if(hostdata->command_slot_count != NCR_700_COMMAND_SLOTS_PER_HOST)
530			printk(KERN_ERR "SLOTS FULL, but count is %d, should be %d\n", hostdata->command_slot_count, NCR_700_COMMAND_SLOTS_PER_HOST);
531		return NULL;
532	}
533
534	if(slot->state != NCR_700_SLOT_FREE)
535		/* should panic! */
536		printk(KERN_ERR "BUSY SLOT ON FREE LIST!!!\n");
537
538
539	hostdata->free_list = slot->ITL_forw;
540	slot->ITL_forw = NULL;
541
542
543	/* NOTE: set the state to busy here, not queued, since this
544	 * indicates the slot is in use and cannot be run by the IRQ
545	 * finish routine.  If we cannot queue the command when it
546	 * is properly built, we then change to NCR_700_SLOT_QUEUED */
547	slot->state = NCR_700_SLOT_BUSY;
548	slot->flags = 0;
549	hostdata->command_slot_count++;
550
551	return slot;
552}
553
554STATIC void
555free_slot(struct NCR_700_command_slot *slot,
556	  struct NCR_700_Host_Parameters *hostdata)
557{
558	if((slot->state & NCR_700_SLOT_MASK) != NCR_700_SLOT_MAGIC) {
559		printk(KERN_ERR "53c700: SLOT %p is not MAGIC!!!\n", slot);
560	}
561	if(slot->state == NCR_700_SLOT_FREE) {
562		printk(KERN_ERR "53c700: SLOT %p is FREE!!!\n", slot);
563	}
564
565	slot->resume_offset = 0;
566	slot->cmnd = NULL;
567	slot->state = NCR_700_SLOT_FREE;
568	slot->ITL_forw = hostdata->free_list;
569	hostdata->free_list = slot;
570	hostdata->command_slot_count--;
571}
572
573
574/* This routine really does very little.  The command is indexed on
575   the ITL and (if tagged) the ITLQ lists in _queuecommand */
576STATIC void
577save_for_reselection(struct NCR_700_Host_Parameters *hostdata,
578		     struct scsi_cmnd *SCp, __u32 dsp)
579{
580	/* It's just possible that this gets executed twice */
581	if(SCp != NULL) {
582		struct NCR_700_command_slot *slot =
583			(struct NCR_700_command_slot *)SCp->host_scribble;
584
585		slot->resume_offset = dsp;
586	}
587	hostdata->state = NCR_700_HOST_FREE;
588	hostdata->cmd = NULL;
589}
590
591STATIC inline void
592NCR_700_unmap(struct NCR_700_Host_Parameters *hostdata, struct scsi_cmnd *SCp,
593	      struct NCR_700_command_slot *slot)
594{
595	if(SCp->sc_data_direction != DMA_NONE &&
596	   SCp->sc_data_direction != DMA_BIDIRECTIONAL)
597		scsi_dma_unmap(SCp);
598}
599
600STATIC inline void
601NCR_700_scsi_done(struct NCR_700_Host_Parameters *hostdata,
602	       struct scsi_cmnd *SCp, int result)
603{
604	hostdata->state = NCR_700_HOST_FREE;
605	hostdata->cmd = NULL;
606
607	if(SCp != NULL) {
608		struct NCR_700_command_slot *slot =
609			(struct NCR_700_command_slot *)SCp->host_scribble;
610
611		dma_unmap_single(hostdata->dev, slot->pCmd,
612				 MAX_COMMAND_SIZE, DMA_TO_DEVICE);
613		if (slot->flags == NCR_700_FLAG_AUTOSENSE) {
614			char *cmnd = NCR_700_get_sense_cmnd(SCp->device);
615
616			dma_unmap_single(hostdata->dev, slot->dma_handle,
617					 SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
618			/* restore the old result if the request sense was
619			 * successful */
620			if (result == 0)
621				result = cmnd[7];
622			/* restore the original length */
623			SCp->cmd_len = cmnd[8];
624		} else
625			NCR_700_unmap(hostdata, SCp, slot);
626
627		free_slot(slot, hostdata);
628#ifdef NCR_700_DEBUG
629		if(NCR_700_get_depth(SCp->device) == 0 ||
630		   NCR_700_get_depth(SCp->device) > SCp->device->queue_depth)
631			printk(KERN_ERR "Invalid depth in NCR_700_scsi_done(): %d\n",
632			       NCR_700_get_depth(SCp->device));
633#endif /* NCR_700_DEBUG */
634		NCR_700_set_depth(SCp->device, NCR_700_get_depth(SCp->device) - 1);
635
636		SCp->host_scribble = NULL;
637		SCp->result = result;
638		SCp->scsi_done(SCp);
639	} else {
640		printk(KERN_ERR "53c700: SCSI DONE HAS NULL SCp\n");
641	}
642}
643
644
645STATIC void
646NCR_700_internal_bus_reset(struct Scsi_Host *host)
647{
648	/* Bus reset */
649	NCR_700_writeb(ASSERT_RST, host, SCNTL1_REG);
650	udelay(50);
651	NCR_700_writeb(0, host, SCNTL1_REG);
652
653}
654
655STATIC void
656NCR_700_chip_setup(struct Scsi_Host *host)
657{
658	struct NCR_700_Host_Parameters *hostdata =
659		(struct NCR_700_Host_Parameters *)host->hostdata[0];
660	__u8 min_period;
661	__u8 min_xferp = (hostdata->chip710 ? NCR_710_MIN_XFERP : NCR_700_MIN_XFERP);
662
663	if(hostdata->chip710) {
664		__u8 burst_disable = 0;
665		__u8 burst_length = 0;
666
667		switch (hostdata->burst_length) {
668			case 1:
669			        burst_length = BURST_LENGTH_1;
670			        break;
671			case 2:
672			        burst_length = BURST_LENGTH_2;
673			        break;
674			case 4:
675			        burst_length = BURST_LENGTH_4;
676			        break;
677			case 8:
678			        burst_length = BURST_LENGTH_8;
679			        break;
680			default:
681			        burst_disable = BURST_DISABLE;
682			        break;
683		}
684		hostdata->dcntl_extra |= COMPAT_700_MODE;
685
686		NCR_700_writeb(hostdata->dcntl_extra, host, DCNTL_REG);
687		NCR_700_writeb(burst_length | hostdata->dmode_extra,
688			       host, DMODE_710_REG);
689		NCR_700_writeb(burst_disable | hostdata->ctest7_extra |
690			       (hostdata->differential ? DIFF : 0),
691			       host, CTEST7_REG);
692		NCR_700_writeb(BTB_TIMER_DISABLE, host, CTEST0_REG);
693		NCR_700_writeb(FULL_ARBITRATION | ENABLE_PARITY | PARITY
694			       | AUTO_ATN, host, SCNTL0_REG);
695	} else {
696		NCR_700_writeb(BURST_LENGTH_8 | hostdata->dmode_extra,
697			       host, DMODE_700_REG);
698		NCR_700_writeb(hostdata->differential ?
699			       DIFF : 0, host, CTEST7_REG);
700		if(hostdata->fast) {
701			/* this is for 700-66, does nothing on 700 */
702			NCR_700_writeb(LAST_DIS_ENBL | ENABLE_ACTIVE_NEGATION
703				       | GENERATE_RECEIVE_PARITY, host,
704				       CTEST8_REG);
705		} else {
706			NCR_700_writeb(FULL_ARBITRATION | ENABLE_PARITY
707				       | PARITY | AUTO_ATN, host, SCNTL0_REG);
708		}
709	}
710
711	NCR_700_writeb(1 << host->this_id, host, SCID_REG);
712	NCR_700_writeb(0, host, SBCL_REG);
713	NCR_700_writeb(ASYNC_OPERATION, host, SXFER_REG);
714
715	NCR_700_writeb(PHASE_MM_INT | SEL_TIMEOUT_INT | GROSS_ERR_INT | UX_DISC_INT
716	     | RST_INT | PAR_ERR_INT | SELECT_INT, host, SIEN_REG);
717
718	NCR_700_writeb(ABORT_INT | INT_INST_INT | ILGL_INST_INT, host, DIEN_REG);
719	NCR_700_writeb(ENABLE_SELECT, host, SCNTL1_REG);
720	if(hostdata->clock > 75) {
721		printk(KERN_ERR "53c700: Clock speed %dMHz is too high: 75MHz is the maximum this chip can be driven at\n", hostdata->clock);
722		/* do the best we can, but the async clock will be out
723		 * of spec: sync divider 2, async divider 3 */
724		DEBUG(("53c700: sync 2 async 3\n"));
725		NCR_700_writeb(SYNC_DIV_2_0, host, SBCL_REG);
726		NCR_700_writeb(ASYNC_DIV_3_0 | hostdata->dcntl_extra, host, DCNTL_REG);
727		hostdata->sync_clock = hostdata->clock/2;
728	} else	if(hostdata->clock > 50  && hostdata->clock <= 75) {
729		/* sync divider 1.5, async divider 3 */
730		DEBUG(("53c700: sync 1.5 async 3\n"));
731		NCR_700_writeb(SYNC_DIV_1_5, host, SBCL_REG);
732		NCR_700_writeb(ASYNC_DIV_3_0 | hostdata->dcntl_extra, host, DCNTL_REG);
733		hostdata->sync_clock = hostdata->clock*2;
734		hostdata->sync_clock /= 3;
735
736	} else if(hostdata->clock > 37 && hostdata->clock <= 50) {
737		/* sync divider 1, async divider 2 */
738		DEBUG(("53c700: sync 1 async 2\n"));
739		NCR_700_writeb(SYNC_DIV_1_0, host, SBCL_REG);
740		NCR_700_writeb(ASYNC_DIV_2_0 | hostdata->dcntl_extra, host, DCNTL_REG);
741		hostdata->sync_clock = hostdata->clock;
742	} else if(hostdata->clock > 25 && hostdata->clock <=37) {
743		/* sync divider 1, async divider 1.5 */
744		DEBUG(("53c700: sync 1 async 1.5\n"));
745		NCR_700_writeb(SYNC_DIV_1_0, host, SBCL_REG);
746		NCR_700_writeb(ASYNC_DIV_1_5 | hostdata->dcntl_extra, host, DCNTL_REG);
747		hostdata->sync_clock = hostdata->clock;
748	} else {
749		DEBUG(("53c700: sync 1 async 1\n"));
750		NCR_700_writeb(SYNC_DIV_1_0, host, SBCL_REG);
751		NCR_700_writeb(ASYNC_DIV_1_0 | hostdata->dcntl_extra, host, DCNTL_REG);
752		/* sync divider 1, async divider 1 */
753		hostdata->sync_clock = hostdata->clock;
754	}
755	/* Calculate the actual minimum period that can be supported
756	 * by our synchronous clock speed.  See the 710 manual for
757	 * exact details of this calculation which is based on a
758	 * setting of the SXFER register */
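	/* e.g. (illustrative numbers only) a 40MHz sync_clock with a
	 * min_xferp of 4 gives 1000*(4+4)/(4*40) = 50, i.e. a 200ns
	 * minimum period in SDTR units of 4ns */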
759	min_period = 1000*(4+min_xferp)/(4*hostdata->sync_clock);
760	hostdata->min_period = NCR_700_MIN_PERIOD;
761	if(min_period > NCR_700_MIN_PERIOD)
762		hostdata->min_period = min_period;
763}
764
765STATIC void
766NCR_700_chip_reset(struct Scsi_Host *host)
767{
768	struct NCR_700_Host_Parameters *hostdata =
769		(struct NCR_700_Host_Parameters *)host->hostdata[0];
770	if(hostdata->chip710) {
771		NCR_700_writeb(SOFTWARE_RESET_710, host, ISTAT_REG);
772		udelay(100);
773
774		NCR_700_writeb(0, host, ISTAT_REG);
775	} else {
776		NCR_700_writeb(SOFTWARE_RESET, host, DCNTL_REG);
777		udelay(100);
778
779		NCR_700_writeb(0, host, DCNTL_REG);
780	}
781
782	mdelay(1000);
783
784	NCR_700_chip_setup(host);
785}
786
787/* The heart of the message processing engine is that the instruction
788 * immediately after the INT is the normal case (and so must be CLEAR
789 * ACK).  If we want to do something else, we call that routine in
790 * scripts and set temp to be the normal case + 8 (skipping the CLEAR
791 * ACK) so that the routine returns correctly to resume its activity
792 * */
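/* (Each SCRIPTS instruction used around these INTs is two 32 bit words,
 * i.e. 8 bytes, which is why "normal case + 8" simply skips the CLEAR ACK
 * instruction that follows the INT.) */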
793STATIC __u32
794process_extended_message(struct Scsi_Host *host,
795			 struct NCR_700_Host_Parameters *hostdata,
796			 struct scsi_cmnd *SCp, __u32 dsp, __u32 dsps)
797{
798	__u32 resume_offset = dsp, temp = dsp + 8;
799	__u8 pun = 0xff, lun = 0xff;
800
801	if(SCp != NULL) {
802		pun = SCp->device->id;
803		lun = SCp->device->lun;
804	}
805
806	switch(hostdata->msgin[2]) {
807	case A_SDTR_MSG:
808		if(SCp != NULL && NCR_700_is_flag_set(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION)) {
809			struct scsi_target *starget = SCp->device->sdev_target;
810			__u8 period = hostdata->msgin[3];
811			__u8 offset = hostdata->msgin[4];
812
813			if(offset == 0 || period == 0) {
814				offset = 0;
815				period = 0;
816			}
817
818			spi_offset(starget) = offset;
819			spi_period(starget) = period;
820
821			if(NCR_700_is_flag_set(SCp->device, NCR_700_DEV_PRINT_SYNC_NEGOTIATION)) {
822				spi_display_xfer_agreement(starget);
823				NCR_700_clear_flag(SCp->device, NCR_700_DEV_PRINT_SYNC_NEGOTIATION);
824			}
825
826			NCR_700_set_flag(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC);
827			NCR_700_clear_flag(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
828
829			NCR_700_writeb(NCR_700_get_SXFER(SCp->device),
830				       host, SXFER_REG);
831
832		} else {
833			/* SDTR message out of the blue, reject it */
834			shost_printk(KERN_WARNING, host,
835				"Unexpected SDTR msg\n");
836			hostdata->msgout[0] = A_REJECT_MSG;
837			dma_sync_to_dev(hostdata, hostdata->msgout, 1);
838			script_patch_16(hostdata, hostdata->script,
839			                MessageCount, 1);
840			/* SendMsgOut returns, so set up the return
841			 * address */
842			resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
843		}
844		break;
845
846	case A_WDTR_MSG:
847		printk(KERN_INFO "scsi%d: (%d:%d), Unsolicited WDTR after CMD, Rejecting\n",
848		       host->host_no, pun, lun);
849		hostdata->msgout[0] = A_REJECT_MSG;
850		dma_sync_to_dev(hostdata, hostdata->msgout, 1);
851		script_patch_16(hostdata, hostdata->script, MessageCount, 1);
852		resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
853
854		break;
855
856	default:
857		printk(KERN_INFO "scsi%d (%d:%d): Unexpected message %s: ",
858		       host->host_no, pun, lun,
859		       NCR_700_phase[(dsps & 0xf00) >> 8]);
860		spi_print_msg(hostdata->msgin);
861		printk("\n");
862		/* just reject it */
863		hostdata->msgout[0] = A_REJECT_MSG;
864		dma_sync_to_dev(hostdata, hostdata->msgout, 1);
865		script_patch_16(hostdata, hostdata->script, MessageCount, 1);
866		/* SendMsgOut returns, so set up the return
867		 * address */
868		resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
869	}
870	NCR_700_writel(temp, host, TEMP_REG);
871	return resume_offset;
872}
873
874STATIC __u32
875process_message(struct Scsi_Host *host,	struct NCR_700_Host_Parameters *hostdata,
876		struct scsi_cmnd *SCp, __u32 dsp, __u32 dsps)
877{
878	/* work out where to return to */
879	__u32 temp = dsp + 8, resume_offset = dsp;
880	__u8 pun = 0xff, lun = 0xff;
881
882	if(SCp != NULL) {
883		pun = SCp->device->id;
884		lun = SCp->device->lun;
885	}
886
887#ifdef NCR_700_DEBUG
888	printk("scsi%d (%d:%d): message %s: ", host->host_no, pun, lun,
889	       NCR_700_phase[(dsps & 0xf00) >> 8]);
890	spi_print_msg(hostdata->msgin);
891	printk("\n");
892#endif
893
894	switch(hostdata->msgin[0]) {
895
896	case A_EXTENDED_MSG:
897		resume_offset =  process_extended_message(host, hostdata, SCp,
898							  dsp, dsps);
899		break;
900
901	case A_REJECT_MSG:
902		if(SCp != NULL && NCR_700_is_flag_set(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION)) {
903			/* Rejected our sync negotiation attempt */
904			spi_period(SCp->device->sdev_target) =
905				spi_offset(SCp->device->sdev_target) = 0;
906			NCR_700_set_flag(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC);
907			NCR_700_clear_flag(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
908		} else if(SCp != NULL && NCR_700_get_tag_neg_state(SCp->device) == NCR_700_DURING_TAG_NEGOTIATION) {
909			/* rejected our first simple tag message */
910			scmd_printk(KERN_WARNING, SCp,
911				"Rejected first tag queue attempt, turning off tag queueing\n");
912			/* we're done negotiating */
913			NCR_700_set_tag_neg_state(SCp->device, NCR_700_FINISHED_TAG_NEGOTIATION);
914			hostdata->tag_negotiated &= ~(1<<scmd_id(SCp));
915
916			SCp->device->tagged_supported = 0;
917			SCp->device->simple_tags = 0;
918			scsi_change_queue_depth(SCp->device, host->cmd_per_lun);
919		} else {
920			shost_printk(KERN_WARNING, host,
921				"(%d:%d) Unexpected REJECT Message %s\n",
922			       pun, lun,
923			       NCR_700_phase[(dsps & 0xf00) >> 8]);
924			/* however, just ignore it */
925		}
926		break;
927
928	case A_PARITY_ERROR_MSG:
929		printk(KERN_ERR "scsi%d (%d:%d) Parity Error!\n", host->host_no,
930		       pun, lun);
931		NCR_700_internal_bus_reset(host);
932		break;
933	case A_SIMPLE_TAG_MSG:
934		printk(KERN_INFO "scsi%d (%d:%d) SIMPLE TAG %d %s\n", host->host_no,
935		       pun, lun, hostdata->msgin[1],
936		       NCR_700_phase[(dsps & 0xf00) >> 8]);
937		/* just ignore it */
938		break;
939	default:
940		printk(KERN_INFO "scsi%d (%d:%d): Unexpected message %s: ",
941		       host->host_no, pun, lun,
942		       NCR_700_phase[(dsps & 0xf00) >> 8]);
943
944		spi_print_msg(hostdata->msgin);
945		printk("\n");
946		/* just reject it */
947		hostdata->msgout[0] = A_REJECT_MSG;
948		dma_sync_to_dev(hostdata, hostdata->msgout, 1);
949		script_patch_16(hostdata, hostdata->script, MessageCount, 1);
950		/* SendMsgOut returns, so set up the return
951		 * address */
952		resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
953
954		break;
955	}
956	NCR_700_writel(temp, host, TEMP_REG);
957	/* set us up to receive another message */
958	dma_sync_from_dev(hostdata, hostdata->msgin, MSG_ARRAY_SIZE);
959	return resume_offset;
960}
961
962STATIC __u32
963process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp,
964			 struct Scsi_Host *host,
965			 struct NCR_700_Host_Parameters *hostdata)
966{
967	__u32 resume_offset = 0;
968	__u8 pun = 0xff, lun=0xff;
969
970	if(SCp != NULL) {
971		pun = SCp->device->id;
972		lun = SCp->device->lun;
973	}
974
975	if(dsps == A_GOOD_STATUS_AFTER_STATUS) {
976		DEBUG(("  COMMAND COMPLETE, status=%02x\n",
977		       hostdata->status[0]));
978		/* OK, if TCQ still under negotiation, we now know it works */
979		if (NCR_700_get_tag_neg_state(SCp->device) == NCR_700_DURING_TAG_NEGOTIATION)
980			NCR_700_set_tag_neg_state(SCp->device,
981						  NCR_700_FINISHED_TAG_NEGOTIATION);
982
983		/* check for contingent allegiance conditions */
984		if(status_byte(hostdata->status[0]) == CHECK_CONDITION ||
985		   status_byte(hostdata->status[0]) == COMMAND_TERMINATED) {
986			struct NCR_700_command_slot *slot =
987				(struct NCR_700_command_slot *)SCp->host_scribble;
988			if(slot->flags == NCR_700_FLAG_AUTOSENSE) {
989				/* OOPS: bad device, returning another
990				 * contingent allegiance condition */
991				scmd_printk(KERN_ERR, SCp,
992					"broken device is looping in contingent allegiance: ignoring\n");
993				NCR_700_scsi_done(hostdata, SCp, hostdata->status[0]);
994			} else {
995				char *cmnd =
996					NCR_700_get_sense_cmnd(SCp->device);
997#ifdef NCR_DEBUG
998				scsi_print_command(SCp);
999				printk("  cmd %p has status %d, requesting sense\n",
1000				       SCp, hostdata->status[0]);
1001#endif
1002				/* we can destroy the command here
1003				 * because the contingent allegiance
1004				 * condition will cause a retry which
1005				 * will re-copy the command from the
1006				 * saved data_cmnd.  We also unmap any
1007				 * data associated with the command
1008				 * here */
1009				NCR_700_unmap(hostdata, SCp, slot);
1010				dma_unmap_single(hostdata->dev, slot->pCmd,
1011						 MAX_COMMAND_SIZE,
1012						 DMA_TO_DEVICE);
1013
1014				cmnd[0] = REQUEST_SENSE;
1015				cmnd[1] = (lun & 0x7) << 5;
1016				cmnd[2] = 0;
1017				cmnd[3] = 0;
1018				cmnd[4] = SCSI_SENSE_BUFFERSIZE;
1019				cmnd[5] = 0;
1020				/* Here's a quiet hack: the
1021				 * REQUEST_SENSE command is six bytes,
1022				 * so store a flag indicating that
1023				 * this was an internal sense request
1024				 * and the original status at the end
1025				 * of the command */
1026				cmnd[6] = NCR_700_INTERNAL_SENSE_MAGIC;
1027				cmnd[7] = hostdata->status[0];
1028				cmnd[8] = SCp->cmd_len;
1029				SCp->cmd_len = 6; /* command length for
1030						   * REQUEST_SENSE */
1031				slot->pCmd = dma_map_single(hostdata->dev, cmnd, MAX_COMMAND_SIZE, DMA_TO_DEVICE);
1032				slot->dma_handle = dma_map_single(hostdata->dev, SCp->sense_buffer, SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
1033				slot->SG[0].ins = bS_to_host(SCRIPT_MOVE_DATA_IN | SCSI_SENSE_BUFFERSIZE);
1034				slot->SG[0].pAddr = bS_to_host(slot->dma_handle);
1035				slot->SG[1].ins = bS_to_host(SCRIPT_RETURN);
1036				slot->SG[1].pAddr = 0;
1037				slot->resume_offset = hostdata->pScript;
1038				dma_sync_to_dev(hostdata, slot->SG, sizeof(slot->SG[0])*2);
1039				dma_sync_from_dev(hostdata, SCp->sense_buffer, SCSI_SENSE_BUFFERSIZE);
1040
1041				/* queue the command for reissue */
1042				slot->state = NCR_700_SLOT_QUEUED;
1043				slot->flags = NCR_700_FLAG_AUTOSENSE;
1044				hostdata->state = NCR_700_HOST_FREE;
1045				hostdata->cmd = NULL;
1046			}
1047		} else {
1048			// Currently rely on the mid layer evaluation
1049			// of the tag queuing capability
1050			//
1051			//if(status_byte(hostdata->status[0]) == GOOD &&
1052			//   SCp->cmnd[0] == INQUIRY && SCp->use_sg == 0) {
1053			//	/* Piggy back the tag queueing support
1054			//	 * on this command */
1055			//	dma_sync_single_for_cpu(hostdata->dev,
1056			//			    slot->dma_handle,
1057			//			    SCp->request_bufflen,
1058			//			    DMA_FROM_DEVICE);
1059			//	if(((char *)SCp->request_buffer)[7] & 0x02) {
1060			//		scmd_printk(KERN_INFO, SCp,
1061			//		     "Enabling Tag Command Queuing\n");
1062			//		hostdata->tag_negotiated |= (1<<scmd_id(SCp));
1063			//		NCR_700_set_flag(SCp->device, NCR_700_DEV_BEGIN_TAG_QUEUEING);
1064			//	} else {
1065			//		NCR_700_clear_flag(SCp->device, NCR_700_DEV_BEGIN_TAG_QUEUEING);
1066			//		hostdata->tag_negotiated &= ~(1<<scmd_id(SCp));
1067			//	}
1068			//}
1069			NCR_700_scsi_done(hostdata, SCp, hostdata->status[0]);
1070		}
1071	} else if((dsps & 0xfffff0f0) == A_UNEXPECTED_PHASE) {
1072		__u8 i = (dsps & 0xf00) >> 8;
1073
1074		scmd_printk(KERN_ERR, SCp, "UNEXPECTED PHASE %s (%s)\n",
1075		       NCR_700_phase[i],
1076		       sbcl_to_string(NCR_700_readb(host, SBCL_REG)));
1077		scmd_printk(KERN_ERR, SCp, "         len = %d, cmd =",
1078			SCp->cmd_len);
1079		scsi_print_command(SCp);
1080
1081		NCR_700_internal_bus_reset(host);
1082	} else if((dsps & 0xfffff000) == A_FATAL) {
1083		int i = (dsps & 0xfff);
1084
1085		printk(KERN_ERR "scsi%d: (%d:%d) FATAL ERROR: %s\n",
1086		       host->host_no, pun, lun, NCR_700_fatal_messages[i]);
1087		if(dsps == A_FATAL_ILLEGAL_MSG_LENGTH) {
1088			printk(KERN_ERR "     msg begins %02x %02x\n",
1089			       hostdata->msgin[0], hostdata->msgin[1]);
1090		}
1091		NCR_700_internal_bus_reset(host);
1092	} else if((dsps & 0xfffff0f0) == A_DISCONNECT) {
1093#ifdef NCR_700_DEBUG
1094		__u8 i = (dsps & 0xf00) >> 8;
1095
1096		printk("scsi%d: (%d:%d), DISCONNECTED (%d) %s\n",
1097		       host->host_no, pun, lun,
1098		       i, NCR_700_phase[i]);
1099#endif
1100		save_for_reselection(hostdata, SCp, dsp);
1101
1102	} else if(dsps == A_RESELECTION_IDENTIFIED) {
1103		__u8 lun;
1104		struct NCR_700_command_slot *slot;
1105		__u8 reselection_id = hostdata->reselection_id;
1106		struct scsi_device *SDp;
1107
1108		lun = hostdata->msgin[0] & 0x1f;
1109
1110		hostdata->reselection_id = 0xff;
1111		DEBUG(("scsi%d: (%d:%d) RESELECTED!\n",
1112		       host->host_no, reselection_id, lun));
1113		/* clear the reselection indicator */
1114		SDp = __scsi_device_lookup(host, 0, reselection_id, lun);
1115		if(unlikely(SDp == NULL)) {
1116			printk(KERN_ERR "scsi%d: (%d:%d) HAS NO device\n",
1117			       host->host_no, reselection_id, lun);
1118			BUG();
1119		}
1120		if(hostdata->msgin[1] == A_SIMPLE_TAG_MSG) {
1121			struct scsi_cmnd *SCp;
1122
1123			SCp = scsi_host_find_tag(SDp->host, hostdata->msgin[2]);
1124			if(unlikely(SCp == NULL)) {
1125				printk(KERN_ERR "scsi%d: (%d:%d) no saved request for tag %d\n",
1126				       host->host_no, reselection_id, lun, hostdata->msgin[2]);
1127				BUG();
1128			}
1129
1130			slot = (struct NCR_700_command_slot *)SCp->host_scribble;
1131			DDEBUG(KERN_DEBUG, SDp,
1132				"reselection is tag %d, slot %p(%d)\n",
1133				hostdata->msgin[2], slot, slot->tag);
1134		} else {
1135			struct NCR_700_Device_Parameters *p = SDp->hostdata;
1136			struct scsi_cmnd *SCp = p->current_cmnd;
1137
1138			if(unlikely(SCp == NULL)) {
1139				sdev_printk(KERN_ERR, SDp,
1140					"no saved request for untagged cmd\n");
1141				BUG();
1142			}
1143			slot = (struct NCR_700_command_slot *)SCp->host_scribble;
1144		}
1145
1146		if(slot == NULL) {
1147			printk(KERN_ERR "scsi%d: (%d:%d) RESELECTED but no saved command (MSG = %02x %02x %02x)!!\n",
1148			       host->host_no, reselection_id, lun,
1149			       hostdata->msgin[0], hostdata->msgin[1],
1150			       hostdata->msgin[2]);
1151		} else {
1152			if(hostdata->state != NCR_700_HOST_BUSY)
1153				printk(KERN_ERR "scsi%d: FATAL, host not busy during valid reselection!\n",
1154				       host->host_no);
1155			resume_offset = slot->resume_offset;
1156			hostdata->cmd = slot->cmnd;
1157
1158			/* re-patch for this command */
1159			script_patch_32_abs(hostdata, hostdata->script,
1160			                    CommandAddress, slot->pCmd);
1161			script_patch_16(hostdata, hostdata->script,
1162					CommandCount, slot->cmnd->cmd_len);
1163			script_patch_32_abs(hostdata, hostdata->script,
1164			                    SGScriptStartAddress,
1165					    to32bit(&slot->pSG[0].ins));
1166
1167			/* Note: setting SXFER only works if we're
1168			 * still in the MESSAGE phase, so it is vital
1169			 * that ACK is still asserted when we process
1170			 * the reselection message.  The resume offset
1171			 * should therefore always clear ACK */
1172			NCR_700_writeb(NCR_700_get_SXFER(hostdata->cmd->device),
1173				       host, SXFER_REG);
1174			dma_sync_from_dev(hostdata, hostdata->msgin,
1175				       MSG_ARRAY_SIZE);
1176			dma_sync_to_dev(hostdata, hostdata->msgout,
1177				       MSG_ARRAY_SIZE);
1178			/* I'm just being paranoid here, the command should
1179			 * already have been flushed from the cache */
1180			dma_sync_to_dev(hostdata, slot->cmnd->cmnd,
1181				       slot->cmnd->cmd_len);
1182
1183
1184
1185		}
1186	} else if(dsps == A_RESELECTED_DURING_SELECTION) {
1187
1188		/* This section is full of debugging code because I've
1189		 * never managed to reach it.  I think what happens is
1190		 * that, because the 700 runs with selection
1191		 * interrupts enabled the whole time, we take a
1192		 * selection interrupt before we manage to get to the
1193		 * reselected script interrupt */
1194
1195		__u8 reselection_id = NCR_700_readb(host, SFBR_REG);
1196		struct NCR_700_command_slot *slot;
1197
1198		/* Take out our own ID */
1199		reselection_id &= ~(1<<host->this_id);
1200
1201		/* I've never seen this happen, so keep this as a printk rather
1202		 * than a debug */
1203		printk(KERN_INFO "scsi%d: (%d:%d) RESELECTION DURING SELECTION, dsp=%08x[%04x] state=%d, count=%d\n",
1204		       host->host_no, reselection_id, lun, dsp, dsp - hostdata->pScript, hostdata->state, hostdata->command_slot_count);
1205
1206		{
1207			/* FIXME: DEBUGGING CODE */
1208			__u32 SG = (__u32)bS_to_cpu(hostdata->script[A_SGScriptStartAddress_used[0]]);
1209			int i;
1210
1211			for(i=0; i< NCR_700_COMMAND_SLOTS_PER_HOST; i++) {
1212				if(SG >= to32bit(&hostdata->slots[i].pSG[0])
1213				   && SG <= to32bit(&hostdata->slots[i].pSG[NCR_700_SG_SEGMENTS]))
1214					break;
1215			}
1216			printk(KERN_INFO "IDENTIFIED SG segment as being %08x in slot %p, cmd %p, slot->resume_offset=%08x\n", SG, &hostdata->slots[i], hostdata->slots[i].cmnd, hostdata->slots[i].resume_offset);
1217			SCp =  hostdata->slots[i].cmnd;
1218		}
1219
1220		if(SCp != NULL) {
1221			slot = (struct NCR_700_command_slot *)SCp->host_scribble;
1222			/* change slot from busy to queued to redo command */
1223			slot->state = NCR_700_SLOT_QUEUED;
1224		}
1225		hostdata->cmd = NULL;
1226
1227		if(reselection_id == 0) {
1228			if(hostdata->reselection_id == 0xff) {
1229				printk(KERN_ERR "scsi%d: Invalid reselection during selection!!\n", host->host_no);
1230				return 0;
1231			} else {
1232				printk(KERN_ERR "scsi%d: script reselected and we took a selection interrupt\n",
1233				       host->host_no);
1234				reselection_id = hostdata->reselection_id;
1235			}
1236		} else {
1237
1238			/* convert to real ID */
1239			reselection_id = bitmap_to_number(reselection_id);
1240		}
1241		hostdata->reselection_id = reselection_id;
1242		/* just in case we have a stale simple tag message, clear it */
1243		hostdata->msgin[1] = 0;
1244		dma_sync_to_dev(hostdata, hostdata->msgin, MSG_ARRAY_SIZE);
1245		if(hostdata->tag_negotiated & (1<<reselection_id)) {
1246			resume_offset = hostdata->pScript + Ent_GetReselectionWithTag;
1247		} else {
1248			resume_offset = hostdata->pScript + Ent_GetReselectionData;
1249		}
1250	} else if(dsps == A_COMPLETED_SELECTION_AS_TARGET) {
1251		/* we've just disconnected from the bus, do nothing since
1252		 * a return here will re-run the queued command slot
1253		 * that may have been interrupted by the initial selection */
1254		DEBUG((" SELECTION COMPLETED\n"));
1255	} else if((dsps & 0xfffff0f0) == A_MSG_IN) {
1256		resume_offset = process_message(host, hostdata, SCp,
1257						dsp, dsps);
1258	} else if((dsps &  0xfffff000) == 0) {
1259		__u8 i = (dsps & 0xf0) >> 4, j = (dsps & 0xf00) >> 8;
1260		printk(KERN_ERR "scsi%d: (%d:%d), unhandled script condition %s %s at %04x\n",
1261		       host->host_no, pun, lun, NCR_700_condition[i],
1262		       NCR_700_phase[j], dsp - hostdata->pScript);
1263		if(SCp != NULL) {
1264			struct scatterlist *sg;
1265
1266			scsi_print_command(SCp);
1267			scsi_for_each_sg(SCp, sg, scsi_sg_count(SCp) + 1, i) {
1268				printk(KERN_INFO " SG[%d].length = %d, move_insn=%08x, addr %08x\n", i, sg->length, ((struct NCR_700_command_slot *)SCp->host_scribble)->SG[i].ins, ((struct NCR_700_command_slot *)SCp->host_scribble)->SG[i].pAddr);
1269			}
1270		}
1271		NCR_700_internal_bus_reset(host);
1272	} else if((dsps & 0xfffff000) == A_DEBUG_INTERRUPT) {
1273		printk(KERN_NOTICE "scsi%d (%d:%d) DEBUG INTERRUPT %d AT %08x[%04x], continuing\n",
1274		       host->host_no, pun, lun, dsps & 0xfff, dsp, dsp - hostdata->pScript);
1275		resume_offset = dsp;
1276	} else {
1277		printk(KERN_ERR "scsi%d: (%d:%d), unidentified script interrupt 0x%x at %04x\n",
1278		       host->host_no, pun, lun, dsps, dsp - hostdata->pScript);
1279		NCR_700_internal_bus_reset(host);
1280	}
1281	return resume_offset;
1282}
1283
1284/* We run the 53c700 with selection interrupts always enabled.  This
1285 * means that the chip may be selected as soon as the bus frees.  On a
1286 * busy bus, this can be before the scripts engine finishes its
1287 * processing.  Therefore, part of the selection processing has to be
1288 * to find out what the scripts engine is doing and complete the
1289 * function if necessary (i.e. process the pending disconnect or save
1290 * the interrupted initial selection) */
1291STATIC inline __u32
1292process_selection(struct Scsi_Host *host, __u32 dsp)
1293{
1294	__u8 id = 0;	/* Squash compiler warning */
1295	int count = 0;
1296	__u32 resume_offset = 0;
1297	struct NCR_700_Host_Parameters *hostdata =
1298		(struct NCR_700_Host_Parameters *)host->hostdata[0];
1299	struct scsi_cmnd *SCp = hostdata->cmd;
1300	__u8 sbcl;
1301
1302	for(count = 0; count < 5; count++) {
1303		id = NCR_700_readb(host, hostdata->chip710 ?
1304				   CTEST9_REG : SFBR_REG);
1305
1306		/* Take out our own ID */
1307		id &= ~(1<<host->this_id);
1308		if(id != 0)
1309			break;
1310		udelay(5);
1311	}
1312	sbcl = NCR_700_readb(host, SBCL_REG);
1313	if((sbcl & SBCL_IO) == 0) {
1314		/* mark as having been selected rather than reselected */
1315		id = 0xff;
1316	} else {
1317		/* convert to real ID */
1318		hostdata->reselection_id = id = bitmap_to_number(id);
1319		DEBUG(("scsi%d:  Reselected by %d\n",
1320		       host->host_no, id));
1321	}
1322	if(hostdata->state == NCR_700_HOST_BUSY && SCp != NULL) {
1323		struct NCR_700_command_slot *slot =
1324			(struct NCR_700_command_slot *)SCp->host_scribble;
1325		DEBUG(("  ID %d WARNING: RESELECTION OF BUSY HOST, saving cmd %p, slot %p, addr %x [%04x], resume %x!\n", id, hostdata->cmd, slot, dsp, dsp - hostdata->pScript, resume_offset));
1326
1327		switch(dsp - hostdata->pScript) {
1328		case Ent_Disconnect1:
1329		case Ent_Disconnect2:
1330			save_for_reselection(hostdata, SCp, Ent_Disconnect2 + hostdata->pScript);
1331			break;
1332		case Ent_Disconnect3:
1333		case Ent_Disconnect4:
1334			save_for_reselection(hostdata, SCp, Ent_Disconnect4 + hostdata->pScript);
1335			break;
1336		case Ent_Disconnect5:
1337		case Ent_Disconnect6:
1338			save_for_reselection(hostdata, SCp, Ent_Disconnect6 + hostdata->pScript);
1339			break;
1340		case Ent_Disconnect7:
1341		case Ent_Disconnect8:
1342			save_for_reselection(hostdata, SCp, Ent_Disconnect8 + hostdata->pScript);
1343			break;
1344		case Ent_Finish1:
1345		case Ent_Finish2:
1346			process_script_interrupt(A_GOOD_STATUS_AFTER_STATUS, dsp, SCp, host, hostdata);
1347			break;
1348
1349		default:
1350			slot->state = NCR_700_SLOT_QUEUED;
1351			break;
1352			}
1353	}
1354	hostdata->state = NCR_700_HOST_BUSY;
1355	hostdata->cmd = NULL;
1356	/* clear any stale simple tag message */
1357	hostdata->msgin[1] = 0;
1358	dma_sync_to_dev(hostdata, hostdata->msgin, MSG_ARRAY_SIZE);
1359
1360	if(id == 0xff) {
1361		/* Selected as target, Ignore */
1362		resume_offset = hostdata->pScript + Ent_SelectedAsTarget;
1363	} else if(hostdata->tag_negotiated & (1<<id)) {
1364		resume_offset = hostdata->pScript + Ent_GetReselectionWithTag;
1365	} else {
1366		resume_offset = hostdata->pScript + Ent_GetReselectionData;
1367	}
1368	return resume_offset;
1369}
1370
1371static inline void
1372NCR_700_clear_fifo(struct Scsi_Host *host) {
1373	const struct NCR_700_Host_Parameters *hostdata
1374		= (struct NCR_700_Host_Parameters *)host->hostdata[0];
1375	if(hostdata->chip710) {
1376		NCR_700_writeb(CLR_FIFO_710, host, CTEST8_REG);
1377	} else {
1378		NCR_700_writeb(CLR_FIFO, host, DFIFO_REG);
1379	}
1380}
1381
1382static inline void
1383NCR_700_flush_fifo(struct Scsi_Host *host) {
1384	const struct NCR_700_Host_Parameters *hostdata
1385		= (struct NCR_700_Host_Parameters *)host->hostdata[0];
1386	if(hostdata->chip710) {
1387		NCR_700_writeb(FLUSH_DMA_FIFO_710, host, CTEST8_REG);
1388		udelay(10);
1389		NCR_700_writeb(0, host, CTEST8_REG);
1390	} else {
1391		NCR_700_writeb(FLUSH_DMA_FIFO, host, DFIFO_REG);
1392		udelay(10);
1393		NCR_700_writeb(0, host, DFIFO_REG);
1394	}
1395}
1396
1397
1398/* The queue lock with interrupts disabled must be held on entry to
1399 * this function */
1400STATIC int
1401NCR_700_start_command(struct scsi_cmnd *SCp)
1402{
1403	struct NCR_700_command_slot *slot =
1404		(struct NCR_700_command_slot *)SCp->host_scribble;
1405	struct NCR_700_Host_Parameters *hostdata =
1406		(struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0];
1407	__u16 count = 1;	/* for IDENTIFY message */
1408	u8 lun = SCp->device->lun;
1409
1410	if(hostdata->state != NCR_700_HOST_FREE) {
1411		/* keep this inside the lock to close the race window where
1412		 * the running command finishes on another CPU before we
1413		 * change the state to queued on this one */
1414		slot->state = NCR_700_SLOT_QUEUED;
1415
1416		DEBUG(("scsi%d: host busy, queueing command %p, slot %p\n",
1417		       SCp->device->host->host_no, slot->cmnd, slot));
1418		return 0;
1419	}
1420	hostdata->state = NCR_700_HOST_BUSY;
1421	hostdata->cmd = SCp;
1422	slot->state = NCR_700_SLOT_BUSY;
1423	/* keep interrupts disabled until we have the command correctly
1424	 * set up so we cannot take a selection interrupt */
1425
1426	hostdata->msgout[0] = NCR_700_identify((SCp->cmnd[0] != REQUEST_SENSE &&
1427						slot->flags != NCR_700_FLAG_AUTOSENSE),
1428					       lun);
1429	/* for INQUIRY or REQUEST_SENSE commands, we cannot be sure
1430	 * if the negotiated transfer parameters still hold, so
1431	 * always renegotiate them */
1432	if(SCp->cmnd[0] == INQUIRY || SCp->cmnd[0] == REQUEST_SENSE ||
1433	   slot->flags == NCR_700_FLAG_AUTOSENSE) {
1434		NCR_700_clear_flag(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC);
1435	}
1436
1437	/* REQUEST_SENSE is asking for contingent I_T_L(_Q) status.
1438	 * If a contingent allegiance condition exists, the device
1439	 * will refuse all tags, so send the request sense as untagged
1440	 * */
1441	if((hostdata->tag_negotiated & (1<<scmd_id(SCp)))
1442	   && (slot->tag != SCSI_NO_TAG && SCp->cmnd[0] != REQUEST_SENSE &&
1443	       slot->flags != NCR_700_FLAG_AUTOSENSE)) {
1444		count += spi_populate_tag_msg(&hostdata->msgout[count], SCp);
1445	}
1446
1447	if(hostdata->fast &&
1448	   NCR_700_is_flag_clear(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC)) {
1449		count += spi_populate_sync_msg(&hostdata->msgout[count],
1450				spi_period(SCp->device->sdev_target),
1451				spi_offset(SCp->device->sdev_target));
1452		NCR_700_set_flag(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
1453	}
1454
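	/* count now covers the IDENTIFY byte plus any simple tag and SDTR
	 * messages built above; patch it into the script as the length of
	 * the initial MSG_OUT phase */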
1455	script_patch_16(hostdata, hostdata->script, MessageCount, count);
1456
1457	script_patch_ID(hostdata, hostdata->script, Device_ID, 1<<scmd_id(SCp));
1458
1459	script_patch_32_abs(hostdata, hostdata->script, CommandAddress,
1460			    slot->pCmd);
1461	script_patch_16(hostdata, hostdata->script, CommandCount, SCp->cmd_len);
1462	/* finally plumb the beginning of the SG list into the script
1463	 * */
1464	script_patch_32_abs(hostdata, hostdata->script,
1465	                    SGScriptStartAddress, to32bit(&slot->pSG[0].ins));
1466	NCR_700_clear_fifo(SCp->device->host);
1467
1468	if(slot->resume_offset == 0)
1469		slot->resume_offset = hostdata->pScript;
1470	/* now perform all the writebacks and invalidates */
1471	dma_sync_to_dev(hostdata, hostdata->msgout, count);
1472	dma_sync_from_dev(hostdata, hostdata->msgin, MSG_ARRAY_SIZE);
1473	dma_sync_to_dev(hostdata, SCp->cmnd, SCp->cmd_len);
1474	dma_sync_from_dev(hostdata, hostdata->status, 1);
1475
1476	/* set the synchronous period/offset */
1477	NCR_700_writeb(NCR_700_get_SXFER(SCp->device),
1478		       SCp->device->host, SXFER_REG);
1479	NCR_700_writel(slot->temp, SCp->device->host, TEMP_REG);
1480	NCR_700_writel(slot->resume_offset, SCp->device->host, DSP_REG);
1481
1482	return 1;
1483}
1484
1485irqreturn_t
1486NCR_700_intr(int irq, void *dev_id)
1487{
1488	struct Scsi_Host *host = (struct Scsi_Host *)dev_id;
1489	struct NCR_700_Host_Parameters *hostdata =
1490		(struct NCR_700_Host_Parameters *)host->hostdata[0];
1491	__u8 istat;
1492	__u32 resume_offset = 0;
1493	__u8 pun = 0xff, lun = 0xff;
1494	unsigned long flags;
1495	int handled = 0;
1496
1497	/* Use the host lock to serialise access to the 53c700
1498	 * hardware.  Note: In future, we may need to take the queue
1499	 * lock to enter the done routines.  When that happens, we
1500	 * need to ensure that for this driver, the host lock and the
1501	 * queue lock point to the same thing. */
1502	spin_lock_irqsave(host->host_lock, flags);
1503	if((istat = NCR_700_readb(host, ISTAT_REG))
1504	      & (SCSI_INT_PENDING | DMA_INT_PENDING)) {
1505		__u32 dsps;
1506		__u8 sstat0 = 0, dstat = 0;
1507		__u32 dsp;
1508		struct scsi_cmnd *SCp = hostdata->cmd;
1509
1510		handled = 1;
1511		SCp = hostdata->cmd;
1512
1513		if(istat & SCSI_INT_PENDING) {
1514			udelay(10);
1515
1516			sstat0 = NCR_700_readb(host, SSTAT0_REG);
1517		}
1518
1519		if(istat & DMA_INT_PENDING) {
1520			udelay(10);
1521
1522			dstat = NCR_700_readb(host, DSTAT_REG);
1523		}
1524
1525		dsps = NCR_700_readl(host, DSPS_REG);
1526		dsp = NCR_700_readl(host, DSP_REG);
1527
1528		DEBUG(("scsi%d: istat %02x sstat0 %02x dstat %02x dsp %04x[%08x] dsps 0x%x\n",
1529		       host->host_no, istat, sstat0, dstat,
1530		       (dsp - (__u32)(hostdata->pScript))/4,
1531		       dsp, dsps));
1532
1533		if(SCp != NULL) {
1534			pun = SCp->device->id;
1535			lun = SCp->device->lun;
1536		}
1537
1538		if(sstat0 & SCSI_RESET_DETECTED) {
1539			struct scsi_device *SDp;
1540			int i;
1541
1542			hostdata->state = NCR_700_HOST_BUSY;
1543
1544			printk(KERN_ERR "scsi%d: Bus Reset detected, executing command %p, slot %p, dsp %08x[%04x]\n",
1545			       host->host_no, SCp, SCp == NULL ? NULL : SCp->host_scribble, dsp, dsp - hostdata->pScript);
1546
1547			scsi_report_bus_reset(host, 0);
1548
1549			/* clear all the negotiated parameters */
1550			__shost_for_each_device(SDp, host)
1551				NCR_700_clear_flag(SDp, ~0);
1552
1553			/* clear all the slots and their pending commands */
1554			for(i = 0; i < NCR_700_COMMAND_SLOTS_PER_HOST; i++) {
1555				struct scsi_cmnd *SCp;
1556				struct NCR_700_command_slot *slot =
1557					&hostdata->slots[i];
1558
1559				if(slot->state == NCR_700_SLOT_FREE)
1560					continue;
1561
1562				SCp = slot->cmnd;
1563				printk(KERN_ERR " failing command because of reset, slot %p, cmnd %p\n",
1564				       slot, SCp);
1565				free_slot(slot, hostdata);
1566				SCp->host_scribble = NULL;
1567				NCR_700_set_depth(SCp->device, 0);
1568				/* NOTE: deadlock potential here: we
1569				 * rely on mid-layer guarantees that
1570				 * scsi_done won't try to issue the
1571				 * command again, otherwise we'll
1572				 * deadlock on the
1573				 * hostdata->state_lock */
1574				SCp->result = DID_RESET << 16;
1575				SCp->scsi_done(SCp);
1576			}
1577			mdelay(25);
1578			NCR_700_chip_setup(host);
1579
1580			hostdata->state = NCR_700_HOST_FREE;
1581			hostdata->cmd = NULL;
1582			/* signal back if this was an eh induced reset */
1583			if(hostdata->eh_complete != NULL)
1584				complete(hostdata->eh_complete);
1585			goto out_unlock;
1586		} else if(sstat0 & SELECTION_TIMEOUT) {
1587			DEBUG(("scsi%d: (%d:%d) selection timeout\n",
1588			       host->host_no, pun, lun));
1589			NCR_700_scsi_done(hostdata, SCp, DID_NO_CONNECT<<16);
1590		} else if(sstat0 & PHASE_MISMATCH) {
1591			struct NCR_700_command_slot *slot = (SCp == NULL) ? NULL :
1592				(struct NCR_700_command_slot *)SCp->host_scribble;
1593
1594			if(dsp == Ent_SendMessage + 8 + hostdata->pScript) {
1595				/* It wants to reply to some part of
1596				 * our message */
1597#ifdef NCR_700_DEBUG
1598				__u32 temp = NCR_700_readl(host, TEMP_REG);
1599				int count = (hostdata->script[Ent_SendMessage/4] & 0xffffff) - ((NCR_700_readl(host, DBC_REG) & 0xffffff) + NCR_700_data_residual(host));
1600				printk("scsi%d (%d:%d) PHASE MISMATCH IN SEND MESSAGE %d remain, return %p[%04x], phase %s\n", host->host_no, pun, lun, count, (void *)temp, temp - hostdata->pScript, sbcl_to_string(NCR_700_readb(host, SBCL_REG)));
1601#endif
1602				resume_offset = hostdata->pScript + Ent_SendMessagePhaseMismatch;
1603			} else if (slot && dsp >= to32bit(&slot->pSG[0].ins) &&
1604				  dsp <= to32bit(&slot->pSG[NCR_700_SG_SEGMENTS].ins)) {
1605				int data_transfer = NCR_700_readl(host, DBC_REG) & 0xffffff;
1606				int SGcount = (dsp - to32bit(&slot->pSG[0].ins))/sizeof(struct NCR_700_SG_List);
1607				int residual = NCR_700_data_residual(host);
1608				int i;
1609#ifdef NCR_700_DEBUG
1610				__u32 naddr = NCR_700_readl(host, DNAD_REG);
1611
1612				printk("scsi%d: (%d:%d) Expected phase mismatch in slot->SG[%d], transferred 0x%x\n",
1613				       host->host_no, pun, lun,
1614				       SGcount, data_transfer);
1615				scsi_print_command(SCp);
1616				if(residual) {
1617					printk("scsi%d: (%d:%d) Expected phase mismatch in slot->SG[%d], transferred 0x%x, residual %d\n",
1618				       host->host_no, pun, lun,
1619				       SGcount, data_transfer, residual);
1620				}
1621#endif
1622				data_transfer += residual;
1623
1624				if(data_transfer != 0) {
1625					int count;
1626					__u32 pAddr;
1627
1628					SGcount--;
1629
1630					count = (bS_to_cpu(slot->SG[SGcount].ins) & 0x00ffffff);
1631					DEBUG(("DATA TRANSFER MISMATCH, count = %d, transferred %d\n", count, count-data_transfer));
1632					slot->SG[SGcount].ins &= bS_to_host(0xff000000);
1633					slot->SG[SGcount].ins |= bS_to_host(data_transfer);
1634					pAddr = bS_to_cpu(slot->SG[SGcount].pAddr);
1635					pAddr += (count - data_transfer);
1636#ifdef NCR_700_DEBUG
1637					if(pAddr != naddr) {
1638						printk("scsi%d (%d:%d) transfer mismatch pAddr=%lx, naddr=%lx, data_transfer=%d, residual=%d\n", host->host_no, pun, lun, (unsigned long)pAddr, (unsigned long)naddr, data_transfer, residual);
1639					}
1640#endif
1641					slot->SG[SGcount].pAddr = bS_to_host(pAddr);
1642				}
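				/* Worked example (illustrative numbers,
				 * not from a real trace): suppose the
				 * interrupted MOVE in slot->SG[SGcount]
				 * originally asked for count = 4096 bytes
				 * and data_transfer (bytes still owed,
				 * including the FIFO residual) is 1024.
				 * Then 3072 bytes have already been moved,
				 * so the entry is rewritten to transfer
				 * the remaining 1024 bytes starting at
				 * pAddr + 3072, i.e.
				 * pAddr + (count - data_transfer). */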
1643				/* set the executed moves to nops */
1644				for(i=0; i<SGcount; i++) {
1645					slot->SG[i].ins = bS_to_host(SCRIPT_NOP);
1646					slot->SG[i].pAddr = 0;
1647				}
1648				dma_sync_to_dev(hostdata, slot->SG, sizeof(slot->SG));
1649				/* and pretend we disconnected after
1650				 * the command phase */
1651				resume_offset = hostdata->pScript + Ent_MsgInDuringData;
1652				/* make sure all the data is flushed */
1653				NCR_700_flush_fifo(host);
1654			} else {
1655				__u8 sbcl = NCR_700_readb(host, SBCL_REG);
1656				printk(KERN_ERR "scsi%d: (%d:%d) phase mismatch at %04x, phase %s\n",
1657				       host->host_no, pun, lun, dsp - hostdata->pScript, sbcl_to_string(sbcl));
1658				NCR_700_internal_bus_reset(host);
1659			}
1660
1661		} else if(sstat0 & SCSI_GROSS_ERROR) {
1662			printk(KERN_ERR "scsi%d: (%d:%d) GROSS ERROR\n",
1663			       host->host_no, pun, lun);
1664			NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16);
1665		} else if(sstat0 & PARITY_ERROR) {
1666			printk(KERN_ERR "scsi%d: (%d:%d) PARITY ERROR\n",
1667			       host->host_no, pun, lun);
1668			NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16);
1669		} else if(dstat & SCRIPT_INT_RECEIVED) {
1670			DEBUG(("scsi%d: (%d:%d) ====>SCRIPT INTERRUPT<====\n",
1671			       host->host_no, pun, lun));
1672			resume_offset = process_script_interrupt(dsps, dsp, SCp, host, hostdata);
1673		} else if(dstat & (ILGL_INST_DETECTED)) {
1674			printk(KERN_ERR "scsi%d: (%d:%d) Illegal Instruction detected at 0x%08x[0x%x]!!!\n"
1675			       "         Please email James.Bottomley@HansenPartnership.com with the details\n",
1676			       host->host_no, pun, lun,
1677			       dsp, dsp - hostdata->pScript);
1678			NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16);
1679		} else if(dstat & (WATCH_DOG_INTERRUPT|ABORTED)) {
1680			printk(KERN_ERR "scsi%d: (%d:%d) serious DMA problem, dstat=%02x\n",
1681			       host->host_no, pun, lun, dstat);
1682			NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16);
1683		}
1684
1685
1686		/* NOTE: selection interrupt processing MUST occur
1687		 * after script interrupt processing to correctly cope
1688		 * with the case where we process a disconnect and
1689		 * then get reselected before we process the
1690		 * disconnection */
1691		if(sstat0 & SELECTED) {
1692			/* FIXME: It currently takes at least FOUR
1693			 * interrupts to complete a command that
1694			 * disconnects: one for the disconnect, one
1695			 * for the reselection, one to get the
1696			 * reselection data and one to complete the
1697			 * command.  If we guess the reselected
1698			 * command here and prepare it, we only need
1699			 * to get a reselection data interrupt if we
1700			 * guessed wrongly.  Since the interrupt
1701			 * overhead is much greater than the command
1702			 * setup, this would be an efficient
1703			 * optimisation particularly as we probably
1704			 * only have one outstanding command on a
1705			 * target most of the time */
1706
1707			resume_offset = process_selection(host, dsp);
1708
1709		}
1710
1711	}
1712
1713	if(resume_offset) {
1714		if(hostdata->state != NCR_700_HOST_BUSY) {
1715			printk(KERN_ERR "scsi%d: Driver error: resume at 0x%08x [0x%04x] with non busy host!\n",
1716			       host->host_no, resume_offset, resume_offset - hostdata->pScript);
1717			hostdata->state = NCR_700_HOST_BUSY;
1718		}
1719
1720		DEBUG(("Attempting to resume at %x\n", resume_offset));
1721		NCR_700_clear_fifo(host);
1722		NCR_700_writel(resume_offset, host, DSP_REG);
1723	}
1724	/* There is probably a technical no-no about this: If we're a
1725	 * shared interrupt and we got this interrupt because the
1726	 * other device needs servicing, not us, we're still going to
1727	 * check our queued commands here---of course, there shouldn't
1728	 * be any outstanding.... */
1729	if(hostdata->state == NCR_700_HOST_FREE) {
1730		int i;
1731
1732		for(i = 0; i < NCR_700_COMMAND_SLOTS_PER_HOST; i++) {
1733			/* fairness: always run the queue from the last
1734			 * position we left off */
1735			int j = (i + hostdata->saved_slot_position)
1736				% NCR_700_COMMAND_SLOTS_PER_HOST;
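			/* Worked example (illustrative): if there were 8
			 * command slots and saved_slot_position were 5,
			 * the slots would be scanned in the order
			 * 5, 6, 7, 0, 1, 2, 3, 4, so the scan does not
			 * always favour low-numbered slots. */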
1737
1738			if(hostdata->slots[j].state != NCR_700_SLOT_QUEUED)
1739				continue;
1740			if(NCR_700_start_command(hostdata->slots[j].cmnd)) {
1741				DEBUG(("scsi%d: Issuing saved command slot %p, cmd %p\n",
1742				       host->host_no, &hostdata->slots[j],
1743				       hostdata->slots[j].cmnd));
1744				hostdata->saved_slot_position = j + 1;
1745			}
1746
1747			break;
1748		}
1749	}
1750 out_unlock:
1751	spin_unlock_irqrestore(host->host_lock, flags);
1752	return IRQ_RETVAL(handled);
1753}
1754
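/* A minimal sketch (not part of this driver) of how a hypothetical board
 * wrapper might register the exported handler above as a shared interrupt.
 * The names my_board_attach_irq and irq_line are made up for illustration,
 * and it assumes (as the code above suggests) that dev_id is the Scsi_Host
 * pointer; real wrappers would do this from their own probe code. */
#if 0
static int my_board_attach_irq(struct Scsi_Host *host, int irq_line)
{
	/* dev_id is the Scsi_Host; the handler digs the host parameters
	 * out of host->hostdata[0] as seen above. */
	return request_irq(irq_line, NCR_700_intr, IRQF_SHARED,
			   "53c700", host);
}
#endif
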
1755static int
1756NCR_700_queuecommand_lck(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *))
1757{
1758	struct NCR_700_Host_Parameters *hostdata =
1759		(struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0];
1760	__u32 move_ins;
1761	struct NCR_700_command_slot *slot;
1762
1763	if(hostdata->command_slot_count >= NCR_700_COMMAND_SLOTS_PER_HOST) {
1764		/* We're over our allocation; this should never happen
1765		 * since we report the max allocation to the mid layer */
1766		printk(KERN_WARNING "scsi%d: Command depth has gone over queue depth\n", SCp->device->host->host_no);
1767		return 1;
1768	}
1769	/* check for untagged commands.  We cannot accept an untagged
1770	 * command while any other commands are outstanding.  Commands
1771	 * could be untagged because:
1772	 * - The tag negotiated bitmap is clear
1773	 * - The blk layer sent an untagged command
1774	 */
1775	if(NCR_700_get_depth(SCp->device) != 0
1776	   && (!(hostdata->tag_negotiated & (1<<scmd_id(SCp)))
1777	       || !(SCp->flags & SCMD_TAGGED))) {
1778		CDEBUG(KERN_ERR, SCp, "has non zero depth %d\n",
1779		       NCR_700_get_depth(SCp->device));
1780		return SCSI_MLQUEUE_DEVICE_BUSY;
1781	}
1782	if(NCR_700_get_depth(SCp->device) >= SCp->device->queue_depth) {
1783		CDEBUG(KERN_ERR, SCp, "has max tag depth %d\n",
1784		       NCR_700_get_depth(SCp->device));
1785		return SCSI_MLQUEUE_DEVICE_BUSY;
1786	}
1787	NCR_700_set_depth(SCp->device, NCR_700_get_depth(SCp->device) + 1);
1788
1789	/* begin the command here */
1790	/* no need to check for NULL; the command_slot_count test above
1791	 * ensures a slot is free */
1792	slot = find_empty_slot(hostdata);
1793
1794	slot->cmnd = SCp;
1795
1796	SCp->scsi_done = done;
1797	SCp->host_scribble = (unsigned char *)slot;
1798	SCp->SCp.ptr = NULL;
1799	SCp->SCp.buffer = NULL;
1800
1801#ifdef NCR_700_DEBUG
1802	printk("53c700: scsi%d, command ", SCp->device->host->host_no);
1803	scsi_print_command(SCp);
1804#endif
1805	if ((SCp->flags & SCMD_TAGGED)
1806	   && (hostdata->tag_negotiated &(1<<scmd_id(SCp))) == 0
1807	   && NCR_700_get_tag_neg_state(SCp->device) == NCR_700_START_TAG_NEGOTIATION) {
1808		scmd_printk(KERN_ERR, SCp, "Enabling Tag Command Queuing\n");
1809		hostdata->tag_negotiated |= (1<<scmd_id(SCp));
1810		NCR_700_set_tag_neg_state(SCp->device, NCR_700_DURING_TAG_NEGOTIATION);
1811	}
1812
1813	/* here we may have to process an untagged command.  The gate
1814	 * above ensures that this will be the only one outstanding,
1815	 * so clear the tag negotiated bit.
1816	 *
1817	 * FIXME: This will royally screw up on multiple LUN devices
1818	 * */
1819	if (!(SCp->flags & SCMD_TAGGED)
1820	   && (hostdata->tag_negotiated &(1<<scmd_id(SCp)))) {
1821		scmd_printk(KERN_INFO, SCp, "Disabling Tag Command Queuing\n");
1822		hostdata->tag_negotiated &= ~(1<<scmd_id(SCp));
1823	}
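	/* (For clarity: tag_negotiated is a per-host bitmap indexed by
	 * target id, i.e. bit (1 << scmd_id(SCp)); a set bit means tagged
	 * queueing has been, or is currently being, negotiated with that
	 * target.) */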
1824
1825	if ((hostdata->tag_negotiated & (1<<scmd_id(SCp))) &&
1826	    SCp->device->simple_tags) {
1827		slot->tag = SCp->request->tag;
1828		CDEBUG(KERN_DEBUG, SCp, "sending out tag %d, slot %p\n",
1829		       slot->tag, slot);
1830	} else {
1831		struct NCR_700_Device_Parameters *p = SCp->device->hostdata;
1832
1833		slot->tag = SCSI_NO_TAG;
1834		/* save current command for reselection */
1835		p->current_cmnd = SCp;
1836	}
1837	/* sanity check: some of the commands generated by the mid-layer
1838	 * have an eccentric idea of their sc_data_direction */
1839	if(!scsi_sg_count(SCp) && !scsi_bufflen(SCp) &&
1840	   SCp->sc_data_direction != DMA_NONE) {
1841#ifdef NCR_700_DEBUG
1842		printk("53c700: Command");
1843		scsi_print_command(SCp);
1844		printk("Has wrong data direction %d\n", SCp->sc_data_direction);
1845#endif
1846		SCp->sc_data_direction = DMA_NONE;
1847	}
1848
1849	switch (SCp->cmnd[0]) {
1850	case REQUEST_SENSE:
1851		/* clear the internal sense magic */
1852		SCp->cmnd[6] = 0;
1853		fallthrough;
1854	default:
1855		/* OK, get it from the command */
1856		switch(SCp->sc_data_direction) {
1857		case DMA_BIDIRECTIONAL:
1858		default:
1859			printk(KERN_ERR "53c700: Unknown data direction for command ");
1860			scsi_print_command(SCp);
1861
1862			move_ins = 0;
1863			break;
1864		case DMA_NONE:
1865			move_ins = 0;
1866			break;
1867		case DMA_FROM_DEVICE:
1868			move_ins = SCRIPT_MOVE_DATA_IN;
1869			break;
1870		case DMA_TO_DEVICE:
1871			move_ins = SCRIPT_MOVE_DATA_OUT;
1872			break;
1873		}
1874	}
1875
1876	/* now build the scatter gather list */
1877	if(move_ins != 0) {
1878		int i;
1879		int sg_count;
1880		dma_addr_t vPtr = 0;
1881		struct scatterlist *sg;
1882		__u32 count = 0;
1883
1884		sg_count = scsi_dma_map(SCp);
1885		BUG_ON(sg_count < 0);
1886
1887		scsi_for_each_sg(SCp, sg, sg_count, i) {
1888			vPtr = sg_dma_address(sg);
1889			count = sg_dma_len(sg);
1890
1891			slot->SG[i].ins = bS_to_host(move_ins | count);
1892			DEBUG((" scatter block %d: move %d[%08x] from 0x%lx\n",
1893			       i, count, slot->SG[i].ins, (unsigned long)vPtr));
1894			slot->SG[i].pAddr = bS_to_host(vPtr);
1895		}
1896		slot->SG[i].ins = bS_to_host(SCRIPT_RETURN);
1897		slot->SG[i].pAddr = 0;
1898		dma_sync_to_dev(hostdata, slot->SG, sizeof(slot->SG));
1899		DEBUG((" SETTING %p to %x\n",
1900		       (&slot->pSG[i].ins),
1901		       slot->SG[i].ins));
1902	}
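	/* Illustrative example (made-up addresses and lengths): for a two
	 * segment DMA_FROM_DEVICE command the loop above would leave
	 *
	 *	SG[0].ins = SCRIPT_MOVE_DATA_IN | 0x1000,  SG[0].pAddr = 0x10000000
	 *	SG[1].ins = SCRIPT_MOVE_DATA_IN | 0x0200,  SG[1].pAddr = 0x10002000
	 *	SG[2].ins = SCRIPT_RETURN,                 SG[2].pAddr = 0
	 *
	 * (byte-swapped as needed by bS_to_host()), with the RETURN entry
	 * terminating the per-command list. */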
1903	slot->resume_offset = 0;
1904	slot->pCmd = dma_map_single(hostdata->dev, SCp->cmnd,
1905				    MAX_COMMAND_SIZE, DMA_TO_DEVICE);
1906	NCR_700_start_command(SCp);
1907	return 0;
1908}
1909
1910STATIC DEF_SCSI_QCMD(NCR_700_queuecommand)
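
/* DEF_SCSI_QCMD() comes from the SCSI midlayer headers and generates the
 * actual ->queuecommand entry point.  Roughly (a sketch of the expansion,
 * not a verbatim copy of the macro), it defines:
 *
 *	int NCR_700_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
 *	{
 *		unsigned long irq_flags;
 *		int rc;
 *
 *		spin_lock_irqsave(shost->host_lock, irq_flags);
 *		rc = NCR_700_queuecommand_lck(cmd, cmd->scsi_done);
 *		spin_unlock_irqrestore(shost->host_lock, irq_flags);
 *		return rc;
 *	}
 *
 * so NCR_700_queuecommand_lck() above always runs with the host lock held,
 * which is the same lock the interrupt handler takes. */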
1911
1912STATIC int
1913NCR_700_abort(struct scsi_cmnd * SCp)
1914{
1915	struct NCR_700_command_slot *slot;
1916
1917	scmd_printk(KERN_INFO, SCp, "abort command\n");
1918
1919	slot = (struct NCR_700_command_slot *)SCp->host_scribble;
1920
1921	if(slot == NULL)
1922		/* no outstanding command to abort */
1923		return SUCCESS;
1924	if(SCp->cmnd[0] == TEST_UNIT_READY) {
1925		/* FIXME: This is because of a problem in the new
1926		 * error handler.  When it is in error recovery, it
1927		 * will send a TUR to a device it thinks may still be
1928		 * showing a problem.  If the TUR isn't responded to,
1929		 * it will abort it and mark the device off line.
1930		 * Unfortunately, it does no other error recovery, so
1931		 * this would leave us with an outstanding command
1932		 * occupying a slot.  Rather than allow this to
1933		 * happen, we issue a bus reset to force all
1934		 * outstanding commands to terminate here. */
1935		NCR_700_internal_bus_reset(SCp->device->host);
1936		/* still drop through and return failed */
1937	}
1938	return FAILED;
1939
1940}
1941
1942STATIC int
1943NCR_700_host_reset(struct scsi_cmnd * SCp)
1944{
1945	DECLARE_COMPLETION_ONSTACK(complete);
1946	struct NCR_700_Host_Parameters *hostdata =
1947		(struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0];
1948
1949	scmd_printk(KERN_INFO, SCp,
1950		"New error handler wants HOST reset, cmd %p\n\t", SCp);
1951	scsi_print_command(SCp);
1952
1953	/* In theory, eh_complete should always be null because the
1954	 * eh is single threaded, but just in case we're handling a
1955	 * reset via sg or something */
1956	spin_lock_irq(SCp->device->host->host_lock);
1957	while (hostdata->eh_complete != NULL) {
1958		spin_unlock_irq(SCp->device->host->host_lock);
1959		msleep_interruptible(100);
1960		spin_lock_irq(SCp->device->host->host_lock);
1961	}
1962
1963	hostdata->eh_complete = &complete;
1964	NCR_700_internal_bus_reset(SCp->device->host);
1965	NCR_700_chip_reset(SCp->device->host);
1966
1967	spin_unlock_irq(SCp->device->host->host_lock);
1968	wait_for_completion(&complete);
1969	spin_lock_irq(SCp->device->host->host_lock);
1970
1971	hostdata->eh_complete = NULL;
1972	/* Revalidate the transport parameters of the failing device */
1973	if(hostdata->fast)
1974		spi_schedule_dv_device(SCp->device);
1975
1976	spin_unlock_irq(SCp->device->host->host_lock);
1977	return SUCCESS;
1978}
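
/* A minimal sketch (not compiled, not part of the driver) of the completion
 * handshake NCR_700_host_reset() relies on: the waiter publishes a
 * completion in hostdata->eh_complete, kicks the bus reset and sleeps; the
 * bus-reset branch of NCR_700_intr() calls complete() once the reset has
 * been fully processed.  The example_* names are made up, and the real code
 * additionally holds the host lock around the pointer updates. */
#if 0
static struct completion *example_reset_done;

static void example_wait_side(void)
{
	DECLARE_COMPLETION_ONSTACK(done);

	example_reset_done = &done;
	/* ... trigger the bus/chip reset here ... */
	wait_for_completion(&done);	/* sleeps until the irq side signals */
	example_reset_done = NULL;
}

static void example_irq_side(void)
{
	/* ... reset processing finished ... */
	if (example_reset_done != NULL)
		complete(example_reset_done);
}
#endif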
1979
1980STATIC void
1981NCR_700_set_period(struct scsi_target *STp, int period)
1982{
1983	struct Scsi_Host *SHp = dev_to_shost(STp->dev.parent);
1984	struct NCR_700_Host_Parameters *hostdata =
1985		(struct NCR_700_Host_Parameters *)SHp->hostdata[0];
1986
1987	if(!hostdata->fast)
1988		return;
1989
1990	if(period < hostdata->min_period)
1991		period = hostdata->min_period;
1992
1993	spi_period(STp) = period;
1994	spi_flags(STp) &= ~(NCR_700_DEV_NEGOTIATED_SYNC |
1995			    NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
1996	spi_flags(STp) |= NCR_700_DEV_PRINT_SYNC_NEGOTIATION;
1997}
1998
1999STATIC void
2000NCR_700_set_offset(struct scsi_target *STp, int offset)
2001{
2002	struct Scsi_Host *SHp = dev_to_shost(STp->dev.parent);
2003	struct NCR_700_Host_Parameters *hostdata =
2004		(struct NCR_700_Host_Parameters *)SHp->hostdata[0];
2005	int max_offset = hostdata->chip710
2006		? NCR_710_MAX_OFFSET : NCR_700_MAX_OFFSET;
2007
2008	if(!hostdata->fast)
2009		return;
2010
2011	if(offset > max_offset)
2012		offset = max_offset;
2013
2014	/* if we're currently async, make sure the period is reasonable */
2015	if(spi_offset(STp) == 0 && (spi_period(STp) < hostdata->min_period ||
2016				    spi_period(STp) > 0xff))
2017		spi_period(STp) = hostdata->min_period;
2018
2019	spi_offset(STp) = offset;
2020	spi_flags(STp) &= ~(NCR_700_DEV_NEGOTIATED_SYNC |
2021			    NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
2022	spi_flags(STp) |= NCR_700_DEV_PRINT_SYNC_NEGOTIATION;
2023}
2024
2025STATIC int
2026NCR_700_slave_alloc(struct scsi_device *SDp)
2027{
2028	SDp->hostdata = kzalloc(sizeof(struct NCR_700_Device_Parameters),
2029				GFP_KERNEL);
2030
2031	if (!SDp->hostdata)
2032		return -ENOMEM;
2033
2034	return 0;
2035}
2036
2037STATIC int
2038NCR_700_slave_configure(struct scsi_device *SDp)
2039{
2040	struct NCR_700_Host_Parameters *hostdata =
2041		(struct NCR_700_Host_Parameters *)SDp->host->hostdata[0];
2042
2043	/* to do here: allocate memory; build a queue_full list */
2044	if(SDp->tagged_supported) {
2045		scsi_change_queue_depth(SDp, NCR_700_DEFAULT_TAGS);
2046		NCR_700_set_tag_neg_state(SDp, NCR_700_START_TAG_NEGOTIATION);
2047	}
2048
2049	if(hostdata->fast) {
2050		/* Find the correct offset and period via domain validation */
2051		if (!spi_initial_dv(SDp->sdev_target))
2052			spi_dv_device(SDp);
2053	} else {
2054		spi_offset(SDp->sdev_target) = 0;
2055		spi_period(SDp->sdev_target) = 0;
2056	}
2057	return 0;
2058}
2059
2060STATIC void
2061NCR_700_slave_destroy(struct scsi_device *SDp)
2062{
2063	kfree(SDp->hostdata);
2064	SDp->hostdata = NULL;
2065}
2066
2067static int
2068NCR_700_change_queue_depth(struct scsi_device *SDp, int depth)
2069{
2070	if (depth > NCR_700_MAX_TAGS)
2071		depth = NCR_700_MAX_TAGS;
2072	return scsi_change_queue_depth(SDp, depth);
2073}
2074
2075static ssize_t
2076NCR_700_show_active_tags(struct device *dev, struct device_attribute *attr, char *buf)
2077{
2078	struct scsi_device *SDp = to_scsi_device(dev);
2079
2080	return snprintf(buf, 20, "%d\n", NCR_700_get_depth(SDp));
2081}
2082
2083static struct device_attribute NCR_700_active_tags_attr = {
2084	.attr = {
2085		.name =		"active_tags",
2086		.mode =		S_IRUGO,
2087	},
2088	.show = NCR_700_show_active_tags,
2089};
2090
2091STATIC struct device_attribute *NCR_700_dev_attrs[] = {
2092	&NCR_700_active_tags_attr,
2093	NULL,
2094};
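
/* The active_tags attribute above shows up in sysfs under the scsi_device's
 * directory (for example, something like
 * /sys/bus/scsi/devices/<host>:<channel>:<target>:<lun>/active_tags -- the
 * exact path is determined by the SCSI midlayer, not this driver) and
 * reports the driver's count of commands currently outstanding on that
 * device. */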
2095
2096EXPORT_SYMBOL(NCR_700_detect);
2097EXPORT_SYMBOL(NCR_700_release);
2098EXPORT_SYMBOL(NCR_700_intr);
2099
2100static struct spi_function_template NCR_700_transport_functions =  {
2101	.set_period	= NCR_700_set_period,
2102	.show_period	= 1,
2103	.set_offset	= NCR_700_set_offset,
2104	.show_offset	= 1,
2105};
2106
2107static int __init NCR_700_init(void)
2108{
2109	NCR_700_transport_template = spi_attach_transport(&NCR_700_transport_functions);
2110	if(!NCR_700_transport_template)
2111		return -ENODEV;
2112	return 0;
2113}
2114
2115static void __exit NCR_700_exit(void)
2116{
2117	spi_release_transport(NCR_700_transport_template);
2118}
2119
2120module_init(NCR_700_init);
2121module_exit(NCR_700_exit);
2122
2123