1 /* -*- mode: c; c-basic-offset: 8 -*- */
3 /* NCR (or Symbios) 53c700 and 53c700-66 Driver
5 * Copyright (C) 2001 by James.Bottomley@HansenPartnership.com
6 **-----------------------------------------------------------------------------
7 **
8 ** This program is free software; you can redistribute it and/or modify
9 ** it under the terms of the GNU General Public License as published by
10 ** the Free Software Foundation; either version 2 of the License, or
11 ** (at your option) any later version.
13 ** This program is distributed in the hope that it will be useful,
14 ** but WITHOUT ANY WARRANTY; without even the implied warranty of
15 ** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 ** GNU General Public License for more details.
18 ** You should have received a copy of the GNU General Public License
19 ** along with this program; if not, write to the Free Software
20 ** Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22 **-----------------------------------------------------------------------------
25 /* Notes:
27 * This driver is designed exclusively for these chips (virtually the
28 * earliest of the scripts engine chips). They need their own drivers
29 * because they are missing so many of the scripts and snazzy register
30 * features of their elder brothers (the 710, 720 and 770).
32  * The 700 is the lowliest of the line; it can only do async SCSI.
33 * The 700-66 can at least do synchronous SCSI up to 10MHz.
35 * The 700 chip has no host bus interface logic of its own. However,
36 * it is usually mapped to a location with well defined register
37 * offsets. Therefore, if you can determine the base address and the
38 * irq your board incorporating this chip uses, you can probably use
39 * this driver to run it (although you'll probably have to write a
40 * minimal wrapper for the purpose---see the NCR_D700 driver for
41  * details about how to do this; a rough sketch follows the TODO list below).
44 * TODO List:
46 * 1. Better statistics in the proc fs
48 * 2. Implement message queue (queues SCSI messages like commands) and make
49 * the abort and device reset functions use them.
50 * */
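/* A rough, purely illustrative sketch of the minimal wrapper mentioned in
 * the notes above. The names example700_template, base and irq, and the
 * 50MHz clock, are assumptions made only for this example; see NCR_D700.c
 * and lasi700.c for real, working wrappers. A probe routine would fill in
 * a host parameter block and then hand it to NCR_700_detect():
 *
 *	struct NCR_700_Host_Parameters *hostdata =
 *		kzalloc(sizeof(*hostdata), GFP_KERNEL);
 *	struct Scsi_Host *host;
 *
 *	hostdata->dev = dev;
 *	hostdata->base = ioremap(base, 0x100);
 *	hostdata->clock = 50;
 *	hostdata->chip710 = 0;
 *	host = NCR_700_detect(&example700_template, hostdata, dev);
 *	if (host != NULL)
 *		request_irq(irq, NCR_700_intr, SA_SHIRQ, "example700", host);
 */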
52 /* CHANGELOG
54 * Version 2.8
56  * Fixed a bad bug affecting tag starvation processing (previously the
57  * driver would hang the system if too many tags starved). Also fixed
58  * a bad bug having to do with 10 byte command processing and REQUEST
59 * SENSE (the command would loop forever getting a transfer length
60 * mismatch in the CMD phase).
62 * Version 2.7
64 * Fixed scripts problem which caused certain devices (notably CDRWs)
65 * to hang on initial INQUIRY. Updated NCR_700_readl/writel to use
66 * __raw_readl/writel for parisc compatibility (Thomas
67 * Bogendoerfer). Added missing SCp->request_bufflen initialisation
68 * for sense requests (Ryan Bradetich).
70 * Version 2.6
72 * Following test of the 64 bit parisc kernel by Richard Hirst,
73 * several problems have now been corrected. Also adds support for
74 * consistent memory allocation.
76 * Version 2.5
78  * More compatibility changes for the 710 (now actually works). Enhanced
79  * support for odd clock speeds which constrain SDTR negotiations.
80  * Correct cacheline separation for scsi messages and status for
81 * incoherent architectures. Use of the pci mapping functions on
82 * buffers to begin support for 64 bit drivers.
84 * Version 2.4
86 * Added support for the 53c710 chip (in 53c700 emulation mode only---no
87 * special 53c710 instructions or registers are used).
89 * Version 2.3
91 * More endianness/cache coherency changes.
93 * Better bad device handling (handles devices lying about tag
94 * queueing support and devices which fail to provide sense data on
95 * contingent allegiance conditions)
97 * Many thanks to Richard Hirst <rhirst@linuxcare.com> for patiently
98 * debugging this driver on the parisc architecture and suggesting
99 * many improvements and bug fixes.
101 * Thanks also go to Linuxcare Inc. for providing several PARISC
102 * machines for me to debug the driver on.
104 * Version 2.2
106 * Made the driver mem or io mapped; added endian invariance; added
107 * dma cache flushing operations for architectures which need it;
108 * added support for more varied clocking speeds.
110 * Version 2.1
112 * Initial modularisation from the D700. See NCR_D700.c for the rest of
113 * the changelog.
114 * */
115 #define NCR_700_VERSION "2.8"
117 #include <linux/config.h>
118 #include <linux/kernel.h>
119 #include <linux/types.h>
120 #include <linux/string.h>
121 #include <linux/ioport.h>
122 #include <linux/delay.h>
123 #include <linux/spinlock.h>
124 #include <linux/completion.h>
125 #include <linux/sched.h>
126 #include <linux/init.h>
127 #include <linux/proc_fs.h>
128 #include <linux/blkdev.h>
129 #include <linux/module.h>
130 #include <linux/interrupt.h>
131 #include <asm/dma.h>
132 #include <asm/system.h>
133 #include <asm/io.h>
134 #include <asm/pgtable.h>
135 #include <asm/byteorder.h>
137 #include <scsi/scsi.h>
138 #include <scsi/scsi_cmnd.h>
139 #include <scsi/scsi_dbg.h>
140 #include <scsi/scsi_eh.h>
141 #include <scsi/scsi_host.h>
142 #include <scsi/scsi_tcq.h>
143 #include <scsi/scsi_transport.h>
144 #include <scsi/scsi_transport_spi.h>
146 #include "53c700.h"
148 /* NOTE: For 64 bit drivers there are points in the code where we use
149 * a non dereferenceable pointer to point to a structure in dma-able
150 * memory (which is 32 bits) so that we can use all of the structure
151 * operations but take the address at the end. This macro allows us
152 * to truncate the 64 bit pointer down to 32 bits without the compiler
153 * complaining */
154 #define to32bit(x) ((__u32)((unsigned long)(x)))
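/* e.g. to32bit(&slot->pSG[0].ins) below is the 32 bit value that gets
 * patched into the script as the SG script start address */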
156 #ifdef NCR_700_DEBUG
157 #define STATIC
158 #else
159 #define STATIC static
160 #endif
162 MODULE_AUTHOR("James Bottomley");
163 MODULE_DESCRIPTION("53c700 and 53c700-66 Driver");
164 MODULE_LICENSE("GPL");
166 /* This is the script */
167 #include "53c700_d.h"
170 STATIC int NCR_700_queuecommand(struct scsi_cmnd *, void (*done)(struct scsi_cmnd *));
171 STATIC int NCR_700_abort(struct scsi_cmnd * SCpnt);
172 STATIC int NCR_700_bus_reset(struct scsi_cmnd * SCpnt);
173 STATIC int NCR_700_dev_reset(struct scsi_cmnd * SCpnt);
174 STATIC int NCR_700_host_reset(struct scsi_cmnd * SCpnt);
175 STATIC void NCR_700_chip_setup(struct Scsi_Host *host);
176 STATIC void NCR_700_chip_reset(struct Scsi_Host *host);
177 STATIC int NCR_700_slave_configure(struct scsi_device *SDpnt);
178 STATIC void NCR_700_slave_destroy(struct scsi_device *SDpnt);
179 static int NCR_700_change_queue_depth(struct scsi_device *SDpnt, int depth);
180 static int NCR_700_change_queue_type(struct scsi_device *SDpnt, int depth);
182 STATIC struct device_attribute *NCR_700_dev_attrs[];
184 STATIC struct scsi_transport_template *NCR_700_transport_template = NULL;
186 static char *NCR_700_phase[] = {
188 "after selection",
189 "before command phase",
190 "after command phase",
191 "after status phase",
192 "after data in phase",
193 "after data out phase",
194 "during data phase",
197 static char *NCR_700_condition[] = {
199 "NOT MSG_OUT",
200 "UNEXPECTED PHASE",
201 "NOT MSG_IN",
202 "UNEXPECTED MSG",
203 "MSG_IN",
204 "SDTR_MSG RECEIVED",
205 "REJECT_MSG RECEIVED",
206 "DISCONNECT_MSG RECEIVED",
207 "MSG_OUT",
208 "DATA_IN",
212 static char *NCR_700_fatal_messages[] = {
213 "unexpected message after reselection",
214 "still MSG_OUT after message injection",
215 "not MSG_IN after selection",
216 "Illegal message length received",
219 static char *NCR_700_SBCL_bits[] = {
220 "IO ",
221 "CD ",
222 "MSG ",
223 "ATN ",
224 "SEL ",
225 "BSY ",
226 "ACK ",
227 "REQ ",
230 static char *NCR_700_SBCL_to_phase[] = {
231 "DATA_OUT",
232 "DATA_IN",
233 "CMD_OUT",
234 "STATE",
235 "ILLEGAL PHASE",
236 "ILLEGAL PHASE",
237 "MSG OUT",
238 "MSG IN",
241 static __u8 NCR_700_SDTR_msg[] = {
242 0x01, /* Extended message */
243 0x03, /* Extended message Length */
244 0x01, /* SDTR Extended message */
245 NCR_700_MIN_PERIOD,
246 NCR_700_MAX_OFFSET
249 /* This translates the SDTR message offset and period to a value
250 * which can be loaded into the SXFER_REG.
252 * NOTE: According to SCSI-2, the true transfer period (in ns) is
253 * actually four times this period value */
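/* Worked illustration (assuming a 50MHz synchronous clock): an SDTR period
 * value of 50 is a 200ns true period, so XFERP = (200 * 50)/1000 - 4 = 6;
 * with an offset of 8 the routine below returns
 * (8 & 0x0f) | ((6 & 0x07) << 4) = 0x68 */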
254 static inline __u8
255 NCR_700_offset_period_to_sxfer(struct NCR_700_Host_Parameters *hostdata,
256 __u8 offset, __u8 period)
258 int XFERP;
260 __u8 min_xferp = (hostdata->chip710
261 ? NCR_710_MIN_XFERP : NCR_700_MIN_XFERP);
262 __u8 max_offset = (hostdata->chip710
263 ? NCR_710_MAX_OFFSET : NCR_700_MAX_OFFSET);
265 if(offset == 0)
266 return 0;
268 if(period < hostdata->min_period) {
269 printk(KERN_WARNING "53c700: Period %dns is less than this chip's minimum, setting to %d\n", period*4, NCR_700_SDTR_msg[3]*4);
270 period = hostdata->min_period;
272 XFERP = (period*4 * hostdata->sync_clock)/1000 - 4;
273 if(offset > max_offset) {
274 printk(KERN_WARNING "53c700: Offset %d exceeds chip maximum, setting to %d\n",
275 offset, max_offset);
276 offset = max_offset;
278 if(XFERP < min_xferp) {
279 printk(KERN_WARNING "53c700: XFERP %d is less than minium, setting to %d\n",
280 XFERP, min_xferp);
281 XFERP = min_xferp;
283 return (offset & 0x0f) | (XFERP & 0x07)<<4;
286 static inline __u8
287 NCR_700_get_SXFER(struct scsi_device *SDp)
289 struct NCR_700_Host_Parameters *hostdata =
290 (struct NCR_700_Host_Parameters *)SDp->host->hostdata[0];
292 return NCR_700_offset_period_to_sxfer(hostdata,
293 spi_offset(SDp->sdev_target),
294 spi_period(SDp->sdev_target));
297 struct Scsi_Host *
298 NCR_700_detect(struct scsi_host_template *tpnt,
299 struct NCR_700_Host_Parameters *hostdata, struct device *dev)
301 dma_addr_t pScript, pSlots;
302 __u8 *memory;
303 __u32 *script;
304 struct Scsi_Host *host;
305 static int banner = 0;
306 int j;
308 if(tpnt->sdev_attrs == NULL)
309 tpnt->sdev_attrs = NCR_700_dev_attrs;
311 memory = dma_alloc_noncoherent(hostdata->dev, TOTAL_MEM_SIZE,
312 &pScript, GFP_KERNEL);
313 if(memory == NULL) {
314 printk(KERN_ERR "53c700: Failed to allocate memory for driver, detatching\n");
315 return NULL;
318 script = (__u32 *)memory;
319 hostdata->msgin = memory + MSGIN_OFFSET;
320 hostdata->msgout = memory + MSGOUT_OFFSET;
321 hostdata->status = memory + STATUS_OFFSET;
322 /* all of these offsets are L1_CACHE_BYTES separated. It is fatal
323 * if this isn't sufficient separation to avoid dma flushing issues */
324 BUG_ON(!dma_is_consistent(pScript) && L1_CACHE_BYTES < dma_get_cache_alignment());
325 hostdata->slots = (struct NCR_700_command_slot *)(memory + SLOTS_OFFSET);
326 hostdata->dev = dev;
328 pSlots = pScript + SLOTS_OFFSET;
330 /* Fill in the missing routines from the host template */
331 tpnt->queuecommand = NCR_700_queuecommand;
332 tpnt->eh_abort_handler = NCR_700_abort;
333 tpnt->eh_device_reset_handler = NCR_700_dev_reset;
334 tpnt->eh_bus_reset_handler = NCR_700_bus_reset;
335 tpnt->eh_host_reset_handler = NCR_700_host_reset;
336 tpnt->can_queue = NCR_700_COMMAND_SLOTS_PER_HOST;
337 tpnt->sg_tablesize = NCR_700_SG_SEGMENTS;
338 tpnt->cmd_per_lun = NCR_700_CMD_PER_LUN;
339 tpnt->use_clustering = ENABLE_CLUSTERING;
340 tpnt->slave_configure = NCR_700_slave_configure;
341 tpnt->slave_destroy = NCR_700_slave_destroy;
342 tpnt->change_queue_depth = NCR_700_change_queue_depth;
343 tpnt->change_queue_type = NCR_700_change_queue_type;
345 if(tpnt->name == NULL)
346 tpnt->name = "53c700";
347 if(tpnt->proc_name == NULL)
348 tpnt->proc_name = "53c700";
351 host = scsi_host_alloc(tpnt, 4);
352 if (!host)
353 return NULL;
354 memset(hostdata->slots, 0, sizeof(struct NCR_700_command_slot)
355 * NCR_700_COMMAND_SLOTS_PER_HOST);
356 for(j = 0; j < NCR_700_COMMAND_SLOTS_PER_HOST; j++) {
357 dma_addr_t offset = (dma_addr_t)((unsigned long)&hostdata->slots[j].SG[0]
358 - (unsigned long)&hostdata->slots[0].SG[0]);
359 hostdata->slots[j].pSG = (struct NCR_700_SG_List *)((unsigned long)(pSlots + offset));
360 if(j == 0)
361 hostdata->free_list = &hostdata->slots[j];
362 else
363 hostdata->slots[j-1].ITL_forw = &hostdata->slots[j];
364 hostdata->slots[j].state = NCR_700_SLOT_FREE;
367 for(j = 0; j < sizeof(SCRIPT)/sizeof(SCRIPT[0]); j++) {
368 script[j] = bS_to_host(SCRIPT[j]);
371 /* adjust all labels to be bus physical */
372 for(j = 0; j < PATCHES; j++) {
373 script[LABELPATCHES[j]] = bS_to_host(pScript + SCRIPT[LABELPATCHES[j]]);
375 /* now patch up fixed addresses. */
376 script_patch_32(script, MessageLocation,
377 pScript + MSGOUT_OFFSET);
378 script_patch_32(script, StatusAddress,
379 pScript + STATUS_OFFSET);
380 script_patch_32(script, ReceiveMsgAddress,
381 pScript + MSGIN_OFFSET);
383 hostdata->script = script;
384 hostdata->pScript = pScript;
385 dma_sync_single_for_device(hostdata->dev, pScript, sizeof(SCRIPT), DMA_TO_DEVICE);
386 hostdata->state = NCR_700_HOST_FREE;
387 hostdata->cmd = NULL;
388 host->max_id = 7;
389 host->max_lun = NCR_700_MAX_LUNS;
390 BUG_ON(NCR_700_transport_template == NULL);
391 host->transportt = NCR_700_transport_template;
392 host->unique_id = (unsigned long)hostdata->base;
393 hostdata->eh_complete = NULL;
394 host->hostdata[0] = (unsigned long)hostdata;
395 /* kick the chip */
396 NCR_700_writeb(0xff, host, CTEST9_REG);
397 if(hostdata->chip710)
398 hostdata->rev = (NCR_700_readb(host, CTEST8_REG)>>4) & 0x0f;
399 else
400 hostdata->rev = (NCR_700_readb(host, CTEST7_REG)>>4) & 0x0f;
401 hostdata->fast = (NCR_700_readb(host, CTEST9_REG) == 0);
402 if(banner == 0) {
403 printk(KERN_NOTICE "53c700: Version " NCR_700_VERSION " By James.Bottomley@HansenPartnership.com\n");
404 banner = 1;
406 printk(KERN_NOTICE "scsi%d: %s rev %d %s\n", host->host_no,
407 hostdata->chip710 ? "53c710" :
408 (hostdata->fast ? "53c700-66" : "53c700"),
409 hostdata->rev, hostdata->differential ?
410 "(Differential)" : "");
411 /* reset the chip */
412 NCR_700_chip_reset(host);
414 if (scsi_add_host(host, dev)) {
415 dev_printk(KERN_ERR, dev, "53c700: scsi_add_host failed\n");
416 scsi_host_put(host);
417 return NULL;
420 spi_signalling(host) = hostdata->differential ? SPI_SIGNAL_HVD :
421 SPI_SIGNAL_SE;
423 return host;
427 NCR_700_release(struct Scsi_Host *host)
429 struct NCR_700_Host_Parameters *hostdata =
430 (struct NCR_700_Host_Parameters *)host->hostdata[0];
432 dma_free_noncoherent(hostdata->dev, TOTAL_MEM_SIZE,
433 hostdata->script, hostdata->pScript);
434 return 1;
437 static inline __u8
438 NCR_700_identify(int can_disconnect, __u8 lun)
440 return IDENTIFY_BASE |
441 ((can_disconnect) ? 0x40 : 0) |
442 (lun & NCR_700_LUN_MASK);
446  * Function : static inline int NCR_700_data_residual (struct Scsi_Host *host)
448 * Purpose : return residual data count of what's in the chip. If you
449 * really want to know what this function is doing, it's almost a
450 * direct transcription of the algorithm described in the 53c710
451 * guide, except that the DBC and DFIFO registers are only 6 bits
452 * wide on a 53c700.
454 * Inputs : host - SCSI host */
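/* Purely as an illustration: on a 53c700 (6 bit counters), if DFIFO reads
 * 0x25 and the low bits of DBC read 0x21, then
 * ((0x25 & 0x3f) - (0x21 & 0x3f)) & 0x3f = 4 bytes remain in the DMA FIFO;
 * the SSTAT/SIDL/SODL tests below may add one or two more for bytes latched
 * in the SCSI data registers */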
455 static inline int
456 NCR_700_data_residual (struct Scsi_Host *host) {
457 struct NCR_700_Host_Parameters *hostdata =
458 (struct NCR_700_Host_Parameters *)host->hostdata[0];
459 int count, synchronous = 0;
460 unsigned int ddir;
462 if(hostdata->chip710) {
463 count = ((NCR_700_readb(host, DFIFO_REG) & 0x7f) -
464 (NCR_700_readl(host, DBC_REG) & 0x7f)) & 0x7f;
465 } else {
466 count = ((NCR_700_readb(host, DFIFO_REG) & 0x3f) -
467 (NCR_700_readl(host, DBC_REG) & 0x3f)) & 0x3f;
470 if(hostdata->fast)
471 synchronous = NCR_700_readb(host, SXFER_REG) & 0x0f;
473 /* get the data direction */
474 ddir = NCR_700_readb(host, CTEST0_REG) & 0x01;
476 if (ddir) {
477 /* Receive */
478 if (synchronous)
479 count += (NCR_700_readb(host, SSTAT2_REG) & 0xf0) >> 4;
480 else
481 if (NCR_700_readb(host, SSTAT1_REG) & SIDL_REG_FULL)
482 ++count;
483 } else {
484 /* Send */
485 __u8 sstat = NCR_700_readb(host, SSTAT1_REG);
486 if (sstat & SODL_REG_FULL)
487 ++count;
488 if (synchronous && (sstat & SODR_REG_FULL))
489 ++count;
491 #ifdef NCR_700_DEBUG
492 if(count)
493 printk("RESIDUAL IS %d (ddir %d)\n", count, ddir);
494 #endif
495 return count;
498 /* print out the SCSI wires and corresponding phase from the SBCL register
499 * in the chip */
500 static inline char *
501 sbcl_to_string(__u8 sbcl)
503 int i;
504 static char ret[256];
506 ret[0]='\0';
507 for(i=0; i<8; i++) {
508 if((1<<i) & sbcl)
509 strcat(ret, NCR_700_SBCL_bits[i]);
511 strcat(ret, NCR_700_SBCL_to_phase[sbcl & 0x07]);
512 return ret;
515 static inline __u8
516 bitmap_to_number(__u8 bitmap)
518 __u8 i;
520 for(i=0; i<8 && !(bitmap &(1<<i)); i++)
522 return i;
525 /* Pull a slot off the free list */
526 STATIC struct NCR_700_command_slot *
527 find_empty_slot(struct NCR_700_Host_Parameters *hostdata)
529 struct NCR_700_command_slot *slot = hostdata->free_list;
531 if(slot == NULL) {
532 /* sanity check */
533 if(hostdata->command_slot_count != NCR_700_COMMAND_SLOTS_PER_HOST)
534 printk(KERN_ERR "SLOTS FULL, but count is %d, should be %d\n", hostdata->command_slot_count, NCR_700_COMMAND_SLOTS_PER_HOST);
535 return NULL;
538 if(slot->state != NCR_700_SLOT_FREE)
539 /* should panic! */
540 printk(KERN_ERR "BUSY SLOT ON FREE LIST!!!\n");
543 hostdata->free_list = slot->ITL_forw;
544 slot->ITL_forw = NULL;
547 /* NOTE: set the state to busy here, not queued, since this
548 * indicates the slot is in use and cannot be run by the IRQ
549 * finish routine. If we cannot queue the command when it
550  * is properly built, we then change to NCR_700_SLOT_QUEUED */
551 slot->state = NCR_700_SLOT_BUSY;
552 hostdata->command_slot_count++;
554 return slot;
557 STATIC void
558 free_slot(struct NCR_700_command_slot *slot,
559 struct NCR_700_Host_Parameters *hostdata)
561 if((slot->state & NCR_700_SLOT_MASK) != NCR_700_SLOT_MAGIC) {
562 printk(KERN_ERR "53c700: SLOT %p is not MAGIC!!!\n", slot);
564 if(slot->state == NCR_700_SLOT_FREE) {
565 printk(KERN_ERR "53c700: SLOT %p is FREE!!!\n", slot);
568 slot->resume_offset = 0;
569 slot->cmnd = NULL;
570 slot->state = NCR_700_SLOT_FREE;
571 slot->ITL_forw = hostdata->free_list;
572 hostdata->free_list = slot;
573 hostdata->command_slot_count--;
577 /* This routine really does very little. The command is indexed on
578 the ITL and (if tagged) the ITLQ lists in _queuecommand */
579 STATIC void
580 save_for_reselection(struct NCR_700_Host_Parameters *hostdata,
581 struct scsi_cmnd *SCp, __u32 dsp)
583 /* It's just possible that this gets executed twice */
584 if(SCp != NULL) {
585 struct NCR_700_command_slot *slot =
586 (struct NCR_700_command_slot *)SCp->host_scribble;
588 slot->resume_offset = dsp;
590 hostdata->state = NCR_700_HOST_FREE;
591 hostdata->cmd = NULL;
594 STATIC inline void
595 NCR_700_unmap(struct NCR_700_Host_Parameters *hostdata, struct scsi_cmnd *SCp,
596 struct NCR_700_command_slot *slot)
598 if(SCp->sc_data_direction != DMA_NONE &&
599 SCp->sc_data_direction != DMA_BIDIRECTIONAL) {
600 if(SCp->use_sg) {
601 dma_unmap_sg(hostdata->dev, SCp->buffer,
602 SCp->use_sg, SCp->sc_data_direction);
603 } else {
604 dma_unmap_single(hostdata->dev, slot->dma_handle,
605 SCp->request_bufflen,
606 SCp->sc_data_direction);
611 STATIC inline void
612 NCR_700_scsi_done(struct NCR_700_Host_Parameters *hostdata,
613 struct scsi_cmnd *SCp, int result)
615 hostdata->state = NCR_700_HOST_FREE;
616 hostdata->cmd = NULL;
618 if(SCp != NULL) {
619 struct NCR_700_command_slot *slot =
620 (struct NCR_700_command_slot *)SCp->host_scribble;
622 NCR_700_unmap(hostdata, SCp, slot);
623 dma_unmap_single(hostdata->dev, slot->pCmd,
624 sizeof(SCp->cmnd), DMA_TO_DEVICE);
625 if(SCp->cmnd[0] == REQUEST_SENSE && SCp->cmnd[6] == NCR_700_INTERNAL_SENSE_MAGIC) {
626 #ifdef NCR_700_DEBUG
627 printk(" ORIGINAL CMD %p RETURNED %d, new return is %d sense is\n",
628 SCp, SCp->cmnd[7], result);
629 scsi_print_sense("53c700", SCp);
631 #endif
632 /* restore the old result if the request sense was
633 * successful */
634 if(result == 0)
635 result = SCp->cmnd[7];
636 /* now restore the original command */
637 memcpy((void *) SCp->cmnd, (void *) SCp->data_cmnd,
638 sizeof(SCp->data_cmnd));
639 SCp->request_buffer = SCp->buffer;
640 SCp->request_bufflen = SCp->bufflen;
641 SCp->use_sg = SCp->old_use_sg;
642 SCp->cmd_len = SCp->old_cmd_len;
643 SCp->sc_data_direction = SCp->sc_old_data_direction;
644 SCp->underflow = SCp->old_underflow;
647 free_slot(slot, hostdata);
648 #ifdef NCR_700_DEBUG
649 if(NCR_700_get_depth(SCp->device) == 0 ||
650 NCR_700_get_depth(SCp->device) > SCp->device->queue_depth)
651 printk(KERN_ERR "Invalid depth in NCR_700_scsi_done(): %d\n",
652 NCR_700_get_depth(SCp->device));
653 #endif /* NCR_700_DEBUG */
654 NCR_700_set_depth(SCp->device, NCR_700_get_depth(SCp->device) - 1);
656 SCp->host_scribble = NULL;
657 SCp->result = result;
658 SCp->scsi_done(SCp);
659 } else {
660 printk(KERN_ERR "53c700: SCSI DONE HAS NULL SCp\n");
665 STATIC void
666 NCR_700_internal_bus_reset(struct Scsi_Host *host)
668 /* Bus reset */
669 NCR_700_writeb(ASSERT_RST, host, SCNTL1_REG);
670 udelay(50);
671 NCR_700_writeb(0, host, SCNTL1_REG);
675 STATIC void
676 NCR_700_chip_setup(struct Scsi_Host *host)
678 struct NCR_700_Host_Parameters *hostdata =
679 (struct NCR_700_Host_Parameters *)host->hostdata[0];
680 __u32 dcntl_extra = 0;
681 __u8 min_period;
682 __u8 min_xferp = (hostdata->chip710 ? NCR_710_MIN_XFERP : NCR_700_MIN_XFERP);
684 if(hostdata->chip710) {
685 __u8 burst_disable = hostdata->burst_disable
686 ? BURST_DISABLE : 0;
687 dcntl_extra = COMPAT_700_MODE;
689 NCR_700_writeb(dcntl_extra, host, DCNTL_REG);
690 NCR_700_writeb(BURST_LENGTH_8 | hostdata->dmode_extra,
691 host, DMODE_710_REG);
692 NCR_700_writeb(burst_disable | (hostdata->differential ?
693 DIFF : 0), host, CTEST7_REG);
694 NCR_700_writeb(BTB_TIMER_DISABLE, host, CTEST0_REG);
695 NCR_700_writeb(FULL_ARBITRATION | ENABLE_PARITY | PARITY
696 | AUTO_ATN, host, SCNTL0_REG);
697 } else {
698 NCR_700_writeb(BURST_LENGTH_8 | hostdata->dmode_extra,
699 host, DMODE_700_REG);
700 NCR_700_writeb(hostdata->differential ?
701 DIFF : 0, host, CTEST7_REG);
702 if(hostdata->fast) {
703 /* this is for 700-66, does nothing on 700 */
704 NCR_700_writeb(LAST_DIS_ENBL | ENABLE_ACTIVE_NEGATION
705 | GENERATE_RECEIVE_PARITY, host,
706 CTEST8_REG);
707 } else {
708 NCR_700_writeb(FULL_ARBITRATION | ENABLE_PARITY
709 | PARITY | AUTO_ATN, host, SCNTL0_REG);
713 NCR_700_writeb(1 << host->this_id, host, SCID_REG);
714 NCR_700_writeb(0, host, SBCL_REG);
715 NCR_700_writeb(ASYNC_OPERATION, host, SXFER_REG);
717 NCR_700_writeb(PHASE_MM_INT | SEL_TIMEOUT_INT | GROSS_ERR_INT | UX_DISC_INT
718 | RST_INT | PAR_ERR_INT | SELECT_INT, host, SIEN_REG);
720 NCR_700_writeb(ABORT_INT | INT_INST_INT | ILGL_INST_INT, host, DIEN_REG);
721 NCR_700_writeb(ENABLE_SELECT, host, SCNTL1_REG);
722 if(hostdata->clock > 75) {
723 printk(KERN_ERR "53c700: Clock speed %dMHz is too high: 75Mhz is the maximum this chip can be driven at\n", hostdata->clock);
724 /* do the best we can, but the async clock will be out
725 * of spec: sync divider 2, async divider 3 */
726 DEBUG(("53c700: sync 2 async 3\n"));
727 NCR_700_writeb(SYNC_DIV_2_0, host, SBCL_REG);
728 NCR_700_writeb(ASYNC_DIV_3_0 | dcntl_extra, host, DCNTL_REG);
729 hostdata->sync_clock = hostdata->clock/2;
730 } else if(hostdata->clock > 50 && hostdata->clock <= 75) {
731 /* sync divider 1.5, async divider 3 */
732 DEBUG(("53c700: sync 1.5 async 3\n"));
733 NCR_700_writeb(SYNC_DIV_1_5, host, SBCL_REG);
734 NCR_700_writeb(ASYNC_DIV_3_0 | dcntl_extra, host, DCNTL_REG);
735 hostdata->sync_clock = hostdata->clock*2;
736 hostdata->sync_clock /= 3;
738 } else if(hostdata->clock > 37 && hostdata->clock <= 50) {
739 /* sync divider 1, async divider 2 */
740 DEBUG(("53c700: sync 1 async 2\n"));
741 NCR_700_writeb(SYNC_DIV_1_0, host, SBCL_REG);
742 NCR_700_writeb(ASYNC_DIV_2_0 | dcntl_extra, host, DCNTL_REG);
743 hostdata->sync_clock = hostdata->clock;
744 } else if(hostdata->clock > 25 && hostdata->clock <=37) {
745 /* sync divider 1, async divider 1.5 */
746 DEBUG(("53c700: sync 1 async 1.5\n"));
747 NCR_700_writeb(SYNC_DIV_1_0, host, SBCL_REG);
748 NCR_700_writeb(ASYNC_DIV_1_5 | dcntl_extra, host, DCNTL_REG);
749 hostdata->sync_clock = hostdata->clock;
750 } else {
751 DEBUG(("53c700: sync 1 async 1\n"));
752 NCR_700_writeb(SYNC_DIV_1_0, host, SBCL_REG);
753 NCR_700_writeb(ASYNC_DIV_1_0 | dcntl_extra, host, DCNTL_REG);
754 /* sync divider 1, async divider 1 */
755 hostdata->sync_clock = hostdata->clock;
757 /* Calculate the actual minimum period that can be supported
758 * by our synchronous clock speed. See the 710 manual for
759 * exact details of this calculation which is based on a
760 * setting of the SXFER register */
761 min_period = 1000*(4+min_xferp)/(4*hostdata->sync_clock);
762 hostdata->min_period = NCR_700_MIN_PERIOD;
763 if(min_period > NCR_700_MIN_PERIOD)
764 hostdata->min_period = min_period;
767 STATIC void
768 NCR_700_chip_reset(struct Scsi_Host *host)
770 struct NCR_700_Host_Parameters *hostdata =
771 (struct NCR_700_Host_Parameters *)host->hostdata[0];
772 if(hostdata->chip710) {
773 NCR_700_writeb(SOFTWARE_RESET_710, host, ISTAT_REG);
774 udelay(100);
776 NCR_700_writeb(0, host, ISTAT_REG);
777 } else {
778 NCR_700_writeb(SOFTWARE_RESET, host, DCNTL_REG);
779 udelay(100);
781 NCR_700_writeb(0, host, DCNTL_REG);
784 mdelay(1000);
786 NCR_700_chip_setup(host);
789 /* The heart of the message processing engine is that the instruction
790 * immediately after the INT is the normal case (and so must be CLEAR
791 * ACK). If we want to do something else, we call that routine in
792 * scripts and set temp to be the normal case + 8 (skipping the CLEAR
793 * ACK) so that the routine returns correctly to resume its activity
794 * */
795 STATIC __u32
796 process_extended_message(struct Scsi_Host *host,
797 struct NCR_700_Host_Parameters *hostdata,
798 struct scsi_cmnd *SCp, __u32 dsp, __u32 dsps)
800 __u32 resume_offset = dsp, temp = dsp + 8;
801 __u8 pun = 0xff, lun = 0xff;
803 if(SCp != NULL) {
804 pun = SCp->device->id;
805 lun = SCp->device->lun;
808 switch(hostdata->msgin[2]) {
809 case A_SDTR_MSG:
810 if(SCp != NULL && NCR_700_is_flag_set(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION)) {
811 struct scsi_target *starget = SCp->device->sdev_target;
812 __u8 period = hostdata->msgin[3];
813 __u8 offset = hostdata->msgin[4];
815 if(offset == 0 || period == 0) {
816 offset = 0;
817 period = 0;
820 spi_offset(starget) = offset;
821 spi_period(starget) = period;
823 if(NCR_700_is_flag_set(SCp->device, NCR_700_DEV_PRINT_SYNC_NEGOTIATION)) {
824 spi_display_xfer_agreement(starget);
825 NCR_700_clear_flag(SCp->device, NCR_700_DEV_PRINT_SYNC_NEGOTIATION);
828 NCR_700_set_flag(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC);
829 NCR_700_clear_flag(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
831 NCR_700_writeb(NCR_700_get_SXFER(SCp->device),
832 host, SXFER_REG);
834 } else {
835 /* SDTR message out of the blue, reject it */
836 printk(KERN_WARNING "scsi%d Unexpected SDTR msg\n",
837 host->host_no);
838 hostdata->msgout[0] = A_REJECT_MSG;
839 dma_cache_sync(hostdata->msgout, 1, DMA_TO_DEVICE);
840 script_patch_16(hostdata->script, MessageCount, 1);
841 /* SendMsgOut returns, so set up the return
842 * address */
843 resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
845 break;
847 case A_WDTR_MSG:
848 printk(KERN_INFO "scsi%d: (%d:%d), Unsolicited WDTR after CMD, Rejecting\n",
849 host->host_no, pun, lun);
850 hostdata->msgout[0] = A_REJECT_MSG;
851 dma_cache_sync(hostdata->msgout, 1, DMA_TO_DEVICE);
852 script_patch_16(hostdata->script, MessageCount, 1);
853 resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
855 break;
857 default:
858 printk(KERN_INFO "scsi%d (%d:%d): Unexpected message %s: ",
859 host->host_no, pun, lun,
860 NCR_700_phase[(dsps & 0xf00) >> 8]);
861 scsi_print_msg(hostdata->msgin);
862 printk("\n");
863 /* just reject it */
864 hostdata->msgout[0] = A_REJECT_MSG;
865 dma_cache_sync(hostdata->msgout, 1, DMA_TO_DEVICE);
866 script_patch_16(hostdata->script, MessageCount, 1);
867 /* SendMsgOut returns, so set up the return
868 * address */
869 resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
871 NCR_700_writel(temp, host, TEMP_REG);
872 return resume_offset;
875 STATIC __u32
876 process_message(struct Scsi_Host *host, struct NCR_700_Host_Parameters *hostdata,
877 struct scsi_cmnd *SCp, __u32 dsp, __u32 dsps)
879 /* work out where to return to */
880 __u32 temp = dsp + 8, resume_offset = dsp;
881 __u8 pun = 0xff, lun = 0xff;
883 if(SCp != NULL) {
884 pun = SCp->device->id;
885 lun = SCp->device->lun;
888 #ifdef NCR_700_DEBUG
889 printk("scsi%d (%d:%d): message %s: ", host->host_no, pun, lun,
890 NCR_700_phase[(dsps & 0xf00) >> 8]);
891 scsi_print_msg(hostdata->msgin);
892 printk("\n");
893 #endif
895 switch(hostdata->msgin[0]) {
897 case A_EXTENDED_MSG:
898 resume_offset = process_extended_message(host, hostdata, SCp,
899 dsp, dsps);
900 break;
902 case A_REJECT_MSG:
903 if(SCp != NULL && NCR_700_is_flag_set(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION)) {
904 /* Rejected our sync negotiation attempt */
905 spi_period(SCp->device->sdev_target) =
906 spi_offset(SCp->device->sdev_target) = 0;
907 NCR_700_set_flag(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC);
908 NCR_700_clear_flag(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
909 } else if(SCp != NULL && NCR_700_get_tag_neg_state(SCp->device) == NCR_700_DURING_TAG_NEGOTIATION) {
910 /* rejected our first simple tag message */
911 printk(KERN_WARNING "scsi%d (%d:%d) Rejected first tag queue attempt, turning off tag queueing\n", host->host_no, pun, lun);
912 /* we're done negotiating */
913 NCR_700_set_tag_neg_state(SCp->device, NCR_700_FINISHED_TAG_NEGOTIATION);
914 hostdata->tag_negotiated &= ~(1<<SCp->device->id);
915 SCp->device->tagged_supported = 0;
916 scsi_deactivate_tcq(SCp->device, host->cmd_per_lun);
917 } else {
918 printk(KERN_WARNING "scsi%d (%d:%d) Unexpected REJECT Message %s\n",
919 host->host_no, pun, lun,
920 NCR_700_phase[(dsps & 0xf00) >> 8]);
921 /* however, just ignore it */
923 break;
925 case A_PARITY_ERROR_MSG:
926 printk(KERN_ERR "scsi%d (%d:%d) Parity Error!\n", host->host_no,
927 pun, lun);
928 NCR_700_internal_bus_reset(host);
929 break;
930 case A_SIMPLE_TAG_MSG:
931 printk(KERN_INFO "scsi%d (%d:%d) SIMPLE TAG %d %s\n", host->host_no,
932 pun, lun, hostdata->msgin[1],
933 NCR_700_phase[(dsps & 0xf00) >> 8]);
934 /* just ignore it */
935 break;
936 default:
937 printk(KERN_INFO "scsi%d (%d:%d): Unexpected message %s: ",
938 host->host_no, pun, lun,
939 NCR_700_phase[(dsps & 0xf00) >> 8]);
941 scsi_print_msg(hostdata->msgin);
942 printk("\n");
943 /* just reject it */
944 hostdata->msgout[0] = A_REJECT_MSG;
945 dma_cache_sync(hostdata->msgout, 1, DMA_TO_DEVICE);
946 script_patch_16(hostdata->script, MessageCount, 1);
947 /* SendMsgOut returns, so set up the return
948 * address */
949 resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
951 break;
953 NCR_700_writel(temp, host, TEMP_REG);
954 /* set us up to receive another message */
955 dma_cache_sync(hostdata->msgin, MSG_ARRAY_SIZE, DMA_FROM_DEVICE);
956 return resume_offset;
959 STATIC __u32
960 process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp,
961 struct Scsi_Host *host,
962 struct NCR_700_Host_Parameters *hostdata)
964 __u32 resume_offset = 0;
965 __u8 pun = 0xff, lun=0xff;
967 if(SCp != NULL) {
968 pun = SCp->device->id;
969 lun = SCp->device->lun;
972 if(dsps == A_GOOD_STATUS_AFTER_STATUS) {
973 DEBUG((" COMMAND COMPLETE, status=%02x\n",
974 hostdata->status[0]));
975 /* OK, if TCQ still under negotiation, we now know it works */
976 if (NCR_700_get_tag_neg_state(SCp->device) == NCR_700_DURING_TAG_NEGOTIATION)
977 NCR_700_set_tag_neg_state(SCp->device,
978 NCR_700_FINISHED_TAG_NEGOTIATION);
980 /* check for contingent allegiance conditions */
981 if(status_byte(hostdata->status[0]) == CHECK_CONDITION ||
982 status_byte(hostdata->status[0]) == COMMAND_TERMINATED) {
983 struct NCR_700_command_slot *slot =
984 (struct NCR_700_command_slot *)SCp->host_scribble;
985 if(SCp->cmnd[0] == REQUEST_SENSE) {
986 /* OOPS: bad device, returning another
987 * contingent allegiance condition */
988 printk(KERN_ERR "scsi%d (%d:%d) broken device is looping in contingent allegiance: ignoring\n", host->host_no, pun, lun);
989 NCR_700_scsi_done(hostdata, SCp, hostdata->status[0]);
990 } else {
991 #ifdef NCR_DEBUG
992 scsi_print_command(SCp);
993 printk(" cmd %p has status %d, requesting sense\n",
994 SCp, hostdata->status[0]);
995 #endif
996 /* we can destroy the command here
997 * because the contingent allegiance
998 * condition will cause a retry which
999 * will re-copy the command from the
1000 * saved data_cmnd. We also unmap any
1001 * data associated with the command
1002 * here */
1003 NCR_700_unmap(hostdata, SCp, slot);
1005 SCp->cmnd[0] = REQUEST_SENSE;
1006 SCp->cmnd[1] = (SCp->device->lun & 0x7) << 5;
1007 SCp->cmnd[2] = 0;
1008 SCp->cmnd[3] = 0;
1009 SCp->cmnd[4] = sizeof(SCp->sense_buffer);
1010 SCp->cmnd[5] = 0;
1011 SCp->cmd_len = 6;
1012 /* Here's a quiet hack: the
1013 * REQUEST_SENSE command is six bytes,
1014 * so store a flag indicating that
1015 * this was an internal sense request
1016 * and the original status at the end
1017 * of the command */
1018 SCp->cmnd[6] = NCR_700_INTERNAL_SENSE_MAGIC;
1019 SCp->cmnd[7] = hostdata->status[0];
1020 SCp->use_sg = 0;
1021 SCp->sc_data_direction = DMA_FROM_DEVICE;
1022 dma_sync_single_for_device(hostdata->dev, slot->pCmd,
1023 SCp->cmd_len, DMA_TO_DEVICE);
1024 SCp->request_bufflen = sizeof(SCp->sense_buffer);
1025 slot->dma_handle = dma_map_single(hostdata->dev, SCp->sense_buffer, sizeof(SCp->sense_buffer), DMA_FROM_DEVICE);
1026 slot->SG[0].ins = bS_to_host(SCRIPT_MOVE_DATA_IN | sizeof(SCp->sense_buffer));
1027 slot->SG[0].pAddr = bS_to_host(slot->dma_handle);
1028 slot->SG[1].ins = bS_to_host(SCRIPT_RETURN);
1029 slot->SG[1].pAddr = 0;
1030 slot->resume_offset = hostdata->pScript;
1031 dma_cache_sync(slot->SG, sizeof(slot->SG[0])*2, DMA_TO_DEVICE);
1032 dma_cache_sync(SCp->sense_buffer, sizeof(SCp->sense_buffer), DMA_FROM_DEVICE);
1034 /* queue the command for reissue */
1035 slot->state = NCR_700_SLOT_QUEUED;
1036 hostdata->state = NCR_700_HOST_FREE;
1037 hostdata->cmd = NULL;
1039 } else {
1040 // Currently rely on the mid layer evaluation
1041 // of the tag queuing capability
1043 //if(status_byte(hostdata->status[0]) == GOOD &&
1044 // SCp->cmnd[0] == INQUIRY && SCp->use_sg == 0) {
1045 // /* Piggy back the tag queueing support
1046 // * on this command */
1047 // dma_sync_single_for_cpu(hostdata->dev,
1048 // slot->dma_handle,
1049 // SCp->request_bufflen,
1050 // DMA_FROM_DEVICE);
1051 // if(((char *)SCp->request_buffer)[7] & 0x02) {
1052 // printk(KERN_INFO "scsi%d: (%d:%d) Enabling Tag Command Queuing\n", host->host_no, pun, lun);
1053 // hostdata->tag_negotiated |= (1<<SCp->device->id);
1054 // NCR_700_set_flag(SCp->device, NCR_700_DEV_BEGIN_TAG_QUEUEING);
1055 // } else {
1056 // NCR_700_clear_flag(SCp->device, NCR_700_DEV_BEGIN_TAG_QUEUEING);
1057 // hostdata->tag_negotiated &= ~(1<<SCp->device->id);
1058 // }
1060 NCR_700_scsi_done(hostdata, SCp, hostdata->status[0]);
1062 } else if((dsps & 0xfffff0f0) == A_UNEXPECTED_PHASE) {
1063 __u8 i = (dsps & 0xf00) >> 8;
1065 printk(KERN_ERR "scsi%d: (%d:%d), UNEXPECTED PHASE %s (%s)\n",
1066 host->host_no, pun, lun,
1067 NCR_700_phase[i],
1068 sbcl_to_string(NCR_700_readb(host, SBCL_REG)));
1069 printk(KERN_ERR " len = %d, cmd =", SCp->cmd_len);
1070 scsi_print_command(SCp);
1072 NCR_700_internal_bus_reset(host);
1073 } else if((dsps & 0xfffff000) == A_FATAL) {
1074 int i = (dsps & 0xfff);
1076 printk(KERN_ERR "scsi%d: (%d:%d) FATAL ERROR: %s\n",
1077 host->host_no, pun, lun, NCR_700_fatal_messages[i]);
1078 if(dsps == A_FATAL_ILLEGAL_MSG_LENGTH) {
1079 printk(KERN_ERR " msg begins %02x %02x\n",
1080 hostdata->msgin[0], hostdata->msgin[1]);
1082 NCR_700_internal_bus_reset(host);
1083 } else if((dsps & 0xfffff0f0) == A_DISCONNECT) {
1084 #ifdef NCR_700_DEBUG
1085 __u8 i = (dsps & 0xf00) >> 8;
1087 printk("scsi%d: (%d:%d), DISCONNECTED (%d) %s\n",
1088 host->host_no, pun, lun,
1089 i, NCR_700_phase[i]);
1090 #endif
1091 save_for_reselection(hostdata, SCp, dsp);
1093 } else if(dsps == A_RESELECTION_IDENTIFIED) {
1094 __u8 lun;
1095 struct NCR_700_command_slot *slot;
1096 __u8 reselection_id = hostdata->reselection_id;
1097 struct scsi_device *SDp;
1099 lun = hostdata->msgin[0] & 0x1f;
1101 hostdata->reselection_id = 0xff;
1102 DEBUG(("scsi%d: (%d:%d) RESELECTED!\n",
1103 host->host_no, reselection_id, lun));
1104 /* clear the reselection indicator */
1105 SDp = __scsi_device_lookup(host, 0, reselection_id, lun);
1106 if(unlikely(SDp == NULL)) {
1107 printk(KERN_ERR "scsi%d: (%d:%d) HAS NO device\n",
1108 host->host_no, reselection_id, lun);
1109 BUG();
1111 if(hostdata->msgin[1] == A_SIMPLE_TAG_MSG) {
1112 struct scsi_cmnd *SCp = scsi_find_tag(SDp, hostdata->msgin[2]);
1113 if(unlikely(SCp == NULL)) {
1114 printk(KERN_ERR "scsi%d: (%d:%d) no saved request for tag %d\n",
1115 host->host_no, reselection_id, lun, hostdata->msgin[2]);
1116 BUG();
1119 slot = (struct NCR_700_command_slot *)SCp->host_scribble;
1120 DEBUG(("53c700: %d:%d:%d, reselection is tag %d, slot %p(%d)\n",
1121 host->host_no, SDp->id, SDp->lun,
1122 hostdata->msgin[2], slot, slot->tag));
1123 } else {
1124 struct scsi_cmnd *SCp = scsi_find_tag(SDp, SCSI_NO_TAG);
1125 if(unlikely(SCp == NULL)) {
1126 printk(KERN_ERR "scsi%d: (%d:%d) no saved request for untagged cmd\n",
1127 host->host_no, reselection_id, lun);
1128 BUG();
1130 slot = (struct NCR_700_command_slot *)SCp->host_scribble;
1133 if(slot == NULL) {
1134 printk(KERN_ERR "scsi%d: (%d:%d) RESELECTED but no saved command (MSG = %02x %02x %02x)!!\n",
1135 host->host_no, reselection_id, lun,
1136 hostdata->msgin[0], hostdata->msgin[1],
1137 hostdata->msgin[2]);
1138 } else {
1139 if(hostdata->state != NCR_700_HOST_BUSY)
1140 printk(KERN_ERR "scsi%d: FATAL, host not busy during valid reselection!\n",
1141 host->host_no);
1142 resume_offset = slot->resume_offset;
1143 hostdata->cmd = slot->cmnd;
1145 /* re-patch for this command */
1146 script_patch_32_abs(hostdata->script, CommandAddress,
1147 slot->pCmd);
1148 script_patch_16(hostdata->script,
1149 CommandCount, slot->cmnd->cmd_len);
1150 script_patch_32_abs(hostdata->script, SGScriptStartAddress,
1151 to32bit(&slot->pSG[0].ins));
1153 /* Note: setting SXFER only works if we're
1154 * still in the MESSAGE phase, so it is vital
1155 * that ACK is still asserted when we process
1156 * the reselection message. The resume offset
1157 * should therefore always clear ACK */
1158 NCR_700_writeb(NCR_700_get_SXFER(hostdata->cmd->device),
1159 host, SXFER_REG);
1160 dma_cache_sync(hostdata->msgin,
1161 MSG_ARRAY_SIZE, DMA_FROM_DEVICE);
1162 dma_cache_sync(hostdata->msgout,
1163 MSG_ARRAY_SIZE, DMA_TO_DEVICE);
1164 /* I'm just being paranoid here, the command should
1165 * already have been flushed from the cache */
1166 dma_cache_sync(slot->cmnd->cmnd,
1167 slot->cmnd->cmd_len, DMA_TO_DEVICE);
1172 } else if(dsps == A_RESELECTED_DURING_SELECTION) {
1174 /* This section is full of debugging code because I've
1175 * never managed to reach it. I think what happens is
1176 * that, because the 700 runs with selection
1177  * interrupts enabled the whole time, we take a
1178 * selection interrupt before we manage to get to the
1179 * reselected script interrupt */
1181 __u8 reselection_id = NCR_700_readb(host, SFBR_REG);
1182 struct NCR_700_command_slot *slot;
1184 /* Take out our own ID */
1185 reselection_id &= ~(1<<host->this_id);
1187 /* I've never seen this happen, so keep this as a printk rather
1188 * than a debug */
1189 printk(KERN_INFO "scsi%d: (%d:%d) RESELECTION DURING SELECTION, dsp=%08x[%04x] state=%d, count=%d\n",
1190 host->host_no, reselection_id, lun, dsp, dsp - hostdata->pScript, hostdata->state, hostdata->command_slot_count);
1193 /* FIXME: DEBUGGING CODE */
1194 __u32 SG = (__u32)bS_to_cpu(hostdata->script[A_SGScriptStartAddress_used[0]]);
1195 int i;
1197 for(i=0; i< NCR_700_COMMAND_SLOTS_PER_HOST; i++) {
1198 if(SG >= to32bit(&hostdata->slots[i].pSG[0])
1199 && SG <= to32bit(&hostdata->slots[i].pSG[NCR_700_SG_SEGMENTS]))
1200 break;
1202 printk(KERN_INFO "IDENTIFIED SG segment as being %08x in slot %p, cmd %p, slot->resume_offset=%08x\n", SG, &hostdata->slots[i], hostdata->slots[i].cmnd, hostdata->slots[i].resume_offset);
1203 SCp = hostdata->slots[i].cmnd;
1206 if(SCp != NULL) {
1207 slot = (struct NCR_700_command_slot *)SCp->host_scribble;
1208 /* change slot from busy to queued to redo command */
1209 slot->state = NCR_700_SLOT_QUEUED;
1211 hostdata->cmd = NULL;
1213 if(reselection_id == 0) {
1214 if(hostdata->reselection_id == 0xff) {
1215 printk(KERN_ERR "scsi%d: Invalid reselection during selection!!\n", host->host_no);
1216 return 0;
1217 } else {
1218 printk(KERN_ERR "scsi%d: script reselected and we took a selection interrupt\n",
1219 host->host_no);
1220 reselection_id = hostdata->reselection_id;
1222 } else {
1224 /* convert to real ID */
1225 reselection_id = bitmap_to_number(reselection_id);
1227 hostdata->reselection_id = reselection_id;
1228 /* just in case we have a stale simple tag message, clear it */
1229 hostdata->msgin[1] = 0;
1230 dma_cache_sync(hostdata->msgin,
1231 MSG_ARRAY_SIZE, DMA_BIDIRECTIONAL);
1232 if(hostdata->tag_negotiated & (1<<reselection_id)) {
1233 resume_offset = hostdata->pScript + Ent_GetReselectionWithTag;
1234 } else {
1235 resume_offset = hostdata->pScript + Ent_GetReselectionData;
1237 } else if(dsps == A_COMPLETED_SELECTION_AS_TARGET) {
1238 /* we've just disconnected from the bus, do nothing since
1239 * a return here will re-run the queued command slot
1240 * that may have been interrupted by the initial selection */
1241 DEBUG((" SELECTION COMPLETED\n"));
1242 } else if((dsps & 0xfffff0f0) == A_MSG_IN) {
1243 resume_offset = process_message(host, hostdata, SCp,
1244 dsp, dsps);
1245 } else if((dsps & 0xfffff000) == 0) {
1246 __u8 i = (dsps & 0xf0) >> 4, j = (dsps & 0xf00) >> 8;
1247 printk(KERN_ERR "scsi%d: (%d:%d), unhandled script condition %s %s at %04x\n",
1248 host->host_no, pun, lun, NCR_700_condition[i],
1249 NCR_700_phase[j], dsp - hostdata->pScript);
1250 if(SCp != NULL) {
1251 scsi_print_command(SCp);
1253 if(SCp->use_sg) {
1254 for(i = 0; i < SCp->use_sg + 1; i++) {
1255 printk(KERN_INFO " SG[%d].length = %d, move_insn=%08x, addr %08x\n", i, ((struct scatterlist *)SCp->buffer)[i].length, ((struct NCR_700_command_slot *)SCp->host_scribble)->SG[i].ins, ((struct NCR_700_command_slot *)SCp->host_scribble)->SG[i].pAddr);
1259 NCR_700_internal_bus_reset(host);
1260 } else if((dsps & 0xfffff000) == A_DEBUG_INTERRUPT) {
1261 printk(KERN_NOTICE "scsi%d (%d:%d) DEBUG INTERRUPT %d AT %08x[%04x], continuing\n",
1262 host->host_no, pun, lun, dsps & 0xfff, dsp, dsp - hostdata->pScript);
1263 resume_offset = dsp;
1264 } else {
1265 printk(KERN_ERR "scsi%d: (%d:%d), unidentified script interrupt 0x%x at %04x\n",
1266 host->host_no, pun, lun, dsps, dsp - hostdata->pScript);
1267 NCR_700_internal_bus_reset(host);
1269 return resume_offset;
1272 /* We run the 53c700 with selection interrupts always enabled. This
1273 * means that the chip may be selected as soon as the bus frees. On a
1274 * busy bus, this can be before the scripts engine finishes its
1275 * processing. Therefore, part of the selection processing has to be
1276 * to find out what the scripts engine is doing and complete the
1277 * function if necessary (i.e. process the pending disconnect or save
1278  * the interrupted initial selection) */
1279 STATIC inline __u32
1280 process_selection(struct Scsi_Host *host, __u32 dsp)
1282 __u8 id = 0; /* Squash compiler warning */
1283 int count = 0;
1284 __u32 resume_offset = 0;
1285 struct NCR_700_Host_Parameters *hostdata =
1286 (struct NCR_700_Host_Parameters *)host->hostdata[0];
1287 struct scsi_cmnd *SCp = hostdata->cmd;
1288 __u8 sbcl;
1290 for(count = 0; count < 5; count++) {
1291 id = NCR_700_readb(host, hostdata->chip710 ?
1292 CTEST9_REG : SFBR_REG);
1294 /* Take out our own ID */
1295 id &= ~(1<<host->this_id);
1296 if(id != 0)
1297 break;
1298 udelay(5);
1300 sbcl = NCR_700_readb(host, SBCL_REG);
1301 if((sbcl & SBCL_IO) == 0) {
1302 /* mark as having been selected rather than reselected */
1303 id = 0xff;
1304 } else {
1305 /* convert to real ID */
1306 hostdata->reselection_id = id = bitmap_to_number(id);
1307 DEBUG(("scsi%d: Reselected by %d\n",
1308 host->host_no, id));
1310 if(hostdata->state == NCR_700_HOST_BUSY && SCp != NULL) {
1311 struct NCR_700_command_slot *slot =
1312 (struct NCR_700_command_slot *)SCp->host_scribble;
1313 DEBUG((" ID %d WARNING: RESELECTION OF BUSY HOST, saving cmd %p, slot %p, addr %x [%04x], resume %x!\n", id, hostdata->cmd, slot, dsp, dsp - hostdata->pScript, resume_offset));
1315 switch(dsp - hostdata->pScript) {
1316 case Ent_Disconnect1:
1317 case Ent_Disconnect2:
1318 save_for_reselection(hostdata, SCp, Ent_Disconnect2 + hostdata->pScript);
1319 break;
1320 case Ent_Disconnect3:
1321 case Ent_Disconnect4:
1322 save_for_reselection(hostdata, SCp, Ent_Disconnect4 + hostdata->pScript);
1323 break;
1324 case Ent_Disconnect5:
1325 case Ent_Disconnect6:
1326 save_for_reselection(hostdata, SCp, Ent_Disconnect6 + hostdata->pScript);
1327 break;
1328 case Ent_Disconnect7:
1329 case Ent_Disconnect8:
1330 save_for_reselection(hostdata, SCp, Ent_Disconnect8 + hostdata->pScript);
1331 break;
1332 case Ent_Finish1:
1333 case Ent_Finish2:
1334 process_script_interrupt(A_GOOD_STATUS_AFTER_STATUS, dsp, SCp, host, hostdata);
1335 break;
1337 default:
1338 slot->state = NCR_700_SLOT_QUEUED;
1339 break;
1342 hostdata->state = NCR_700_HOST_BUSY;
1343 hostdata->cmd = NULL;
1344 /* clear any stale simple tag message */
1345 hostdata->msgin[1] = 0;
1346 dma_cache_sync(hostdata->msgin, MSG_ARRAY_SIZE,
1347 DMA_BIDIRECTIONAL);
1349 if(id == 0xff) {
1350 /* Selected as target, Ignore */
1351 resume_offset = hostdata->pScript + Ent_SelectedAsTarget;
1352 } else if(hostdata->tag_negotiated & (1<<id)) {
1353 resume_offset = hostdata->pScript + Ent_GetReselectionWithTag;
1354 } else {
1355 resume_offset = hostdata->pScript + Ent_GetReselectionData;
1357 return resume_offset;
1360 static inline void
1361 NCR_700_clear_fifo(struct Scsi_Host *host) {
1362 const struct NCR_700_Host_Parameters *hostdata
1363 = (struct NCR_700_Host_Parameters *)host->hostdata[0];
1364 if(hostdata->chip710) {
1365 NCR_700_writeb(CLR_FIFO_710, host, CTEST8_REG);
1366 } else {
1367 NCR_700_writeb(CLR_FIFO, host, DFIFO_REG);
1371 static inline void
1372 NCR_700_flush_fifo(struct Scsi_Host *host) {
1373 const struct NCR_700_Host_Parameters *hostdata
1374 = (struct NCR_700_Host_Parameters *)host->hostdata[0];
1375 if(hostdata->chip710) {
1376 NCR_700_writeb(FLUSH_DMA_FIFO_710, host, CTEST8_REG);
1377 udelay(10);
1378 NCR_700_writeb(0, host, CTEST8_REG);
1379 } else {
1380 NCR_700_writeb(FLUSH_DMA_FIFO, host, DFIFO_REG);
1381 udelay(10);
1382 NCR_700_writeb(0, host, DFIFO_REG);
1387 /* The queue lock with interrupts disabled must be held on entry to
1388 * this function */
1389 STATIC int
1390 NCR_700_start_command(struct scsi_cmnd *SCp)
1392 struct NCR_700_command_slot *slot =
1393 (struct NCR_700_command_slot *)SCp->host_scribble;
1394 struct NCR_700_Host_Parameters *hostdata =
1395 (struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0];
1396 __u16 count = 1; /* for IDENTIFY message */
1398 if(hostdata->state != NCR_700_HOST_FREE) {
1399 /* keep this inside the lock to close the race window where
1400 * the running command finishes on another CPU while we don't
1401 * change the state to queued on this one */
1402 slot->state = NCR_700_SLOT_QUEUED;
1404 DEBUG(("scsi%d: host busy, queueing command %p, slot %p\n",
1405 SCp->device->host->host_no, slot->cmnd, slot));
1406 return 0;
1408 hostdata->state = NCR_700_HOST_BUSY;
1409 hostdata->cmd = SCp;
1410 slot->state = NCR_700_SLOT_BUSY;
1411 /* keep interrupts disabled until we have the command correctly
1412 * set up so we cannot take a selection interrupt */
1414 hostdata->msgout[0] = NCR_700_identify(SCp->cmnd[0] != REQUEST_SENSE,
1415 SCp->device->lun);
1416 /* for INQUIRY or REQUEST_SENSE commands, we cannot be sure
1417 * if the negotiated transfer parameters still hold, so
1418 * always renegotiate them */
1419 if(SCp->cmnd[0] == INQUIRY || SCp->cmnd[0] == REQUEST_SENSE) {
1420 NCR_700_clear_flag(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC);
1423 /* REQUEST_SENSE is asking for contingent I_T_L(_Q) status.
1424 * If a contingent allegiance condition exists, the device
1425 * will refuse all tags, so send the request sense as untagged
1426 * */
1427 if((hostdata->tag_negotiated & (1<<SCp->device->id))
1428 && (slot->tag != SCSI_NO_TAG && SCp->cmnd[0] != REQUEST_SENSE)) {
1429 count += scsi_populate_tag_msg(SCp, &hostdata->msgout[count]);
1432 if(hostdata->fast &&
1433 NCR_700_is_flag_clear(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC)) {
1434 memcpy(&hostdata->msgout[count], NCR_700_SDTR_msg,
1435 sizeof(NCR_700_SDTR_msg));
1436 hostdata->msgout[count+3] = spi_period(SCp->device->sdev_target);
1437 hostdata->msgout[count+4] = spi_offset(SCp->device->sdev_target);
1438 count += sizeof(NCR_700_SDTR_msg);
1439 NCR_700_set_flag(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
1442 script_patch_16(hostdata->script, MessageCount, count);
1445 script_patch_ID(hostdata->script,
1446 Device_ID, 1<<SCp->device->id);
1448 script_patch_32_abs(hostdata->script, CommandAddress,
1449 slot->pCmd);
1450 script_patch_16(hostdata->script, CommandCount, SCp->cmd_len);
1451 /* finally plumb the beginning of the SG list into the script
1452 * */
1453 script_patch_32_abs(hostdata->script, SGScriptStartAddress,
1454 to32bit(&slot->pSG[0].ins));
1455 NCR_700_clear_fifo(SCp->device->host);
1457 if(slot->resume_offset == 0)
1458 slot->resume_offset = hostdata->pScript;
1459 /* now perform all the writebacks and invalidates */
1460 dma_cache_sync(hostdata->msgout, count, DMA_TO_DEVICE);
1461 dma_cache_sync(hostdata->msgin, MSG_ARRAY_SIZE,
1462 DMA_FROM_DEVICE);
1463 dma_cache_sync(SCp->cmnd, SCp->cmd_len, DMA_TO_DEVICE);
1464 dma_cache_sync(hostdata->status, 1, DMA_FROM_DEVICE);
1466 /* set the synchronous period/offset */
1467 NCR_700_writeb(NCR_700_get_SXFER(SCp->device),
1468 SCp->device->host, SXFER_REG);
1469 NCR_700_writel(slot->temp, SCp->device->host, TEMP_REG);
1470 NCR_700_writel(slot->resume_offset, SCp->device->host, DSP_REG);
1472 return 1;
1475 irqreturn_t
1476 NCR_700_intr(int irq, void *dev_id, struct pt_regs *regs)
1478 struct Scsi_Host *host = (struct Scsi_Host *)dev_id;
1479 struct NCR_700_Host_Parameters *hostdata =
1480 (struct NCR_700_Host_Parameters *)host->hostdata[0];
1481 __u8 istat;
1482 __u32 resume_offset = 0;
1483 __u8 pun = 0xff, lun = 0xff;
1484 unsigned long flags;
1485 int handled = 0;
1487 /* Use the host lock to serialise access to the 53c700
1488 * hardware. Note: In future, we may need to take the queue
1489 * lock to enter the done routines. When that happens, we
1490 * need to ensure that for this driver, the host lock and the
1491 * queue lock point to the same thing. */
1492 spin_lock_irqsave(host->host_lock, flags);
1493 if((istat = NCR_700_readb(host, ISTAT_REG))
1494 & (SCSI_INT_PENDING | DMA_INT_PENDING)) {
1495 __u32 dsps;
1496 __u8 sstat0 = 0, dstat = 0;
1497 __u32 dsp;
1498 struct scsi_cmnd *SCp = hostdata->cmd;
1499 enum NCR_700_Host_State state;
1501 handled = 1;
1502 state = hostdata->state;
1503 SCp = hostdata->cmd;
1505 if(istat & SCSI_INT_PENDING) {
1506 udelay(10);
1508 sstat0 = NCR_700_readb(host, SSTAT0_REG);
1511 if(istat & DMA_INT_PENDING) {
1512 udelay(10);
1514 dstat = NCR_700_readb(host, DSTAT_REG);
1517 dsps = NCR_700_readl(host, DSPS_REG);
1518 dsp = NCR_700_readl(host, DSP_REG);
1520 DEBUG(("scsi%d: istat %02x sstat0 %02x dstat %02x dsp %04x[%08x] dsps 0x%x\n",
1521 host->host_no, istat, sstat0, dstat,
1522 (dsp - (__u32)(hostdata->pScript))/4,
1523 dsp, dsps));
1525 if(SCp != NULL) {
1526 pun = SCp->device->id;
1527 lun = SCp->device->lun;
1530 if(sstat0 & SCSI_RESET_DETECTED) {
1531 struct scsi_device *SDp;
1532 int i;
1534 hostdata->state = NCR_700_HOST_BUSY;
1536 printk(KERN_ERR "scsi%d: Bus Reset detected, executing command %p, slot %p, dsp %08x[%04x]\n",
1537 host->host_no, SCp, SCp == NULL ? NULL : SCp->host_scribble, dsp, dsp - hostdata->pScript);
1539 scsi_report_bus_reset(host, 0);
1541 /* clear all the negotiated parameters */
1542 __shost_for_each_device(SDp, host)
1543 SDp->hostdata = NULL;
1545 /* clear all the slots and their pending commands */
1546 for(i = 0; i < NCR_700_COMMAND_SLOTS_PER_HOST; i++) {
1547 struct scsi_cmnd *SCp;
1548 struct NCR_700_command_slot *slot =
1549 &hostdata->slots[i];
1551 if(slot->state == NCR_700_SLOT_FREE)
1552 continue;
1554 SCp = slot->cmnd;
1555 printk(KERN_ERR " failing command because of reset, slot %p, cmnd %p\n",
1556 slot, SCp);
1557 free_slot(slot, hostdata);
1558 SCp->host_scribble = NULL;
1559 NCR_700_set_depth(SCp->device, 0);
1560 /* NOTE: deadlock potential here: we
1561 * rely on mid-layer guarantees that
1562 * scsi_done won't try to issue the
1563 * command again otherwise we'll
1564 * deadlock on the
1565 * hostdata->state_lock */
1566 SCp->result = DID_RESET << 16;
1567 SCp->scsi_done(SCp);
1569 mdelay(25);
1570 NCR_700_chip_setup(host);
1572 hostdata->state = NCR_700_HOST_FREE;
1573 hostdata->cmd = NULL;
1574 /* signal back if this was an eh induced reset */
1575 if(hostdata->eh_complete != NULL)
1576 complete(hostdata->eh_complete);
1577 goto out_unlock;
1578 } else if(sstat0 & SELECTION_TIMEOUT) {
1579 DEBUG(("scsi%d: (%d:%d) selection timeout\n",
1580 host->host_no, pun, lun));
1581 NCR_700_scsi_done(hostdata, SCp, DID_NO_CONNECT<<16);
1582 } else if(sstat0 & PHASE_MISMATCH) {
1583 struct NCR_700_command_slot *slot = (SCp == NULL) ? NULL :
1584 (struct NCR_700_command_slot *)SCp->host_scribble;
1586 if(dsp == Ent_SendMessage + 8 + hostdata->pScript) {
1587 /* It wants to reply to some part of
1588 * our message */
1589 #ifdef NCR_700_DEBUG
1590 __u32 temp = NCR_700_readl(host, TEMP_REG);
1591 int count = (hostdata->script[Ent_SendMessage/4] & 0xffffff) - ((NCR_700_readl(host, DBC_REG) & 0xffffff) + NCR_700_data_residual(host));
1592 printk("scsi%d (%d:%d) PHASE MISMATCH IN SEND MESSAGE %d remain, return %p[%04x], phase %s\n", host->host_no, pun, lun, count, (void *)temp, temp - hostdata->pScript, sbcl_to_string(NCR_700_readb(host, SBCL_REG)));
1593 #endif
1594 resume_offset = hostdata->pScript + Ent_SendMessagePhaseMismatch;
1595 } else if(dsp >= to32bit(&slot->pSG[0].ins) &&
1596 dsp <= to32bit(&slot->pSG[NCR_700_SG_SEGMENTS].ins)) {
1597 int data_transfer = NCR_700_readl(host, DBC_REG) & 0xffffff;
1598 int SGcount = (dsp - to32bit(&slot->pSG[0].ins))/sizeof(struct NCR_700_SG_List);
1599 int residual = NCR_700_data_residual(host);
1600 int i;
1601 #ifdef NCR_700_DEBUG
1602 __u32 naddr = NCR_700_readl(host, DNAD_REG);
1604 printk("scsi%d: (%d:%d) Expected phase mismatch in slot->SG[%d], transferred 0x%x\n",
1605 host->host_no, pun, lun,
1606 SGcount, data_transfer);
1607 scsi_print_command(SCp);
1608 if(residual) {
1609 printk("scsi%d: (%d:%d) Expected phase mismatch in slot->SG[%d], transferred 0x%x, residual %d\n",
1610 host->host_no, pun, lun,
1611 SGcount, data_transfer, residual);
1613 #endif
1614 data_transfer += residual;
1616 if(data_transfer != 0) {
1617 int count;
1618 __u32 pAddr;
1620 SGcount--;
1622 count = (bS_to_cpu(slot->SG[SGcount].ins) & 0x00ffffff);
1623 DEBUG(("DATA TRANSFER MISMATCH, count = %d, transferred %d\n", count, count-data_transfer));
1624 slot->SG[SGcount].ins &= bS_to_host(0xff000000);
1625 slot->SG[SGcount].ins |= bS_to_host(data_transfer);
1626 pAddr = bS_to_cpu(slot->SG[SGcount].pAddr);
1627 pAddr += (count - data_transfer);
1628 #ifdef NCR_700_DEBUG
1629 if(pAddr != naddr) {
1630 printk("scsi%d (%d:%d) transfer mismatch pAddr=%lx, naddr=%lx, data_transfer=%d, residual=%d\n", host->host_no, pun, lun, (unsigned long)pAddr, (unsigned long)naddr, data_transfer, residual);
1632 #endif
1633 slot->SG[SGcount].pAddr = bS_to_host(pAddr);
1635 /* set the executed moves to nops */
1636 for(i=0; i<SGcount; i++) {
1637 slot->SG[i].ins = bS_to_host(SCRIPT_NOP);
1638 slot->SG[i].pAddr = 0;
1640 dma_cache_sync(slot->SG, sizeof(slot->SG), DMA_TO_DEVICE);
1641 /* and pretend we disconnected after
1642 * the command phase */
1643 resume_offset = hostdata->pScript + Ent_MsgInDuringData;
1644 /* make sure all the data is flushed */
1645 NCR_700_flush_fifo(host);
1646 } else {
1647 __u8 sbcl = NCR_700_readb(host, SBCL_REG);
1648 printk(KERN_ERR "scsi%d: (%d:%d) phase mismatch at %04x, phase %s\n",
1649 host->host_no, pun, lun, dsp - hostdata->pScript, sbcl_to_string(sbcl));
1650 NCR_700_internal_bus_reset(host);
1651 }
1653 } else if(sstat0 & SCSI_GROSS_ERROR) {
1654 printk(KERN_ERR "scsi%d: (%d:%d) GROSS ERROR\n",
1655 host->host_no, pun, lun);
1656 NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16);
1657 } else if(sstat0 & PARITY_ERROR) {
1658 printk(KERN_ERR "scsi%d: (%d:%d) PARITY ERROR\n",
1659 host->host_no, pun, lun);
1660 NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16);
1661 } else if(dstat & SCRIPT_INT_RECEIVED) {
1662 DEBUG(("scsi%d: (%d:%d) ====>SCRIPT INTERRUPT<====\n",
1663 host->host_no, pun, lun));
1664 resume_offset = process_script_interrupt(dsps, dsp, SCp, host, hostdata);
1665 } else if(dstat & (ILGL_INST_DETECTED)) {
1666 printk(KERN_ERR "scsi%d: (%d:%d) Illegal Instruction detected at 0x%08x[0x%x]!!!\n"
1667 " Please email James.Bottomley@HansenPartnership.com with the details\n",
1668 host->host_no, pun, lun,
1669 dsp, dsp - hostdata->pScript);
1670 NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16);
1671 } else if(dstat & (WATCH_DOG_INTERRUPT|ABORTED)) {
1672 printk(KERN_ERR "scsi%d: (%d:%d) serious DMA problem, dstat=%02x\n",
1673 host->host_no, pun, lun, dstat);
1674 NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16);
1675 }
1678 /* NOTE: selection interrupt processing MUST occur
1679 * after script interrupt processing to correctly cope
1680 * with the case where we process a disconnect and
1681 * then get reselected before we process the
1682 * disconnection */
1683 if(sstat0 & SELECTED) {
1684 /* FIXME: It currently takes at least FOUR
1685 * interrupts to complete a command that
1686 * disconnects: one for the disconnect, one
1687 * for the reselection, one to get the
1688 * reselection data and one to complete the
1689 * command. If we guess the reselected
1690 * command here and prepare it, we only need
1691 * to get a reselection data interrupt if we
1692 * guessed wrongly. Since the interrupt
1693 * overhead is much greater than the command
1694 * setup, this would be an efficient
1695 * optimisation particularly as we probably
1696 * only have one outstanding command on a
1697 * target most of the time */
1699 resume_offset = process_selection(host, dsp);
1701 }
1703 }
1705 if(resume_offset) {
1706 if(hostdata->state != NCR_700_HOST_BUSY) {
1707 printk(KERN_ERR "scsi%d: Driver error: resume at 0x%08x [0x%04x] with non busy host!\n",
1708 host->host_no, resume_offset, resume_offset - hostdata->pScript);
1709 hostdata->state = NCR_700_HOST_BUSY;
1710 }
1712 DEBUG(("Attempting to resume at %x\n", resume_offset));
1713 NCR_700_clear_fifo(host);
1714 NCR_700_writel(resume_offset, host, DSP_REG);
1715 }
1716 /* There is probably a technical no-no about this: If we're a
1717 * shared interrupt and we got this interrupt because the
1718 * other device needs servicing not us, we're still going to
1719 * check our queued commands here---of course, there shouldn't
1720 * be any outstanding.... */
1721 if(hostdata->state == NCR_700_HOST_FREE) {
1722 int i;
1724 for(i = 0; i < NCR_700_COMMAND_SLOTS_PER_HOST; i++) {
1725 /* fairness: always run the queue from the last
1726 * position we left off */
1727 int j = (i + hostdata->saved_slot_position)
1728 % NCR_700_COMMAND_SLOTS_PER_HOST;
1730 if(hostdata->slots[j].state != NCR_700_SLOT_QUEUED)
1731 continue;
1732 if(NCR_700_start_command(hostdata->slots[j].cmnd)) {
1733 DEBUG(("scsi%d: Issuing saved command slot %p, cmd %p\t\n",
1734 host->host_no, &hostdata->slots[j],
1735 hostdata->slots[j].cmnd));
1736 hostdata->saved_slot_position = j + 1;
1737 }
1739 break;
1740 }
1741 }
1742 out_unlock:
1743 spin_unlock_irqrestore(host->host_lock, flags);
1744 return IRQ_RETVAL(handled);
1745 }
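/* queuecommand: midlayer entry point for issuing a command.  Gate
 * tagged against untagged commands (the driver cannot mix them),
 * claim a free command slot, pick the SCRIPTS data-move opcode from
 * the command's data direction, build the slot's scatter/gather
 * list and then try to start the command straight away. */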
1747 STATIC int
1748 NCR_700_queuecommand(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *))
1749 {
1750 struct NCR_700_Host_Parameters *hostdata =
1751 (struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0];
1752 __u32 move_ins;
1753 enum dma_data_direction direction;
1754 struct NCR_700_command_slot *slot;
1756 if(hostdata->command_slot_count >= NCR_700_COMMAND_SLOTS_PER_HOST) {
1757 /* We're over our allocation; this should never happen
1758 * since we report the max allocation to the mid layer */
1759 printk(KERN_WARNING "scsi%d: Command depth has gone over queue depth\n", SCp->device->host->host_no);
1760 return 1;
1761 }
1762 /* check for untagged commands. We cannot have any outstanding
1763 * commands if we accept them. Commands could be untagged because:
1765 * - The tag negotiated bitmap is clear
1766 * - The blk layer sent an untagged command
1767 */
1768 if(NCR_700_get_depth(SCp->device) != 0
1769 && (!(hostdata->tag_negotiated & (1<<SCp->device->id))
1770 || !blk_rq_tagged(SCp->request))) {
1771 DEBUG((KERN_ERR "scsi%d (%d:%d) has non zero depth %d\n",
1772 SCp->device->host->host_no, SCp->device->id, SCp->device->lun,
1773 NCR_700_get_depth(SCp->device)));
1774 return SCSI_MLQUEUE_DEVICE_BUSY;
1775 }
1776 if(NCR_700_get_depth(SCp->device) >= SCp->device->queue_depth) {
1777 DEBUG((KERN_ERR "scsi%d (%d:%d) has max tag depth %d\n",
1778 SCp->device->host->host_no, SCp->device->id, SCp->device->lun,
1779 NCR_700_get_depth(SCp->device)));
1780 return SCSI_MLQUEUE_DEVICE_BUSY;
1781 }
1782 NCR_700_set_depth(SCp->device, NCR_700_get_depth(SCp->device) + 1);
1784 /* begin the command here */
1785 /* no need to check for NULL: the test on command_slot_count above
1786 * ensures a slot is free */
1787 slot = find_empty_slot(hostdata);
1789 slot->cmnd = SCp;
1791 SCp->scsi_done = done;
1792 SCp->host_scribble = (unsigned char *)slot;
1793 SCp->SCp.ptr = NULL;
1794 SCp->SCp.buffer = NULL;
1796 #ifdef NCR_700_DEBUG
1797 printk("53c700: scsi%d, command ", SCp->device->host->host_no);
1798 scsi_print_command(SCp);
1799 #endif
1800 if(blk_rq_tagged(SCp->request)
1801 && (hostdata->tag_negotiated &(1<<SCp->device->id)) == 0
1802 && NCR_700_get_tag_neg_state(SCp->device) == NCR_700_START_TAG_NEGOTIATION) {
1803 printk(KERN_ERR "scsi%d: (%d:%d) Enabling Tag Command Queuing\n", SCp->device->host->host_no, SCp->device->id, SCp->device->lun);
1804 hostdata->tag_negotiated |= (1<<SCp->device->id);
1805 NCR_700_set_tag_neg_state(SCp->device, NCR_700_DURING_TAG_NEGOTIATION);
1806 }
1808 /* here we may have to process an untagged command. The gate
1809 * above ensures that this will be the only one outstanding,
1810 * so clear the tag negotiated bit.
1812 * FIXME: This will royally screw up on multiple LUN devices
1813 * */
1814 if(!blk_rq_tagged(SCp->request)
1815 && (hostdata->tag_negotiated &(1<<SCp->device->id))) {
1816 printk(KERN_INFO "scsi%d: (%d:%d) Disabling Tag Command Queuing\n", SCp->device->host->host_no, SCp->device->id, SCp->device->lun);
1817 hostdata->tag_negotiated &= ~(1<<SCp->device->id);
1818 }
1820 if((hostdata->tag_negotiated &(1<<SCp->device->id))
1821 && scsi_get_tag_type(SCp->device)) {
1822 slot->tag = SCp->request->tag;
1823 DEBUG(("53c700 %d:%d:%d, sending out tag %d, slot %p\n",
1824 SCp->device->host->host_no, SCp->device->id, SCp->device->lun, slot->tag,
1825 slot));
1826 } else {
1827 slot->tag = SCSI_NO_TAG;
1828 /* must populate current_cmnd for scsi_find_tag to work */
1829 SCp->device->current_cmnd = SCp;
1830 }
1831 /* sanity check: some of the commands generated by the mid-layer
1832 * have an eccentric idea of their sc_data_direction */
1833 if(!SCp->use_sg && !SCp->request_bufflen
1834 && SCp->sc_data_direction != DMA_NONE) {
1835 #ifdef NCR_700_DEBUG
1836 printk("53c700: Command");
1837 scsi_print_command(SCp);
1838 printk("Has wrong data direction %d\n", SCp->sc_data_direction);
1839 #endif
1840 SCp->sc_data_direction = DMA_NONE;
1841 }
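/* Choose the SCRIPTS block-move opcode for the data phase from the
 * command's data direction.  Reads use MOVE_DATA_IN, writes use
 * MOVE_DATA_OUT; commands with no data (or an unexpected
 * bidirectional direction) get no data move at all. */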
1843 switch (SCp->cmnd[0]) {
1844 case REQUEST_SENSE:
1845 /* clear the internal sense magic */
1846 SCp->cmnd[6] = 0;
1847 /* fall through */
1848 default:
1849 /* OK, get it from the command */
1850 switch(SCp->sc_data_direction) {
1851 case DMA_BIDIRECTIONAL:
1852 default:
1853 printk(KERN_ERR "53c700: Unknown command for data direction ");
1854 scsi_print_command(SCp);
1856 move_ins = 0;
1857 break;
1858 case DMA_NONE:
1859 move_ins = 0;
1860 break;
1861 case DMA_FROM_DEVICE:
1862 move_ins = SCRIPT_MOVE_DATA_IN;
1863 break;
1864 case DMA_TO_DEVICE:
1865 move_ins = SCRIPT_MOVE_DATA_OUT;
1866 break;
1867 }
1868 }
1870 /* now build the scatter gather list */
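/* Each mapped segment becomes one SCRIPTS MOVE instruction (opcode
 * or'd with the byte count, plus the segment's DMA address); the
 * table is terminated with a SCRIPT_RETURN entry and then synced so
 * the chip sees an up-to-date copy. */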
1871 direction = SCp->sc_data_direction;
1872 if(move_ins != 0) {
1873 int i;
1874 int sg_count;
1875 dma_addr_t vPtr = 0;
1876 __u32 count = 0;
1878 if(SCp->use_sg) {
1879 sg_count = dma_map_sg(hostdata->dev, SCp->buffer,
1880 SCp->use_sg, direction);
1881 } else {
1882 vPtr = dma_map_single(hostdata->dev,
1883 SCp->request_buffer,
1884 SCp->request_bufflen,
1885 direction);
1886 count = SCp->request_bufflen;
1887 slot->dma_handle = vPtr;
1888 sg_count = 1;
1889 }
1892 for(i = 0; i < sg_count; i++) {
1894 if(SCp->use_sg) {
1895 struct scatterlist *sg = SCp->buffer;
1897 vPtr = sg_dma_address(&sg[i]);
1898 count = sg_dma_len(&sg[i]);
1899 }
1901 slot->SG[i].ins = bS_to_host(move_ins | count);
1902 DEBUG((" scatter block %d: move %d[%08x] from 0x%lx\n",
1903 i, count, slot->SG[i].ins, (unsigned long)vPtr));
1904 slot->SG[i].pAddr = bS_to_host(vPtr);
1905 }
1906 slot->SG[i].ins = bS_to_host(SCRIPT_RETURN);
1907 slot->SG[i].pAddr = 0;
1908 dma_cache_sync(slot->SG, sizeof(slot->SG), DMA_TO_DEVICE);
1909 DEBUG((" SETTING %08lx to %x\n",
1910 (&slot->pSG[i].ins),
1911 slot->SG[i].ins));
1912 }
1913 slot->resume_offset = 0;
1914 slot->pCmd = dma_map_single(hostdata->dev, SCp->cmnd,
1915 sizeof(SCp->cmnd), DMA_TO_DEVICE);
1916 NCR_700_start_command(SCp);
1917 return 0;
1918 }
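/* Error-handler callbacks.  Abort and device reset are not really
 * implemented, so recovery generally escalates to a bus (or bus +
 * chip) reset; the interrupt handler then fails every outstanding
 * command with DID_RESET and signals eh_complete. */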
1920 STATIC int
1921 NCR_700_abort(struct scsi_cmnd * SCp)
1922 {
1923 struct NCR_700_command_slot *slot;
1925 printk(KERN_INFO "scsi%d (%d:%d) New error handler wants to abort command\n\t",
1926 SCp->device->host->host_no, SCp->device->id, SCp->device->lun);
1927 scsi_print_command(SCp);
1929 slot = (struct NCR_700_command_slot *)SCp->host_scribble;
1931 if(slot == NULL)
1932 /* no outstanding command to abort */
1933 return SUCCESS;
1934 if(SCp->cmnd[0] == TEST_UNIT_READY) {
1935 /* FIXME: This is because of a problem in the new
1936 * error handler. When it is in error recovery, it
1937 * will send a TUR to a device it thinks may still be
1938 * showing a problem. If the TUR isn't responded to,
1939 * it will abort it and mark the device off line.
1940 * Unfortunately, it does no other error recovery, so
1941 * this would leave us with an outstanding command
1942 * occupying a slot. Rather than allow this to
1943 * happen, we issue a bus reset to force all
1944 * outstanding commands to terminate here. */
1945 NCR_700_internal_bus_reset(SCp->device->host);
1946 /* still drop through and return failed */
1947 }
1948 return FAILED;
1949 }
1952 STATIC int
1953 NCR_700_bus_reset(struct scsi_cmnd * SCp)
1954 {
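/* The reset is made synchronous for the error handler: park a
 * completion in hostdata->eh_complete, fire the internal bus reset
 * and sleep until the interrupt handler has reinitialised the chip,
 * failed the outstanding commands and completed eh_complete. */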
1955 DECLARE_COMPLETION(complete);
1956 struct NCR_700_Host_Parameters *hostdata =
1957 (struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0];
1959 printk(KERN_INFO "scsi%d (%d:%d) New error handler wants BUS reset, cmd %p\n\t",
1960 SCp->device->host->host_no, SCp->device->id, SCp->device->lun, SCp);
1961 scsi_print_command(SCp);
1962 /* In theory, eh_complete should always be null because the
1963 * eh is single threaded, but just in case we're handling a
1964 * reset via sg or something */
1965 while(hostdata->eh_complete != NULL) {
1966 spin_unlock_irq(SCp->device->host->host_lock);
1967 msleep_interruptible(100);
1968 spin_lock_irq(SCp->device->host->host_lock);
1969 }
1970 hostdata->eh_complete = &complete;
1971 NCR_700_internal_bus_reset(SCp->device->host);
1972 spin_unlock_irq(SCp->device->host->host_lock);
1973 wait_for_completion(&complete);
1974 spin_lock_irq(SCp->device->host->host_lock);
1975 hostdata->eh_complete = NULL;
1976 /* Revalidate the transport parameters of the failing device */
1977 if(hostdata->fast)
1978 spi_schedule_dv_device(SCp->device);
1979 return SUCCESS;
1980 }
1982 STATIC int
1983 NCR_700_dev_reset(struct scsi_cmnd * SCp)
1984 {
1985 printk(KERN_INFO "scsi%d (%d:%d) New error handler wants device reset\n\t",
1986 SCp->device->host->host_no, SCp->device->id, SCp->device->lun);
1987 scsi_print_command(SCp);
1989 return FAILED;
1990 }
1992 STATIC int
1993 NCR_700_host_reset(struct scsi_cmnd * SCp)
1994 {
1995 printk(KERN_INFO "scsi%d (%d:%d) New error handler wants HOST reset\n\t",
1996 SCp->device->host->host_no, SCp->device->id, SCp->device->lun);
1997 scsi_print_command(SCp);
1999 NCR_700_internal_bus_reset(SCp->device->host);
2000 NCR_700_chip_reset(SCp->device->host);
2001 return SUCCESS;
2002 }
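/* scsi_transport_spi setter callbacks.  Each clamps the requested
 * value to what the hardware supports and clears the negotiated
 * flags so that a fresh sync negotiation (with the new parameters)
 * can take place on a subsequent command. */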
2004 STATIC void
2005 NCR_700_set_period(struct scsi_target *STp, int period)
2006 {
2007 struct Scsi_Host *SHp = dev_to_shost(STp->dev.parent);
2008 struct NCR_700_Host_Parameters *hostdata =
2009 (struct NCR_700_Host_Parameters *)SHp->hostdata[0];
2011 if(!hostdata->fast)
2012 return;
2014 if(period < hostdata->min_period)
2015 period = hostdata->min_period;
2017 spi_period(STp) = period;
2018 spi_flags(STp) &= ~(NCR_700_DEV_NEGOTIATED_SYNC |
2019 NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
2020 spi_flags(STp) |= NCR_700_DEV_PRINT_SYNC_NEGOTIATION;
2021 }
2023 STATIC void
2024 NCR_700_set_offset(struct scsi_target *STp, int offset)
2025 {
2026 struct Scsi_Host *SHp = dev_to_shost(STp->dev.parent);
2027 struct NCR_700_Host_Parameters *hostdata =
2028 (struct NCR_700_Host_Parameters *)SHp->hostdata[0];
2029 int max_offset = hostdata->chip710
2030 ? NCR_710_MAX_OFFSET : NCR_700_MAX_OFFSET;
2032 if(!hostdata->fast)
2033 return;
2035 if(offset > max_offset)
2036 offset = max_offset;
2038 /* if we're currently async, make sure the period is reasonable */
2039 if(spi_offset(STp) == 0 && (spi_period(STp) < hostdata->min_period ||
2040 spi_period(STp) > 0xff))
2041 spi_period(STp) = hostdata->min_period;
2043 spi_offset(STp) = offset;
2044 spi_flags(STp) &= ~(NCR_700_DEV_NEGOTIATED_SYNC |
2045 NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
2046 spi_flags(STp) |= NCR_700_DEV_PRINT_SYNC_NEGOTIATION;
2047 }
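/* slave_configure: per-device setup after scan.  Devices that
 * support tagged queueing get ordered tags at NCR_700_DEFAULT_TAGS
 * depth; on hosts flagged fast, domain validation picks the sync
 * period/offset, otherwise the device is forced to async. */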
2051 STATIC int
2052 NCR_700_slave_configure(struct scsi_device *SDp)
2053 {
2054 struct NCR_700_Host_Parameters *hostdata =
2055 (struct NCR_700_Host_Parameters *)SDp->host->hostdata[0];
2057 /* to do here: allocate memory; build a queue_full list */
2058 if(SDp->tagged_supported) {
2059 scsi_set_tag_type(SDp, MSG_ORDERED_TAG);
2060 scsi_activate_tcq(SDp, NCR_700_DEFAULT_TAGS);
2061 NCR_700_set_tag_neg_state(SDp, NCR_700_START_TAG_NEGOTIATION);
2062 } else {
2063 /* initialise to default depth */
2064 scsi_adjust_queue_depth(SDp, 0, SDp->host->cmd_per_lun);
2065 }
2066 if(hostdata->fast) {
2067 /* Find the correct offset and period via domain validation */
2068 if (!spi_initial_dv(SDp->sdev_target))
2069 spi_dv_device(SDp);
2070 } else {
2071 spi_offset(SDp->sdev_target) = 0;
2072 spi_period(SDp->sdev_target) = 0;
2073 }
2074 return 0;
2075 }
2077 STATIC void
2078 NCR_700_slave_destroy(struct scsi_device *SDp)
2079 {
2080 /* to do here: deallocate memory */
2081 }
2083 static int
2084 NCR_700_change_queue_depth(struct scsi_device *SDp, int depth)
2085 {
2086 if (depth > NCR_700_MAX_TAGS)
2087 depth = NCR_700_MAX_TAGS;
2089 scsi_adjust_queue_depth(SDp, scsi_get_tag_type(SDp), depth);
2090 return depth;
2091 }
2093 static int NCR_700_change_queue_type(struct scsi_device *SDp, int tag_type)
2094 {
2095 int change_tag = ((tag_type ==0 && scsi_get_tag_type(SDp) != 0)
2096 || (tag_type != 0 && scsi_get_tag_type(SDp) == 0));
2097 struct NCR_700_Host_Parameters *hostdata =
2098 (struct NCR_700_Host_Parameters *)SDp->host->hostdata[0];
2100 scsi_set_tag_type(SDp, tag_type);
2102 /* We have a global (per target) flag to track whether TCQ is
2103 * enabled, so we'll be turning it off for the entire target here.
2104 * Our tag algorithm will fail if we mix tagged and untagged commands,
2105 * so quiesce the device before doing this */
2106 if (change_tag)
2107 scsi_target_quiesce(SDp->sdev_target);
2109 if (!tag_type) {
2110 /* shift back to the default unqueued number of commands
2111 * (the user can still raise this) */
2112 scsi_deactivate_tcq(SDp, SDp->host->cmd_per_lun);
2113 hostdata->tag_negotiated &= ~(1 << SDp->id);
2114 } else {
2115 /* Here, we cleared the negotiation flag above, so this
2116 * will force the driver to renegotiate */
2117 scsi_activate_tcq(SDp, SDp->queue_depth);
2118 if (change_tag)
2119 NCR_700_set_tag_neg_state(SDp, NCR_700_START_TAG_NEGOTIATION);
2120 }
2121 if (change_tag)
2122 scsi_target_resume(SDp->sdev_target);
2124 return tag_type;
2125 }
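/* Read-only sysfs attribute "active_tags": the number of commands
 * currently outstanding on the scsi_device. */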
2127 static ssize_t
2128 NCR_700_show_active_tags(struct device *dev, char *buf)
2129 {
2130 struct scsi_device *SDp = to_scsi_device(dev);
2132 return snprintf(buf, 20, "%d\n", NCR_700_get_depth(SDp));
2133 }
2135 static struct device_attribute NCR_700_active_tags_attr = {
2136 .attr = {
2137 .name = "active_tags",
2138 .mode = S_IRUGO,
2139 },
2140 .show = NCR_700_show_active_tags,
2141 };
2143 STATIC struct device_attribute *NCR_700_dev_attrs[] = {
2144 &NCR_700_active_tags_attr,
2145 NULL,
2146 };
2148 EXPORT_SYMBOL(NCR_700_detect);
2149 EXPORT_SYMBOL(NCR_700_release);
2150 EXPORT_SYMBOL(NCR_700_intr);
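/* Chip-level entry points exported for the board-specific front-end
 * drivers that wrap this core driver. */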
2152 static struct spi_function_template NCR_700_transport_functions = {
2153 .set_period = NCR_700_set_period,
2154 .show_period = 1,
2155 .set_offset = NCR_700_set_offset,
2156 .show_offset = 1,
2157 };
2159 static int __init NCR_700_init(void)
2160 {
2161 NCR_700_transport_template = spi_attach_transport(&NCR_700_transport_functions);
2162 if(!NCR_700_transport_template)
2163 return -ENODEV;
2164 return 0;
2165 }
2167 static void __exit NCR_700_exit(void)
2168 {
2169 spi_release_transport(NCR_700_transport_template);
2170 }
2172 module_init(NCR_700_init);
2173 module_exit(NCR_700_exit);