1 /* -*- mode: c; c-basic-offset: 8 -*- */
3 /* NCR (or Symbios) 53c700 and 53c700-66 Driver
5 * Copyright (C) 2001 by James.Bottomley@HansenPartnership.com
6 **-----------------------------------------------------------------------------
7 **
8 ** This program is free software; you can redistribute it and/or modify
9 ** it under the terms of the GNU General Public License as published by
10 ** the Free Software Foundation; either version 2 of the License, or
11 ** (at your option) any later version.
13 ** This program is distributed in the hope that it will be useful,
14 ** but WITHOUT ANY WARRANTY; without even the implied warranty of
15 ** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 ** GNU General Public License for more details.
18 ** You should have received a copy of the GNU General Public License
19 ** along with this program; if not, write to the Free Software
20 ** Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22 **-----------------------------------------------------------------------------
25 /* Notes:
27 * This driver is designed exclusively for these chips (virtually the
28 * earliest of the scripts engine chips). They need their own drivers
29 * because they are missing so many of the scripts and snazzy register
30 * features of their elder brothers (the 710, 720 and 770).
32 * The 700 is the lowliest of the line: it can only do async SCSI.
33 * The 700-66 can at least do synchronous SCSI up to 10MHz.
35 * The 700 chip has no host bus interface logic of its own. However,
36 * it is usually mapped to a location with well defined register
37 * offsets. Therefore, if you can determine the base address and the
38 * irq your board incorporating this chip uses, you can probably use
39 * this driver to run it (although you'll probably have to write a
40 * minimal wrapper for the purpose---see the NCR_D700 driver for
41 * details about how to do this; a rough sketch also follows these notes).
44 * TODO List:
46 * 1. Better statistics in the proc fs
48 * 2. Implement message queue (queues SCSI messages like commands) and make
49 * the abort and device reset functions use them.
50 * */
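/* To illustrate the "minimal wrapper" mentioned in the notes above, here
 * is a rough, hypothetical sketch of a board glue file.  It is NOT a real
 * driver: the MYBOARD names, the 50MHz clock, the ioremap length and the
 * probe entry point are all made up; the NCR_D700 driver is the real
 * reference.  It only shows how the host parameters are filled in and
 * handed to NCR_700_detect(), with NCR_700_intr() wired up as the
 * interrupt handler and NCR_700_release() used on teardown:
 *
 *	static struct scsi_host_template myboard_tmpl = {
 *		.module		= THIS_MODULE,
 *		.name		= "MYBOARD 53c700",
 *		.proc_name	= "myboard",
 *		.this_id	= 7,
 *	};
 *
 *	static int myboard_probe(struct device *dev, unsigned long ioaddr, int irq)
 *	{
 *		struct NCR_700_Host_Parameters *hostdata;
 *		struct Scsi_Host *host;
 *
 *		hostdata = kzalloc(sizeof(*hostdata), GFP_KERNEL);
 *		if (hostdata == NULL)
 *			return -ENOMEM;
 *
 *		hostdata->dev = dev;
 *		hostdata->base = ioremap(ioaddr, 0x100);
 *		hostdata->clock = 50;		// board crystal, in MHz (made up)
 *		hostdata->chip710 = 0;		// plain 53c700, not a 710
 *
 *		host = NCR_700_detect(&myboard_tmpl, hostdata, dev);
 *		if (host == NULL)
 *			goto out_unmap;
 *		if (request_irq(irq, NCR_700_intr, IRQF_SHARED, "myboard", host)) {
 *			scsi_remove_host(host);
 *			NCR_700_release(host);
 *			scsi_host_put(host);
 *			goto out_unmap;
 *		}
 *		scsi_scan_host(host);
 *		return 0;
 *
 *	 out_unmap:
 *		iounmap(hostdata->base);
 *		kfree(hostdata);
 *		return -ENODEV;
 *	}
 */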
52 /* CHANGELOG
54 * Version 2.8
56 * Fixed a bad bug affecting tag starvation processing (previously the
57 * driver would hang the system if too many tags starved). Also fixed a
58 * bad bug having to do with 10 byte command processing and REQUEST
59 * SENSE (the command would loop forever getting a transfer length
60 * mismatch in the CMD phase).
62 * Version 2.7
64 * Fixed scripts problem which caused certain devices (notably CDRWs)
65 * to hang on initial INQUIRY. Updated NCR_700_readl/writel to use
66 * __raw_readl/writel for parisc compatibility (Thomas
67 * Bogendoerfer). Added missing SCp->request_bufflen initialisation
68 * for sense requests (Ryan Bradetich).
70 * Version 2.6
72 * Following test of the 64 bit parisc kernel by Richard Hirst,
73 * several problems have now been corrected. Also adds support for
74 * consistent memory allocation.
76 * Version 2.5
78 * More compatibility changes for 710 (now actually works). Enhanced
79 * support for odd clock speeds which constrain SDTR negotiations.
80 * Corrected cacheline separation for scsi messages and status for
81 * incoherent architectures. Use of the pci mapping functions on
82 * buffers to begin support for 64 bit drivers.
84 * Version 2.4
86 * Added support for the 53c710 chip (in 53c700 emulation mode only---no
87 * special 53c710 instructions or registers are used).
89 * Version 2.3
91 * More endianness/cache coherency changes.
93 * Better bad device handling (handles devices lying about tag
94 * queueing support and devices which fail to provide sense data on
95 * contingent allegiance conditions)
97 * Many thanks to Richard Hirst <rhirst@linuxcare.com> for patiently
98 * debugging this driver on the parisc architecture and suggesting
99 * many improvements and bug fixes.
101 * Thanks also go to Linuxcare Inc. for providing several PARISC
102 * machines for me to debug the driver on.
104 * Version 2.2
106 * Made the driver mem or io mapped; added endian invariance; added
107 * dma cache flushing operations for architectures which need it;
108 * added support for more varied clocking speeds.
110 * Version 2.1
112 * Initial modularisation from the D700. See NCR_D700.c for the rest of
113 * the changelog.
114 * */
115 #define NCR_700_VERSION "2.8"
117 #include <linux/kernel.h>
118 #include <linux/types.h>
119 #include <linux/string.h>
120 #include <linux/ioport.h>
121 #include <linux/delay.h>
122 #include <linux/spinlock.h>
123 #include <linux/completion.h>
124 #include <linux/init.h>
125 #include <linux/proc_fs.h>
126 #include <linux/blkdev.h>
127 #include <linux/module.h>
128 #include <linux/interrupt.h>
129 #include <linux/device.h>
130 #include <asm/dma.h>
131 #include <asm/system.h>
132 #include <asm/io.h>
133 #include <asm/pgtable.h>
134 #include <asm/byteorder.h>
136 #include <scsi/scsi.h>
137 #include <scsi/scsi_cmnd.h>
138 #include <scsi/scsi_dbg.h>
139 #include <scsi/scsi_eh.h>
140 #include <scsi/scsi_host.h>
141 #include <scsi/scsi_tcq.h>
142 #include <scsi/scsi_transport.h>
143 #include <scsi/scsi_transport_spi.h>
145 #include "53c700.h"
147 /* NOTE: For 64 bit drivers there are points in the code where we use
148 * a non dereferenceable pointer to point to a structure in dma-able
149 * memory (which is 32 bits) so that we can use all of the structure
150 * operations but take the address at the end. This macro allows us
151 * to truncate the 64 bit pointer down to 32 bits without the compiler
152 * complaining */
153 #define to32bit(x) ((__u32)((unsigned long)(x)))
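/* For example (illustrative only), a command slot's SG list lives in the
 * 32 bit DMA area, so on a 64 bit host the script can be pointed at it
 * with
 *	script_patch_32_abs(hostdata->dev, hostdata->script,
 *			    SGScriptStartAddress, to32bit(&slot->pSG[0].ins));
 * exactly as the reselection and start-command paths below do. */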
155 #ifdef NCR_700_DEBUG
156 #define STATIC
157 #else
158 #define STATIC static
159 #endif
161 MODULE_AUTHOR("James Bottomley");
162 MODULE_DESCRIPTION("53c700 and 53c700-66 Driver");
163 MODULE_LICENSE("GPL");
165 /* This is the script */
166 #include "53c700_d.h"
169 STATIC int NCR_700_queuecommand(struct scsi_cmnd *, void (*done)(struct scsi_cmnd *));
170 STATIC int NCR_700_abort(struct scsi_cmnd * SCpnt);
171 STATIC int NCR_700_bus_reset(struct scsi_cmnd * SCpnt);
172 STATIC int NCR_700_host_reset(struct scsi_cmnd * SCpnt);
173 STATIC void NCR_700_chip_setup(struct Scsi_Host *host);
174 STATIC void NCR_700_chip_reset(struct Scsi_Host *host);
175 STATIC int NCR_700_slave_alloc(struct scsi_device *SDpnt);
176 STATIC int NCR_700_slave_configure(struct scsi_device *SDpnt);
177 STATIC void NCR_700_slave_destroy(struct scsi_device *SDpnt);
178 static int NCR_700_change_queue_depth(struct scsi_device *SDpnt, int depth);
179 static int NCR_700_change_queue_type(struct scsi_device *SDpnt, int depth);
181 STATIC struct device_attribute *NCR_700_dev_attrs[];
183 STATIC struct scsi_transport_template *NCR_700_transport_template = NULL;
185 static char *NCR_700_phase[] = {
187 "after selection",
188 "before command phase",
189 "after command phase",
190 "after status phase",
191 "after data in phase",
192 "after data out phase",
193 "during data phase",
196 static char *NCR_700_condition[] = {
198 "NOT MSG_OUT",
199 "UNEXPECTED PHASE",
200 "NOT MSG_IN",
201 "UNEXPECTED MSG",
202 "MSG_IN",
203 "SDTR_MSG RECEIVED",
204 "REJECT_MSG RECEIVED",
205 "DISCONNECT_MSG RECEIVED",
206 "MSG_OUT",
207 "DATA_IN",
211 static char *NCR_700_fatal_messages[] = {
212 "unexpected message after reselection",
213 "still MSG_OUT after message injection",
214 "not MSG_IN after selection",
215 "Illegal message length received",
218 static char *NCR_700_SBCL_bits[] = {
219 "IO ",
220 "CD ",
221 "MSG ",
222 "ATN ",
223 "SEL ",
224 "BSY ",
225 "ACK ",
226 "REQ ",
229 static char *NCR_700_SBCL_to_phase[] = {
230 "DATA_OUT",
231 "DATA_IN",
232 "CMD_OUT",
233 "STATE",
234 "ILLEGAL PHASE",
235 "ILLEGAL PHASE",
236 "MSG OUT",
237 "MSG IN",
240 /* This translates the SDTR message offset and period to a value
241 * which can be loaded into the SXFER_REG.
243 * NOTE: According to SCSI-2, the true transfer period (in ns) is
244 * actually four times this period value */
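/* Worked example (assuming a 50MHz synchronous clock and ignoring the
 * min_xferp clamp): an SDTR period value of 25 means a 25*4 = 100ns
 * (10MHz) transfer period, so XFERP = (25*4 * 50)/1000 - 4 = 1, and with
 * an offset of 8 the value returned for SXFER_REG is
 * (8 & 0x0f) | (1 & 0x07)<<4 = 0x18. */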
245 static inline __u8
246 NCR_700_offset_period_to_sxfer(struct NCR_700_Host_Parameters *hostdata,
247 __u8 offset, __u8 period)
249 int XFERP;
251 __u8 min_xferp = (hostdata->chip710
252 ? NCR_710_MIN_XFERP : NCR_700_MIN_XFERP);
253 __u8 max_offset = (hostdata->chip710
254 ? NCR_710_MAX_OFFSET : NCR_700_MAX_OFFSET);
256 if(offset == 0)
257 return 0;
259 if(period < hostdata->min_period) {
260 printk(KERN_WARNING "53c700: Period %dns is less than this chip's minimum, setting to %d\n", period*4, NCR_700_MIN_PERIOD*4);
261 period = hostdata->min_period;
263 XFERP = (period*4 * hostdata->sync_clock)/1000 - 4;
264 if(offset > max_offset) {
265 printk(KERN_WARNING "53c700: Offset %d exceeds chip maximum, setting to %d\n",
266 offset, max_offset);
267 offset = max_offset;
269 if(XFERP < min_xferp) {
270 printk(KERN_WARNING "53c700: XFERP %d is less than minimum, setting to %d\n",
271 XFERP, min_xferp);
272 XFERP = min_xferp;
274 return (offset & 0x0f) | (XFERP & 0x07)<<4;
277 static inline __u8
278 NCR_700_get_SXFER(struct scsi_device *SDp)
280 struct NCR_700_Host_Parameters *hostdata =
281 (struct NCR_700_Host_Parameters *)SDp->host->hostdata[0];
283 return NCR_700_offset_period_to_sxfer(hostdata,
284 spi_offset(SDp->sdev_target),
285 spi_period(SDp->sdev_target));
288 struct Scsi_Host *
289 NCR_700_detect(struct scsi_host_template *tpnt,
290 struct NCR_700_Host_Parameters *hostdata, struct device *dev)
292 dma_addr_t pScript, pSlots;
293 __u8 *memory;
294 __u32 *script;
295 struct Scsi_Host *host;
296 static int banner = 0;
297 int j;
299 if(tpnt->sdev_attrs == NULL)
300 tpnt->sdev_attrs = NCR_700_dev_attrs;
302 memory = dma_alloc_noncoherent(hostdata->dev, TOTAL_MEM_SIZE,
303 &pScript, GFP_KERNEL);
304 if(memory == NULL) {
305 printk(KERN_ERR "53c700: Failed to allocate memory for driver, detaching\n");
306 return NULL;
309 script = (__u32 *)memory;
310 hostdata->msgin = memory + MSGIN_OFFSET;
311 hostdata->msgout = memory + MSGOUT_OFFSET;
312 hostdata->status = memory + STATUS_OFFSET;
313 /* all of these offsets are L1_CACHE_BYTES separated. It is fatal
314 * if this isn't sufficient separation to avoid dma flushing issues */
315 BUG_ON(!dma_is_consistent(hostdata->dev, pScript) && L1_CACHE_BYTES < dma_get_cache_alignment());
316 hostdata->slots = (struct NCR_700_command_slot *)(memory + SLOTS_OFFSET);
317 hostdata->dev = dev;
319 pSlots = pScript + SLOTS_OFFSET;
321 /* Fill in the missing routines from the host template */
322 tpnt->queuecommand = NCR_700_queuecommand;
323 tpnt->eh_abort_handler = NCR_700_abort;
324 tpnt->eh_bus_reset_handler = NCR_700_bus_reset;
325 tpnt->eh_host_reset_handler = NCR_700_host_reset;
326 tpnt->can_queue = NCR_700_COMMAND_SLOTS_PER_HOST;
327 tpnt->sg_tablesize = NCR_700_SG_SEGMENTS;
328 tpnt->cmd_per_lun = NCR_700_CMD_PER_LUN;
329 tpnt->use_clustering = ENABLE_CLUSTERING;
330 tpnt->slave_configure = NCR_700_slave_configure;
331 tpnt->slave_destroy = NCR_700_slave_destroy;
332 tpnt->slave_alloc = NCR_700_slave_alloc;
333 tpnt->change_queue_depth = NCR_700_change_queue_depth;
334 tpnt->change_queue_type = NCR_700_change_queue_type;
336 if(tpnt->name == NULL)
337 tpnt->name = "53c700";
338 if(tpnt->proc_name == NULL)
339 tpnt->proc_name = "53c700";
341 host = scsi_host_alloc(tpnt, 4);
342 if (!host)
343 return NULL;
344 memset(hostdata->slots, 0, sizeof(struct NCR_700_command_slot)
345 * NCR_700_COMMAND_SLOTS_PER_HOST);
346 for (j = 0; j < NCR_700_COMMAND_SLOTS_PER_HOST; j++) {
347 dma_addr_t offset = (dma_addr_t)((unsigned long)&hostdata->slots[j].SG[0]
348 - (unsigned long)&hostdata->slots[0].SG[0]);
349 hostdata->slots[j].pSG = (struct NCR_700_SG_List *)((unsigned long)(pSlots + offset));
350 if(j == 0)
351 hostdata->free_list = &hostdata->slots[j];
352 else
353 hostdata->slots[j-1].ITL_forw = &hostdata->slots[j];
354 hostdata->slots[j].state = NCR_700_SLOT_FREE;
357 for (j = 0; j < ARRAY_SIZE(SCRIPT); j++)
358 script[j] = bS_to_host(SCRIPT[j]);
360 /* adjust all labels to be bus physical */
361 for (j = 0; j < PATCHES; j++)
362 script[LABELPATCHES[j]] = bS_to_host(pScript + SCRIPT[LABELPATCHES[j]]);
363 /* now patch up fixed addresses. */
364 script_patch_32(hostdata->dev, script, MessageLocation,
365 pScript + MSGOUT_OFFSET);
366 script_patch_32(hostdata->dev, script, StatusAddress,
367 pScript + STATUS_OFFSET);
368 script_patch_32(hostdata->dev, script, ReceiveMsgAddress,
369 pScript + MSGIN_OFFSET);
371 hostdata->script = script;
372 hostdata->pScript = pScript;
373 dma_sync_single_for_device(hostdata->dev, pScript, sizeof(SCRIPT), DMA_TO_DEVICE);
374 hostdata->state = NCR_700_HOST_FREE;
375 hostdata->cmd = NULL;
376 host->max_id = 8;
377 host->max_lun = NCR_700_MAX_LUNS;
378 BUG_ON(NCR_700_transport_template == NULL);
379 host->transportt = NCR_700_transport_template;
380 host->unique_id = (unsigned long)hostdata->base;
381 hostdata->eh_complete = NULL;
382 host->hostdata[0] = (unsigned long)hostdata;
383 /* kick the chip */
384 NCR_700_writeb(0xff, host, CTEST9_REG);
385 if (hostdata->chip710)
386 hostdata->rev = (NCR_700_readb(host, CTEST8_REG)>>4) & 0x0f;
387 else
388 hostdata->rev = (NCR_700_readb(host, CTEST7_REG)>>4) & 0x0f;
389 hostdata->fast = (NCR_700_readb(host, CTEST9_REG) == 0);
390 if (banner == 0) {
391 printk(KERN_NOTICE "53c700: Version " NCR_700_VERSION " By James.Bottomley@HansenPartnership.com\n");
392 banner = 1;
394 printk(KERN_NOTICE "scsi%d: %s rev %d %s\n", host->host_no,
395 hostdata->chip710 ? "53c710" :
396 (hostdata->fast ? "53c700-66" : "53c700"),
397 hostdata->rev, hostdata->differential ?
398 "(Differential)" : "");
399 /* reset the chip */
400 NCR_700_chip_reset(host);
402 if (scsi_add_host(host, dev)) {
403 dev_printk(KERN_ERR, dev, "53c700: scsi_add_host failed\n");
404 scsi_host_put(host);
405 return NULL;
408 spi_signalling(host) = hostdata->differential ? SPI_SIGNAL_HVD :
409 SPI_SIGNAL_SE;
411 return host;
414 int
415 NCR_700_release(struct Scsi_Host *host)
417 struct NCR_700_Host_Parameters *hostdata =
418 (struct NCR_700_Host_Parameters *)host->hostdata[0];
420 dma_free_noncoherent(hostdata->dev, TOTAL_MEM_SIZE,
421 hostdata->script, hostdata->pScript);
422 return 1;
425 static inline __u8
426 NCR_700_identify(int can_disconnect, __u8 lun)
428 return IDENTIFY_BASE |
429 ((can_disconnect) ? 0x40 : 0) |
430 (lun & NCR_700_LUN_MASK);
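/* e.g. NCR_700_identify(1, 2) builds the standard SCSI IDENTIFY byte
 * IDENTIFY_BASE (0x80) | 0x40 | 0x02 = 0xc2: identify, disconnect
 * allowed, LUN 2. */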
434 * Function : static int data_residual (Scsi_Host *host)
436 * Purpose : return residual data count of what's in the chip. If you
437 * really want to know what this function is doing, it's almost a
438 * direct transcription of the algorithm described in the 53c710
439 * guide, except that the DBC and DFIFO registers are only 6 bits
440 * wide on a 53c700.
442 * Inputs : host - SCSI host */
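/* Worked example for the 710 branch below: if DFIFO_REG reads 0x45 and
 * the low byte of DBC_REG reads 0x42, then (0x45 - 0x42) & 0x7f = 3
 * bytes are still sitting in the DMA FIFO (plus any byte latched in the
 * SIDL/SODL/SODR registers, which is added afterwards). */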
443 static inline int
444 NCR_700_data_residual (struct Scsi_Host *host) {
445 struct NCR_700_Host_Parameters *hostdata =
446 (struct NCR_700_Host_Parameters *)host->hostdata[0];
447 int count, synchronous = 0;
448 unsigned int ddir;
450 if(hostdata->chip710) {
451 count = ((NCR_700_readb(host, DFIFO_REG) & 0x7f) -
452 (NCR_700_readl(host, DBC_REG) & 0x7f)) & 0x7f;
453 } else {
454 count = ((NCR_700_readb(host, DFIFO_REG) & 0x3f) -
455 (NCR_700_readl(host, DBC_REG) & 0x3f)) & 0x3f;
458 if(hostdata->fast)
459 synchronous = NCR_700_readb(host, SXFER_REG) & 0x0f;
461 /* get the data direction */
462 ddir = NCR_700_readb(host, CTEST0_REG) & 0x01;
464 if (ddir) {
465 /* Receive */
466 if (synchronous)
467 count += (NCR_700_readb(host, SSTAT2_REG) & 0xf0) >> 4;
468 else
469 if (NCR_700_readb(host, SSTAT1_REG) & SIDL_REG_FULL)
470 ++count;
471 } else {
472 /* Send */
473 __u8 sstat = NCR_700_readb(host, SSTAT1_REG);
474 if (sstat & SODL_REG_FULL)
475 ++count;
476 if (synchronous && (sstat & SODR_REG_FULL))
477 ++count;
479 #ifdef NCR_700_DEBUG
480 if(count)
481 printk("RESIDUAL IS %d (ddir %d)\n", count, ddir);
482 #endif
483 return count;
486 /* print out the SCSI wires and corresponding phase from the SBCL register
487 * in the chip */
488 static inline char *
489 sbcl_to_string(__u8 sbcl)
491 int i;
492 static char ret[256];
494 ret[0]='\0';
495 for(i=0; i<8; i++) {
496 if((1<<i) & sbcl)
497 strcat(ret, NCR_700_SBCL_bits[i]);
499 strcat(ret, NCR_700_SBCL_to_phase[sbcl & 0x07]);
500 return ret;
503 static inline __u8
504 bitmap_to_number(__u8 bitmap)
506 __u8 i;
508 for(i=0; i<8 && !(bitmap &(1<<i)); i++)
509 ;
510 return i;
513 /* Pull a slot off the free list */
514 STATIC struct NCR_700_command_slot *
515 find_empty_slot(struct NCR_700_Host_Parameters *hostdata)
517 struct NCR_700_command_slot *slot = hostdata->free_list;
519 if(slot == NULL) {
520 /* sanity check */
521 if(hostdata->command_slot_count != NCR_700_COMMAND_SLOTS_PER_HOST)
522 printk(KERN_ERR "SLOTS FULL, but count is %d, should be %d\n", hostdata->command_slot_count, NCR_700_COMMAND_SLOTS_PER_HOST);
523 return NULL;
526 if(slot->state != NCR_700_SLOT_FREE)
527 /* should panic! */
528 printk(KERN_ERR "BUSY SLOT ON FREE LIST!!!\n");
531 hostdata->free_list = slot->ITL_forw;
532 slot->ITL_forw = NULL;
535 /* NOTE: set the state to busy here, not queued, since this
536 * indicates the slot is in use and cannot be run by the IRQ
537 * finish routine. If we cannot start the command when it
538 * is properly built, we then change to NCR_700_SLOT_QUEUED */
539 slot->state = NCR_700_SLOT_BUSY;
540 slot->flags = 0;
541 hostdata->command_slot_count++;
543 return slot;
546 STATIC void
547 free_slot(struct NCR_700_command_slot *slot,
548 struct NCR_700_Host_Parameters *hostdata)
550 if((slot->state & NCR_700_SLOT_MASK) != NCR_700_SLOT_MAGIC) {
551 printk(KERN_ERR "53c700: SLOT %p is not MAGIC!!!\n", slot);
553 if(slot->state == NCR_700_SLOT_FREE) {
554 printk(KERN_ERR "53c700: SLOT %p is FREE!!!\n", slot);
557 slot->resume_offset = 0;
558 slot->cmnd = NULL;
559 slot->state = NCR_700_SLOT_FREE;
560 slot->ITL_forw = hostdata->free_list;
561 hostdata->free_list = slot;
562 hostdata->command_slot_count--;
566 /* This routine really does very little. The command is indexed on
567 the ITL and (if tagged) the ITLQ lists in _queuecommand */
568 STATIC void
569 save_for_reselection(struct NCR_700_Host_Parameters *hostdata,
570 struct scsi_cmnd *SCp, __u32 dsp)
572 /* It's just possible that this gets executed twice */
573 if(SCp != NULL) {
574 struct NCR_700_command_slot *slot =
575 (struct NCR_700_command_slot *)SCp->host_scribble;
577 slot->resume_offset = dsp;
579 hostdata->state = NCR_700_HOST_FREE;
580 hostdata->cmd = NULL;
583 STATIC inline void
584 NCR_700_unmap(struct NCR_700_Host_Parameters *hostdata, struct scsi_cmnd *SCp,
585 struct NCR_700_command_slot *slot)
587 if(SCp->sc_data_direction != DMA_NONE &&
588 SCp->sc_data_direction != DMA_BIDIRECTIONAL) {
589 if(SCp->use_sg) {
590 dma_unmap_sg(hostdata->dev, SCp->request_buffer,
591 SCp->use_sg, SCp->sc_data_direction);
592 } else {
593 dma_unmap_single(hostdata->dev, slot->dma_handle,
594 SCp->request_bufflen,
595 SCp->sc_data_direction);
600 STATIC inline void
601 NCR_700_scsi_done(struct NCR_700_Host_Parameters *hostdata,
602 struct scsi_cmnd *SCp, int result)
604 hostdata->state = NCR_700_HOST_FREE;
605 hostdata->cmd = NULL;
607 if(SCp != NULL) {
608 struct NCR_700_command_slot *slot =
609 (struct NCR_700_command_slot *)SCp->host_scribble;
611 dma_unmap_single(hostdata->dev, slot->pCmd,
612 sizeof(SCp->cmnd), DMA_TO_DEVICE);
613 if (slot->flags == NCR_700_FLAG_AUTOSENSE) {
614 char *cmnd = NCR_700_get_sense_cmnd(SCp->device);
615 #ifdef NCR_700_DEBUG
616 printk(" ORIGINAL CMD %p RETURNED %d, new return is %d sense is\n",
617 SCp, SCp->cmnd[7], result);
618 scsi_print_sense("53c700", SCp);
620 #endif
621 dma_unmap_single(hostdata->dev, slot->dma_handle, sizeof(SCp->sense_buffer), DMA_FROM_DEVICE);
622 /* restore the old result if the request sense was
623 * successful */
624 if (result == 0)
625 result = cmnd[7];
626 /* restore the original length */
627 SCp->cmd_len = cmnd[8];
628 } else
629 NCR_700_unmap(hostdata, SCp, slot);
631 free_slot(slot, hostdata);
632 #ifdef NCR_700_DEBUG
633 if(NCR_700_get_depth(SCp->device) == 0 ||
634 NCR_700_get_depth(SCp->device) > SCp->device->queue_depth)
635 printk(KERN_ERR "Invalid depth in NCR_700_scsi_done(): %d\n",
636 NCR_700_get_depth(SCp->device));
637 #endif /* NCR_700_DEBUG */
638 NCR_700_set_depth(SCp->device, NCR_700_get_depth(SCp->device) - 1);
640 SCp->host_scribble = NULL;
641 SCp->result = result;
642 SCp->scsi_done(SCp);
643 } else {
644 printk(KERN_ERR "53c700: SCSI DONE HAS NULL SCp\n");
649 STATIC void
650 NCR_700_internal_bus_reset(struct Scsi_Host *host)
652 /* Bus reset */
653 NCR_700_writeb(ASSERT_RST, host, SCNTL1_REG);
654 udelay(50);
655 NCR_700_writeb(0, host, SCNTL1_REG);
659 STATIC void
660 NCR_700_chip_setup(struct Scsi_Host *host)
662 struct NCR_700_Host_Parameters *hostdata =
663 (struct NCR_700_Host_Parameters *)host->hostdata[0];
664 __u32 dcntl_extra = 0;
665 __u8 min_period;
666 __u8 min_xferp = (hostdata->chip710 ? NCR_710_MIN_XFERP : NCR_700_MIN_XFERP);
668 if(hostdata->chip710) {
669 __u8 burst_disable = 0;
670 __u8 burst_length = 0;
672 switch (hostdata->burst_length) {
673 case 1:
674 burst_length = BURST_LENGTH_1;
675 break;
676 case 2:
677 burst_length = BURST_LENGTH_2;
678 break;
679 case 4:
680 burst_length = BURST_LENGTH_4;
681 break;
682 case 8:
683 burst_length = BURST_LENGTH_8;
684 break;
685 default:
686 burst_disable = BURST_DISABLE;
687 break;
689 dcntl_extra = COMPAT_700_MODE;
691 NCR_700_writeb(dcntl_extra, host, DCNTL_REG);
692 NCR_700_writeb(burst_length | hostdata->dmode_extra,
693 host, DMODE_710_REG);
694 NCR_700_writeb(burst_disable | (hostdata->differential ?
695 DIFF : 0), host, CTEST7_REG);
696 NCR_700_writeb(BTB_TIMER_DISABLE, host, CTEST0_REG);
697 NCR_700_writeb(FULL_ARBITRATION | ENABLE_PARITY | PARITY
698 | AUTO_ATN, host, SCNTL0_REG);
699 } else {
700 NCR_700_writeb(BURST_LENGTH_8 | hostdata->dmode_extra,
701 host, DMODE_700_REG);
702 NCR_700_writeb(hostdata->differential ?
703 DIFF : 0, host, CTEST7_REG);
704 if(hostdata->fast) {
705 /* this is for 700-66, does nothing on 700 */
706 NCR_700_writeb(LAST_DIS_ENBL | ENABLE_ACTIVE_NEGATION
707 | GENERATE_RECEIVE_PARITY, host,
708 CTEST8_REG);
709 } else {
710 NCR_700_writeb(FULL_ARBITRATION | ENABLE_PARITY
711 | PARITY | AUTO_ATN, host, SCNTL0_REG);
715 NCR_700_writeb(1 << host->this_id, host, SCID_REG);
716 NCR_700_writeb(0, host, SBCL_REG);
717 NCR_700_writeb(ASYNC_OPERATION, host, SXFER_REG);
719 NCR_700_writeb(PHASE_MM_INT | SEL_TIMEOUT_INT | GROSS_ERR_INT | UX_DISC_INT
720 | RST_INT | PAR_ERR_INT | SELECT_INT, host, SIEN_REG);
722 NCR_700_writeb(ABORT_INT | INT_INST_INT | ILGL_INST_INT, host, DIEN_REG);
723 NCR_700_writeb(ENABLE_SELECT, host, SCNTL1_REG);
724 if(hostdata->clock > 75) {
725 printk(KERN_ERR "53c700: Clock speed %dMHz is too high: 75MHz is the maximum this chip can be driven at\n", hostdata->clock);
726 /* do the best we can, but the async clock will be out
727 * of spec: sync divider 2, async divider 3 */
728 DEBUG(("53c700: sync 2 async 3\n"));
729 NCR_700_writeb(SYNC_DIV_2_0, host, SBCL_REG);
730 NCR_700_writeb(ASYNC_DIV_3_0 | dcntl_extra, host, DCNTL_REG);
731 hostdata->sync_clock = hostdata->clock/2;
732 } else if(hostdata->clock > 50 && hostdata->clock <= 75) {
733 /* sync divider 1.5, async divider 3 */
734 DEBUG(("53c700: sync 1.5 async 3\n"));
735 NCR_700_writeb(SYNC_DIV_1_5, host, SBCL_REG);
736 NCR_700_writeb(ASYNC_DIV_3_0 | dcntl_extra, host, DCNTL_REG);
737 hostdata->sync_clock = hostdata->clock*2;
738 hostdata->sync_clock /= 3;
740 } else if(hostdata->clock > 37 && hostdata->clock <= 50) {
741 /* sync divider 1, async divider 2 */
742 DEBUG(("53c700: sync 1 async 2\n"));
743 NCR_700_writeb(SYNC_DIV_1_0, host, SBCL_REG);
744 NCR_700_writeb(ASYNC_DIV_2_0 | dcntl_extra, host, DCNTL_REG);
745 hostdata->sync_clock = hostdata->clock;
746 } else if(hostdata->clock > 25 && hostdata->clock <=37) {
747 /* sync divider 1, async divider 1.5 */
748 DEBUG(("53c700: sync 1 async 1.5\n"));
749 NCR_700_writeb(SYNC_DIV_1_0, host, SBCL_REG);
750 NCR_700_writeb(ASYNC_DIV_1_5 | dcntl_extra, host, DCNTL_REG);
751 hostdata->sync_clock = hostdata->clock;
752 } else {
753 DEBUG(("53c700: sync 1 async 1\n"));
754 NCR_700_writeb(SYNC_DIV_1_0, host, SBCL_REG);
755 NCR_700_writeb(ASYNC_DIV_1_0 | dcntl_extra, host, DCNTL_REG);
756 /* sync divider 1, async divider 1 */
757 hostdata->sync_clock = hostdata->clock;
759 /* Calculate the actual minimum period that can be supported
760 * by our synchronous clock speed. See the 710 manual for
761 * exact details of this calculation which is based on a
762 * setting of the SXFER register */
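/* e.g. (sketch numbers only): with a 50MHz sync_clock and a minimum
 * XFERP of 4, min_period = 1000*(4+4)/(4*50) = 40, i.e. a 4*40 = 160ns
 * fastest synchronous period for this clock. */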
763 min_period = 1000*(4+min_xferp)/(4*hostdata->sync_clock);
764 hostdata->min_period = NCR_700_MIN_PERIOD;
765 if(min_period > NCR_700_MIN_PERIOD)
766 hostdata->min_period = min_period;
769 STATIC void
770 NCR_700_chip_reset(struct Scsi_Host *host)
772 struct NCR_700_Host_Parameters *hostdata =
773 (struct NCR_700_Host_Parameters *)host->hostdata[0];
774 if(hostdata->chip710) {
775 NCR_700_writeb(SOFTWARE_RESET_710, host, ISTAT_REG);
776 udelay(100);
778 NCR_700_writeb(0, host, ISTAT_REG);
779 } else {
780 NCR_700_writeb(SOFTWARE_RESET, host, DCNTL_REG);
781 udelay(100);
783 NCR_700_writeb(0, host, DCNTL_REG);
786 mdelay(1000);
788 NCR_700_chip_setup(host);
791 /* The heart of the message processing engine is that the instruction
792 * immediately after the INT is the normal case (and so must be CLEAR
793 * ACK). If we want to do something else, we call that routine in
794 * scripts and set temp to be the normal case + 8 (skipping the CLEAR
795 * ACK) so that the routine returns correctly to resume its activity
796 * */
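/* Concretely: the message handlers below load TEMP_REG with
 * temp = dsp + 8, so when they divert the script to a routine such as
 * Ent_SendMessageWithATN (e.g. to send A_REJECT_MSG), the scripts
 * RETURN comes back just past the CLEAR ACK at dsp instead of
 * re-executing it. */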
797 STATIC __u32
798 process_extended_message(struct Scsi_Host *host,
799 struct NCR_700_Host_Parameters *hostdata,
800 struct scsi_cmnd *SCp, __u32 dsp, __u32 dsps)
802 __u32 resume_offset = dsp, temp = dsp + 8;
803 __u8 pun = 0xff, lun = 0xff;
805 if(SCp != NULL) {
806 pun = SCp->device->id;
807 lun = SCp->device->lun;
810 switch(hostdata->msgin[2]) {
811 case A_SDTR_MSG:
812 if(SCp != NULL && NCR_700_is_flag_set(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION)) {
813 struct scsi_target *starget = SCp->device->sdev_target;
814 __u8 period = hostdata->msgin[3];
815 __u8 offset = hostdata->msgin[4];
817 if(offset == 0 || period == 0) {
818 offset = 0;
819 period = 0;
822 spi_offset(starget) = offset;
823 spi_period(starget) = period;
825 if(NCR_700_is_flag_set(SCp->device, NCR_700_DEV_PRINT_SYNC_NEGOTIATION)) {
826 spi_display_xfer_agreement(starget);
827 NCR_700_clear_flag(SCp->device, NCR_700_DEV_PRINT_SYNC_NEGOTIATION);
830 NCR_700_set_flag(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC);
831 NCR_700_clear_flag(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
833 NCR_700_writeb(NCR_700_get_SXFER(SCp->device),
834 host, SXFER_REG);
836 } else {
837 /* SDTR message out of the blue, reject it */
838 shost_printk(KERN_WARNING, host,
839 "Unexpected SDTR msg\n");
840 hostdata->msgout[0] = A_REJECT_MSG;
841 dma_cache_sync(hostdata->dev, hostdata->msgout, 1, DMA_TO_DEVICE);
842 script_patch_16(hostdata->dev, hostdata->script,
843 MessageCount, 1);
844 /* SendMsgOut returns, so set up the return
845 * address */
846 resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
848 break;
850 case A_WDTR_MSG:
851 printk(KERN_INFO "scsi%d: (%d:%d), Unsolicited WDTR after CMD, Rejecting\n",
852 host->host_no, pun, lun);
853 hostdata->msgout[0] = A_REJECT_MSG;
854 dma_cache_sync(hostdata->dev, hostdata->msgout, 1, DMA_TO_DEVICE);
855 script_patch_16(hostdata->dev, hostdata->script, MessageCount,
856 1);
857 resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
859 break;
861 default:
862 printk(KERN_INFO "scsi%d (%d:%d): Unexpected message %s: ",
863 host->host_no, pun, lun,
864 NCR_700_phase[(dsps & 0xf00) >> 8]);
865 spi_print_msg(hostdata->msgin);
866 printk("\n");
867 /* just reject it */
868 hostdata->msgout[0] = A_REJECT_MSG;
869 dma_cache_sync(hostdata->dev, hostdata->msgout, 1, DMA_TO_DEVICE);
869 script_patch_16(hostdata->dev, hostdata->script, MessageCount,
870 1);
872 /* SendMsgOut returns, so set up the return
873 * address */
874 resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
876 NCR_700_writel(temp, host, TEMP_REG);
877 return resume_offset;
880 STATIC __u32
881 process_message(struct Scsi_Host *host, struct NCR_700_Host_Parameters *hostdata,
882 struct scsi_cmnd *SCp, __u32 dsp, __u32 dsps)
884 /* work out where to return to */
885 __u32 temp = dsp + 8, resume_offset = dsp;
886 __u8 pun = 0xff, lun = 0xff;
888 if(SCp != NULL) {
889 pun = SCp->device->id;
890 lun = SCp->device->lun;
893 #ifdef NCR_700_DEBUG
894 printk("scsi%d (%d:%d): message %s: ", host->host_no, pun, lun,
895 NCR_700_phase[(dsps & 0xf00) >> 8]);
896 spi_print_msg(hostdata->msgin);
897 printk("\n");
898 #endif
900 switch(hostdata->msgin[0]) {
902 case A_EXTENDED_MSG:
903 resume_offset = process_extended_message(host, hostdata, SCp,
904 dsp, dsps);
905 break;
907 case A_REJECT_MSG:
908 if(SCp != NULL && NCR_700_is_flag_set(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION)) {
909 /* Rejected our sync negotiation attempt */
910 spi_period(SCp->device->sdev_target) =
911 spi_offset(SCp->device->sdev_target) = 0;
912 NCR_700_set_flag(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC);
913 NCR_700_clear_flag(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
914 } else if(SCp != NULL && NCR_700_get_tag_neg_state(SCp->device) == NCR_700_DURING_TAG_NEGOTIATION) {
915 /* rejected our first simple tag message */
916 scmd_printk(KERN_WARNING, SCp,
917 "Rejected first tag queue attempt, turning off tag queueing\n");
918 /* we're done negotiating */
919 NCR_700_set_tag_neg_state(SCp->device, NCR_700_FINISHED_TAG_NEGOTIATION);
920 hostdata->tag_negotiated &= ~(1<<scmd_id(SCp));
921 SCp->device->tagged_supported = 0;
922 scsi_deactivate_tcq(SCp->device, host->cmd_per_lun);
923 } else {
924 shost_printk(KERN_WARNING, host,
925 "(%d:%d) Unexpected REJECT Message %s\n",
926 pun, lun,
927 NCR_700_phase[(dsps & 0xf00) >> 8]);
928 /* however, just ignore it */
930 break;
932 case A_PARITY_ERROR_MSG:
933 printk(KERN_ERR "scsi%d (%d:%d) Parity Error!\n", host->host_no,
934 pun, lun);
935 NCR_700_internal_bus_reset(host);
936 break;
937 case A_SIMPLE_TAG_MSG:
938 printk(KERN_INFO "scsi%d (%d:%d) SIMPLE TAG %d %s\n", host->host_no,
939 pun, lun, hostdata->msgin[1],
940 NCR_700_phase[(dsps & 0xf00) >> 8]);
941 /* just ignore it */
942 break;
943 default:
944 printk(KERN_INFO "scsi%d (%d:%d): Unexpected message %s: ",
945 host->host_no, pun, lun,
946 NCR_700_phase[(dsps & 0xf00) >> 8]);
948 spi_print_msg(hostdata->msgin);
949 printk("\n");
950 /* just reject it */
951 hostdata->msgout[0] = A_REJECT_MSG;
952 dma_cache_sync(hostdata->dev, hostdata->msgout, 1, DMA_TO_DEVICE);
953 script_patch_16(hostdata->dev, hostdata->script, MessageCount,
954 1);
955 /* SendMsgOut returns, so set up the return
956 * address */
957 resume_offset = hostdata->pScript + Ent_SendMessageWithATN;
959 break;
961 NCR_700_writel(temp, host, TEMP_REG);
962 /* set us up to receive another message */
963 dma_cache_sync(hostdata->dev, hostdata->msgin, MSG_ARRAY_SIZE, DMA_FROM_DEVICE);
964 return resume_offset;
967 STATIC __u32
968 process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp,
969 struct Scsi_Host *host,
970 struct NCR_700_Host_Parameters *hostdata)
972 __u32 resume_offset = 0;
973 __u8 pun = 0xff, lun=0xff;
975 if(SCp != NULL) {
976 pun = SCp->device->id;
977 lun = SCp->device->lun;
980 if(dsps == A_GOOD_STATUS_AFTER_STATUS) {
981 DEBUG((" COMMAND COMPLETE, status=%02x\n",
982 hostdata->status[0]));
983 /* OK, if TCQ still under negotiation, we now know it works */
984 if (NCR_700_get_tag_neg_state(SCp->device) == NCR_700_DURING_TAG_NEGOTIATION)
985 NCR_700_set_tag_neg_state(SCp->device,
986 NCR_700_FINISHED_TAG_NEGOTIATION);
988 /* check for contingent allegiance conditions */
989 if(status_byte(hostdata->status[0]) == CHECK_CONDITION ||
990 status_byte(hostdata->status[0]) == COMMAND_TERMINATED) {
991 struct NCR_700_command_slot *slot =
992 (struct NCR_700_command_slot *)SCp->host_scribble;
993 if(slot->flags == NCR_700_FLAG_AUTOSENSE) {
994 /* OOPS: bad device, returning another
995 * contingent allegiance condition */
996 scmd_printk(KERN_ERR, SCp,
997 "broken device is looping in contingent allegiance: ignoring\n");
998 NCR_700_scsi_done(hostdata, SCp, hostdata->status[0]);
999 } else {
1000 char *cmnd =
1001 NCR_700_get_sense_cmnd(SCp->device);
1002 #ifdef NCR_DEBUG
1003 scsi_print_command(SCp);
1004 printk(" cmd %p has status %d, requesting sense\n",
1005 SCp, hostdata->status[0]);
1006 #endif
1007 /* we can destroy the command here
1008 * because the contingent allegiance
1009 * condition will cause a retry which
1010 * will re-copy the command from the
1011 * saved data_cmnd. We also unmap any
1012 * data associated with the command
1013 * here */
1014 NCR_700_unmap(hostdata, SCp, slot);
1015 dma_unmap_single(hostdata->dev, slot->pCmd,
1016 sizeof(SCp->cmnd),
1017 DMA_TO_DEVICE);
1019 cmnd[0] = REQUEST_SENSE;
1020 cmnd[1] = (SCp->device->lun & 0x7) << 5;
1021 cmnd[2] = 0;
1022 cmnd[3] = 0;
1023 cmnd[4] = sizeof(SCp->sense_buffer);
1024 cmnd[5] = 0;
1025 /* Here's a quiet hack: the
1026 * REQUEST_SENSE command is six bytes,
1027 * so store a flag indicating that
1028 * this was an internal sense request
1029 * and the original status at the end
1030 * of the command */
1031 cmnd[6] = NCR_700_INTERNAL_SENSE_MAGIC;
1032 cmnd[7] = hostdata->status[0];
1033 cmnd[8] = SCp->cmd_len;
1034 SCp->cmd_len = 6; /* command length for
1035 * REQUEST_SENSE */
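/* (NCR_700_scsi_done() above undoes this for AUTOSENSE slots: it
 * restores the original status from cmnd[7] and the original command
 * length from cmnd[8].) */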
1036 slot->pCmd = dma_map_single(hostdata->dev, cmnd, MAX_COMMAND_SIZE, DMA_TO_DEVICE);
1037 slot->dma_handle = dma_map_single(hostdata->dev, SCp->sense_buffer, sizeof(SCp->sense_buffer), DMA_FROM_DEVICE);
1038 slot->SG[0].ins = bS_to_host(SCRIPT_MOVE_DATA_IN | sizeof(SCp->sense_buffer));
1039 slot->SG[0].pAddr = bS_to_host(slot->dma_handle);
1040 slot->SG[1].ins = bS_to_host(SCRIPT_RETURN);
1041 slot->SG[1].pAddr = 0;
1042 slot->resume_offset = hostdata->pScript;
1043 dma_cache_sync(hostdata->dev, slot->SG, sizeof(slot->SG[0])*2, DMA_TO_DEVICE);
1044 dma_cache_sync(hostdata->dev, SCp->sense_buffer, sizeof(SCp->sense_buffer), DMA_FROM_DEVICE);
1046 /* queue the command for reissue */
1047 slot->state = NCR_700_SLOT_QUEUED;
1048 slot->flags = NCR_700_FLAG_AUTOSENSE;
1049 hostdata->state = NCR_700_HOST_FREE;
1050 hostdata->cmd = NULL;
1052 } else {
1053 // Currently rely on the mid layer evaluation
1054 // of the tag queuing capability
1056 //if(status_byte(hostdata->status[0]) == GOOD &&
1057 // SCp->cmnd[0] == INQUIRY && SCp->use_sg == 0) {
1058 // /* Piggy back the tag queueing support
1059 // * on this command */
1060 // dma_sync_single_for_cpu(hostdata->dev,
1061 // slot->dma_handle,
1062 // SCp->request_bufflen,
1063 // DMA_FROM_DEVICE);
1064 // if(((char *)SCp->request_buffer)[7] & 0x02) {
1065 // scmd_printk(KERN_INFO, SCp,
1066 // "Enabling Tag Command Queuing\n");
1067 // hostdata->tag_negotiated |= (1<<scmd_id(SCp));
1068 // NCR_700_set_flag(SCp->device, NCR_700_DEV_BEGIN_TAG_QUEUEING);
1069 // } else {
1070 // NCR_700_clear_flag(SCp->device, NCR_700_DEV_BEGIN_TAG_QUEUEING);
1071 // hostdata->tag_negotiated &= ~(1<<scmd_id(SCp));
1072 // }
1074 NCR_700_scsi_done(hostdata, SCp, hostdata->status[0]);
1076 } else if((dsps & 0xfffff0f0) == A_UNEXPECTED_PHASE) {
1077 __u8 i = (dsps & 0xf00) >> 8;
1079 scmd_printk(KERN_ERR, SCp, "UNEXPECTED PHASE %s (%s)\n",
1080 NCR_700_phase[i],
1081 sbcl_to_string(NCR_700_readb(host, SBCL_REG)));
1082 scmd_printk(KERN_ERR, SCp, " len = %d, cmd =",
1083 SCp->cmd_len);
1084 scsi_print_command(SCp);
1086 NCR_700_internal_bus_reset(host);
1087 } else if((dsps & 0xfffff000) == A_FATAL) {
1088 int i = (dsps & 0xfff);
1090 printk(KERN_ERR "scsi%d: (%d:%d) FATAL ERROR: %s\n",
1091 host->host_no, pun, lun, NCR_700_fatal_messages[i]);
1092 if(dsps == A_FATAL_ILLEGAL_MSG_LENGTH) {
1093 printk(KERN_ERR " msg begins %02x %02x\n",
1094 hostdata->msgin[0], hostdata->msgin[1]);
1096 NCR_700_internal_bus_reset(host);
1097 } else if((dsps & 0xfffff0f0) == A_DISCONNECT) {
1098 #ifdef NCR_700_DEBUG
1099 __u8 i = (dsps & 0xf00) >> 8;
1101 printk("scsi%d: (%d:%d), DISCONNECTED (%d) %s\n",
1102 host->host_no, pun, lun,
1103 i, NCR_700_phase[i]);
1104 #endif
1105 save_for_reselection(hostdata, SCp, dsp);
1107 } else if(dsps == A_RESELECTION_IDENTIFIED) {
1108 __u8 lun;
1109 struct NCR_700_command_slot *slot;
1110 __u8 reselection_id = hostdata->reselection_id;
1111 struct scsi_device *SDp;
1113 lun = hostdata->msgin[0] & 0x1f;
1115 hostdata->reselection_id = 0xff;
1116 DEBUG(("scsi%d: (%d:%d) RESELECTED!\n",
1117 host->host_no, reselection_id, lun));
1118 /* clear the reselection indicator */
1119 SDp = __scsi_device_lookup(host, 0, reselection_id, lun);
1120 if(unlikely(SDp == NULL)) {
1121 printk(KERN_ERR "scsi%d: (%d:%d) HAS NO device\n",
1122 host->host_no, reselection_id, lun);
1123 BUG();
1125 if(hostdata->msgin[1] == A_SIMPLE_TAG_MSG) {
1126 struct scsi_cmnd *SCp = scsi_find_tag(SDp, hostdata->msgin[2]);
1127 if(unlikely(SCp == NULL)) {
1128 printk(KERN_ERR "scsi%d: (%d:%d) no saved request for tag %d\n",
1129 host->host_no, reselection_id, lun, hostdata->msgin[2]);
1130 BUG();
1133 slot = (struct NCR_700_command_slot *)SCp->host_scribble;
1134 DDEBUG(KERN_DEBUG, SDp,
1135 "reselection is tag %d, slot %p(%d)\n",
1136 hostdata->msgin[2], slot, slot->tag);
1137 } else {
1138 struct scsi_cmnd *SCp = scsi_find_tag(SDp, SCSI_NO_TAG);
1139 if(unlikely(SCp == NULL)) {
1140 sdev_printk(KERN_ERR, SDp,
1141 "no saved request for untagged cmd\n");
1142 BUG();
1144 slot = (struct NCR_700_command_slot *)SCp->host_scribble;
1147 if(slot == NULL) {
1148 printk(KERN_ERR "scsi%d: (%d:%d) RESELECTED but no saved command (MSG = %02x %02x %02x)!!\n",
1149 host->host_no, reselection_id, lun,
1150 hostdata->msgin[0], hostdata->msgin[1],
1151 hostdata->msgin[2]);
1152 } else {
1153 if(hostdata->state != NCR_700_HOST_BUSY)
1154 printk(KERN_ERR "scsi%d: FATAL, host not busy during valid reselection!\n",
1155 host->host_no);
1156 resume_offset = slot->resume_offset;
1157 hostdata->cmd = slot->cmnd;
1159 /* re-patch for this command */
1160 script_patch_32_abs(hostdata->dev, hostdata->script,
1161 CommandAddress, slot->pCmd);
1162 script_patch_16(hostdata->dev, hostdata->script,
1163 CommandCount, slot->cmnd->cmd_len);
1164 script_patch_32_abs(hostdata->dev, hostdata->script,
1165 SGScriptStartAddress,
1166 to32bit(&slot->pSG[0].ins));
1168 /* Note: setting SXFER only works if we're
1169 * still in the MESSAGE phase, so it is vital
1170 * that ACK is still asserted when we process
1171 * the reselection message. The resume offset
1172 * should therefore always clear ACK */
1173 NCR_700_writeb(NCR_700_get_SXFER(hostdata->cmd->device),
1174 host, SXFER_REG);
1175 dma_cache_sync(hostdata->dev, hostdata->msgin,
1176 MSG_ARRAY_SIZE, DMA_FROM_DEVICE);
1177 dma_cache_sync(hostdata->dev, hostdata->msgout,
1178 MSG_ARRAY_SIZE, DMA_TO_DEVICE);
1179 /* I'm just being paranoid here, the command should
1180 * already have been flushed from the cache */
1181 dma_cache_sync(hostdata->dev, slot->cmnd->cmnd,
1182 slot->cmnd->cmd_len, DMA_TO_DEVICE);
1187 } else if(dsps == A_RESELECTED_DURING_SELECTION) {
1189 /* This section is full of debugging code because I've
1190 * never managed to reach it. I think what happens is
1191 * that, because the 700 runs with selection
1192 * interrupts enabled the whole time, we take a
1193 * selection interrupt before we manage to get to the
1194 * reselected script interrupt */
1196 __u8 reselection_id = NCR_700_readb(host, SFBR_REG);
1197 struct NCR_700_command_slot *slot;
1199 /* Take out our own ID */
1200 reselection_id &= ~(1<<host->this_id);
1202 /* I've never seen this happen, so keep this as a printk rather
1203 * than a debug */
1204 printk(KERN_INFO "scsi%d: (%d:%d) RESELECTION DURING SELECTION, dsp=%08x[%04x] state=%d, count=%d\n",
1205 host->host_no, reselection_id, lun, dsp, dsp - hostdata->pScript, hostdata->state, hostdata->command_slot_count);
1208 /* FIXME: DEBUGGING CODE */
1209 __u32 SG = (__u32)bS_to_cpu(hostdata->script[A_SGScriptStartAddress_used[0]]);
1210 int i;
1212 for(i=0; i< NCR_700_COMMAND_SLOTS_PER_HOST; i++) {
1213 if(SG >= to32bit(&hostdata->slots[i].pSG[0])
1214 && SG <= to32bit(&hostdata->slots[i].pSG[NCR_700_SG_SEGMENTS]))
1215 break;
1217 printk(KERN_INFO "IDENTIFIED SG segment as being %08x in slot %p, cmd %p, slot->resume_offset=%08x\n", SG, &hostdata->slots[i], hostdata->slots[i].cmnd, hostdata->slots[i].resume_offset);
1218 SCp = hostdata->slots[i].cmnd;
1221 if(SCp != NULL) {
1222 slot = (struct NCR_700_command_slot *)SCp->host_scribble;
1223 /* change slot from busy to queued to redo command */
1224 slot->state = NCR_700_SLOT_QUEUED;
1226 hostdata->cmd = NULL;
1228 if(reselection_id == 0) {
1229 if(hostdata->reselection_id == 0xff) {
1230 printk(KERN_ERR "scsi%d: Invalid reselection during selection!!\n", host->host_no);
1231 return 0;
1232 } else {
1233 printk(KERN_ERR "scsi%d: script reselected and we took a selection interrupt\n",
1234 host->host_no);
1235 reselection_id = hostdata->reselection_id;
1237 } else {
1239 /* convert to real ID */
1240 reselection_id = bitmap_to_number(reselection_id);
1242 hostdata->reselection_id = reselection_id;
1243 /* just in case we have a stale simple tag message, clear it */
1244 hostdata->msgin[1] = 0;
1245 dma_cache_sync(hostdata->dev, hostdata->msgin,
1246 MSG_ARRAY_SIZE, DMA_BIDIRECTIONAL);
1247 if(hostdata->tag_negotiated & (1<<reselection_id)) {
1248 resume_offset = hostdata->pScript + Ent_GetReselectionWithTag;
1249 } else {
1250 resume_offset = hostdata->pScript + Ent_GetReselectionData;
1252 } else if(dsps == A_COMPLETED_SELECTION_AS_TARGET) {
1253 /* we've just disconnected from the bus, do nothing since
1254 * a return here will re-run the queued command slot
1255 * that may have been interrupted by the initial selection */
1256 DEBUG((" SELECTION COMPLETED\n"));
1257 } else if((dsps & 0xfffff0f0) == A_MSG_IN) {
1258 resume_offset = process_message(host, hostdata, SCp,
1259 dsp, dsps);
1260 } else if((dsps & 0xfffff000) == 0) {
1261 __u8 i = (dsps & 0xf0) >> 4, j = (dsps & 0xf00) >> 8;
1262 printk(KERN_ERR "scsi%d: (%d:%d), unhandled script condition %s %s at %04x\n",
1263 host->host_no, pun, lun, NCR_700_condition[i],
1264 NCR_700_phase[j], dsp - hostdata->pScript);
1265 if(SCp != NULL) {
1266 scsi_print_command(SCp);
1268 if(SCp->use_sg) {
1269 for(i = 0; i < SCp->use_sg + 1; i++) {
1270 printk(KERN_INFO " SG[%d].length = %d, move_insn=%08x, addr %08x\n", i, ((struct scatterlist *)SCp->request_buffer)[i].length, ((struct NCR_700_command_slot *)SCp->host_scribble)->SG[i].ins, ((struct NCR_700_command_slot *)SCp->host_scribble)->SG[i].pAddr);
1274 NCR_700_internal_bus_reset(host);
1275 } else if((dsps & 0xfffff000) == A_DEBUG_INTERRUPT) {
1276 printk(KERN_NOTICE "scsi%d (%d:%d) DEBUG INTERRUPT %d AT %08x[%04x], continuing\n",
1277 host->host_no, pun, lun, dsps & 0xfff, dsp, dsp - hostdata->pScript);
1278 resume_offset = dsp;
1279 } else {
1280 printk(KERN_ERR "scsi%d: (%d:%d), unidentified script interrupt 0x%x at %04x\n",
1281 host->host_no, pun, lun, dsps, dsp - hostdata->pScript);
1282 NCR_700_internal_bus_reset(host);
1284 return resume_offset;
1287 /* We run the 53c700 with selection interrupts always enabled. This
1288 * means that the chip may be selected as soon as the bus frees. On a
1289 * busy bus, this can be before the scripts engine finishes its
1290 * processing. Therefore, part of the selection processing has to be
1291 * to find out what the scripts engine is doing and complete the
1292 * function if necessary (i.e. process the pending disconnect or save
1293 * the interrupted initial selection) */
1294 STATIC inline __u32
1295 process_selection(struct Scsi_Host *host, __u32 dsp)
1297 __u8 id = 0; /* Squash compiler warning */
1298 int count = 0;
1299 __u32 resume_offset = 0;
1300 struct NCR_700_Host_Parameters *hostdata =
1301 (struct NCR_700_Host_Parameters *)host->hostdata[0];
1302 struct scsi_cmnd *SCp = hostdata->cmd;
1303 __u8 sbcl;
1305 for(count = 0; count < 5; count++) {
1306 id = NCR_700_readb(host, hostdata->chip710 ?
1307 CTEST9_REG : SFBR_REG);
1309 /* Take out our own ID */
1310 id &= ~(1<<host->this_id);
1311 if(id != 0)
1312 break;
1313 udelay(5);
1315 sbcl = NCR_700_readb(host, SBCL_REG);
1316 if((sbcl & SBCL_IO) == 0) {
1317 /* mark as having been selected rather than reselected */
1318 id = 0xff;
1319 } else {
1320 /* convert to real ID */
1321 hostdata->reselection_id = id = bitmap_to_number(id);
1322 DEBUG(("scsi%d: Reselected by %d\n",
1323 host->host_no, id));
1325 if(hostdata->state == NCR_700_HOST_BUSY && SCp != NULL) {
1326 struct NCR_700_command_slot *slot =
1327 (struct NCR_700_command_slot *)SCp->host_scribble;
1328 DEBUG((" ID %d WARNING: RESELECTION OF BUSY HOST, saving cmd %p, slot %p, addr %x [%04x], resume %x!\n", id, hostdata->cmd, slot, dsp, dsp - hostdata->pScript, resume_offset));
1330 switch(dsp - hostdata->pScript) {
1331 case Ent_Disconnect1:
1332 case Ent_Disconnect2:
1333 save_for_reselection(hostdata, SCp, Ent_Disconnect2 + hostdata->pScript);
1334 break;
1335 case Ent_Disconnect3:
1336 case Ent_Disconnect4:
1337 save_for_reselection(hostdata, SCp, Ent_Disconnect4 + hostdata->pScript);
1338 break;
1339 case Ent_Disconnect5:
1340 case Ent_Disconnect6:
1341 save_for_reselection(hostdata, SCp, Ent_Disconnect6 + hostdata->pScript);
1342 break;
1343 case Ent_Disconnect7:
1344 case Ent_Disconnect8:
1345 save_for_reselection(hostdata, SCp, Ent_Disconnect8 + hostdata->pScript);
1346 break;
1347 case Ent_Finish1:
1348 case Ent_Finish2:
1349 process_script_interrupt(A_GOOD_STATUS_AFTER_STATUS, dsp, SCp, host, hostdata);
1350 break;
1352 default:
1353 slot->state = NCR_700_SLOT_QUEUED;
1354 break;
1357 hostdata->state = NCR_700_HOST_BUSY;
1358 hostdata->cmd = NULL;
1359 /* clear any stale simple tag message */
1360 hostdata->msgin[1] = 0;
1361 dma_cache_sync(hostdata->dev, hostdata->msgin, MSG_ARRAY_SIZE,
1362 DMA_BIDIRECTIONAL);
1364 if(id == 0xff) {
1365 /* Selected as target, Ignore */
1366 resume_offset = hostdata->pScript + Ent_SelectedAsTarget;
1367 } else if(hostdata->tag_negotiated & (1<<id)) {
1368 resume_offset = hostdata->pScript + Ent_GetReselectionWithTag;
1369 } else {
1370 resume_offset = hostdata->pScript + Ent_GetReselectionData;
1372 return resume_offset;
1375 static inline void
1376 NCR_700_clear_fifo(struct Scsi_Host *host) {
1377 const struct NCR_700_Host_Parameters *hostdata
1378 = (struct NCR_700_Host_Parameters *)host->hostdata[0];
1379 if(hostdata->chip710) {
1380 NCR_700_writeb(CLR_FIFO_710, host, CTEST8_REG);
1381 } else {
1382 NCR_700_writeb(CLR_FIFO, host, DFIFO_REG);
1386 static inline void
1387 NCR_700_flush_fifo(struct Scsi_Host *host) {
1388 const struct NCR_700_Host_Parameters *hostdata
1389 = (struct NCR_700_Host_Parameters *)host->hostdata[0];
1390 if(hostdata->chip710) {
1391 NCR_700_writeb(FLUSH_DMA_FIFO_710, host, CTEST8_REG);
1392 udelay(10);
1393 NCR_700_writeb(0, host, CTEST8_REG);
1394 } else {
1395 NCR_700_writeb(FLUSH_DMA_FIFO, host, DFIFO_REG);
1396 udelay(10);
1397 NCR_700_writeb(0, host, DFIFO_REG);
1402 /* The queue lock with interrupts disabled must be held on entry to
1403 * this function */
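/* A minimal sketch of the expected calling context, assuming nothing
 * beyond what this file already relies on (in this era of the SCSI
 * midlayer queuecommand is entered with the host lock held, and
 * NCR_700_intr() takes it explicitly):
 *
 *	spin_lock_irqsave(host->host_lock, flags);
 *	... fill in the command slot ...
 *	NCR_700_start_command(SCp);
 *	spin_unlock_irqrestore(host->host_lock, flags);
 */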
1404 STATIC int
1405 NCR_700_start_command(struct scsi_cmnd *SCp)
1407 struct NCR_700_command_slot *slot =
1408 (struct NCR_700_command_slot *)SCp->host_scribble;
1409 struct NCR_700_Host_Parameters *hostdata =
1410 (struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0];
1411 __u16 count = 1; /* for IDENTIFY message */
1413 if(hostdata->state != NCR_700_HOST_FREE) {
1414 /* keep this inside the lock to close the race window where
1415 * the running command finishes on another CPU before we
1416 * change the state to queued on this one */
1417 slot->state = NCR_700_SLOT_QUEUED;
1419 DEBUG(("scsi%d: host busy, queueing command %p, slot %p\n",
1420 SCp->device->host->host_no, slot->cmnd, slot));
1421 return 0;
1423 hostdata->state = NCR_700_HOST_BUSY;
1424 hostdata->cmd = SCp;
1425 slot->state = NCR_700_SLOT_BUSY;
1426 /* keep interrupts disabled until we have the command correctly
1427 * set up so we cannot take a selection interrupt */
1429 hostdata->msgout[0] = NCR_700_identify((SCp->cmnd[0] != REQUEST_SENSE &&
1430 slot->flags != NCR_700_FLAG_AUTOSENSE),
1431 SCp->device->lun);
1432 /* for INQUIRY or REQUEST_SENSE commands, we cannot be sure
1433 * if the negotiated transfer parameters still hold, so
1434 * always renegotiate them */
1435 if(SCp->cmnd[0] == INQUIRY || SCp->cmnd[0] == REQUEST_SENSE ||
1436 slot->flags == NCR_700_FLAG_AUTOSENSE) {
1437 NCR_700_clear_flag(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC);
1440 /* REQUEST_SENSE is asking for contingent I_T_L(_Q) status.
1441 * If a contingent allegiance condition exists, the device
1442 * will refuse all tags, so send the request sense as untagged
1443 * */
1444 if((hostdata->tag_negotiated & (1<<scmd_id(SCp)))
1445 && (slot->tag != SCSI_NO_TAG && SCp->cmnd[0] != REQUEST_SENSE &&
1446 slot->flags != NCR_700_FLAG_AUTOSENSE)) {
1447 count += scsi_populate_tag_msg(SCp, &hostdata->msgout[count]);
1450 if(hostdata->fast &&
1451 NCR_700_is_flag_clear(SCp->device, NCR_700_DEV_NEGOTIATED_SYNC)) {
1452 count += spi_populate_sync_msg(&hostdata->msgout[count],
1453 spi_period(SCp->device->sdev_target),
1454 spi_offset(SCp->device->sdev_target));
1455 NCR_700_set_flag(SCp->device, NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
1458 script_patch_16(hostdata->dev, hostdata->script, MessageCount, count);
1461 script_patch_ID(hostdata->dev, hostdata->script,
1462 Device_ID, 1<<scmd_id(SCp));
1464 script_patch_32_abs(hostdata->dev, hostdata->script, CommandAddress,
1465 slot->pCmd);
1466 script_patch_16(hostdata->dev, hostdata->script, CommandCount,
1467 SCp->cmd_len);
1468 /* finally plumb the beginning of the SG list into the script
1469 * */
1470 script_patch_32_abs(hostdata->dev, hostdata->script,
1471 SGScriptStartAddress, to32bit(&slot->pSG[0].ins));
1472 NCR_700_clear_fifo(SCp->device->host);
1474 if(slot->resume_offset == 0)
1475 slot->resume_offset = hostdata->pScript;
1476 /* now perform all the writebacks and invalidates */
1477 dma_cache_sync(hostdata->dev, hostdata->msgout, count, DMA_TO_DEVICE);
1478 dma_cache_sync(hostdata->dev, hostdata->msgin, MSG_ARRAY_SIZE,
1479 DMA_FROM_DEVICE);
1480 dma_cache_sync(hostdata->dev, SCp->cmnd, SCp->cmd_len, DMA_TO_DEVICE);
1481 dma_cache_sync(hostdata->dev, hostdata->status, 1, DMA_FROM_DEVICE);
1483 /* set the synchronous period/offset */
1484 NCR_700_writeb(NCR_700_get_SXFER(SCp->device),
1485 SCp->device->host, SXFER_REG);
1486 NCR_700_writel(slot->temp, SCp->device->host, TEMP_REG);
1487 NCR_700_writel(slot->resume_offset, SCp->device->host, DSP_REG);
1489 return 1;
1492 irqreturn_t
1493 NCR_700_intr(int irq, void *dev_id)
1495 struct Scsi_Host *host = (struct Scsi_Host *)dev_id;
1496 struct NCR_700_Host_Parameters *hostdata =
1497 (struct NCR_700_Host_Parameters *)host->hostdata[0];
1498 __u8 istat;
1499 __u32 resume_offset = 0;
1500 __u8 pun = 0xff, lun = 0xff;
1501 unsigned long flags;
1502 int handled = 0;
1504 /* Use the host lock to serialise access to the 53c700
1505 * hardware. Note: In future, we may need to take the queue
1506 * lock to enter the done routines. When that happens, we
1507 * need to ensure that for this driver, the host lock and the
1508 * queue lock point to the same thing. */
1509 spin_lock_irqsave(host->host_lock, flags);
1510 if((istat = NCR_700_readb(host, ISTAT_REG))
1511 & (SCSI_INT_PENDING | DMA_INT_PENDING)) {
1512 __u32 dsps;
1513 __u8 sstat0 = 0, dstat = 0;
1514 __u32 dsp;
1515 struct scsi_cmnd *SCp = hostdata->cmd;
1516 enum NCR_700_Host_State state;
1518 handled = 1;
1519 state = hostdata->state;
1520 SCp = hostdata->cmd;
1522 if(istat & SCSI_INT_PENDING) {
1523 udelay(10);
1525 sstat0 = NCR_700_readb(host, SSTAT0_REG);
1528 if(istat & DMA_INT_PENDING) {
1529 udelay(10);
1531 dstat = NCR_700_readb(host, DSTAT_REG);
1534 dsps = NCR_700_readl(host, DSPS_REG);
1535 dsp = NCR_700_readl(host, DSP_REG);
1537 DEBUG(("scsi%d: istat %02x sstat0 %02x dstat %02x dsp %04x[%08x] dsps 0x%x\n",
1538 host->host_no, istat, sstat0, dstat,
1539 (dsp - (__u32)(hostdata->pScript))/4,
1540 dsp, dsps));
1542 if(SCp != NULL) {
1543 pun = SCp->device->id;
1544 lun = SCp->device->lun;
1547 if(sstat0 & SCSI_RESET_DETECTED) {
1548 struct scsi_device *SDp;
1549 int i;
1551 hostdata->state = NCR_700_HOST_BUSY;
1553 printk(KERN_ERR "scsi%d: Bus Reset detected, executing command %p, slot %p, dsp %08x[%04x]\n",
1554 host->host_no, SCp, SCp == NULL ? NULL : SCp->host_scribble, dsp, dsp - hostdata->pScript);
1556 scsi_report_bus_reset(host, 0);
1558 /* clear all the negotiated parameters */
1559 __shost_for_each_device(SDp, host)
1560 NCR_700_clear_flag(SDp, ~0);
1562 /* clear all the slots and their pending commands */
1563 for(i = 0; i < NCR_700_COMMAND_SLOTS_PER_HOST; i++) {
1564 struct scsi_cmnd *SCp;
1565 struct NCR_700_command_slot *slot =
1566 &hostdata->slots[i];
1568 if(slot->state == NCR_700_SLOT_FREE)
1569 continue;
1571 SCp = slot->cmnd;
1572 printk(KERN_ERR " failing command because of reset, slot %p, cmnd %p\n",
1573 slot, SCp);
1574 free_slot(slot, hostdata);
1575 SCp->host_scribble = NULL;
1576 NCR_700_set_depth(SCp->device, 0);
1577 /* NOTE: deadlock potential here: we
1578 * rely on mid-layer guarantees that
1579 * scsi_done won't try to issue the
1580 * command again otherwise we'll
1581 * deadlock on the
1582 * hostdata->state_lock */
1583 SCp->result = DID_RESET << 16;
1584 SCp->scsi_done(SCp);
1586 mdelay(25);
1587 NCR_700_chip_setup(host);
1589 hostdata->state = NCR_700_HOST_FREE;
1590 hostdata->cmd = NULL;
1591 /* signal back if this was an eh induced reset */
1592 if(hostdata->eh_complete != NULL)
1593 complete(hostdata->eh_complete);
1594 goto out_unlock;
1595 } else if(sstat0 & SELECTION_TIMEOUT) {
1596 DEBUG(("scsi%d: (%d:%d) selection timeout\n",
1597 host->host_no, pun, lun));
1598 NCR_700_scsi_done(hostdata, SCp, DID_NO_CONNECT<<16);
1599 } else if(sstat0 & PHASE_MISMATCH) {
1600 struct NCR_700_command_slot *slot = (SCp == NULL) ? NULL :
1601 (struct NCR_700_command_slot *)SCp->host_scribble;
1603 if(dsp == Ent_SendMessage + 8 + hostdata->pScript) {
1604 /* It wants to reply to some part of
1605 * our message */
1606 #ifdef NCR_700_DEBUG
1607 __u32 temp = NCR_700_readl(host, TEMP_REG);
1608 int count = (hostdata->script[Ent_SendMessage/4] & 0xffffff) - ((NCR_700_readl(host, DBC_REG) & 0xffffff) + NCR_700_data_residual(host));
1609 printk("scsi%d (%d:%d) PHASE MISMATCH IN SEND MESSAGE %d remain, return %p[%04x], phase %s\n", host->host_no, pun, lun, count, (void *)temp, temp - hostdata->pScript, sbcl_to_string(NCR_700_readb(host, SBCL_REG)));
1610 #endif
1611 resume_offset = hostdata->pScript + Ent_SendMessagePhaseMismatch;
1612 } else if(dsp >= to32bit(&slot->pSG[0].ins) &&
1613 dsp <= to32bit(&slot->pSG[NCR_700_SG_SEGMENTS].ins)) {
1614 int data_transfer = NCR_700_readl(host, DBC_REG) & 0xffffff;
1615 int SGcount = (dsp - to32bit(&slot->pSG[0].ins))/sizeof(struct NCR_700_SG_List);
1616 int residual = NCR_700_data_residual(host);
1617 int i;
1618 #ifdef NCR_700_DEBUG
1619 __u32 naddr = NCR_700_readl(host, DNAD_REG);
1621 printk("scsi%d: (%d:%d) Expected phase mismatch in slot->SG[%d], transferred 0x%x\n",
1622 host->host_no, pun, lun,
1623 SGcount, data_transfer);
1624 scsi_print_command(SCp);
1625 if(residual) {
1626 printk("scsi%d: (%d:%d) Expected phase mismatch in slot->SG[%d], transferred 0x%x, residual %d\n",
1627 host->host_no, pun, lun,
1628 SGcount, data_transfer, residual);
1630 #endif
1631 data_transfer += residual;
1633 if(data_transfer != 0) {
1634 int count;
1635 __u32 pAddr;
1637 SGcount--;
1639 count = (bS_to_cpu(slot->SG[SGcount].ins) & 0x00ffffff);
1640 DEBUG(("DATA TRANSFER MISMATCH, count = %d, transferred %d\n", count, count-data_transfer));
1641 slot->SG[SGcount].ins &= bS_to_host(0xff000000);
1642 slot->SG[SGcount].ins |= bS_to_host(data_transfer);
1643 pAddr = bS_to_cpu(slot->SG[SGcount].pAddr);
1644 pAddr += (count - data_transfer);
1645 #ifdef NCR_700_DEBUG
1646 if(pAddr != naddr) {
1647 printk("scsi%d (%d:%d) transfer mismatch pAddr=%lx, naddr=%lx, data_transfer=%d, residual=%d\n", host->host_no, pun, lun, (unsigned long)pAddr, (unsigned long)naddr, data_transfer, residual);
1649 #endif
1650 slot->SG[SGcount].pAddr = bS_to_host(pAddr);
1652 /* set the executed moves to nops */
1653 for(i=0; i<SGcount; i++) {
1654 slot->SG[i].ins = bS_to_host(SCRIPT_NOP);
1655 slot->SG[i].pAddr = 0;
1657 dma_cache_sync(hostdata->dev, slot->SG, sizeof(slot->SG), DMA_TO_DEVICE);
1658 /* and pretend we disconnected after
1659 * the command phase */
1660 resume_offset = hostdata->pScript + Ent_MsgInDuringData;
1661 /* make sure all the data is flushed */
1662 NCR_700_flush_fifo(host);
1663 } else {
1664 __u8 sbcl = NCR_700_readb(host, SBCL_REG);
1665 printk(KERN_ERR "scsi%d: (%d:%d) phase mismatch at %04x, phase %s\n",
1666 host->host_no, pun, lun, dsp - hostdata->pScript, sbcl_to_string(sbcl));
1667 NCR_700_internal_bus_reset(host);
1668 }
1670 } else if(sstat0 & SCSI_GROSS_ERROR) {
1671 printk(KERN_ERR "scsi%d: (%d:%d) GROSS ERROR\n",
1672 host->host_no, pun, lun);
1673 NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16);
1674 } else if(sstat0 & PARITY_ERROR) {
1675 printk(KERN_ERR "scsi%d: (%d:%d) PARITY ERROR\n",
1676 host->host_no, pun, lun);
1677 NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16);
1678 } else if(dstat & SCRIPT_INT_RECEIVED) {
1679 DEBUG(("scsi%d: (%d:%d) ====>SCRIPT INTERRUPT<====\n",
1680 host->host_no, pun, lun));
1681 resume_offset = process_script_interrupt(dsps, dsp, SCp, host, hostdata);
1682 } else if(dstat & (ILGL_INST_DETECTED)) {
1683 printk(KERN_ERR "scsi%d: (%d:%d) Illegal Instruction detected at 0x%08x[0x%x]!!!\n"
1684 " Please email James.Bottomley@HansenPartnership.com with the details\n",
1685 host->host_no, pun, lun,
1686 dsp, dsp - hostdata->pScript);
1687 NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16);
1688 } else if(dstat & (WATCH_DOG_INTERRUPT|ABORTED)) {
1689 printk(KERN_ERR "scsi%d: (%d:%d) serious DMA problem, dstat=%02x\n",
1690 host->host_no, pun, lun, dstat);
1691 NCR_700_scsi_done(hostdata, SCp, DID_ERROR<<16);
1692 }
1695 /* NOTE: selection interrupt processing MUST occur
1696 * after script interrupt processing to correctly cope
1697 * with the case where we process a disconnect and
1698 * then get reselected before we process the
1699 * disconnection */
1700 if(sstat0 & SELECTED) {
1701 /* FIXME: It currently takes at least FOUR
1702 * interrupts to complete a command that
1703 * disconnects: one for the disconnect, one
1704 * for the reselection, one to get the
1705 * reselection data and one to complete the
1706 * command. If we guess the reselected
1707 * command here and prepare it, we only need
1708 * to get a reselection data interrupt if we
1709 * guessed wrongly. Since the interrupt
1710 * overhead is much greater than the command
1711 * setup, this would be an efficient
1712 * optimisation particularly as we probably
1713 * only have one outstanding command on a
1714 * target most of the time */
1716 resume_offset = process_selection(host, dsp);
1718 }
1722 if(resume_offset) {
1723 if(hostdata->state != NCR_700_HOST_BUSY) {
1724 printk(KERN_ERR "scsi%d: Driver error: resume at 0x%08x [0x%04x] with non busy host!\n",
1725 host->host_no, resume_offset, resume_offset - hostdata->pScript);
1726 hostdata->state = NCR_700_HOST_BUSY;
1727 }
1729 DEBUG(("Attempting to resume at %x\n", resume_offset));
1730 NCR_700_clear_fifo(host);
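/* Writing DSP below hands the scripts processor a new instruction
 * pointer, which is what actually restarts script execution at
 * resume_offset. */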
1731 NCR_700_writel(resume_offset, host, DSP_REG);
1732 }
1733 /* There is probably a technical no-no about this: If we're a
1734 * shared interrupt and we got this interrupt because the
1735 * other device needs servicing, not us, we're still going to
1736 * check our queued commands here---of course, there shouldn't
1737 * be any outstanding.... */
1738 if(hostdata->state == NCR_700_HOST_FREE) {
1739 int i;
1741 for(i = 0; i < NCR_700_COMMAND_SLOTS_PER_HOST; i++) {
1742 /* fairness: always run the queue from the last
1743 * position we left off */
1744 int j = (i + hostdata->saved_slot_position)
1745 % NCR_700_COMMAND_SLOTS_PER_HOST;
1747 if(hostdata->slots[j].state != NCR_700_SLOT_QUEUED)
1748 continue;
1749 if(NCR_700_start_command(hostdata->slots[j].cmnd)) {
1750 DEBUG(("scsi%d: Issuing saved command slot %p, cmd %p\t\n",
1751 host->host_no, &hostdata->slots[j],
1752 hostdata->slots[j].cmnd));
1753 hostdata->saved_slot_position = j + 1;
1754 }
1756 break;
1757 }
1758 }
1759 out_unlock:
1760 spin_unlock_irqrestore(host->host_lock, flags);
1761 return IRQ_RETVAL(handled);
1764 STATIC int
1765 NCR_700_queuecommand(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *))
1766 {
1767 struct NCR_700_Host_Parameters *hostdata =
1768 (struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0];
1769 __u32 move_ins;
1770 enum dma_data_direction direction;
1771 struct NCR_700_command_slot *slot;
1773 if(hostdata->command_slot_count >= NCR_700_COMMAND_SLOTS_PER_HOST) {
1774 /* We're over our allocation; this should never happen
1775 * since we report the max allocation to the mid layer */
1776 printk(KERN_WARNING "scsi%d: Command depth has gone over queue depth\n", SCp->device->host->host_no);
1777 return 1;
1778 }
1779 /* check for untagged commands. We cannot have any outstanding
1780 * commands if we accept them. Commands could be untagged because:
1782 * - The tag negotiated bitmap is clear
1783 * - The blk layer sent an untagged command
1784 */
1785 if(NCR_700_get_depth(SCp->device) != 0
1786 && (!(hostdata->tag_negotiated & (1<<scmd_id(SCp)))
1787 || !blk_rq_tagged(SCp->request))) {
1788 CDEBUG(KERN_ERR, SCp, "has non zero depth %d\n",
1789 NCR_700_get_depth(SCp->device));
1790 return SCSI_MLQUEUE_DEVICE_BUSY;
1791 }
1792 if(NCR_700_get_depth(SCp->device) >= SCp->device->queue_depth) {
1793 CDEBUG(KERN_ERR, SCp, "has max tag depth %d\n",
1794 NCR_700_get_depth(SCp->device));
1795 return SCSI_MLQUEUE_DEVICE_BUSY;
1796 }
1797 NCR_700_set_depth(SCp->device, NCR_700_get_depth(SCp->device) + 1);
1799 /* begin the command here */
1800 /* no need to check for NULL, test for command_slot_count above
1801 * ensures a slot is free */
1802 slot = find_empty_slot(hostdata);
1804 slot->cmnd = SCp;
1806 SCp->scsi_done = done;
1807 SCp->host_scribble = (unsigned char *)slot;
1808 SCp->SCp.ptr = NULL;
1809 SCp->SCp.buffer = NULL;
1811 #ifdef NCR_700_DEBUG
1812 printk("53c700: scsi%d, command ", SCp->device->host->host_no);
1813 scsi_print_command(SCp);
1814 #endif
1815 if(blk_rq_tagged(SCp->request)
1816 && (hostdata->tag_negotiated &(1<<scmd_id(SCp))) == 0
1817 && NCR_700_get_tag_neg_state(SCp->device) == NCR_700_START_TAG_NEGOTIATION) {
1818 scmd_printk(KERN_ERR, SCp, "Enabling Tag Command Queuing\n");
1819 hostdata->tag_negotiated |= (1<<scmd_id(SCp));
1820 NCR_700_set_tag_neg_state(SCp->device, NCR_700_DURING_TAG_NEGOTIATION);
1821 }
1823 /* here we may have to process an untagged command. The gate
1824 * above ensures that this will be the only one outstanding,
1825 * so clear the tag negotiated bit.
1827 * FIXME: This will royally screw up on multiple LUN devices
1828 * */
1829 if(!blk_rq_tagged(SCp->request)
1830 && (hostdata->tag_negotiated &(1<<scmd_id(SCp)))) {
1831 scmd_printk(KERN_INFO, SCp, "Disabling Tag Command Queuing\n");
1832 hostdata->tag_negotiated &= ~(1<<scmd_id(SCp));
1833 }
1835 if((hostdata->tag_negotiated &(1<<scmd_id(SCp)))
1836 && scsi_get_tag_type(SCp->device)) {
1837 slot->tag = SCp->request->tag;
1838 CDEBUG(KERN_DEBUG, SCp, "sending out tag %d, slot %p\n",
1839 slot->tag, slot);
1840 } else {
1841 slot->tag = SCSI_NO_TAG;
1842 /* must populate current_cmnd for scsi_find_tag to work */
1843 SCp->device->current_cmnd = SCp;
1844 }
1845 /* sanity check: some of the commands generated by the mid-layer
1846 * have an eccentric idea of their sc_data_direction */
1847 if(!SCp->use_sg && !SCp->request_bufflen
1848 && SCp->sc_data_direction != DMA_NONE) {
1849 #ifdef NCR_700_DEBUG
1850 printk("53c700: Command");
1851 scsi_print_command(SCp);
1852 printk("Has wrong data direction %d\n", SCp->sc_data_direction);
1853 #endif
1854 SCp->sc_data_direction = DMA_NONE;
1855 }
1857 switch (SCp->cmnd[0]) {
1858 case REQUEST_SENSE:
1859 /* clear the internal sense magic */
1860 SCp->cmnd[6] = 0;
1861 /* fall through */
1862 default:
1863 /* OK, get it from the command */
1864 switch(SCp->sc_data_direction) {
1865 case DMA_BIDIRECTIONAL:
1866 default:
1867 printk(KERN_ERR "53c700: Unknown command for data direction ");
1868 scsi_print_command(SCp);
1870 move_ins = 0;
1871 break;
1872 case DMA_NONE:
1873 move_ins = 0;
1874 break;
1875 case DMA_FROM_DEVICE:
1876 move_ins = SCRIPT_MOVE_DATA_IN;
1877 break;
1878 case DMA_TO_DEVICE:
1879 move_ins = SCRIPT_MOVE_DATA_OUT;
1880 break;
1881 }
1882 }
1884 /* now build the scatter gather list */
1885 direction = SCp->sc_data_direction;
1886 if(move_ins != 0) {
1887 int i;
1888 int sg_count;
1889 dma_addr_t vPtr = 0;
1890 __u32 count = 0;
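/* Build the slot's SG list as scripts instructions: each entry is
 * a MOVE with the byte count folded into the low 24 bits of the
 * opcode and the bus address in pAddr, terminated by a
 * SCRIPT_RETURN entry that appears to hand control back to the
 * main scripts once every move has been executed. */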
1892 if(SCp->use_sg) {
1893 sg_count = dma_map_sg(hostdata->dev,
1894 SCp->request_buffer, SCp->use_sg,
1895 direction);
1896 } else {
1897 vPtr = dma_map_single(hostdata->dev,
1898 SCp->request_buffer,
1899 SCp->request_bufflen,
1900 direction);
1901 count = SCp->request_bufflen;
1902 slot->dma_handle = vPtr;
1903 sg_count = 1;
1904 }
1907 for(i = 0; i < sg_count; i++) {
1909 if(SCp->use_sg) {
1910 struct scatterlist *sg = SCp->request_buffer;
1912 vPtr = sg_dma_address(&sg[i]);
1913 count = sg_dma_len(&sg[i]);
1914 }
1916 slot->SG[i].ins = bS_to_host(move_ins | count);
1917 DEBUG((" scatter block %d: move %d[%08x] from 0x%lx\n",
1918 i, count, slot->SG[i].ins, (unsigned long)vPtr));
1919 slot->SG[i].pAddr = bS_to_host(vPtr);
1920 }
1921 slot->SG[i].ins = bS_to_host(SCRIPT_RETURN);
1922 slot->SG[i].pAddr = 0;
1923 dma_cache_sync(hostdata->dev, slot->SG, sizeof(slot->SG), DMA_TO_DEVICE);
1924 DEBUG((" SETTING %08lx to %x\n",
1925 (&slot->pSG[i].ins),
1926 slot->SG[i].ins));
1927 }
1928 slot->resume_offset = 0;
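/* Map the CDB itself so the scripts engine can transfer it to the
 * target during the command phase. */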
1929 slot->pCmd = dma_map_single(hostdata->dev, SCp->cmnd,
1930 sizeof(SCp->cmnd), DMA_TO_DEVICE);
1931 NCR_700_start_command(SCp);
1932 return 0;
1933 }
1935 STATIC int
1936 NCR_700_abort(struct scsi_cmnd * SCp)
1937 {
1938 struct NCR_700_command_slot *slot;
1940 scmd_printk(KERN_INFO, SCp,
1941 "New error handler wants to abort command\n\t");
1942 scsi_print_command(SCp);
1944 slot = (struct NCR_700_command_slot *)SCp->host_scribble;
1946 if(slot == NULL)
1947 /* no outstanding command to abort */
1948 return SUCCESS;
1949 if(SCp->cmnd[0] == TEST_UNIT_READY) {
1950 /* FIXME: This is because of a problem in the new
1951 * error handler. When it is in error recovery, it
1952 * will send a TUR to a device it thinks may still be
1953 * showing a problem. If the TUR isn't responded to,
1954 * it will abort it and mark the device off line.
1955 * Unfortunately, it does no other error recovery, so
1956 * this would leave us with an outstanding command
1957 * occupying a slot. Rather than allow this to
1958 * happen, we issue a bus reset to force all
1959 * outstanding commands to terminate here. */
1960 NCR_700_internal_bus_reset(SCp->device->host);
1961 /* still drop through and return failed */
1962 }
1963 return FAILED;
1964 }
1967 STATIC int
1968 NCR_700_bus_reset(struct scsi_cmnd * SCp)
1969 {
1970 DECLARE_COMPLETION_ONSTACK(complete);
1971 struct NCR_700_Host_Parameters *hostdata =
1972 (struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0];
1974 scmd_printk(KERN_INFO, SCp,
1975 "New error handler wants BUS reset, cmd %p\n\t", SCp);
1976 scsi_print_command(SCp);
1978 /* In theory, eh_complete should always be null because the
1979 * eh is single threaded, but just in case we're handling a
1980 * reset via sg or something */
1981 spin_lock_irq(SCp->device->host->host_lock);
1982 while (hostdata->eh_complete != NULL) {
1983 spin_unlock_irq(SCp->device->host->host_lock);
1984 msleep_interruptible(100);
1985 spin_lock_irq(SCp->device->host->host_lock);
1986 }
1988 hostdata->eh_complete = &complete;
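/* The interrupt handler completes eh_complete from its bus reset
 * path (see the RESET handling in NCR_700_intr), so the
 * wait_for_completion() below does not return until the reset has
 * been fully processed and all outstanding commands failed back. */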
1989 NCR_700_internal_bus_reset(SCp->device->host);
1991 spin_unlock_irq(SCp->device->host->host_lock);
1992 wait_for_completion(&complete);
1993 spin_lock_irq(SCp->device->host->host_lock);
1995 hostdata->eh_complete = NULL;
1996 /* Revalidate the transport parameters of the failing device */
1997 if(hostdata->fast)
1998 spi_schedule_dv_device(SCp->device);
2000 spin_unlock_irq(SCp->device->host->host_lock);
2001 return SUCCESS;
2002 }
2004 STATIC int
2005 NCR_700_host_reset(struct scsi_cmnd * SCp)
2006 {
2007 scmd_printk(KERN_INFO, SCp, "New error handler wants HOST reset\n\t");
2008 scsi_print_command(SCp);
2010 spin_lock_irq(SCp->device->host->host_lock);
2012 NCR_700_internal_bus_reset(SCp->device->host);
2013 NCR_700_chip_reset(SCp->device->host);
2015 spin_unlock_irq(SCp->device->host->host_lock);
2017 return SUCCESS;
2018 }
2020 STATIC void
2021 NCR_700_set_period(struct scsi_target *STp, int period)
2022 {
2023 struct Scsi_Host *SHp = dev_to_shost(STp->dev.parent);
2024 struct NCR_700_Host_Parameters *hostdata =
2025 (struct NCR_700_Host_Parameters *)SHp->hostdata[0];
2027 if(!hostdata->fast)
2028 return;
2030 if(period < hostdata->min_period)
2031 period = hostdata->min_period;
2033 spi_period(STp) = period;
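/* Clearing the negotiated/begin flags should force a fresh
 * synchronous negotiation, using the new period, on the next
 * command to this target; the PRINT flag makes the driver report
 * the agreed parameters when that happens. */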
2034 spi_flags(STp) &= ~(NCR_700_DEV_NEGOTIATED_SYNC |
2035 NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
2036 spi_flags(STp) |= NCR_700_DEV_PRINT_SYNC_NEGOTIATION;
2037 }
2039 STATIC void
2040 NCR_700_set_offset(struct scsi_target *STp, int offset)
2041 {
2042 struct Scsi_Host *SHp = dev_to_shost(STp->dev.parent);
2043 struct NCR_700_Host_Parameters *hostdata =
2044 (struct NCR_700_Host_Parameters *)SHp->hostdata[0];
2045 int max_offset = hostdata->chip710
2046 ? NCR_710_MAX_OFFSET : NCR_700_MAX_OFFSET;
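/* Clamp the requested offset to whichever limit applies to the
 * chip variant actually fitted (700 vs 710). */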
2048 if(!hostdata->fast)
2049 return;
2051 if(offset > max_offset)
2052 offset = max_offset;
2054 /* if we're currently async, make sure the period is reasonable */
2055 if(spi_offset(STp) == 0 && (spi_period(STp) < hostdata->min_period ||
2056 spi_period(STp) > 0xff))
2057 spi_period(STp) = hostdata->min_period;
2059 spi_offset(STp) = offset;
2060 spi_flags(STp) &= ~(NCR_700_DEV_NEGOTIATED_SYNC |
2061 NCR_700_DEV_BEGIN_SYNC_NEGOTIATION);
2062 spi_flags(STp) |= NCR_700_DEV_PRINT_SYNC_NEGOTIATION;
2063 }
2065 STATIC int
2066 NCR_700_slave_alloc(struct scsi_device *SDp)
2067 {
2068 SDp->hostdata = kzalloc(sizeof(struct NCR_700_Device_Parameters),
2069 GFP_KERNEL);
2071 if (!SDp->hostdata)
2072 return -ENOMEM;
2074 return 0;
2075 }
2077 STATIC int
2078 NCR_700_slave_configure(struct scsi_device *SDp)
2079 {
2080 struct NCR_700_Host_Parameters *hostdata =
2081 (struct NCR_700_Host_Parameters *)SDp->host->hostdata[0];
2083 /* to do here: allocate memory; build a queue_full list */
2084 if(SDp->tagged_supported) {
2085 scsi_set_tag_type(SDp, MSG_ORDERED_TAG);
2086 scsi_activate_tcq(SDp, NCR_700_DEFAULT_TAGS);
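/* Flagging the device as starting tag negotiation makes
 * queuecommand enable tagged queuing for the target the first
 * time it sees a tagged command (see NCR_700_queuecommand above). */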
2087 NCR_700_set_tag_neg_state(SDp, NCR_700_START_TAG_NEGOTIATION);
2088 } else {
2089 /* initialise to default depth */
2090 scsi_adjust_queue_depth(SDp, 0, SDp->host->cmd_per_lun);
2091 }
2092 if(hostdata->fast) {
2093 /* Find the correct offset and period via domain validation */
2094 if (!spi_initial_dv(SDp->sdev_target))
2095 spi_dv_device(SDp);
2096 } else {
2097 spi_offset(SDp->sdev_target) = 0;
2098 spi_period(SDp->sdev_target) = 0;
2099 }
2100 return 0;
2101 }
2103 STATIC void
2104 NCR_700_slave_destroy(struct scsi_device *SDp)
2105 {
2106 kfree(SDp->hostdata);
2107 SDp->hostdata = NULL;
2108 }
2110 static int
2111 NCR_700_change_queue_depth(struct scsi_device *SDp, int depth)
2112 {
2113 if (depth > NCR_700_MAX_TAGS)
2114 depth = NCR_700_MAX_TAGS;
2116 scsi_adjust_queue_depth(SDp, scsi_get_tag_type(SDp), depth);
2117 return depth;
2118 }
2120 static int NCR_700_change_queue_type(struct scsi_device *SDp, int tag_type)
2121 {
2122 int change_tag = ((tag_type ==0 && scsi_get_tag_type(SDp) != 0)
2123 || (tag_type != 0 && scsi_get_tag_type(SDp) == 0));
2124 struct NCR_700_Host_Parameters *hostdata =
2125 (struct NCR_700_Host_Parameters *)SDp->host->hostdata[0];
2127 scsi_set_tag_type(SDp, tag_type);
2129 /* We have a global (per target) flag to track whether TCQ is
2130 * enabled, so we'll be turning it off for the entire target here.
2131 * Our tag algorithm will fail if we mix tagged and untagged commands,
2132 * so quiesce the device before doing this */
2133 if (change_tag)
2134 scsi_target_quiesce(SDp->sdev_target);
2136 if (!tag_type) {
2137 /* shift back to the default unqueued number of commands
2138 * (the user can still raise this) */
2139 scsi_deactivate_tcq(SDp, SDp->host->cmd_per_lun);
2140 hostdata->tag_negotiated &= ~(1 << sdev_id(SDp));
2141 } else {
2142 /* Here, we cleared the negotiation flag above, so this
2143 * will force the driver to renegotiate */
2144 scsi_activate_tcq(SDp, SDp->queue_depth);
2145 if (change_tag)
2146 NCR_700_set_tag_neg_state(SDp, NCR_700_START_TAG_NEGOTIATION);
2147 }
2148 if (change_tag)
2149 scsi_target_resume(SDp->sdev_target);
2151 return tag_type;
2152 }
2154 static ssize_t
2155 NCR_700_show_active_tags(struct device *dev, struct device_attribute *attr, char *buf)
2156 {
2157 struct scsi_device *SDp = to_scsi_device(dev);
2159 return snprintf(buf, 20, "%d\n", NCR_700_get_depth(SDp));
2160 }
2162 static struct device_attribute NCR_700_active_tags_attr = {
2163 .attr = {
2164 .name = "active_tags",
2165 .mode = S_IRUGO,
2166 },
2167 .show = NCR_700_show_active_tags,
2168 };
2170 STATIC struct device_attribute *NCR_700_dev_attrs[] = {
2171 &NCR_700_active_tags_attr,
2172 NULL,
2173 };
2175 EXPORT_SYMBOL(NCR_700_detect);
2176 EXPORT_SYMBOL(NCR_700_release);
2177 EXPORT_SYMBOL(NCR_700_intr);
2179 static struct spi_function_template NCR_700_transport_functions = {
2180 .set_period = NCR_700_set_period,
2181 .show_period = 1,
2182 .set_offset = NCR_700_set_offset,
2183 .show_offset = 1,
2184 };
2186 static int __init NCR_700_init(void)
2187 {
2188 NCR_700_transport_template = spi_attach_transport(&NCR_700_transport_functions);
2189 if(!NCR_700_transport_template)
2190 return -ENODEV;
2191 return 0;
2192 }
2194 static void __exit NCR_700_exit(void)
2195 {
2196 spi_release_transport(NCR_700_transport_template);
2197 }
2199 module_init(NCR_700_init);
2200 module_exit(NCR_700_exit);