/***************************************************************************
                          dpti.c  -  description
                             -------------------
    begin                : Thu Sep 7 2000
    copyright            : (C) 2000 by Adaptec

			   July 30, 2001 First version being submitted
			   for inclusion in the kernel.  V2.4

    See Documentation/scsi/dpti.txt for history, notes, license info
    and credits
 ***************************************************************************/

/***************************************************************************
 *                                                                         *
 *   This program is free software; you can redistribute it and/or modify *
 *   it under the terms of the GNU General Public License as published by *
 *   the Free Software Foundation; either version 2 of the License, or    *
 *   (at your option) any later version.                                  *
 *                                                                         *
 ***************************************************************************/
/***************************************************************************
 * Sat Dec 20 2003 Go Taniguchi <go@turbolinux.co.jp>
 - Support 2.6 kernel and DMA-mapping
 - ioctl fix for raid tools
 - use schedule_timeout in long long loop
 **************************************************************************/

/*#define DEBUG 1 */
/*#define UARTDELAY 1 */

#include <linux/module.h>

MODULE_AUTHOR("Deanna Bonds, with _lots_ of help from Mark Salyzyn");
MODULE_DESCRIPTION("Adaptec I2O RAID Driver");

////////////////////////////////////////////////////////////////

#include <linux/ioctl.h>	/* For SCSI-Passthrough */
#include <linux/uaccess.h>

#include <linux/stat.h>
#include <linux/slab.h>		/* for kmalloc() */
#include <linux/pci.h>		/* for PCI support */
#include <linux/proc_fs.h>
#include <linux/blkdev.h>
#include <linux/delay.h>	/* for udelay */
#include <linux/interrupt.h>
#include <linux/kernel.h>	/* for printk */
#include <linux/sched.h>
#include <linux/reboot.h>
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>

#include <linux/timer.h>
#include <linux/string.h>
#include <linux/ioport.h>
#include <linux/mutex.h>

#include <asm/processor.h>	/* for boot_cpu_data */
#include <asm/pgtable.h>
#include <asm/io.h>		/* for virt_to_bus, etc. */

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>

#include "dpt/dptsig.h"
#include "dpti.h"

/*============================================================================
 * Create a binary signature - this is read by dptsig
 * Needed for our management apps
 *============================================================================
 */
static DEFINE_MUTEX(adpt_mutex);
static dpt_sig_S DPTI_sig = {
	{'d', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION,
#ifdef __i386__
	PROC_INTEL, PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM,
#elif defined(__ia64__)
	PROC_INTEL, PROC_IA64,
#elif defined(__sparc__)
	PROC_ULTRASPARC, PROC_ULTRASPARC,
#elif defined(__alpha__)
	PROC_ALPHA, PROC_ALPHA,
#else
	(-1),(-1),
#endif
	FT_HBADRVR, 0, OEM_DPT, OS_LINUX, CAP_OVERLAP, DEV_ALL,
	ADF_ALL_SC5, 0, 0, DPT_VERSION, DPT_REVISION, DPT_SUBREVISION,
	DPT_MONTH, DPT_DAY, DPT_YEAR, "Adaptec Linux I2O RAID Driver"
};

/*============================================================================
 * Globals
 *============================================================================
 */

static DEFINE_MUTEX(adpt_configuration_lock);

static struct i2o_sys_tbl *sys_tbl;
static dma_addr_t sys_tbl_pa;
static int sys_tbl_ind;
static int sys_tbl_len;

static adpt_hba* hba_chain = NULL;
static int hba_count = 0;

static struct class *adpt_sysfs_class;

static long adpt_unlocked_ioctl(struct file *, unsigned int, unsigned long);
#ifdef CONFIG_COMPAT
static long compat_adpt_ioctl(struct file *, unsigned int, unsigned long);
#endif

static const struct file_operations adpt_fops = {
	.unlocked_ioctl	= adpt_unlocked_ioctl,
	.open		= adpt_open,
	.release	= adpt_close,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= compat_adpt_ioctl,
#endif
	.llseek		= noop_llseek,
};

/* Structures and definitions for synchronous message posting.
 * See adpt_i2o_post_wait() for description
 * */
struct adpt_i2o_post_wait_data
{
	int status;
	u32 id;
	adpt_wait_queue_head_t *wq;
	struct adpt_i2o_post_wait_data *next;
};

static struct adpt_i2o_post_wait_data *adpt_post_wait_queue = NULL;
static u32 adpt_post_wait_id = 0;
static DEFINE_SPINLOCK(adpt_post_wait_lock);


/*============================================================================
 * 				Functions
 *============================================================================
 */

static inline int dpt_dma64(adpt_hba *pHba)
{
	return (sizeof(dma_addr_t) > 4 && (pHba)->dma64);
}

static inline u32 dma_high(dma_addr_t addr)
{
	return upper_32_bits(addr);
}

static inline u32 dma_low(dma_addr_t addr)
{
	return (u32)addr;
}
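
/*
 * Note: I2O message frames are arrays of 32-bit words, so a 64-bit
 * dma_addr_t has to be split into two words before it is written into
 * an SGL.  dma_low()/dma_high() above provide the two halves, and
 * dpt_dma64() says whether the 64-bit SGL format can be used at all
 * (see the SGL construction in adpt_inquiry() below).
 */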
static u8 adpt_read_blink_led(adpt_hba* host)
{
	if (host->FwDebugBLEDflag_P) {
		if( readb(host->FwDebugBLEDflag_P) == 0xbc ){
			return readb(host->FwDebugBLEDvalue_P);
		}
	}
	return 0;
}

/*============================================================================
 * Scsi host template interface functions
 *============================================================================
 */

#ifdef MODULE
static struct pci_device_id dptids[] = {
	{ PCI_DPT_VENDOR_ID, PCI_DPT_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
	{ PCI_DPT_VENDOR_ID, PCI_DPT_RAPTOR_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
	{ 0, }
};
#endif

MODULE_DEVICE_TABLE(pci,dptids);

static int adpt_detect(struct scsi_host_template* sht)
{
	struct pci_dev *pDev = NULL;
	adpt_hba *pHba;
	adpt_hba *next;

	PINFO("Detecting Adaptec I2O RAID controllers...\n");

	/* search for all Adaptec I2O RAID cards */
	while ((pDev = pci_get_device( PCI_DPT_VENDOR_ID, PCI_ANY_ID, pDev))) {
		if(pDev->device == PCI_DPT_DEVICE_ID ||
		   pDev->device == PCI_DPT_RAPTOR_DEVICE_ID){
			if(adpt_install_hba(sht, pDev) ){
				PERROR("Could not Init an I2O RAID device\n");
				PERROR("Will not try to detect others.\n");
				return hba_count-1;
			}
			pci_dev_get(pDev);
		}
	}

	/* In INIT state, Activate IOPs */
	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		// Activate does get status, init outbound, and get hrt
		if (adpt_i2o_activate_hba(pHba) < 0) {
			adpt_i2o_delete_hba(pHba);
		}
	}

	/* Active IOPs in HOLD state */

rebuild_sys_tab:
	if (hba_chain == NULL)
		return 0;

	/*
	 * If build_sys_table fails, we kill everything and bail
	 * as we can't init the IOPs w/o a system table
	 */
	if (adpt_i2o_build_sys_table() < 0) {
		adpt_i2o_sys_shutdown();
		return 0;
	}

	PDEBUG("HBA's in HOLD state\n");

	/* If an IOP doesn't come online, we need to rebuild the System table */
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (adpt_i2o_online_hba(pHba) < 0) {
			adpt_i2o_delete_hba(pHba);
			goto rebuild_sys_tab;
		}
	}

	/* Active IOPs now in OPERATIONAL state */
	PDEBUG("HBA's in OPERATIONAL state\n");

	printk("dpti: If you have a lot of devices this could take a few minutes.\n");
	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		printk(KERN_INFO"%s: Reading the hardware resource table.\n", pHba->name);
		if (adpt_i2o_lct_get(pHba) < 0){
			adpt_i2o_delete_hba(pHba);
			continue;
		}

		if (adpt_i2o_parse_lct(pHba) < 0){
			adpt_i2o_delete_hba(pHba);
			continue;
		}
		adpt_inquiry(pHba);
	}

	adpt_sysfs_class = class_create(THIS_MODULE, "dpt_i2o");
	if (IS_ERR(adpt_sysfs_class)) {
		printk(KERN_WARNING"dpti: unable to create dpt_i2o class\n");
		adpt_sysfs_class = NULL;
	}

	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		if (adpt_scsi_host_alloc(pHba, sht) < 0){
			adpt_i2o_delete_hba(pHba);
			continue;
		}
		pHba->initialized = TRUE;
		pHba->state &= ~DPTI_STATE_RESET;
		if (adpt_sysfs_class) {
			struct device *dev = device_create(adpt_sysfs_class,
				NULL, MKDEV(DPTI_I2O_MAJOR, pHba->unit), NULL,
				"dpti%d", pHba->unit);
			if (IS_ERR(dev)) {
				printk(KERN_WARNING"dpti%d: unable to "
					"create device in dpt_i2o class\n",
					pHba->unit);
			}
		}
	}

	// Register our control device node
	// nodes will need to be created in /dev to access this
	// the nodes can not be created from within the driver
	if (hba_count && register_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER, &adpt_fops)) {
		adpt_i2o_sys_shutdown();
		return 0;
	}
	return hba_count;
}


static void adpt_release(adpt_hba *pHba)
{
	struct Scsi_Host *shost = pHba->host;

	scsi_remove_host(shost);
//	adpt_i2o_quiesce_hba(pHba);
	adpt_i2o_delete_hba(pHba);
	scsi_host_put(shost);
}


static void adpt_inquiry(adpt_hba* pHba)
{
	u32 msg[17];
	u32 *mptr;
	u32 *lenptr;
	int direction;
	int scsidir;
	u32 len;
	u32 reqlen;
	u8* buf;
	dma_addr_t addr;
	u8 scb[16];
	s32 rcode;

	memset(msg, 0, sizeof(msg));
	buf = dma_alloc_coherent(&pHba->pDev->dev, 80, &addr, GFP_KERNEL);
	if(!buf){
		printk(KERN_ERR"%s: Could not allocate buffer\n",pHba->name);
		return;
	}
	memset((void*)buf, 0, 36);

	len = 36;
	direction = 0x00000000;
	scsidir = 0x40000000;	// DATA IN  (iop<--dev)

	if (dpt_dma64(pHba))
		reqlen = 17;		// SINGLE SGE, 64 bit
	else
		reqlen = 14;		// SINGLE SGE, 32 bit
	/* Stick the headers on */
	msg[0] = reqlen<<16 | SGL_OFFSET_12;
	msg[1] = (0xff<<24|HOST_TID<<12|ADAPTER_TID);
	msg[2] = 0;
	msg[3] = 0;
	// Adaptec/DPT Private stuff
	msg[4] = I2O_CMD_SCSI_EXEC|DPT_ORGANIZATION_ID<<16;
	msg[5] = ADAPTER_TID | 1<<16 /* Interpret*/;
	/* Direction, disconnect ok | sense data | simple queue , CDBLen */
	// I2O_SCB_FLAG_ENABLE_DISCONNECT |
	// I2O_SCB_FLAG_SIMPLE_QUEUE_TAG |
	// I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
	msg[6] = scsidir|0x20a00000| 6 /* cmd len*/;

	mptr=msg+7;

	memset(scb, 0, sizeof(scb));
	// Write SCSI command into the message - always 16 byte block
	scb[0] = INQUIRY;
	scb[1] = 0;
	scb[2] = 0;
	scb[3] = 0;
	scb[4] = 36;
	scb[5] = 0;
	// Don't care about the rest of scb

	memcpy(mptr, scb, sizeof(scb));
	mptr+=4;
	lenptr=mptr++;		/* Remember me - fill in when we know */

	/* Now fill in the SGList and command */
	*lenptr = len;
	if (dpt_dma64(pHba)) {
		*mptr++ = (0x7C<<24)+(2<<16)+0x02; /* Enable 64 bit */
		*mptr++ = 1 << PAGE_SHIFT;
		*mptr++ = 0xD0000000|direction|len;
		*mptr++ = dma_low(addr);
		*mptr++ = dma_high(addr);
	} else {
		*mptr++ = 0xD0000000|direction|len;
		*mptr++ = addr;
	}

	// Send it on its way
	rcode = adpt_i2o_post_wait(pHba, msg, reqlen<<2, 120);
	if (rcode != 0) {
		sprintf(pHba->detail, "Adaptec I2O RAID");
		printk(KERN_INFO "%s: Inquiry Error (%d)\n",pHba->name,rcode);
		if (rcode != -ETIME && rcode != -EINTR)
			dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
	} else {
		memset(pHba->detail, 0, sizeof(pHba->detail));
		memcpy(&(pHba->detail), "Vendor: Adaptec ", 16);
		memcpy(&(pHba->detail[16]), " Model: ", 8);
		memcpy(&(pHba->detail[24]), (u8*) &buf[16], 16);
		memcpy(&(pHba->detail[40]), " FW: ", 4);
		memcpy(&(pHba->detail[44]), (u8*) &buf[32], 4);
		pHba->detail[48] = '\0';	/* precautionary */
		dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
	}
	adpt_i2o_status_get(pHba);
	return ;
}


static int adpt_slave_configure(struct scsi_device * device)
{
	struct Scsi_Host *host = device->host;
	adpt_hba* pHba;

	pHba = (adpt_hba *) host->hostdata[0];

	if (host->can_queue && device->tagged_supported) {
		scsi_change_queue_depth(device,
				host->can_queue - 1);
	}
	return 0;
}

static int adpt_queue_lck(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd *))
{
	adpt_hba* pHba = NULL;
	struct adpt_device* pDev = NULL;	/* dpt per device information */

	cmd->scsi_done = done;
	/*
	 * SCSI REQUEST_SENSE commands will be executed automatically by the
	 * Host Adapter for any errors, so they should not be executed
	 * explicitly unless the Sense Data is zero indicating that no error
	 * occurred.
	 */

	if ((cmd->cmnd[0] == REQUEST_SENSE) && (cmd->sense_buffer[0] != 0)) {
		cmd->result = (DID_OK << 16);
		cmd->scsi_done(cmd);
		return 0;
	}

	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
	if (!pHba) {
		return FAILED;
	}

	rmb();
	if ((pHba->state) & DPTI_STATE_RESET)
		return SCSI_MLQUEUE_HOST_BUSY;

	// TODO if the cmd->device is offline then I may need to issue a bus rescan
	// followed by a get_lct to see if the device is there anymore
	if((pDev = (struct adpt_device*) (cmd->device->hostdata)) == NULL) {
		/*
		 * First command request for this device.  Set up a pointer
		 * to the device structure.  This should be a TEST_UNIT_READY
		 * command from scan_scsis_single.
		 */
		if ((pDev = adpt_find_device(pHba, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun)) == NULL) {
			// TODO: if any luns are at this bus, scsi id then fake a TEST_UNIT_READY and INQUIRY response
			// with type 7F (for all luns less than the max for this bus,id) so the lun scan will continue.
			cmd->result = (DID_NO_CONNECT << 16);
			cmd->scsi_done(cmd);
			return 0;
		}
		cmd->device->hostdata = pDev;
	}
	pDev->pScsi_dev = cmd->device;

	/*
	 * If we are being called from when the device is being reset,
	 * delay processing of the command until later.
	 */
	if (pDev->state & DPTI_DEV_RESET ) {
		return FAILED;
	}
	return adpt_scsi_to_i2o(pHba, cmd, pDev);
}

static DEF_SCSI_QCMD(adpt_queue)
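
/*
 * DEF_SCSI_QCMD() generates the adpt_queue() entry point as a wrapper
 * that takes the host lock and then calls adpt_queue_lck() above, so
 * the _lck variant always runs with the lock held.
 */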
static int adpt_bios_param(struct scsi_device *sdev, struct block_device *dev,
		sector_t capacity, int geom[])
{
	int heads=-1;
	int sectors=-1;
	int cylinders=-1;

	// *** First lets set the default geometry ****

	// If the capacity is less than 0x2000
	if (capacity < 0x2000 ) {	// floppy
		heads = 18;
		sectors = 2;
	}
	// else if between 0x2000 and 0x20000
	else if (capacity < 0x20000) {
		heads = 64;
		sectors = 32;
	}
	// else if between 0x20000 and 0x40000
	else if (capacity < 0x40000) {
		heads = 65;
		sectors = 63;
	}
	// else if between 0x40000 and 0x80000
	else if (capacity < 0x80000) {
		heads = 128;
		sectors = 63;
	}
	// else if greater than 0x80000
	else {
		heads = 255;
		sectors = 63;
	}
	cylinders = sector_div(capacity, heads * sectors);

	// Special case if CDROM
	if(sdev->type == 5) {	// CDROM
		heads = 252;
		sectors = 63;
		cylinders = 1111;
	}

	geom[0] = heads;
	geom[1] = sectors;
	geom[2] = cylinders;

	PDEBUG("adpt_bios_param: exit\n");
	return 0;
}


static const char *adpt_info(struct Scsi_Host *host)
{
	adpt_hba* pHba;

	pHba = (adpt_hba *) host->hostdata[0];
	return (char *) (pHba->detail);
}


static int adpt_show_info(struct seq_file *m, struct Scsi_Host *host)
{
	struct adpt_device* d;
	int id;
	int chan;
	adpt_hba* pHba;
	int unit;

	// Find HBA (host bus adapter) we are looking for
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->host == host) {
			break;	/* found adapter */
		}
	}
	mutex_unlock(&adpt_configuration_lock);
	if (pHba == NULL) {
		return 0;
	}
	host = pHba->host;

	seq_printf(m, "Adaptec I2O RAID Driver Version: %s\n\n", DPT_I2O_VERSION);
	seq_printf(m, "%s\n", pHba->detail);
	seq_printf(m, "SCSI Host=scsi%d  Control Node=/dev/%s  irq=%d\n",
			pHba->host->host_no, pHba->name, host->irq);
	seq_printf(m, "\tpost fifo size  = %d\n\treply fifo size = %d\n\tsg table size   = %d\n\n",
			host->can_queue, (int) pHba->reply_fifo_size , host->sg_tablesize);

	seq_puts(m, "Devices:\n");
	for(chan = 0; chan < MAX_CHANNEL; chan++) {
		for(id = 0; id < MAX_ID; id++) {
			d = pHba->channel[chan].device[id];
			while(d) {
				seq_printf(m,"\t%-24.24s", d->pScsi_dev->vendor);
				seq_printf(m," Rev: %-8.8s\n", d->pScsi_dev->rev);

				unit = d->pI2o_dev->lct_data.tid;
				seq_printf(m, "\tTID=%d, (Channel=%d, Target=%d, Lun=%llu)  (%s)\n\n",
					       unit, (int)d->scsi_channel, (int)d->scsi_id, d->scsi_lun,
					       scsi_device_online(d->pScsi_dev)? "online":"offline");
				d = d->next_lun;
			}
		}
	}
	return 0;
}

/*
 *	Turn a struct scsi_cmnd * into a unique 32 bit 'context'.
 */
static u32 adpt_cmd_to_context(struct scsi_cmnd *cmd)
{
	return (u32)cmd->serial_number;
}
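
/*
 * The command's serial number doubles as the I2O reply context;
 * adpt_cmd_from_context() below recovers the scsi_cmnd by walking
 * every device's command list looking for a matching serial number.
 */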
/*
 *	Go from a u32 'context' to a struct scsi_cmnd * .
 *	This could probably be made more efficient.
 */
static struct scsi_cmnd *
	adpt_cmd_from_context(adpt_hba * pHba, u32 context)
{
	struct scsi_cmnd * cmd;
	struct scsi_device * d;

	if (context == 0)
		return NULL;

	spin_unlock(pHba->host->host_lock);
	shost_for_each_device(d, pHba->host) {
		unsigned long flags;
		spin_lock_irqsave(&d->list_lock, flags);
		list_for_each_entry(cmd, &d->cmd_list, list) {
			if (((u32)cmd->serial_number == context)) {
				spin_unlock_irqrestore(&d->list_lock, flags);
				scsi_device_put(d);
				spin_lock(pHba->host->host_lock);
				return cmd;
			}
		}
		spin_unlock_irqrestore(&d->list_lock, flags);
	}
	spin_lock(pHba->host->host_lock);

	return NULL;
}

/*
 *	Turn a pointer to ioctl reply data into an u32 'context'
 */
static u32 adpt_ioctl_to_context(adpt_hba * pHba, void *reply)
{
#if BITS_PER_LONG == 32
	return (u32)(unsigned long)reply;
#else
	ulong flags = 0;
	u32 nr, i;

	spin_lock_irqsave(pHba->host->host_lock, flags);
	nr = ARRAY_SIZE(pHba->ioctl_reply_context);
	for (i = 0; i < nr; i++) {
		if (pHba->ioctl_reply_context[i] == NULL) {
			pHba->ioctl_reply_context[i] = reply;
			break;
		}
	}
	spin_unlock_irqrestore(&pHba->host->host_lock, flags);
	if (i >= nr) {
		printk(KERN_WARNING"%s: Too many outstanding "
				"ioctl commands\n", pHba->name);
		return (u32)-1;
	}

	return i;
#endif
}
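
/*
 * On 32-bit kernels the reply pointer itself fits in the 32-bit
 * context, so it is passed through directly; on 64-bit kernels it
 * cannot, so the pointer is parked in ioctl_reply_context[] and the
 * array index is used as the context instead, with (u32)-1 meaning
 * the table is full.
 */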
/*
 *	Go from an u32 'context' to a pointer to ioctl reply data.
 */
static void *adpt_ioctl_from_context(adpt_hba *pHba, u32 context)
{
#if BITS_PER_LONG == 32
	return (void *)(unsigned long)context;
#else
	void *p = pHba->ioctl_reply_context[context];
	pHba->ioctl_reply_context[context] = NULL;

	return p;
#endif
}

/*===========================================================================
 * Error Handling routines
 *===========================================================================
 */

static int adpt_abort(struct scsi_cmnd * cmd)
{
	adpt_hba* pHba = NULL;	/* host bus adapter structure */
	struct adpt_device* dptdevice;	/* dpt per device information */
	u32 msg[5];
	int rcode;

	if(cmd->serial_number == 0){
		return FAILED;
	}
	pHba = (adpt_hba*) cmd->device->host->hostdata[0];
	printk(KERN_INFO"%s: Trying to Abort\n",pHba->name);
	if ((dptdevice = (void*) (cmd->device->hostdata)) == NULL) {
		printk(KERN_ERR "%s: Unable to abort: No device in cmnd\n",pHba->name);
		return FAILED;
	}

	memset(msg, 0, sizeof(msg));
	msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = I2O_CMD_SCSI_ABORT<<24|HOST_TID<<12|dptdevice->tid;
	msg[2] = 0;
	msg[3] = 0;
	msg[4] = adpt_cmd_to_context(cmd);
	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER);
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		if(rcode == -EOPNOTSUPP ){
			printk(KERN_INFO"%s: Abort cmd not supported\n",pHba->name);
			return FAILED;
		}
		printk(KERN_INFO"%s: Abort failed.\n",pHba->name);
		return FAILED;
	}
	printk(KERN_INFO"%s: Abort complete.\n",pHba->name);
	return SUCCESS;
}


#define I2O_DEVICE_RESET 0x27
// This is the same for BLK and SCSI devices
// NOTE this is wrong in the i2o.h definitions
// This is not currently supported by our adapter but we issue it anyway
static int adpt_device_reset(struct scsi_cmnd* cmd)
{
	adpt_hba* pHba;
	u32 msg[4];
	u32 rcode;
	int old_state;
	struct adpt_device* d = cmd->device->hostdata;

	pHba = (void*) cmd->device->host->hostdata[0];
	printk(KERN_INFO"%s: Trying to reset device\n",pHba->name);
	if (!d) {
		printk(KERN_INFO"%s: Reset Device: Device Not found\n",pHba->name);
		return FAILED;
	}
	memset(msg, 0, sizeof(msg));
	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = (I2O_DEVICE_RESET<<24|HOST_TID<<12|d->tid);
	msg[2] = 0;
	msg[3] = 0;

	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	old_state = d->state;
	d->state |= DPTI_DEV_RESET;
	rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER);
	d->state = old_state;
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		if(rcode == -EOPNOTSUPP ){
			printk(KERN_INFO"%s: Device reset not supported\n",pHba->name);
			return FAILED;
		}
		printk(KERN_INFO"%s: Device reset failed\n",pHba->name);
		return FAILED;
	} else {
		printk(KERN_INFO"%s: Device reset successful\n",pHba->name);
		return SUCCESS;
	}
}


#define I2O_HBA_BUS_RESET 0x87
// This version of bus reset is called by the eh_error handler
static int adpt_bus_reset(struct scsi_cmnd* cmd)
{
	adpt_hba* pHba;
	u32 msg[4];
	u32 rcode;

	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
	memset(msg, 0, sizeof(msg));
	printk(KERN_WARNING"%s: Bus reset: SCSI Bus %d: tid: %d\n",pHba->name, cmd->device->channel,pHba->channel[cmd->device->channel].tid );
	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = (I2O_HBA_BUS_RESET<<24|HOST_TID<<12|pHba->channel[cmd->device->channel].tid);
	msg[2] = 0;
	msg[3] = 0;
	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER);
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		printk(KERN_WARNING"%s: Bus reset failed.\n",pHba->name);
		return FAILED;
	} else {
		printk(KERN_WARNING"%s: Bus reset success.\n",pHba->name);
		return SUCCESS;
	}
}

// This version of reset is called by the eh_error_handler
static int __adpt_reset(struct scsi_cmnd* cmd)
{
	adpt_hba* pHba;
	int rcode;
	char name[32];

	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
	strncpy(name, pHba->name, sizeof(name));
	printk(KERN_WARNING"%s: Hba Reset: scsi id %d: tid: %d\n", name, cmd->device->channel, pHba->channel[cmd->device->channel].tid);
	rcode = adpt_hba_reset(pHba);
	if(rcode == 0){
		printk(KERN_WARNING"%s: HBA reset complete\n", name);
		return SUCCESS;
	} else {
		printk(KERN_WARNING"%s: HBA reset failed (%x)\n", name, rcode);
		return FAILED;
	}
}

static int adpt_reset(struct scsi_cmnd* cmd)
{
	int rc;

	spin_lock_irq(cmd->device->host->host_lock);
	rc = __adpt_reset(cmd);
	spin_unlock_irq(cmd->device->host->host_lock);

	return rc;
}

// This version of reset is called by the ioctls and indirectly from eh_error_handler via adpt_reset
static int adpt_hba_reset(adpt_hba* pHba)
{
	int rcode;

	pHba->state |= DPTI_STATE_RESET;

	// Activate does get status, init outbound, and get hrt
	if ((rcode=adpt_i2o_activate_hba(pHba)) < 0) {
		printk(KERN_ERR "%s: Could not activate\n", pHba->name);
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}

	if ((rcode=adpt_i2o_build_sys_table()) < 0) {
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	PDEBUG("%s: in HOLD state\n",pHba->name);

	if ((rcode=adpt_i2o_online_hba(pHba)) < 0) {
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	PDEBUG("%s: in OPERATIONAL state\n",pHba->name);

	if ((rcode=adpt_i2o_lct_get(pHba)) < 0){
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}

	if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0){
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	pHba->state &= ~DPTI_STATE_RESET;

	adpt_fail_posted_scbs(pHba);
	return 0;	/* return success */
}
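
/*
 * The reset path above walks the controller back through the normal
 * I2O bring-up sequence: activate (INIT -> HOLD), rebuild the system
 * table, online (HOLD -> OPERATIONAL), then re-read and re-parse the
 * LCT, before clearing DPTI_STATE_RESET and failing back any SCBs
 * that were posted while the reset was in flight.
 */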
/*===========================================================================
 *
 *===========================================================================
 */


static void adpt_i2o_sys_shutdown(void)
{
	adpt_hba *pHba, *pNext;
	struct adpt_i2o_post_wait_data *p1, *old;

	printk(KERN_INFO "Shutting down Adaptec I2O controllers.\n");
	printk(KERN_INFO "   This could take a few minutes if there are many devices attached\n");
	/* Delete all IOPs from the controller chain */
	/* They should have already been released by the
	 * scsi-core
	 */
	for (pHba = hba_chain; pHba; pHba = pNext) {
		pNext = pHba->next;
		adpt_i2o_delete_hba(pHba);
	}

	/* Remove any timedout entries from the wait queue.  */
//	spin_lock_irqsave(&adpt_post_wait_lock, flags);
	/* Nothing should be outstanding at this point so just
	 * free them
	 */
	for(p1 = adpt_post_wait_queue; p1;) {
		old = p1;
		p1 = p1->next;
		kfree(old);
	}
//	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
	adpt_post_wait_queue = NULL;

	printk(KERN_INFO "Adaptec I2O controllers down.\n");
}

static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev)
{
	adpt_hba* pHba = NULL;
	adpt_hba* p = NULL;
	ulong base_addr0_phys = 0;
	ulong base_addr1_phys = 0;
	u32 hba_map0_area_size = 0;
	u32 hba_map1_area_size = 0;
	void __iomem *base_addr_virt = NULL;
	void __iomem *msg_addr_virt = NULL;
	int dma64 = 0;

	int raptorFlag = FALSE;

	if(pci_enable_device(pDev)) {
		return -EINVAL;
	}

	if (pci_request_regions(pDev, "dpt_i2o")) {
		PERROR("dpti: adpt_config_hba: pci request region failed\n");
		return -EINVAL;
	}

	pci_set_master(pDev);

	/*
	 *	See if we should enable dma64 mode.
	 */
	if (sizeof(dma_addr_t) > 4 &&
	    dma_get_required_mask(&pDev->dev) > DMA_BIT_MASK(32) &&
	    dma_set_mask(&pDev->dev, DMA_BIT_MASK(64)) == 0)
		dma64 = 1;

	if (!dma64 && dma_set_mask(&pDev->dev, DMA_BIT_MASK(32)) != 0)
		return -EINVAL;

	/* adapter only supports message blocks below 4GB */
	dma_set_coherent_mask(&pDev->dev, DMA_BIT_MASK(32));

	base_addr0_phys = pci_resource_start(pDev,0);
	hba_map0_area_size = pci_resource_len(pDev,0);

	// Check if standard PCI card or single BAR Raptor
	if(pDev->device == PCI_DPT_DEVICE_ID){
		if(pDev->subsystem_device >=0xc032 && pDev->subsystem_device <= 0xc03b){
			// Raptor card with this device id needs 4M
			hba_map0_area_size = 0x400000;
		} else { // Not Raptor - it is a PCI card
			if(hba_map0_area_size > 0x100000 ){
				hba_map0_area_size = 0x100000;
			}
		}
	} else {// Raptor split BAR config
		// Use BAR1 in this configuration
		base_addr1_phys = pci_resource_start(pDev,1);
		hba_map1_area_size = pci_resource_len(pDev,1);
		raptorFlag = TRUE;
	}

#if BITS_PER_LONG == 64
	/*
	 *	The original Adaptec 64 bit driver has this comment here:
	 *	"x86_64 machines need more optimal mappings"
	 *
	 *	I assume some HBAs report ridiculously large mappings
	 *	and we need to limit them on platforms with IOMMUs.
	 */
	if (raptorFlag == TRUE) {
		if (hba_map0_area_size > 128)
			hba_map0_area_size = 128;
		if (hba_map1_area_size > 524288)
			hba_map1_area_size = 524288;
	} else {
		if (hba_map0_area_size > 524288)
			hba_map0_area_size = 524288;
	}
#endif

	base_addr_virt = ioremap(base_addr0_phys,hba_map0_area_size);
	if (!base_addr_virt) {
		pci_release_regions(pDev);
		PERROR("dpti: adpt_config_hba: io remap failed\n");
		return -EINVAL;
	}

	if(raptorFlag == TRUE) {
		msg_addr_virt = ioremap(base_addr1_phys, hba_map1_area_size );
		if (!msg_addr_virt) {
			PERROR("dpti: adpt_config_hba: io remap failed on BAR1\n");
			iounmap(base_addr_virt);
			pci_release_regions(pDev);
			return -EINVAL;
		}
	} else {
		msg_addr_virt = base_addr_virt;
	}

	// Allocate and zero the data structure
	pHba = kzalloc(sizeof(adpt_hba), GFP_KERNEL);
	if (!pHba) {
		if (msg_addr_virt != base_addr_virt)
			iounmap(msg_addr_virt);
		iounmap(base_addr_virt);
		pci_release_regions(pDev);
		return -ENOMEM;
	}

	mutex_lock(&adpt_configuration_lock);

	if(hba_chain != NULL){
		for(p = hba_chain; p->next; p = p->next);
		p->next = pHba;
	} else {
		hba_chain = pHba;
	}
	pHba->next = NULL;
	pHba->unit = hba_count;
	sprintf(pHba->name, "dpti%d", hba_count);
	hba_count++;

	mutex_unlock(&adpt_configuration_lock);

	pHba->pDev = pDev;
	pHba->base_addr_phys = base_addr0_phys;

	// Set up the Virtual Base Address of the I2O Device
	pHba->base_addr_virt = base_addr_virt;
	pHba->msg_addr_virt = msg_addr_virt;
	pHba->irq_mask = base_addr_virt+0x30;
	pHba->post_port = base_addr_virt+0x40;
	pHba->reply_port = base_addr_virt+0x44;

	pHba->hrt = NULL;
	pHba->lct = NULL;
	pHba->lct_size = 0;
	pHba->status_block = NULL;
	pHba->post_count = 0;
	pHba->state = DPTI_STATE_RESET;
	pHba->pDev = pDev;
	pHba->devices = NULL;
	pHba->dma64 = dma64;

	// Initializing the spinlocks
	spin_lock_init(&pHba->state_lock);
	spin_lock_init(&adpt_post_wait_lock);

	if(raptorFlag == 0){
		printk(KERN_INFO "Adaptec I2O RAID controller"
				 " %d at %p size=%x irq=%d%s\n",
			hba_count-1, base_addr_virt,
			hba_map0_area_size, pDev->irq,
			dma64 ? " (64-bit DMA)" : "");
	} else {
		printk(KERN_INFO"Adaptec I2O RAID controller %d irq=%d%s\n",
			hba_count-1, pDev->irq,
			dma64 ? " (64-bit DMA)" : "");
		printk(KERN_INFO"     BAR0 %p - size= %x\n",base_addr_virt,hba_map0_area_size);
		printk(KERN_INFO"     BAR1 %p - size= %x\n",msg_addr_virt,hba_map1_area_size);
	}

	if (request_irq (pDev->irq, adpt_isr, IRQF_SHARED, pHba->name, pHba)) {
		printk(KERN_ERR"%s: Couldn't register IRQ %d\n", pHba->name, pDev->irq);
		adpt_i2o_delete_hba(pHba);
		return -EINVAL;
	}

	return 0;
}


static void adpt_i2o_delete_hba(adpt_hba* pHba)
{
	adpt_hba* p1;
	adpt_hba* p2;
	struct i2o_device* d;
	struct i2o_device* next;
	int i;
	int j;
	struct adpt_device* pDev;
	struct adpt_device* pNext;


	mutex_lock(&adpt_configuration_lock);
	if(pHba->host){
		free_irq(pHba->host->irq, pHba);
	}
	p2 = NULL;
	for( p1 = hba_chain; p1; p2 = p1,p1=p1->next){
		if(p1 == pHba) {
			if(p2) {
				p2->next = p1->next;
			} else {
				hba_chain = p1->next;
			}
			break;
		}
	}

	hba_count--;
	mutex_unlock(&adpt_configuration_lock);

	iounmap(pHba->base_addr_virt);
	pci_release_regions(pHba->pDev);
	if(pHba->msg_addr_virt != pHba->base_addr_virt){
		iounmap(pHba->msg_addr_virt);
	}
	if(pHba->FwDebugBuffer_P)
		iounmap(pHba->FwDebugBuffer_P);
	if(pHba->hrt) {
		dma_free_coherent(&pHba->pDev->dev,
			pHba->hrt->num_entries * pHba->hrt->entry_len << 2,
			pHba->hrt, pHba->hrt_pa);
	}
	if(pHba->lct) {
		dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
			pHba->lct, pHba->lct_pa);
	}
	if(pHba->status_block) {
		dma_free_coherent(&pHba->pDev->dev, sizeof(i2o_status_block),
			pHba->status_block, pHba->status_block_pa);
	}
	if(pHba->reply_pool) {
		dma_free_coherent(&pHba->pDev->dev,
			pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
			pHba->reply_pool, pHba->reply_pool_pa);
	}

	for(d = pHba->devices; d ; d = next){
		next = d->next;
		kfree(d);
	}
	for(i = 0 ; i < pHba->top_scsi_channel ; i++){
		for(j = 0; j < MAX_ID; j++){
			if(pHba->channel[i].device[j] != NULL){
				for(pDev = pHba->channel[i].device[j]; pDev; pDev = pNext){
					pNext = pDev->next_lun;
					kfree(pDev);
				}
			}
		}
	}
	pci_dev_put(pHba->pDev);
	if (adpt_sysfs_class)
		device_destroy(adpt_sysfs_class,
				MKDEV(DPTI_I2O_MAJOR, pHba->unit));
	kfree(pHba);

	if(hba_count <= 0){
		unregister_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER);
		if (adpt_sysfs_class) {
			class_destroy(adpt_sysfs_class);
			adpt_sysfs_class = NULL;
		}
	}
}

static struct adpt_device* adpt_find_device(adpt_hba* pHba, u32 chan, u32 id, u64 lun)
{
	struct adpt_device* d;

	if(chan < 0 || chan >= MAX_CHANNEL)
		return NULL;

	d = pHba->channel[chan].device[id];
	if(!d || d->tid == 0) {
		return NULL;
	}

	/* If it is the only lun at that address then this should match*/
	if(d->scsi_lun == lun){
		return d;
	}

	/* else we need to look through all the luns */
	for(d=d->next_lun ; d ; d = d->next_lun){
		if(d->scsi_lun == lun){
			return d;
		}
	}
	return NULL;
}


static int adpt_i2o_post_wait(adpt_hba* pHba, u32* msg, int len, int timeout)
{
	// I used my own version of the WAIT_QUEUE_HEAD
	// to handle some version differences
	// When embedded in the kernel this could go back to the vanilla one
	ADPT_DECLARE_WAIT_QUEUE_HEAD(adpt_wq_i2o_post);
	int status = 0;
	ulong flags = 0;
	struct adpt_i2o_post_wait_data *p1, *p2;
	struct adpt_i2o_post_wait_data *wait_data =
		kmalloc(sizeof(struct adpt_i2o_post_wait_data), GFP_ATOMIC);
	DECLARE_WAITQUEUE(wait, current);

	if (!wait_data)
		return -ENOMEM;

	/*
	 * The spin locking is needed to keep anyone from playing
	 * with the queue pointers and id while we do the same
	 */
	spin_lock_irqsave(&adpt_post_wait_lock, flags);
	// TODO we need a MORE unique way of getting ids
	// to support async LCT get
	wait_data->next = adpt_post_wait_queue;
	adpt_post_wait_queue = wait_data;
	adpt_post_wait_id++;
	adpt_post_wait_id &= 0x7fff;
	wait_data->id = adpt_post_wait_id;
	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);

	wait_data->wq = &adpt_wq_i2o_post;
	wait_data->status = -ETIMEDOUT;

	add_wait_queue(&adpt_wq_i2o_post, &wait);

	msg[2] |= 0x80000000 | ((u32)wait_data->id);
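	/*
	 * The initiator context of the outgoing message is stamped with
	 * bit 31 set plus the 15-bit wait id; the ISR hands that context
	 * to adpt_i2o_post_wait_complete(), which masks it back to the id
	 * and wakes the matching entry on adpt_post_wait_queue.
	 */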
	timeout *= HZ;
	if((status = adpt_i2o_post_this(pHba, msg, len)) == 0){
		set_current_state(TASK_INTERRUPTIBLE);
		if(pHba->host)
			spin_unlock_irq(pHba->host->host_lock);
		if (!timeout)
			schedule();
		else{
			timeout = schedule_timeout(timeout);
			if (timeout == 0) {
				// I/O issued, but cannot get result in
				// specified time. Freeing resources is
				// dangerous.
				status = -ETIME;
			}
		}
		if(pHba->host)
			spin_lock_irq(pHba->host->host_lock);
	}
	remove_wait_queue(&adpt_wq_i2o_post, &wait);

	if(status == -ETIMEDOUT){
		printk(KERN_INFO"dpti%d: POST WAIT TIMEOUT\n",pHba->unit);
		// We will have to free the wait_data memory during shutdown
		return status;
	}

	/* Remove the entry from the queue. */
	p2 = NULL;
	spin_lock_irqsave(&adpt_post_wait_lock, flags);
	for(p1 = adpt_post_wait_queue; p1; p2 = p1, p1 = p1->next) {
		if(p1 == wait_data) {
			if(p1->status == I2O_DETAIL_STATUS_UNSUPPORTED_FUNCTION ) {
				status = -EOPNOTSUPP;
			}
			if(p2) {
				p2->next = p1->next;
			} else {
				adpt_post_wait_queue = p1->next;
			}
			break;
		}
	}
	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);

	kfree(wait_data);

	return status;
}


static s32 adpt_i2o_post_this(adpt_hba* pHba, u32* data, int len)
{
	u32 m = EMPTY_QUEUE;
	u32 __iomem *msg;
	ulong timeout = jiffies + 30*HZ;
	do {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}
		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"dpti%d: Timeout waiting for message frame!\n", pHba->unit);
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while(m == EMPTY_QUEUE);

	msg = pHba->msg_addr_virt + m;
	memcpy_toio(msg, data, len);
	wmb();

	//post message
	writel(m, pHba->post_port);
	wmb();

	return 0;
}
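
/*
 * Posting is a two-step handshake with the IOP: read the inbound post
 * port until it yields a free message frame address (MFA) instead of
 * EMPTY_QUEUE, copy the request into that frame, then write the MFA
 * back to the post port to hand the frame to the firmware.
 */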
static void adpt_i2o_post_wait_complete(u32 context, int status)
{
	struct adpt_i2o_post_wait_data *p1 = NULL;
	/*
	 * We need to search through the adpt_post_wait
	 * queue to see if the given message is still
	 * outstanding.  If not, it means that the IOP
	 * took longer to respond to the message than we
	 * had allowed and timer has already expired.
	 * Not much we can do about that except log
	 * it for debug purposes, increase timeout, and recompile.
	 *
	 * Lock needed to keep anyone from moving queue pointers
	 * around while we're looking through them.
	 */

	context &= 0x7fff;

	spin_lock(&adpt_post_wait_lock);
	for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
		if(p1->id == context) {
			p1->status = status;
			spin_unlock(&adpt_post_wait_lock);
			wake_up_interruptible(p1->wq);
			return;
		}
	}
	spin_unlock(&adpt_post_wait_lock);
	// If this happens we lose commands that probably really completed
	printk(KERN_DEBUG"dpti: Could Not find task %d in wait queue\n",context);
	printk(KERN_DEBUG"      Tasks in wait queue:\n");
	for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
		printk(KERN_DEBUG"         %d\n",p1->id);
	}
	return;
}

static s32 adpt_i2o_reset_hba(adpt_hba* pHba)
{
	u32 msg[8];
	u8* status;
	dma_addr_t addr;
	u32 m = EMPTY_QUEUE ;
	ulong timeout = jiffies + (TMOUT_IOPRESET*HZ);

	if(pHba->initialized  == FALSE) {	// First time reset should be quick
		timeout = jiffies + (25*HZ);
	} else {
		adpt_i2o_quiesce_hba(pHba);
	}

	do {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}
		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"Timeout waiting for message!\n");
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while (m == EMPTY_QUEUE);

	status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
	if(status == NULL) {
		adpt_send_nop(pHba, m);
		printk(KERN_ERR"IOP reset failed - no free memory.\n");
		return -ENOMEM;
	}
	memset(status,0,4);

	msg[0]=EIGHT_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1]=I2O_CMD_ADAPTER_RESET<<24|HOST_TID<<12|ADAPTER_TID;
	msg[2]=0;
	msg[3]=0;
	msg[4]=0;
	msg[5]=0;
	msg[6]=dma_low(addr);
	msg[7]=dma_high(addr);

	memcpy_toio(pHba->msg_addr_virt+m, msg, sizeof(msg));
	wmb();
	writel(m, pHba->post_port);
	wmb();

	while(*status == 0){
		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"%s: IOP Reset Timeout\n",pHba->name);
			/* We lose 4 bytes of "status" here, but we cannot
			   free these because controller may awake and corrupt
			   those bytes at any time */
			/* dma_free_coherent(&pHba->pDev->dev, 4, buf, addr); */
			return -ETIMEDOUT;
		}
		rmb();
		schedule_timeout_uninterruptible(1);
	}

	if(*status == 0x01 /*I2O_EXEC_IOP_RESET_IN_PROGRESS*/) {
		PDEBUG("%s: Reset in progress...\n", pHba->name);
		// Here we wait for message frame to become available
		// indicated that reset has finished
		do {
			rmb();
			m = readl(pHba->post_port);
			if (m != EMPTY_QUEUE) {
				break;
			}
			if(time_after(jiffies,timeout)){
				printk(KERN_ERR "%s:Timeout waiting for IOP Reset.\n",pHba->name);
				/* We lose 4 bytes of "status" here, but we
				   cannot free these because controller may
				   awake and corrupt those bytes at any time */
				/* dma_free_coherent(&pHba->pDev->dev, 4, buf, addr); */
				return -ETIMEDOUT;
			}
			schedule_timeout_uninterruptible(1);
		} while (m == EMPTY_QUEUE);
		// Flush the offset
		adpt_send_nop(pHba, m);
	}
	adpt_i2o_status_get(pHba);
	if(*status == 0x02 ||
			pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
		printk(KERN_WARNING"%s: Reset reject, trying to clear\n",
				pHba->name);
	} else {
		PDEBUG("%s: Reset completed.\n", pHba->name);
	}

	dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
#ifdef UARTDELAY
	// This delay is to allow someone attached to the card through the debug UART to
	// set up the dump levels that they want before the rest of the initialization sequence
	adpt_delay(20000);
#endif
	return 0;
}


static int adpt_i2o_parse_lct(adpt_hba* pHba)
{
	int i;
	int max;
	int tid;
	struct i2o_device *d;
	i2o_lct *lct = pHba->lct;
	u8 bus_no = 0;
	s16 scsi_id;
	u64 scsi_lun;
	u32 buf[10]; // larger than 7, or 8 ...
	struct adpt_device* pDev;

	if (lct == NULL) {
		printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
		return -1;
	}

	max = lct->table_size;
	max -= 3;
	max /= 9;

	for(i=0;i<max;i++) {
		if( lct->lct_entry[i].user_tid != 0xfff){
			/*
			 * If we have hidden devices, we need to inform the upper layers about
			 * the possible maximum id reference to handle device access when
			 * an array is disassembled. This code has no other purpose but to
			 * allow us future access to devices that are currently hidden
			 * behind arrays, hotspares or have not been configured (JBOD mode).
			 */
			if( lct->lct_entry[i].class_id != I2O_CLASS_RANDOM_BLOCK_STORAGE &&
			    lct->lct_entry[i].class_id != I2O_CLASS_SCSI_PERIPHERAL &&
			    lct->lct_entry[i].class_id != I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
				continue;
			}
			tid = lct->lct_entry[i].tid;
			// I2O_DPT_DEVICE_INFO_GROUP_NO;
			if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
				continue;
			}
			bus_no = buf[0]>>16;
			scsi_id = buf[1];
			scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
			if(bus_no >= MAX_CHANNEL) {	// Something wrong skip it
				printk(KERN_WARNING"%s: Channel number %d out of range \n", pHba->name, bus_no);
				continue;
			}
			if (scsi_id >= MAX_ID){
				printk(KERN_WARNING"%s: SCSI ID %d out of range \n", pHba->name, scsi_id);
				continue;
			}
			if(bus_no > pHba->top_scsi_channel){
				pHba->top_scsi_channel = bus_no;
			}
			if(scsi_id > pHba->top_scsi_id){
				pHba->top_scsi_id = scsi_id;
			}
			if(scsi_lun > pHba->top_scsi_lun){
				pHba->top_scsi_lun = scsi_lun;
			}
			continue;
		}
		d = kmalloc(sizeof(struct i2o_device), GFP_KERNEL);
		if(d==NULL)
		{
			printk(KERN_CRIT"%s: Out of memory for I2O device data.\n",pHba->name);
			return -ENOMEM;
		}

		d->controller = pHba;
		d->next = NULL;

		memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));

		d->flags = 0;
		tid = d->lct_data.tid;
		adpt_i2o_report_hba_unit(pHba, d);
		adpt_i2o_install_device(pHba, d);
	}
	bus_no = 0;
	for(d = pHba->devices; d ; d = d->next) {
		if(d->lct_data.class_id  == I2O_CLASS_BUS_ADAPTER_PORT ||
		   d->lct_data.class_id == I2O_CLASS_FIBRE_CHANNEL_PORT){
			tid = d->lct_data.tid;
			// TODO get the bus_no from hrt-but for now they are in order
			//bus_no =
			if(bus_no > pHba->top_scsi_channel){
				pHba->top_scsi_channel = bus_no;
			}
			pHba->channel[bus_no].type = d->lct_data.class_id;
			pHba->channel[bus_no].tid = tid;
			if(adpt_i2o_query_scalar(pHba, tid, 0x0200, -1, buf, 28)>=0)
			{
				pHba->channel[bus_no].scsi_id = buf[1];
				PDEBUG("Bus %d - SCSI ID %d.\n", bus_no, buf[1]);
			}
			// TODO remove - this is just until we get from hrt
			bus_no++;
			if(bus_no >= MAX_CHANNEL) {	// Something wrong skip it
				printk(KERN_WARNING"%s: Channel number %d out of range - LCT\n", pHba->name, bus_no);
				break;
			}
		}
	}

	// Setup adpt_device table
	for(d = pHba->devices; d ; d = d->next) {
		if(d->lct_data.class_id  == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
		   d->lct_data.class_id  == I2O_CLASS_SCSI_PERIPHERAL ||
		   d->lct_data.class_id  == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){

			tid = d->lct_data.tid;
			scsi_id = -1;
			// I2O_DPT_DEVICE_INFO_GROUP_NO;
			if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)>=0) {
				bus_no = buf[0]>>16;
				scsi_id = buf[1];
				scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
				if(bus_no >= MAX_CHANNEL) {	// Something wrong skip it
					continue;
				}
				if (scsi_id >= MAX_ID) {
					continue;
				}
				if( pHba->channel[bus_no].device[scsi_id] == NULL){
					pDev = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
					if(pDev == NULL) {
						return -ENOMEM;
					}
					pHba->channel[bus_no].device[scsi_id] = pDev;
				} else {
					for( pDev = pHba->channel[bus_no].device[scsi_id];
							pDev->next_lun; pDev = pDev->next_lun){
					}
					pDev->next_lun = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
					if(pDev->next_lun == NULL) {
						return -ENOMEM;
					}
					pDev = pDev->next_lun;
				}
				pDev->tid = tid;
				pDev->scsi_channel = bus_no;
				pDev->scsi_id = scsi_id;
				pDev->scsi_lun = scsi_lun;
				pDev->pI2o_dev = d;
				d->owner = pDev;
				pDev->type = (buf[0])&0xff;
				pDev->flags = (buf[0]>>8)&0xff;
				if(scsi_id > pHba->top_scsi_id){
					pHba->top_scsi_id = scsi_id;
				}
				if(scsi_lun > pHba->top_scsi_lun){
					pHba->top_scsi_lun = scsi_lun;
				}
			}
			if(scsi_id == -1){
				printk(KERN_WARNING"Could not find SCSI ID for %s\n",
						d->lct_data.identity_tag);
			}
		}
	}
	return 0;
}


/*
 *	Each I2O controller has a chain of devices on it - these match
 *	the useful parts of the LCT of the board.
 */

static int adpt_i2o_install_device(adpt_hba* pHba, struct i2o_device *d)
{
	mutex_lock(&adpt_configuration_lock);
	d->controller=pHba;
	d->owner=NULL;
	d->next=pHba->devices;
	d->prev=NULL;
	if (pHba->devices != NULL){
		pHba->devices->prev=d;
	}
	pHba->devices=d;
	*d->dev_name = 0;

	mutex_unlock(&adpt_configuration_lock);
	return 0;
}

static int adpt_open(struct inode *inode, struct file *file)
{
	int minor;
	adpt_hba* pHba;

	mutex_lock(&adpt_mutex);
	//TODO check for root access
	//
	minor = iminor(inode);
	if (minor >= hba_count) {
		mutex_unlock(&adpt_mutex);
		return -ENXIO;
	}
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->unit == minor) {
			break;	/* found adapter */
		}
	}
	if (pHba == NULL) {
		mutex_unlock(&adpt_configuration_lock);
		mutex_unlock(&adpt_mutex);
		return -ENXIO;
	}

//	if(pHba->in_use){
//		mutex_unlock(&adpt_configuration_lock);
//		return -EBUSY;
//	}

	pHba->in_use = 1;
	mutex_unlock(&adpt_configuration_lock);
	mutex_unlock(&adpt_mutex);

	return 0;
}

static int adpt_close(struct inode *inode, struct file *file)
{
	int minor;
	adpt_hba* pHba;

	minor = iminor(inode);
	if (minor >= hba_count) {
		return -ENXIO;
	}
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->unit == minor) {
			break;	/* found adapter */
		}
	}
	mutex_unlock(&adpt_configuration_lock);
	if (pHba == NULL) {
		return -ENXIO;
	}

	pHba->in_use = 0;

	return 0;
}


static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
{
	u32 msg[MAX_MESSAGE_SIZE];
	u32* reply = NULL;
	u32 size = 0;
	u32 reply_size = 0;
	u32 __user *user_msg = arg;
	u32 __user * user_reply = NULL;
	void **sg_list = NULL;
	u32 sg_offset = 0;
	u32 sg_count = 0;
	int sg_index = 0;
	u32 i = 0;
	u32 rcode = 0;
	void *p = NULL;
	dma_addr_t addr;
	ulong flags = 0;

	memset(&msg, 0, MAX_MESSAGE_SIZE*4);
	// get user msg size in u32s
	if(get_user(size, &user_msg[0])){
		return -EFAULT;
	}
	size = size>>16;

	user_reply = &user_msg[size];
	if(size > MAX_MESSAGE_SIZE){
		return -EFAULT;
	}
	size *= 4; // Convert to bytes

	/* Copy in the user's I2O command */
	if(copy_from_user(msg, user_msg, size)) {
		return -EFAULT;
	}
	get_user(reply_size, &user_reply[0]);
	reply_size = reply_size>>16;
	if(reply_size > REPLY_FRAME_SIZE){
		reply_size = REPLY_FRAME_SIZE;
	}
	reply_size *= 4;
	reply = kzalloc(REPLY_FRAME_SIZE*4, GFP_KERNEL);
	if(reply == NULL) {
		printk(KERN_WARNING"%s: Could not allocate reply buffer\n",pHba->name);
		return -ENOMEM;
	}
	sg_offset = (msg[0]>>4)&0xf;
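	/*
	 * sg_offset is the SGL offset nibble from the user's message
	 * header (in 32-bit words).  The user-supplied initiator and
	 * transaction contexts are overwritten here so the reply can be
	 * routed back to our buffer; the original values are restored
	 * from user space before the reply is copied out below.
	 */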
	msg[2] = 0x40000000; // IOCTL context
	msg[3] = adpt_ioctl_to_context(pHba, reply);
	if (msg[3] == (u32)-1) {
		rcode = -EBUSY;
		goto free;
	}

	sg_list = kcalloc(pHba->sg_tablesize, sizeof(*sg_list), GFP_KERNEL);
	if (!sg_list) {
		rcode = -ENOMEM;
		goto free;
	}
	if(sg_offset) {
		// TODO add 64 bit API
		struct sg_simple_element *sg = (struct sg_simple_element*) (msg+sg_offset);
		sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
		if (sg_count > pHba->sg_tablesize){
			printk(KERN_DEBUG"%s:IOCTL SG List too large (%u)\n", pHba->name,sg_count);
			rcode = -EINVAL;
			goto free;
		}

		for(i = 0; i < sg_count; i++) {
			int sg_size;

			if (!(sg[i].flag_count & 0x10000000 /*I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT*/)) {
				printk(KERN_DEBUG"%s:Bad SG element %d - not simple (%x)\n",pHba->name,i, sg[i].flag_count);
				rcode = -EINVAL;
				goto cleanup;
			}
			sg_size = sg[i].flag_count & 0xffffff;
			/* Allocate memory for the transfer */
			p = dma_alloc_coherent(&pHba->pDev->dev, sg_size, &addr, GFP_KERNEL);
			if(!p) {
				printk(KERN_DEBUG"%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
						pHba->name,sg_size,i,sg_count);
				rcode = -ENOMEM;
				goto cleanup;
			}
			sg_list[sg_index++] = p; // sglist indexed with input frame, not our internal frame.
			/* Copy in the user's SG buffer if necessary */
			if(sg[i].flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR*/) {
				// sg_simple_element API is 32 bit
				if (copy_from_user(p,(void __user *)(ulong)sg[i].addr_bus, sg_size)) {
					printk(KERN_DEBUG"%s: Could not copy SG buf %d FROM user\n",pHba->name,i);
					rcode = -EFAULT;
					goto cleanup;
				}
			}
			/* sg_simple_element API is 32 bit, but addr < 4GB */
			sg[i].addr_bus = addr;
		}
	}

	do {
		/*
		 * Stop any new commands from entering the
		 * controller while processing the ioctl
		 */
		if (pHba->host) {
			scsi_block_requests(pHba->host);
			spin_lock_irqsave(pHba->host->host_lock, flags);
		}
		rcode = adpt_i2o_post_wait(pHba, msg, size, FOREVER);
		if (rcode != 0)
			printk("adpt_i2o_passthru: post wait failed %d %p\n",
					rcode, reply);
		if (pHba->host) {
			spin_unlock_irqrestore(pHba->host->host_lock, flags);
			scsi_unblock_requests(pHba->host);
		}
	} while (rcode == -ETIMEDOUT);

	if(rcode){
		goto cleanup;
	}

	if(sg_offset) {
		/* Copy back the Scatter Gather buffers back to user space */
		u32 j;
		// TODO add 64 bit API
		struct sg_simple_element* sg;
		int sg_size;

		// re-acquire the original message to handle correctly the sg copy operation
		memset(&msg, 0, MAX_MESSAGE_SIZE*4);
		// get user msg size in u32s
		if(get_user(size, &user_msg[0])){
			rcode = -EFAULT;
			goto cleanup;
		}
		size = size>>16;
		size *= 4;
		if (size > MAX_MESSAGE_SIZE) {
			rcode = -EINVAL;
			goto cleanup;
		}
		/* Copy in the user's I2O command */
		if (copy_from_user (msg, user_msg, size)) {
			rcode = -EFAULT;
			goto cleanup;
		}
		sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);

		// TODO add 64 bit API
		sg = (struct sg_simple_element*)(msg + sg_offset);
		for (j = 0; j < sg_count; j++) {
			/* Copy out the SG list to user's buffer if necessary */
			if(! (sg[j].flag_count & 0x4000000 /*I2O_SGL_FLAGS_DIR*/)) {
				sg_size = sg[j].flag_count & 0xffffff;
				// sg_simple_element API is 32 bit
				if (copy_to_user((void __user *)(ulong)sg[j].addr_bus,sg_list[j], sg_size)) {
					printk(KERN_WARNING"%s: Could not copy %p TO user %x\n",pHba->name, sg_list[j], sg[j].addr_bus);
					rcode = -EFAULT;
					goto cleanup;
				}
			}
		}
	}

	/* Copy back the reply to user space */
	if (reply_size) {
		// we wrote our own values for context - now restore the user supplied ones
		if(copy_from_user(reply+2, user_msg+2, sizeof(u32)*2)) {
			printk(KERN_WARNING"%s: Could not copy message context FROM user\n",pHba->name);
			rcode = -EFAULT;
		}
		if(copy_to_user(user_reply, reply, reply_size)) {
			printk(KERN_WARNING"%s: Could not copy reply TO user\n",pHba->name);
			rcode = -EFAULT;
		}
	}


cleanup:
	if (rcode != -ETIME && rcode != -EINTR) {
		struct sg_simple_element *sg =
				(struct sg_simple_element*) (msg +sg_offset);
		while(sg_index) {
			if(sg_list[--sg_index]) {
				dma_free_coherent(&pHba->pDev->dev,
					sg[sg_index].flag_count & 0xffffff,
					sg_list[sg_index],
					sg[sg_index].addr_bus);
			}
		}
	}

free:
	kfree(sg_list);
	kfree(reply);
	return rcode;
}
#if defined __ia64__
static void adpt_ia64_info(sysInfo_S* si)
{
	// This is all the info we need for now
	// We will add more info as our new
	// management utility requires it
	si->processorType = PROC_IA64;
}
#endif

#if defined __sparc__
static void adpt_sparc_info(sysInfo_S* si)
{
	// This is all the info we need for now
	// We will add more info as our new
	// management utility requires it
	si->processorType = PROC_ULTRASPARC;
}
#endif
#if defined __alpha__
static void adpt_alpha_info(sysInfo_S* si)
{
	// This is all the info we need for now
	// We will add more info as our new
	// management utility requires it
	si->processorType = PROC_ALPHA;
}
#endif

#if defined __i386__

#include <uapi/asm/vm86.h>

static void adpt_i386_info(sysInfo_S* si)
{
	// This is all the info we need for now
	// We will add more info as our new
	// management utility requires it
	switch (boot_cpu_data.x86) {
	case CPU_386:
		si->processorType = PROC_386;
		break;
	case CPU_486:
		si->processorType = PROC_486;
		break;
	case CPU_586:
		si->processorType = PROC_PENTIUM;
		break;
	default:  // Just in case
		si->processorType = PROC_PENTIUM;
		break;
	}
}
#endif

/*
 * This routine returns information about the system.  This does not affect
 * any logic and if the info is wrong - it doesn't matter.
 */

/* Get all the info we can not get from kernel services */
static int adpt_system_info(void __user *buffer)
{
	sysInfo_S si;

	memset(&si, 0, sizeof(si));

	si.osType = OS_LINUX;
	si.osMajorVersion = 0;
	si.osMinorVersion = 0;
	si.osRevision = 0;
	si.busType = SI_PCI_BUS;
	si.processorFamily = DPTI_sig.dsProcessorFamily;

#if defined __i386__
	adpt_i386_info(&si);
#elif defined (__ia64__)
	adpt_ia64_info(&si);
#elif defined(__sparc__)
	adpt_sparc_info(&si);
#elif defined (__alpha__)
	adpt_alpha_info(&si);
#else
	si.processorType = 0xff ;
#endif
	if (copy_to_user(buffer, &si, sizeof(si))){
		printk(KERN_WARNING"dpti: Could not copy buffer TO user\n");
		return -EFAULT;
	}

	return 0;
}
1995 static int adpt_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg)
1997 int minor;
1998 int error = 0;
1999 adpt_hba* pHba;
2000 ulong flags = 0;
2001 void __user *argp = (void __user *)arg;
2003 minor = iminor(inode);
2004 if (minor >= DPTI_MAX_HBA){
2005 return -ENXIO;
2007 mutex_lock(&adpt_configuration_lock);
2008 for (pHba = hba_chain; pHba; pHba = pHba->next) {
2009 if (pHba->unit == minor) {
2010 break; /* found adapter */
2013 mutex_unlock(&adpt_configuration_lock);
2014 if(pHba == NULL){
2015 return -ENXIO;
2018 while((volatile u32) pHba->state & DPTI_STATE_RESET )
2019 schedule_timeout_uninterruptible(2);
2021 switch (cmd) {
2022 // TODO: handle 3 cases
2023 case DPT_SIGNATURE:
2024 if (copy_to_user(argp, &DPTI_sig, sizeof(DPTI_sig))) {
2025 return -EFAULT;
2027 break;
2028 case I2OUSRCMD:
2029 return adpt_i2o_passthru(pHba, argp);
2031 case DPT_CTRLINFO:{
2032 drvrHBAinfo_S HbaInfo;
2034 #define FLG_OSD_PCI_VALID 0x0001
2035 #define FLG_OSD_DMA 0x0002
2036 #define FLG_OSD_I2O 0x0004
2037 memset(&HbaInfo, 0, sizeof(HbaInfo));
2038 HbaInfo.drvrHBAnum = pHba->unit;
2039 HbaInfo.baseAddr = (ulong) pHba->base_addr_phys;
2040 HbaInfo.blinkState = adpt_read_blink_led(pHba);
2041 HbaInfo.pciBusNum = pHba->pDev->bus->number;
2042 HbaInfo.pciDeviceNum=PCI_SLOT(pHba->pDev->devfn);
2043 HbaInfo.Interrupt = pHba->pDev->irq;
2044 HbaInfo.hbaFlags = FLG_OSD_PCI_VALID | FLG_OSD_DMA | FLG_OSD_I2O;
2045 if(copy_to_user(argp, &HbaInfo, sizeof(HbaInfo))){
2046 printk(KERN_WARNING"%s: Could not copy HbaInfo TO user\n",pHba->name);
2047 return -EFAULT;
2049 break;
2051 case DPT_SYSINFO:
2052 return adpt_system_info(argp);
2053 case DPT_BLINKLED:{
2054 u32 value;
2055 value = (u32)adpt_read_blink_led(pHba);
2056 if (copy_to_user(argp, &value, sizeof(value))) {
2057 return -EFAULT;
2059 break;
2061 case I2ORESETCMD: {
2062 struct Scsi_Host *shost = pHba->host;
2064 if (shost)
2065 spin_lock_irqsave(shost->host_lock, flags);
2066 adpt_hba_reset(pHba);
2067 if (shost)
2068 spin_unlock_irqrestore(shost->host_lock, flags);
2069 break;
2071 case I2ORESCANCMD:
2072 adpt_rescan(pHba);
2073 break;
2074 default:
2075 return -EINVAL;
2078 return error;

static long adpt_unlocked_ioctl(struct file *file, uint cmd, ulong arg)
{
	struct inode *inode;
	long ret;

	inode = file_inode(file);

	mutex_lock(&adpt_mutex);
	ret = adpt_ioctl(inode, file, cmd, arg);
	mutex_unlock(&adpt_mutex);

	return ret;
}

#ifdef CONFIG_COMPAT
static long compat_adpt_ioctl(struct file *file,
			      unsigned int cmd, unsigned long arg)
{
	struct inode *inode;
	long ret;

	inode = file_inode(file);

	mutex_lock(&adpt_mutex);

	switch (cmd) {
	case DPT_SIGNATURE:
	case I2OUSRCMD:
	case DPT_CTRLINFO:
	case DPT_SYSINFO:
	case DPT_BLINKLED:
	case I2ORESETCMD:
	case I2ORESCANCMD:
	case (DPT_TARGET_BUSY & 0xFFFF):
	case DPT_TARGET_BUSY:
		ret = adpt_ioctl(inode, file, cmd, arg);
		break;
	default:
		ret = -ENOIOCTLCMD;
	}

	mutex_unlock(&adpt_mutex);

	return ret;
}
#endif

static irqreturn_t adpt_isr(int irq, void *dev_id)
{
	struct scsi_cmnd* cmd;
	adpt_hba* pHba = dev_id;
	u32 m;
	void __iomem *reply;
	u32 status = 0;
	u32 context;
	ulong flags = 0;
	int handled = 0;

	if (pHba == NULL) {
		printk(KERN_WARNING "adpt_isr: NULL dev_id\n");
		return IRQ_NONE;
	}
	if (pHba->host)
		spin_lock_irqsave(pHba->host->host_lock, flags);

	while (readl(pHba->irq_mask) & I2O_INTERRUPT_PENDING_B) {
		m = readl(pHba->reply_port);
		if (m == EMPTY_QUEUE) {
			// Try twice then give up
			rmb();
			m = readl(pHba->reply_port);
			if (m == EMPTY_QUEUE) {
				// This really should not happen
				printk(KERN_ERR "dpti: Could not get reply frame\n");
				goto out;
			}
		}
		if (pHba->reply_pool_pa <= m &&
		    m < pHba->reply_pool_pa +
			(pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4)) {
			reply = (u8 *)pHba->reply_pool +
						(m - pHba->reply_pool_pa);
		} else {
			/* Ick, we should *never* be here */
			printk(KERN_ERR "dpti: reply frame not from pool\n");
			reply = (u8 *)bus_to_virt(m);
		}

		if (readl(reply) & MSG_FAIL) {
			u32 old_m = readl(reply+28);
			void __iomem *msg;
			u32 old_context;

			PDEBUG("%s: Failed message\n", pHba->name);
			if (old_m >= 0x100000) {
				printk(KERN_ERR "%s: Bad preserved MFA (%x) - dropping frame\n", pHba->name, old_m);
				writel(m, pHba->reply_port);
				continue;
			}
			// Transaction context is 0 in failed reply frame
			msg = pHba->msg_addr_virt + old_m;
			old_context = readl(msg+12);
			writel(old_context, reply+12);
			adpt_send_nop(pHba, old_m);
		}
		context = readl(reply+8);
		if (context & 0x40000000) { // IOCTL
			void *p = adpt_ioctl_from_context(pHba, readl(reply+12));
			if (p != NULL) {
				memcpy_fromio(p, reply, REPLY_FRAME_SIZE * 4);
			}
			// All IOCTLs will also be post wait
		}
		if (context & 0x80000000) { // Post wait message
			status = readl(reply+16);
			if (status >> 24) {
				status &= 0xffff; /* Get detail status */
			} else {
				status = I2O_POST_WAIT_OK;
			}
			if (!(context & 0x40000000)) {
				cmd = adpt_cmd_from_context(pHba,
							readl(reply+12));
				if (cmd != NULL) {
					printk(KERN_WARNING "%s: Apparent SCSI cmd in Post Wait Context - cmd=%p context=%x\n", pHba->name, cmd, context);
				}
			}
			adpt_i2o_post_wait_complete(context, status);
		} else { // SCSI message
			cmd = adpt_cmd_from_context(pHba, readl(reply+12));
			if (cmd != NULL) {
				scsi_dma_unmap(cmd);
				if (cmd->serial_number != 0) { // If not timed out
					adpt_i2o_to_scsi(reply, cmd);
				}
			}
		}
		writel(m, pHba->reply_port);
		wmb();
		rmb();
	}
	handled = 1;
out:	if (pHba->host)
		spin_unlock_irqrestore(pHba->host->host_lock, flags);
	return IRQ_RETVAL(handled);
}
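
/*
 * Note on the dispatch above: ownership is encoded in the transaction
 * context word placed in the request when it was posted.  Bit 30
 * (0x40000000) marks an ioctl pass-through reply (which, per the
 * comment in the handler, is also completed as post-wait), bit 31
 * (0x80000000) marks a post-wait request; a frame with neither bit set
 * completes a SCSI command looked up via adpt_cmd_from_context().
 */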

static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_device* d)
{
	int i;
	u32 msg[MAX_MESSAGE_SIZE];
	u32* mptr;
	u32* lptr;
	u32 *lenptr;
	int direction;
	int scsidir;
	int nseg;
	u32 len;
	u32 reqlen;
	s32 rcode;
	dma_addr_t addr;

	memset(msg, 0, sizeof(msg));
	len = scsi_bufflen(cmd);
	direction = 0x00000000;

	scsidir = 0x00000000;			// DATA NO XFER
	if (len) {
		/*
		 * Set SCBFlags to indicate if data is being transferred
		 * in or out, or no data transfer
		 * Note:  Do not have to verify index is less than 0 since
		 * cmd->cmnd[0] is an unsigned char
		 */
		switch (cmd->sc_data_direction) {
		case DMA_FROM_DEVICE:
			scsidir = 0x40000000;	// DATA IN  (iop<--dev)
			break;
		case DMA_TO_DEVICE:
			direction = 0x04000000;	// SGL OUT
			scsidir = 0x80000000;	// DATA OUT (iop-->dev)
			break;
		case DMA_NONE:
			break;
		case DMA_BIDIRECTIONAL:
			scsidir = 0x40000000;	// DATA IN  (iop<--dev)
			// Assume In - and continue;
			break;
		default:
			printk(KERN_WARNING "%s: scsi opcode 0x%x not supported.\n",
			       pHba->name, cmd->cmnd[0]);
			cmd->result = (DID_OK << 16) | (INITIATOR_ERROR << 8);
			cmd->scsi_done(cmd);
			return 0;
		}
	}
	// msg[0] is set later
	// I2O_CMD_SCSI_EXEC
	msg[1] = ((0xff << 24) | (HOST_TID << 12) | d->tid);
	msg[2] = 0;
	msg[3] = adpt_cmd_to_context(cmd);  /* Want SCSI control block back */
	// Our cards use the transaction context as the tag for queueing
	// Adaptec/DPT Private stuff
	msg[4] = I2O_CMD_SCSI_EXEC | (DPT_ORGANIZATION_ID << 16);
	msg[5] = d->tid;
	/* Direction, disconnect ok | sense data | simple queue , CDBLen */
	// I2O_SCB_FLAG_ENABLE_DISCONNECT |
	// I2O_SCB_FLAG_SIMPLE_QUEUE_TAG |
	// I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
	msg[6] = scsidir | 0x20a00000 | cmd->cmd_len;

	mptr = msg+7;

	// Write SCSI command into the message - always 16 byte block
	memset(mptr, 0, 16);
	memcpy(mptr, cmd->cmnd, cmd->cmd_len);
	mptr += 4;
	lenptr = mptr++;		/* Remember me - fill in when we know */
	if (dpt_dma64(pHba)) {
		reqlen = 16;		// SINGLE SGE
		*mptr++ = (0x7C << 24) + (2 << 16) + 0x02; /* Enable 64 bit */
		*mptr++ = 1 << PAGE_SHIFT;
	} else {
		reqlen = 14;		// SINGLE SGE
	}
	/* Now fill in the SGList and command */

	nseg = scsi_dma_map(cmd);
	BUG_ON(nseg < 0);
	if (nseg) {
		struct scatterlist *sg;

		len = 0;
		scsi_for_each_sg(cmd, sg, nseg, i) {
			lptr = mptr;
			*mptr++ = direction | 0x10000000 | sg_dma_len(sg);
			len += sg_dma_len(sg);
			addr = sg_dma_address(sg);
			*mptr++ = dma_low(addr);
			if (dpt_dma64(pHba))
				*mptr++ = dma_high(addr);
			/* Make this an end of list */
			if (i == nseg - 1)
				*lptr = direction | 0xD0000000 | sg_dma_len(sg);
		}
		reqlen = mptr - msg;
		*lenptr = len;

		if (cmd->underflow && len != cmd->underflow) {
			printk(KERN_WARNING "Cmd len %08X Cmd underflow %08X\n",
			       len, cmd->underflow);
		}
	} else {
		*lenptr = len = 0;
		reqlen = 12;
	}

	/* Stick the headers on */
	msg[0] = reqlen << 16 | ((reqlen > 12) ? SGL_OFFSET_12 : SGL_OFFSET_0);

	// Send it on its way
	rcode = adpt_i2o_post_this(pHba, msg, reqlen << 2);
	if (rcode == 0) {
		return 0;
	}
	return rcode;
}
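
/*
 * Shape of the frame built above (32-bit words):
 *
 *	msg[0]	size | SGL offset	msg[4]	I2O_CMD_SCSI_EXEC | org id
 *	msg[1]	function | TIDs		msg[5]	device TID
 *	msg[2]	initiator context	msg[6]	SCB flags | CDB length
 *	msg[3]	transaction context	msg[7..10]  16-byte CDB block
 *
 * followed by the total byte count and the scatter/gather elements
 * (two words per SGE, three when 64-bit addressing is enabled).
 */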

static s32 adpt_scsi_host_alloc(adpt_hba* pHba, struct scsi_host_template *sht)
{
	struct Scsi_Host *host;

	host = scsi_host_alloc(sht, sizeof(adpt_hba*));
	if (host == NULL) {
		printk("%s: scsi_host_alloc returned NULL\n", pHba->name);
		return -1;
	}
	host->hostdata[0] = (unsigned long)pHba;
	pHba->host = host;

	host->irq = pHba->pDev->irq;
	/* no IO ports, so don't have to set host->io_port and
	 * host->n_io_port
	 */
	host->io_port = 0;
	host->n_io_port = 0;
				/* see comments in scsi_host.h */
	host->max_id = 16;
	host->max_lun = 256;
	host->max_channel = pHba->top_scsi_channel + 1;
	host->cmd_per_lun = 1;
	host->unique_id = (u32)sys_tbl_pa + pHba->unit;
	host->sg_tablesize = pHba->sg_tablesize;
	host->can_queue = pHba->post_fifo_size;
	host->use_cmd_list = 1;

	return 0;
}

static s32 adpt_i2o_to_scsi(void __iomem *reply, struct scsi_cmnd* cmd)
{
	adpt_hba* pHba;
	u32 hba_status;
	u32 dev_status;
	u32 reply_flags = readl(reply) & 0xff00; // Leave it shifted up 8 bits
	// I know this would look cleaner if I just read bytes
	// but the model I have been using for all the rest of the
	// io is in 4 byte words - so I keep that model
	u16 detailed_status = readl(reply+16) & 0xffff;
	dev_status = (detailed_status & 0xff);
	hba_status = detailed_status >> 8;

	// calculate resid for sg
	scsi_set_resid(cmd, scsi_bufflen(cmd) - readl(reply+20));

	pHba = (adpt_hba*)cmd->device->host->hostdata[0];

	cmd->sense_buffer[0] = '\0';  // initialize sense valid flag to false

	if (!(reply_flags & MSG_FAIL)) {
		switch (detailed_status & I2O_SCSI_DSC_MASK) {
		case I2O_SCSI_DSC_SUCCESS:
			cmd->result = (DID_OK << 16);
			// handle underflow
			if (readl(reply+20) < cmd->underflow) {
				cmd->result = (DID_ERROR << 16);
				printk(KERN_WARNING "%s: SCSI CMD underflow\n", pHba->name);
			}
			break;
		case I2O_SCSI_DSC_REQUEST_ABORTED:
			cmd->result = (DID_ABORT << 16);
			break;
		case I2O_SCSI_DSC_PATH_INVALID:
		case I2O_SCSI_DSC_DEVICE_NOT_PRESENT:
		case I2O_SCSI_DSC_SELECTION_TIMEOUT:
		case I2O_SCSI_DSC_COMMAND_TIMEOUT:
		case I2O_SCSI_DSC_NO_ADAPTER:
		case I2O_SCSI_DSC_RESOURCE_UNAVAILABLE:
			printk(KERN_WARNING "%s: SCSI Timeout-Device (%d,%d,%llu) hba status=0x%x, dev status=0x%x, cmd=0x%x\n",
			       pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun, hba_status, dev_status, cmd->cmnd[0]);
			cmd->result = (DID_TIME_OUT << 16);
			break;
		case I2O_SCSI_DSC_ADAPTER_BUSY:
		case I2O_SCSI_DSC_BUS_BUSY:
			cmd->result = (DID_BUS_BUSY << 16);
			break;
		case I2O_SCSI_DSC_SCSI_BUS_RESET:
		case I2O_SCSI_DSC_BDR_MESSAGE_SENT:
			cmd->result = (DID_RESET << 16);
			break;
		case I2O_SCSI_DSC_PARITY_ERROR_FAILURE:
			printk(KERN_WARNING "%s: SCSI CMD parity error\n", pHba->name);
			cmd->result = (DID_PARITY << 16);
			break;
		case I2O_SCSI_DSC_UNABLE_TO_ABORT:
		case I2O_SCSI_DSC_COMPLETE_WITH_ERROR:
		case I2O_SCSI_DSC_UNABLE_TO_TERMINATE:
		case I2O_SCSI_DSC_MR_MESSAGE_RECEIVED:
		case I2O_SCSI_DSC_AUTOSENSE_FAILED:
		case I2O_SCSI_DSC_DATA_OVERRUN:
		case I2O_SCSI_DSC_UNEXPECTED_BUS_FREE:
		case I2O_SCSI_DSC_SEQUENCE_FAILURE:
		case I2O_SCSI_DSC_REQUEST_LENGTH_ERROR:
		case I2O_SCSI_DSC_PROVIDE_FAILURE:
		case I2O_SCSI_DSC_REQUEST_TERMINATED:
		case I2O_SCSI_DSC_IDE_MESSAGE_SENT:
		case I2O_SCSI_DSC_UNACKNOWLEDGED_EVENT:
		case I2O_SCSI_DSC_MESSAGE_RECEIVED:
		case I2O_SCSI_DSC_INVALID_CDB:
		case I2O_SCSI_DSC_LUN_INVALID:
		case I2O_SCSI_DSC_SCSI_TID_INVALID:
		case I2O_SCSI_DSC_FUNCTION_UNAVAILABLE:
		case I2O_SCSI_DSC_NO_NEXUS:
		case I2O_SCSI_DSC_CDB_RECEIVED:
		case I2O_SCSI_DSC_LUN_ALREADY_ENABLED:
		case I2O_SCSI_DSC_QUEUE_FROZEN:
		case I2O_SCSI_DSC_REQUEST_INVALID:
		default:
			printk(KERN_WARNING "%s: SCSI error %0x-Device(%d,%d,%llu) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
			       pHba->name, detailed_status & I2O_SCSI_DSC_MASK, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun,
			       hba_status, dev_status, cmd->cmnd[0]);
			cmd->result = (DID_ERROR << 16);
			break;
		}

		// copy over the request sense data if it was a check
		// condition status
		if (dev_status == SAM_STAT_CHECK_CONDITION) {
			u32 len = min(SCSI_SENSE_BUFFERSIZE, 40);
			// Copy over the sense data
			memcpy_fromio(cmd->sense_buffer, (reply+28), len);
			if (cmd->sense_buffer[0] == 0x70 /* class 7 */ &&
			    cmd->sense_buffer[2] == DATA_PROTECT) {
				/* This is to handle an array failed */
				cmd->result = (DID_TIME_OUT << 16);
				printk(KERN_WARNING "%s: SCSI Data Protect-Device (%d,%d,%llu) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
				       pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun,
				       hba_status, dev_status, cmd->cmnd[0]);
			}
		}
	} else {
		/* In this condition we could not talk to the tid -
		 * the card rejected it.  We should signal a retry
		 * for a limited number of retries.
		 */
		cmd->result = (DID_TIME_OUT << 16);
		printk(KERN_WARNING "%s: I2O MSG_FAIL - Device (%d,%d,%llu) tid=%d, cmd=0x%x\n",
		       pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun,
		       ((struct adpt_device*)(cmd->device->hostdata))->tid, cmd->cmnd[0]);
	}

	cmd->result |= (dev_status);

	if (cmd->scsi_done != NULL) {
		cmd->scsi_done(cmd);
	}
	return cmd->result;
}
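
/*
 * Reply-frame offsets relied on above (bytes): +0 message flags, +16
 * the detailed status word (HBA status in bits 15-8, SCSI device
 * status in bits 7-0), +20 the transfer count used for the residual,
 * and +28 the start of the in-frame sense data.
 */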

static s32 adpt_rescan(adpt_hba* pHba)
{
	s32 rcode;
	ulong flags = 0;

	if (pHba->host)
		spin_lock_irqsave(pHba->host->host_lock, flags);
	if ((rcode = adpt_i2o_lct_get(pHba)) < 0)
		goto out;
	if ((rcode = adpt_i2o_reparse_lct(pHba)) < 0)
		goto out;
	rcode = 0;
out:	if (pHba->host)
		spin_unlock_irqrestore(pHba->host->host_lock, flags);
	return rcode;
}

static s32 adpt_i2o_reparse_lct(adpt_hba* pHba)
{
	int i;
	int max;
	int tid;
	struct i2o_device *d;
	i2o_lct *lct = pHba->lct;
	u8 bus_no = 0;
	s16 scsi_id;
	u64 scsi_lun;
	u32 buf[10]; // at least 8 u32's
	struct adpt_device* pDev = NULL;
	struct i2o_device* pI2o_dev = NULL;

	if (lct == NULL) {
		printk(KERN_ERR "%s: LCT is empty???\n", pHba->name);
		return -1;
	}

	max = lct->table_size;
	max -= 3;
	max /= 9;

	// Mark each drive as unscanned
	for (d = pHba->devices; d; d = d->next) {
		pDev = (struct adpt_device*)d->owner;
		if (!pDev) {
			continue;
		}
		pDev->state |= DPTI_DEV_UNSCANNED;
	}

	printk(KERN_INFO "%s: LCT has %d entries.\n", pHba->name, max);

	for (i = 0; i < max; i++) {
		if (lct->lct_entry[i].user_tid != 0xfff) {
			continue;
		}

		if (lct->lct_entry[i].class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
		    lct->lct_entry[i].class_id == I2O_CLASS_SCSI_PERIPHERAL ||
		    lct->lct_entry[i].class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL) {
			tid = lct->lct_entry[i].tid;
			if (adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32) < 0) {
				printk(KERN_ERR "%s: Could not query device\n", pHba->name);
				continue;
			}
			bus_no = buf[0] >> 16;
			if (bus_no >= MAX_CHANNEL) {	/* Something wrong skip it */
				printk(KERN_WARNING
					"%s: Channel number %d out of range\n",
					pHba->name, bus_no);
				continue;
			}

			scsi_id = buf[1];
			scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
			pDev = pHba->channel[bus_no].device[scsi_id];
			/* da lun */
			while (pDev) {
				if (pDev->scsi_lun == scsi_lun) {
					break;
				}
				pDev = pDev->next_lun;
			}
			if (!pDev) { // Something new add it
				d = kmalloc(sizeof(struct i2o_device),
					    GFP_ATOMIC);
				if (d == NULL) {
					printk(KERN_CRIT "Out of memory for I2O device data.\n");
					return -ENOMEM;
				}

				d->controller = pHba;
				d->next = NULL;

				memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));

				d->flags = 0;
				adpt_i2o_report_hba_unit(pHba, d);
				adpt_i2o_install_device(pHba, d);

				pDev = pHba->channel[bus_no].device[scsi_id];
				if (pDev == NULL) {
					pDev =
					  kzalloc(sizeof(struct adpt_device),
						  GFP_ATOMIC);
					if (pDev == NULL) {
						return -ENOMEM;
					}
					pHba->channel[bus_no].device[scsi_id] = pDev;
				} else {
					while (pDev->next_lun) {
						pDev = pDev->next_lun;
					}
					pDev = pDev->next_lun =
					  kzalloc(sizeof(struct adpt_device),
						  GFP_ATOMIC);
					if (pDev == NULL) {
						return -ENOMEM;
					}
				}
				pDev->tid = d->lct_data.tid;
				pDev->scsi_channel = bus_no;
				pDev->scsi_id = scsi_id;
				pDev->scsi_lun = scsi_lun;
				pDev->pI2o_dev = d;
				d->owner = pDev;
				pDev->type = (buf[0]) & 0xff;
				pDev->flags = (buf[0] >> 8) & 0xff;
				// Too late, SCSI system has made up its mind, but what the hey ...
				if (scsi_id > pHba->top_scsi_id) {
					pHba->top_scsi_id = scsi_id;
				}
				if (scsi_lun > pHba->top_scsi_lun) {
					pHba->top_scsi_lun = scsi_lun;
				}
				continue;
			} // end of new i2o device

			// We found an old device - check it
			while (pDev) {
				if (pDev->scsi_lun == scsi_lun) {
					if (!scsi_device_online(pDev->pScsi_dev)) {
						printk(KERN_WARNING "%s: Setting device (%d,%d,%llu) back online\n",
						       pHba->name, bus_no, scsi_id, scsi_lun);
						if (pDev->pScsi_dev) {
							scsi_device_set_state(pDev->pScsi_dev, SDEV_RUNNING);
						}
					}
					d = pDev->pI2o_dev;
					if (d->lct_data.tid != tid) { // something changed
						pDev->tid = tid;
						memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
						if (pDev->pScsi_dev) {
							pDev->pScsi_dev->changed = TRUE;
							pDev->pScsi_dev->removable = TRUE;
						}
					}
					// Found it - mark it scanned
					pDev->state = DPTI_DEV_ONLINE;
					break;
				}
				pDev = pDev->next_lun;
			}
		}
	}
	for (pI2o_dev = pHba->devices; pI2o_dev; pI2o_dev = pI2o_dev->next) {
		pDev = (struct adpt_device*)pI2o_dev->owner;
		if (!pDev) {
			continue;
		}
		// Drive offline drives that previously existed but could not be found
		// in the LCT table
		if (pDev->state & DPTI_DEV_UNSCANNED) {
			pDev->state = DPTI_DEV_OFFLINE;
			printk(KERN_WARNING "%s: Device (%d,%d,%llu) offline\n", pHba->name, pDev->scsi_channel, pDev->scsi_id, pDev->scsi_lun);
			if (pDev->pScsi_dev) {
				scsi_device_set_state(pDev->pScsi_dev, SDEV_OFFLINE);
			}
		}
	}
	return 0;
}

static void adpt_fail_posted_scbs(adpt_hba* pHba)
{
	struct scsi_cmnd* cmd = NULL;
	struct scsi_device* d = NULL;

	shost_for_each_device(d, pHba->host) {
		unsigned long flags;

		spin_lock_irqsave(&d->list_lock, flags);
		list_for_each_entry(cmd, &d->cmd_list, list) {
			if (cmd->serial_number == 0) {
				continue;
			}
			cmd->result = (DID_OK << 16) | (QUEUE_FULL << 1);
			cmd->scsi_done(cmd);
		}
		spin_unlock_irqrestore(&d->list_lock, flags);
	}
}


/*============================================================================
 *  Routines from i2o subsystem
 *============================================================================
 */


/*
 *	Bring an I2O controller into HOLD state. See the spec.
 */
static int adpt_i2o_activate_hba(adpt_hba* pHba)
{
	int rcode;

	if (pHba->initialized) {
		if (adpt_i2o_status_get(pHba) < 0) {
			if ((rcode = adpt_i2o_reset_hba(pHba)) != 0) {
				printk(KERN_WARNING "%s: Could NOT reset.\n", pHba->name);
				return rcode;
			}
			if (adpt_i2o_status_get(pHba) < 0) {
				printk(KERN_INFO "HBA not responding.\n");
				return -1;
			}
		}

		if (pHba->status_block->iop_state == ADAPTER_STATE_FAULTED) {
			printk(KERN_CRIT "%s: hardware fault\n", pHba->name);
			return -1;
		}

		if (pHba->status_block->iop_state == ADAPTER_STATE_READY ||
		    pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL ||
		    pHba->status_block->iop_state == ADAPTER_STATE_HOLD ||
		    pHba->status_block->iop_state == ADAPTER_STATE_FAILED) {
			adpt_i2o_reset_hba(pHba);
			if (adpt_i2o_status_get(pHba) < 0 || pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
				printk(KERN_ERR "%s: Failed to initialize.\n", pHba->name);
				return -1;
			}
		}
	} else {
		if ((rcode = adpt_i2o_reset_hba(pHba)) != 0) {
			printk(KERN_WARNING "%s: Could NOT reset.\n", pHba->name);
			return rcode;
		}
	}

	if (adpt_i2o_init_outbound_q(pHba) < 0) {
		return -1;
	}

	/* In HOLD state */

	if (adpt_i2o_hrt_get(pHba) < 0) {
		return -1;
	}

	return 0;
}

/*
 *	Bring a controller online into OPERATIONAL state.
 */

static int adpt_i2o_online_hba(adpt_hba* pHba)
{
	if (adpt_i2o_systab_send(pHba) < 0)
		return -1;
	/* In READY state */

	if (adpt_i2o_enable_hba(pHba) < 0)
		return -1;

	/* In OPERATIONAL state */
	return 0;
}

static s32 adpt_send_nop(adpt_hba* pHba, u32 m)
{
	u32 __iomem *msg;
	ulong timeout = jiffies + 5*HZ;

	while (m == EMPTY_QUEUE) {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}
		if (time_after(jiffies, timeout)) {
			printk(KERN_ERR "%s: Timeout waiting for message frame!\n", pHba->name);
			return 2;
		}
		schedule_timeout_uninterruptible(1);
	}
	msg = (u32 __iomem *)(pHba->msg_addr_virt + m);
	writel(THREE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg[0]);
	writel(I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | 0, &msg[1]);
	writel(0, &msg[2]);
	wmb();

	writel(m, pHba->post_port);
	wmb();
	return 0;
}
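
/*
 * The NOP above is how a claimed-but-unusable message frame (MFA) is
 * handed back to the IOP: a minimal UtilNOP is posted through it so
 * the firmware recycles the frame.  Both the failed-message path in
 * adpt_isr() and the allocation-failure path in
 * adpt_i2o_init_outbound_q() rely on this.
 */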

static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba)
{
	u8 *status;
	dma_addr_t addr;
	u32 __iomem *msg = NULL;
	int i;
	ulong timeout = jiffies + TMOUT_INITOUTBOUND*HZ;
	u32 m;

	do {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}

		if (time_after(jiffies, timeout)) {
			printk(KERN_WARNING "%s: Timeout waiting for message frame\n", pHba->name);
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while (m == EMPTY_QUEUE);

	msg = (u32 __iomem *)(pHba->msg_addr_virt + m);

	status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
	if (!status) {
		adpt_send_nop(pHba, m);
		printk(KERN_WARNING "%s: IOP reset failed - no free memory.\n",
		       pHba->name);
		return -ENOMEM;
	}
	memset(status, 0, 4);

	writel(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_6, &msg[0]);
	writel(I2O_CMD_OUTBOUND_INIT << 24 | HOST_TID << 12 | ADAPTER_TID, &msg[1]);
	writel(0, &msg[2]);
	writel(0x0106, &msg[3]);	/* Transaction context */
	writel(4096, &msg[4]);		/* Host page frame size */
	writel((REPLY_FRAME_SIZE) << 16 | 0x80, &msg[5]);	/* Outbound msg frame size and Initcode */
	writel(0xD0000004, &msg[6]);	/* Simple SG LE, EOB */
	writel((u32)addr, &msg[7]);

	writel(m, pHba->post_port);
	wmb();

	// Wait for the reply status to come back
	do {
		if (*status) {
			if (*status != 0x01 /*I2O_EXEC_OUTBOUND_INIT_IN_PROGRESS*/) {
				break;
			}
		}
		rmb();
		if (time_after(jiffies, timeout)) {
			printk(KERN_WARNING "%s: Timeout Initializing\n", pHba->name);
			/* We lose 4 bytes of "status" here, but we
			   cannot free these because controller may
			   awake and corrupt those bytes at any time */
			/* dma_free_coherent(&pHba->pDev->dev, 4, status, addr); */
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while (1);

	// If the command was successful, fill the fifo with our reply
	// message packets
	if (*status != 0x04 /*I2O_EXEC_OUTBOUND_INIT_COMPLETE*/) {
		dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
		return -2;
	}
	dma_free_coherent(&pHba->pDev->dev, 4, status, addr);

	if (pHba->reply_pool != NULL) {
		dma_free_coherent(&pHba->pDev->dev,
				  pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
				  pHba->reply_pool, pHba->reply_pool_pa);
	}

	pHba->reply_pool = dma_alloc_coherent(&pHba->pDev->dev,
					      pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
					      &pHba->reply_pool_pa, GFP_KERNEL);
	if (!pHba->reply_pool) {
		printk(KERN_ERR "%s: Could not allocate reply pool\n", pHba->name);
		return -ENOMEM;
	}
	memset(pHba->reply_pool, 0, pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4);

	for (i = 0; i < pHba->reply_fifo_size; i++) {
		writel(pHba->reply_pool_pa + (i * REPLY_FRAME_SIZE * 4),
		       pHba->reply_port);
		wmb();
	}
	adpt_i2o_status_get(pHba);
	return 0;
}
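
/*
 * Priming loop above: each write to reply_port donates one empty
 * REPLY_FRAME_SIZE-word frame (by bus address) to the outbound FIFO.
 * adpt_isr() later pops those same addresses, consumes the reply and
 * pushes the frame back, so the pool circulates without reallocation.
 */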

/*
 * I2O System Table.  Contains information about
 * all the IOPs in the system.  Used to inform IOPs
 * about each other's existence.
 *
 * sys_tbl_ver is the CurrentChangeIndicator that is
 * used by IOPs to track changes.
 */


static s32 adpt_i2o_status_get(adpt_hba* pHba)
{
	ulong timeout;
	u32 m;
	u32 __iomem *msg;
	u8 *status_block = NULL;

	if (pHba->status_block == NULL) {
		pHba->status_block = dma_alloc_coherent(&pHba->pDev->dev,
							sizeof(i2o_status_block),
							&pHba->status_block_pa, GFP_KERNEL);
		if (pHba->status_block == NULL) {
			printk(KERN_ERR
			       "dpti%d: Get Status Block failed; Out of memory.\n",
			       pHba->unit);
			return -ENOMEM;
		}
	}
	memset(pHba->status_block, 0, sizeof(i2o_status_block));
	status_block = (u8*)(pHba->status_block);
	timeout = jiffies + TMOUT_GETSTATUS*HZ;
	do {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}
		if (time_after(jiffies, timeout)) {
			printk(KERN_ERR "%s: Timeout waiting for message!\n",
			       pHba->name);
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while (m == EMPTY_QUEUE);

	msg = (u32 __iomem *)(pHba->msg_addr_virt + m);

	writel(NINE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg[0]);
	writel(I2O_CMD_STATUS_GET << 24 | HOST_TID << 12 | ADAPTER_TID, &msg[1]);
	writel(1, &msg[2]);
	writel(0, &msg[3]);
	writel(0, &msg[4]);
	writel(0, &msg[5]);
	writel(dma_low(pHba->status_block_pa), &msg[6]);
	writel(dma_high(pHba->status_block_pa), &msg[7]);
	writel(sizeof(i2o_status_block), &msg[8]); // 88 bytes

	//post message
	writel(m, pHba->post_port);
	wmb();

	while (status_block[87] != 0xff) {
		if (time_after(jiffies, timeout)) {
			printk(KERN_ERR "dpti%d: Get status timeout.\n",
			       pHba->unit);
			return -ETIMEDOUT;
		}
		rmb();
		schedule_timeout_uninterruptible(1);
	}

	// Set up our number of outbound and inbound messages
	pHba->post_fifo_size = pHba->status_block->max_inbound_frames;
	if (pHba->post_fifo_size > MAX_TO_IOP_MESSAGES) {
		pHba->post_fifo_size = MAX_TO_IOP_MESSAGES;
	}

	pHba->reply_fifo_size = pHba->status_block->max_outbound_frames;
	if (pHba->reply_fifo_size > MAX_FROM_IOP_MESSAGES) {
		pHba->reply_fifo_size = MAX_FROM_IOP_MESSAGES;
	}

	// Calculate the Scatter Gather list size
	if (dpt_dma64(pHba)) {
		pHba->sg_tablesize
		  = ((pHba->status_block->inbound_frame_size * 4
		      - 14 * sizeof(u32))
		     / (sizeof(struct sg_simple_element) + sizeof(u32)));
	} else {
		pHba->sg_tablesize
		  = ((pHba->status_block->inbound_frame_size * 4
		      - 12 * sizeof(u32))
		     / sizeof(struct sg_simple_element));
	}
	if (pHba->sg_tablesize > SG_LIST_ELEMENTS) {
		pHba->sg_tablesize = SG_LIST_ELEMENTS;
	}

#ifdef DEBUG
	printk("dpti%d: State = ", pHba->unit);
	switch (pHba->status_block->iop_state) {
	case 0x01:
		printk("INIT\n");
		break;
	case 0x02:
		printk("RESET\n");
		break;
	case 0x04:
		printk("HOLD\n");
		break;
	case 0x05:
		printk("READY\n");
		break;
	case 0x08:
		printk("OPERATIONAL\n");
		break;
	case 0x10:
		printk("FAILED\n");
		break;
	case 0x11:
		printk("FAULTED\n");
		break;
	default:
		printk("%x (unknown!!)\n", pHba->status_block->iop_state);
	}
#endif
	return 0;
}
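
/*
 * Worked example for the sizing above (illustrative numbers, not read
 * from hardware): with an inbound_frame_size of 32 words the frame
 * holds 128 bytes.  In 64-bit mode the 14 u32 header words take 56
 * bytes, leaving 72, and each SG entry costs
 * sizeof(struct sg_simple_element) plus one extra u32 for the high
 * address half (12 bytes on typical builds), giving a tablesize of 6,
 * subject to the SG_LIST_ELEMENTS cap.
 */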

/*
 *	Get the IOP's Logical Configuration Table
 */
static int adpt_i2o_lct_get(adpt_hba* pHba)
{
	u32 msg[8];
	int ret;
	u32 buf[16];

	if ((pHba->lct_size == 0) || (pHba->lct == NULL)) {
		pHba->lct_size = pHba->status_block->expected_lct_size;
	}
	do {
		if (pHba->lct == NULL) {
			pHba->lct = dma_alloc_coherent(&pHba->pDev->dev,
						       pHba->lct_size, &pHba->lct_pa,
						       GFP_ATOMIC);
			if (pHba->lct == NULL) {
				printk(KERN_CRIT "%s: Lct Get failed. Out of memory.\n",
				       pHba->name);
				return -ENOMEM;
			}
		}
		memset(pHba->lct, 0, pHba->lct_size);

		msg[0] = EIGHT_WORD_MSG_SIZE | SGL_OFFSET_6;
		msg[1] = I2O_CMD_LCT_NOTIFY << 24 | HOST_TID << 12 | ADAPTER_TID;
		msg[2] = 0;
		msg[3] = 0;
		msg[4] = 0xFFFFFFFF;	/* All devices */
		msg[5] = 0x00000000;	/* Report now */
		msg[6] = 0xD0000000 | pHba->lct_size;
		msg[7] = (u32)pHba->lct_pa;

		if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 360))) {
			printk(KERN_ERR "%s: LCT Get failed (status=%#10x).\n",
			       pHba->name, ret);
			printk(KERN_ERR "Adaptec: Error Reading Hardware.\n");
			return ret;
		}

		if ((pHba->lct->table_size << 2) > pHba->lct_size) {
			int newsize = pHba->lct->table_size << 2;
			/* free with the size we allocated, then grow */
			dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
					  pHba->lct, pHba->lct_pa);
			pHba->lct_size = newsize;
			pHba->lct = NULL;
		}
	} while (pHba->lct == NULL);

	PDEBUG("%s: Hardware resource table read.\n", pHba->name);

	// I2O_DPT_EXEC_IOP_BUFFERS_GROUP_NO;
	if (adpt_i2o_query_scalar(pHba, 0, 0x8000, -1, buf, sizeof(buf)) >= 0) {
		pHba->FwDebugBufferSize = buf[1];
		pHba->FwDebugBuffer_P = ioremap(pHba->base_addr_phys + buf[0],
						pHba->FwDebugBufferSize);
		if (pHba->FwDebugBuffer_P) {
			pHba->FwDebugFlags_P = pHba->FwDebugBuffer_P +
						FW_DEBUG_FLAGS_OFFSET;
			pHba->FwDebugBLEDvalue_P = pHba->FwDebugBuffer_P +
						   FW_DEBUG_BLED_OFFSET;
			pHba->FwDebugBLEDflag_P = pHba->FwDebugBLEDvalue_P + 1;
			pHba->FwDebugStrLength_P = pHba->FwDebugBuffer_P +
						   FW_DEBUG_STR_LENGTH_OFFSET;
			pHba->FwDebugBuffer_P += buf[2];
			pHba->FwDebugFlags = 0;
		}
	}

	return 0;
}

static int adpt_i2o_build_sys_table(void)
{
	adpt_hba* pHba = hba_chain;
	int count = 0;

	if (sys_tbl)
		dma_free_coherent(&pHba->pDev->dev, sys_tbl_len,
				  sys_tbl, sys_tbl_pa);

	sys_tbl_len = sizeof(struct i2o_sys_tbl) +	// Header + IOPs
		      (hba_count) * sizeof(struct i2o_sys_tbl_entry);

	sys_tbl = dma_alloc_coherent(&pHba->pDev->dev,
				     sys_tbl_len, &sys_tbl_pa, GFP_KERNEL);
	if (!sys_tbl) {
		printk(KERN_WARNING "SysTab Set failed. Out of memory.\n");
		return -ENOMEM;
	}
	memset(sys_tbl, 0, sys_tbl_len);

	sys_tbl->num_entries = hba_count;
	sys_tbl->version = I2OVERSION;
	sys_tbl->change_ind = sys_tbl_ind++;

	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		u64 addr;
		// Get updated Status Block so we have the latest information
		if (adpt_i2o_status_get(pHba)) {
			sys_tbl->num_entries--;
			continue; // try next one
		}

		sys_tbl->iops[count].org_id = pHba->status_block->org_id;
		sys_tbl->iops[count].iop_id = pHba->unit + 2;
		sys_tbl->iops[count].seg_num = 0;
		sys_tbl->iops[count].i2o_version = pHba->status_block->i2o_version;
		sys_tbl->iops[count].iop_state = pHba->status_block->iop_state;
		sys_tbl->iops[count].msg_type = pHba->status_block->msg_type;
		sys_tbl->iops[count].frame_size = pHba->status_block->inbound_frame_size;
		sys_tbl->iops[count].last_changed = sys_tbl_ind - 1; // ??
		sys_tbl->iops[count].iop_capabilities = pHba->status_block->iop_capabilities;
		addr = pHba->base_addr_phys + 0x40;
		sys_tbl->iops[count].inbound_low = dma_low(addr);
		sys_tbl->iops[count].inbound_high = dma_high(addr);

		count++;
	}

#ifdef DEBUG
{
	u32 *table = (u32*)sys_tbl;
	printk(KERN_DEBUG "sys_tbl_len=%d in 32bit words\n", (sys_tbl_len >> 2));
	for (count = 0; count < (sys_tbl_len >> 2); count++) {
		printk(KERN_INFO "sys_tbl[%d] = %0#10x\n",
		       count, table[count]);
	}
}
#endif

	return 0;
}

/*
 *	 Dump the information block associated with a given unit (TID)
 */
static void adpt_i2o_report_hba_unit(adpt_hba* pHba, struct i2o_device *d)
{
	char buf[64];
	int unit = d->lct_data.tid;

	printk(KERN_INFO "TID %3.3d ", unit);

	if (adpt_i2o_query_scalar(pHba, unit, 0xF100, 3, buf, 16) >= 0) {
		buf[16] = 0;
		printk(" Vendor: %-12.12s", buf);
	}
	if (adpt_i2o_query_scalar(pHba, unit, 0xF100, 4, buf, 16) >= 0) {
		buf[16] = 0;
		printk(" Device: %-12.12s", buf);
	}
	if (adpt_i2o_query_scalar(pHba, unit, 0xF100, 6, buf, 8) >= 0) {
		buf[8] = 0;
		printk(" Rev: %-12.12s\n", buf);
	}
#ifdef DEBUG
	printk(KERN_INFO "\tClass: %.21s\n", adpt_i2o_get_class_name(d->lct_data.class_id));
	printk(KERN_INFO "\tSubclass: 0x%04X\n", d->lct_data.sub_class);
	printk(KERN_INFO "\tFlags: ");

	if (d->lct_data.device_flags & (1 << 0))
		printk("C");	// ConfigDialog requested
	if (d->lct_data.device_flags & (1 << 1))
		printk("U");	// Multi-user capable
	if (!(d->lct_data.device_flags & (1 << 4)))
		printk("P");	// Peer service enabled!
	if (!(d->lct_data.device_flags & (1 << 5)))
		printk("M");	// Mgmt service enabled!
	printk("\n");
#endif
}

#ifdef DEBUG
/*
 * Do i2o class name lookup
 */
static const char *adpt_i2o_get_class_name(int class)
{
	int idx = 16;
	static char *i2o_class_name[] = {
		"Executive",
		"Device Driver Module",
		"Block Device",
		"Tape Device",
		"LAN Interface",
		"WAN Interface",
		"Fibre Channel Port",
		"Fibre Channel Device",
		"SCSI Device",
		"ATE Port",
		"ATE Device",
		"Floppy Controller",
		"Floppy Device",
		"Secondary Bus Port",
		"Peer Transport Agent",
		"Peer Transport",
		"Unknown"
	};

	switch (class & 0xFFF) {
	case I2O_CLASS_EXECUTIVE:
		idx = 0; break;
	case I2O_CLASS_DDM:
		idx = 1; break;
	case I2O_CLASS_RANDOM_BLOCK_STORAGE:
		idx = 2; break;
	case I2O_CLASS_SEQUENTIAL_STORAGE:
		idx = 3; break;
	case I2O_CLASS_LAN:
		idx = 4; break;
	case I2O_CLASS_WAN:
		idx = 5; break;
	case I2O_CLASS_FIBRE_CHANNEL_PORT:
		idx = 6; break;
	case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
		idx = 7; break;
	case I2O_CLASS_SCSI_PERIPHERAL:
		idx = 8; break;
	case I2O_CLASS_ATE_PORT:
		idx = 9; break;
	case I2O_CLASS_ATE_PERIPHERAL:
		idx = 10; break;
	case I2O_CLASS_FLOPPY_CONTROLLER:
		idx = 11; break;
	case I2O_CLASS_FLOPPY_DEVICE:
		idx = 12; break;
	case I2O_CLASS_BUS_ADAPTER_PORT:
		idx = 13; break;
	case I2O_CLASS_PEER_TRANSPORT_AGENT:
		idx = 14; break;
	case I2O_CLASS_PEER_TRANSPORT:
		idx = 15; break;
	}
	return i2o_class_name[idx];
}
#endif

static s32 adpt_i2o_hrt_get(adpt_hba* pHba)
{
	u32 msg[6];
	int ret, size = sizeof(i2o_hrt);

	do {
		if (pHba->hrt == NULL) {
			pHba->hrt = dma_alloc_coherent(&pHba->pDev->dev,
						       size, &pHba->hrt_pa, GFP_KERNEL);
			if (pHba->hrt == NULL) {
				printk(KERN_CRIT "%s: Hrt Get failed; Out of memory.\n", pHba->name);
				return -ENOMEM;
			}
		}

		msg[0] = SIX_WORD_MSG_SIZE | SGL_OFFSET_4;
		msg[1] = I2O_CMD_HRT_GET << 24 | HOST_TID << 12 | ADAPTER_TID;
		msg[2] = 0;
		msg[3] = 0;
		msg[4] = (0xD0000000 | size);	/* Simple transaction */
		msg[5] = (u32)pHba->hrt_pa;	/* Dump it here */

		if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 20))) {
			printk(KERN_ERR "%s: Unable to get HRT (status=%#10x)\n", pHba->name, ret);
			return ret;
		}

		if (pHba->hrt->num_entries * pHba->hrt->entry_len << 2 > size) {
			int newsize = pHba->hrt->num_entries * pHba->hrt->entry_len << 2;
			dma_free_coherent(&pHba->pDev->dev, size,
					  pHba->hrt, pHba->hrt_pa);
			size = newsize;
			pHba->hrt = NULL;
		}
	} while (pHba->hrt == NULL);
	return 0;
}

/*
 *	 Query one scalar group value or a whole scalar group.
 */
static int adpt_i2o_query_scalar(adpt_hba* pHba, int tid,
				 int group, int field, void *buf, int buflen)
{
	u16 opblk[] = { 1, 0, I2O_PARAMS_FIELD_GET, group, 1, field };
	u8 *opblk_va;
	dma_addr_t opblk_pa;
	u8 *resblk_va;
	dma_addr_t resblk_pa;

	int size;

	/* 8 bytes for header */
	resblk_va = dma_alloc_coherent(&pHba->pDev->dev,
				       sizeof(u8) * (8 + buflen), &resblk_pa, GFP_KERNEL);
	if (resblk_va == NULL) {
		printk(KERN_CRIT "%s: query scalar failed; Out of memory.\n", pHba->name);
		return -ENOMEM;
	}

	opblk_va = dma_alloc_coherent(&pHba->pDev->dev,
				      sizeof(opblk), &opblk_pa, GFP_KERNEL);
	if (opblk_va == NULL) {
		dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
				  resblk_va, resblk_pa);
		printk(KERN_CRIT "%s: query operation failed; Out of memory.\n",
		       pHba->name);
		return -ENOMEM;
	}
	if (field == -1)		/* whole group */
		opblk[4] = -1;

	memcpy(opblk_va, opblk, sizeof(opblk));
	size = adpt_i2o_issue_params(I2O_CMD_UTIL_PARAMS_GET, pHba, tid,
				     opblk_va, opblk_pa, sizeof(opblk),
				     resblk_va, resblk_pa, sizeof(u8)*(8+buflen));
	dma_free_coherent(&pHba->pDev->dev, sizeof(opblk), opblk_va, opblk_pa);
	if (size == -ETIME) {
		dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
				  resblk_va, resblk_pa);
		printk(KERN_WARNING "%s: issue params failed; Timed out.\n", pHba->name);
		return -ETIME;
	} else if (size == -EINTR) {
		dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
				  resblk_va, resblk_pa);
		printk(KERN_WARNING "%s: issue params failed; Interrupted.\n", pHba->name);
		return -EINTR;
	}

	memcpy(buf, resblk_va+8, buflen);	/* cut off header */

	dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
			  resblk_va, resblk_pa);
	if (size < 0)
		return size;

	return buflen;
}
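
/*
 * Layout of the operation block sent above (six u16 words): operation
 * count (1), reserved (0), operation code (I2O_PARAMS_FIELD_GET), the
 * group number, the field count, and the field index.  Passing
 * field == -1 rewrites the field count to -1, which asks for every
 * field in the group - this is how adpt_i2o_reparse_lct() pulls the
 * whole 0x8000 group in one call.
 */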


/* Issue UTIL_PARAMS_GET or UTIL_PARAMS_SET
 *
 * This function can be used for all UtilParamsGet/Set operations.
 * The OperationBlock is given in opblk-buffer,
 * and results are returned in resblk-buffer.
 * Note that the minimum sized resblk is 8 bytes and contains
 * ResultCount, ErrorInfoSize, BlockStatus and BlockSize.
 */
static int adpt_i2o_issue_params(int cmd, adpt_hba* pHba, int tid,
				 void *opblk_va, dma_addr_t opblk_pa, int oplen,
				 void *resblk_va, dma_addr_t resblk_pa, int reslen)
{
	u32 msg[9];
	u32 *res = (u32 *)resblk_va;
	int wait_status;

	msg[0] = NINE_WORD_MSG_SIZE | SGL_OFFSET_5;
	msg[1] = cmd << 24 | HOST_TID << 12 | tid;
	msg[2] = 0;
	msg[3] = 0;
	msg[4] = 0;
	msg[5] = 0x54000000 | oplen;	/* OperationBlock */
	msg[6] = (u32)opblk_pa;
	msg[7] = 0xD0000000 | reslen;	/* ResultBlock */
	msg[8] = (u32)resblk_pa;

	if ((wait_status = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 20))) {
		printk("adpt_i2o_issue_params: post_wait failed (%p)\n", resblk_va);
		return wait_status;	/* -DetailedStatus */
	}

	if (res[1] & 0x00FF0000) {	/* BlockStatus != SUCCESS */
		printk(KERN_WARNING "%s: %s - Error:\n ErrorInfoSize = 0x%02x, "
		       "BlockStatus = 0x%02x, BlockSize = 0x%04x\n",
		       pHba->name,
		       (cmd == I2O_CMD_UTIL_PARAMS_SET) ? "PARAMS_SET"
							: "PARAMS_GET",
		       res[1] >> 24, (res[1] >> 16) & 0xFF, res[1] & 0xFFFF);
		return -((res[1] >> 16) & 0xFF);	/* -BlockStatus */
	}

	return 4 + ((res[1] & 0x0000FFFF) << 2);	/* bytes used in resblk */
}
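
/*
 * Decoding of res[1] above: ErrorInfoSize lives in bits 31-24,
 * BlockStatus in bits 23-16 and BlockSize (in 32-bit words) in bits
 * 15-0, so the success return converts BlockSize to bytes and adds
 * 4 bytes for the leading ResultCount word of the result block.
 */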


static s32 adpt_i2o_quiesce_hba(adpt_hba* pHba)
{
	u32 msg[4];
	int ret;

	adpt_i2o_status_get(pHba);

	/* SysQuiesce discarded if IOP not in READY or OPERATIONAL state */

	if ((pHba->status_block->iop_state != ADAPTER_STATE_READY) &&
	    (pHba->status_block->iop_state != ADAPTER_STATE_OPERATIONAL)) {
		return 0;
	}

	msg[0] = FOUR_WORD_MSG_SIZE | SGL_OFFSET_0;
	msg[1] = I2O_CMD_SYS_QUIESCE << 24 | HOST_TID << 12 | ADAPTER_TID;
	msg[2] = 0;
	msg[3] = 0;

	if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
		printk(KERN_INFO "dpti%d: Unable to quiesce (status=%#x).\n",
		       pHba->unit, -ret);
	} else {
		printk(KERN_INFO "dpti%d: Quiesced.\n", pHba->unit);
	}

	adpt_i2o_status_get(pHba);
	return ret;
}

/*
 * Enable IOP. Allows the IOP to resume external operations.
 */
static int adpt_i2o_enable_hba(adpt_hba* pHba)
{
	u32 msg[4];
	int ret;

	adpt_i2o_status_get(pHba);
	if (!pHba->status_block) {
		return -ENOMEM;
	}
	/* Enable only allowed on READY state */
	if (pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL)
		return 0;

	if (pHba->status_block->iop_state != ADAPTER_STATE_READY)
		return -EINVAL;

	msg[0] = FOUR_WORD_MSG_SIZE | SGL_OFFSET_0;
	msg[1] = I2O_CMD_SYS_ENABLE << 24 | HOST_TID << 12 | ADAPTER_TID;
	msg[2] = 0;
	msg[3] = 0;

	if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
		printk(KERN_WARNING "%s: Could not enable (status=%#10x).\n",
		       pHba->name, ret);
	} else {
		PDEBUG("%s: Enabled.\n", pHba->name);
	}

	adpt_i2o_status_get(pHba);
	return ret;
}


static int adpt_i2o_systab_send(adpt_hba* pHba)
{
	u32 msg[12];
	int ret;

	msg[0] = I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6;
	msg[1] = I2O_CMD_SYS_TAB_SET << 24 | HOST_TID << 12 | ADAPTER_TID;
	msg[2] = 0;
	msg[3] = 0;
	msg[4] = (0 << 16) | ((pHba->unit + 2) << 12);	/* Host 0 IOP ID (unit + 2) */
	msg[5] = 0;					/* Segment 0 */

	/*
	 * Provide three SGL-elements:
	 * System table (SysTab), Private memory space declaration and
	 * Private i/o space declaration
	 */
	msg[6] = 0x54000000 | sys_tbl_len;
	msg[7] = (u32)sys_tbl_pa;
	msg[8] = 0x54000000 | 0;
	msg[9] = 0;
	msg[10] = 0xD4000000 | 0;
	msg[11] = 0;

	if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 120))) {
		printk(KERN_INFO "%s: Unable to set SysTab (status=%#10x).\n",
		       pHba->name, ret);
	}
#ifdef DEBUG
	else {
		PINFO("%s: SysTab set.\n", pHba->name);
	}
#endif

	return ret;
}


/*============================================================================
 *
 *============================================================================
 */


#ifdef UARTDELAY

static void adpt_delay(int millisec)
{
	int i;
	for (i = 0; i < millisec; i++) {
		udelay(1000);	/* delay for one millisecond */
	}
}

#endif

static struct scsi_host_template driver_template = {
	.module			= THIS_MODULE,
	.name			= "dpt_i2o",
	.proc_name		= "dpt_i2o",
	.show_info		= adpt_show_info,
	.info			= adpt_info,
	.queuecommand		= adpt_queue,
	.eh_abort_handler	= adpt_abort,
	.eh_device_reset_handler = adpt_device_reset,
	.eh_bus_reset_handler	= adpt_bus_reset,
	.eh_host_reset_handler	= adpt_reset,
	.bios_param		= adpt_bios_param,
	.slave_configure	= adpt_slave_configure,
	.can_queue		= MAX_TO_IOP_MESSAGES,
	.this_id		= 7,
};

static int __init adpt_init(void)
{
	int error;
	adpt_hba *pHba, *next;

	printk("Loading Adaptec I2O RAID: Version " DPT_I2O_VERSION "\n");

	error = adpt_detect(&driver_template);
	if (error < 0)
		return error;
	if (hba_chain == NULL)
		return -ENODEV;

	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		error = scsi_add_host(pHba->host, &pHba->pDev->dev);
		if (error)
			goto fail;
		scsi_scan_host(pHba->host);
	}
	return 0;
fail:
	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		scsi_remove_host(pHba->host);
	}
	return error;
}

static void __exit adpt_exit(void)
{
	adpt_hba *pHba, *next;

	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		adpt_release(pHba);
	}
}

module_init(adpt_init);
module_exit(adpt_exit);

MODULE_LICENSE("GPL");