/***************************************************************************
                          dpti.c  -  description
                             -------------------
    begin                : Thu Sep 7 2000
    copyright            : (C) 2000 by Adaptec

			   July 30, 2001 First version being submitted
			   for inclusion in the kernel.  V2.4

    See Documentation/scsi/dpti.txt for history, notes, license info
    and credits
 ***************************************************************************/
/***************************************************************************
 *                                                                         *
 *   This program is free software; you can redistribute it and/or modify *
 *   it under the terms of the GNU General Public License as published by *
 *   the Free Software Foundation; either version 2 of the License, or    *
 *   (at your option) any later version.                                  *
 *                                                                         *
 ***************************************************************************/
/***************************************************************************
 * Sat Dec 20 2003 Go Taniguchi <go@turbolinux.co.jp>
 - Support 2.6 kernel and DMA-mapping
 - ioctl fix for raid tools
 - use schedule_timeout in long long loop
 **************************************************************************/

/*#define DEBUG 1 */
/*#define UARTDELAY 1 */

#include <linux/module.h>

MODULE_AUTHOR("Deanna Bonds, with _lots_ of help from Mark Salyzyn");
MODULE_DESCRIPTION("Adaptec I2O RAID Driver");

////////////////////////////////////////////////////////////////

#include <linux/ioctl.h>	/* For SCSI-Passthrough */
#include <linux/uaccess.h>

#include <linux/stat.h>
#include <linux/slab.h>		/* for kmalloc() */
#include <linux/pci.h>		/* for PCI support */
#include <linux/proc_fs.h>
#include <linux/blkdev.h>
#include <linux/delay.h>	/* for udelay */
#include <linux/interrupt.h>
#include <linux/kernel.h>	/* for printk */
#include <linux/sched.h>
#include <linux/reboot.h>
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>

#include <linux/timer.h>
#include <linux/string.h>
#include <linux/ioport.h>
#include <linux/mutex.h>

#include <asm/processor.h>	/* for boot_cpu_data */
#include <asm/pgtable.h>
#include <asm/io.h>		/* for virt_to_bus, etc. */

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>

#include "dpt/dptsig.h"
#include "dpti.h"

/*============================================================================
 * Create a binary signature - this is read by dptsig
 * Needed for our management apps
 *============================================================================
 */
static DEFINE_MUTEX(adpt_mutex);
static dpt_sig_S DPTI_sig = {
	{'d', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION,
#ifdef __i386__
	PROC_INTEL, PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM,
#elif defined(__ia64__)
	PROC_INTEL, PROC_IA64,
#elif defined(__sparc__)
	PROC_ULTRASPARC, PROC_ULTRASPARC,
#elif defined(__alpha__)
	PROC_ALPHA, PROC_ALPHA,
#else
	(-1), (-1),
#endif
	FT_HBADRVR, 0, OEM_DPT, OS_LINUX, CAP_OVERLAP, DEV_ALL,
	ADF_ALL_SC5, 0, 0, DPT_VERSION, DPT_REVISION, DPT_SUBREVISION,
	DPT_MONTH, DPT_DAY, DPT_YEAR, "Adaptec Linux I2O RAID Driver"
};

/*============================================================================
 * Globals
 *============================================================================
 */

static DEFINE_MUTEX(adpt_configuration_lock);

static struct i2o_sys_tbl *sys_tbl;
static dma_addr_t sys_tbl_pa;
static int sys_tbl_ind;
static int sys_tbl_len;

static adpt_hba* hba_chain = NULL;
static int hba_count = 0;

static struct class *adpt_sysfs_class;

static long adpt_unlocked_ioctl(struct file *, unsigned int, unsigned long);
#ifdef CONFIG_COMPAT
static long compat_adpt_ioctl(struct file *, unsigned int, unsigned long);
#endif

static const struct file_operations adpt_fops = {
	.unlocked_ioctl	= adpt_unlocked_ioctl,
	.open		= adpt_open,
	.release	= adpt_close,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= compat_adpt_ioctl,
#endif
	.llseek		= noop_llseek,
};

/* Structures and definitions for synchronous message posting.
 * See adpt_i2o_post_wait() for description
 */
struct adpt_i2o_post_wait_data
{
	int status;
	u32 id;
	adpt_wait_queue_head_t *wq;
	struct adpt_i2o_post_wait_data *next;
};

static struct adpt_i2o_post_wait_data *adpt_post_wait_queue = NULL;
static u32 adpt_post_wait_id = 0;
static DEFINE_SPINLOCK(adpt_post_wait_lock);
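
/*
 * A minimal usage sketch of this post/wait machinery (illustrative only;
 * "tid" stands for a real target id).  A caller builds an I2O message and
 * adpt_i2o_post_wait() tags word 2 with 0x80000000 | id so the ISR can
 * route the reply back to the sleeping thread:
 *
 *	u32 msg[4];
 *	msg[0] = FOUR_WORD_MSG_SIZE | SGL_OFFSET_0;
 *	msg[1] = I2O_CMD_SCSI_ABORT << 24 | HOST_TID << 12 | tid;
 *	msg[2] = 0;	// transaction context, filled in by post_wait
 *	msg[3] = 0;
 *	rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER);
 *
 * On completion adpt_isr() sees bit 0x80000000 in the reply context and
 * calls adpt_i2o_post_wait_complete(), which wakes the sleeper.
 */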

/*============================================================================
 * 				Functions
 *============================================================================
 */
static inline int dpt_dma64(adpt_hba *pHba)
{
	return (sizeof(dma_addr_t) > 4 && (pHba)->dma64);
}

static inline u32 dma_high(dma_addr_t addr)
{
	return upper_32_bits(addr);
}

static inline u32 dma_low(dma_addr_t addr)
{
	return (u32)addr;
}
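
/*
 * Example split (assumed address, not read from hardware): for a 64-bit
 * bus address 0x0000000123456789ULL the two SGE words would be
 *	dma_low(addr)  == 0x23456789	(bits 31..0)
 *	dma_high(addr) == 0x00000001	(bits 63..32)
 * With a 32-bit dma_addr_t, dpt_dma64() is always false and only the
 * low word is ever used.
 */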

static u8 adpt_read_blink_led(adpt_hba* host)
{
	if (host->FwDebugBLEDflag_P) {
		if( readb(host->FwDebugBLEDflag_P) == 0xbc ){
			return readb(host->FwDebugBLEDvalue_P);
		}
	}
	return 0;
}

/*============================================================================
 * Scsi host template interface functions
 *============================================================================
 */

#ifdef MODULE
static struct pci_device_id dptids[] = {
	{ PCI_DPT_VENDOR_ID, PCI_DPT_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
	{ PCI_DPT_VENDOR_ID, PCI_DPT_RAPTOR_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
	{ 0, }
};
#endif

MODULE_DEVICE_TABLE(pci,dptids);

static int adpt_detect(struct scsi_host_template* sht)
{
	struct pci_dev *pDev = NULL;
	adpt_hba *pHba;
	adpt_hba *next;

	PINFO("Detecting Adaptec I2O RAID controllers...\n");

	/* search for all Adaptec I2O RAID cards */
	while ((pDev = pci_get_device( PCI_DPT_VENDOR_ID, PCI_ANY_ID, pDev))) {
		if(pDev->device == PCI_DPT_DEVICE_ID ||
		   pDev->device == PCI_DPT_RAPTOR_DEVICE_ID){
			if(adpt_install_hba(sht, pDev) ){
				PERROR("Could not Init an I2O RAID device\n");
				PERROR("Will not try to detect others.\n");
				return hba_count-1;
			}
			pci_dev_get(pDev);
		}
	}

	/* In INIT state, Activate IOPs */
	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		// Activate does get status , init outbound, and get hrt
		if (adpt_i2o_activate_hba(pHba) < 0) {
			adpt_i2o_delete_hba(pHba);
		}
	}


	/* Active IOPs in HOLD state */

rebuild_sys_tab:
	if (hba_chain == NULL)
		return 0;

	/*
	 * If build_sys_table fails, we kill everything and bail
	 * as we can't init the IOPs w/o a system table
	 */
	if (adpt_i2o_build_sys_table() < 0) {
		adpt_i2o_sys_shutdown();
		return 0;
	}

	PDEBUG("HBA's in HOLD state\n");

	/* If an IOP doesn't get online, we need to rebuild the System table */
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (adpt_i2o_online_hba(pHba) < 0) {
			adpt_i2o_delete_hba(pHba);
			goto rebuild_sys_tab;
		}
	}

	/* Active IOPs now in OPERATIONAL state */
	PDEBUG("HBA's in OPERATIONAL state\n");

	printk("dpti: If you have a lot of devices this could take a few minutes.\n");
	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		printk(KERN_INFO"%s: Reading the hardware resource table.\n", pHba->name);
		if (adpt_i2o_lct_get(pHba) < 0){
			adpt_i2o_delete_hba(pHba);
			continue;
		}

		if (adpt_i2o_parse_lct(pHba) < 0){
			adpt_i2o_delete_hba(pHba);
			continue;
		}
		adpt_inquiry(pHba);
	}

	adpt_sysfs_class = class_create(THIS_MODULE, "dpt_i2o");
	if (IS_ERR(adpt_sysfs_class)) {
		printk(KERN_WARNING"dpti: unable to create dpt_i2o class\n");
		adpt_sysfs_class = NULL;
	}

	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		if (adpt_scsi_host_alloc(pHba, sht) < 0){
			adpt_i2o_delete_hba(pHba);
			continue;
		}
		pHba->initialized = TRUE;
		pHba->state &= ~DPTI_STATE_RESET;
		if (adpt_sysfs_class) {
			struct device *dev = device_create(adpt_sysfs_class,
				NULL, MKDEV(DPTI_I2O_MAJOR, pHba->unit), NULL,
				"dpti%d", pHba->unit);
			if (IS_ERR(dev)) {
				printk(KERN_WARNING"dpti%d: unable to "
					"create device in dpt_i2o class\n",
					pHba->unit);
			}
		}
	}

	// Register our control device node
	// nodes will need to be created in /dev to access this
	// the nodes can not be created from within the driver
	if (hba_count && register_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER, &adpt_fops)) {
		adpt_i2o_sys_shutdown();
		return 0;
	}
	return hba_count;
}

static void adpt_release(adpt_hba *pHba)
{
	struct Scsi_Host *shost = pHba->host;

	scsi_remove_host(shost);
//	adpt_i2o_quiesce_hba(pHba);
	adpt_i2o_delete_hba(pHba);
	scsi_host_put(shost);
}

static void adpt_inquiry(adpt_hba* pHba)
{
	u32 msg[17];
	u32 *mptr;
	u32 *lenptr;
	int direction;
	int scsidir;
	u32 len;
	u32 reqlen;
	u8* buf;
	dma_addr_t addr;
	u8 scb[16];
	s32 rcode;

	memset(msg, 0, sizeof(msg));
	buf = dma_alloc_coherent(&pHba->pDev->dev, 80, &addr, GFP_KERNEL);
	if(!buf){
		printk(KERN_ERR"%s: Could not allocate buffer\n",pHba->name);
		return;
	}
	memset((void*)buf, 0, 36);

	len = 36;
	direction = 0x00000000;
	scsidir  = 0x40000000;	// DATA IN  (iop<--dev)

	if (dpt_dma64(pHba))
		reqlen = 17;		// SINGLE SGE, 64 bit
	else
		reqlen = 14;		// SINGLE SGE, 32 bit
	/* Stick the headers on */
	msg[0] = reqlen<<16 | SGL_OFFSET_12;
	msg[1] = (0xff<<24|HOST_TID<<12|ADAPTER_TID);
	msg[2] = 0;
	msg[3] = 0;
	// Adaptec/DPT Private stuff
	msg[4] = I2O_CMD_SCSI_EXEC|DPT_ORGANIZATION_ID<<16;
	msg[5] = ADAPTER_TID | 1<<16 /* Interpret*/;
	/* Direction, disconnect ok | sense data | simple queue , CDBLen */
	// I2O_SCB_FLAG_ENABLE_DISCONNECT |
	// I2O_SCB_FLAG_SIMPLE_QUEUE_TAG |
	// I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
	msg[6] = scsidir|0x20a00000| 6 /* cmd len*/;

	mptr = msg+7;

	memset(scb, 0, sizeof(scb));
	// Write SCSI command into the message - always 16 byte block
	scb[0] = INQUIRY;
	scb[1] = 0;
	scb[2] = 0;
	scb[3] = 0;
	scb[4] = 36;
	scb[5] = 0;
	// Don't care about the rest of scb

	memcpy(mptr, scb, sizeof(scb));
	mptr += 4;
	lenptr = mptr++;	/* Remember me - fill in when we know */

	/* Now fill in the SGList and command */
	*lenptr = len;
	if (dpt_dma64(pHba)) {
		*mptr++ = (0x7C<<24)+(2<<16)+0x02; /* Enable 64 bit */
		*mptr++ = 1 << PAGE_SHIFT;
		*mptr++ = 0xD0000000|direction|len;
		*mptr++ = dma_low(addr);
		*mptr++ = dma_high(addr);
	} else {
		*mptr++ = 0xD0000000|direction|len;
		*mptr++ = addr;
	}

	// Send it on its way
	rcode = adpt_i2o_post_wait(pHba, msg, reqlen<<2, 120);
	if (rcode != 0) {
		sprintf(pHba->detail, "Adaptec I2O RAID");
		printk(KERN_INFO "%s: Inquiry Error (%d)\n",pHba->name,rcode);
		if (rcode != -ETIME && rcode != -EINTR)
			dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
	} else {
		memset(pHba->detail, 0, sizeof(pHba->detail));
		memcpy(&(pHba->detail), "Vendor: Adaptec ", 16);
		memcpy(&(pHba->detail[16]), " Model: ", 8);
		memcpy(&(pHba->detail[24]), (u8*) &buf[16], 16);
		memcpy(&(pHba->detail[40]), " FW: ", 4);
		memcpy(&(pHba->detail[44]), (u8*) &buf[32], 4);
		pHba->detail[48] = '\0';	/* precautionary */
		dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
	}
	adpt_i2o_status_get(pHba);
	return ;
}
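
/*
 * Layout recap of the frame built above (word counts follow from the code,
 * not from any spec text): words 0-6 carry the I2O and private SCSI_EXEC
 * headers, words 7-10 the 16-byte CDB, word 11 the transfer length, and
 * the SG list starts at word 12 (SGL_OFFSET_12).  A 32-bit single SGE
 * needs two words (total reqlen 14); the 64-bit form adds the two-word
 * "enable 64 bit" element plus a third address word (total reqlen 17).
 */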

static int adpt_slave_configure(struct scsi_device * device)
{
	struct Scsi_Host *host = device->host;
	adpt_hba* pHba;

	pHba = (adpt_hba *) host->hostdata[0];

	if (host->can_queue && device->tagged_supported) {
		scsi_change_queue_depth(device,
				host->can_queue - 1);
	}
	return 0;
}

static int adpt_queue_lck(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd *))
{
	adpt_hba* pHba = NULL;
	struct adpt_device* pDev = NULL;	/* dpt per device information */

	cmd->scsi_done = done;
	/*
	 * SCSI REQUEST_SENSE commands will be executed automatically by the
	 * Host Adapter for any errors, so they should not be executed
	 * explicitly unless the Sense Data is zero indicating that no error
	 * occurred.
	 */

	if ((cmd->cmnd[0] == REQUEST_SENSE) && (cmd->sense_buffer[0] != 0)) {
		cmd->result = (DID_OK << 16);
		cmd->scsi_done(cmd);
		return 0;
	}

	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
	if (!pHba) {
		return FAILED;
	}

	rmb();
	if ((pHba->state) & DPTI_STATE_RESET)
		return SCSI_MLQUEUE_HOST_BUSY;

	// TODO if the cmd->device is offline then I may need to issue a bus rescan
	// followed by a get_lct to see if the device is there anymore
	if((pDev = (struct adpt_device*) (cmd->device->hostdata)) == NULL) {
		/*
		 * First command request for this device.  Set up a pointer
		 * to the device structure.  This should be a TEST_UNIT_READY
		 * command from scan_scsis_single.
		 */
		if ((pDev = adpt_find_device(pHba, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun)) == NULL) {
			// TODO: if any luns are at this bus, scsi id then fake a TEST_UNIT_READY and INQUIRY response
			// with type 7F (for all luns less than the max for this bus,id) so the lun scan will continue.
			cmd->result = (DID_NO_CONNECT << 16);
			cmd->scsi_done(cmd);
			return 0;
		}
		cmd->device->hostdata = pDev;
	}
	pDev->pScsi_dev = cmd->device;

	/*
	 * If we are being called from when the device is being reset,
	 * delay processing of the command until later.
	 */
	if (pDev->state & DPTI_DEV_RESET ) {
		return FAILED;
	}
	return adpt_scsi_to_i2o(pHba, cmd, pDev);
}

static DEF_SCSI_QCMD(adpt_queue)

static int adpt_bios_param(struct scsi_device *sdev, struct block_device *dev,
		sector_t capacity, int geom[])
{
	int heads = -1;
	int sectors = -1;
	int cylinders = -1;

	// *** First let's set the default geometry ****

	// If the capacity is less than 0x2000
	if (capacity < 0x2000 ) {	// floppy
		heads = 18;
		sectors = 2;
	}
	// else if between 0x2000 and 0x20000
	else if (capacity < 0x20000) {
		heads = 64;
		sectors = 32;
	}
	// else if between 0x20000 and 0x40000
	else if (capacity < 0x40000) {
		heads = 65;
		sectors = 63;
	}
	// else if between 0x40000 and 0x80000
	else if (capacity < 0x80000) {
		heads = 128;
		sectors = 63;
	}
	// else if greater than 0x80000
	else {
		heads = 255;
		sectors = 63;
	}
	/* sector_div() leaves the quotient in capacity and returns the
	 * remainder, so pick up the cylinder count afterwards */
	sector_div(capacity, heads * sectors);
	cylinders = (int)capacity;

	// Special case if CDROM
	if(sdev->type == 5) {	// CDROM
		heads = 252;
		sectors = 63;
		cylinders = 1111;
	}

	geom[0] = heads;
	geom[1] = sectors;
	geom[2] = cylinders;

	PDEBUG("adpt_bios_param: exit\n");
	return 0;
}
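
/*
 * Worked example (hypothetical capacity): a 1 GiB disk has 0x200000
 * 512-byte sectors; that is >= 0x80000, so the table above yields
 * heads = 255 and sectors = 63, giving
 *	cylinders = 0x200000 / (255 * 63) = 130
 */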

static const char *adpt_info(struct Scsi_Host *host)
{
	adpt_hba* pHba;

	pHba = (adpt_hba *) host->hostdata[0];
	return (char *) (pHba->detail);
}

static int adpt_show_info(struct seq_file *m, struct Scsi_Host *host)
{
	struct adpt_device* d;
	int id;
	int chan;
	adpt_hba* pHba;
	int unit;

	// Find HBA (host bus adapter) we are looking for
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->host == host) {
			break;	/* found adapter */
		}
	}
	mutex_unlock(&adpt_configuration_lock);
	if (pHba == NULL) {
		return 0;
	}
	host = pHba->host;

	seq_printf(m, "Adaptec I2O RAID Driver Version: %s\n\n", DPT_I2O_VERSION);
	seq_printf(m, "%s\n", pHba->detail);
	seq_printf(m, "SCSI Host=scsi%d  Control Node=/dev/%s  irq=%d\n",
			pHba->host->host_no, pHba->name, host->irq);
	seq_printf(m, "\tpost fifo size  = %d\n\treply fifo size = %d\n\tsg table size   = %d\n\n",
			host->can_queue, (int) pHba->reply_fifo_size , host->sg_tablesize);

	seq_puts(m, "Devices:\n");
	for(chan = 0; chan < MAX_CHANNEL; chan++) {
		for(id = 0; id < MAX_ID; id++) {
			d = pHba->channel[chan].device[id];
			while(d) {
				seq_printf(m,"\t%-24.24s", d->pScsi_dev->vendor);
				seq_printf(m," Rev: %-8.8s\n", d->pScsi_dev->rev);

				unit = d->pI2o_dev->lct_data.tid;
				seq_printf(m, "\tTID=%d, (Channel=%d, Target=%d, Lun=%llu)  (%s)\n\n",
					       unit, (int)d->scsi_channel, (int)d->scsi_id, d->scsi_lun,
					       scsi_device_online(d->pScsi_dev)? "online":"offline");
				d = d->next_lun;
			}
		}
	}
	return 0;
}

/*
 *	Turn a pointer to ioctl reply data into an u32 'context'
 */
static u32 adpt_ioctl_to_context(adpt_hba * pHba, void *reply)
{
#if BITS_PER_LONG == 32
	return (u32)(unsigned long)reply;
#else
	ulong flags = 0;
	u32 nr, i;

	spin_lock_irqsave(pHba->host->host_lock, flags);
	nr = ARRAY_SIZE(pHba->ioctl_reply_context);
	for (i = 0; i < nr; i++) {
		if (pHba->ioctl_reply_context[i] == NULL) {
			pHba->ioctl_reply_context[i] = reply;
			break;
		}
	}
	spin_unlock_irqrestore(pHba->host->host_lock, flags);
	if (i >= nr) {
		printk(KERN_WARNING"%s: Too many outstanding "
				"ioctl commands\n", pHba->name);
		return (u32)-1;
	}

	return i;
#endif
}

/*
 *	Go from an u32 'context' to a pointer to ioctl reply data.
 */
static void *adpt_ioctl_from_context(adpt_hba *pHba, u32 context)
{
#if BITS_PER_LONG == 32
	return (void *)(unsigned long)context;
#else
	void *p = pHba->ioctl_reply_context[context];
	pHba->ioctl_reply_context[context] = NULL;

	return p;
#endif
}
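
/*
 * Why the two helpers above exist: the I2O transaction context field is
 * only 32 bits wide.  On 32-bit kernels the reply pointer itself fits and
 * is used directly; on 64-bit kernels it cannot, so the pointer is parked
 * in pHba->ioctl_reply_context[] and the small array index travels through
 * the hardware instead.  Round trip (64-bit case, illustrative):
 *
 *	u32 ctx = adpt_ioctl_to_context(pHba, reply);	// store pointer, get index
 *	...						// index rides in the message/reply
 *	void *p = adpt_ioctl_from_context(pHba, ctx);	// fetch pointer, free slot
 */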

/*===========================================================================
 * Error Handling routines
 *===========================================================================
 */

static int adpt_abort(struct scsi_cmnd * cmd)
{
	adpt_hba* pHba = NULL;	/* host bus adapter structure */
	struct adpt_device* dptdevice;	/* dpt per device information */
	u32 msg[5];
	int rcode;

	pHba = (adpt_hba*) cmd->device->host->hostdata[0];
	printk(KERN_INFO"%s: Trying to Abort\n",pHba->name);
	if ((dptdevice = (void*) (cmd->device->hostdata)) == NULL) {
		printk(KERN_ERR "%s: Unable to abort: No device in cmnd\n",pHba->name);
		return FAILED;
	}

	memset(msg, 0, sizeof(msg));
	msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = I2O_CMD_SCSI_ABORT<<24|HOST_TID<<12|dptdevice->tid;
	msg[2] = 0;
	msg[3] = 0;
	/* Add 1 to avoid firmware treating it as invalid command */
	msg[4] = cmd->request->tag + 1;
	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER);
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		if(rcode == -EOPNOTSUPP ){
			printk(KERN_INFO"%s: Abort cmd not supported\n",pHba->name);
			return FAILED;
		}
		printk(KERN_INFO"%s: Abort failed.\n",pHba->name);
		return FAILED;
	}
	printk(KERN_INFO"%s: Abort complete.\n",pHba->name);
	return SUCCESS;
}

#define I2O_DEVICE_RESET 0x27
// This is the same for BLK and SCSI devices
// NOTE this is wrong in the i2o.h definitions
// This is not currently supported by our adapter but we issue it anyway
static int adpt_device_reset(struct scsi_cmnd* cmd)
{
	adpt_hba* pHba;
	u32 msg[4];
	u32 rcode;
	int old_state;
	struct adpt_device* d = cmd->device->hostdata;

	pHba = (void*) cmd->device->host->hostdata[0];
	printk(KERN_INFO"%s: Trying to reset device\n",pHba->name);
	if (!d) {
		printk(KERN_INFO"%s: Reset Device: Device Not found\n",pHba->name);
		return FAILED;
	}
	memset(msg, 0, sizeof(msg));
	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = (I2O_DEVICE_RESET<<24|HOST_TID<<12|d->tid);
	msg[2] = 0;
	msg[3] = 0;

	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	old_state = d->state;
	d->state |= DPTI_DEV_RESET;
	rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER);
	d->state = old_state;
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		if(rcode == -EOPNOTSUPP ){
			printk(KERN_INFO"%s: Device reset not supported\n",pHba->name);
			return FAILED;
		}
		printk(KERN_INFO"%s: Device reset failed\n",pHba->name);
		return FAILED;
	} else {
		printk(KERN_INFO"%s: Device reset successful\n",pHba->name);
		return SUCCESS;
	}
}


#define I2O_HBA_BUS_RESET 0x87
// This version of bus reset is called by the eh_error handler
static int adpt_bus_reset(struct scsi_cmnd* cmd)
{
	adpt_hba* pHba;
	u32 msg[4];
	u32 rcode;

	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
	memset(msg, 0, sizeof(msg));
	printk(KERN_WARNING"%s: Bus reset: SCSI Bus %d: tid: %d\n",pHba->name, cmd->device->channel,pHba->channel[cmd->device->channel].tid );
	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = (I2O_HBA_BUS_RESET<<24|HOST_TID<<12|pHba->channel[cmd->device->channel].tid);
	msg[2] = 0;
	msg[3] = 0;
	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER);
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		printk(KERN_WARNING"%s: Bus reset failed.\n",pHba->name);
		return FAILED;
	} else {
		printk(KERN_WARNING"%s: Bus reset success.\n",pHba->name);
		return SUCCESS;
	}
}

// This version of reset is called by the eh_error_handler
static int __adpt_reset(struct scsi_cmnd* cmd)
{
	adpt_hba* pHba;
	int rcode;
	char name[32];

	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
	strncpy(name, pHba->name, sizeof(name));
	printk(KERN_WARNING"%s: Hba Reset: scsi id %d: tid: %d\n", name, cmd->device->channel, pHba->channel[cmd->device->channel].tid);
	rcode = adpt_hba_reset(pHba);
	if(rcode == 0){
		printk(KERN_WARNING"%s: HBA reset complete\n", name);
		return SUCCESS;
	} else {
		printk(KERN_WARNING"%s: HBA reset failed (%x)\n", name, rcode);
		return FAILED;
	}
}

static int adpt_reset(struct scsi_cmnd* cmd)
{
	int rc;

	spin_lock_irq(cmd->device->host->host_lock);
	rc = __adpt_reset(cmd);
	spin_unlock_irq(cmd->device->host->host_lock);

	return rc;
}

// This version of reset is called by the ioctls and indirectly from eh_error_handler via adpt_reset
static int adpt_hba_reset(adpt_hba* pHba)
{
	int rcode;

	pHba->state |= DPTI_STATE_RESET;

	// Activate does get status , init outbound, and get hrt
	if ((rcode=adpt_i2o_activate_hba(pHba)) < 0) {
		printk(KERN_ERR "%s: Could not activate\n", pHba->name);
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}

	if ((rcode=adpt_i2o_build_sys_table()) < 0) {
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	PDEBUG("%s: in HOLD state\n",pHba->name);

	if ((rcode=adpt_i2o_online_hba(pHba)) < 0) {
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	PDEBUG("%s: in OPERATIONAL state\n",pHba->name);

	if ((rcode=adpt_i2o_lct_get(pHba)) < 0){
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}

	if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0){
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	pHba->state &= ~DPTI_STATE_RESET;

	adpt_fail_posted_scbs(pHba);
	return 0;	/* return success */
}

/*===========================================================================
 *
 *===========================================================================
 */

static void adpt_i2o_sys_shutdown(void)
{
	adpt_hba *pHba, *pNext;
	struct adpt_i2o_post_wait_data *p1, *old;

	printk(KERN_INFO"Shutting down Adaptec I2O controllers.\n");
	printk(KERN_INFO"   This could take a few minutes if there are many devices attached\n");
	/* Delete all IOPs from the controller chain */
	/* They should have already been released by the
	 * scsi-core
	 */
	for (pHba = hba_chain; pHba; pHba = pNext) {
		pNext = pHba->next;
		adpt_i2o_delete_hba(pHba);
	}

	/* Remove any timed-out entries from the wait queue.  */
//	spin_lock_irqsave(&adpt_post_wait_lock, flags);
	/* Nothing should be outstanding at this point so just
	 * free them
	 */
	for(p1 = adpt_post_wait_queue; p1;) {
		old = p1;
		p1 = p1->next;
		kfree(old);
	}
//	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
	adpt_post_wait_queue = NULL;

	printk(KERN_INFO "Adaptec I2O controllers down.\n");
}

static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev)
{
	adpt_hba* pHba = NULL;
	adpt_hba* p = NULL;
	ulong base_addr0_phys = 0;
	ulong base_addr1_phys = 0;
	u32 hba_map0_area_size = 0;
	u32 hba_map1_area_size = 0;
	void __iomem *base_addr_virt = NULL;
	void __iomem *msg_addr_virt = NULL;
	int dma64 = 0;

	int raptorFlag = FALSE;

	if(pci_enable_device(pDev)) {
		return -EINVAL;
	}

	if (pci_request_regions(pDev, "dpt_i2o")) {
		PERROR("dpti: adpt_config_hba: pci request region failed\n");
		return -EINVAL;
	}

	pci_set_master(pDev);

	/*
	 *	See if we should enable dma64 mode.
	 */
	if (sizeof(dma_addr_t) > 4 &&
	    dma_get_required_mask(&pDev->dev) > DMA_BIT_MASK(32) &&
	    dma_set_mask(&pDev->dev, DMA_BIT_MASK(64)) == 0)
		dma64 = 1;

	if (!dma64 && dma_set_mask(&pDev->dev, DMA_BIT_MASK(32)) != 0)
		return -EINVAL;

	/* adapter only supports message blocks below 4GB */
	dma_set_coherent_mask(&pDev->dev, DMA_BIT_MASK(32));

	base_addr0_phys = pci_resource_start(pDev,0);
	hba_map0_area_size = pci_resource_len(pDev,0);

	// Check if standard PCI card or single BAR Raptor
	if(pDev->device == PCI_DPT_DEVICE_ID){
		if(pDev->subsystem_device >=0xc032 && pDev->subsystem_device <= 0xc03b){
			// Raptor card with this device id needs 4M
			hba_map0_area_size = 0x400000;
		} else { // Not Raptor - it is a PCI card
			if(hba_map0_area_size > 0x100000 ){
				hba_map0_area_size = 0x100000;
			}
		}
	} else {// Raptor split BAR config
		// Use BAR1 in this configuration
		base_addr1_phys = pci_resource_start(pDev,1);
		hba_map1_area_size = pci_resource_len(pDev,1);
		raptorFlag = TRUE;
	}

#if BITS_PER_LONG == 64
	/*
	 *	The original Adaptec 64 bit driver has this comment here:
	 *	"x86_64 machines need more optimal mappings"
	 *
	 *	I assume some HBAs report ridiculously large mappings
	 *	and we need to limit them on platforms with IOMMUs.
	 */
	if (raptorFlag == TRUE) {
		if (hba_map0_area_size > 128)
			hba_map0_area_size = 128;
		if (hba_map1_area_size > 524288)
			hba_map1_area_size = 524288;
	} else {
		if (hba_map0_area_size > 524288)
			hba_map0_area_size = 524288;
	}
#endif

	base_addr_virt = ioremap(base_addr0_phys,hba_map0_area_size);
	if (!base_addr_virt) {
		pci_release_regions(pDev);
		PERROR("dpti: adpt_config_hba: io remap failed\n");
		return -EINVAL;
	}

	if(raptorFlag == TRUE) {
		msg_addr_virt = ioremap(base_addr1_phys, hba_map1_area_size );
		if (!msg_addr_virt) {
			PERROR("dpti: adpt_config_hba: io remap failed on BAR1\n");
			iounmap(base_addr_virt);
			pci_release_regions(pDev);
			return -EINVAL;
		}
	} else {
		msg_addr_virt = base_addr_virt;
	}

	// Allocate and zero the data structure
	pHba = kzalloc(sizeof(adpt_hba), GFP_KERNEL);
	if (!pHba) {
		if (msg_addr_virt != base_addr_virt)
			iounmap(msg_addr_virt);
		iounmap(base_addr_virt);
		pci_release_regions(pDev);
		return -ENOMEM;
	}

	mutex_lock(&adpt_configuration_lock);

	if(hba_chain != NULL){
		for(p = hba_chain; p->next; p = p->next);
		p->next = pHba;
	} else {
		hba_chain = pHba;
	}
	pHba->next = NULL;
	pHba->unit = hba_count;
	sprintf(pHba->name, "dpti%d", hba_count);
	hba_count++;

	mutex_unlock(&adpt_configuration_lock);

	pHba->pDev = pDev;
	pHba->base_addr_phys = base_addr0_phys;

	// Set up the Virtual Base Address of the I2O Device
	pHba->base_addr_virt = base_addr_virt;
	pHba->msg_addr_virt = msg_addr_virt;
	pHba->irq_mask = base_addr_virt+0x30;
	pHba->post_port = base_addr_virt+0x40;
	pHba->reply_port = base_addr_virt+0x44;

	pHba->hrt = NULL;
	pHba->lct = NULL;
	pHba->lct_size = 0;
	pHba->status_block = NULL;
	pHba->post_count = 0;
	pHba->state = DPTI_STATE_RESET;
	pHba->pDev = pDev;
	pHba->devices = NULL;
	pHba->dma64 = dma64;

	// Initializing the spinlocks
	spin_lock_init(&pHba->state_lock);
	spin_lock_init(&adpt_post_wait_lock);

	if(raptorFlag == 0){
		printk(KERN_INFO "Adaptec I2O RAID controller"
				 " %d at %p size=%x irq=%d%s\n",
			hba_count-1, base_addr_virt,
			hba_map0_area_size, pDev->irq,
			dma64 ? " (64-bit DMA)" : "");
	} else {
		printk(KERN_INFO"Adaptec I2O RAID controller %d irq=%d%s\n",
			hba_count-1, pDev->irq,
			dma64 ? " (64-bit DMA)" : "");
		printk(KERN_INFO"     BAR0 %p - size= %x\n",base_addr_virt,hba_map0_area_size);
		printk(KERN_INFO"     BAR1 %p - size= %x\n",msg_addr_virt,hba_map1_area_size);
	}

	if (request_irq (pDev->irq, adpt_isr, IRQF_SHARED, pHba->name, pHba)) {
		printk(KERN_ERR"%s: Couldn't register IRQ %d\n", pHba->name, pDev->irq);
		adpt_i2o_delete_hba(pHba);
		return -EINVAL;
	}

	return 0;
}
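
/*
 * Note on the DMA masks set up above: the streaming mask may be widened
 * to 64 bits when the platform requires it (dpt_dma64() then selects the
 * 64-bit SGE format), while the coherent mask deliberately stays at
 * 32 bits because the adapter can only address message blocks below 4GB.
 */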

static void adpt_i2o_delete_hba(adpt_hba* pHba)
{
	adpt_hba* p1;
	adpt_hba* p2;
	struct i2o_device* d;
	struct i2o_device* next;
	int i;
	int j;
	struct adpt_device* pDev;
	struct adpt_device* pNext;


	mutex_lock(&adpt_configuration_lock);
	if(pHba->host){
		free_irq(pHba->host->irq, pHba);
	}
	p2 = NULL;
	for( p1 = hba_chain; p1; p2 = p1,p1=p1->next){
		if(p1 == pHba) {
			if(p2) {
				p2->next = p1->next;
			} else {
				hba_chain = p1->next;
			}
			break;
		}
	}

	hba_count--;
	mutex_unlock(&adpt_configuration_lock);

	iounmap(pHba->base_addr_virt);
	pci_release_regions(pHba->pDev);
	if(pHba->msg_addr_virt != pHba->base_addr_virt){
		iounmap(pHba->msg_addr_virt);
	}
	if(pHba->FwDebugBuffer_P)
		iounmap(pHba->FwDebugBuffer_P);
	if(pHba->hrt) {
		dma_free_coherent(&pHba->pDev->dev,
			pHba->hrt->num_entries * pHba->hrt->entry_len << 2,
			pHba->hrt, pHba->hrt_pa);
	}
	if(pHba->lct) {
		dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
			pHba->lct, pHba->lct_pa);
	}
	if(pHba->status_block) {
		dma_free_coherent(&pHba->pDev->dev, sizeof(i2o_status_block),
			pHba->status_block, pHba->status_block_pa);
	}
	if(pHba->reply_pool) {
		dma_free_coherent(&pHba->pDev->dev,
			pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
			pHba->reply_pool, pHba->reply_pool_pa);
	}

	for(d = pHba->devices; d ; d = next){
		next = d->next;
		kfree(d);
	}
	for(i = 0 ; i < pHba->top_scsi_channel ; i++){
		for(j = 0; j < MAX_ID; j++){
			if(pHba->channel[i].device[j] != NULL){
				for(pDev = pHba->channel[i].device[j]; pDev; pDev = pNext){
					pNext = pDev->next_lun;
					kfree(pDev);
				}
			}
		}
	}
	pci_dev_put(pHba->pDev);
	if (adpt_sysfs_class)
		device_destroy(adpt_sysfs_class,
				MKDEV(DPTI_I2O_MAJOR, pHba->unit));
	kfree(pHba);

	if(hba_count <= 0){
		unregister_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER);
		if (adpt_sysfs_class) {
			class_destroy(adpt_sysfs_class);
			adpt_sysfs_class = NULL;
		}
	}
}

static struct adpt_device* adpt_find_device(adpt_hba* pHba, u32 chan, u32 id, u64 lun)
{
	struct adpt_device* d;

	if(chan < 0 || chan >= MAX_CHANNEL)
		return NULL;

	d = pHba->channel[chan].device[id];
	if(!d || d->tid == 0) {
		return NULL;
	}

	/* If it is the only lun at that address then this should match*/
	if(d->scsi_lun == lun){
		return d;
	}

	/* else we need to look through all the luns */
	for(d=d->next_lun ; d ; d = d->next_lun){
		if(d->scsi_lun == lun){
			return d;
		}
	}
	return NULL;
}

static int adpt_i2o_post_wait(adpt_hba* pHba, u32* msg, int len, int timeout)
{
	// I used my own version of the WAIT_QUEUE_HEAD
	// to handle some version differences
	// When embedded in the kernel this could go back to the vanilla one
	ADPT_DECLARE_WAIT_QUEUE_HEAD(adpt_wq_i2o_post);
	int status = 0;
	ulong flags = 0;
	struct adpt_i2o_post_wait_data *p1, *p2;
	struct adpt_i2o_post_wait_data *wait_data =
		kmalloc(sizeof(struct adpt_i2o_post_wait_data), GFP_ATOMIC);
	DECLARE_WAITQUEUE(wait, current);

	if (!wait_data)
		return -ENOMEM;

	/*
	 * The spin locking is needed to keep anyone from playing
	 * with the queue pointers and id while we do the same
	 */
	spin_lock_irqsave(&adpt_post_wait_lock, flags);
	// TODO we need a MORE unique way of getting ids
	// to support async LCT get
	wait_data->next = adpt_post_wait_queue;
	adpt_post_wait_queue = wait_data;
	adpt_post_wait_id++;
	adpt_post_wait_id &= 0x7fff;
	wait_data->id = adpt_post_wait_id;
	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);

	wait_data->wq = &adpt_wq_i2o_post;
	wait_data->status = -ETIMEDOUT;

	add_wait_queue(&adpt_wq_i2o_post, &wait);

	msg[2] |= 0x80000000 | ((u32)wait_data->id);
	timeout *= HZ;
	if((status = adpt_i2o_post_this(pHba, msg, len)) == 0){
		set_current_state(TASK_INTERRUPTIBLE);
		if(pHba->host)
			spin_unlock_irq(pHba->host->host_lock);
		if (!timeout)
			schedule();
		else{
			timeout = schedule_timeout(timeout);
			if (timeout == 0) {
				// I/O issued, but cannot get result in
				// specified time. Freeing resources is
				// dangerous.
				status = -ETIME;
			}
		}
		if(pHba->host)
			spin_lock_irq(pHba->host->host_lock);
	}
	remove_wait_queue(&adpt_wq_i2o_post, &wait);

	if(status == -ETIMEDOUT){
		printk(KERN_INFO"dpti%d: POST WAIT TIMEOUT\n",pHba->unit);
		// We will have to free the wait_data memory during shutdown
		return status;
	}

	/* Remove the entry from the queue.  */
	p2 = NULL;
	spin_lock_irqsave(&adpt_post_wait_lock, flags);
	for(p1 = adpt_post_wait_queue; p1; p2 = p1, p1 = p1->next) {
		if(p1 == wait_data) {
			if(p1->status == I2O_DETAIL_STATUS_UNSUPPORTED_FUNCTION ) {
				status = -EOPNOTSUPP;
			}
			if(p2) {
				p2->next = p1->next;
			} else {
				adpt_post_wait_queue = p1->next;
			}
			break;
		}
	}
	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);

	kfree(wait_data);

	return status;
}
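
/*
 * The post-wait id is masked to 15 bits (0x7fff) and or'ed with the
 * 0x80000000 marker before it travels through the hardware, so ids wrap
 * around and are only as unique as the TODO above admits; the marker bit
 * is what lets adpt_isr() tell these replies apart from ioctl
 * (0x40000000) and plain SCSI contexts.
 */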

static s32 adpt_i2o_post_this(adpt_hba* pHba, u32* data, int len)
{
	u32 m = EMPTY_QUEUE;
	u32 __iomem *msg;
	ulong timeout = jiffies + 30*HZ;

	do {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}
		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"dpti%d: Timeout waiting for message frame!\n", pHba->unit);
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while(m == EMPTY_QUEUE);

	msg = pHba->msg_addr_virt + m;
	memcpy_toio(msg, data, len);
	wmb();

	//post message
	writel(m, pHba->post_port);
	wmb();

	return 0;
}
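
/*
 * Posting protocol used above, in short: reading post_port pops a free
 * message frame address (MFA, an offset into the adapter's message
 * window) or EMPTY_QUEUE if none is free yet; the request is copied into
 * that frame with memcpy_toio(); writing the same MFA back to post_port
 * hands the frame to the IOP.  The wmb() calls keep the frame contents
 * ordered before the doorbell write.
 */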

static void adpt_i2o_post_wait_complete(u32 context, int status)
{
	struct adpt_i2o_post_wait_data *p1 = NULL;
	/*
	 * We need to search through the adpt_post_wait
	 * queue to see if the given message is still
	 * outstanding.  If not, it means that the IOP
	 * took longer to respond to the message than we
	 * had allowed and timer has already expired.
	 * Not much we can do about that except log
	 * it for debug purposes, increase timeout, and recompile.
	 *
	 * Lock needed to keep anyone from moving queue pointers
	 * around while we're looking through them.
	 */

	context &= 0x7fff;

	spin_lock(&adpt_post_wait_lock);
	for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
		if(p1->id == context) {
			p1->status = status;
			spin_unlock(&adpt_post_wait_lock);
			wake_up_interruptible(p1->wq);
			return;
		}
	}
	spin_unlock(&adpt_post_wait_lock);
	// If this happens we lose commands that probably really completed
	printk(KERN_DEBUG"dpti: Could Not find task %d in wait queue\n",context);
	printk(KERN_DEBUG"      Tasks in wait queue:\n");
	for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
		printk(KERN_DEBUG"         %d\n",p1->id);
	}
	return;
}

static s32 adpt_i2o_reset_hba(adpt_hba* pHba)
{
	u32 msg[8];
	u8* status;
	dma_addr_t addr;
	u32 m = EMPTY_QUEUE ;
	ulong timeout = jiffies + (TMOUT_IOPRESET*HZ);

	if(pHba->initialized  == FALSE) {	// First time reset should be quick
		timeout = jiffies + (25*HZ);
	} else {
		adpt_i2o_quiesce_hba(pHba);
	}

	do {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}
		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"Timeout waiting for message!\n");
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while (m == EMPTY_QUEUE);

	status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
	if(status == NULL) {
		adpt_send_nop(pHba, m);
		printk(KERN_ERR"IOP reset failed - no free memory.\n");
		return -ENOMEM;
	}
	memset(status,0,4);

	msg[0]=EIGHT_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1]=I2O_CMD_ADAPTER_RESET<<24|HOST_TID<<12|ADAPTER_TID;
	msg[2]=0;
	msg[3]=0;
	msg[4]=0;
	msg[5]=0;
	msg[6]=dma_low(addr);
	msg[7]=dma_high(addr);

	memcpy_toio(pHba->msg_addr_virt+m, msg, sizeof(msg));
	wmb();
	writel(m, pHba->post_port);
	wmb();

	while(*status == 0){
		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"%s: IOP Reset Timeout\n",pHba->name);
			/* We lose 4 bytes of "status" here, but we cannot
			   free these because controller may awake and corrupt
			   those bytes at any time */
			/* dma_free_coherent(&pHba->pDev->dev, 4, buf, addr); */
			return -ETIMEDOUT;
		}
		rmb();
		schedule_timeout_uninterruptible(1);
	}

	if(*status == 0x01 /*I2O_EXEC_IOP_RESET_IN_PROGRESS*/) {
		PDEBUG("%s: Reset in progress...\n", pHba->name);
		// Here we wait for message frame to become available
		// indicating that reset has finished
		do {
			rmb();
			m = readl(pHba->post_port);
			if (m != EMPTY_QUEUE) {
				break;
			}
			if(time_after(jiffies,timeout)){
				printk(KERN_ERR "%s:Timeout waiting for IOP Reset.\n",pHba->name);
				/* We lose 4 bytes of "status" here, but we
				   cannot free these because controller may
				   awake and corrupt those bytes at any time */
				/* dma_free_coherent(&pHba->pDev->dev, 4, buf, addr); */
				return -ETIMEDOUT;
			}
			schedule_timeout_uninterruptible(1);
		} while (m == EMPTY_QUEUE);
		// Flush the offset
		adpt_send_nop(pHba, m);
	}
	adpt_i2o_status_get(pHba);
	if(*status == 0x02 ||
			pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
		printk(KERN_WARNING"%s: Reset reject, trying to clear\n",
				pHba->name);
	} else {
		PDEBUG("%s: Reset completed.\n", pHba->name);
	}

	dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
#ifdef UARTDELAY
	// This delay is to allow someone attached to the card through the debug UART to
	// set up the dump levels that they want before the rest of the initialization sequence
	adpt_delay(20000);
#endif
	return 0;
}
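
/*
 * Reset handshake recap: the four-byte status buffer is handed to the
 * IOP in msg[6]/msg[7] and then polled.  A value of 0x01 means the reset
 * is still in progress (wait for a free MFA and flush it with a NOP);
 * 0x02, or an iop_state other than ADAPTER_STATE_RESET, means the reset
 * was rejected.  If the IOP never writes the buffer, the 4 bytes are
 * deliberately leaked rather than risk the controller waking up later
 * and writing into freed memory.
 */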

static int adpt_i2o_parse_lct(adpt_hba* pHba)
{
	int i;
	int max;
	int tid;
	struct i2o_device *d;
	i2o_lct *lct = pHba->lct;
	u8 bus_no = 0;
	s16 scsi_id;
	u64 scsi_lun;
	u32 buf[10]; // larger than 7, or 8 ...
	struct adpt_device* pDev;

	if (lct == NULL) {
		printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
		return -1;
	}

	max = lct->table_size;
	max -= 3;
	max /= 9;

	for(i=0;i<max;i++) {
		if( lct->lct_entry[i].user_tid != 0xfff){
			/*
			 * If we have hidden devices, we need to inform the upper layers about
			 * the possible maximum id reference to handle device access when
			 * an array is disassembled. This code has no other purpose but to
			 * allow us future access to devices that are currently hidden
			 * behind arrays, hotspares or have not been configured (JBOD mode).
			 */
			if( lct->lct_entry[i].class_id != I2O_CLASS_RANDOM_BLOCK_STORAGE &&
			    lct->lct_entry[i].class_id != I2O_CLASS_SCSI_PERIPHERAL &&
			    lct->lct_entry[i].class_id != I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
				continue;
			}
			tid = lct->lct_entry[i].tid;
			// I2O_DPT_DEVICE_INFO_GROUP_NO;
			if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
				continue;
			}
			bus_no = buf[0]>>16;
			scsi_id = buf[1];
			scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
			if(bus_no >= MAX_CHANNEL) {	// Something wrong skip it
				printk(KERN_WARNING"%s: Channel number %d out of range \n", pHba->name, bus_no);
				continue;
			}
			if (scsi_id >= MAX_ID){
				printk(KERN_WARNING"%s: SCSI ID %d out of range \n", pHba->name, scsi_id);
				continue;
			}
			if(bus_no > pHba->top_scsi_channel){
				pHba->top_scsi_channel = bus_no;
			}
			if(scsi_id > pHba->top_scsi_id){
				pHba->top_scsi_id = scsi_id;
			}
			if(scsi_lun > pHba->top_scsi_lun){
				pHba->top_scsi_lun = scsi_lun;
			}
			continue;
		}
		d = kmalloc(sizeof(struct i2o_device), GFP_KERNEL);
		if(d==NULL)
		{
			printk(KERN_CRIT"%s: Out of memory for I2O device data.\n",pHba->name);
			return -ENOMEM;
		}

		d->controller = pHba;
		d->next = NULL;

		memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));

		d->flags = 0;
		tid = d->lct_data.tid;
		adpt_i2o_report_hba_unit(pHba, d);
		adpt_i2o_install_device(pHba, d);
	}
	bus_no = 0;
	for(d = pHba->devices; d ; d = d->next) {
		if(d->lct_data.class_id  == I2O_CLASS_BUS_ADAPTER_PORT ||
		   d->lct_data.class_id  == I2O_CLASS_FIBRE_CHANNEL_PORT){
			tid = d->lct_data.tid;
			// TODO get the bus_no from hrt-but for now they are in order
			//bus_no =
			if(bus_no > pHba->top_scsi_channel){
				pHba->top_scsi_channel = bus_no;
			}
			pHba->channel[bus_no].type = d->lct_data.class_id;
			pHba->channel[bus_no].tid = tid;
			if(adpt_i2o_query_scalar(pHba, tid, 0x0200, -1, buf, 28)>=0)
			{
				pHba->channel[bus_no].scsi_id =  buf[1];
				PDEBUG("Bus %d - SCSI ID %d.\n", bus_no, buf[1]);
			}
			// TODO remove - this is just until we get from hrt
			bus_no++;
			if(bus_no >= MAX_CHANNEL) {	// Something wrong skip it
				printk(KERN_WARNING"%s: Channel number %d out of range - LCT\n", pHba->name, bus_no);
				break;
			}
		}
	}

	// Setup adpt_device table
	for(d = pHba->devices; d ; d = d->next) {
		if(d->lct_data.class_id  == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
		   d->lct_data.class_id  == I2O_CLASS_SCSI_PERIPHERAL ||
		   d->lct_data.class_id  == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){

			tid = d->lct_data.tid;
			scsi_id = -1;
			// I2O_DPT_DEVICE_INFO_GROUP_NO;
			if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)>=0) {
				bus_no = buf[0]>>16;
				scsi_id = buf[1];
				scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
				if(bus_no >= MAX_CHANNEL) {	// Something wrong skip it
					continue;
				}
				if (scsi_id >= MAX_ID) {
					continue;
				}
				if( pHba->channel[bus_no].device[scsi_id] == NULL){
					pDev = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
					if(pDev == NULL) {
						return -ENOMEM;
					}
					pHba->channel[bus_no].device[scsi_id] = pDev;
				} else {
					for( pDev = pHba->channel[bus_no].device[scsi_id];
							pDev->next_lun; pDev = pDev->next_lun){
					}
					pDev->next_lun = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
					if(pDev->next_lun == NULL) {
						return -ENOMEM;
					}
					pDev = pDev->next_lun;
				}
				pDev->tid = tid;
				pDev->scsi_channel = bus_no;
				pDev->scsi_id = scsi_id;
				pDev->scsi_lun = scsi_lun;
				pDev->pI2o_dev = d;
				d->owner = pDev;
				pDev->type = (buf[0])&0xff;
				pDev->flags = (buf[0]>>8)&0xff;
				if(scsi_id > pHba->top_scsi_id){
					pHba->top_scsi_id = scsi_id;
				}
				if(scsi_lun > pHba->top_scsi_lun){
					pHba->top_scsi_lun = scsi_lun;
				}
			}
			if(scsi_id == -1){
				printk(KERN_WARNING"Could not find SCSI ID for %s\n",
						d->lct_data.identity_tag);
			}
		}
	}
	return 0;
}

/*
 *	Each I2O controller has a chain of devices on it - these match
 *	the useful parts of the LCT of the board.
 */

static int adpt_i2o_install_device(adpt_hba* pHba, struct i2o_device *d)
{
	mutex_lock(&adpt_configuration_lock);
	d->controller=pHba;
	d->owner=NULL;
	d->next=pHba->devices;
	d->prev=NULL;
	if (pHba->devices != NULL){
		pHba->devices->prev=d;
	}
	pHba->devices=d;
	*d->dev_name = 0;

	mutex_unlock(&adpt_configuration_lock);
	return 0;
}

static int adpt_open(struct inode *inode, struct file *file)
{
	int minor;
	adpt_hba* pHba;

	mutex_lock(&adpt_mutex);
	//TODO check for root access
	//
	minor = iminor(inode);
	if (minor >= hba_count) {
		mutex_unlock(&adpt_mutex);
		return -ENXIO;
	}
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->unit == minor) {
			break;	/* found adapter */
		}
	}
	if (pHba == NULL) {
		mutex_unlock(&adpt_configuration_lock);
		mutex_unlock(&adpt_mutex);
		return -ENXIO;
	}

//	if(pHba->in_use){
	//	mutex_unlock(&adpt_configuration_lock);
//		return -EBUSY;
//	}

	pHba->in_use = 1;
	mutex_unlock(&adpt_configuration_lock);
	mutex_unlock(&adpt_mutex);

	return 0;
}

static int adpt_close(struct inode *inode, struct file *file)
{
	int minor;
	adpt_hba* pHba;

	minor = iminor(inode);
	if (minor >= hba_count) {
		return -ENXIO;
	}
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->unit == minor) {
			break;	/* found adapter */
		}
	}
	mutex_unlock(&adpt_configuration_lock);
	if (pHba == NULL) {
		return -ENXIO;
	}

	pHba->in_use = 0;

	return 0;
}


static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
{
	u32 msg[MAX_MESSAGE_SIZE];
	u32* reply = NULL;
	u32 size = 0;
	u32 reply_size = 0;
	u32 __user *user_msg = arg;
	u32 __user * user_reply = NULL;
	void **sg_list = NULL;
	u32 sg_offset = 0;
	u32 sg_count = 0;
	int sg_index = 0;
	u32 i = 0;
	u32 rcode = 0;
	void *p = NULL;
	dma_addr_t addr;
	ulong flags = 0;

	memset(&msg, 0, MAX_MESSAGE_SIZE*4);
	// get user msg size in u32s
	if(get_user(size, &user_msg[0])){
		return -EFAULT;
	}
	size = size>>16;

	user_reply = &user_msg[size];
	if(size > MAX_MESSAGE_SIZE){
		return -EFAULT;
	}
	size *= 4; // Convert to bytes

	/* Copy in the user's I2O command */
	if(copy_from_user(msg, user_msg, size)) {
		return -EFAULT;
	}
	get_user(reply_size, &user_reply[0]);
	reply_size = reply_size>>16;
	if(reply_size > REPLY_FRAME_SIZE){
		reply_size = REPLY_FRAME_SIZE;
	}
	reply_size *= 4;
	reply = kzalloc(REPLY_FRAME_SIZE*4, GFP_KERNEL);
	if(reply == NULL) {
		printk(KERN_WARNING"%s: Could not allocate reply buffer\n",pHba->name);
		return -ENOMEM;
	}
	sg_offset = (msg[0]>>4)&0xf;
	msg[2] = 0x40000000; // IOCTL context
	msg[3] = adpt_ioctl_to_context(pHba, reply);
	if (msg[3] == (u32)-1) {
		rcode = -EBUSY;
		goto free;
	}

	sg_list = kcalloc(pHba->sg_tablesize, sizeof(*sg_list), GFP_KERNEL);
	if (!sg_list) {
		rcode = -ENOMEM;
		goto free;
	}
	if(sg_offset) {
		// TODO add 64 bit API
		struct sg_simple_element *sg =  (struct sg_simple_element*) (msg+sg_offset);
		sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
		if (sg_count > pHba->sg_tablesize){
			printk(KERN_DEBUG"%s:IOCTL SG List too large (%u)\n", pHba->name,sg_count);
			rcode = -EINVAL;
			goto free;
		}

		for(i = 0; i < sg_count; i++) {
			int sg_size;

			if (!(sg[i].flag_count & 0x10000000 /*I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT*/)) {
				printk(KERN_DEBUG"%s:Bad SG element %d - not simple (%x)\n",pHba->name,i,  sg[i].flag_count);
				rcode = -EINVAL;
				goto cleanup;
			}
			sg_size = sg[i].flag_count & 0xffffff;
			/* Allocate memory for the transfer */
			p = dma_alloc_coherent(&pHba->pDev->dev, sg_size, &addr, GFP_KERNEL);
			if(!p) {
				printk(KERN_DEBUG"%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
						pHba->name,sg_size,i,sg_count);
				rcode = -ENOMEM;
				goto cleanup;
			}
			sg_list[sg_index++] = p; // sglist indexed with input frame, not our internal frame.
			/* Copy in the user's SG buffer if necessary */
			if(sg[i].flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR*/) {
				// sg_simple_element API is 32 bit
				if (copy_from_user(p,(void __user *)(ulong)sg[i].addr_bus, sg_size)) {
					printk(KERN_DEBUG"%s: Could not copy SG buf %d FROM user\n",pHba->name,i);
					rcode = -EFAULT;
					goto cleanup;
				}
			}
			/* sg_simple_element API is 32 bit, but addr < 4GB */
			sg[i].addr_bus = addr;
		}
	}

	do {
		/*
		 * Stop any new commands from entering the
		 * controller while processing the ioctl
		 */
		if (pHba->host) {
			scsi_block_requests(pHba->host);
			spin_lock_irqsave(pHba->host->host_lock, flags);
		}
		rcode = adpt_i2o_post_wait(pHba, msg, size, FOREVER);
		if (rcode != 0)
			printk("adpt_i2o_passthru: post wait failed %d %p\n",
					rcode, reply);
		if (pHba->host) {
			spin_unlock_irqrestore(pHba->host->host_lock, flags);
			scsi_unblock_requests(pHba->host);
		}
	} while (rcode == -ETIMEDOUT);

	if(rcode){
		goto cleanup;
	}

	if(sg_offset) {
		/* Copy the Scatter Gather buffers back to user space */
		u32 j;
		// TODO add 64 bit API
		struct sg_simple_element* sg;
		int sg_size;

		// re-acquire the original message to handle correctly the sg copy operation
		memset(&msg, 0, MAX_MESSAGE_SIZE*4);
		// get user msg size in u32s
		if(get_user(size, &user_msg[0])){
			rcode = -EFAULT;
			goto cleanup;
		}
		size = size>>16;
		size *= 4;
		if (size > MAX_MESSAGE_SIZE) {
			rcode = -EINVAL;
			goto cleanup;
		}
		/* Copy in the user's I2O command */
		if (copy_from_user (msg, user_msg, size)) {
			rcode = -EFAULT;
			goto cleanup;
		}
		sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);

		// TODO add 64 bit API
		sg = (struct sg_simple_element*)(msg + sg_offset);
		for (j = 0; j < sg_count; j++) {
			/* Copy out the SG list to user's buffer if necessary */
			if(! (sg[j].flag_count & 0x4000000 /*I2O_SGL_FLAGS_DIR*/)) {
				sg_size = sg[j].flag_count & 0xffffff;
				// sg_simple_element API is 32 bit
				if (copy_to_user((void __user *)(ulong)sg[j].addr_bus,sg_list[j], sg_size)) {
					printk(KERN_WARNING"%s: Could not copy %p TO user %x\n",pHba->name, sg_list[j], sg[j].addr_bus);
					rcode = -EFAULT;
					goto cleanup;
				}
			}
		}
	}

	/* Copy back the reply to user space */
	if (reply_size) {
		// we wrote our own values for context - now restore the user supplied ones
		if(copy_from_user(reply+2, user_msg+2, sizeof(u32)*2)) {
			printk(KERN_WARNING"%s: Could not copy message context FROM user\n",pHba->name);
			rcode = -EFAULT;
		}
		if(copy_to_user(user_reply, reply, reply_size)) {
			printk(KERN_WARNING"%s: Could not copy reply TO user\n",pHba->name);
			rcode = -EFAULT;
		}
	}


cleanup:
	if (rcode != -ETIME && rcode != -EINTR) {
		struct sg_simple_element *sg =
				(struct sg_simple_element*) (msg +sg_offset);
		while(sg_index) {
			if(sg_list[--sg_index]) {
				dma_free_coherent(&pHba->pDev->dev,
					sg[sg_index].flag_count & 0xffffff,
					sg_list[sg_index],
					sg[sg_index].addr_bus);
			}
		}
	}

free:
	kfree(sg_list);
	kfree(reply);
	return rcode;
}
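
/*
 * SG handling recap for the ioctl path above: each user SG element is
 * bounced through a dma_alloc_coherent() buffer - copied in first when
 * the direction flag (0x04000000) says "out", and copied back to user
 * space after completion when it says "in".  The addresses in the user's
 * frame are rewritten to point at the bounce buffers before posting, so
 * the frame must be re-read from user space afterwards to recover the
 * original user addresses for the copy-out.
 */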

#if defined __ia64__
static void adpt_ia64_info(sysInfo_S* si)
{
	// This is all the info we need for now
	// We will add more info as our new
	// management utility requires it
	si->processorType = PROC_IA64;
}
#endif

#if defined __sparc__
static void adpt_sparc_info(sysInfo_S* si)
{
	// This is all the info we need for now
	// We will add more info as our new
	// management utility requires it
	si->processorType = PROC_ULTRASPARC;
}
#endif
#if defined __alpha__
static void adpt_alpha_info(sysInfo_S* si)
{
	// This is all the info we need for now
	// We will add more info as our new
	// management utility requires it
	si->processorType = PROC_ALPHA;
}
#endif

#if defined __i386__

#include <uapi/asm/vm86.h>

static void adpt_i386_info(sysInfo_S* si)
{
	// This is all the info we need for now
	// We will add more info as our new
	// management utility requires it
	switch (boot_cpu_data.x86) {
	case CPU_386:
		si->processorType = PROC_386;
		break;
	case CPU_486:
		si->processorType = PROC_486;
		break;
	case CPU_586:
		si->processorType = PROC_PENTIUM;
		break;
	default:  // Just in case
		si->processorType = PROC_PENTIUM;
		break;
	}
}
#endif

/*
 * This routine returns information about the system.  This does not affect
 * any logic and if the info is wrong - it doesn't matter.
 */

/* Get all the info we can not get from kernel services */
static int adpt_system_info(void __user *buffer)
{
	sysInfo_S si;

	memset(&si, 0, sizeof(si));

	si.osType = OS_LINUX;
	si.osMajorVersion = 0;
	si.osMinorVersion = 0;
	si.osRevision = 0;
	si.busType = SI_PCI_BUS;
	si.processorFamily = DPTI_sig.dsProcessorFamily;

#if defined __i386__
	adpt_i386_info(&si);
#elif defined (__ia64__)
	adpt_ia64_info(&si);
#elif defined(__sparc__)
	adpt_sparc_info(&si);
#elif defined (__alpha__)
	adpt_alpha_info(&si);
#else
	si.processorType = 0xff ;
#endif
	if (copy_to_user(buffer, &si, sizeof(si))){
		printk(KERN_WARNING"dpti: Could not copy buffer TO user\n");
		return -EFAULT;
	}

	return 0;
}

static int adpt_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg)
{
	int minor;
	int error = 0;
	adpt_hba* pHba;
	ulong flags = 0;
	void __user *argp = (void __user *)arg;

	minor = iminor(inode);
	if (minor >= DPTI_MAX_HBA){
		return -ENXIO;
	}
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->unit == minor) {
			break;	/* found adapter */
		}
	}
	mutex_unlock(&adpt_configuration_lock);
	if(pHba == NULL){
		return -ENXIO;
	}

	while((volatile u32) pHba->state & DPTI_STATE_RESET )
		schedule_timeout_uninterruptible(2);

	switch (cmd) {
	// TODO: handle 3 cases
	case DPT_SIGNATURE:
		if (copy_to_user(argp, &DPTI_sig, sizeof(DPTI_sig))) {
			return -EFAULT;
		}
		break;
	case I2OUSRCMD:
		return adpt_i2o_passthru(pHba, argp);

	case DPT_CTRLINFO:{
		drvrHBAinfo_S HbaInfo;

#define FLG_OSD_PCI_VALID 0x0001
#define FLG_OSD_DMA	  0x0002
#define FLG_OSD_I2O	  0x0004
		memset(&HbaInfo, 0, sizeof(HbaInfo));
		HbaInfo.drvrHBAnum = pHba->unit;
		HbaInfo.baseAddr = (ulong) pHba->base_addr_phys;
		HbaInfo.blinkState = adpt_read_blink_led(pHba);
		HbaInfo.pciBusNum = pHba->pDev->bus->number;
		HbaInfo.pciDeviceNum=PCI_SLOT(pHba->pDev->devfn);
		HbaInfo.Interrupt = pHba->pDev->irq;
		HbaInfo.hbaFlags = FLG_OSD_PCI_VALID | FLG_OSD_DMA | FLG_OSD_I2O;
		if(copy_to_user(argp, &HbaInfo, sizeof(HbaInfo))){
			printk(KERN_WARNING"%s: Could not copy HbaInfo TO user\n",pHba->name);
			return -EFAULT;
		}
		break;
		}
	case DPT_SYSINFO:
		return adpt_system_info(argp);
	case DPT_BLINKLED:{
		u32 value;
		value = (u32)adpt_read_blink_led(pHba);
		if (copy_to_user(argp, &value, sizeof(value))) {
			return -EFAULT;
		}
		break;
		}
	case I2ORESETCMD: {
		struct Scsi_Host *shost = pHba->host;

		if (shost)
			spin_lock_irqsave(shost->host_lock, flags);
		adpt_hba_reset(pHba);
		if (shost)
			spin_unlock_irqrestore(shost->host_lock, flags);
		break;
	}
	case I2ORESCANCMD:
		adpt_rescan(pHba);
		break;
	default:
		return -EINVAL;
	}

	return error;
}

static long adpt_unlocked_ioctl(struct file *file, uint cmd, ulong arg)
{
	struct inode *inode;
	long ret;

	inode = file_inode(file);

	mutex_lock(&adpt_mutex);
	ret = adpt_ioctl(inode, file, cmd, arg);
	mutex_unlock(&adpt_mutex);

	return ret;
}

#ifdef CONFIG_COMPAT
static long compat_adpt_ioctl(struct file *file,
				unsigned int cmd, unsigned long arg)
{
	struct inode *inode;
	long ret;

	inode = file_inode(file);

	mutex_lock(&adpt_mutex);

	switch(cmd) {
	case DPT_SIGNATURE:
	case I2OUSRCMD:
	case DPT_CTRLINFO:
	case DPT_SYSINFO:
	case DPT_BLINKLED:
	case I2ORESETCMD:
	case I2ORESCANCMD:
	case (DPT_TARGET_BUSY & 0xFFFF):
	case DPT_TARGET_BUSY:
		ret = adpt_ioctl(inode, file, cmd, arg);
		break;
	default:
		ret = -ENOIOCTLCMD;
	}

	mutex_unlock(&adpt_mutex);

	return ret;
}
#endif

2086 static irqreturn_t adpt_isr(int irq, void *dev_id)
2088 struct scsi_cmnd* cmd;
2089 adpt_hba* pHba = dev_id;
2090 u32 m;
2091 void __iomem *reply;
2092 u32 status=0;
2093 u32 context;
2094 ulong flags = 0;
2095 int handled = 0;
2097 if (pHba == NULL){
2098 printk(KERN_WARNING"adpt_isr: NULL dev_id\n");
2099 return IRQ_NONE;
2101 if(pHba->host)
2102 spin_lock_irqsave(pHba->host->host_lock, flags);
2104 while( readl(pHba->irq_mask) & I2O_INTERRUPT_PENDING_B) {
2105 m = readl(pHba->reply_port);
2106 if(m == EMPTY_QUEUE){
2107 // Try twice then give up
2108 rmb();
2109 m = readl(pHba->reply_port);
2110 if(m == EMPTY_QUEUE){
2111 // This really should not happen
2112 printk(KERN_ERR"dpti: Could not get reply frame\n");
2113 goto out;
2116 if (pHba->reply_pool_pa <= m &&
2117 m < pHba->reply_pool_pa +
2118 (pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4)) {
2119 reply = (u8 *)pHba->reply_pool +
2120 (m - pHba->reply_pool_pa);
2121 } else {
2122 /* Ick, we should *never* be here */
2123 printk(KERN_ERR "dpti: reply frame not from pool\n");
2124 reply = (u8 *)bus_to_virt(m);
2127 if (readl(reply) & MSG_FAIL) {
2128 u32 old_m = readl(reply+28);
2129 void __iomem *msg;
2130 u32 old_context;
2131 PDEBUG("%s: Failed message\n",pHba->name);
2132 if(old_m >= 0x100000){
2133 printk(KERN_ERR"%s: Bad preserved MFA (%x)- dropping frame\n",pHba->name,old_m);
2134 writel(m,pHba->reply_port);
2135 continue;
2137 // Transaction context is 0 in failed reply frame
2138 msg = pHba->msg_addr_virt + old_m;
2139 old_context = readl(msg+12);
2140 writel(old_context, reply+12);
2141 adpt_send_nop(pHba, old_m);
2143 context = readl(reply+8);
2144 if(context & 0x40000000){ // IOCTL
2145 void *p = adpt_ioctl_from_context(pHba, readl(reply+12));
2146 if( p != NULL) {
2147 memcpy_fromio(p, reply, REPLY_FRAME_SIZE * 4);
2149 // All IOCTLs will also be post wait
2151 if(context & 0x80000000){ // Post wait message
2152 status = readl(reply+16);
2153 if(status >> 24){
2154 status &= 0xffff; /* Get detail status */
2155 } else {
2156 status = I2O_POST_WAIT_OK;
2158 if(!(context & 0x40000000)) {
2160 * The request tag is one less than the command tag
2161 * as the firmware might treat a 0 tag as invalid
2163 cmd = scsi_host_find_tag(pHba->host,
2164 readl(reply + 12) - 1);
2165 if(cmd != NULL) {
2166 printk(KERN_WARNING"%s: Apparent SCSI cmd in Post Wait Context - cmd=%p context=%x\n", pHba->name, cmd, context);
2169 adpt_i2o_post_wait_complete(context, status);
2170 } else { // SCSI message
2172 * The request tag is one less than the command tag
2173 * as the firmware might treat a 0 tag as invalid
2175 cmd = scsi_host_find_tag(pHba->host,
2176 readl(reply + 12) - 1);
2177 if(cmd != NULL){
2178 scsi_dma_unmap(cmd);
2179 adpt_i2o_to_scsi(reply, cmd);
2182 writel(m, pHba->reply_port);
2183 wmb();
2184 rmb();
2186 handled = 1;
2187 out: if(pHba->host)
2188 spin_unlock_irqrestore(pHba->host->host_lock, flags);
2189 return IRQ_RETVAL(handled);
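/*
 * Build and post an I2O_CMD_SCSI_EXEC message for a SCSI command. The
 * layout assembled below: a fixed header (target TID, the block layer
 * tag + 1 as the transaction context, SCB flags carrying the data
 * direction), a 16-byte CDB block, a total-length word, then a simple
 * scatter/gather list. Each SG element is direction|0x10000000|length
 * followed by the DMA address (low dword, plus the high dword on
 * 64-bit capable cards); the final element is rewritten with
 * 0xD0000000 to flag end-of-buffer/end-of-list. msg[0] packs the
 * request length in 32-bit words into its upper half, so a 14-word
 * single-SGE request contributes reqlen<<16 == 0x000E0000.
 */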
2192 static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_device* d)
2194 int i;
2195 u32 msg[MAX_MESSAGE_SIZE];
2196 u32* mptr;
2197 u32* lptr;
2198 u32 *lenptr;
2199 int direction;
2200 int scsidir;
2201 int nseg;
2202 u32 len;
2203 u32 reqlen;
2204 s32 rcode;
2205 dma_addr_t addr;
2207 memset(msg, 0 , sizeof(msg));
2208 len = scsi_bufflen(cmd);
2209 direction = 0x00000000;
2211 scsidir = 0x00000000; // DATA NO XFER
2212 if(len) {
2214 * Set SCBFlags to indicate if data is being transferred
2215 * in or out, or no data transfer
2216 * Note: Do not have to verify index is less than 0 since
2217 * cmd->cmnd[0] is an unsigned char
2219 switch(cmd->sc_data_direction){
2220 case DMA_FROM_DEVICE:
2221 scsidir =0x40000000; // DATA IN (iop<--dev)
2222 break;
2223 case DMA_TO_DEVICE:
2224 direction=0x04000000; // SGL OUT
2225 scsidir =0x80000000; // DATA OUT (iop-->dev)
2226 break;
2227 case DMA_NONE:
2228 break;
2229 case DMA_BIDIRECTIONAL:
2230 scsidir =0x40000000; // DATA IN (iop<--dev)
2231 // Assume In - and continue;
2232 break;
2233 default:
2234 printk(KERN_WARNING"%s: scsi opcode 0x%x not supported.\n",
2235 pHba->name, cmd->cmnd[0]);
2236 cmd->result = (DID_OK <<16) | (INITIATOR_ERROR << 8);
2237 cmd->scsi_done(cmd);
2238 return 0;
2241 // msg[0] is set later
2242 // I2O_CMD_SCSI_EXEC
2243 msg[1] = ((0xff<<24)|(HOST_TID<<12)|d->tid);
2244 msg[2] = 0;
2245 /* Add 1 to avoid firmware treating it as invalid command */
2246 msg[3] = cmd->request->tag + 1;
2247 // Our cards use the transaction context as the tag for queueing
2248 // Adaptec/DPT Private stuff
2249 msg[4] = I2O_CMD_SCSI_EXEC|(DPT_ORGANIZATION_ID<<16);
2250 msg[5] = d->tid;
2251 /* Direction, disconnect ok | sense data | simple queue , CDBLen */
2252 // I2O_SCB_FLAG_ENABLE_DISCONNECT |
2253 // I2O_SCB_FLAG_SIMPLE_QUEUE_TAG |
2254 // I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
2255 msg[6] = scsidir|0x20a00000|cmd->cmd_len;
2257 mptr=msg+7;
2259 // Write SCSI command into the message - always 16 byte block
2260 memset(mptr, 0, 16);
2261 memcpy(mptr, cmd->cmnd, cmd->cmd_len);
2262 mptr+=4;
2263 lenptr=mptr++; /* Remember me - fill in when we know */
2264 if (dpt_dma64(pHba)) {
2265 reqlen = 16; // SINGLE SGE
2266 *mptr++ = (0x7C<<24)+(2<<16)+0x02; /* Enable 64 bit */
2267 *mptr++ = 1 << PAGE_SHIFT;
2268 } else {
2269 reqlen = 14; // SINGLE SGE
2271 /* Now fill in the SGList and command */
2273 nseg = scsi_dma_map(cmd);
2274 BUG_ON(nseg < 0);
2275 if (nseg) {
2276 struct scatterlist *sg;
2278 len = 0;
2279 scsi_for_each_sg(cmd, sg, nseg, i) {
2280 lptr = mptr;
2281 *mptr++ = direction|0x10000000|sg_dma_len(sg);
2282 len+=sg_dma_len(sg);
2283 addr = sg_dma_address(sg);
2284 *mptr++ = dma_low(addr);
2285 if (dpt_dma64(pHba))
2286 *mptr++ = dma_high(addr);
2287 /* Make this an end of list */
2288 if (i == nseg - 1)
2289 *lptr = direction|0xD0000000|sg_dma_len(sg);
2291 reqlen = mptr - msg;
2292 *lenptr = len;
2294 if(cmd->underflow && len != cmd->underflow){
2295 printk(KERN_WARNING"Cmd len %08X Cmd underflow %08X\n",
2296 len, cmd->underflow);
2298 } else {
2299 *lenptr = len = 0;
2300 reqlen = 12;
2303 /* Stick the headers on */
2304 msg[0] = reqlen<<16 | ((reqlen > 12) ? SGL_OFFSET_12 : SGL_OFFSET_0);
2306 // Send it on its way
2307 rcode = adpt_i2o_post_this(pHba, msg, reqlen<<2);
2308 if (rcode == 0) {
2309 return 0;
2311 return rcode;
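/*
 * Allocate the Scsi_Host for this controller and size it from what the
 * IOP reported: sg_tablesize and can_queue come from the status block
 * derived values, and hostdata[0] carries the adpt_hba pointer back out
 * of the midlayer.
 */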
2315 static s32 adpt_scsi_host_alloc(adpt_hba* pHba, struct scsi_host_template *sht)
2317 struct Scsi_Host *host;
2319 host = scsi_host_alloc(sht, sizeof(adpt_hba*));
2320 if (host == NULL) {
2321 printk(KERN_ERR "%s: scsi_host_alloc returned NULL\n", pHba->name);
2322 return -1;
2324 host->hostdata[0] = (unsigned long)pHba;
2325 pHba->host = host;
2327 host->irq = pHba->pDev->irq;
2328 /* no IO ports, so don't have to set host->io_port and
2329 * host->n_io_port
2331 host->io_port = 0;
2332 host->n_io_port = 0;
2333 /* see comments in scsi_host.h */
2334 host->max_id = 16;
2335 host->max_lun = 256;
2336 host->max_channel = pHba->top_scsi_channel + 1;
2337 host->cmd_per_lun = 1;
2338 host->unique_id = (u32)sys_tbl_pa + pHba->unit;
2339 host->sg_tablesize = pHba->sg_tablesize;
2340 host->can_queue = pHba->post_fifo_size;
2341 host->use_cmd_list = 1;
2343 return 0;
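/*
 * Translate an I2O reply frame into a SCSI result. In the frame as read
 * below: word 0 carries the message flags (MSG_FAIL), the word at
 * reply+16 holds the detailed status (low byte device status, next byte
 * HBA status), reply+20 is the byte count actually transferred (used to
 * set the resid), and sense data, when present, starts at reply+28.
 */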
2347 static s32 adpt_i2o_to_scsi(void __iomem *reply, struct scsi_cmnd* cmd)
2349 adpt_hba* pHba;
2350 u32 hba_status;
2351 u32 dev_status;
2352 u32 reply_flags = readl(reply) & 0xff00; // Leave it shifted up 8 bits
2353 // I know this would look cleaner if I just read bytes
2354 // but the model I have been using for all the rest of the
2355 // io is in 4 byte words - so I keep that model
2356 u16 detailed_status = readl(reply+16) &0xffff;
2357 dev_status = (detailed_status & 0xff);
2358 hba_status = detailed_status >> 8;
2360 // calculate resid for sg
2361 scsi_set_resid(cmd, scsi_bufflen(cmd) - readl(reply+20));
2363 pHba = (adpt_hba*) cmd->device->host->hostdata[0];
2365 cmd->sense_buffer[0] = '\0'; // initialize sense valid flag to false
2367 if(!(reply_flags & MSG_FAIL)) {
2368 switch(detailed_status & I2O_SCSI_DSC_MASK) {
2369 case I2O_SCSI_DSC_SUCCESS:
2370 cmd->result = (DID_OK << 16);
2371 // handle underflow
2372 if (readl(reply+20) < cmd->underflow) {
2373 cmd->result = (DID_ERROR <<16);
2374 printk(KERN_WARNING"%s: SCSI CMD underflow\n",pHba->name);
2376 break;
2377 case I2O_SCSI_DSC_REQUEST_ABORTED:
2378 cmd->result = (DID_ABORT << 16);
2379 break;
2380 case I2O_SCSI_DSC_PATH_INVALID:
2381 case I2O_SCSI_DSC_DEVICE_NOT_PRESENT:
2382 case I2O_SCSI_DSC_SELECTION_TIMEOUT:
2383 case I2O_SCSI_DSC_COMMAND_TIMEOUT:
2384 case I2O_SCSI_DSC_NO_ADAPTER:
2385 case I2O_SCSI_DSC_RESOURCE_UNAVAILABLE:
2386 printk(KERN_WARNING"%s: SCSI Timeout-Device (%d,%d,%llu) hba status=0x%x, dev status=0x%x, cmd=0x%x\n",
2387 pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun, hba_status, dev_status, cmd->cmnd[0]);
2388 cmd->result = (DID_TIME_OUT << 16);
2389 break;
2390 case I2O_SCSI_DSC_ADAPTER_BUSY:
2391 case I2O_SCSI_DSC_BUS_BUSY:
2392 cmd->result = (DID_BUS_BUSY << 16);
2393 break;
2394 case I2O_SCSI_DSC_SCSI_BUS_RESET:
2395 case I2O_SCSI_DSC_BDR_MESSAGE_SENT:
2396 cmd->result = (DID_RESET << 16);
2397 break;
2398 case I2O_SCSI_DSC_PARITY_ERROR_FAILURE:
2399 printk(KERN_WARNING"%s: SCSI CMD parity error\n",pHba->name);
2400 cmd->result = (DID_PARITY << 16);
2401 break;
2402 case I2O_SCSI_DSC_UNABLE_TO_ABORT:
2403 case I2O_SCSI_DSC_COMPLETE_WITH_ERROR:
2404 case I2O_SCSI_DSC_UNABLE_TO_TERMINATE:
2405 case I2O_SCSI_DSC_MR_MESSAGE_RECEIVED:
2406 case I2O_SCSI_DSC_AUTOSENSE_FAILED:
2407 case I2O_SCSI_DSC_DATA_OVERRUN:
2408 case I2O_SCSI_DSC_UNEXPECTED_BUS_FREE:
2409 case I2O_SCSI_DSC_SEQUENCE_FAILURE:
2410 case I2O_SCSI_DSC_REQUEST_LENGTH_ERROR:
2411 case I2O_SCSI_DSC_PROVIDE_FAILURE:
2412 case I2O_SCSI_DSC_REQUEST_TERMINATED:
2413 case I2O_SCSI_DSC_IDE_MESSAGE_SENT:
2414 case I2O_SCSI_DSC_UNACKNOWLEDGED_EVENT:
2415 case I2O_SCSI_DSC_MESSAGE_RECEIVED:
2416 case I2O_SCSI_DSC_INVALID_CDB:
2417 case I2O_SCSI_DSC_LUN_INVALID:
2418 case I2O_SCSI_DSC_SCSI_TID_INVALID:
2419 case I2O_SCSI_DSC_FUNCTION_UNAVAILABLE:
2420 case I2O_SCSI_DSC_NO_NEXUS:
2421 case I2O_SCSI_DSC_CDB_RECEIVED:
2422 case I2O_SCSI_DSC_LUN_ALREADY_ENABLED:
2423 case I2O_SCSI_DSC_QUEUE_FROZEN:
2424 case I2O_SCSI_DSC_REQUEST_INVALID:
2425 default:
2426 printk(KERN_WARNING"%s: SCSI error %0x-Device(%d,%d,%llu) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
2427 pHba->name, detailed_status & I2O_SCSI_DSC_MASK, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun,
2428 hba_status, dev_status, cmd->cmnd[0]);
2429 cmd->result = (DID_ERROR << 16);
2430 break;
2433 // copy over the request sense data if it was a check
2434 // condition status
2435 if (dev_status == SAM_STAT_CHECK_CONDITION) {
2436 u32 len = min(SCSI_SENSE_BUFFERSIZE, 40);
2437 // Copy over the sense data
2438 memcpy_fromio(cmd->sense_buffer, (reply+28) , len);
2439 if(cmd->sense_buffer[0] == 0x70 /* class 7 */ &&
2440 cmd->sense_buffer[2] == DATA_PROTECT ){
2441 /* This is to handle an array failed */
2442 cmd->result = (DID_TIME_OUT << 16);
2443 printk(KERN_WARNING"%s: SCSI Data Protect-Device (%d,%d,%llu) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
2444 pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun,
2445 hba_status, dev_status, cmd->cmnd[0]);
2449 } else {
2450 /* In this condition we could not talk to the tid;
2451 * the card rejected it. We should signal a retry
2452 * for a limited number of retries.
2454 cmd->result = (DID_TIME_OUT << 16);
2455 printk(KERN_WARNING"%s: I2O MSG_FAIL - Device (%d,%d,%llu) tid=%d, cmd=0x%x\n",
2456 pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun,
2457 ((struct adpt_device*)(cmd->device->hostdata))->tid, cmd->cmnd[0]);
2460 cmd->result |= (dev_status);
2462 if(cmd->scsi_done != NULL){
2463 cmd->scsi_done(cmd);
2465 return cmd->result;
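/*
 * Re-read the logical configuration table and fold any changes into our
 * device lists, under the host lock so this cannot race the interrupt
 * path.
 */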
2469 static s32 adpt_rescan(adpt_hba* pHba)
2471 s32 rcode;
2472 ulong flags = 0;
2474 if(pHba->host)
2475 spin_lock_irqsave(pHba->host->host_lock, flags);
2476 if ((rcode=adpt_i2o_lct_get(pHba)) < 0)
2477 goto out;
2478 if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0)
2479 goto out;
2480 rcode = 0;
2481 out: if(pHba->host)
2482 spin_unlock_irqrestore(pHba->host->host_lock, flags);
2483 return rcode;
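/*
 * Reconcile a freshly fetched LCT against the devices we already know.
 * Roughly: mark every known device DPTI_DEV_UNSCANNED, walk the LCT
 * (each entry is 9 dwords after a 3-dword header, hence the
 * (table_size - 3) / 9 bound), query bus/id/lun for every unclaimed
 * (user_tid == 0xfff) entry of a SCSI-ish class, create
 * i2o_device/adpt_device records for anything new, put rediscovered
 * devices back online, and finally offline whatever is still marked
 * unscanned.
 */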
2487 static s32 adpt_i2o_reparse_lct(adpt_hba* pHba)
2489 int i;
2490 int max;
2491 int tid;
2492 struct i2o_device *d;
2493 i2o_lct *lct = pHba->lct;
2494 u8 bus_no = 0;
2495 s16 scsi_id;
2496 u64 scsi_lun;
2497 u32 buf[10]; // at least 8 u32's
2498 struct adpt_device* pDev = NULL;
2499 struct i2o_device* pI2o_dev = NULL;
2501 if (lct == NULL) {
2502 printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
2503 return -1;
2506 max = lct->table_size;
2507 max -= 3;
2508 max /= 9;
2510 // Mark each drive as unscanned
2511 for (d = pHba->devices; d; d = d->next) {
2512 pDev =(struct adpt_device*) d->owner;
2513 if(!pDev){
2514 continue;
2516 pDev->state |= DPTI_DEV_UNSCANNED;
2519 printk(KERN_INFO "%s: LCT has %d entries.\n", pHba->name,max);
2521 for(i=0;i<max;i++) {
2522 if( lct->lct_entry[i].user_tid != 0xfff){
2523 continue;
2526 if( lct->lct_entry[i].class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
2527 lct->lct_entry[i].class_id == I2O_CLASS_SCSI_PERIPHERAL ||
2528 lct->lct_entry[i].class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
2529 tid = lct->lct_entry[i].tid;
2530 if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
2531 printk(KERN_ERR"%s: Could not query device\n",pHba->name);
2532 continue;
2534 bus_no = buf[0]>>16;
2535 if (bus_no >= MAX_CHANNEL) { /* Something wrong skip it */
2536 printk(KERN_WARNING
2537 "%s: Channel number %d out of range\n",
2538 pHba->name, bus_no);
2539 continue;
2542 scsi_id = buf[1];
2543 scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
2544 pDev = pHba->channel[bus_no].device[scsi_id];
2545 /* walk the LUN chain for this target */
2546 while(pDev) {
2547 if(pDev->scsi_lun == scsi_lun) {
2548 break;
2550 pDev = pDev->next_lun;
2552 if(!pDev ) { // Something new add it
2553 d = kmalloc(sizeof(struct i2o_device),
2554 GFP_ATOMIC);
2555 if(d==NULL)
2556 {
2557 printk(KERN_CRIT "Out of memory for I2O device data.\n");
2558 return -ENOMEM;
2559 }
2561 d->controller = pHba;
2562 d->next = NULL;
2564 memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
2566 d->flags = 0;
2567 adpt_i2o_report_hba_unit(pHba, d);
2568 adpt_i2o_install_device(pHba, d);
2570 pDev = pHba->channel[bus_no].device[scsi_id];
2571 if( pDev == NULL){
2572 pDev =
2573 kzalloc(sizeof(struct adpt_device),
2574 GFP_ATOMIC);
2575 if(pDev == NULL) {
2576 return -ENOMEM;
2578 pHba->channel[bus_no].device[scsi_id] = pDev;
2579 } else {
2580 while (pDev->next_lun) {
2581 pDev = pDev->next_lun;
2583 pDev = pDev->next_lun =
2584 kzalloc(sizeof(struct adpt_device),
2585 GFP_ATOMIC);
2586 if(pDev == NULL) {
2587 return -ENOMEM;
2590 pDev->tid = d->lct_data.tid;
2591 pDev->scsi_channel = bus_no;
2592 pDev->scsi_id = scsi_id;
2593 pDev->scsi_lun = scsi_lun;
2594 pDev->pI2o_dev = d;
2595 d->owner = pDev;
2596 pDev->type = (buf[0])&0xff;
2597 pDev->flags = (buf[0]>>8)&0xff;
2598 // Too late, the SCSI system has made up its mind, but what the hey ...
2599 if(scsi_id > pHba->top_scsi_id){
2600 pHba->top_scsi_id = scsi_id;
2602 if(scsi_lun > pHba->top_scsi_lun){
2603 pHba->top_scsi_lun = scsi_lun;
2605 continue;
2606 } // end of new i2o device
2608 // We found an old device - check it
2609 while(pDev) {
2610 if(pDev->scsi_lun == scsi_lun) {
2611 if(!scsi_device_online(pDev->pScsi_dev)) {
2612 printk(KERN_WARNING"%s: Setting device (%d,%d,%llu) back online\n",
2613 pHba->name,bus_no,scsi_id,scsi_lun);
2614 if (pDev->pScsi_dev) {
2615 scsi_device_set_state(pDev->pScsi_dev, SDEV_RUNNING);
2618 d = pDev->pI2o_dev;
2619 if(d->lct_data.tid != tid) { // something changed
2620 pDev->tid = tid;
2621 memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
2622 if (pDev->pScsi_dev) {
2623 pDev->pScsi_dev->changed = TRUE;
2624 pDev->pScsi_dev->removable = TRUE;
2627 // Found it - mark it scanned
2628 pDev->state = DPTI_DEV_ONLINE;
2629 break;
2631 pDev = pDev->next_lun;
2635 for (pI2o_dev = pHba->devices; pI2o_dev; pI2o_dev = pI2o_dev->next) {
2636 pDev =(struct adpt_device*) pI2o_dev->owner;
2637 if(!pDev){
2638 continue;
2640 // Drive offline drives that previously existed but could not be found
2641 // in the LCT table
2642 if (pDev->state & DPTI_DEV_UNSCANNED){
2643 pDev->state = DPTI_DEV_OFFLINE;
2644 printk(KERN_WARNING"%s: Device (%d,%d,%llu) offline\n",pHba->name,pDev->scsi_channel,pDev->scsi_id,pDev->scsi_lun);
2645 if (pDev->pScsi_dev) {
2646 scsi_device_set_state(pDev->pScsi_dev, SDEV_OFFLINE);
2650 return 0;
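/*
 * Fail back every command still sitting on a device cmd_list with a
 * QUEUE_FULL status so the midlayer requeues it; this is the sweep used
 * when posted commands can no longer complete normally.
 */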
2653 static void adpt_fail_posted_scbs(adpt_hba* pHba)
2655 struct scsi_cmnd* cmd = NULL;
2656 struct scsi_device* d = NULL;
2658 shost_for_each_device(d, pHba->host) {
2659 unsigned long flags;
2660 spin_lock_irqsave(&d->list_lock, flags);
2661 list_for_each_entry(cmd, &d->cmd_list, list) {
2662 cmd->result = (DID_OK << 16) | (QUEUE_FULL <<1);
2663 cmd->scsi_done(cmd);
2665 spin_unlock_irqrestore(&d->list_lock, flags);
2670 /*============================================================================
2671 * Routines from i2o subsystem
2672 *============================================================================
2678 * Bring an I2O controller into HOLD state. See the spec.
2680 static int adpt_i2o_activate_hba(adpt_hba* pHba)
2682 int rcode;
2684 if(pHba->initialized ) {
2685 if (adpt_i2o_status_get(pHba) < 0) {
2686 if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
2687 printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
2688 return rcode;
2690 if (adpt_i2o_status_get(pHba) < 0) {
2691 printk(KERN_INFO "HBA not responding.\n");
2692 return -1;
2696 if(pHba->status_block->iop_state == ADAPTER_STATE_FAULTED) {
2697 printk(KERN_CRIT "%s: hardware fault\n", pHba->name);
2698 return -1;
2701 if (pHba->status_block->iop_state == ADAPTER_STATE_READY ||
2702 pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL ||
2703 pHba->status_block->iop_state == ADAPTER_STATE_HOLD ||
2704 pHba->status_block->iop_state == ADAPTER_STATE_FAILED) {
2705 adpt_i2o_reset_hba(pHba);
2706 if (adpt_i2o_status_get(pHba) < 0 || pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
2707 printk(KERN_ERR "%s: Failed to initialize.\n", pHba->name);
2708 return -1;
2711 } else {
2712 if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
2713 printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
2714 return rcode;
2719 if (adpt_i2o_init_outbound_q(pHba) < 0) {
2720 return -1;
2723 /* In HOLD state */
2725 if (adpt_i2o_hrt_get(pHba) < 0) {
2726 return -1;
2729 return 0;
2733 * Bring a controller online into OPERATIONAL state.
2736 static int adpt_i2o_online_hba(adpt_hba* pHba)
2738 if (adpt_i2o_systab_send(pHba) < 0)
2739 return -1;
2740 /* In READY state */
2742 if (adpt_i2o_enable_hba(pHba) < 0)
2743 return -1;
2745 /* In OPERATIONAL state */
2746 return 0;
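/*
 * Return a message frame to the controller by filling it with a UTIL
 * NOP and posting it. If no frame was handed in (m == EMPTY_QUEUE) one
 * is first claimed from the post FIFO, giving up after five seconds.
 */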
2749 static s32 adpt_send_nop(adpt_hba*pHba,u32 m)
2751 u32 __iomem *msg;
2752 ulong timeout = jiffies + 5*HZ;
2754 while(m == EMPTY_QUEUE){
2755 rmb();
2756 m = readl(pHba->post_port);
2757 if(m != EMPTY_QUEUE){
2758 break;
2760 if(time_after(jiffies,timeout)){
2761 printk(KERN_ERR "%s: Timeout waiting for message frame!\n",pHba->name);
2762 return 2;
2764 schedule_timeout_uninterruptible(1);
2766 msg = (u32 __iomem *)(pHba->msg_addr_virt + m);
2767 writel( THREE_WORD_MSG_SIZE | SGL_OFFSET_0,&msg[0]);
2768 writel( I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | 0,&msg[1]);
2769 writel( 0,&msg[2]);
2770 wmb();
2772 writel(m, pHba->post_port);
2773 wmb();
2774 return 0;
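/*
 * Initialize the outbound (reply) queue. The sequence below: claim an
 * inbound frame, send I2O_CMD_OUTBOUND_INIT naming a 4096-byte host
 * page frame and our reply frame size, poll a 4-byte coherent status
 * cell until it leaves the in-progress state (0x01, expecting 0x04 for
 * complete), then allocate the reply pool and prime the reply FIFO by
 * writing each frame's bus address to the reply port.
 */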
2777 static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba)
2779 u8 *status;
2780 dma_addr_t addr;
2781 u32 __iomem *msg = NULL;
2782 int i;
2783 ulong timeout = jiffies + TMOUT_INITOUTBOUND*HZ;
2784 u32 m;
2786 do {
2787 rmb();
2788 m = readl(pHba->post_port);
2789 if (m != EMPTY_QUEUE) {
2790 break;
2793 if(time_after(jiffies,timeout)){
2794 printk(KERN_WARNING"%s: Timeout waiting for message frame\n",pHba->name);
2795 return -ETIMEDOUT;
2797 schedule_timeout_uninterruptible(1);
2798 } while(m == EMPTY_QUEUE);
2800 msg=(u32 __iomem *)(pHba->msg_addr_virt+m);
2802 status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
2803 if (!status) {
2804 adpt_send_nop(pHba, m);
2805 printk(KERN_WARNING"%s: outbound queue init failed - no free memory.\n",
2806 pHba->name);
2807 return -ENOMEM;
2809 memset(status, 0, 4);
2811 writel(EIGHT_WORD_MSG_SIZE| SGL_OFFSET_6, &msg[0]);
2812 writel(I2O_CMD_OUTBOUND_INIT<<24 | HOST_TID<<12 | ADAPTER_TID, &msg[1]);
2813 writel(0, &msg[2]);
2814 writel(0x0106, &msg[3]); /* Transaction context */
2815 writel(4096, &msg[4]); /* Host page frame size */
2816 writel((REPLY_FRAME_SIZE)<<16|0x80, &msg[5]); /* Outbound msg frame size and Initcode */
2817 writel(0xD0000004, &msg[6]); /* Simple SG LE, EOB */
2818 writel((u32)addr, &msg[7]);
2820 writel(m, pHba->post_port);
2821 wmb();
2823 // Wait for the reply status to come back
2824 do {
2825 if (*status) {
2826 if (*status != 0x01 /*I2O_EXEC_OUTBOUND_INIT_IN_PROGRESS*/) {
2827 break;
2830 rmb();
2831 if(time_after(jiffies,timeout)){
2832 printk(KERN_WARNING"%s: Timeout Initializing\n",pHba->name);
2833 /* We lose 4 bytes of "status" here, but we
2834 cannot free these because controller may
2835 awake and corrupt those bytes at any time */
2836 /* dma_free_coherent(&pHba->pDev->dev, 4, status, addr); */
2837 return -ETIMEDOUT;
2839 schedule_timeout_uninterruptible(1);
2840 } while (1);
2842 // If the command was successful, fill the fifo with our reply
2843 // message packets
2844 if(*status != 0x04 /*I2O_EXEC_OUTBOUND_INIT_COMPLETE*/) {
2845 dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
2846 return -2;
2848 dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
2850 if(pHba->reply_pool != NULL) {
2851 dma_free_coherent(&pHba->pDev->dev,
2852 pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
2853 pHba->reply_pool, pHba->reply_pool_pa);
2856 pHba->reply_pool = dma_alloc_coherent(&pHba->pDev->dev,
2857 pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
2858 &pHba->reply_pool_pa, GFP_KERNEL);
2859 if (!pHba->reply_pool) {
2860 printk(KERN_ERR "%s: Could not allocate reply pool\n", pHba->name);
2861 return -ENOMEM;
2863 memset(pHba->reply_pool, 0 , pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4);
2865 for(i = 0; i < pHba->reply_fifo_size; i++) {
2866 writel(pHba->reply_pool_pa + (i * REPLY_FRAME_SIZE * 4),
2867 pHba->reply_port);
2868 wmb();
2870 adpt_i2o_status_get(pHba);
2871 return 0;
2876 * I2O System Table. Contains information about
2877 * all the IOPs in the system. Used to inform IOPs
2878 * about each other's existence.
2880 * sys_tbl_ver is the CurrentChangeIndicator that is
2881 * used by IOPs to track changes.
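/*
 * Fetch the IOP status block. I2O_CMD_STATUS_GET completes by the IOP
 * DMAing the 88-byte block into the buffer named in the message rather
 * than by a reply frame, which is why the code below polls the last
 * byte (status_block[87]) until the controller flips it to 0xff. The
 * block then dictates our FIFO depths and scatter/gather table size.
 */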
2886 static s32 adpt_i2o_status_get(adpt_hba* pHba)
2888 ulong timeout;
2889 u32 m;
2890 u32 __iomem *msg;
2891 u8 *status_block=NULL;
2893 if(pHba->status_block == NULL) {
2894 pHba->status_block = dma_alloc_coherent(&pHba->pDev->dev,
2895 sizeof(i2o_status_block),
2896 &pHba->status_block_pa, GFP_KERNEL);
2897 if(pHba->status_block == NULL) {
2898 printk(KERN_ERR
2899 "dpti%d: Get Status Block failed; Out of memory. \n",
2900 pHba->unit);
2901 return -ENOMEM;
2904 memset(pHba->status_block, 0, sizeof(i2o_status_block));
2905 status_block = (u8*)(pHba->status_block);
2906 timeout = jiffies+TMOUT_GETSTATUS*HZ;
2907 do {
2908 rmb();
2909 m = readl(pHba->post_port);
2910 if (m != EMPTY_QUEUE) {
2911 break;
2913 if(time_after(jiffies,timeout)){
2914 printk(KERN_ERR "%s: Timeout waiting for message !\n",
2915 pHba->name);
2916 return -ETIMEDOUT;
2918 schedule_timeout_uninterruptible(1);
2919 } while(m==EMPTY_QUEUE);
2922 msg=(u32 __iomem *)(pHba->msg_addr_virt+m);
2924 writel(NINE_WORD_MSG_SIZE|SGL_OFFSET_0, &msg[0]);
2925 writel(I2O_CMD_STATUS_GET<<24|HOST_TID<<12|ADAPTER_TID, &msg[1]);
2926 writel(1, &msg[2]);
2927 writel(0, &msg[3]);
2928 writel(0, &msg[4]);
2929 writel(0, &msg[5]);
2930 writel( dma_low(pHba->status_block_pa), &msg[6]);
2931 writel( dma_high(pHba->status_block_pa), &msg[7]);
2932 writel(sizeof(i2o_status_block), &msg[8]); // 88 bytes
2934 //post message
2935 writel(m, pHba->post_port);
2936 wmb();
2938 while(status_block[87]!=0xff){
2939 if(time_after(jiffies,timeout)){
2940 printk(KERN_ERR"dpti%d: Get status timeout.\n",
2941 pHba->unit);
2942 return -ETIMEDOUT;
2944 rmb();
2945 schedule_timeout_uninterruptible(1);
2948 // Set up our number of outbound and inbound messages
2949 pHba->post_fifo_size = pHba->status_block->max_inbound_frames;
2950 if (pHba->post_fifo_size > MAX_TO_IOP_MESSAGES) {
2951 pHba->post_fifo_size = MAX_TO_IOP_MESSAGES;
2954 pHba->reply_fifo_size = pHba->status_block->max_outbound_frames;
2955 if (pHba->reply_fifo_size > MAX_FROM_IOP_MESSAGES) {
2956 pHba->reply_fifo_size = MAX_FROM_IOP_MESSAGES;
2959 // Calculate the Scatter Gather list size
2960 if (dpt_dma64(pHba)) {
2961 pHba->sg_tablesize
2962 = ((pHba->status_block->inbound_frame_size * 4
2963 - 14 * sizeof(u32))
2964 / (sizeof(struct sg_simple_element) + sizeof(u32)));
2965 } else {
2966 pHba->sg_tablesize
2967 = ((pHba->status_block->inbound_frame_size * 4
2968 - 12 * sizeof(u32))
2969 / sizeof(struct sg_simple_element));
2971 if (pHba->sg_tablesize > SG_LIST_ELEMENTS) {
2972 pHba->sg_tablesize = SG_LIST_ELEMENTS;
2976 #ifdef DEBUG
2977 printk("dpti%d: State = ",pHba->unit);
2978 switch(pHba->status_block->iop_state) {
2979 case 0x01:
2980 printk("INIT\n");
2981 break;
2982 case 0x02:
2983 printk("RESET\n");
2984 break;
2985 case 0x04:
2986 printk("HOLD\n");
2987 break;
2988 case 0x05:
2989 printk("READY\n");
2990 break;
2991 case 0x08:
2992 printk("OPERATIONAL\n");
2993 break;
2994 case 0x10:
2995 printk("FAILED\n");
2996 break;
2997 case 0x11:
2998 printk("FAULTED\n");
2999 break;
3000 default:
3001 printk("%x (unknown!!)\n",pHba->status_block->iop_state);
3003 #endif
3004 return 0;
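/*
 * Note the retry loop below: the buffer is sized from the status
 * block's expected_lct_size first; if the returned table reports a
 * larger table_size (in dwords, hence the << 2) the buffer is freed,
 * grown, and the LCT fetched again until the whole table fits.
 */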
3008 * Get the IOP's Logical Configuration Table
3010 static int adpt_i2o_lct_get(adpt_hba* pHba)
3012 u32 msg[8];
3013 int ret;
3014 u32 buf[16];
3016 if ((pHba->lct_size == 0) || (pHba->lct == NULL)){
3017 pHba->lct_size = pHba->status_block->expected_lct_size;
3019 do {
3020 if (pHba->lct == NULL) {
3021 pHba->lct = dma_alloc_coherent(&pHba->pDev->dev,
3022 pHba->lct_size, &pHba->lct_pa,
3023 GFP_ATOMIC);
3024 if(pHba->lct == NULL) {
3025 printk(KERN_CRIT "%s: Lct Get failed. Out of memory.\n",
3026 pHba->name);
3027 return -ENOMEM;
3030 memset(pHba->lct, 0, pHba->lct_size);
3032 msg[0] = EIGHT_WORD_MSG_SIZE|SGL_OFFSET_6;
3033 msg[1] = I2O_CMD_LCT_NOTIFY<<24 | HOST_TID<<12 | ADAPTER_TID;
3034 msg[2] = 0;
3035 msg[3] = 0;
3036 msg[4] = 0xFFFFFFFF; /* All devices */
3037 msg[5] = 0x00000000; /* Report now */
3038 msg[6] = 0xD0000000|pHba->lct_size;
3039 msg[7] = (u32)pHba->lct_pa;
3041 if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 360))) {
3042 printk(KERN_ERR "%s: LCT Get failed (status=%#10x).\n",
3043 pHba->name, ret);
3044 printk(KERN_ERR"Adaptec: Error Reading Hardware.\n");
3045 return ret;
3048 if ((pHba->lct->table_size << 2) > pHba->lct_size) {
3049 pHba->lct_size = pHba->lct->table_size << 2;
3050 dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
3051 pHba->lct, pHba->lct_pa);
3052 pHba->lct = NULL;
3054 } while (pHba->lct == NULL);
3056 PDEBUG("%s: Hardware resource table read.\n", pHba->name);
3059 // I2O_DPT_EXEC_IOP_BUFFERS_GROUP_NO;
3060 if(adpt_i2o_query_scalar(pHba, 0 , 0x8000, -1, buf, sizeof(buf))>=0) {
3061 pHba->FwDebugBufferSize = buf[1];
3062 pHba->FwDebugBuffer_P = ioremap(pHba->base_addr_phys + buf[0],
3063 pHba->FwDebugBufferSize);
3064 if (pHba->FwDebugBuffer_P) {
3065 pHba->FwDebugFlags_P = pHba->FwDebugBuffer_P +
3066 FW_DEBUG_FLAGS_OFFSET;
3067 pHba->FwDebugBLEDvalue_P = pHba->FwDebugBuffer_P +
3068 FW_DEBUG_BLED_OFFSET;
3069 pHba->FwDebugBLEDflag_P = pHba->FwDebugBLEDvalue_P + 1;
3070 pHba->FwDebugStrLength_P = pHba->FwDebugBuffer_P +
3071 FW_DEBUG_STR_LENGTH_OFFSET;
3072 pHba->FwDebugBuffer_P += buf[2];
3073 pHba->FwDebugFlags = 0;
3077 return 0;
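/*
 * Rebuild the I2O system table from scratch: one i2o_sys_tbl_entry per
 * controller on hba_chain, each refreshed via adpt_i2o_status_get() and
 * pointed at its inbound FIFO (base_addr_phys + 0x40). The table is
 * later pushed to each IOP by adpt_i2o_systab_send().
 */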
3080 static int adpt_i2o_build_sys_table(void)
3082 adpt_hba* pHba = hba_chain;
3083 int count = 0;
3085 if (sys_tbl)
3086 dma_free_coherent(&pHba->pDev->dev, sys_tbl_len,
3087 sys_tbl, sys_tbl_pa);
3089 sys_tbl_len = sizeof(struct i2o_sys_tbl) + // Header + IOPs
3090 (hba_count) * sizeof(struct i2o_sys_tbl_entry);
3092 sys_tbl = dma_alloc_coherent(&pHba->pDev->dev,
3093 sys_tbl_len, &sys_tbl_pa, GFP_KERNEL);
3094 if (!sys_tbl) {
3095 printk(KERN_WARNING "SysTab Set failed. Out of memory.\n");
3096 return -ENOMEM;
3098 memset(sys_tbl, 0, sys_tbl_len);
3100 sys_tbl->num_entries = hba_count;
3101 sys_tbl->version = I2OVERSION;
3102 sys_tbl->change_ind = sys_tbl_ind++;
3104 for(pHba = hba_chain; pHba; pHba = pHba->next) {
3105 u64 addr;
3106 // Get updated Status Block so we have the latest information
3107 if (adpt_i2o_status_get(pHba)) {
3108 sys_tbl->num_entries--;
3109 continue; // try next one
3112 sys_tbl->iops[count].org_id = pHba->status_block->org_id;
3113 sys_tbl->iops[count].iop_id = pHba->unit + 2;
3114 sys_tbl->iops[count].seg_num = 0;
3115 sys_tbl->iops[count].i2o_version = pHba->status_block->i2o_version;
3116 sys_tbl->iops[count].iop_state = pHba->status_block->iop_state;
3117 sys_tbl->iops[count].msg_type = pHba->status_block->msg_type;
3118 sys_tbl->iops[count].frame_size = pHba->status_block->inbound_frame_size;
3119 sys_tbl->iops[count].last_changed = sys_tbl_ind - 1; // ??
3120 sys_tbl->iops[count].iop_capabilities = pHba->status_block->iop_capabilities;
3121 addr = pHba->base_addr_phys + 0x40;
3122 sys_tbl->iops[count].inbound_low = dma_low(addr);
3123 sys_tbl->iops[count].inbound_high = dma_high(addr);
3125 count++;
3128 #ifdef DEBUG
3130 u32 *table = (u32*)sys_tbl;
3131 printk(KERN_DEBUG"sys_tbl_len=%d in 32bit words\n",(sys_tbl_len >>2));
3132 for(count = 0; count < (sys_tbl_len >>2); count++) {
3133 printk(KERN_INFO "sys_tbl[%d] = %0#10x\n",
3134 count, table[count]);
3137 #endif
3139 return 0;
3144 * Dump the information block associated with a given unit (TID)
3147 static void adpt_i2o_report_hba_unit(adpt_hba* pHba, struct i2o_device *d)
3149 char buf[64];
3150 int unit = d->lct_data.tid;
3152 printk(KERN_INFO "TID %3.3d ", unit);
3154 if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 3, buf, 16)>=0)
3156 buf[16]=0;
3157 printk(" Vendor: %-12.12s", buf);
3159 if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 4, buf, 16)>=0)
3161 buf[16]=0;
3162 printk(" Device: %-12.12s", buf);
3164 if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 6, buf, 8)>=0)
3166 buf[8]=0;
3167 printk(" Rev: %-12.12s\n", buf);
3169 #ifdef DEBUG
3170 printk(KERN_INFO "\tClass: %.21s\n", adpt_i2o_get_class_name(d->lct_data.class_id));
3171 printk(KERN_INFO "\tSubclass: 0x%04X\n", d->lct_data.sub_class);
3172 printk(KERN_INFO "\tFlags: ");
3174 if(d->lct_data.device_flags&(1<<0))
3175 printk("C"); // ConfigDialog requested
3176 if(d->lct_data.device_flags&(1<<1))
3177 printk("U"); // Multi-user capable
3178 if(!(d->lct_data.device_flags&(1<<4)))
3179 printk("P"); // Peer service enabled!
3180 if(!(d->lct_data.device_flags&(1<<5)))
3181 printk("M"); // Mgmt service enabled!
3182 printk("\n");
3183 #endif
3186 #ifdef DEBUG
3188 * Do i2o class name lookup
3190 static const char *adpt_i2o_get_class_name(int class)
3192 int idx = 16;
3193 static char *i2o_class_name[] = {
3194 "Executive",
3195 "Device Driver Module",
3196 "Block Device",
3197 "Tape Device",
3198 "LAN Interface",
3199 "WAN Interface",
3200 "Fibre Channel Port",
3201 "Fibre Channel Device",
3202 "SCSI Device",
3203 "ATE Port",
3204 "ATE Device",
3205 "Floppy Controller",
3206 "Floppy Device",
3207 "Secondary Bus Port",
3208 "Peer Transport Agent",
3209 "Peer Transport",
3210 "Unknown"
3213 switch(class&0xFFF) {
3214 case I2O_CLASS_EXECUTIVE:
3215 idx = 0; break;
3216 case I2O_CLASS_DDM:
3217 idx = 1; break;
3218 case I2O_CLASS_RANDOM_BLOCK_STORAGE:
3219 idx = 2; break;
3220 case I2O_CLASS_SEQUENTIAL_STORAGE:
3221 idx = 3; break;
3222 case I2O_CLASS_LAN:
3223 idx = 4; break;
3224 case I2O_CLASS_WAN:
3225 idx = 5; break;
3226 case I2O_CLASS_FIBRE_CHANNEL_PORT:
3227 idx = 6; break;
3228 case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
3229 idx = 7; break;
3230 case I2O_CLASS_SCSI_PERIPHERAL:
3231 idx = 8; break;
3232 case I2O_CLASS_ATE_PORT:
3233 idx = 9; break;
3234 case I2O_CLASS_ATE_PERIPHERAL:
3235 idx = 10; break;
3236 case I2O_CLASS_FLOPPY_CONTROLLER:
3237 idx = 11; break;
3238 case I2O_CLASS_FLOPPY_DEVICE:
3239 idx = 12; break;
3240 case I2O_CLASS_BUS_ADAPTER_PORT:
3241 idx = 13; break;
3242 case I2O_CLASS_PEER_TRANSPORT_AGENT:
3243 idx = 14; break;
3244 case I2O_CLASS_PEER_TRANSPORT:
3245 idx = 15; break;
3247 return i2o_class_name[idx];
3249 #endif
3252 static s32 adpt_i2o_hrt_get(adpt_hba* pHba)
3254 u32 msg[6];
3255 int ret, size = sizeof(i2o_hrt);
3257 do {
3258 if (pHba->hrt == NULL) {
3259 pHba->hrt = dma_alloc_coherent(&pHba->pDev->dev,
3260 size, &pHba->hrt_pa, GFP_KERNEL);
3261 if (pHba->hrt == NULL) {
3262 printk(KERN_CRIT "%s: Hrt Get failed; Out of memory.\n", pHba->name);
3263 return -ENOMEM;
3267 msg[0]= SIX_WORD_MSG_SIZE| SGL_OFFSET_4;
3268 msg[1]= I2O_CMD_HRT_GET<<24 | HOST_TID<<12 | ADAPTER_TID;
3269 msg[2]= 0;
3270 msg[3]= 0;
3271 msg[4]= (0xD0000000 | size); /* Simple transaction */
3272 msg[5]= (u32)pHba->hrt_pa; /* Dump it here */
3274 if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg),20))) {
3275 printk(KERN_ERR "%s: Unable to get HRT (status=%#10x)\n", pHba->name, ret);
3276 return ret;
3279 if (pHba->hrt->num_entries * pHba->hrt->entry_len << 2 > size) {
3280 int newsize = pHba->hrt->num_entries * pHba->hrt->entry_len << 2;
3281 dma_free_coherent(&pHba->pDev->dev, size,
3282 pHba->hrt, pHba->hrt_pa);
3283 size = newsize;
3284 pHba->hrt = NULL;
3286 } while(pHba->hrt == NULL);
3287 return 0;
3291 * Query one scalar group value or a whole scalar group.
3293 static int adpt_i2o_query_scalar(adpt_hba* pHba, int tid,
3294 int group, int field, void *buf, int buflen)
3296 u16 opblk[] = { 1, 0, I2O_PARAMS_FIELD_GET, group, 1, field };
3297 u8 *opblk_va;
3298 dma_addr_t opblk_pa;
3299 u8 *resblk_va;
3300 dma_addr_t resblk_pa;
3302 int size;
3304 /* 8 bytes for header */
3305 resblk_va = dma_alloc_coherent(&pHba->pDev->dev,
3306 sizeof(u8) * (8 + buflen), &resblk_pa, GFP_KERNEL);
3307 if (resblk_va == NULL) {
3308 printk(KERN_CRIT "%s: query scalar failed; Out of memory.\n", pHba->name);
3309 return -ENOMEM;
3312 opblk_va = dma_alloc_coherent(&pHba->pDev->dev,
3313 sizeof(opblk), &opblk_pa, GFP_KERNEL);
3314 if (opblk_va == NULL) {
3315 dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3316 resblk_va, resblk_pa);
3317 printk(KERN_CRIT "%s: query operation failed; Out of memory.\n",
3318 pHba->name);
3319 return -ENOMEM;
3321 if (field == -1) /* whole group */
3322 opblk[4] = -1;
3324 memcpy(opblk_va, opblk, sizeof(opblk));
3325 size = adpt_i2o_issue_params(I2O_CMD_UTIL_PARAMS_GET, pHba, tid,
3326 opblk_va, opblk_pa, sizeof(opblk),
3327 resblk_va, resblk_pa, sizeof(u8)*(8+buflen));
3328 dma_free_coherent(&pHba->pDev->dev, sizeof(opblk), opblk_va, opblk_pa);
3329 if (size == -ETIME) {
3330 dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3331 resblk_va, resblk_pa);
3332 printk(KERN_WARNING "%s: issue params failed; Timed out.\n", pHba->name);
3333 return -ETIME;
3334 } else if (size == -EINTR) {
3335 dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3336 resblk_va, resblk_pa);
3337 printk(KERN_WARNING "%s: issue params failed; Interrupted.\n", pHba->name);
3338 return -EINTR;
3341 memcpy(buf, resblk_va+8, buflen); /* cut off header */
3343 dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3344 resblk_va, resblk_pa);
3345 if (size < 0)
3346 return size;
3348 return buflen;
3352 /* Issue UTIL_PARAMS_GET or UTIL_PARAMS_SET
3354 * This function can be used for all UtilParamsGet/Set operations.
3355 * The OperationBlock is given in opblk-buffer,
3356 * and results are returned in resblk-buffer.
3357 * Note that the minimum sized resblk is 8 bytes and contains
3358 * ResultCount, ErrorInfoSize, BlockStatus and BlockSize.
3360 static int adpt_i2o_issue_params(int cmd, adpt_hba* pHba, int tid,
3361 void *opblk_va, dma_addr_t opblk_pa, int oplen,
3362 void *resblk_va, dma_addr_t resblk_pa, int reslen)
3364 u32 msg[9];
3365 u32 *res = (u32 *)resblk_va;
3366 int wait_status;
3368 msg[0] = NINE_WORD_MSG_SIZE | SGL_OFFSET_5;
3369 msg[1] = cmd << 24 | HOST_TID << 12 | tid;
3370 msg[2] = 0;
3371 msg[3] = 0;
3372 msg[4] = 0;
3373 msg[5] = 0x54000000 | oplen; /* OperationBlock */
3374 msg[6] = (u32)opblk_pa;
3375 msg[7] = 0xD0000000 | reslen; /* ResultBlock */
3376 msg[8] = (u32)resblk_pa;
3378 if ((wait_status = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 20))) {
3379 printk(KERN_WARNING "adpt_i2o_issue_params: post_wait failed (%p)\n", resblk_va);
3380 return wait_status; /* -DetailedStatus */
3383 if (res[1]&0x00FF0000) { /* BlockStatus != SUCCESS */
3384 printk(KERN_WARNING "%s: %s - Error:\n ErrorInfoSize = 0x%02x, "
3385 "BlockStatus = 0x%02x, BlockSize = 0x%04x\n",
3386 pHba->name,
3387 (cmd == I2O_CMD_UTIL_PARAMS_SET) ? "PARAMS_SET"
3388 : "PARAMS_GET",
3389 res[1]>>24, (res[1]>>16)&0xFF, res[1]&0xFFFF);
3390 return -((res[1] >> 16) & 0xFF); /* -BlockStatus */
3393 return 4 + ((res[1] & 0x0000FFFF) << 2); /* bytes used in resblk */
3397 static s32 adpt_i2o_quiesce_hba(adpt_hba* pHba)
3399 u32 msg[4];
3400 int ret;
3402 adpt_i2o_status_get(pHba);
3404 /* SysQuiesce discarded if IOP not in READY or OPERATIONAL state */
3406 if((pHba->status_block->iop_state != ADAPTER_STATE_READY) &&
3407 (pHba->status_block->iop_state != ADAPTER_STATE_OPERATIONAL)){
3408 return 0;
3411 msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
3412 msg[1] = I2O_CMD_SYS_QUIESCE<<24|HOST_TID<<12|ADAPTER_TID;
3413 msg[2] = 0;
3414 msg[3] = 0;
3416 if((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
3417 printk(KERN_INFO"dpti%d: Unable to quiesce (status=%#x).\n",
3418 pHba->unit, -ret);
3419 } else {
3420 printk(KERN_INFO"dpti%d: Quiesced.\n",pHba->unit);
3423 adpt_i2o_status_get(pHba);
3424 return ret;
3429 * Enable IOP. Allows the IOP to resume external operations.
3431 static int adpt_i2o_enable_hba(adpt_hba* pHba)
3433 u32 msg[4];
3434 int ret;
3436 adpt_i2o_status_get(pHba);
3437 if(!pHba->status_block){
3438 return -ENOMEM;
3440 /* Enable only allowed on READY state */
3441 if(pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL)
3442 return 0;
3444 if(pHba->status_block->iop_state != ADAPTER_STATE_READY)
3445 return -EINVAL;
3447 msg[0]=FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
3448 msg[1]=I2O_CMD_SYS_ENABLE<<24|HOST_TID<<12|ADAPTER_TID;
3449 msg[2]= 0;
3450 msg[3]= 0;
3452 if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
3453 printk(KERN_WARNING"%s: Could not enable (status=%#10x).\n",
3454 pHba->name, ret);
3455 } else {
3456 PDEBUG("%s: Enabled.\n", pHba->name);
3459 adpt_i2o_status_get(pHba);
3460 return ret;
3464 static int adpt_i2o_systab_send(adpt_hba* pHba)
3466 u32 msg[12];
3467 int ret;
3469 msg[0] = I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6;
3470 msg[1] = I2O_CMD_SYS_TAB_SET<<24 | HOST_TID<<12 | ADAPTER_TID;
3471 msg[2] = 0;
3472 msg[3] = 0;
3473 msg[4] = (0<<16) | ((pHba->unit+2) << 12); /* Host 0 IOP ID (unit + 2) */
3474 msg[5] = 0; /* Segment 0 */
3477 * Provide three SGL-elements:
3478 * System table (SysTab), Private memory space declaration and
3479 * Private i/o space declaration
3481 msg[6] = 0x54000000 | sys_tbl_len;
3482 msg[7] = (u32)sys_tbl_pa;
3483 msg[8] = 0x54000000 | 0;
3484 msg[9] = 0;
3485 msg[10] = 0xD4000000 | 0;
3486 msg[11] = 0;
3488 if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 120))) {
3489 printk(KERN_INFO "%s: Unable to set SysTab (status=%#10x).\n",
3490 pHba->name, ret);
3492 #ifdef DEBUG
3493 else {
3494 PINFO("%s: SysTab set.\n", pHba->name);
3496 #endif
3498 return ret;
3502 /*============================================================================
3504 *============================================================================
3508 #ifdef UARTDELAY
3510 static void adpt_delay(int millisec)
3512 int i;
3513 for (i = 0; i < millisec; i++) {
3514 udelay(1000); /* delay for one millisecond */
3518 #endif
3520 static struct scsi_host_template driver_template = {
3521 .module = THIS_MODULE,
3522 .name = "dpt_i2o",
3523 .proc_name = "dpt_i2o",
3524 .show_info = adpt_show_info,
3525 .info = adpt_info,
3526 .queuecommand = adpt_queue,
3527 .eh_abort_handler = adpt_abort,
3528 .eh_device_reset_handler = adpt_device_reset,
3529 .eh_bus_reset_handler = adpt_bus_reset,
3530 .eh_host_reset_handler = adpt_reset,
3531 .bios_param = adpt_bios_param,
3532 .slave_configure = adpt_slave_configure,
3533 .can_queue = MAX_TO_IOP_MESSAGES,
3534 .this_id = 7,
3537 static int __init adpt_init(void)
3539 int error;
3540 adpt_hba *pHba, *next;
3542 printk("Loading Adaptec I2O RAID: Version " DPT_I2O_VERSION "\n");
3544 error = adpt_detect(&driver_template);
3545 if (error < 0)
3546 return error;
3547 if (hba_chain == NULL)
3548 return -ENODEV;
3550 for (pHba = hba_chain; pHba; pHba = pHba->next) {
3551 error = scsi_add_host(pHba->host, &pHba->pDev->dev);
3552 if (error)
3553 goto fail;
3554 scsi_scan_host(pHba->host);
3556 return 0;
3557 fail:
3558 for (pHba = hba_chain; pHba; pHba = next) {
3559 next = pHba->next;
3560 scsi_remove_host(pHba->host);
3562 return error;
3565 static void __exit adpt_exit(void)
3567 adpt_hba *pHba, *next;
3569 for (pHba = hba_chain; pHba; pHba = next) {
3570 next = pHba->next;
3571 adpt_release(pHba);
3575 module_init(adpt_init);
3576 module_exit(adpt_exit);
3578 MODULE_LICENSE("GPL");