/***************************************************************************
                          dpti.c  -  description
                             -------------------
    begin                : Thu Sep 7 2000
    copyright            : (C) 2000 by Adaptec

			   July 30, 2001 First version being submitted
			   for inclusion in the kernel.  V2.4

    See Documentation/scsi/dpti.txt for history, notes, license info
    and credits
 ***************************************************************************/

/***************************************************************************
 *                                                                          *
 *   This program is free software; you can redistribute it and/or modify   *
 *   it under the terms of the GNU General Public License as published by   *
 *   the Free Software Foundation; either version 2 of the License, or      *
 *   (at your option) any later version.                                    *
 *                                                                          *
 ***************************************************************************/
/***************************************************************************
 * Sat Dec 20 2003 Go Taniguchi <go@turbolinux.co.jp>
 - Support 2.6 kernel and DMA-mapping
 - ioctl fix for raid tools
 - use schedule_timeout in long long loop
 **************************************************************************/

/*#define DEBUG 1 */
/*#define UARTDELAY 1 */
#include <linux/module.h>

MODULE_AUTHOR("Deanna Bonds, with _lots_ of help from Mark Salyzyn");
MODULE_DESCRIPTION("Adaptec I2O RAID Driver");

////////////////////////////////////////////////////////////////
#include <linux/ioctl.h>	/* For SCSI-Passthrough */
#include <asm/uaccess.h>

#include <linux/stat.h>
#include <linux/slab.h>		/* for kmalloc() */
#include <linux/pci.h>		/* for PCI support */
#include <linux/proc_fs.h>
#include <linux/blkdev.h>
#include <linux/delay.h>	/* for udelay */
#include <linux/interrupt.h>
#include <linux/kernel.h>	/* for printk */
#include <linux/sched.h>
#include <linux/reboot.h>
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>

#include <linux/timer.h>
#include <linux/string.h>
#include <linux/ioport.h>
#include <linux/mutex.h>

#include <asm/processor.h>	/* for boot_cpu_data */
#include <asm/pgtable.h>
#include <asm/io.h>		/* for virt_to_bus, etc. */

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>

#include "dpt/dptsig.h"
#include "dpti.h"
/*============================================================================
 * Create a binary signature - this is read by dptsig
 * Needed for our management apps
 *============================================================================
 */
static DEFINE_MUTEX(adpt_mutex);
static dpt_sig_S DPTI_sig = {
	{'d', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION,
#ifdef __i386__
	PROC_INTEL, PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM,
#elif defined(__ia64__)
	PROC_INTEL, PROC_IA64,
#elif defined(__sparc__)
	PROC_ULTRASPARC, PROC_ULTRASPARC,
#elif defined(__alpha__)
	PROC_ALPHA, PROC_ALPHA,
#else
	(-1),(-1),
#endif
	FT_HBADRVR, 0, OEM_DPT, OS_LINUX, CAP_OVERLAP, DEV_ALL,
	ADF_ALL_SC5, 0, 0, DPT_VERSION, DPT_REVISION, DPT_SUBREVISION,
	DPT_MONTH, DPT_DAY, DPT_YEAR, "Adaptec Linux I2O RAID Driver"
};
/*============================================================================
 * Globals
 *============================================================================
 */

static DEFINE_MUTEX(adpt_configuration_lock);

static struct i2o_sys_tbl *sys_tbl;
static dma_addr_t sys_tbl_pa;
static int sys_tbl_ind;
static int sys_tbl_len;

static adpt_hba* hba_chain = NULL;
static int hba_count = 0;

static struct class *adpt_sysfs_class;

static long adpt_unlocked_ioctl(struct file *, unsigned int, unsigned long);
#ifdef CONFIG_COMPAT
static long compat_adpt_ioctl(struct file *, unsigned int, unsigned long);
#endif
static const struct file_operations adpt_fops = {
	.unlocked_ioctl	= adpt_unlocked_ioctl,
	.open		= adpt_open,
	.release	= adpt_close,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= compat_adpt_ioctl,
#endif
	.llseek		= noop_llseek,
};
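/*
 * Note: the management ioctls above are reached through per-adapter
 * character devices (dpti%d, major DPTI_I2O_MAJOR).  adpt_detect()
 * registers the character major and creates dpt_i2o class entries for
 * each adapter; the /dev nodes themselves are created from user space
 * (see the comment in adpt_detect()).
 */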
/* Structures and definitions for synchronous message posting.
 * See adpt_i2o_post_wait() for description
 * */
struct adpt_i2o_post_wait_data
{
	int status;
	u32 id;
	adpt_wait_queue_head_t *wq;
	struct adpt_i2o_post_wait_data *next;
};

static struct adpt_i2o_post_wait_data *adpt_post_wait_queue = NULL;
static u32 adpt_post_wait_id = 0;
static DEFINE_SPINLOCK(adpt_post_wait_lock);
/*============================================================================
 * 				Functions
 *============================================================================
 */

static inline int dpt_dma64(adpt_hba *pHba)
{
	return (sizeof(dma_addr_t) > 4 && (pHba)->dma64);
}

static inline u32 dma_high(dma_addr_t addr)
{
	return upper_32_bits(addr);
}

static inline u32 dma_low(dma_addr_t addr)
{
	return (u32)addr;
}
static u8 adpt_read_blink_led(adpt_hba* host)
{
	if (host->FwDebugBLEDflag_P) {
		if( readb(host->FwDebugBLEDflag_P) == 0xbc ){
			return readb(host->FwDebugBLEDvalue_P);
		}
	}
	return 0;
}
/*============================================================================
 * Scsi host template interface functions
 *============================================================================
 */

#ifdef MODULE
static struct pci_device_id dptids[] = {
	{ PCI_DPT_VENDOR_ID, PCI_DPT_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
	{ PCI_DPT_VENDOR_ID, PCI_DPT_RAPTOR_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
	{ 0, }
};
#endif

MODULE_DEVICE_TABLE(pci,dptids);
static int adpt_detect(struct scsi_host_template* sht)
{
	struct pci_dev *pDev = NULL;
	adpt_hba *pHba;
	adpt_hba *next;

	PINFO("Detecting Adaptec I2O RAID controllers...\n");

	/* search for all Adaptec I2O RAID cards */
	while ((pDev = pci_get_device( PCI_DPT_VENDOR_ID, PCI_ANY_ID, pDev))) {
		if(pDev->device == PCI_DPT_DEVICE_ID ||
		   pDev->device == PCI_DPT_RAPTOR_DEVICE_ID){
			if(adpt_install_hba(sht, pDev) ){
				PERROR("Could not Init an I2O RAID device\n");
				PERROR("Will not try to detect others.\n");
				return hba_count-1;
			}
			pci_dev_get(pDev);
		}
	}

	/* In INIT state, Activate IOPs */
	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		// Activate does get status , init outbound, and get hrt
		if (adpt_i2o_activate_hba(pHba) < 0) {
			adpt_i2o_delete_hba(pHba);
		}
	}


	/* Active IOPs in HOLD state */

rebuild_sys_tab:
	if (hba_chain == NULL)
		return 0;

	/*
	 * If build_sys_table fails, we kill everything and bail
	 * as we can't init the IOPs w/o a system table
	 */
	if (adpt_i2o_build_sys_table() < 0) {
		adpt_i2o_sys_shutdown();
		return 0;
	}

	PDEBUG("HBA's in HOLD state\n");

	/* If IOP don't get online, we need to rebuild the System table */
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (adpt_i2o_online_hba(pHba) < 0) {
			adpt_i2o_delete_hba(pHba);
			goto rebuild_sys_tab;
		}
	}

	/* Active IOPs now in OPERATIONAL state */
	PDEBUG("HBA's in OPERATIONAL state\n");

	printk("dpti: If you have a lot of devices this could take a few minutes.\n");
	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		printk(KERN_INFO"%s: Reading the hardware resource table.\n", pHba->name);
		if (adpt_i2o_lct_get(pHba) < 0){
			adpt_i2o_delete_hba(pHba);
			continue;
		}

		if (adpt_i2o_parse_lct(pHba) < 0){
			adpt_i2o_delete_hba(pHba);
			continue;
		}
		adpt_inquiry(pHba);
	}

	adpt_sysfs_class = class_create(THIS_MODULE, "dpt_i2o");
	if (IS_ERR(adpt_sysfs_class)) {
		printk(KERN_WARNING"dpti: unable to create dpt_i2o class\n");
		adpt_sysfs_class = NULL;
	}

	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		if (adpt_scsi_host_alloc(pHba, sht) < 0){
			adpt_i2o_delete_hba(pHba);
			continue;
		}
		pHba->initialized = TRUE;
		pHba->state &= ~DPTI_STATE_RESET;
		if (adpt_sysfs_class) {
			struct device *dev = device_create(adpt_sysfs_class,
				NULL, MKDEV(DPTI_I2O_MAJOR, pHba->unit), NULL,
				"dpti%d", pHba->unit);
			if (IS_ERR(dev)) {
				printk(KERN_WARNING"dpti%d: unable to "
					"create device in dpt_i2o class\n",
					pHba->unit);
			}
		}
	}

	// Register our control device node
	// nodes will need to be created in /dev to access this
	// the nodes can not be created from within the driver
	if (hba_count && register_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER, &adpt_fops)) {
		adpt_i2o_sys_shutdown();
		return 0;
	}
	return hba_count;
}
/*
 * scsi_unregister will be called AFTER we return.
 */
static int adpt_release(struct Scsi_Host *host)
{
	adpt_hba* pHba = (adpt_hba*) host->hostdata[0];
//	adpt_i2o_quiesce_hba(pHba);
	adpt_i2o_delete_hba(pHba);
	scsi_unregister(host);
	return 0;
}
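/*
 * Note: adpt_inquiry() below builds a vendor-private I2O_CMD_SCSI_EXEC
 * message (tagged with DPT_ORGANIZATION_ID) carrying a plain 6-byte
 * INQUIRY CDB and a single SG element.  The SGL starts at dword 12 and is
 * written in 32- or 64-bit form depending on dpt_dma64(), which is why
 * reqlen is 14 or 17 dwords.
 */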
static void adpt_inquiry(adpt_hba* pHba)
{
	u32 msg[17];
	u32 *mptr;
	u32 *lenptr;
	int direction;
	int scsidir;
	u32 len;
	u32 reqlen;
	u8* buf;
	dma_addr_t addr;
	u8 scb[16];
	s32 rcode;

	memset(msg, 0, sizeof(msg));
	buf = dma_alloc_coherent(&pHba->pDev->dev, 80, &addr, GFP_KERNEL);
	if(!buf){
		printk(KERN_ERR"%s: Could not allocate buffer\n",pHba->name);
		return;
	}
	memset((void*)buf, 0, 36);

	len = 36;
	direction = 0x00000000;
	scsidir = 0x40000000;	// DATA IN  (iop<--dev)

	if (dpt_dma64(pHba))
		reqlen = 17;		// SINGLE SGE, 64 bit
	else
		reqlen = 14;		// SINGLE SGE, 32 bit
	/* Stick the headers on */
	msg[0] = reqlen<<16 | SGL_OFFSET_12;
	msg[1] = (0xff<<24|HOST_TID<<12|ADAPTER_TID);
	msg[2] = 0;
	msg[3] = 0;
	// Adaptec/DPT Private stuff
	msg[4] = I2O_CMD_SCSI_EXEC|DPT_ORGANIZATION_ID<<16;
	msg[5] = ADAPTER_TID | 1<<16 /* Interpret*/;
	/* Direction, disconnect ok | sense data | simple queue , CDBLen */
	// I2O_SCB_FLAG_ENABLE_DISCONNECT |
	// I2O_SCB_FLAG_SIMPLE_QUEUE_TAG |
	// I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
	msg[6] = scsidir|0x20a00000| 6 /* cmd len*/;

	mptr=msg+7;

	memset(scb, 0, sizeof(scb));
	// Write SCSI command into the message - always 16 byte block
	scb[0] = INQUIRY;
	scb[1] = 0;
	scb[2] = 0;
	scb[3] = 0;
	scb[4] = 36;
	scb[5] = 0;
	// Don't care about the rest of scb

	memcpy(mptr, scb, sizeof(scb));
	mptr+=4;
	lenptr=mptr++;		/* Remember me - fill in when we know */

	/* Now fill in the SGList and command */
	*lenptr = len;
	if (dpt_dma64(pHba)) {
		*mptr++ = (0x7C<<24)+(2<<16)+0x02; /* Enable 64 bit */
		*mptr++ = 1 << PAGE_SHIFT;
		*mptr++ = 0xD0000000|direction|len;
		*mptr++ = dma_low(addr);
		*mptr++ = dma_high(addr);
	} else {
		*mptr++ = 0xD0000000|direction|len;
		*mptr++ = addr;
	}

	// Send it on its way
	rcode = adpt_i2o_post_wait(pHba, msg, reqlen<<2, 120);
	if (rcode != 0) {
		sprintf(pHba->detail, "Adaptec I2O RAID");
		printk(KERN_INFO "%s: Inquiry Error (%d)\n",pHba->name,rcode);
		if (rcode != -ETIME && rcode != -EINTR)
			dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
	} else {
		memset(pHba->detail, 0, sizeof(pHba->detail));
		memcpy(&(pHba->detail), "Vendor: Adaptec ", 16);
		memcpy(&(pHba->detail[16]), " Model: ", 8);
		memcpy(&(pHba->detail[24]), (u8*) &buf[16], 16);
		memcpy(&(pHba->detail[40]), " FW: ", 4);
		memcpy(&(pHba->detail[44]), (u8*) &buf[32], 4);
		pHba->detail[48] = '\0';	/* precautionary */
		dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
	}
	adpt_i2o_status_get(pHba);
	return ;
}
static int adpt_slave_configure(struct scsi_device * device)
{
	struct Scsi_Host *host = device->host;
	adpt_hba* pHba;

	pHba = (adpt_hba *) host->hostdata[0];

	if (host->can_queue && device->tagged_supported) {
		scsi_change_queue_depth(device,
				host->can_queue - 1);
	}
	return 0;
}
static int adpt_queue_lck(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd *))
{
	adpt_hba* pHba = NULL;
	struct adpt_device* pDev = NULL;	/* dpt per device information */

	cmd->scsi_done = done;
	/*
	 * SCSI REQUEST_SENSE commands will be executed automatically by the
	 * Host Adapter for any errors, so they should not be executed
	 * explicitly unless the Sense Data is zero indicating that no error
	 * occurred.
	 */

	if ((cmd->cmnd[0] == REQUEST_SENSE) && (cmd->sense_buffer[0] != 0)) {
		cmd->result = (DID_OK << 16);
		cmd->scsi_done(cmd);
		return 0;
	}

	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
	if (!pHba) {
		return FAILED;
	}

	rmb();
	if ((pHba->state) & DPTI_STATE_RESET)
		return SCSI_MLQUEUE_HOST_BUSY;

	// TODO if the cmd->device is offline then I may need to issue a bus rescan
	// followed by a get_lct to see if the device is there anymore
	if((pDev = (struct adpt_device*) (cmd->device->hostdata)) == NULL) {
		/*
		 * First command request for this device.  Set up a pointer
		 * to the device structure.  This should be a TEST_UNIT_READY
		 * command from scan_scsis_single.
		 */
		if ((pDev = adpt_find_device(pHba, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun)) == NULL) {
			// TODO: if any luns are at this bus, scsi id then fake a TEST_UNIT_READY and INQUIRY response
			// with type 7F (for all luns less than the max for this bus,id) so the lun scan will continue.
			cmd->result = (DID_NO_CONNECT << 16);
			cmd->scsi_done(cmd);
			return 0;
		}
		cmd->device->hostdata = pDev;
	}
	pDev->pScsi_dev = cmd->device;

	/*
	 * If we are being called from when the device is being reset,
	 * delay  processing of the command until later.
	 */
	if (pDev->state & DPTI_DEV_RESET ) {
		return FAILED;
	}
	return adpt_scsi_to_i2o(pHba, cmd, pDev);
}

static DEF_SCSI_QCMD(adpt_queue)
static int adpt_bios_param(struct scsi_device *sdev, struct block_device *dev,
		sector_t capacity, int geom[])
{
	int heads=-1;
	int sectors=-1;
	int cylinders=-1;

	// *** First lets set the default geometry ****

	// If the capacity is less than 0x2000
	if (capacity < 0x2000 ) {	// floppy
		heads = 18;
		sectors = 2;
	}
	// else if between 0x2000 and 0x20000
	else if (capacity < 0x20000) {
		heads = 64;
		sectors = 32;
	}
	// else if between 0x20000 and 0x40000
	else if (capacity < 0x40000) {
		heads = 65;
		sectors = 63;
	}
	// else if between 0x40000 and 0x80000
	else if (capacity < 0x80000) {
		heads = 128;
		sectors = 63;
	}
	// else if greater than 0x80000
	else {
		heads = 255;
		sectors = 63;
	}
	cylinders = sector_div(capacity, heads * sectors);

	// Special case if CDROM
	if(sdev->type == 5) {  // CDROM
		heads = 252;
		sectors = 63;
		cylinders = 1111;
	}

	geom[0] = heads;
	geom[1] = sectors;
	geom[2] = cylinders;

	PDEBUG("adpt_bios_param: exit\n");
	return 0;
}
static const char *adpt_info(struct Scsi_Host *host)
{
	adpt_hba* pHba;

	pHba = (adpt_hba *) host->hostdata[0];
	return (char *) (pHba->detail);
}
static int adpt_show_info(struct seq_file *m, struct Scsi_Host *host)
{
	struct adpt_device* d;
	int id;
	int chan;
	adpt_hba* pHba;
	int unit;

	// Find HBA (host bus adapter) we are looking for
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->host == host) {
			break;	/* found adapter */
		}
	}
	mutex_unlock(&adpt_configuration_lock);
	if (pHba == NULL) {
		return 0;
	}
	host = pHba->host;

	seq_printf(m, "Adaptec I2O RAID Driver Version: %s\n\n", DPT_I2O_VERSION);
	seq_printf(m, "%s\n", pHba->detail);
	seq_printf(m, "SCSI Host=scsi%d  Control Node=/dev/%s  irq=%d\n",
			pHba->host->host_no, pHba->name, host->irq);
	seq_printf(m, "\tpost fifo size  = %d\n\treply fifo size = %d\n\tsg table size   = %d\n\n",
			host->can_queue, (int) pHba->reply_fifo_size , host->sg_tablesize);

	seq_puts(m, "Devices:\n");
	for(chan = 0; chan < MAX_CHANNEL; chan++) {
		for(id = 0; id < MAX_ID; id++) {
			d = pHba->channel[chan].device[id];
			while(d) {
				seq_printf(m,"\t%-24.24s", d->pScsi_dev->vendor);
				seq_printf(m," Rev: %-8.8s\n", d->pScsi_dev->rev);

				unit = d->pI2o_dev->lct_data.tid;
				seq_printf(m, "\tTID=%d, (Channel=%d, Target=%d, Lun=%llu)  (%s)\n\n",
					       unit, (int)d->scsi_channel, (int)d->scsi_id, d->scsi_lun,
					       scsi_device_online(d->pScsi_dev)? "online":"offline");
				d = d->next_lun;
			}
		}
	}
	return 0;
}
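/*
 * Note on 'context' handling: I2O reply frames only carry 32-bit
 * transaction contexts, so outstanding SCSI commands are tagged with
 * cmd->serial_number truncated to a u32 and looked up again in the reply
 * path.  adpt_cmd_from_context() drops host_lock around its walk of the
 * host's devices, presumably because shost_for_each_device() takes and
 * releases device references; it re-acquires host_lock before returning.
 */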
/*
 *	Turn a struct scsi_cmnd * into a unique 32 bit 'context'.
 */
static u32 adpt_cmd_to_context(struct scsi_cmnd *cmd)
{
	return (u32)cmd->serial_number;
}
/*
 *	Go from a u32 'context' to a struct scsi_cmnd * .
 *	This could probably be made more efficient.
 */
static struct scsi_cmnd *
	adpt_cmd_from_context(adpt_hba * pHba, u32 context)
{
	struct scsi_cmnd * cmd;
	struct scsi_device * d;

	if (context == 0)
		return NULL;

	spin_unlock(pHba->host->host_lock);
	shost_for_each_device(d, pHba->host) {
		unsigned long flags;
		spin_lock_irqsave(&d->list_lock, flags);
		list_for_each_entry(cmd, &d->cmd_list, list) {
			if (((u32)cmd->serial_number == context)) {
				spin_unlock_irqrestore(&d->list_lock, flags);
				scsi_device_put(d);
				spin_lock(pHba->host->host_lock);
				return cmd;
			}
		}
		spin_unlock_irqrestore(&d->list_lock, flags);
	}
	spin_lock(pHba->host->host_lock);

	return NULL;
}
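/*
 * Note: on 32-bit kernels an ioctl reply pointer fits directly into the
 * 32-bit I2O transaction context.  On 64-bit kernels it cannot, so the two
 * helpers below instead hand out an index into pHba->ioctl_reply_context[]
 * and translate it back (clearing the slot) when the reply arrives.
 */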
/*
 *	Turn a pointer to ioctl reply data into an u32 'context'
 */
static u32 adpt_ioctl_to_context(adpt_hba * pHba, void *reply)
{
#if BITS_PER_LONG == 32
	return (u32)(unsigned long)reply;
#else
	ulong flags = 0;
	u32 nr, i;

	spin_lock_irqsave(pHba->host->host_lock, flags);
	nr = ARRAY_SIZE(pHba->ioctl_reply_context);
	for (i = 0; i < nr; i++) {
		if (pHba->ioctl_reply_context[i] == NULL) {
			pHba->ioctl_reply_context[i] = reply;
			break;
		}
	}
	spin_unlock_irqrestore(pHba->host->host_lock, flags);
	if (i >= nr) {
		kfree (reply);
		printk(KERN_WARNING"%s: Too many outstanding "
				"ioctl commands\n", pHba->name);
		return (u32)-1;
	}

	return i;
#endif
}
/*
 *	Go from an u32 'context' to a pointer to ioctl reply data.
 */
static void *adpt_ioctl_from_context(adpt_hba *pHba, u32 context)
{
#if BITS_PER_LONG == 32
	return (void *)(unsigned long)context;
#else
	void *p = pHba->ioctl_reply_context[context];
	pHba->ioctl_reply_context[context] = NULL;

	return p;
#endif
}
/*===========================================================================
 * Error Handling routines
 *===========================================================================
 */

static int adpt_abort(struct scsi_cmnd * cmd)
{
	adpt_hba* pHba = NULL;	/* host bus adapter structure */
	struct adpt_device* dptdevice;	/* dpt per device information */
	u32 msg[5];
	int rcode;

	if(cmd->serial_number == 0){
		return FAILED;
	}
	pHba = (adpt_hba*) cmd->device->host->hostdata[0];
	printk(KERN_INFO"%s: Trying to Abort\n",pHba->name);
	if ((dptdevice = (void*) (cmd->device->hostdata)) == NULL) {
		printk(KERN_ERR "%s: Unable to abort: No device in cmnd\n",pHba->name);
		return FAILED;
	}

	memset(msg, 0, sizeof(msg));
	msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = I2O_CMD_SCSI_ABORT<<24|HOST_TID<<12|dptdevice->tid;
	msg[2] = 0;
	msg[3]= 0;
	msg[4] = adpt_cmd_to_context(cmd);
	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER);
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		if(rcode == -EOPNOTSUPP ){
			printk(KERN_INFO"%s: Abort cmd not supported\n",pHba->name);
			return FAILED;
		}
		printk(KERN_INFO"%s: Abort failed.\n",pHba->name);
		return FAILED;
	}
	printk(KERN_INFO"%s: Abort complete.\n",pHba->name);
	return SUCCESS;
}
#define I2O_DEVICE_RESET 0x27
// This is the same for BLK and SCSI devices
// NOTE this is wrong in the i2o.h definitions
// This is not currently supported by our adapter but we issue it anyway
static int adpt_device_reset(struct scsi_cmnd* cmd)
{
	adpt_hba* pHba;
	u32 msg[4];
	u32 rcode;
	int old_state;
	struct adpt_device* d = cmd->device->hostdata;

	pHba = (void*) cmd->device->host->hostdata[0];
	printk(KERN_INFO"%s: Trying to reset device\n",pHba->name);
	if (!d) {
		printk(KERN_INFO"%s: Reset Device: Device Not found\n",pHba->name);
		return FAILED;
	}
	memset(msg, 0, sizeof(msg));
	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = (I2O_DEVICE_RESET<<24|HOST_TID<<12|d->tid);
	msg[2] = 0;
	msg[3] = 0;

	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	old_state = d->state;
	d->state |= DPTI_DEV_RESET;
	rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER);
	d->state = old_state;
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		if(rcode == -EOPNOTSUPP ){
			printk(KERN_INFO"%s: Device reset not supported\n",pHba->name);
			return FAILED;
		}
		printk(KERN_INFO"%s: Device reset failed\n",pHba->name);
		return FAILED;
	} else {
		printk(KERN_INFO"%s: Device reset successful\n",pHba->name);
		return SUCCESS;
	}
}
#define I2O_HBA_BUS_RESET 0x87
// This version of bus reset is called by the eh_error handler
static int adpt_bus_reset(struct scsi_cmnd* cmd)
{
	adpt_hba* pHba;
	u32 msg[4];
	u32 rcode;

	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
	memset(msg, 0, sizeof(msg));
	printk(KERN_WARNING"%s: Bus reset: SCSI Bus %d: tid: %d\n",pHba->name, cmd->device->channel,pHba->channel[cmd->device->channel].tid );
	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = (I2O_HBA_BUS_RESET<<24|HOST_TID<<12|pHba->channel[cmd->device->channel].tid);
	msg[2] = 0;
	msg[3] = 0;
	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER);
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		printk(KERN_WARNING"%s: Bus reset failed.\n",pHba->name);
		return FAILED;
	} else {
		printk(KERN_WARNING"%s: Bus reset success.\n",pHba->name);
		return SUCCESS;
	}
}
// This version of reset is called by the eh_error_handler
static int __adpt_reset(struct scsi_cmnd* cmd)
{
	adpt_hba* pHba;
	int rcode;
	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
	printk(KERN_WARNING"%s: Hba Reset: scsi id %d: tid: %d\n",pHba->name,cmd->device->channel,pHba->channel[cmd->device->channel].tid );
	rcode = adpt_hba_reset(pHba);
	if(rcode == 0){
		printk(KERN_WARNING"%s: HBA reset complete\n",pHba->name);
		return SUCCESS;
	} else {
		printk(KERN_WARNING"%s: HBA reset failed (%x)\n",pHba->name, rcode);
		return FAILED;
	}
}

static int adpt_reset(struct scsi_cmnd* cmd)
{
	int rc;

	spin_lock_irq(cmd->device->host->host_lock);
	rc = __adpt_reset(cmd);
	spin_unlock_irq(cmd->device->host->host_lock);

	return rc;
}
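/*
 * Note: adpt_hba_reset() below re-runs the normal bring-up sequence on a
 * live controller: activate (status get, outbound init, HRT fetch), rebuild
 * the system table, bring the IOP online, then re-read and re-parse the
 * LCT.  Only after that does it clear DPTI_STATE_RESET and fail back any
 * SCBs posted while the reset was in progress.
 */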
// This version of reset is called by the ioctls and indirectly from eh_error_handler via adpt_reset
static int adpt_hba_reset(adpt_hba* pHba)
{
	int rcode;

	pHba->state |= DPTI_STATE_RESET;

	// Activate does get status , init outbound, and get hrt
	if ((rcode=adpt_i2o_activate_hba(pHba)) < 0) {
		printk(KERN_ERR "%s: Could not activate\n", pHba->name);
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}

	if ((rcode=adpt_i2o_build_sys_table()) < 0) {
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	PDEBUG("%s: in HOLD state\n",pHba->name);

	if ((rcode=adpt_i2o_online_hba(pHba)) < 0) {
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	PDEBUG("%s: in OPERATIONAL state\n",pHba->name);

	if ((rcode=adpt_i2o_lct_get(pHba)) < 0){
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}

	if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0){
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	pHba->state &= ~DPTI_STATE_RESET;

	adpt_fail_posted_scbs(pHba);
	return 0;	/* return success */
}
/*===========================================================================
 *
 *===========================================================================
 */


static void adpt_i2o_sys_shutdown(void)
{
	adpt_hba *pHba, *pNext;
	struct adpt_i2o_post_wait_data *p1, *old;

	printk(KERN_INFO"Shutting down Adaptec I2O controllers.\n");
	printk(KERN_INFO"   This could take a few minutes if there are many devices attached\n");
	/* Delete all IOPs from the controller chain */
	/* They should have already been released by the
	 * scsi-core
	 */
	for (pHba = hba_chain; pHba; pHba = pNext) {
		pNext = pHba->next;
		adpt_i2o_delete_hba(pHba);
	}

	/* Remove any timed-out entries from the wait queue. */
//	spin_lock_irqsave(&adpt_post_wait_lock, flags);
	/* Nothing should be outstanding at this point so just
	 * free them
	 */
	for(p1 = adpt_post_wait_queue; p1;) {
		old = p1;
		p1 = p1->next;
		kfree(old);
	}
//	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
	adpt_post_wait_queue = NULL;

	printk(KERN_INFO "Adaptec I2O controllers down.\n");
}
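/*
 * Note: adpt_install_hba() below handles two PCI layouts.  Standard cards
 * expose both registers and the message window through BAR0 (capped at 1MB,
 * or forced to 4MB for the Raptor-class subsystem IDs 0xc032-0xc03b), while
 * split-BAR Raptor cards use BAR0 for registers and BAR1 for the message
 * window.
 */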
static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev)
{
	adpt_hba* pHba = NULL;
	adpt_hba* p = NULL;
	ulong base_addr0_phys = 0;
	ulong base_addr1_phys = 0;
	u32 hba_map0_area_size = 0;
	u32 hba_map1_area_size = 0;
	void __iomem *base_addr_virt = NULL;
	void __iomem *msg_addr_virt = NULL;
	int dma64 = 0;

	int raptorFlag = FALSE;

	if(pci_enable_device(pDev)) {
		return -EINVAL;
	}

	if (pci_request_regions(pDev, "dpt_i2o")) {
		PERROR("dpti: adpt_config_hba: pci request region failed\n");
		return -EINVAL;
	}

	pci_set_master(pDev);

	/*
	 *	See if we should enable dma64 mode.
	 */
	if (sizeof(dma_addr_t) > 4 &&
	    pci_set_dma_mask(pDev, DMA_BIT_MASK(64)) == 0) {
		if (dma_get_required_mask(&pDev->dev) > DMA_BIT_MASK(32))
			dma64 = 1;
	}
	if (!dma64 && pci_set_dma_mask(pDev, DMA_BIT_MASK(32)) != 0)
		return -EINVAL;

	/* adapter only supports message blocks below 4GB */
	pci_set_consistent_dma_mask(pDev, DMA_BIT_MASK(32));

	base_addr0_phys = pci_resource_start(pDev,0);
	hba_map0_area_size = pci_resource_len(pDev,0);

	// Check if standard PCI card or single BAR Raptor
	if(pDev->device == PCI_DPT_DEVICE_ID){
		if(pDev->subsystem_device >=0xc032 && pDev->subsystem_device <= 0xc03b){
			// Raptor card with this device id needs 4M
			hba_map0_area_size = 0x400000;
		} else { // Not Raptor - it is a PCI card
			if(hba_map0_area_size > 0x100000 ){
				hba_map0_area_size = 0x100000;
			}
		}
	} else {// Raptor split BAR config
		// Use BAR1 in this configuration
		base_addr1_phys = pci_resource_start(pDev,1);
		hba_map1_area_size = pci_resource_len(pDev,1);
		raptorFlag = TRUE;
	}

#if BITS_PER_LONG == 64
	/*
	 *	The original Adaptec 64 bit driver has this comment here:
	 *	"x86_64 machines need more optimal mappings"
	 *
	 *	I assume some HBAs report ridiculously large mappings
	 *	and we need to limit them on platforms with IOMMUs.
	 */
	if (raptorFlag == TRUE) {
		if (hba_map0_area_size > 128)
			hba_map0_area_size = 128;
		if (hba_map1_area_size > 524288)
			hba_map1_area_size = 524288;
	} else {
		if (hba_map0_area_size > 524288)
			hba_map0_area_size = 524288;
	}
#endif

	base_addr_virt = ioremap(base_addr0_phys,hba_map0_area_size);
	if (!base_addr_virt) {
		pci_release_regions(pDev);
		PERROR("dpti: adpt_config_hba: io remap failed\n");
		return -EINVAL;
	}

	if(raptorFlag == TRUE) {
		msg_addr_virt = ioremap(base_addr1_phys, hba_map1_area_size );
		if (!msg_addr_virt) {
			PERROR("dpti: adpt_config_hba: io remap failed on BAR1\n");
			iounmap(base_addr_virt);
			pci_release_regions(pDev);
			return -EINVAL;
		}
	} else {
		msg_addr_virt = base_addr_virt;
	}

	// Allocate and zero the data structure
	pHba = kzalloc(sizeof(adpt_hba), GFP_KERNEL);
	if (!pHba) {
		if (msg_addr_virt != base_addr_virt)
			iounmap(msg_addr_virt);
		iounmap(base_addr_virt);
		pci_release_regions(pDev);
		return -ENOMEM;
	}

	mutex_lock(&adpt_configuration_lock);

	if(hba_chain != NULL){
		for(p = hba_chain; p->next; p = p->next);
		p->next = pHba;
	} else {
		hba_chain = pHba;
	}
	pHba->next = NULL;
	pHba->unit = hba_count;
	sprintf(pHba->name, "dpti%d", hba_count);
	hba_count++;

	mutex_unlock(&adpt_configuration_lock);

	pHba->pDev = pDev;
	pHba->base_addr_phys = base_addr0_phys;

	// Set up the Virtual Base Address of the I2O Device
	pHba->base_addr_virt = base_addr_virt;
	pHba->msg_addr_virt = msg_addr_virt;
	pHba->irq_mask = base_addr_virt+0x30;
	pHba->post_port = base_addr_virt+0x40;
	pHba->reply_port = base_addr_virt+0x44;

	pHba->hrt = NULL;
	pHba->lct = NULL;
	pHba->lct_size = 0;
	pHba->status_block = NULL;
	pHba->post_count = 0;
	pHba->state = DPTI_STATE_RESET;
	pHba->pDev = pDev;
	pHba->devices = NULL;
	pHba->dma64 = dma64;

	// Initializing the spinlocks
	spin_lock_init(&pHba->state_lock);
	spin_lock_init(&adpt_post_wait_lock);

	if(raptorFlag == 0){
		printk(KERN_INFO "Adaptec I2O RAID controller"
				 " %d at %p size=%x irq=%d%s\n",
			hba_count-1, base_addr_virt,
			hba_map0_area_size, pDev->irq,
			dma64 ? " (64-bit DMA)" : "");
	} else {
		printk(KERN_INFO"Adaptec I2O RAID controller %d irq=%d%s\n",
			hba_count-1, pDev->irq,
			dma64 ? " (64-bit DMA)" : "");
		printk(KERN_INFO"     BAR0 %p - size= %x\n",base_addr_virt,hba_map0_area_size);
		printk(KERN_INFO"     BAR1 %p - size= %x\n",msg_addr_virt,hba_map1_area_size);
	}

	if (request_irq (pDev->irq, adpt_isr, IRQF_SHARED, pHba->name, pHba)) {
		printk(KERN_ERR"%s: Couldn't register IRQ %d\n", pHba->name, pDev->irq);
		adpt_i2o_delete_hba(pHba);
		return -EINVAL;
	}

	return 0;
}
static void adpt_i2o_delete_hba(adpt_hba* pHba)
{
	adpt_hba* p1;
	adpt_hba* p2;
	struct i2o_device* d;
	struct i2o_device* next;
	int i;
	int j;
	struct adpt_device* pDev;
	struct adpt_device* pNext;


	mutex_lock(&adpt_configuration_lock);
	// scsi_unregister calls our adpt_release which
	// does a quiesce
	if(pHba->host){
		free_irq(pHba->host->irq, pHba);
	}
	p2 = NULL;
	for( p1 = hba_chain; p1; p2 = p1,p1=p1->next){
		if(p1 == pHba) {
			if(p2) {
				p2->next = p1->next;
			} else {
				hba_chain = p1->next;
			}
			break;
		}
	}

	hba_count--;
	mutex_unlock(&adpt_configuration_lock);

	iounmap(pHba->base_addr_virt);
	pci_release_regions(pHba->pDev);
	if(pHba->msg_addr_virt != pHba->base_addr_virt){
		iounmap(pHba->msg_addr_virt);
	}
	if(pHba->FwDebugBuffer_P)
		iounmap(pHba->FwDebugBuffer_P);
	if(pHba->hrt) {
		dma_free_coherent(&pHba->pDev->dev,
			pHba->hrt->num_entries * pHba->hrt->entry_len << 2,
			pHba->hrt, pHba->hrt_pa);
	}
	if(pHba->lct) {
		dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
			pHba->lct, pHba->lct_pa);
	}
	if(pHba->status_block) {
		dma_free_coherent(&pHba->pDev->dev, sizeof(i2o_status_block),
			pHba->status_block, pHba->status_block_pa);
	}
	if(pHba->reply_pool) {
		dma_free_coherent(&pHba->pDev->dev,
			pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
			pHba->reply_pool, pHba->reply_pool_pa);
	}

	for(d = pHba->devices; d ; d = next){
		next = d->next;
		kfree(d);
	}
	for(i = 0 ; i < pHba->top_scsi_channel ; i++){
		for(j = 0; j < MAX_ID; j++){
			if(pHba->channel[i].device[j] != NULL){
				for(pDev = pHba->channel[i].device[j]; pDev; pDev = pNext){
					pNext = pDev->next_lun;
					kfree(pDev);
				}
			}
		}
	}
	pci_dev_put(pHba->pDev);
	if (adpt_sysfs_class)
		device_destroy(adpt_sysfs_class,
				MKDEV(DPTI_I2O_MAJOR, pHba->unit));
	kfree(pHba);

	if(hba_count <= 0){
		unregister_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER);
		if (adpt_sysfs_class) {
			class_destroy(adpt_sysfs_class);
			adpt_sysfs_class = NULL;
		}
	}
}
static struct adpt_device* adpt_find_device(adpt_hba* pHba, u32 chan, u32 id, u64 lun)
{
	struct adpt_device* d;

	if(chan < 0 || chan >= MAX_CHANNEL)
		return NULL;

	if( pHba->channel[chan].device == NULL){
		printk(KERN_DEBUG"Adaptec I2O RAID: Trying to find device before they are allocated\n");
		return NULL;
	}

	d = pHba->channel[chan].device[id];
	if(!d || d->tid == 0) {
		return NULL;
	}

	/* If it is the only lun at that address then this should match*/
	if(d->scsi_lun == lun){
		return d;
	}

	/* else we need to look through all the luns */
	for(d=d->next_lun ; d ; d = d->next_lun){
		if(d->scsi_lun == lun){
			return d;
		}
	}
	return NULL;
}
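/*
 * Note: synchronous message posting works by tagging msg[2] with
 * 0x80000000 plus a 15-bit wait-queue id (adpt_post_wait_id wraps at
 * 0x7fff).  The reply handler matches that id against the global
 * adpt_post_wait_queue list and wakes the sleeper; see
 * adpt_i2o_post_wait_complete().
 */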
static int adpt_i2o_post_wait(adpt_hba* pHba, u32* msg, int len, int timeout)
{
	// I used my own version of the WAIT_QUEUE_HEAD
	// to handle some version differences
	// When embedded in the kernel this could go back to the vanilla one
	ADPT_DECLARE_WAIT_QUEUE_HEAD(adpt_wq_i2o_post);
	int status = 0;
	ulong flags = 0;
	struct adpt_i2o_post_wait_data *p1, *p2;
	struct adpt_i2o_post_wait_data *wait_data =
		kmalloc(sizeof(struct adpt_i2o_post_wait_data), GFP_ATOMIC);
	DECLARE_WAITQUEUE(wait, current);

	if (!wait_data)
		return -ENOMEM;

	/*
	 * The spin locking is needed to keep anyone from playing
	 * with the queue pointers and id while we do the same
	 */
	spin_lock_irqsave(&adpt_post_wait_lock, flags);
	// TODO we need a MORE unique way of getting ids
	// to support async LCT get
	wait_data->next = adpt_post_wait_queue;
	adpt_post_wait_queue = wait_data;
	adpt_post_wait_id++;
	adpt_post_wait_id &= 0x7fff;
	wait_data->id = adpt_post_wait_id;
	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);

	wait_data->wq = &adpt_wq_i2o_post;
	wait_data->status = -ETIMEDOUT;

	add_wait_queue(&adpt_wq_i2o_post, &wait);

	msg[2] |= 0x80000000 | ((u32)wait_data->id);
	timeout *= HZ;
	if((status = adpt_i2o_post_this(pHba, msg, len)) == 0){
		set_current_state(TASK_INTERRUPTIBLE);
		if(pHba->host)
			spin_unlock_irq(pHba->host->host_lock);
		if (!timeout)
			schedule();
		else{
			timeout = schedule_timeout(timeout);
			if (timeout == 0) {
				// I/O issued, but cannot get result in
				// specified time. Freeing resources is
				// dangerous.
				status = -ETIME;
			}
		}
		if(pHba->host)
			spin_lock_irq(pHba->host->host_lock);
	}
	remove_wait_queue(&adpt_wq_i2o_post, &wait);

	if(status == -ETIMEDOUT){
		printk(KERN_INFO"dpti%d: POST WAIT TIMEOUT\n",pHba->unit);
		// We will have to free the wait_data memory during shutdown
		return status;
	}

	/* Remove the entry from the queue. */
	p2 = NULL;
	spin_lock_irqsave(&adpt_post_wait_lock, flags);
	for(p1 = adpt_post_wait_queue; p1; p2 = p1, p1 = p1->next) {
		if(p1 == wait_data) {
			if(p1->status == I2O_DETAIL_STATUS_UNSUPPORTED_FUNCTION ) {
				status = -EOPNOTSUPP;
			}
			if(p2) {
				p2->next = p1->next;
			} else {
				adpt_post_wait_queue = p1->next;
			}
			break;
		}
	}
	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);

	kfree(wait_data);

	return status;
}
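/*
 * Note: the inbound post protocol used by adpt_i2o_post_this() below is
 * plain I2O: read the post port to obtain a free message-frame offset
 * (MFA), retrying until one is available, copy the request into the frame
 * through the message window, then write the MFA back to the post port to
 * hand the frame to the IOP.
 */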
static s32 adpt_i2o_post_this(adpt_hba* pHba, u32* data, int len)
{
	u32 m = EMPTY_QUEUE;
	u32 __iomem *msg;
	ulong timeout = jiffies + 30*HZ;

	do {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}
		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"dpti%d: Timeout waiting for message frame!\n", pHba->unit);
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while(m == EMPTY_QUEUE);

	msg = pHba->msg_addr_virt + m;
	memcpy_toio(msg, data, len);
	wmb();

	//post message
	writel(m, pHba->post_port);
	wmb();

	return 0;
}
static void adpt_i2o_post_wait_complete(u32 context, int status)
{
	struct adpt_i2o_post_wait_data *p1 = NULL;
	/*
	 * We need to search through the adpt_post_wait
	 * queue to see if the given message is still
	 * outstanding.  If not, it means that the IOP
	 * took longer to respond to the message than we
	 * had allowed and timer has already expired.
	 * Not much we can do about that except log
	 * it for debug purposes, increase timeout, and recompile
	 *
	 * Lock needed to keep anyone from moving queue pointers
	 * around while we're looking through them.
	 */

	context &= 0x7fff;

	spin_lock(&adpt_post_wait_lock);
	for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
		if(p1->id == context) {
			p1->status = status;
			spin_unlock(&adpt_post_wait_lock);
			wake_up_interruptible(p1->wq);
			return;
		}
	}
	spin_unlock(&adpt_post_wait_lock);
	// If this happens we lose commands that probably really completed
	printk(KERN_DEBUG"dpti: Could Not find task %d in wait queue\n",context);
	printk(KERN_DEBUG"      Tasks in wait queue:\n");
	for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
		printk(KERN_DEBUG"         %d\n",p1->id);
	}
	return;
}
static s32 adpt_i2o_reset_hba(adpt_hba* pHba)
{
	u32 msg[8];
	u8* status;
	dma_addr_t addr;
	u32 m = EMPTY_QUEUE ;
	ulong timeout = jiffies + (TMOUT_IOPRESET*HZ);

	if(pHba->initialized == FALSE) {	// First time reset should be quick
		timeout = jiffies + (25*HZ);
	} else {
		adpt_i2o_quiesce_hba(pHba);
	}

	do {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}
		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"Timeout waiting for message!\n");
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while (m == EMPTY_QUEUE);

	status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
	if(status == NULL) {
		adpt_send_nop(pHba, m);
		printk(KERN_ERR"IOP reset failed - no free memory.\n");
		return -ENOMEM;
	}
	memset(status,0,4);

	msg[0]=EIGHT_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1]=I2O_CMD_ADAPTER_RESET<<24|HOST_TID<<12|ADAPTER_TID;
	msg[2]=0;
	msg[3]=0;
	msg[4]=0;
	msg[5]=0;
	msg[6]=dma_low(addr);
	msg[7]=dma_high(addr);

	memcpy_toio(pHba->msg_addr_virt+m, msg, sizeof(msg));
	wmb();
	writel(m, pHba->post_port);
	wmb();

	while(*status == 0){
		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"%s: IOP Reset Timeout\n",pHba->name);
			/* We lose 4 bytes of "status" here, but we cannot
			   free these because controller may awake and corrupt
			   those bytes at any time */
			/* dma_free_coherent(&pHba->pDev->dev, 4, buf, addr); */
			return -ETIMEDOUT;
		}
		rmb();
		schedule_timeout_uninterruptible(1);
	}

	if(*status == 0x01 /*I2O_EXEC_IOP_RESET_IN_PROGRESS*/) {
		PDEBUG("%s: Reset in progress...\n", pHba->name);
		// Here we wait for message frame to become available
		// indicating that reset has finished
		do {
			rmb();
			m = readl(pHba->post_port);
			if (m != EMPTY_QUEUE) {
				break;
			}
			if(time_after(jiffies,timeout)){
				printk(KERN_ERR "%s:Timeout waiting for IOP Reset.\n",pHba->name);
				/* We lose 4 bytes of "status" here, but we
				   cannot free these because controller may
				   awake and corrupt those bytes at any time */
				/* dma_free_coherent(&pHba->pDev->dev, 4, buf, addr); */
				return -ETIMEDOUT;
			}
			schedule_timeout_uninterruptible(1);
		} while (m == EMPTY_QUEUE);
		// Flush the offset
		adpt_send_nop(pHba, m);
	}
	adpt_i2o_status_get(pHba);
	if(*status == 0x02 ||
			pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
		printk(KERN_WARNING"%s: Reset reject, trying to clear\n",
				pHba->name);
	} else {
		PDEBUG("%s: Reset completed.\n", pHba->name);
	}

	dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
#ifdef UARTDELAY
	// This delay is to allow someone attached to the card through the debug UART to
	// set up the dump levels that they want before the rest of the initialization sequence
	adpt_delay(20000);
#endif
	return 0;
}
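/*
 * Note: in adpt_i2o_parse_lct() below, the entry count is derived from the
 * LCT's table_size (in 32-bit words): a 3-word header is subtracted, and
 * each entry occupies 9 words, hence max = (table_size - 3) / 9.
 */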
static int adpt_i2o_parse_lct(adpt_hba* pHba)
{
	int i;
	int max;
	int tid;
	struct i2o_device *d;
	i2o_lct *lct = pHba->lct;
	u8 bus_no = 0;
	s16 scsi_id;
	u64 scsi_lun;
	u32 buf[10]; // larger than 7, or 8 ...
	struct adpt_device* pDev;

	if (lct == NULL) {
		printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
		return -1;
	}

	max = lct->table_size;
	max -= 3;
	max /= 9;

	for(i=0;i<max;i++) {
		if( lct->lct_entry[i].user_tid != 0xfff){
			/*
			 * If we have hidden devices, we need to inform the upper layers about
			 * the possible maximum id reference to handle device access when
			 * an array is disassembled. This code has no other purpose but to
			 * allow us future access to devices that are currently hidden
			 * behind arrays, hotspares or have not been configured (JBOD mode).
			 */
			if( lct->lct_entry[i].class_id != I2O_CLASS_RANDOM_BLOCK_STORAGE &&
			    lct->lct_entry[i].class_id != I2O_CLASS_SCSI_PERIPHERAL &&
			    lct->lct_entry[i].class_id != I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
				continue;
			}
			tid = lct->lct_entry[i].tid;
			// I2O_DPT_DEVICE_INFO_GROUP_NO;
			if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
				continue;
			}
			bus_no = buf[0]>>16;
			scsi_id = buf[1];
			scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
			if(bus_no >= MAX_CHANNEL) {	// Something wrong skip it
				printk(KERN_WARNING"%s: Channel number %d out of range \n", pHba->name, bus_no);
				continue;
			}
			if (scsi_id >= MAX_ID){
				printk(KERN_WARNING"%s: SCSI ID %d out of range \n", pHba->name, scsi_id);
				continue;
			}
			if(bus_no > pHba->top_scsi_channel){
				pHba->top_scsi_channel = bus_no;
			}
			if(scsi_id > pHba->top_scsi_id){
				pHba->top_scsi_id = scsi_id;
			}
			if(scsi_lun > pHba->top_scsi_lun){
				pHba->top_scsi_lun = scsi_lun;
			}
			continue;
		}
		d = kmalloc(sizeof(struct i2o_device), GFP_KERNEL);
		if(d==NULL)
		{
			printk(KERN_CRIT"%s: Out of memory for I2O device data.\n",pHba->name);
			return -ENOMEM;
		}

		d->controller = pHba;
		d->next = NULL;

		memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));

		d->flags = 0;
		tid = d->lct_data.tid;
		adpt_i2o_report_hba_unit(pHba, d);
		adpt_i2o_install_device(pHba, d);
	}
	bus_no = 0;
	for(d = pHba->devices; d ; d = d->next) {
		if(d->lct_data.class_id == I2O_CLASS_BUS_ADAPTER_PORT ||
		   d->lct_data.class_id == I2O_CLASS_FIBRE_CHANNEL_PORT){
			tid = d->lct_data.tid;
			// TODO get the bus_no from hrt-but for now they are in order
			//bus_no =
			if(bus_no > pHba->top_scsi_channel){
				pHba->top_scsi_channel = bus_no;
			}
			pHba->channel[bus_no].type = d->lct_data.class_id;
			pHba->channel[bus_no].tid = tid;
			if(adpt_i2o_query_scalar(pHba, tid, 0x0200, -1, buf, 28)>=0)
			{
				pHba->channel[bus_no].scsi_id = buf[1];
				PDEBUG("Bus %d - SCSI ID %d.\n", bus_no, buf[1]);
			}
			// TODO remove - this is just until we get from hrt
			bus_no++;
			if(bus_no >= MAX_CHANNEL) {	// Something wrong skip it
				printk(KERN_WARNING"%s: Channel number %d out of range - LCT\n", pHba->name, bus_no);
				break;
			}
		}
	}

	// Setup adpt_device table
	for(d = pHba->devices; d ; d = d->next) {
		if(d->lct_data.class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
		   d->lct_data.class_id == I2O_CLASS_SCSI_PERIPHERAL ||
		   d->lct_data.class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){

			tid = d->lct_data.tid;
			scsi_id = -1;
			// I2O_DPT_DEVICE_INFO_GROUP_NO;
			if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)>=0) {
				bus_no = buf[0]>>16;
				scsi_id = buf[1];
				scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
				if(bus_no >= MAX_CHANNEL) {	// Something wrong skip it
					continue;
				}
				if (scsi_id >= MAX_ID) {
					continue;
				}
				if( pHba->channel[bus_no].device[scsi_id] == NULL){
					pDev = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
					if(pDev == NULL) {
						return -ENOMEM;
					}
					pHba->channel[bus_no].device[scsi_id] = pDev;
				} else {
					for( pDev = pHba->channel[bus_no].device[scsi_id];
							pDev->next_lun; pDev = pDev->next_lun){
					}
					pDev->next_lun = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
					if(pDev->next_lun == NULL) {
						return -ENOMEM;
					}
					pDev = pDev->next_lun;
				}
				pDev->tid = tid;
				pDev->scsi_channel = bus_no;
				pDev->scsi_id = scsi_id;
				pDev->scsi_lun = scsi_lun;
				pDev->pI2o_dev = d;
				d->owner = pDev;
				pDev->type = (buf[0])&0xff;
				pDev->flags = (buf[0]>>8)&0xff;
				if(scsi_id > pHba->top_scsi_id){
					pHba->top_scsi_id = scsi_id;
				}
				if(scsi_lun > pHba->top_scsi_lun){
					pHba->top_scsi_lun = scsi_lun;
				}
			}
			if(scsi_id == -1){
				printk(KERN_WARNING"Could not find SCSI ID for %s\n",
						d->lct_data.identity_tag);
			}
		}
	}
	return 0;
}
/*
 *	Each I2O controller has a chain of devices on it - these match
 *	the useful parts of the LCT of the board.
 */

static int adpt_i2o_install_device(adpt_hba* pHba, struct i2o_device *d)
{
	mutex_lock(&adpt_configuration_lock);
	d->controller=pHba;
	d->owner=NULL;
	d->next=pHba->devices;
	d->prev=NULL;
	if (pHba->devices != NULL){
		pHba->devices->prev=d;
	}
	pHba->devices=d;
	*d->dev_name = 0;

	mutex_unlock(&adpt_configuration_lock);
	return 0;
}
static int adpt_open(struct inode *inode, struct file *file)
{
	int minor;
	adpt_hba* pHba;

	mutex_lock(&adpt_mutex);
	//TODO check for root access
	//
	minor = iminor(inode);
	if (minor >= hba_count) {
		mutex_unlock(&adpt_mutex);
		return -ENXIO;
	}
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->unit == minor) {
			break;	/* found adapter */
		}
	}
	if (pHba == NULL) {
		mutex_unlock(&adpt_configuration_lock);
		mutex_unlock(&adpt_mutex);
		return -ENXIO;
	}

//	if(pHba->in_use){
	//	mutex_unlock(&adpt_configuration_lock);
//		return -EBUSY;
//	}

	pHba->in_use = 1;
	mutex_unlock(&adpt_configuration_lock);
	mutex_unlock(&adpt_mutex);

	return 0;
}
static int adpt_close(struct inode *inode, struct file *file)
{
	int minor;
	adpt_hba* pHba;

	minor = iminor(inode);
	if (minor >= hba_count) {
		return -ENXIO;
	}
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->unit == minor) {
			break;	/* found adapter */
		}
	}
	mutex_unlock(&adpt_configuration_lock);
	if (pHba == NULL) {
		return -ENXIO;
	}

	pHba->in_use = 0;

	return 0;
}
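/*
 * Note: adpt_i2o_passthru() below copies the caller's message into kernel
 * space, allocates a coherent bounce buffer for every simple SG element,
 * rewrites the element addresses to point at the bounce buffers, posts the
 * message, and on completion copies the data and reply frame back out to
 * user space.  Only 32-bit simple SG elements are handled (see the
 * "TODO add 64 bit API" markers).
 */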
static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
{
	u32 msg[MAX_MESSAGE_SIZE];
	u32* reply = NULL;
	u32 size = 0;
	u32 reply_size = 0;
	u32 __user *user_msg = arg;
	u32 __user * user_reply = NULL;
	void *sg_list[pHba->sg_tablesize];
	u32 sg_offset = 0;
	u32 sg_count = 0;
	int sg_index = 0;
	u32 i = 0;
	u32 rcode = 0;
	void *p = NULL;
	dma_addr_t addr;
	ulong flags = 0;

	memset(&msg, 0, MAX_MESSAGE_SIZE*4);
	// get user msg size in u32s
	if(get_user(size, &user_msg[0])){
		return -EFAULT;
	}
	size = size>>16;

	user_reply = &user_msg[size];
	if(size > MAX_MESSAGE_SIZE){
		return -EFAULT;
	}
	size *= 4; // Convert to bytes

	/* Copy in the user's I2O command */
	if(copy_from_user(msg, user_msg, size)) {
		return -EFAULT;
	}
	get_user(reply_size, &user_reply[0]);
	reply_size = reply_size>>16;
	if(reply_size > REPLY_FRAME_SIZE){
		reply_size = REPLY_FRAME_SIZE;
	}
	reply_size *= 4;
	reply = kzalloc(REPLY_FRAME_SIZE*4, GFP_KERNEL);
	if(reply == NULL) {
		printk(KERN_WARNING"%s: Could not allocate reply buffer\n",pHba->name);
		return -ENOMEM;
	}
	sg_offset = (msg[0]>>4)&0xf;
	msg[2] = 0x40000000; // IOCTL context
	msg[3] = adpt_ioctl_to_context(pHba, reply);
	if (msg[3] == (u32)-1)
		return -EBUSY;

	memset(sg_list,0, sizeof(sg_list[0])*pHba->sg_tablesize);
	if(sg_offset) {
		// TODO add 64 bit API
		struct sg_simple_element *sg = (struct sg_simple_element*) (msg+sg_offset);
		sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
		if (sg_count > pHba->sg_tablesize){
			printk(KERN_DEBUG"%s:IOCTL SG List too large (%u)\n", pHba->name,sg_count);
			kfree (reply);
			return -EINVAL;
		}

		for(i = 0; i < sg_count; i++) {
			int sg_size;

			if (!(sg[i].flag_count & 0x10000000 /*I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT*/)) {
				printk(KERN_DEBUG"%s:Bad SG element %d - not simple (%x)\n",pHba->name,i, sg[i].flag_count);
				rcode = -EINVAL;
				goto cleanup;
			}
			sg_size = sg[i].flag_count & 0xffffff;
			/* Allocate memory for the transfer */
			p = dma_alloc_coherent(&pHba->pDev->dev, sg_size, &addr, GFP_KERNEL);
			if(!p) {
				printk(KERN_DEBUG"%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
						pHba->name,sg_size,i,sg_count);
				rcode = -ENOMEM;
				goto cleanup;
			}
			sg_list[sg_index++] = p; // sglist indexed with input frame, not our internal frame.
			/* Copy in the user's SG buffer if necessary */
			if(sg[i].flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR*/) {
				// sg_simple_element API is 32 bit
				if (copy_from_user(p,(void __user *)(ulong)sg[i].addr_bus, sg_size)) {
					printk(KERN_DEBUG"%s: Could not copy SG buf %d FROM user\n",pHba->name,i);
					rcode = -EFAULT;
					goto cleanup;
				}
			}
			/* sg_simple_element API is 32 bit, but addr < 4GB */
			sg[i].addr_bus = addr;
		}
	}

	do {
		/*
		 * Stop any new commands from entering the
		 * controller while processing the ioctl
		 */
		if (pHba->host) {
			scsi_block_requests(pHba->host);
			spin_lock_irqsave(pHba->host->host_lock, flags);
		}
		rcode = adpt_i2o_post_wait(pHba, msg, size, FOREVER);
		if (rcode != 0)
			printk("adpt_i2o_passthru: post wait failed %d %p\n",
					rcode, reply);
		if (pHba->host) {
			spin_unlock_irqrestore(pHba->host->host_lock, flags);
			scsi_unblock_requests(pHba->host);
		}
	} while (rcode == -ETIMEDOUT);

	if(rcode){
		goto cleanup;
	}

	if(sg_offset) {
		/* Copy back the Scatter Gather buffers back to user space */
		u32 j;
		// TODO add 64 bit API
		struct sg_simple_element* sg;
		int sg_size;

		// re-acquire the original message to handle correctly the sg copy operation
		memset(&msg, 0, MAX_MESSAGE_SIZE*4);
		// get user msg size in u32s
		if(get_user(size, &user_msg[0])){
			rcode = -EFAULT;
			goto cleanup;
		}
		size = size>>16;
		size *= 4;
		if (size > MAX_MESSAGE_SIZE) {
			rcode = -EINVAL;
			goto cleanup;
		}
		/* Copy in the user's I2O command */
		if (copy_from_user (msg, user_msg, size)) {
			rcode = -EFAULT;
			goto cleanup;
		}
		sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);

		// TODO add 64 bit API
		sg = (struct sg_simple_element*)(msg + sg_offset);
		for (j = 0; j < sg_count; j++) {
			/* Copy out the SG list to user's buffer if necessary */
			if(! (sg[j].flag_count & 0x4000000 /*I2O_SGL_FLAGS_DIR*/)) {
				sg_size = sg[j].flag_count & 0xffffff;
				// sg_simple_element API is 32 bit
				if (copy_to_user((void __user *)(ulong)sg[j].addr_bus,sg_list[j], sg_size)) {
					printk(KERN_WARNING"%s: Could not copy %p TO user %x\n",pHba->name, sg_list[j], sg[j].addr_bus);
					rcode = -EFAULT;
					goto cleanup;
				}
			}
		}
	}

	/* Copy back the reply to user space */
	if (reply_size) {
		// we wrote our own values for context - now restore the user supplied ones
		if(copy_from_user(reply+2, user_msg+2, sizeof(u32)*2)) {
			printk(KERN_WARNING"%s: Could not copy message context FROM user\n",pHba->name);
			rcode = -EFAULT;
		}
		if(copy_to_user(user_reply, reply, reply_size)) {
			printk(KERN_WARNING"%s: Could not copy reply TO user\n",pHba->name);
			rcode = -EFAULT;
		}
	}


cleanup:
	if (rcode != -ETIME && rcode != -EINTR) {
		struct sg_simple_element *sg =
				(struct sg_simple_element*) (msg +sg_offset);
		kfree (reply);
		while(sg_index) {
			if(sg_list[--sg_index]) {
				dma_free_coherent(&pHba->pDev->dev,
					sg[sg_index].flag_count & 0xffffff,
					sg_list[sg_index],
					sg[sg_index].addr_bus);
			}
		}
	}
	return rcode;
}
#if defined __ia64__
static void adpt_ia64_info(sysInfo_S* si)
{
	// This is all the info we need for now
	// We will add more info as our new
	// management utility requires it
	si->processorType = PROC_IA64;
}
#endif

#if defined __sparc__
static void adpt_sparc_info(sysInfo_S* si)
{
	// This is all the info we need for now
	// We will add more info as our new
	// management utility requires it
	si->processorType = PROC_ULTRASPARC;
}
#endif
#if defined __alpha__
static void adpt_alpha_info(sysInfo_S* si)
{
	// This is all the info we need for now
	// We will add more info as our new
	// management utility requires it
	si->processorType = PROC_ALPHA;
}
#endif

#if defined __i386__

#include <uapi/asm/vm86.h>

static void adpt_i386_info(sysInfo_S* si)
{
	// This is all the info we need for now
	// We will add more info as our new
	// management utility requires it
	switch (boot_cpu_data.x86) {
	case CPU_386:
		si->processorType = PROC_386;
		break;
	case CPU_486:
		si->processorType = PROC_486;
		break;
	case CPU_586:
		si->processorType = PROC_PENTIUM;
		break;
	default:  // Just in case
		si->processorType = PROC_PENTIUM;
		break;
	}
}
#endif
/*
 * This routine returns information about the system.  This does not affect
 * any logic and if the info is wrong - it doesn't matter.
 */

/* Get all the info we can not get from kernel services */
static int adpt_system_info(void __user *buffer)
{
	sysInfo_S si;

	memset(&si, 0, sizeof(si));

	si.osType = OS_LINUX;
	si.osMajorVersion = 0;
	si.osMinorVersion = 0;
	si.osRevision = 0;
	si.busType = SI_PCI_BUS;
	si.processorFamily = DPTI_sig.dsProcessorFamily;

#if defined __i386__
	adpt_i386_info(&si);
#elif defined (__ia64__)
	adpt_ia64_info(&si);
#elif defined(__sparc__)
	adpt_sparc_info(&si);
#elif defined (__alpha__)
	adpt_alpha_info(&si);
#else
	si.processorType = 0xff ;
#endif
	if (copy_to_user(buffer, &si, sizeof(si))){
		printk(KERN_WARNING"dpti: Could not copy buffer TO user\n");
		return -EFAULT;
	}

	return 0;
}
static int adpt_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg)
{
	int minor;
	int error = 0;
	adpt_hba* pHba;
	ulong flags = 0;
	void __user *argp = (void __user *)arg;

	minor = iminor(inode);
	if (minor >= DPTI_MAX_HBA){
		return -ENXIO;
	}
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->unit == minor) {
			break;	/* found adapter */
		}
	}
	mutex_unlock(&adpt_configuration_lock);
	if(pHba == NULL){
		return -ENXIO;
	}

	while((volatile u32) pHba->state & DPTI_STATE_RESET )
		schedule_timeout_uninterruptible(2);

	switch (cmd) {
	// TODO: handle 3 cases
	case DPT_SIGNATURE:
		if (copy_to_user(argp, &DPTI_sig, sizeof(DPTI_sig))) {
			return -EFAULT;
		}
		break;
	case I2OUSRCMD:
		return adpt_i2o_passthru(pHba, argp);

	case DPT_CTRLINFO:{
		drvrHBAinfo_S HbaInfo;

#define FLG_OSD_PCI_VALID 0x0001
#define FLG_OSD_DMA	  0x0002
#define FLG_OSD_I2O	  0x0004
		memset(&HbaInfo, 0, sizeof(HbaInfo));
		HbaInfo.drvrHBAnum = pHba->unit;
		HbaInfo.baseAddr = (ulong) pHba->base_addr_phys;
		HbaInfo.blinkState = adpt_read_blink_led(pHba);
		HbaInfo.pciBusNum = pHba->pDev->bus->number;
		HbaInfo.pciDeviceNum=PCI_SLOT(pHba->pDev->devfn);
		HbaInfo.Interrupt = pHba->pDev->irq;
		HbaInfo.hbaFlags = FLG_OSD_PCI_VALID | FLG_OSD_DMA | FLG_OSD_I2O;
		if(copy_to_user(argp, &HbaInfo, sizeof(HbaInfo))){
			printk(KERN_WARNING"%s: Could not copy HbaInfo TO user\n",pHba->name);
			return -EFAULT;
		}
		break;
		}
	case DPT_SYSINFO:
		return adpt_system_info(argp);
	case DPT_BLINKLED:{
		u32 value;
		value = (u32)adpt_read_blink_led(pHba);
		if (copy_to_user(argp, &value, sizeof(value))) {
			return -EFAULT;
		}
		break;
		}
	case I2ORESETCMD:
		if(pHba->host)
			spin_lock_irqsave(pHba->host->host_lock, flags);
		adpt_hba_reset(pHba);
		if(pHba->host)
			spin_unlock_irqrestore(pHba->host->host_lock, flags);
		break;
	case I2ORESCANCMD:
		adpt_rescan(pHba);
		break;
	default:
		return -EINVAL;
	}

	return error;
}
static long adpt_unlocked_ioctl(struct file *file, uint cmd, ulong arg)
{
	struct inode *inode;
	long ret;

	inode = file_inode(file);

	mutex_lock(&adpt_mutex);
	ret = adpt_ioctl(inode, file, cmd, arg);
	mutex_unlock(&adpt_mutex);

	return ret;
}

#ifdef CONFIG_COMPAT
static long compat_adpt_ioctl(struct file *file,
				unsigned int cmd, unsigned long arg)
{
	struct inode *inode;
	long ret;

	inode = file_inode(file);

	mutex_lock(&adpt_mutex);

	switch(cmd) {
	case DPT_SIGNATURE:
	case I2OUSRCMD:
	case DPT_CTRLINFO:
	case DPT_SYSINFO:
	case DPT_BLINKLED:
	case I2ORESETCMD:
	case I2ORESCANCMD:
	case (DPT_TARGET_BUSY & 0xFFFF):
	case DPT_TARGET_BUSY:
		ret = adpt_ioctl(inode, file, cmd, arg);
		break;
	default:
		ret = -ENOIOCTLCMD;
	}

	mutex_unlock(&adpt_mutex);

	return ret;
}
#endif
2123 static irqreturn_t adpt_isr(int irq, void *dev_id)
2125 struct scsi_cmnd* cmd;
2126 adpt_hba* pHba = dev_id;
2127 u32 m;
2128 void __iomem *reply;
2129 u32 status=0;
2130 u32 context;
2131 ulong flags = 0;
2132 int handled = 0;
2134 if (pHba == NULL){
2135 printk(KERN_WARNING"adpt_isr: NULL dev_id\n");
2136 return IRQ_NONE;
2138 if(pHba->host)
2139 spin_lock_irqsave(pHba->host->host_lock, flags);
2141 while( readl(pHba->irq_mask) & I2O_INTERRUPT_PENDING_B) {
2142 m = readl(pHba->reply_port);
2143 if(m == EMPTY_QUEUE){
2144 // Try twice then give up
2145 rmb();
2146 m = readl(pHba->reply_port);
2147 if(m == EMPTY_QUEUE){
2148 // This really should not happen
2149 printk(KERN_ERR"dpti: Could not get reply frame\n");
2150 goto out;
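// m is the MFA (physical frame address) popped from the outbound
// FIFO; it should always fall inside our DMA-coherent reply pool.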
2153 if (pHba->reply_pool_pa <= m &&
2154 m < pHba->reply_pool_pa +
2155 (pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4)) {
2156 reply = (u8 *)pHba->reply_pool +
2157 (m - pHba->reply_pool_pa);
2158 } else {
2159 /* Ick, we should *never* be here */
2160 printk(KERN_ERR "dpti: reply frame not from pool\n");
2161 reply = (u8 *)bus_to_virt(m);
2164 if (readl(reply) & MSG_FAIL) {
2165 u32 old_m = readl(reply+28);
2166 void __iomem *msg;
2167 u32 old_context;
2168 PDEBUG("%s: Failed message\n",pHba->name);
2169 if(old_m >= 0x100000){
2170 printk(KERN_ERR"%s: Bad preserved MFA (%x)- dropping frame\n",pHba->name,old_m);
2171 writel(m,pHba->reply_port);
2172 continue;
2174 // Transaction context is 0 in failed reply frame
2175 msg = pHba->msg_addr_virt + old_m;
2176 old_context = readl(msg+12);
2177 writel(old_context, reply+12);
2178 adpt_send_nop(pHba, old_m);
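// Transaction context layout, judging from the tests below: bit 30
// (0x40000000) marks an ioctl passthru reply and bit 31 (0x80000000)
// a post-wait reply; plain SCSI replies instead look the command up
// from the user context in reply word 3.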
2180 context = readl(reply+8);
2181 if(context & 0x40000000){ // IOCTL
2182 void *p = adpt_ioctl_from_context(pHba, readl(reply+12));
2183 if( p != NULL) {
2184 memcpy_fromio(p, reply, REPLY_FRAME_SIZE * 4);
2186 // All IOCTLs will also be post wait
2188 if(context & 0x80000000){ // Post wait message
2189 status = readl(reply+16);
2190 if(status >> 24){
2191 status &= 0xffff; /* Get detail status */
2192 } else {
2193 status = I2O_POST_WAIT_OK;
2195 if(!(context & 0x40000000)) {
2196 cmd = adpt_cmd_from_context(pHba,
2197 readl(reply+12));
2198 if(cmd != NULL) {
2199 printk(KERN_WARNING"%s: Apparent SCSI cmd in Post Wait Context - cmd=%p context=%x\n", pHba->name, cmd, context);
2202 adpt_i2o_post_wait_complete(context, status);
2203 } else { // SCSI message
2204 cmd = adpt_cmd_from_context (pHba, readl(reply+12));
2205 if(cmd != NULL){
2206 scsi_dma_unmap(cmd);
2207 if(cmd->serial_number != 0) { // If not timedout
2208 adpt_i2o_to_scsi(reply, cmd);
2212 writel(m, pHba->reply_port);
2213 wmb();
2214 rmb();
2216 handled = 1;
2217 out: if(pHba->host)
2218 spin_unlock_irqrestore(pHba->host->host_lock, flags);
2219 return IRQ_RETVAL(handled);
2222 static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_device* d)
2224 int i;
2225 u32 msg[MAX_MESSAGE_SIZE];
2226 u32* mptr;
2227 u32* lptr;
2228 u32 *lenptr;
2229 int direction;
2230 int scsidir;
2231 int nseg;
2232 u32 len;
2233 u32 reqlen;
2234 s32 rcode;
2235 dma_addr_t addr;
2237 memset(msg, 0 , sizeof(msg));
2238 len = scsi_bufflen(cmd);
2239 direction = 0x00000000;
2241 scsidir = 0x00000000; // DATA NO XFER
2242 if(len) {
2244 * Set SCBFlags to indicate if data is being transferred
2245 * in or out, or no data transfer
2246 * Note: We do not have to check for a negative index since
2247 * cmd->cmnd[0] is an unsigned char
2249 switch(cmd->sc_data_direction){
2250 case DMA_FROM_DEVICE:
2251 scsidir =0x40000000; // DATA IN (iop<--dev)
2252 break;
2253 case DMA_TO_DEVICE:
2254 direction=0x04000000; // SGL OUT
2255 scsidir =0x80000000; // DATA OUT (iop-->dev)
2256 break;
2257 case DMA_NONE:
2258 break;
2259 case DMA_BIDIRECTIONAL:
2260 scsidir =0x40000000; // DATA IN (iop<--dev)
2261 // Assume In - and continue;
2262 break;
2263 default:
2264 printk(KERN_WARNING"%s: scsi opcode 0x%x not supported.\n",
2265 pHba->name, cmd->cmnd[0]);
2266 cmd->result = (DID_OK <<16) | (INITIATOR_ERROR << 8);
2267 cmd->scsi_done(cmd);
2268 return 0;
2271 // msg[0] is set later
2272 // I2O_CMD_SCSI_EXEC
2273 msg[1] = ((0xff<<24)|(HOST_TID<<12)|d->tid);
2274 msg[2] = 0;
2275 msg[3] = adpt_cmd_to_context(cmd); /* Want SCSI control block back */
2276 // Our cards use the transaction context as the tag for queueing
2277 // Adaptec/DPT Private stuff
2278 msg[4] = I2O_CMD_SCSI_EXEC|(DPT_ORGANIZATION_ID<<16);
2279 msg[5] = d->tid;
2280 /* Direction, disconnect ok | sense data | simple queue , CDBLen */
2281 // I2O_SCB_FLAG_ENABLE_DISCONNECT |
2282 // I2O_SCB_FLAG_SIMPLE_QUEUE_TAG |
2283 // I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
2284 msg[6] = scsidir|0x20a00000|cmd->cmd_len;
2286 mptr=msg+7;
2288 // Write SCSI command into the message - always 16 byte block
2289 memset(mptr, 0, 16);
2290 memcpy(mptr, cmd->cmnd, cmd->cmd_len);
2291 mptr+=4;
2292 lenptr=mptr++; /* Remember me - fill in when we know */
2293 if (dpt_dma64(pHba)) {
2294 reqlen = 16; // SINGLE SGE
2295 *mptr++ = (0x7C<<24)+(2<<16)+0x02; /* Enable 64 bit */
2296 *mptr++ = 1 << PAGE_SHIFT;
2297 } else {
2298 reqlen = 14; // SINGLE SGE
2300 /* Now fill in the SGList and command */
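// A sketch of the SG element format used below: each simple element
// is flags|byte-count followed by the low (and, on 64-bit capable
// cards, high) DMA address; 0x10000000 flags a simple element and
// 0xD0000000 the last one (end of buffer and list).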
2302 nseg = scsi_dma_map(cmd);
2303 BUG_ON(nseg < 0);
2304 if (nseg) {
2305 struct scatterlist *sg;
2307 len = 0;
2308 scsi_for_each_sg(cmd, sg, nseg, i) {
2309 lptr = mptr;
2310 *mptr++ = direction|0x10000000|sg_dma_len(sg);
2311 len+=sg_dma_len(sg);
2312 addr = sg_dma_address(sg);
2313 *mptr++ = dma_low(addr);
2314 if (dpt_dma64(pHba))
2315 *mptr++ = dma_high(addr);
2316 /* Make this an end of list */
2317 if (i == nseg - 1)
2318 *lptr = direction|0xD0000000|sg_dma_len(sg);
2320 reqlen = mptr - msg;
2321 *lenptr = len;
2323 if(cmd->underflow && len != cmd->underflow){
2324 printk(KERN_WARNING"Cmd len %08X Cmd underflow %08X\n",
2325 len, cmd->underflow);
2327 } else {
2328 *lenptr = len = 0;
2329 reqlen = 12;
2332 /* Stick the headers on */
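// Message word 0 packs the request length (in 32-bit words) into the
// high 16 bits and the SG-list offset flags into the low bits.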
2333 msg[0] = reqlen<<16 | ((reqlen > 12) ? SGL_OFFSET_12 : SGL_OFFSET_0);
2335 // Send it on its way
2336 rcode = adpt_i2o_post_this(pHba, msg, reqlen<<2);
2337 if (rcode == 0) {
2338 return 0;
2340 return rcode;
2344 static s32 adpt_scsi_host_alloc(adpt_hba* pHba, struct scsi_host_template *sht)
2346 struct Scsi_Host *host;
2348 host = scsi_host_alloc(sht, sizeof(adpt_hba*));
2349 if (host == NULL) {
2350 printk("%s: scsi_host_alloc returned NULL\n", pHba->name);
2351 return -1;
2353 host->hostdata[0] = (unsigned long)pHba;
2354 pHba->host = host;
2356 host->irq = pHba->pDev->irq;
2357 /* no IO ports, so don't have to set host->io_port and
2358 * host->n_io_port
2360 host->io_port = 0;
2361 host->n_io_port = 0;
2362 /* see comments in scsi_host.h */
2363 host->max_id = 16;
2364 host->max_lun = 256;
2365 host->max_channel = pHba->top_scsi_channel + 1;
2366 host->cmd_per_lun = 1;
2367 host->unique_id = (u32)sys_tbl_pa + pHba->unit;
2368 host->sg_tablesize = pHba->sg_tablesize;
2369 host->can_queue = pHba->post_fifo_size;
2370 host->use_cmd_list = 1;
2372 return 0;
2376 static s32 adpt_i2o_to_scsi(void __iomem *reply, struct scsi_cmnd* cmd)
2378 adpt_hba* pHba;
2379 u32 hba_status;
2380 u32 dev_status;
2381 u32 reply_flags = readl(reply) & 0xff00; // Leave it shifted up 8 bits
2382 // I know this would look cleaner if I just read bytes
2383 // but the model I have been using for all the rest of the
2384 // I/O is in 4-byte words - so I keep that model
2385 u16 detailed_status = readl(reply+16) &0xffff;
2386 dev_status = (detailed_status & 0xff);
2387 hba_status = detailed_status >> 8;
2389 // calculate resid for sg
2390 scsi_set_resid(cmd, scsi_bufflen(cmd) - readl(reply+20));
2392 pHba = (adpt_hba*) cmd->device->host->hostdata[0];
2394 cmd->sense_buffer[0] = '\0'; // initialize sense valid flag to false
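// cmd->result is assembled below as a host code in bits 16-23, with
// the raw SCSI device status OR'd into the low byte at the end.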
2396 if(!(reply_flags & MSG_FAIL)) {
2397 switch(detailed_status & I2O_SCSI_DSC_MASK) {
2398 case I2O_SCSI_DSC_SUCCESS:
2399 cmd->result = (DID_OK << 16);
2400 // handle underflow
2401 if (readl(reply+20) < cmd->underflow) {
2402 cmd->result = (DID_ERROR <<16);
2403 printk(KERN_WARNING"%s: SCSI CMD underflow\n",pHba->name);
2405 break;
2406 case I2O_SCSI_DSC_REQUEST_ABORTED:
2407 cmd->result = (DID_ABORT << 16);
2408 break;
2409 case I2O_SCSI_DSC_PATH_INVALID:
2410 case I2O_SCSI_DSC_DEVICE_NOT_PRESENT:
2411 case I2O_SCSI_DSC_SELECTION_TIMEOUT:
2412 case I2O_SCSI_DSC_COMMAND_TIMEOUT:
2413 case I2O_SCSI_DSC_NO_ADAPTER:
2414 case I2O_SCSI_DSC_RESOURCE_UNAVAILABLE:
2415 printk(KERN_WARNING"%s: SCSI Timeout-Device (%d,%d,%llu) hba status=0x%x, dev status=0x%x, cmd=0x%x\n",
2416 pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun, hba_status, dev_status, cmd->cmnd[0]);
2417 cmd->result = (DID_TIME_OUT << 16);
2418 break;
2419 case I2O_SCSI_DSC_ADAPTER_BUSY:
2420 case I2O_SCSI_DSC_BUS_BUSY:
2421 cmd->result = (DID_BUS_BUSY << 16);
2422 break;
2423 case I2O_SCSI_DSC_SCSI_BUS_RESET:
2424 case I2O_SCSI_DSC_BDR_MESSAGE_SENT:
2425 cmd->result = (DID_RESET << 16);
2426 break;
2427 case I2O_SCSI_DSC_PARITY_ERROR_FAILURE:
2428 printk(KERN_WARNING"%s: SCSI CMD parity error\n",pHba->name);
2429 cmd->result = (DID_PARITY << 16);
2430 break;
2431 case I2O_SCSI_DSC_UNABLE_TO_ABORT:
2432 case I2O_SCSI_DSC_COMPLETE_WITH_ERROR:
2433 case I2O_SCSI_DSC_UNABLE_TO_TERMINATE:
2434 case I2O_SCSI_DSC_MR_MESSAGE_RECEIVED:
2435 case I2O_SCSI_DSC_AUTOSENSE_FAILED:
2436 case I2O_SCSI_DSC_DATA_OVERRUN:
2437 case I2O_SCSI_DSC_UNEXPECTED_BUS_FREE:
2438 case I2O_SCSI_DSC_SEQUENCE_FAILURE:
2439 case I2O_SCSI_DSC_REQUEST_LENGTH_ERROR:
2440 case I2O_SCSI_DSC_PROVIDE_FAILURE:
2441 case I2O_SCSI_DSC_REQUEST_TERMINATED:
2442 case I2O_SCSI_DSC_IDE_MESSAGE_SENT:
2443 case I2O_SCSI_DSC_UNACKNOWLEDGED_EVENT:
2444 case I2O_SCSI_DSC_MESSAGE_RECEIVED:
2445 case I2O_SCSI_DSC_INVALID_CDB:
2446 case I2O_SCSI_DSC_LUN_INVALID:
2447 case I2O_SCSI_DSC_SCSI_TID_INVALID:
2448 case I2O_SCSI_DSC_FUNCTION_UNAVAILABLE:
2449 case I2O_SCSI_DSC_NO_NEXUS:
2450 case I2O_SCSI_DSC_CDB_RECEIVED:
2451 case I2O_SCSI_DSC_LUN_ALREADY_ENABLED:
2452 case I2O_SCSI_DSC_QUEUE_FROZEN:
2453 case I2O_SCSI_DSC_REQUEST_INVALID:
2454 default:
2455 printk(KERN_WARNING"%s: SCSI error %0x-Device(%d,%d,%llu) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
2456 pHba->name, detailed_status & I2O_SCSI_DSC_MASK, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun,
2457 hba_status, dev_status, cmd->cmnd[0]);
2458 cmd->result = (DID_ERROR << 16);
2459 break;
2462 // copy over the request sense data if it was a check
2463 // condition status
2464 if (dev_status == SAM_STAT_CHECK_CONDITION) {
2465 u32 len = min(SCSI_SENSE_BUFFERSIZE, 40);
2466 // Copy over the sense data
2467 memcpy_fromio(cmd->sense_buffer, (reply+28) , len);
2468 if(cmd->sense_buffer[0] == 0x70 /* class 7 */ &&
2469 cmd->sense_buffer[2] == DATA_PROTECT ){
2470 /* This handles a failed array */
2471 cmd->result = (DID_TIME_OUT << 16);
2472 printk(KERN_WARNING"%s: SCSI Data Protect-Device (%d,%d,%llu) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
2473 pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun,
2474 hba_status, dev_status, cmd->cmnd[0]);
2478 } else {
2479 /* In this condition we could not talk to the tid;
2480 * the card rejected it. We should signal a retry
2481 * for a limited number of retries.
2483 cmd->result = (DID_TIME_OUT << 16);
2484 printk(KERN_WARNING"%s: I2O MSG_FAIL - Device (%d,%d,%llu) tid=%d, cmd=0x%x\n",
2485 pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun,
2486 ((struct adpt_device*)(cmd->device->hostdata))->tid, cmd->cmnd[0]);
2489 cmd->result |= (dev_status);
2491 if(cmd->scsi_done != NULL){
2492 cmd->scsi_done(cmd);
2494 return cmd->result;
2498 static s32 adpt_rescan(adpt_hba* pHba)
2500 s32 rcode;
2501 ulong flags = 0;
2503 if(pHba->host)
2504 spin_lock_irqsave(pHba->host->host_lock, flags);
2505 if ((rcode=adpt_i2o_lct_get(pHba)) < 0)
2506 goto out;
2507 if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0)
2508 goto out;
2509 rcode = 0;
2510 out: if(pHba->host)
2511 spin_unlock_irqrestore(pHba->host->host_lock, flags);
2512 return rcode;
2516 static s32 adpt_i2o_reparse_lct(adpt_hba* pHba)
2518 int i;
2519 int max;
2520 int tid;
2521 struct i2o_device *d;
2522 i2o_lct *lct = pHba->lct;
2523 u8 bus_no = 0;
2524 s16 scsi_id;
2525 u64 scsi_lun;
2526 u32 buf[10]; // at least 8 u32's
2527 struct adpt_device* pDev = NULL;
2528 struct i2o_device* pI2o_dev = NULL;
2530 if (lct == NULL) {
2531 printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
2532 return -1;
2535 max = lct->table_size;
2536 max -= 3;
2537 max /= 9;
2539 // Mark each drive as unscanned
2540 for (d = pHba->devices; d; d = d->next) {
2541 pDev =(struct adpt_device*) d->owner;
2542 if(!pDev){
2543 continue;
2545 pDev->state |= DPTI_DEV_UNSCANNED;
2548 printk(KERN_INFO "%s: LCT has %d entries.\n", pHba->name,max);
2550 for(i=0;i<max;i++) {
2551 if( lct->lct_entry[i].user_tid != 0xfff){
2552 continue;
2555 if( lct->lct_entry[i].class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
2556 lct->lct_entry[i].class_id == I2O_CLASS_SCSI_PERIPHERAL ||
2557 lct->lct_entry[i].class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
2558 tid = lct->lct_entry[i].tid;
2559 if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
2560 printk(KERN_ERR"%s: Could not query device\n",pHba->name);
2561 continue;
2563 bus_no = buf[0]>>16;
2564 if (bus_no >= MAX_CHANNEL) { /* Something is wrong; skip it */
2565 printk(KERN_WARNING
2566 "%s: Channel number %d out of range\n",
2567 pHba->name, bus_no);
2568 continue;
2571 scsi_id = buf[1];
2572 scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
2573 pDev = pHba->channel[bus_no].device[scsi_id];
2574 /* find the matching LUN */
2575 while(pDev) {
2576 if(pDev->scsi_lun == scsi_lun) {
2577 break;
2579 pDev = pDev->next_lun;
2581 if(!pDev ) { // Something new add it
2582 d = kmalloc(sizeof(struct i2o_device),
2583 GFP_ATOMIC);
2584 if(d==NULL)
2586 printk(KERN_CRIT "Out of memory for I2O device data.\n");
2587 return -ENOMEM;
2590 d->controller = pHba;
2591 d->next = NULL;
2593 memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
2595 d->flags = 0;
2596 adpt_i2o_report_hba_unit(pHba, d);
2597 adpt_i2o_install_device(pHba, d);
2599 pDev = pHba->channel[bus_no].device[scsi_id];
2600 if( pDev == NULL){
2601 pDev =
2602 kzalloc(sizeof(struct adpt_device),
2603 GFP_ATOMIC);
2604 if(pDev == NULL) {
2605 return -ENOMEM;
2607 pHba->channel[bus_no].device[scsi_id] = pDev;
2608 } else {
2609 while (pDev->next_lun) {
2610 pDev = pDev->next_lun;
2612 pDev = pDev->next_lun =
2613 kzalloc(sizeof(struct adpt_device),
2614 GFP_ATOMIC);
2615 if(pDev == NULL) {
2616 return -ENOMEM;
2619 pDev->tid = d->lct_data.tid;
2620 pDev->scsi_channel = bus_no;
2621 pDev->scsi_id = scsi_id;
2622 pDev->scsi_lun = scsi_lun;
2623 pDev->pI2o_dev = d;
2624 d->owner = pDev;
2625 pDev->type = (buf[0])&0xff;
2626 pDev->flags = (buf[0]>>8)&0xff;
2627 // Too late, SCSI system has made up its mind, but what the hey ...
2628 if(scsi_id > pHba->top_scsi_id){
2629 pHba->top_scsi_id = scsi_id;
2631 if(scsi_lun > pHba->top_scsi_lun){
2632 pHba->top_scsi_lun = scsi_lun;
2634 continue;
2635 } // end of new i2o device
2637 // We found an old device - check it
2638 while(pDev) {
2639 if(pDev->scsi_lun == scsi_lun) {
2640 if(!scsi_device_online(pDev->pScsi_dev)) {
2641 printk(KERN_WARNING"%s: Setting device (%d,%d,%llu) back online\n",
2642 pHba->name,bus_no,scsi_id,scsi_lun);
2643 if (pDev->pScsi_dev) {
2644 scsi_device_set_state(pDev->pScsi_dev, SDEV_RUNNING);
2647 d = pDev->pI2o_dev;
2648 if(d->lct_data.tid != tid) { // something changed
2649 pDev->tid = tid;
2650 memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
2651 if (pDev->pScsi_dev) {
2652 pDev->pScsi_dev->changed = TRUE;
2653 pDev->pScsi_dev->removable = TRUE;
2656 // Found it - mark it scanned
2657 pDev->state = DPTI_DEV_ONLINE;
2658 break;
2660 pDev = pDev->next_lun;
2664 for (pI2o_dev = pHba->devices; pI2o_dev; pI2o_dev = pI2o_dev->next) {
2665 pDev =(struct adpt_device*) pI2o_dev->owner;
2666 if(!pDev){
2667 continue;
2669 // Take offline any drives that previously existed but could not be found
2670 // in the LCT table
2671 if (pDev->state & DPTI_DEV_UNSCANNED){
2672 pDev->state = DPTI_DEV_OFFLINE;
2673 printk(KERN_WARNING"%s: Device (%d,%d,%llu) offline\n",pHba->name,pDev->scsi_channel,pDev->scsi_id,pDev->scsi_lun);
2674 if (pDev->pScsi_dev) {
2675 scsi_device_set_state(pDev->pScsi_dev, SDEV_OFFLINE);
2679 return 0;
2682 static void adpt_fail_posted_scbs(adpt_hba* pHba)
2684 struct scsi_cmnd* cmd = NULL;
2685 struct scsi_device* d = NULL;
2687 shost_for_each_device(d, pHba->host) {
2688 unsigned long flags;
2689 spin_lock_irqsave(&d->list_lock, flags);
2690 list_for_each_entry(cmd, &d->cmd_list, list) {
2691 if(cmd->serial_number == 0){
2692 continue;
2694 cmd->result = (DID_OK << 16) | (QUEUE_FULL <<1);
2695 cmd->scsi_done(cmd);
2697 spin_unlock_irqrestore(&d->list_lock, flags);
2702 /*============================================================================
2703 * Routines from i2o subsystem
2704 *============================================================================
2710 * Bring an I2O controller into HOLD state. See the spec.
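 * If the IOP is already in READY, OPERATIONAL, HOLD or FAILED state it
 * is reset first; the outbound queue is then initialized and the HRT
 * fetched, leaving the controller in HOLD.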
2712 static int adpt_i2o_activate_hba(adpt_hba* pHba)
2714 int rcode;
2716 if(pHba->initialized ) {
2717 if (adpt_i2o_status_get(pHba) < 0) {
2718 if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
2719 printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
2720 return rcode;
2722 if (adpt_i2o_status_get(pHba) < 0) {
2723 printk(KERN_INFO "HBA not responding.\n");
2724 return -1;
2728 if(pHba->status_block->iop_state == ADAPTER_STATE_FAULTED) {
2729 printk(KERN_CRIT "%s: hardware fault\n", pHba->name);
2730 return -1;
2733 if (pHba->status_block->iop_state == ADAPTER_STATE_READY ||
2734 pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL ||
2735 pHba->status_block->iop_state == ADAPTER_STATE_HOLD ||
2736 pHba->status_block->iop_state == ADAPTER_STATE_FAILED) {
2737 adpt_i2o_reset_hba(pHba);
2738 if (adpt_i2o_status_get(pHba) < 0 || pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
2739 printk(KERN_ERR "%s: Failed to initialize.\n", pHba->name);
2740 return -1;
2743 } else {
2744 if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
2745 printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
2746 return rcode;
2751 if (adpt_i2o_init_outbound_q(pHba) < 0) {
2752 return -1;
2755 /* In HOLD state */
2757 if (adpt_i2o_hrt_get(pHba) < 0) {
2758 return -1;
2761 return 0;
2765 * Bring a controller online into OPERATIONAL state.
2768 static int adpt_i2o_online_hba(adpt_hba* pHba)
2770 if (adpt_i2o_systab_send(pHba) < 0) {
2771 adpt_i2o_delete_hba(pHba);
2772 return -1;
2774 /* In READY state */
2776 if (adpt_i2o_enable_hba(pHba) < 0) {
2777 adpt_i2o_delete_hba(pHba);
2778 return -1;
2781 /* In OPERATIONAL state */
2782 return 0;
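/*
 * Hand a message frame back to the IOP by filling it with a UTIL NOP
 * and reposting it; used to recover frames from failed transactions.
 */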
2785 static s32 adpt_send_nop(adpt_hba*pHba,u32 m)
2787 u32 __iomem *msg;
2788 ulong timeout = jiffies + 5*HZ;
2790 while(m == EMPTY_QUEUE){
2791 rmb();
2792 m = readl(pHba->post_port);
2793 if(m != EMPTY_QUEUE){
2794 break;
2796 if(time_after(jiffies,timeout)){
2797 printk(KERN_ERR "%s: Timeout waiting for message frame!\n",pHba->name);
2798 return 2;
2800 schedule_timeout_uninterruptible(1);
2802 msg = (u32 __iomem *)(pHba->msg_addr_virt + m);
2803 writel( THREE_WORD_MSG_SIZE | SGL_OFFSET_0,&msg[0]);
2804 writel( I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | 0,&msg[1]);
2805 writel( 0,&msg[2]);
2806 wmb();
2808 writel(m, pHba->post_port);
2809 wmb();
2810 return 0;
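/*
 * Initialize the IOP's outbound (reply) queue: post an OUTBOUND_INIT
 * request, poll the 4-byte status word until it leaves the
 * in-progress state, then allocate the reply pool and hand every
 * frame's physical address to the IOP.
 */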
2813 static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba)
2815 u8 *status;
2816 dma_addr_t addr;
2817 u32 __iomem *msg = NULL;
2818 int i;
2819 ulong timeout = jiffies + TMOUT_INITOUTBOUND*HZ;
2820 u32 m;
2822 do {
2823 rmb();
2824 m = readl(pHba->post_port);
2825 if (m != EMPTY_QUEUE) {
2826 break;
2829 if(time_after(jiffies,timeout)){
2830 printk(KERN_WARNING"%s: Timeout waiting for message frame\n",pHba->name);
2831 return -ETIMEDOUT;
2833 schedule_timeout_uninterruptible(1);
2834 } while(m == EMPTY_QUEUE);
2836 msg=(u32 __iomem *)(pHba->msg_addr_virt+m);
2838 status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
2839 if (!status) {
2840 adpt_send_nop(pHba, m);
2841 printk(KERN_WARNING"%s: IOP reset failed - no free memory.\n",
2842 pHba->name);
2843 return -ENOMEM;
2845 memset(status, 0, 4);
2847 writel(EIGHT_WORD_MSG_SIZE| SGL_OFFSET_6, &msg[0]);
2848 writel(I2O_CMD_OUTBOUND_INIT<<24 | HOST_TID<<12 | ADAPTER_TID, &msg[1]);
2849 writel(0, &msg[2]);
2850 writel(0x0106, &msg[3]); /* Transaction context */
2851 writel(4096, &msg[4]); /* Host page frame size */
2852 writel((REPLY_FRAME_SIZE)<<16|0x80, &msg[5]); /* Outbound msg frame size and Initcode */
2853 writel(0xD0000004, &msg[6]); /* Simple SG LE, EOB */
2854 writel((u32)addr, &msg[7]);
2856 writel(m, pHba->post_port);
2857 wmb();
2859 // Wait for the reply status to come back
2860 do {
2861 if (*status) {
2862 if (*status != 0x01 /*I2O_EXEC_OUTBOUND_INIT_IN_PROGRESS*/) {
2863 break;
2866 rmb();
2867 if(time_after(jiffies,timeout)){
2868 printk(KERN_WARNING"%s: Timeout Initializing\n",pHba->name);
2869 /* We lose 4 bytes of "status" here, but we
2870 cannot free these because the controller may
2871 wake up and corrupt those bytes at any time */
2872 /* dma_free_coherent(&pHba->pDev->dev, 4, status, addr); */
2873 return -ETIMEDOUT;
2875 schedule_timeout_uninterruptible(1);
2876 } while (1);
2878 // If the command was successful, fill the fifo with our reply
2879 // message packets
2880 if(*status != 0x04 /*I2O_EXEC_OUTBOUND_INIT_COMPLETE*/) {
2881 dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
2882 return -2;
2884 dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
2886 if(pHba->reply_pool != NULL) {
2887 dma_free_coherent(&pHba->pDev->dev,
2888 pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
2889 pHba->reply_pool, pHba->reply_pool_pa);
2892 pHba->reply_pool = dma_alloc_coherent(&pHba->pDev->dev,
2893 pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
2894 &pHba->reply_pool_pa, GFP_KERNEL);
2895 if (!pHba->reply_pool) {
2896 printk(KERN_ERR "%s: Could not allocate reply pool\n", pHba->name);
2897 return -ENOMEM;
2899 memset(pHba->reply_pool, 0 , pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4);
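// Prime the outbound free list: each write hands the IOP the
// physical address of one empty reply frame.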
2901 for(i = 0; i < pHba->reply_fifo_size; i++) {
2902 writel(pHba->reply_pool_pa + (i * REPLY_FRAME_SIZE * 4),
2903 pHba->reply_port);
2904 wmb();
2906 adpt_i2o_status_get(pHba);
2907 return 0;
2912 * I2O System Table. Contains information about
2913 * all the IOPs in the system. Used to inform IOPs
2914 * about each other's existence.
2916 * sys_tbl_ver is the CurrentChangeIndicator that is
2917 * used by IOPs to track changes.
2922 static s32 adpt_i2o_status_get(adpt_hba* pHba)
2924 ulong timeout;
2925 u32 m;
2926 u32 __iomem *msg;
2927 u8 *status_block=NULL;
2929 if(pHba->status_block == NULL) {
2930 pHba->status_block = dma_alloc_coherent(&pHba->pDev->dev,
2931 sizeof(i2o_status_block),
2932 &pHba->status_block_pa, GFP_KERNEL);
2933 if(pHba->status_block == NULL) {
2934 printk(KERN_ERR
2935 "dpti%d: Get Status Block failed; Out of memory. \n",
2936 pHba->unit);
2937 return -ENOMEM;
2940 memset(pHba->status_block, 0, sizeof(i2o_status_block));
2941 status_block = (u8*)(pHba->status_block);
2942 timeout = jiffies+TMOUT_GETSTATUS*HZ;
2943 do {
2944 rmb();
2945 m = readl(pHba->post_port);
2946 if (m != EMPTY_QUEUE) {
2947 break;
2949 if(time_after(jiffies,timeout)){
2950 printk(KERN_ERR "%s: Timeout waiting for message !\n",
2951 pHba->name);
2952 return -ETIMEDOUT;
2954 schedule_timeout_uninterruptible(1);
2955 } while(m==EMPTY_QUEUE);
2958 msg=(u32 __iomem *)(pHba->msg_addr_virt+m);
2960 writel(NINE_WORD_MSG_SIZE|SGL_OFFSET_0, &msg[0]);
2961 writel(I2O_CMD_STATUS_GET<<24|HOST_TID<<12|ADAPTER_TID, &msg[1]);
2962 writel(1, &msg[2]);
2963 writel(0, &msg[3]);
2964 writel(0, &msg[4]);
2965 writel(0, &msg[5]);
2966 writel( dma_low(pHba->status_block_pa), &msg[6]);
2967 writel( dma_high(pHba->status_block_pa), &msg[7]);
2968 writel(sizeof(i2o_status_block), &msg[8]); // 88 bytes
2970 //post message
2971 writel(m, pHba->post_port);
2972 wmb();
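// Poll the final byte of the 88-byte status block; the IOP apparently
// writes it last, so anything other than 0xff means the DMA is still
// incomplete.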
2974 while(status_block[87]!=0xff){
2975 if(time_after(jiffies,timeout)){
2976 printk(KERN_ERR"dpti%d: Get status timeout.\n",
2977 pHba->unit);
2978 return -ETIMEDOUT;
2980 rmb();
2981 schedule_timeout_uninterruptible(1);
2984 // Set up our number of outbound and inbound messages
2985 pHba->post_fifo_size = pHba->status_block->max_inbound_frames;
2986 if (pHba->post_fifo_size > MAX_TO_IOP_MESSAGES) {
2987 pHba->post_fifo_size = MAX_TO_IOP_MESSAGES;
2990 pHba->reply_fifo_size = pHba->status_block->max_outbound_frames;
2991 if (pHba->reply_fifo_size > MAX_FROM_IOP_MESSAGES) {
2992 pHba->reply_fifo_size = MAX_FROM_IOP_MESSAGES;
2995 // Calculate the Scatter Gather list size
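// An inbound frame holds inbound_frame_size 32-bit words; subtract
// the fixed header (14 words with the 64-bit SGE prefix, 12 without)
// and divide by the per-element size to get the SG limit.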
2996 if (dpt_dma64(pHba)) {
2997 pHba->sg_tablesize
2998 = ((pHba->status_block->inbound_frame_size * 4
2999 - 14 * sizeof(u32))
3000 / (sizeof(struct sg_simple_element) + sizeof(u32)));
3001 } else {
3002 pHba->sg_tablesize
3003 = ((pHba->status_block->inbound_frame_size * 4
3004 - 12 * sizeof(u32))
3005 / sizeof(struct sg_simple_element));
3007 if (pHba->sg_tablesize > SG_LIST_ELEMENTS) {
3008 pHba->sg_tablesize = SG_LIST_ELEMENTS;
3012 #ifdef DEBUG
3013 printk("dpti%d: State = ",pHba->unit);
3014 switch(pHba->status_block->iop_state) {
3015 case 0x01:
3016 printk("INIT\n");
3017 break;
3018 case 0x02:
3019 printk("RESET\n");
3020 break;
3021 case 0x04:
3022 printk("HOLD\n");
3023 break;
3024 case 0x05:
3025 printk("READY\n");
3026 break;
3027 case 0x08:
3028 printk("OPERATIONAL\n");
3029 break;
3030 case 0x10:
3031 printk("FAILED\n");
3032 break;
3033 case 0x11:
3034 printk("FAULTED\n");
3035 break;
3036 default:
3037 printk("%x (unknown!!)\n",pHba->status_block->iop_state);
3039 #endif
3040 return 0;
3044 * Get the IOP's Logical Configuration Table
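 * The buffer is sized from the status block's expected_lct_size; if
 * the IOP reports a larger table, the buffer is reallocated and the
 * request reissued.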
3046 static int adpt_i2o_lct_get(adpt_hba* pHba)
3048 u32 msg[8];
3049 int ret;
3050 u32 buf[16];
3052 if ((pHba->lct_size == 0) || (pHba->lct == NULL)){
3053 pHba->lct_size = pHba->status_block->expected_lct_size;
3055 do {
3056 if (pHba->lct == NULL) {
3057 pHba->lct = dma_alloc_coherent(&pHba->pDev->dev,
3058 pHba->lct_size, &pHba->lct_pa,
3059 GFP_ATOMIC);
3060 if(pHba->lct == NULL) {
3061 printk(KERN_CRIT "%s: Lct Get failed. Out of memory.\n",
3062 pHba->name);
3063 return -ENOMEM;
3066 memset(pHba->lct, 0, pHba->lct_size);
3068 msg[0] = EIGHT_WORD_MSG_SIZE|SGL_OFFSET_6;
3069 msg[1] = I2O_CMD_LCT_NOTIFY<<24 | HOST_TID<<12 | ADAPTER_TID;
3070 msg[2] = 0;
3071 msg[3] = 0;
3072 msg[4] = 0xFFFFFFFF; /* All devices */
3073 msg[5] = 0x00000000; /* Report now */
3074 msg[6] = 0xD0000000|pHba->lct_size;
3075 msg[7] = (u32)pHba->lct_pa;
3077 if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 360))) {
3078 printk(KERN_ERR "%s: LCT Get failed (status=%#10x).\n",
3079 pHba->name, ret);
3080 printk(KERN_ERR"Adaptec: Error Reading Hardware.\n");
3081 return ret;
3084 if ((pHba->lct->table_size << 2) > pHba->lct_size) {
3085 pHba->lct_size = pHba->lct->table_size << 2;
3086 dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
3087 pHba->lct, pHba->lct_pa);
3088 pHba->lct = NULL;
3090 } while (pHba->lct == NULL);
3092 PDEBUG("%s: Hardware resource table read.\n", pHba->name);
3095 // I2O_DPT_EXEC_IOP_BUFFERS_GROUP_NO;
3096 if(adpt_i2o_query_scalar(pHba, 0 , 0x8000, -1, buf, sizeof(buf))>=0) {
3097 pHba->FwDebugBufferSize = buf[1];
3098 pHba->FwDebugBuffer_P = ioremap(pHba->base_addr_phys + buf[0],
3099 pHba->FwDebugBufferSize);
3100 if (pHba->FwDebugBuffer_P) {
3101 pHba->FwDebugFlags_P = pHba->FwDebugBuffer_P +
3102 FW_DEBUG_FLAGS_OFFSET;
3103 pHba->FwDebugBLEDvalue_P = pHba->FwDebugBuffer_P +
3104 FW_DEBUG_BLED_OFFSET;
3105 pHba->FwDebugBLEDflag_P = pHba->FwDebugBLEDvalue_P + 1;
3106 pHba->FwDebugStrLength_P = pHba->FwDebugBuffer_P +
3107 FW_DEBUG_STR_LENGTH_OFFSET;
3108 pHba->FwDebugBuffer_P += buf[2];
3109 pHba->FwDebugFlags = 0;
3113 return 0;
3116 static int adpt_i2o_build_sys_table(void)
3118 adpt_hba* pHba = hba_chain;
3119 int count = 0;
3121 if (sys_tbl)
3122 dma_free_coherent(&pHba->pDev->dev, sys_tbl_len,
3123 sys_tbl, sys_tbl_pa);
3125 sys_tbl_len = sizeof(struct i2o_sys_tbl) + // Header + IOPs
3126 (hba_count) * sizeof(struct i2o_sys_tbl_entry);
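// The table is a header plus one i2o_sys_tbl_entry per registered HBA.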
3128 sys_tbl = dma_alloc_coherent(&pHba->pDev->dev,
3129 sys_tbl_len, &sys_tbl_pa, GFP_KERNEL);
3130 if (!sys_tbl) {
3131 printk(KERN_WARNING "SysTab Set failed. Out of memory.\n");
3132 return -ENOMEM;
3134 memset(sys_tbl, 0, sys_tbl_len);
3136 sys_tbl->num_entries = hba_count;
3137 sys_tbl->version = I2OVERSION;
3138 sys_tbl->change_ind = sys_tbl_ind++;
3140 for(pHba = hba_chain; pHba; pHba = pHba->next) {
3141 u64 addr;
3142 // Get updated Status Block so we have the latest information
3143 if (adpt_i2o_status_get(pHba)) {
3144 sys_tbl->num_entries--;
3145 continue; // try next one
3148 sys_tbl->iops[count].org_id = pHba->status_block->org_id;
3149 sys_tbl->iops[count].iop_id = pHba->unit + 2;
3150 sys_tbl->iops[count].seg_num = 0;
3151 sys_tbl->iops[count].i2o_version = pHba->status_block->i2o_version;
3152 sys_tbl->iops[count].iop_state = pHba->status_block->iop_state;
3153 sys_tbl->iops[count].msg_type = pHba->status_block->msg_type;
3154 sys_tbl->iops[count].frame_size = pHba->status_block->inbound_frame_size;
3155 sys_tbl->iops[count].last_changed = sys_tbl_ind - 1; // ??
3156 sys_tbl->iops[count].iop_capabilities = pHba->status_block->iop_capabilities;
3157 addr = pHba->base_addr_phys + 0x40;
3158 sys_tbl->iops[count].inbound_low = dma_low(addr);
3159 sys_tbl->iops[count].inbound_high = dma_high(addr);
3161 count++;
3164 #ifdef DEBUG
3166 u32 *table = (u32*)sys_tbl;
3167 printk(KERN_DEBUG"sys_tbl_len=%d in 32bit words\n",(sys_tbl_len >>2));
3168 for(count = 0; count < (sys_tbl_len >>2); count++) {
3169 printk(KERN_INFO "sys_tbl[%d] = %0#10x\n",
3170 count, table[count]);
3173 #endif
3175 return 0;
3180 * Dump the information block associated with a given unit (TID)
3183 static void adpt_i2o_report_hba_unit(adpt_hba* pHba, struct i2o_device *d)
3185 char buf[64];
3186 int unit = d->lct_data.tid;
3188 printk(KERN_INFO "TID %3.3d ", unit);
3190 if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 3, buf, 16)>=0)
3192 buf[16]=0;
3193 printk(" Vendor: %-12.12s", buf);
3195 if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 4, buf, 16)>=0)
3197 buf[16]=0;
3198 printk(" Device: %-12.12s", buf);
3200 if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 6, buf, 8)>=0)
3202 buf[8]=0;
3203 printk(" Rev: %-12.12s\n", buf);
3205 #ifdef DEBUG
3206 printk(KERN_INFO "\tClass: %.21s\n", adpt_i2o_get_class_name(d->lct_data.class_id));
3207 printk(KERN_INFO "\tSubclass: 0x%04X\n", d->lct_data.sub_class);
3208 printk(KERN_INFO "\tFlags: ");
3210 if(d->lct_data.device_flags&(1<<0))
3211 printk("C"); // ConfigDialog requested
3212 if(d->lct_data.device_flags&(1<<1))
3213 printk("U"); // Multi-user capable
3214 if(!(d->lct_data.device_flags&(1<<4)))
3215 printk("P"); // Peer service enabled!
3216 if(!(d->lct_data.device_flags&(1<<5)))
3217 printk("M"); // Mgmt service enabled!
3218 printk("\n");
3219 #endif
3222 #ifdef DEBUG
3224 * Do i2o class name lookup
3226 static const char *adpt_i2o_get_class_name(int class)
3228 int idx = 16;
3229 static char *i2o_class_name[] = {
3230 "Executive",
3231 "Device Driver Module",
3232 "Block Device",
3233 "Tape Device",
3234 "LAN Interface",
3235 "WAN Interface",
3236 "Fibre Channel Port",
3237 "Fibre Channel Device",
3238 "SCSI Device",
3239 "ATE Port",
3240 "ATE Device",
3241 "Floppy Controller",
3242 "Floppy Device",
3243 "Secondary Bus Port",
3244 "Peer Transport Agent",
3245 "Peer Transport",
3246 "Unknown"
3249 switch(class&0xFFF) {
3250 case I2O_CLASS_EXECUTIVE:
3251 idx = 0; break;
3252 case I2O_CLASS_DDM:
3253 idx = 1; break;
3254 case I2O_CLASS_RANDOM_BLOCK_STORAGE:
3255 idx = 2; break;
3256 case I2O_CLASS_SEQUENTIAL_STORAGE:
3257 idx = 3; break;
3258 case I2O_CLASS_LAN:
3259 idx = 4; break;
3260 case I2O_CLASS_WAN:
3261 idx = 5; break;
3262 case I2O_CLASS_FIBRE_CHANNEL_PORT:
3263 idx = 6; break;
3264 case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
3265 idx = 7; break;
3266 case I2O_CLASS_SCSI_PERIPHERAL:
3267 idx = 8; break;
3268 case I2O_CLASS_ATE_PORT:
3269 idx = 9; break;
3270 case I2O_CLASS_ATE_PERIPHERAL:
3271 idx = 10; break;
3272 case I2O_CLASS_FLOPPY_CONTROLLER:
3273 idx = 11; break;
3274 case I2O_CLASS_FLOPPY_DEVICE:
3275 idx = 12; break;
3276 case I2O_CLASS_BUS_ADAPTER_PORT:
3277 idx = 13; break;
3278 case I2O_CLASS_PEER_TRANSPORT_AGENT:
3279 idx = 14; break;
3280 case I2O_CLASS_PEER_TRANSPORT:
3281 idx = 15; break;
3283 return i2o_class_name[idx];
3285 #endif
3288 static s32 adpt_i2o_hrt_get(adpt_hba* pHba)
3290 u32 msg[6];
3291 int ret, size = sizeof(i2o_hrt);
3293 do {
3294 if (pHba->hrt == NULL) {
3295 pHba->hrt = dma_alloc_coherent(&pHba->pDev->dev,
3296 size, &pHba->hrt_pa, GFP_KERNEL);
3297 if (pHba->hrt == NULL) {
3298 printk(KERN_CRIT "%s: Hrt Get failed; Out of memory.\n", pHba->name);
3299 return -ENOMEM;
3303 msg[0]= SIX_WORD_MSG_SIZE| SGL_OFFSET_4;
3304 msg[1]= I2O_CMD_HRT_GET<<24 | HOST_TID<<12 | ADAPTER_TID;
3305 msg[2]= 0;
3306 msg[3]= 0;
3307 msg[4]= (0xD0000000 | size); /* Simple transaction */
3308 msg[5]= (u32)pHba->hrt_pa; /* Dump it here */
3310 if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg),20))) {
3311 printk(KERN_ERR "%s: Unable to get HRT (status=%#10x)\n", pHba->name, ret);
3312 return ret;
3315 if (pHba->hrt->num_entries * pHba->hrt->entry_len << 2 > size) {
3316 int newsize = pHba->hrt->num_entries * pHba->hrt->entry_len << 2;
3317 dma_free_coherent(&pHba->pDev->dev, size,
3318 pHba->hrt, pHba->hrt_pa);
3319 size = newsize;
3320 pHba->hrt = NULL;
3322 } while(pHba->hrt == NULL);
3323 return 0;
3327 * Query one scalar group value or a whole scalar group.
3329 static int adpt_i2o_query_scalar(adpt_hba* pHba, int tid,
3330 int group, int field, void *buf, int buflen)
3332 u16 opblk[] = { 1, 0, I2O_PARAMS_FIELD_GET, group, 1, field };
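/* Operation block layout, as used here: { operation count, reserved,
 * operation code, group number, field count, field index }; a field
 * count of -1 appears to request the whole group (set below when
 * field == -1). */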
3333 u8 *opblk_va;
3334 dma_addr_t opblk_pa;
3335 u8 *resblk_va;
3336 dma_addr_t resblk_pa;
3338 int size;
3340 /* 8 bytes for header */
3341 resblk_va = dma_alloc_coherent(&pHba->pDev->dev,
3342 sizeof(u8) * (8 + buflen), &resblk_pa, GFP_KERNEL);
3343 if (resblk_va == NULL) {
3344 printk(KERN_CRIT "%s: query scalar failed; Out of memory.\n", pHba->name);
3345 return -ENOMEM;
3348 opblk_va = dma_alloc_coherent(&pHba->pDev->dev,
3349 sizeof(opblk), &opblk_pa, GFP_KERNEL);
3350 if (opblk_va == NULL) {
3351 dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3352 resblk_va, resblk_pa);
3353 printk(KERN_CRIT "%s: query operation failed; Out of memory.\n",
3354 pHba->name);
3355 return -ENOMEM;
3357 if (field == -1) /* whole group */
3358 opblk[4] = -1;
3360 memcpy(opblk_va, opblk, sizeof(opblk));
3361 size = adpt_i2o_issue_params(I2O_CMD_UTIL_PARAMS_GET, pHba, tid,
3362 opblk_va, opblk_pa, sizeof(opblk),
3363 resblk_va, resblk_pa, sizeof(u8)*(8+buflen));
3364 dma_free_coherent(&pHba->pDev->dev, sizeof(opblk), opblk_va, opblk_pa);
3365 if (size == -ETIME) {
3366 dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3367 resblk_va, resblk_pa);
3368 printk(KERN_WARNING "%s: issue params failed; Timed out.\n", pHba->name);
3369 return -ETIME;
3370 } else if (size == -EINTR) {
3371 dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3372 resblk_va, resblk_pa);
3373 printk(KERN_WARNING "%s: issue params failed; Interrupted.\n", pHba->name);
3374 return -EINTR;
3377 memcpy(buf, resblk_va+8, buflen); /* cut off header */
3379 dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3380 resblk_va, resblk_pa);
3381 if (size < 0)
3382 return size;
3384 return buflen;
3388 /* Issue UTIL_PARAMS_GET or UTIL_PARAMS_SET
3390 * This function can be used for all UtilParamsGet/Set operations.
3391 * The OperationBlock is given in opblk-buffer,
3392 * and results are returned in resblk-buffer.
3393 * Note that the minimum sized resblk is 8 bytes and contains
3394 * ResultCount, ErrorInfoSize, BlockStatus and BlockSize.
3396 static int adpt_i2o_issue_params(int cmd, adpt_hba* pHba, int tid,
3397 void *opblk_va, dma_addr_t opblk_pa, int oplen,
3398 void *resblk_va, dma_addr_t resblk_pa, int reslen)
3400 u32 msg[9];
3401 u32 *res = (u32 *)resblk_va;
3402 int wait_status;
3404 msg[0] = NINE_WORD_MSG_SIZE | SGL_OFFSET_5;
3405 msg[1] = cmd << 24 | HOST_TID << 12 | tid;
3406 msg[2] = 0;
3407 msg[3] = 0;
3408 msg[4] = 0;
3409 msg[5] = 0x54000000 | oplen; /* OperationBlock */
3410 msg[6] = (u32)opblk_pa;
3411 msg[7] = 0xD0000000 | reslen; /* ResultBlock */
3412 msg[8] = (u32)resblk_pa;
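/* On completion res[1] packs ErrorInfoSize<<24 | BlockStatus<<16 |
 * BlockSize (in 32-bit words), which the checks below decode. */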
3414 if ((wait_status = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 20))) {
3415 printk("adpt_i2o_issue_params: post_wait failed (%p)\n", resblk_va);
3416 return wait_status; /* -DetailedStatus */
3419 if (res[1]&0x00FF0000) { /* BlockStatus != SUCCESS */
3420 printk(KERN_WARNING "%s: %s - Error:\n ErrorInfoSize = 0x%02x, "
3421 "BlockStatus = 0x%02x, BlockSize = 0x%04x\n",
3422 pHba->name,
3423 (cmd == I2O_CMD_UTIL_PARAMS_SET) ? "PARAMS_SET"
3424 : "PARAMS_GET",
3425 res[1]>>24, (res[1]>>16)&0xFF, res[1]&0xFFFF);
3426 return -((res[1] >> 16) & 0xFF); /* -BlockStatus */
3429 return 4 + ((res[1] & 0x0000FFFF) << 2); /* bytes used in resblk */
3433 static s32 adpt_i2o_quiesce_hba(adpt_hba* pHba)
3435 u32 msg[4];
3436 int ret;
3438 adpt_i2o_status_get(pHba);
3440 /* SysQuiesce discarded if IOP not in READY or OPERATIONAL state */
3442 if((pHba->status_block->iop_state != ADAPTER_STATE_READY) &&
3443 (pHba->status_block->iop_state != ADAPTER_STATE_OPERATIONAL)){
3444 return 0;
3447 msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
3448 msg[1] = I2O_CMD_SYS_QUIESCE<<24|HOST_TID<<12|ADAPTER_TID;
3449 msg[2] = 0;
3450 msg[3] = 0;
3452 if((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
3453 printk(KERN_INFO"dpti%d: Unable to quiesce (status=%#x).\n",
3454 pHba->unit, -ret);
3455 } else {
3456 printk(KERN_INFO"dpti%d: Quiesced.\n",pHba->unit);
3459 adpt_i2o_status_get(pHba);
3460 return ret;
3465 * Enable IOP. Allows the IOP to resume external operations.
3467 static int adpt_i2o_enable_hba(adpt_hba* pHba)
3469 u32 msg[4];
3470 int ret;
3472 adpt_i2o_status_get(pHba);
3473 if(!pHba->status_block){
3474 return -ENOMEM;
3476 /* Enable only allowed on READY state */
3477 if(pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL)
3478 return 0;
3480 if(pHba->status_block->iop_state != ADAPTER_STATE_READY)
3481 return -EINVAL;
3483 msg[0]=FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
3484 msg[1]=I2O_CMD_SYS_ENABLE<<24|HOST_TID<<12|ADAPTER_TID;
3485 msg[2]= 0;
3486 msg[3]= 0;
3488 if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
3489 printk(KERN_WARNING"%s: Could not enable (status=%#10x).\n",
3490 pHba->name, ret);
3491 } else {
3492 PDEBUG("%s: Enabled.\n", pHba->name);
3495 adpt_i2o_status_get(pHba);
3496 return ret;
3500 static int adpt_i2o_systab_send(adpt_hba* pHba)
3502 u32 msg[12];
3503 int ret;
3505 msg[0] = I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6;
3506 msg[1] = I2O_CMD_SYS_TAB_SET<<24 | HOST_TID<<12 | ADAPTER_TID;
3507 msg[2] = 0;
3508 msg[3] = 0;
3509 msg[4] = (0<<16) | ((pHba->unit+2) << 12); /* Host 0 IOP ID (unit + 2) */
3510 msg[5] = 0; /* Segment 0 */
3513 * Provide three SGL-elements:
3514 * System table (SysTab), Private memory space declaration and
3515 * Private i/o space declaration
3517 msg[6] = 0x54000000 | sys_tbl_len;
3518 msg[7] = (u32)sys_tbl_pa;
3519 msg[8] = 0x54000000 | 0;
3520 msg[9] = 0;
3521 msg[10] = 0xD4000000 | 0;
3522 msg[11] = 0;
3524 if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 120))) {
3525 printk(KERN_INFO "%s: Unable to set SysTab (status=%#10x).\n",
3526 pHba->name, ret);
3528 #ifdef DEBUG
3529 else {
3530 PINFO("%s: SysTab set.\n", pHba->name);
3532 #endif
3534 return ret;
3538 /*============================================================================
3540 *============================================================================
3544 #ifdef UARTDELAY
3546 static void adpt_delay(int millisec)
3548 int i;
3549 for (i = 0; i < millisec; i++) {
3550 udelay(1000); /* delay for one millisecond */
3554 #endif
3556 static struct scsi_host_template driver_template = {
3557 .module = THIS_MODULE,
3558 .name = "dpt_i2o",
3559 .proc_name = "dpt_i2o",
3560 .show_info = adpt_show_info,
3561 .info = adpt_info,
3562 .queuecommand = adpt_queue,
3563 .eh_abort_handler = adpt_abort,
3564 .eh_device_reset_handler = adpt_device_reset,
3565 .eh_bus_reset_handler = adpt_bus_reset,
3566 .eh_host_reset_handler = adpt_reset,
3567 .bios_param = adpt_bios_param,
3568 .slave_configure = adpt_slave_configure,
3569 .can_queue = MAX_TO_IOP_MESSAGES,
3570 .this_id = 7,
3571 .use_clustering = ENABLE_CLUSTERING,
3574 static int __init adpt_init(void)
3576 int error;
3577 adpt_hba *pHba, *next;
3579 printk("Loading Adaptec I2O RAID: Version " DPT_I2O_VERSION "\n");
3581 error = adpt_detect(&driver_template);
3582 if (error < 0)
3583 return error;
3584 if (hba_chain == NULL)
3585 return -ENODEV;
3587 for (pHba = hba_chain; pHba; pHba = pHba->next) {
3588 error = scsi_add_host(pHba->host, &pHba->pDev->dev);
3589 if (error)
3590 goto fail;
3591 scsi_scan_host(pHba->host);
3593 return 0;
3594 fail:
3595 for (pHba = hba_chain; pHba; pHba = next) {
3596 next = pHba->next;
3597 scsi_remove_host(pHba->host);
3599 return error;
3602 static void __exit adpt_exit(void)
3604 adpt_hba *pHba, *next;
3606 for (pHba = hba_chain; pHba; pHba = pHba->next)
3607 scsi_remove_host(pHba->host);
3608 for (pHba = hba_chain; pHba; pHba = next) {
3609 next = pHba->next;
3610 adpt_release(pHba->host);
3614 module_init(adpt_init);
3615 module_exit(adpt_exit);
3617 MODULE_LICENSE("GPL");