/***************************************************************************
                          dpti.c  -  description
                             -------------------
    begin                : Thu Sep 7 2000
    copyright            : (C) 2000 by Adaptec

			   July 30, 2001 First version being submitted
			   for inclusion in the kernel.  V2.4

    See Documentation/scsi/dpti.txt for history, notes, license info
    and credits
 ***************************************************************************/

/***************************************************************************
 *                                                                         *
 *   This program is free software; you can redistribute it and/or modify *
 *   it under the terms of the GNU General Public License as published by *
 *   the Free Software Foundation; either version 2 of the License, or    *
 *   (at your option) any later version.                                  *
 *                                                                         *
 ***************************************************************************/
/***************************************************************************
 * Sat Dec 20 2003 Go Taniguchi <go@turbolinux.co.jp>
 - Support 2.6 kernel and DMA-mapping
 - ioctl fix for raid tools
 - use schedule_timeout in long long loop
 **************************************************************************/

/*#define DEBUG 1 */
/*#define UARTDELAY 1 */
#include <linux/module.h>

MODULE_AUTHOR("Deanna Bonds, with _lots_ of help from Mark Salyzyn");
MODULE_DESCRIPTION("Adaptec I2O RAID Driver");

////////////////////////////////////////////////////////////////

#include <linux/ioctl.h>	/* For SCSI-Passthrough */
#include <asm/uaccess.h>

#include <linux/stat.h>
#include <linux/slab.h>		/* for kmalloc() */
#include <linux/pci.h>		/* for PCI support */
#include <linux/proc_fs.h>
#include <linux/blkdev.h>
#include <linux/delay.h>	/* for udelay */
#include <linux/interrupt.h>
#include <linux/kernel.h>	/* for printk */
#include <linux/sched.h>
#include <linux/reboot.h>
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>

#include <linux/timer.h>
#include <linux/string.h>
#include <linux/ioport.h>
#include <linux/mutex.h>

#include <asm/processor.h>	/* for boot_cpu_data */
#include <asm/pgtable.h>
#include <asm/io.h>		/* for virt_to_bus, etc. */

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>

#include "dpt/dptsig.h"
#include "dpti.h"
/*============================================================================
 * Create a binary signature - this is read by dptsig
 * Needed for our management apps
 *============================================================================
 */
static DEFINE_MUTEX(adpt_mutex);
static dpt_sig_S DPTI_sig = {
	{'d', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION,
#ifdef __i386__
	PROC_INTEL, PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM,
#elif defined(__ia64__)
	PROC_INTEL, PROC_IA64,
#elif defined(__sparc__)
	PROC_ULTRASPARC, PROC_ULTRASPARC,
#elif defined(__alpha__)
	PROC_ALPHA, PROC_ALPHA,
#else
	(-1),(-1),
#endif
	FT_HBADRVR, 0, OEM_DPT, OS_LINUX, CAP_OVERLAP, DEV_ALL,
	ADF_ALL_SC5, 0, 0, DPT_VERSION, DPT_REVISION, DPT_SUBREVISION,
	DPT_MONTH, DPT_DAY, DPT_YEAR, "Adaptec Linux I2O RAID Driver"
};
/*============================================================================
 * Globals
 *============================================================================
 */

static DEFINE_MUTEX(adpt_configuration_lock);

static struct i2o_sys_tbl *sys_tbl;
static dma_addr_t sys_tbl_pa;
static int sys_tbl_ind;
static int sys_tbl_len;

static adpt_hba* hba_chain = NULL;
static int hba_count = 0;

static struct class *adpt_sysfs_class;

static long adpt_unlocked_ioctl(struct file *, unsigned int, unsigned long);
#ifdef CONFIG_COMPAT
static long compat_adpt_ioctl(struct file *, unsigned int, unsigned long);
#endif

static const struct file_operations adpt_fops = {
	.unlocked_ioctl	= adpt_unlocked_ioctl,
	.open		= adpt_open,
	.release	= adpt_close,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= compat_adpt_ioctl,
#endif
	.llseek		= noop_llseek,
};

/* Structures and definitions for synchronous message posting.
 * See adpt_i2o_post_wait() for description
 * */
struct adpt_i2o_post_wait_data
{
	int status;
	u32 id;
	adpt_wait_queue_head_t *wq;
	struct adpt_i2o_post_wait_data *next;
};

static struct adpt_i2o_post_wait_data *adpt_post_wait_queue = NULL;
static u32 adpt_post_wait_id = 0;
static DEFINE_SPINLOCK(adpt_post_wait_lock);
/*============================================================================
 * 				Functions
 *============================================================================
 */

static inline int dpt_dma64(adpt_hba *pHba)
{
	return (sizeof(dma_addr_t) > 4 && (pHba)->dma64);
}

static inline u32 dma_high(dma_addr_t addr)
{
	return upper_32_bits(addr);
}

static inline u32 dma_low(dma_addr_t addr)
{
	return (u32)addr;
}
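/*
 * Illustrative note (not in the original source): dma_high()/dma_low()
 * split a dma_addr_t into the two 32-bit words a 64-bit I2O SGE wants,
 * low word first.  For example, with addr == 0x0000000123456000ULL:
 *
 *	*mptr++ = dma_low(addr);	// 0x23456000
 *	*mptr++ = dma_high(addr);	// 0x00000001
 *
 * On builds where dma_addr_t is 32 bits, dma_high() is simply 0.
 */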
static u8 adpt_read_blink_led(adpt_hba* host)
{
	if (host->FwDebugBLEDflag_P) {
		if( readb(host->FwDebugBLEDflag_P) == 0xbc ){
			return readb(host->FwDebugBLEDvalue_P);
		}
	}
	return 0;
}

/*============================================================================
 * Scsi host template interface functions
 *============================================================================
 */

static struct pci_device_id dptids[] = {
	{ PCI_DPT_VENDOR_ID, PCI_DPT_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
	{ PCI_DPT_VENDOR_ID, PCI_DPT_RAPTOR_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
	{ 0, }
};
MODULE_DEVICE_TABLE(pci,dptids);
static int adpt_detect(struct scsi_host_template* sht)
{
	struct pci_dev *pDev = NULL;
	adpt_hba *pHba;
	adpt_hba *next;

	PINFO("Detecting Adaptec I2O RAID controllers...\n");

	/* search for all Adaptec I2O RAID cards */
	while ((pDev = pci_get_device( PCI_DPT_VENDOR_ID, PCI_ANY_ID, pDev))) {
		if(pDev->device == PCI_DPT_DEVICE_ID ||
		   pDev->device == PCI_DPT_RAPTOR_DEVICE_ID){
			if(adpt_install_hba(sht, pDev) ){
				PERROR("Could not Init an I2O RAID device\n");
				PERROR("Will not try to detect others.\n");
				return hba_count-1;
			}
			pci_dev_get(pDev);
		}
	}

	/* In INIT state, Activate IOPs */
	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		// Activate does get status , init outbound, and get hrt
		if (adpt_i2o_activate_hba(pHba) < 0) {
			adpt_i2o_delete_hba(pHba);
		}
	}

	/* Active IOPs in HOLD state */

rebuild_sys_tab:
	if (hba_chain == NULL)
		return 0;

	/*
	 * If build_sys_table fails, we kill everything and bail
	 * as we can't init the IOPs w/o a system table
	 */
	if (adpt_i2o_build_sys_table() < 0) {
		adpt_i2o_sys_shutdown();
		return 0;
	}

	PDEBUG("HBA's in HOLD state\n");

	/* If an IOP doesn't come online, we need to rebuild the System table */
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (adpt_i2o_online_hba(pHba) < 0) {
			adpt_i2o_delete_hba(pHba);
			goto rebuild_sys_tab;
		}
	}

	/* Active IOPs now in OPERATIONAL state */
	PDEBUG("HBA's in OPERATIONAL state\n");

	printk("dpti: If you have a lot of devices this could take a few minutes.\n");
	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		printk(KERN_INFO"%s: Reading the hardware resource table.\n", pHba->name);
		if (adpt_i2o_lct_get(pHba) < 0){
			adpt_i2o_delete_hba(pHba);
			continue;
		}

		if (adpt_i2o_parse_lct(pHba) < 0){
			adpt_i2o_delete_hba(pHba);
			continue;
		}
		adpt_inquiry(pHba);
	}

	adpt_sysfs_class = class_create(THIS_MODULE, "dpt_i2o");
	if (IS_ERR(adpt_sysfs_class)) {
		printk(KERN_WARNING"dpti: unable to create dpt_i2o class\n");
		adpt_sysfs_class = NULL;
	}

	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		if (adpt_scsi_host_alloc(pHba, sht) < 0){
			adpt_i2o_delete_hba(pHba);
			continue;
		}
		pHba->initialized = TRUE;
		pHba->state &= ~DPTI_STATE_RESET;
		if (adpt_sysfs_class) {
			struct device *dev = device_create(adpt_sysfs_class,
				NULL, MKDEV(DPTI_I2O_MAJOR, pHba->unit), NULL,
				"dpti%d", pHba->unit);
			if (IS_ERR(dev)) {
				printk(KERN_WARNING"dpti%d: unable to "
					"create device in dpt_i2o class\n",
					pHba->unit);
			}
		}
	}

	// Register our control device node
	// nodes will need to be created in /dev to access this
	// the nodes can not be created from within the driver
	if (hba_count && register_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER, &adpt_fops)) {
		adpt_i2o_sys_shutdown();
		return 0;
	}
	return hba_count;
}
/*
 * scsi_unregister will be called AFTER we return.
 */
static int adpt_release(struct Scsi_Host *host)
{
	adpt_hba* pHba = (adpt_hba*) host->hostdata[0];
//	adpt_i2o_quiesce_hba(pHba);
	adpt_i2o_delete_hba(pHba);
	scsi_unregister(host);
	return 0;
}
static void adpt_inquiry(adpt_hba* pHba)
{
	u32 msg[17];
	u32 *mptr;
	u32 *lenptr;
	int direction;
	int scsidir;
	u32 len;
	u32 reqlen;
	u8* buf;
	dma_addr_t addr;
	u8 scb[16];
	s32 rcode;

	memset(msg, 0, sizeof(msg));
	buf = dma_alloc_coherent(&pHba->pDev->dev, 80, &addr, GFP_KERNEL);
	if(!buf){
		printk(KERN_ERR"%s: Could not allocate buffer\n",pHba->name);
		return;
	}
	memset((void*)buf, 0, 36);

	len = 36;
	direction = 0x00000000;
	scsidir  =0x40000000;	// DATA IN  (iop<--dev)

	if (dpt_dma64(pHba))
		reqlen = 17;		// SINGLE SGE, 64 bit
	else
		reqlen = 14;		// SINGLE SGE, 32 bit
	/* Stick the headers on */
	msg[0] = reqlen<<16 | SGL_OFFSET_12;
	msg[1] = (0xff<<24|HOST_TID<<12|ADAPTER_TID);
	msg[2] = 0;
	msg[3] = 0;
	// Adaptec/DPT Private stuff
	msg[4] = I2O_CMD_SCSI_EXEC|DPT_ORGANIZATION_ID<<16;
	msg[5] = ADAPTER_TID | 1<<16 /* Interpret*/;
	/* Direction, disconnect ok | sense data | simple queue , CDBLen */
	// I2O_SCB_FLAG_ENABLE_DISCONNECT |
	// I2O_SCB_FLAG_SIMPLE_QUEUE_TAG |
	// I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
	msg[6] = scsidir|0x20a00000| 6 /* cmd len*/;

	mptr=msg+7;

	memset(scb, 0, sizeof(scb));
	// Write SCSI command into the message - always 16 byte block
	scb[0] = INQUIRY;
	scb[1] = 0;
	scb[2] = 0;
	scb[3] = 0;
	scb[4] = 36;
	scb[5] = 0;
	// Don't care about the rest of scb

	memcpy(mptr, scb, sizeof(scb));
	mptr+=4;
	lenptr=mptr++;		/* Remember me - fill in when we know */

	/* Now fill in the SGList and command */
	*lenptr = len;
	if (dpt_dma64(pHba)) {
		*mptr++ = (0x7C<<24)+(2<<16)+0x02; /* Enable 64 bit */
		*mptr++ = 1 << PAGE_SHIFT;
		*mptr++ = 0xD0000000|direction|len;
		*mptr++ = dma_low(addr);
		*mptr++ = dma_high(addr);
	} else {
		*mptr++ = 0xD0000000|direction|len;
		*mptr++ = addr;
	}

	// Send it on its way
	rcode = adpt_i2o_post_wait(pHba, msg, reqlen<<2, 120);
	if (rcode != 0) {
		sprintf(pHba->detail, "Adaptec I2O RAID");
		printk(KERN_INFO "%s: Inquiry Error (%d)\n",pHba->name,rcode);
		if (rcode != -ETIME && rcode != -EINTR)
			dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
	} else {
		memset(pHba->detail, 0, sizeof(pHba->detail));
		memcpy(&(pHba->detail), "Vendor: Adaptec ", 16);
		memcpy(&(pHba->detail[16]), " Model: ", 8);
		memcpy(&(pHba->detail[24]), (u8*) &buf[16], 16);
		memcpy(&(pHba->detail[40]), " FW: ", 4);
		memcpy(&(pHba->detail[44]), (u8*) &buf[32], 4);
		pHba->detail[48] = '\0';	/* precautionary */
		dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
	}
	adpt_i2o_status_get(pHba);
	return ;
}
static int adpt_slave_configure(struct scsi_device * device)
{
	struct Scsi_Host *host = device->host;
	adpt_hba* pHba;

	pHba = (adpt_hba *) host->hostdata[0];

	if (host->can_queue && device->tagged_supported) {
		scsi_change_queue_depth(device,
				host->can_queue - 1);
	}
	return 0;
}
static int adpt_queue_lck(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd *))
{
	adpt_hba* pHba = NULL;
	struct adpt_device* pDev = NULL;	/* dpt per device information */

	cmd->scsi_done = done;
	/*
	 * SCSI REQUEST_SENSE commands will be executed automatically by the
	 * Host Adapter for any errors, so they should not be executed
	 * explicitly unless the Sense Data is zero indicating that no error
	 * occurred.
	 */

	if ((cmd->cmnd[0] == REQUEST_SENSE) && (cmd->sense_buffer[0] != 0)) {
		cmd->result = (DID_OK << 16);
		cmd->scsi_done(cmd);
		return 0;
	}

	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
	if (!pHba) {
		return FAILED;
	}

	rmb();
	if ((pHba->state) & DPTI_STATE_RESET)
		return SCSI_MLQUEUE_HOST_BUSY;

	// TODO if the cmd->device is offline then I may need to issue a bus rescan
	// followed by a get_lct to see if the device is there anymore
	if((pDev = (struct adpt_device*) (cmd->device->hostdata)) == NULL) {
		/*
		 * First command request for this device.  Set up a pointer
		 * to the device structure.  This should be a TEST_UNIT_READY
		 * command from scan_scsis_single.
		 */
		if ((pDev = adpt_find_device(pHba, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun)) == NULL) {
			// TODO: if any luns are at this bus, scsi id then fake a TEST_UNIT_READY and INQUIRY response
			// with type 7F (for all luns less than the max for this bus,id) so the lun scan will continue.
			cmd->result = (DID_NO_CONNECT << 16);
			cmd->scsi_done(cmd);
			return 0;
		}
		cmd->device->hostdata = pDev;
	}
	pDev->pScsi_dev = cmd->device;

	/*
	 * If we are being called from when the device is being reset,
	 * delay processing of the command until later.
	 */
	if (pDev->state & DPTI_DEV_RESET ) {
		return FAILED;
	}
	return adpt_scsi_to_i2o(pHba, cmd, pDev);
}

static DEF_SCSI_QCMD(adpt_queue)
static int adpt_bios_param(struct scsi_device *sdev, struct block_device *dev,
		sector_t capacity, int geom[])
{
	int heads=-1;
	int sectors=-1;
	int cylinders=-1;

	// *** First lets set the default geometry ****

	// If the capacity is less than 0x2000
	if (capacity < 0x2000 ) {	// floppy
		heads = 18;
		sectors = 2;
	}
	// else if between 0x2000 and 0x20000
	else if (capacity < 0x20000) {
		heads = 64;
		sectors = 32;
	}
	// else if between 0x20000 and 0x40000
	else if (capacity < 0x40000) {
		heads = 65;
		sectors = 63;
	}
	// else if between 0x40000 and 0x80000
	else if (capacity < 0x80000) {
		heads = 128;
		sectors = 63;
	}
	// else if greater than or equal to 0x80000
	else {
		heads = 255;
		sectors = 63;
	}
	sector_div(capacity, heads * sectors);
	cylinders = (int)capacity;

	// Special case if CDROM
	if(sdev->type == 5) {  // CDROM
		heads = 252;
		sectors = 63;
		cylinders = 1111;
	}

	geom[0] = heads;
	geom[1] = sectors;
	geom[2] = cylinders;

	PDEBUG("adpt_bios_param: exit\n");
	return 0;
}
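/*
 * Worked example (added for illustration, not in the original source):
 * a disk of 0x80000 512-byte sectors (256 MiB) takes the last branch,
 * so heads = 255 and sectors = 63, and sector_div() reduces capacity
 * to the cylinder count: 0x80000 / (255 * 63) = 32.  The reported BIOS
 * geometry is therefore 255/63/32.  Note sector_div() returns the
 * remainder and leaves the quotient in its first argument, which is
 * why cylinders is read back from capacity above.
 */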
static const char *adpt_info(struct Scsi_Host *host)
{
	adpt_hba* pHba;

	pHba = (adpt_hba *) host->hostdata[0];
	return (char *) (pHba->detail);
}

static int adpt_show_info(struct seq_file *m, struct Scsi_Host *host)
{
	struct adpt_device* d;
	int id;
	int chan;
	adpt_hba* pHba;
	int unit;

	// Find HBA (host bus adapter) we are looking for
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->host == host) {
			break;	/* found adapter */
		}
	}
	mutex_unlock(&adpt_configuration_lock);
	if (pHba == NULL) {
		return 0;
	}
	host = pHba->host;

	seq_printf(m, "Adaptec I2O RAID Driver Version: %s\n\n", DPT_I2O_VERSION);
	seq_printf(m, "%s\n", pHba->detail);
	seq_printf(m, "SCSI Host=scsi%d Control Node=/dev/%s irq=%d\n",
			pHba->host->host_no, pHba->name, host->irq);
	seq_printf(m, "\tpost fifo size = %d\n\treply fifo size = %d\n\tsg table size = %d\n\n",
			host->can_queue, (int) pHba->reply_fifo_size , host->sg_tablesize);

	seq_puts(m, "Devices:\n");
	for(chan = 0; chan < MAX_CHANNEL; chan++) {
		for(id = 0; id < MAX_ID; id++) {
			d = pHba->channel[chan].device[id];
			while(d) {
				seq_printf(m,"\t%-24.24s", d->pScsi_dev->vendor);
				seq_printf(m," Rev: %-8.8s\n", d->pScsi_dev->rev);

				unit = d->pI2o_dev->lct_data.tid;
				seq_printf(m, "\tTID=%d, (Channel=%d, Target=%d, Lun=%llu) (%s)\n\n",
					unit, (int)d->scsi_channel, (int)d->scsi_id, d->scsi_lun,
					scsi_device_online(d->pScsi_dev)? "online":"offline");
				d = d->next_lun;
			}
		}
	}
	return 0;
}
/*
 *	Turn a struct scsi_cmnd * into a unique 32 bit 'context'.
 */
static u32 adpt_cmd_to_context(struct scsi_cmnd *cmd)
{
	return (u32)cmd->serial_number;
}
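/*
 * Illustrative note (not in the original source): the 'context' is the
 * transaction-context word the driver places in an outgoing I2O message
 * and reads back from the reply in adpt_isr().  The round trip is, in
 * sketch form:
 *
 *	msg[n] = adpt_cmd_to_context(cmd);			// post side
 *	...
 *	cmd = adpt_cmd_from_context(pHba, readl(reply+12));	// reply side
 *
 * Truncating serial_number to 32 bits is tolerable only because the
 * lookup below rescans the command lists rather than trusting the
 * value as a pointer.
 */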
/*
 *	Go from a u32 'context' to a struct scsi_cmnd * .
 *	This could probably be made more efficient.
 */
static struct scsi_cmnd *
	adpt_cmd_from_context(adpt_hba * pHba, u32 context)
{
	struct scsi_cmnd * cmd;
	struct scsi_device * d;

	if (context == 0)
		return NULL;

	spin_unlock(pHba->host->host_lock);
	shost_for_each_device(d, pHba->host) {
		unsigned long flags;
		spin_lock_irqsave(&d->list_lock, flags);
		list_for_each_entry(cmd, &d->cmd_list, list) {
			if (((u32)cmd->serial_number == context)) {
				spin_unlock_irqrestore(&d->list_lock, flags);
				scsi_device_put(d);
				spin_lock(pHba->host->host_lock);
				return cmd;
			}
		}
		spin_unlock_irqrestore(&d->list_lock, flags);
	}
	spin_lock(pHba->host->host_lock);

	return NULL;
}
/*
 *	Turn a pointer to ioctl reply data into an u32 'context'
 */
static u32 adpt_ioctl_to_context(adpt_hba * pHba, void *reply)
{
#if BITS_PER_LONG == 32
	return (u32)(unsigned long)reply;
#else
	ulong flags = 0;
	u32 nr, i;

	spin_lock_irqsave(pHba->host->host_lock, flags);
	nr = ARRAY_SIZE(pHba->ioctl_reply_context);
	for (i = 0; i < nr; i++) {
		if (pHba->ioctl_reply_context[i] == NULL) {
			pHba->ioctl_reply_context[i] = reply;
			break;
		}
	}
	spin_unlock_irqrestore(pHba->host->host_lock, flags);
	if (i >= nr) {
		kfree (reply);
		printk(KERN_WARNING"%s: Too many outstanding "
				"ioctl commands\n", pHba->name);
		return (u32)-1;
	}

	return i;
#endif
}

/*
 *	Go from an u32 'context' to a pointer to ioctl reply data.
 */
static void *adpt_ioctl_from_context(adpt_hba *pHba, u32 context)
{
#if BITS_PER_LONG == 32
	return (void *)(unsigned long)context;
#else
	void *p = pHba->ioctl_reply_context[context];
	pHba->ioctl_reply_context[context] = NULL;

	return p;
#endif
}
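/*
 * Illustrative note (not in the original source): on 32-bit kernels a
 * pointer already fits in the 32-bit context word, so the cast is the
 * whole mapping.  On 64-bit kernels it cannot, so the pair above is a
 * tiny slot allocator instead: the context is an index into
 * pHba->ioctl_reply_context[], e.g.
 *
 *	msg[3] = adpt_ioctl_to_context(pHba, reply);	    // claim a slot
 *	...
 *	p = adpt_ioctl_from_context(pHba, readl(reply+12)); // fetch + free
 *
 * The lookup side clears the slot, so each context is single-use.
 */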
/*===========================================================================
 * Error Handling routines
 *===========================================================================
 */

static int adpt_abort(struct scsi_cmnd * cmd)
{
	adpt_hba* pHba = NULL;	/* host bus adapter structure */
	struct adpt_device* dptdevice;	/* dpt per device information */
	u32 msg[5];
	int rcode;

	if(cmd->serial_number == 0){
		return FAILED;
	}
	pHba = (adpt_hba*) cmd->device->host->hostdata[0];
	printk(KERN_INFO"%s: Trying to Abort\n",pHba->name);
	if ((dptdevice = (void*) (cmd->device->hostdata)) == NULL) {
		printk(KERN_ERR "%s: Unable to abort: No device in cmnd\n",pHba->name);
		return FAILED;
	}

	memset(msg, 0, sizeof(msg));
	msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = I2O_CMD_SCSI_ABORT<<24|HOST_TID<<12|dptdevice->tid;
	msg[2] = 0;
	msg[3]= 0;
	msg[4] = adpt_cmd_to_context(cmd);
	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER);
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		if(rcode == -EOPNOTSUPP ){
			printk(KERN_INFO"%s: Abort cmd not supported\n",pHba->name);
			return FAILED;
		}
		printk(KERN_INFO"%s: Abort failed.\n",pHba->name);
		return FAILED;
	}
	printk(KERN_INFO"%s: Abort complete.\n",pHba->name);
	return SUCCESS;
}
#define I2O_DEVICE_RESET 0x27
// This is the same for BLK and SCSI devices
// NOTE this is wrong in the i2o.h definitions
// This is not currently supported by our adapter but we issue it anyway
static int adpt_device_reset(struct scsi_cmnd* cmd)
{
	adpt_hba* pHba;
	u32 msg[4];
	u32 rcode;
	int old_state;
	struct adpt_device* d = cmd->device->hostdata;

	pHba = (void*) cmd->device->host->hostdata[0];
	printk(KERN_INFO"%s: Trying to reset device\n",pHba->name);
	if (!d) {
		printk(KERN_INFO"%s: Reset Device: Device Not found\n",pHba->name);
		return FAILED;
	}
	memset(msg, 0, sizeof(msg));
	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = (I2O_DEVICE_RESET<<24|HOST_TID<<12|d->tid);
	msg[2] = 0;
	msg[3] = 0;

	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	old_state = d->state;
	d->state |= DPTI_DEV_RESET;
	rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER);
	d->state = old_state;
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		if(rcode == -EOPNOTSUPP ){
			printk(KERN_INFO"%s: Device reset not supported\n",pHba->name);
			return FAILED;
		}
		printk(KERN_INFO"%s: Device reset failed\n",pHba->name);
		return FAILED;
	} else {
		printk(KERN_INFO"%s: Device reset successful\n",pHba->name);
		return SUCCESS;
	}
}
#define I2O_HBA_BUS_RESET 0x87
// This version of bus reset is called by the eh_error handler
static int adpt_bus_reset(struct scsi_cmnd* cmd)
{
	adpt_hba* pHba;
	u32 msg[4];
	u32 rcode;

	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
	memset(msg, 0, sizeof(msg));
	printk(KERN_WARNING"%s: Bus reset: SCSI Bus %d: tid: %d\n",pHba->name, cmd->device->channel,pHba->channel[cmd->device->channel].tid );
	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = (I2O_HBA_BUS_RESET<<24|HOST_TID<<12|pHba->channel[cmd->device->channel].tid);
	msg[2] = 0;
	msg[3] = 0;
	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER);
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		printk(KERN_WARNING"%s: Bus reset failed.\n",pHba->name);
		return FAILED;
	} else {
		printk(KERN_WARNING"%s: Bus reset success.\n",pHba->name);
		return SUCCESS;
	}
}
// This version of reset is called by the eh_error_handler
static int __adpt_reset(struct scsi_cmnd* cmd)
{
	adpt_hba* pHba;
	int rcode;

	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
	printk(KERN_WARNING"%s: Hba Reset: scsi id %d: tid: %d\n",pHba->name,cmd->device->channel,pHba->channel[cmd->device->channel].tid );
	rcode = adpt_hba_reset(pHba);
	if(rcode == 0){
		printk(KERN_WARNING"%s: HBA reset complete\n",pHba->name);
		return SUCCESS;
	} else {
		printk(KERN_WARNING"%s: HBA reset failed (%x)\n",pHba->name, rcode);
		return FAILED;
	}
}

static int adpt_reset(struct scsi_cmnd* cmd)
{
	int rc;

	spin_lock_irq(cmd->device->host->host_lock);
	rc = __adpt_reset(cmd);
	spin_unlock_irq(cmd->device->host->host_lock);

	return rc;
}
// This version of reset is called by the ioctls and indirectly from eh_error_handler via adpt_reset
static int adpt_hba_reset(adpt_hba* pHba)
{
	int rcode;

	pHba->state |= DPTI_STATE_RESET;

	// Activate does get status , init outbound, and get hrt
	if ((rcode=adpt_i2o_activate_hba(pHba)) < 0) {
		printk(KERN_ERR "%s: Could not activate\n", pHba->name);
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}

	if ((rcode=adpt_i2o_build_sys_table()) < 0) {
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	PDEBUG("%s: in HOLD state\n",pHba->name);

	if ((rcode=adpt_i2o_online_hba(pHba)) < 0) {
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	PDEBUG("%s: in OPERATIONAL state\n",pHba->name);

	if ((rcode=adpt_i2o_lct_get(pHba)) < 0){
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}

	if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0){
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	pHba->state &= ~DPTI_STATE_RESET;

	adpt_fail_posted_scbs(pHba);
	return 0;	/* return success */
}
/*===========================================================================
 *
 *===========================================================================
 */


static void adpt_i2o_sys_shutdown(void)
{
	adpt_hba *pHba, *pNext;
	struct adpt_i2o_post_wait_data *p1, *old;

	printk(KERN_INFO"Shutting down Adaptec I2O controllers.\n");
	printk(KERN_INFO" This could take a few minutes if there are many devices attached\n");
	/* Delete all IOPs from the controller chain */
	/* They should have already been released by the
	 * scsi-core
	 */
	for (pHba = hba_chain; pHba; pHba = pNext) {
		pNext = pHba->next;
		adpt_i2o_delete_hba(pHba);
	}

	/* Remove any timedout entries from the wait queue.  */
//	spin_lock_irqsave(&adpt_post_wait_lock, flags);
	/* Nothing should be outstanding at this point so just
	 * free them
	 */
	for(p1 = adpt_post_wait_queue; p1;) {
		old = p1;
		p1 = p1->next;
		kfree(old);
	}
//	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
	adpt_post_wait_queue = NULL;

	printk(KERN_INFO "Adaptec I2O controllers down.\n");
}
static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev)
{
	adpt_hba* pHba = NULL;
	adpt_hba* p = NULL;
	ulong base_addr0_phys = 0;
	ulong base_addr1_phys = 0;
	u32 hba_map0_area_size = 0;
	u32 hba_map1_area_size = 0;
	void __iomem *base_addr_virt = NULL;
	void __iomem *msg_addr_virt = NULL;
	int dma64 = 0;

	int raptorFlag = FALSE;

	if(pci_enable_device(pDev)) {
		return -EINVAL;
	}

	if (pci_request_regions(pDev, "dpt_i2o")) {
		PERROR("dpti: adpt_config_hba: pci request region failed\n");
		return -EINVAL;
	}

	pci_set_master(pDev);

	/*
	 *	See if we should enable dma64 mode.
	 */
	if (sizeof(dma_addr_t) > 4 &&
	    pci_set_dma_mask(pDev, DMA_BIT_MASK(64)) == 0) {
		if (dma_get_required_mask(&pDev->dev) > DMA_BIT_MASK(32))
			dma64 = 1;
	}
	if (!dma64 && pci_set_dma_mask(pDev, DMA_BIT_MASK(32)) != 0)
		return -EINVAL;

	/* adapter only supports message blocks below 4GB */
	pci_set_consistent_dma_mask(pDev, DMA_BIT_MASK(32));

	base_addr0_phys = pci_resource_start(pDev,0);
	hba_map0_area_size = pci_resource_len(pDev,0);

	// Check if standard PCI card or single BAR Raptor
	if(pDev->device == PCI_DPT_DEVICE_ID){
		if(pDev->subsystem_device >=0xc032 && pDev->subsystem_device <= 0xc03b){
			// Raptor card with this device id needs 4M
			hba_map0_area_size = 0x400000;
		} else { // Not Raptor - it is a PCI card
			if(hba_map0_area_size > 0x100000 ){
				hba_map0_area_size = 0x100000;
			}
		}
	} else {// Raptor split BAR config
		// Use BAR1 in this configuration
		base_addr1_phys = pci_resource_start(pDev,1);
		hba_map1_area_size = pci_resource_len(pDev,1);
		raptorFlag = TRUE;
	}

#if BITS_PER_LONG == 64
	/*
	 *	The original Adaptec 64 bit driver has this comment here:
	 *	"x86_64 machines need more optimal mappings"
	 *
	 *	I assume some HBAs report ridiculously large mappings
	 *	and we need to limit them on platforms with IOMMUs.
	 */
	if (raptorFlag == TRUE) {
		if (hba_map0_area_size > 128)
			hba_map0_area_size = 128;
		if (hba_map1_area_size > 524288)
			hba_map1_area_size = 524288;
	} else {
		if (hba_map0_area_size > 524288)
			hba_map0_area_size = 524288;
	}
#endif

	base_addr_virt = ioremap(base_addr0_phys,hba_map0_area_size);
	if (!base_addr_virt) {
		pci_release_regions(pDev);
		PERROR("dpti: adpt_config_hba: io remap failed\n");
		return -EINVAL;
	}

	if(raptorFlag == TRUE) {
		msg_addr_virt = ioremap(base_addr1_phys, hba_map1_area_size );
		if (!msg_addr_virt) {
			PERROR("dpti: adpt_config_hba: io remap failed on BAR1\n");
			iounmap(base_addr_virt);
			pci_release_regions(pDev);
			return -EINVAL;
		}
	} else {
		msg_addr_virt = base_addr_virt;
	}

	// Allocate and zero the data structure
	pHba = kzalloc(sizeof(adpt_hba), GFP_KERNEL);
	if (!pHba) {
		if (msg_addr_virt != base_addr_virt)
			iounmap(msg_addr_virt);
		iounmap(base_addr_virt);
		pci_release_regions(pDev);
		return -ENOMEM;
	}

	mutex_lock(&adpt_configuration_lock);

	if(hba_chain != NULL){
		for(p = hba_chain; p->next; p = p->next);
		p->next = pHba;
	} else {
		hba_chain = pHba;
	}
	pHba->next = NULL;
	pHba->unit = hba_count;
	sprintf(pHba->name, "dpti%d", hba_count);
	hba_count++;

	mutex_unlock(&adpt_configuration_lock);

	pHba->pDev = pDev;
	pHba->base_addr_phys = base_addr0_phys;

	// Set up the Virtual Base Address of the I2O Device
	pHba->base_addr_virt = base_addr_virt;
	pHba->msg_addr_virt = msg_addr_virt;
	pHba->irq_mask = base_addr_virt+0x30;
	pHba->post_port = base_addr_virt+0x40;
	pHba->reply_port = base_addr_virt+0x44;

	pHba->hrt = NULL;
	pHba->lct = NULL;
	pHba->lct_size = 0;
	pHba->status_block = NULL;
	pHba->post_count = 0;
	pHba->state = DPTI_STATE_RESET;
	pHba->pDev = pDev;
	pHba->devices = NULL;
	pHba->dma64 = dma64;

	// Initializing the spinlocks
	spin_lock_init(&pHba->state_lock);
	spin_lock_init(&adpt_post_wait_lock);

	if(raptorFlag == 0){
		printk(KERN_INFO "Adaptec I2O RAID controller"
				 " %d at %p size=%x irq=%d%s\n",
			hba_count-1, base_addr_virt,
			hba_map0_area_size, pDev->irq,
			dma64 ? " (64-bit DMA)" : "");
	} else {
		printk(KERN_INFO"Adaptec I2O RAID controller %d irq=%d%s\n",
			hba_count-1, pDev->irq,
			dma64 ? " (64-bit DMA)" : "");
		printk(KERN_INFO" BAR0 %p - size= %x\n",base_addr_virt,hba_map0_area_size);
		printk(KERN_INFO" BAR1 %p - size= %x\n",msg_addr_virt,hba_map1_area_size);
	}

	if (request_irq (pDev->irq, adpt_isr, IRQF_SHARED, pHba->name, pHba)) {
		printk(KERN_ERR"%s: Couldn't register IRQ %d\n", pHba->name, pDev->irq);
		adpt_i2o_delete_hba(pHba);
		return -EINVAL;
	}

	return 0;
}
static void adpt_i2o_delete_hba(adpt_hba* pHba)
{
	adpt_hba* p1;
	adpt_hba* p2;
	struct i2o_device* d;
	struct i2o_device* next;
	int i;
	int j;
	struct adpt_device* pDev;
	struct adpt_device* pNext;


	mutex_lock(&adpt_configuration_lock);
	// scsi_unregister calls our adpt_release which
	// does a quiesce
	if(pHba->host){
		free_irq(pHba->host->irq, pHba);
	}
	p2 = NULL;
	for( p1 = hba_chain; p1; p2 = p1,p1=p1->next){
		if(p1 == pHba) {
			if(p2) {
				p2->next = p1->next;
			} else {
				hba_chain = p1->next;
			}
			break;
		}
	}

	hba_count--;
	mutex_unlock(&adpt_configuration_lock);

	iounmap(pHba->base_addr_virt);
	pci_release_regions(pHba->pDev);
	if(pHba->msg_addr_virt != pHba->base_addr_virt){
		iounmap(pHba->msg_addr_virt);
	}
	if(pHba->FwDebugBuffer_P)
		iounmap(pHba->FwDebugBuffer_P);
	if(pHba->hrt) {
		dma_free_coherent(&pHba->pDev->dev,
			pHba->hrt->num_entries * pHba->hrt->entry_len << 2,
			pHba->hrt, pHba->hrt_pa);
	}
	if(pHba->lct) {
		dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
			pHba->lct, pHba->lct_pa);
	}
	if(pHba->status_block) {
		dma_free_coherent(&pHba->pDev->dev, sizeof(i2o_status_block),
			pHba->status_block, pHba->status_block_pa);
	}
	if(pHba->reply_pool) {
		dma_free_coherent(&pHba->pDev->dev,
			pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
			pHba->reply_pool, pHba->reply_pool_pa);
	}

	for(d = pHba->devices; d ; d = next){
		next = d->next;
		kfree(d);
	}
	for(i = 0 ; i < pHba->top_scsi_channel ; i++){
		for(j = 0; j < MAX_ID; j++){
			if(pHba->channel[i].device[j] != NULL){
				for(pDev = pHba->channel[i].device[j]; pDev; pDev = pNext){
					pNext = pDev->next_lun;
					kfree(pDev);
				}
			}
		}
	}
	pci_dev_put(pHba->pDev);
	if (adpt_sysfs_class)
		device_destroy(adpt_sysfs_class,
				MKDEV(DPTI_I2O_MAJOR, pHba->unit));
	kfree(pHba);

	if(hba_count <= 0){
		unregister_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER);
		if (adpt_sysfs_class) {
			class_destroy(adpt_sysfs_class);
			adpt_sysfs_class = NULL;
		}
	}
}
static struct adpt_device* adpt_find_device(adpt_hba* pHba, u32 chan, u32 id, u64 lun)
{
	struct adpt_device* d;

	if(chan >= MAX_CHANNEL)
		return NULL;

	if( pHba->channel[chan].device == NULL){
		printk(KERN_DEBUG"Adaptec I2O RAID: Trying to find device before they are allocated\n");
		return NULL;
	}

	d = pHba->channel[chan].device[id];
	if(!d || d->tid == 0) {
		return NULL;
	}

	/* If it is the only lun at that address then this should match*/
	if(d->scsi_lun == lun){
		return d;
	}

	/* else we need to look through all the luns */
	for(d=d->next_lun ; d ; d = d->next_lun){
		if(d->scsi_lun == lun){
			return d;
		}
	}
	return NULL;
}
static int adpt_i2o_post_wait(adpt_hba* pHba, u32* msg, int len, int timeout)
{
	// I used my own version of the WAIT_QUEUE_HEAD
	// to handle some version differences
	// When embedded in the kernel this could go back to the vanilla one
	ADPT_DECLARE_WAIT_QUEUE_HEAD(adpt_wq_i2o_post);
	int status = 0;
	ulong flags = 0;
	struct adpt_i2o_post_wait_data *p1, *p2;
	struct adpt_i2o_post_wait_data *wait_data =
		kmalloc(sizeof(struct adpt_i2o_post_wait_data), GFP_ATOMIC);
	DECLARE_WAITQUEUE(wait, current);

	if (!wait_data)
		return -ENOMEM;

	/*
	 * The spin locking is needed to keep anyone from playing
	 * with the queue pointers and id while we do the same
	 */
	spin_lock_irqsave(&adpt_post_wait_lock, flags);
	// TODO we need a MORE unique way of getting ids
	// to support async LCT get
	wait_data->next = adpt_post_wait_queue;
	adpt_post_wait_queue = wait_data;
	adpt_post_wait_id++;
	adpt_post_wait_id &= 0x7fff;
	wait_data->id = adpt_post_wait_id;
	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);

	wait_data->wq = &adpt_wq_i2o_post;
	wait_data->status = -ETIMEDOUT;

	add_wait_queue(&adpt_wq_i2o_post, &wait);

	msg[2] |= 0x80000000 | ((u32)wait_data->id);
	timeout *= HZ;
	if((status = adpt_i2o_post_this(pHba, msg, len)) == 0){
		set_current_state(TASK_INTERRUPTIBLE);
		if(pHba->host)
			spin_unlock_irq(pHba->host->host_lock);
		if (!timeout)
			schedule();
		else{
			timeout = schedule_timeout(timeout);
			if (timeout == 0) {
				// I/O issued, but cannot get result in
				// specified time. Freeing resources is
				// dangerous.
				status = -ETIME;
			}
		}
		if(pHba->host)
			spin_lock_irq(pHba->host->host_lock);
	}
	remove_wait_queue(&adpt_wq_i2o_post, &wait);

	if(status == -ETIMEDOUT){
		printk(KERN_INFO"dpti%d: POST WAIT TIMEOUT\n",pHba->unit);
		// We will have to free the wait_data memory during shutdown
		return status;
	}

	/* Remove the entry from the queue. */
	p2 = NULL;
	spin_lock_irqsave(&adpt_post_wait_lock, flags);
	for(p1 = adpt_post_wait_queue; p1; p2 = p1, p1 = p1->next) {
		if(p1 == wait_data) {
			if(p1->status == I2O_DETAIL_STATUS_UNSUPPORTED_FUNCTION ) {
				status = -EOPNOTSUPP;
			}
			if(p2) {
				p2->next = p1->next;
			} else {
				adpt_post_wait_queue = p1->next;
			}
			break;
		}
	}
	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);

	kfree(wait_data);

	return status;
}
static s32 adpt_i2o_post_this(adpt_hba* pHba, u32* data, int len)
{
	u32 m = EMPTY_QUEUE;
	u32 __iomem *msg;
	ulong timeout = jiffies + 30*HZ;

	do {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}
		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"dpti%d: Timeout waiting for message frame!\n", pHba->unit);
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while(m == EMPTY_QUEUE);

	msg = pHba->msg_addr_virt + m;
	memcpy_toio(msg, data, len);
	wmb();

	//post message
	writel(m, pHba->post_port);
	wmb();

	return 0;
}
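/*
 * Illustrative note (not in the original source): this is the standard
 * I2O inbound-queue handshake.  Reading the post port returns a message
 * frame address (MFA, an offset into the adapter's message window) or
 * EMPTY_QUEUE when the IOP has no free frame; the driver copies the
 * request into that frame and writes the same MFA back to the post
 * port to hand it to the IOP:
 *
 *	m = readl(pHba->post_port);		// claim a free frame
 *	memcpy_toio(pHba->msg_addr_virt + m, data, len);
 *	writel(m, pHba->post_port);		// hand it to the IOP
 */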
static void adpt_i2o_post_wait_complete(u32 context, int status)
{
	struct adpt_i2o_post_wait_data *p1 = NULL;
	/*
	 * We need to search through the adpt_post_wait
	 * queue to see if the given message is still
	 * outstanding.  If not, it means that the IOP
	 * took longer to respond to the message than we
	 * had allowed and the timer has already expired.
	 * Not much we can do about that except log
	 * it for debug purposes, increase timeout, and recompile.
	 *
	 * Lock needed to keep anyone from moving queue pointers
	 * around while we're looking through them.
	 */

	context &= 0x7fff;

	spin_lock(&adpt_post_wait_lock);
	for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
		if(p1->id == context) {
			p1->status = status;
			spin_unlock(&adpt_post_wait_lock);
			wake_up_interruptible(p1->wq);
			return;
		}
	}
	spin_unlock(&adpt_post_wait_lock);
	// If this happens we lose commands that probably really completed
	printk(KERN_DEBUG"dpti: Could Not find task %d in wait queue\n",context);
	printk(KERN_DEBUG" Tasks in wait queue:\n");
	for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
		printk(KERN_DEBUG" %d\n",p1->id);
	}
	return;
}
static s32 adpt_i2o_reset_hba(adpt_hba* pHba)
{
	u32 msg[8];
	u8* status;
	dma_addr_t addr;
	u32 m = EMPTY_QUEUE ;
	ulong timeout = jiffies + (TMOUT_IOPRESET*HZ);

	if(pHba->initialized  == FALSE) {	// First time reset should be quick
		timeout = jiffies + (25*HZ);
	} else {
		adpt_i2o_quiesce_hba(pHba);
	}

	do {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}
		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"Timeout waiting for message!\n");
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while (m == EMPTY_QUEUE);

	status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
	if(status == NULL) {
		adpt_send_nop(pHba, m);
		printk(KERN_ERR"IOP reset failed - no free memory.\n");
		return -ENOMEM;
	}
	memset(status,0,4);

	msg[0]=EIGHT_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1]=I2O_CMD_ADAPTER_RESET<<24|HOST_TID<<12|ADAPTER_TID;
	msg[2]=0;
	msg[3]=0;
	msg[4]=0;
	msg[5]=0;
	msg[6]=dma_low(addr);
	msg[7]=dma_high(addr);

	memcpy_toio(pHba->msg_addr_virt+m, msg, sizeof(msg));
	wmb();
	writel(m, pHba->post_port);
	wmb();

	while(*status == 0){
		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"%s: IOP Reset Timeout\n",pHba->name);
			/* We lose 4 bytes of "status" here, but we cannot
			   free these because controller may awake and corrupt
			   those bytes at any time */
			/* dma_free_coherent(&pHba->pDev->dev, 4, buf, addr); */
			return -ETIMEDOUT;
		}
		rmb();
		schedule_timeout_uninterruptible(1);
	}

	if(*status == 0x01 /*I2O_EXEC_IOP_RESET_IN_PROGRESS*/) {
		PDEBUG("%s: Reset in progress...\n", pHba->name);
		// Here we wait for message frame to become available
		// indicating that reset has finished
		do {
			rmb();
			m = readl(pHba->post_port);
			if (m != EMPTY_QUEUE) {
				break;
			}
			if(time_after(jiffies,timeout)){
				printk(KERN_ERR "%s:Timeout waiting for IOP Reset.\n",pHba->name);
				/* We lose 4 bytes of "status" here, but we
				   cannot free these because controller may
				   awake and corrupt those bytes at any time */
				/* dma_free_coherent(&pHba->pDev->dev, 4, buf, addr); */
				return -ETIMEDOUT;
			}
			schedule_timeout_uninterruptible(1);
		} while (m == EMPTY_QUEUE);
		// Flush the offset
		adpt_send_nop(pHba, m);
	}
	adpt_i2o_status_get(pHba);
	if(*status == 0x02 ||
			pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
		printk(KERN_WARNING"%s: Reset reject, trying to clear\n",
				pHba->name);
	} else {
		PDEBUG("%s: Reset completed.\n", pHba->name);
	}

	dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
#ifdef UARTDELAY
	// This delay is to allow someone attached to the card through the debug UART to
	// set up the dump levels that they want before the rest of the initialization sequence
	adpt_delay(20000);
#endif
	return 0;
}
static int adpt_i2o_parse_lct(adpt_hba* pHba)
{
	int i;
	int max;
	int tid;
	struct i2o_device *d;
	i2o_lct *lct = pHba->lct;
	u8 bus_no = 0;
	s16 scsi_id;
	u64 scsi_lun;
	u32 buf[10]; // larger than 7, or 8 ...
	struct adpt_device* pDev;

	if (lct == NULL) {
		printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
		return -1;
	}

	max = lct->table_size;
	max -= 3;
	max /= 9;

	for(i=0;i<max;i++) {
		if( lct->lct_entry[i].user_tid != 0xfff){
			/*
			 * If we have hidden devices, we need to inform the upper layers about
			 * the possible maximum id reference to handle device access when
			 * an array is disassembled. This code has no other purpose but to
			 * allow us future access to devices that are currently hidden
			 * behind arrays, hotspares or have not been configured (JBOD mode).
			 */
			if( lct->lct_entry[i].class_id != I2O_CLASS_RANDOM_BLOCK_STORAGE &&
			    lct->lct_entry[i].class_id != I2O_CLASS_SCSI_PERIPHERAL &&
			    lct->lct_entry[i].class_id != I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
				continue;
			}
			tid = lct->lct_entry[i].tid;
			// I2O_DPT_DEVICE_INFO_GROUP_NO;
			if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
				continue;
			}
			bus_no = buf[0]>>16;
			scsi_id = buf[1];
			scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
			if(bus_no >= MAX_CHANNEL) {	// Something wrong skip it
				printk(KERN_WARNING"%s: Channel number %d out of range \n", pHba->name, bus_no);
				continue;
			}
			if (scsi_id >= MAX_ID){
				printk(KERN_WARNING"%s: SCSI ID %d out of range \n", pHba->name, scsi_id);
				continue;
			}
			if(bus_no > pHba->top_scsi_channel){
				pHba->top_scsi_channel = bus_no;
			}
			if(scsi_id > pHba->top_scsi_id){
				pHba->top_scsi_id = scsi_id;
			}
			if(scsi_lun > pHba->top_scsi_lun){
				pHba->top_scsi_lun = scsi_lun;
			}
			continue;
		}
		d = kmalloc(sizeof(struct i2o_device), GFP_KERNEL);
		if(d==NULL)
		{
			printk(KERN_CRIT"%s: Out of memory for I2O device data.\n",pHba->name);
			return -ENOMEM;
		}

		d->controller = pHba;
		d->next = NULL;

		memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));

		d->flags = 0;
		tid = d->lct_data.tid;
		adpt_i2o_report_hba_unit(pHba, d);
		adpt_i2o_install_device(pHba, d);
	}
	bus_no = 0;
	for(d = pHba->devices; d ; d = d->next) {
		if(d->lct_data.class_id  == I2O_CLASS_BUS_ADAPTER_PORT ||
		   d->lct_data.class_id  == I2O_CLASS_FIBRE_CHANNEL_PORT){
			tid = d->lct_data.tid;
			// TODO get the bus_no from hrt-but for now they are in order
			//bus_no =
			if(bus_no > pHba->top_scsi_channel){
				pHba->top_scsi_channel = bus_no;
			}
			pHba->channel[bus_no].type = d->lct_data.class_id;
			pHba->channel[bus_no].tid = tid;
			if(adpt_i2o_query_scalar(pHba, tid, 0x0200, -1, buf, 28)>=0)
			{
				pHba->channel[bus_no].scsi_id = buf[1];
				PDEBUG("Bus %d - SCSI ID %d.\n", bus_no, buf[1]);
			}
			// TODO remove - this is just until we get from hrt
			bus_no++;
			if(bus_no >= MAX_CHANNEL) {	// Something wrong skip it
				printk(KERN_WARNING"%s: Channel number %d out of range - LCT\n", pHba->name, bus_no);
				break;
			}
		}
	}

	// Setup adpt_device table
	for(d = pHba->devices; d ; d = d->next) {
		if(d->lct_data.class_id  == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
		   d->lct_data.class_id  == I2O_CLASS_SCSI_PERIPHERAL ||
		   d->lct_data.class_id  == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){

			tid = d->lct_data.tid;
			scsi_id = -1;
			// I2O_DPT_DEVICE_INFO_GROUP_NO;
			if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)>=0) {
				bus_no = buf[0]>>16;
				scsi_id = buf[1];
				scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
				if(bus_no >= MAX_CHANNEL) {	// Something wrong skip it
					continue;
				}
				if (scsi_id >= MAX_ID) {
					continue;
				}
				if( pHba->channel[bus_no].device[scsi_id] == NULL){
					pDev = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
					if(pDev == NULL) {
						return -ENOMEM;
					}
					pHba->channel[bus_no].device[scsi_id] = pDev;
				} else {
					for( pDev = pHba->channel[bus_no].device[scsi_id];
							pDev->next_lun; pDev = pDev->next_lun){
					}
					pDev->next_lun = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
					if(pDev->next_lun == NULL) {
						return -ENOMEM;
					}
					pDev = pDev->next_lun;
				}
				pDev->tid = tid;
				pDev->scsi_channel = bus_no;
				pDev->scsi_id = scsi_id;
				pDev->scsi_lun = scsi_lun;
				pDev->pI2o_dev = d;
				d->owner = pDev;
				pDev->type = (buf[0])&0xff;
				pDev->flags = (buf[0]>>8)&0xff;
				if(scsi_id > pHba->top_scsi_id){
					pHba->top_scsi_id = scsi_id;
				}
				if(scsi_lun > pHba->top_scsi_lun){
					pHba->top_scsi_lun = scsi_lun;
				}
			}
			if(scsi_id == -1){
				printk(KERN_WARNING"Could not find SCSI ID for %s\n",
						d->lct_data.identity_tag);
			}
		}
	}
	return 0;
}
/*
 *	Each I2O controller has a chain of devices on it - these match
 *	the useful parts of the LCT of the board.
 */

static int adpt_i2o_install_device(adpt_hba* pHba, struct i2o_device *d)
{
	mutex_lock(&adpt_configuration_lock);
	d->controller=pHba;
	d->owner=NULL;
	d->next=pHba->devices;
	d->prev=NULL;
	if (pHba->devices != NULL){
		pHba->devices->prev=d;
	}
	pHba->devices=d;
	*d->dev_name = 0;

	mutex_unlock(&adpt_configuration_lock);
	return 0;
}
static int adpt_open(struct inode *inode, struct file *file)
{
	int minor;
	adpt_hba* pHba;

	mutex_lock(&adpt_mutex);
	//TODO check for root access
	//
	minor = iminor(inode);
	if (minor >= hba_count) {
		mutex_unlock(&adpt_mutex);
		return -ENXIO;
	}
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->unit == minor) {
			break;	/* found adapter */
		}
	}
	if (pHba == NULL) {
		mutex_unlock(&adpt_configuration_lock);
		mutex_unlock(&adpt_mutex);
		return -ENXIO;
	}

//	if(pHba->in_use){
//		mutex_unlock(&adpt_configuration_lock);
//		return -EBUSY;
//	}

	pHba->in_use = 1;
	mutex_unlock(&adpt_configuration_lock);
	mutex_unlock(&adpt_mutex);

	return 0;
}
static int adpt_close(struct inode *inode, struct file *file)
{
	int minor;
	adpt_hba* pHba;

	minor = iminor(inode);
	if (minor >= hba_count) {
		return -ENXIO;
	}
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->unit == minor) {
			break;	/* found adapter */
		}
	}
	mutex_unlock(&adpt_configuration_lock);
	if (pHba == NULL) {
		return -ENXIO;
	}

	pHba->in_use = 0;

	return 0;
}
static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
{
	u32 msg[MAX_MESSAGE_SIZE];
	u32* reply = NULL;
	u32 size = 0;
	u32 reply_size = 0;
	u32 __user *user_msg = arg;
	u32 __user * user_reply = NULL;
	void *sg_list[pHba->sg_tablesize];
	u32 sg_offset = 0;
	u32 sg_count = 0;
	int sg_index = 0;
	u32 i = 0;
	u32 rcode = 0;
	void *p = NULL;
	dma_addr_t addr;
	ulong flags = 0;

	memset(&msg, 0, MAX_MESSAGE_SIZE*4);
	// get user msg size in u32s
	if(get_user(size, &user_msg[0])){
		return -EFAULT;
	}
	size = size>>16;

	user_reply = &user_msg[size];
	if(size > MAX_MESSAGE_SIZE){
		return -EFAULT;
	}
	size *= 4; // Convert to bytes

	/* Copy in the user's I2O command */
	if(copy_from_user(msg, user_msg, size)) {
		return -EFAULT;
	}
	get_user(reply_size, &user_reply[0]);
	reply_size = reply_size>>16;
	if(reply_size > REPLY_FRAME_SIZE){
		reply_size = REPLY_FRAME_SIZE;
	}
	reply_size *= 4;
	reply = kzalloc(REPLY_FRAME_SIZE*4, GFP_KERNEL);
	if(reply == NULL) {
		printk(KERN_WARNING"%s: Could not allocate reply buffer\n",pHba->name);
		return -ENOMEM;
	}
	sg_offset = (msg[0]>>4)&0xf;
	msg[2] = 0x40000000; // IOCTL context
	msg[3] = adpt_ioctl_to_context(pHba, reply);
	if (msg[3] == (u32)-1)
		return -EBUSY;

	memset(sg_list,0, sizeof(sg_list[0])*pHba->sg_tablesize);
	if(sg_offset) {
		// TODO add 64 bit API
		struct sg_simple_element *sg = (struct sg_simple_element*) (msg+sg_offset);
		sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
		if (sg_count > pHba->sg_tablesize){
			printk(KERN_DEBUG"%s:IOCTL SG List too large (%u)\n", pHba->name,sg_count);
			kfree (reply);
			return -EINVAL;
		}

		for(i = 0; i < sg_count; i++) {
			int sg_size;

			if (!(sg[i].flag_count & 0x10000000 /*I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT*/)) {
				printk(KERN_DEBUG"%s:Bad SG element %d - not simple (%x)\n",pHba->name,i, sg[i].flag_count);
				rcode = -EINVAL;
				goto cleanup;
			}
			sg_size = sg[i].flag_count & 0xffffff;
			/* Allocate memory for the transfer */
			p = dma_alloc_coherent(&pHba->pDev->dev, sg_size, &addr, GFP_KERNEL);
			if(!p) {
				printk(KERN_DEBUG"%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
						pHba->name,sg_size,i,sg_count);
				rcode = -ENOMEM;
				goto cleanup;
			}
			sg_list[sg_index++] = p; // sglist indexed with input frame, not our internal frame.
			/* Copy in the user's SG buffer if necessary */
			if(sg[i].flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR*/) {
				// sg_simple_element API is 32 bit
				if (copy_from_user(p,(void __user *)(ulong)sg[i].addr_bus, sg_size)) {
					printk(KERN_DEBUG"%s: Could not copy SG buf %d FROM user\n",pHba->name,i);
					rcode = -EFAULT;
					goto cleanup;
				}
			}
			/* sg_simple_element API is 32 bit, but addr < 4GB */
			sg[i].addr_bus = addr;
		}
	}

	do {
		/*
		 * Stop any new commands from entering the
		 * controller while processing the ioctl
		 */
		if (pHba->host) {
			scsi_block_requests(pHba->host);
			spin_lock_irqsave(pHba->host->host_lock, flags);
		}
		rcode = adpt_i2o_post_wait(pHba, msg, size, FOREVER);
		if (rcode != 0)
			printk("adpt_i2o_passthru: post wait failed %d %p\n",
					rcode, reply);
		if (pHba->host) {
			spin_unlock_irqrestore(pHba->host->host_lock, flags);
			scsi_unblock_requests(pHba->host);
		}
	} while (rcode == -ETIMEDOUT);

	if(rcode){
		goto cleanup;
	}

	if(sg_offset) {
		/* Copy the Scatter Gather buffers back to user space */
		u32 j;
		// TODO add 64 bit API
		struct sg_simple_element* sg;
		int sg_size;

		// re-acquire the original message to handle correctly the sg copy operation
		memset(&msg, 0, MAX_MESSAGE_SIZE*4);
		// get user msg size in u32s
		if(get_user(size, &user_msg[0])){
			rcode = -EFAULT;
			goto cleanup;
		}
		size = size>>16;
		size *= 4;
		if (size > MAX_MESSAGE_SIZE) {
			rcode = -EINVAL;
			goto cleanup;
		}
		/* Copy in the user's I2O command */
		if (copy_from_user (msg, user_msg, size)) {
			rcode = -EFAULT;
			goto cleanup;
		}
		sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);

		// TODO add 64 bit API
		sg = (struct sg_simple_element*)(msg + sg_offset);
		for (j = 0; j < sg_count; j++) {
			/* Copy out the SG list to user's buffer if necessary */
			if(! (sg[j].flag_count & 0x4000000 /*I2O_SGL_FLAGS_DIR*/)) {
				sg_size = sg[j].flag_count & 0xffffff;
				// sg_simple_element API is 32 bit
				if (copy_to_user((void __user *)(ulong)sg[j].addr_bus,sg_list[j], sg_size)) {
					printk(KERN_WARNING"%s: Could not copy %p TO user %x\n",pHba->name, sg_list[j], sg[j].addr_bus);
					rcode = -EFAULT;
					goto cleanup;
				}
			}
		}
	}

	/* Copy back the reply to user space */
	if (reply_size) {
		// we wrote our own values for context - now restore the user supplied ones
		if(copy_from_user(reply+2, user_msg+2, sizeof(u32)*2)) {
			printk(KERN_WARNING"%s: Could not copy message context FROM user\n",pHba->name);
			rcode = -EFAULT;
		}
		if(copy_to_user(user_reply, reply, reply_size)) {
			printk(KERN_WARNING"%s: Could not copy reply TO user\n",pHba->name);
			rcode = -EFAULT;
		}
	}


cleanup:
	if (rcode != -ETIME && rcode != -EINTR) {
		struct sg_simple_element *sg =
				(struct sg_simple_element*) (msg +sg_offset);
		kfree (reply);
		while(sg_index) {
			if(sg_list[--sg_index]) {
				dma_free_coherent(&pHba->pDev->dev,
					sg[sg_index].flag_count & 0xffffff,
					sg_list[sg_index],
					sg[sg_index].addr_bus);
			}
		}
	}
	return rcode;
}
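/*
 * Illustrative note (not in the original source): the I2OUSRCMD layout
 * this routine expects from user space is the raw I2O frame followed by
 * the caller's reply buffer, each starting with a header word whose top
 * 16 bits hold the size in 32-bit words:
 *
 *	user_msg[0]  : msg size (u32s) << 16 | SGL offset (low bits)
 *	user_msg[1..]: rest of the I2O message, optional SG list
 *	user_reply[0]: reply buffer size (u32s) << 16
 *
 * msg[2]/msg[3] are overwritten with the driver's IOCTL context before
 * posting, which is why the original user context words are restored
 * from user_msg before the reply is copied back out.
 */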
#if defined __ia64__
static void adpt_ia64_info(sysInfo_S* si)
{
	// This is all the info we need for now
	// We will add more info as our new
	// management utility requires it
	si->processorType = PROC_IA64;
}
#endif

#if defined __sparc__
static void adpt_sparc_info(sysInfo_S* si)
{
	// This is all the info we need for now
	// We will add more info as our new
	// management utility requires it
	si->processorType = PROC_ULTRASPARC;
}
#endif
#if defined __alpha__
static void adpt_alpha_info(sysInfo_S* si)
{
	// This is all the info we need for now
	// We will add more info as our new
	// management utility requires it
	si->processorType = PROC_ALPHA;
}
#endif

#if defined __i386__

#include <uapi/asm/vm86.h>

static void adpt_i386_info(sysInfo_S* si)
{
	// This is all the info we need for now
	// We will add more info as our new
	// management utility requires it
	switch (boot_cpu_data.x86) {
	case CPU_386:
		si->processorType = PROC_386;
		break;
	case CPU_486:
		si->processorType = PROC_486;
		break;
	case CPU_586:
		si->processorType = PROC_PENTIUM;
		break;
	default:  // Just in case
		si->processorType = PROC_PENTIUM;
		break;
	}
}
#endif
/*
 * This routine returns information about the system.  This does not affect
 * any logic and if the info is wrong - it doesn't matter.
 */

/* Get all the info we can not get from kernel services */
static int adpt_system_info(void __user *buffer)
{
	sysInfo_S si;

	memset(&si, 0, sizeof(si));

	si.osType = OS_LINUX;
	si.osMajorVersion = 0;
	si.osMinorVersion = 0;
	si.osRevision = 0;
	si.busType = SI_PCI_BUS;
	si.processorFamily = DPTI_sig.dsProcessorFamily;

#if defined __i386__
	adpt_i386_info(&si);
#elif defined (__ia64__)
	adpt_ia64_info(&si);
#elif defined(__sparc__)
	adpt_sparc_info(&si);
#elif defined (__alpha__)
	adpt_alpha_info(&si);
#else
	si.processorType = 0xff ;
#endif
	if (copy_to_user(buffer, &si, sizeof(si))){
		printk(KERN_WARNING"dpti: Could not copy buffer TO user\n");
		return -EFAULT;
	}

	return 0;
}
static int adpt_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg)
{
	int minor;
	int error = 0;
	adpt_hba* pHba;
	ulong flags = 0;
	void __user *argp = (void __user *)arg;

	minor = iminor(inode);
	if (minor >= DPTI_MAX_HBA){
		return -ENXIO;
	}
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->unit == minor) {
			break;	/* found adapter */
		}
	}
	mutex_unlock(&adpt_configuration_lock);
	if(pHba == NULL){
		return -ENXIO;
	}

	while((volatile u32) pHba->state & DPTI_STATE_RESET )
		schedule_timeout_uninterruptible(2);

	switch (cmd) {
	// TODO: handle 3 cases:
	case DPT_SIGNATURE:
		if (copy_to_user(argp, &DPTI_sig, sizeof(DPTI_sig))) {
			return -EFAULT;
		}
		break;
	case I2OUSRCMD:
		return adpt_i2o_passthru(pHba, argp);

	case DPT_CTRLINFO:{
		drvrHBAinfo_S HbaInfo;

#define FLG_OSD_PCI_VALID 0x0001
#define FLG_OSD_DMA	  0x0002
#define FLG_OSD_I2O	  0x0004
		memset(&HbaInfo, 0, sizeof(HbaInfo));
		HbaInfo.drvrHBAnum = pHba->unit;
		HbaInfo.baseAddr = (ulong) pHba->base_addr_phys;
		HbaInfo.blinkState = adpt_read_blink_led(pHba);
		HbaInfo.pciBusNum = pHba->pDev->bus->number;
		HbaInfo.pciDeviceNum=PCI_SLOT(pHba->pDev->devfn);
		HbaInfo.Interrupt = pHba->pDev->irq;
		HbaInfo.hbaFlags = FLG_OSD_PCI_VALID | FLG_OSD_DMA | FLG_OSD_I2O;
		if(copy_to_user(argp, &HbaInfo, sizeof(HbaInfo))){
			printk(KERN_WARNING"%s: Could not copy HbaInfo TO user\n",pHba->name);
			return -EFAULT;
		}
		break;
		}
	case DPT_SYSINFO:
		return adpt_system_info(argp);
	case DPT_BLINKLED:{
		u32 value;
		value = (u32)adpt_read_blink_led(pHba);
		if (copy_to_user(argp, &value, sizeof(value))) {
			return -EFAULT;
		}
		break;
		}
	case I2ORESETCMD:
		if(pHba->host)
			spin_lock_irqsave(pHba->host->host_lock, flags);
		adpt_hba_reset(pHba);
		if(pHba->host)
			spin_unlock_irqrestore(pHba->host->host_lock, flags);
		break;
	case I2ORESCANCMD:
		adpt_rescan(pHba);
		break;
	default:
		return -EINVAL;
	}

	return error;
}
static long adpt_unlocked_ioctl(struct file *file, uint cmd, ulong arg)
{
	struct inode *inode;
	long ret;

	inode = file_inode(file);

	mutex_lock(&adpt_mutex);
	ret = adpt_ioctl(inode, file, cmd, arg);
	mutex_unlock(&adpt_mutex);

	return ret;
}

#ifdef CONFIG_COMPAT
static long compat_adpt_ioctl(struct file *file,
				unsigned int cmd, unsigned long arg)
{
	struct inode *inode;
	long ret;

	inode = file_inode(file);

	mutex_lock(&adpt_mutex);

	switch(cmd) {
	case DPT_SIGNATURE:
	case I2OUSRCMD:
	case DPT_CTRLINFO:
	case DPT_SYSINFO:
	case DPT_BLINKLED:
	case I2ORESETCMD:
	case I2ORESCANCMD:
	case (DPT_TARGET_BUSY & 0xFFFF):
	case DPT_TARGET_BUSY:
		ret = adpt_ioctl(inode, file, cmd, arg);
		break;
	default:
		ret = -ENOIOCTLCMD;
	}

	mutex_unlock(&adpt_mutex);

	return ret;
}
#endif
2120 static irqreturn_t adpt_isr(int irq, void *dev_id)
2122 struct scsi_cmnd* cmd;
2123 adpt_hba* pHba = dev_id;
2124 u32 m;
2125 void __iomem *reply;
2126 u32 status=0;
2127 u32 context;
2128 ulong flags = 0;
2129 int handled = 0;
2131 if (pHba == NULL){
2132 printk(KERN_WARNING"adpt_isr: NULL dev_id\n");
2133 return IRQ_NONE;
2135 if(pHba->host)
2136 spin_lock_irqsave(pHba->host->host_lock, flags);
2138 while( readl(pHba->irq_mask) & I2O_INTERRUPT_PENDING_B) {
2139 m = readl(pHba->reply_port);
2140 if(m == EMPTY_QUEUE){
2141 // Try twice then give up
2142 rmb();
2143 m = readl(pHba->reply_port);
2144 if(m == EMPTY_QUEUE){
2145 // This really should not happen
2146 printk(KERN_ERR"dpti: Could not get reply frame\n");
2147 goto out;
2150 if (pHba->reply_pool_pa <= m &&
2151 m < pHba->reply_pool_pa +
2152 (pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4)) {
2153 reply = (u8 *)pHba->reply_pool +
2154 (m - pHba->reply_pool_pa);
2155 } else {
2156 /* Ick, we should *never* be here */
2157 printk(KERN_ERR "dpti: reply frame not from pool\n");
2158 reply = (u8 *)bus_to_virt(m);
2161 if (readl(reply) & MSG_FAIL) {
2162 u32 old_m = readl(reply+28);
2163 void __iomem *msg;
2164 u32 old_context;
2165 PDEBUG("%s: Failed message\n",pHba->name);
2166 if(old_m >= 0x100000){
2167 printk(KERN_ERR"%s: Bad preserved MFA (%x)- dropping frame\n",pHba->name,old_m);
2168 writel(m,pHba->reply_port);
2169 continue;
2171 // Transaction context is 0 in failed reply frame
2172 msg = pHba->msg_addr_virt + old_m;
2173 old_context = readl(msg+12);
2174 writel(old_context, reply+12);
2175 adpt_send_nop(pHba, old_m);
2177 context = readl(reply+8);
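/* The initiator context word doubles as a tag here (a reading taken
 * from the checks below): bit 31 marks a post-wait message, bit 30 an
 * ioctl passthrough, and anything else carries a SCSI command context.
 * E.g. a context of (0xC0000000 | n) is an ioctl reply that a sleeping
 * caller is also waiting on. */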
2178 if(context & 0x40000000){ // IOCTL
2179 void *p = adpt_ioctl_from_context(pHba, readl(reply+12));
2180 if( p != NULL) {
2181 memcpy_fromio(p, reply, REPLY_FRAME_SIZE * 4);
2183 	// All IOCTLs are also post-wait messages; fall through
2185 if(context & 0x80000000){ // Post wait message
2186 status = readl(reply+16);
2187 if(status >> 24){
2188 status &= 0xffff; /* Get detail status */
2189 } else {
2190 status = I2O_POST_WAIT_OK;
2192 if(!(context & 0x40000000)) {
2193 cmd = adpt_cmd_from_context(pHba,
2194 readl(reply+12));
2195 if(cmd != NULL) {
2196 printk(KERN_WARNING"%s: Apparent SCSI cmd in Post Wait Context - cmd=%p context=%x\n", pHba->name, cmd, context);
2199 adpt_i2o_post_wait_complete(context, status);
2200 } else { // SCSI message
2201 cmd = adpt_cmd_from_context (pHba, readl(reply+12));
2202 if(cmd != NULL){
2203 scsi_dma_unmap(cmd);
2204 			if(cmd->serial_number != 0) { // If not timed out
2205 adpt_i2o_to_scsi(reply, cmd);
2209 writel(m, pHba->reply_port);
2210 wmb();
2211 rmb();
2213 handled = 1;
2214 out: if(pHba->host)
2215 spin_unlock_irqrestore(pHba->host->host_lock, flags);
2216 return IRQ_RETVAL(handled);
2219 static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_device* d)
2221 int i;
2222 u32 msg[MAX_MESSAGE_SIZE];
2223 u32* mptr;
2224 u32* lptr;
2225 u32 *lenptr;
2226 int direction;
2227 int scsidir;
2228 int nseg;
2229 u32 len;
2230 u32 reqlen;
2231 s32 rcode;
2232 dma_addr_t addr;
2234 memset(msg, 0 , sizeof(msg));
2235 len = scsi_bufflen(cmd);
2236 direction = 0x00000000;
2238 scsidir = 0x00000000; // DATA NO XFER
2239 if(len) {
2241 * Set SCBFlags to indicate if data is being transferred
2242 * in or out, or no data transfer
2243 * Note: Do not have to verify index is less than 0 since
2244 * cmd->cmnd[0] is an unsigned char
2246 switch(cmd->sc_data_direction){
2247 case DMA_FROM_DEVICE:
2248 scsidir =0x40000000; // DATA IN (iop<--dev)
2249 break;
2250 case DMA_TO_DEVICE:
2251 direction=0x04000000; // SGL OUT
2252 scsidir =0x80000000; // DATA OUT (iop-->dev)
2253 break;
2254 case DMA_NONE:
2255 break;
2256 case DMA_BIDIRECTIONAL:
2257 scsidir =0x40000000; // DATA IN (iop<--dev)
2258 		// Assume DATA IN and continue
2259 break;
2260 default:
2261 		printk(KERN_WARNING"%s: unknown data direction for scsi opcode 0x%x.\n",
2262 pHba->name, cmd->cmnd[0]);
2263 cmd->result = (DID_OK <<16) | (INITIATOR_ERROR << 8);
2264 cmd->scsi_done(cmd);
2265 return 0;
2268 // msg[0] is set later
2269 // I2O_CMD_SCSI_EXEC
2270 msg[1] = ((0xff<<24)|(HOST_TID<<12)|d->tid);
2271 msg[2] = 0;
2272 msg[3] = adpt_cmd_to_context(cmd); /* Want SCSI control block back */
2273 // Our cards use the transaction context as the tag for queueing
2274 // Adaptec/DPT Private stuff
2275 msg[4] = I2O_CMD_SCSI_EXEC|(DPT_ORGANIZATION_ID<<16);
2276 msg[5] = d->tid;
2277 /* Direction, disconnect ok | sense data | simple queue , CDBLen */
2278 // I2O_SCB_FLAG_ENABLE_DISCONNECT |
2279 // I2O_SCB_FLAG_SIMPLE_QUEUE_TAG |
2280 // I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
2281 msg[6] = scsidir|0x20a00000|cmd->cmd_len;
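/* Worked example of the encoding above, with illustrative values: a
 * 10-byte READ with DATA IN gives 0x40000000 | 0x20a00000 | 10
 * = 0x60a0000a. */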
2283 mptr=msg+7;
2285 // Write SCSI command into the message - always 16 byte block
2286 memset(mptr, 0, 16);
2287 memcpy(mptr, cmd->cmnd, cmd->cmd_len);
2288 mptr+=4;
2289 lenptr=mptr++; /* Remember me - fill in when we know */
2290 if (dpt_dma64(pHba)) {
2291 reqlen = 16; // SINGLE SGE
2292 *mptr++ = (0x7C<<24)+(2<<16)+0x02; /* Enable 64 bit */
2293 *mptr++ = 1 << PAGE_SHIFT;
2294 } else {
2295 reqlen = 14; // SINGLE SGE
2297 /* Now fill in the SGList and command */
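/* Each simple SG element built below is a 32-bit flag/length word
 * followed by a 32-bit bus address (plus a high dword on 64-bit
 * capable cards).  As used here: 0x10000000 marks a data element,
 * direction ORs in 0x04000000 for host-to-device transfers, and the
 * last element is rewritten with 0xD0000000 to set the
 * end-of-list/end-of-buffer bits. */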
2299 nseg = scsi_dma_map(cmd);
2300 BUG_ON(nseg < 0);
2301 if (nseg) {
2302 struct scatterlist *sg;
2304 len = 0;
2305 scsi_for_each_sg(cmd, sg, nseg, i) {
2306 lptr = mptr;
2307 *mptr++ = direction|0x10000000|sg_dma_len(sg);
2308 len+=sg_dma_len(sg);
2309 addr = sg_dma_address(sg);
2310 *mptr++ = dma_low(addr);
2311 if (dpt_dma64(pHba))
2312 *mptr++ = dma_high(addr);
2313 /* Make this an end of list */
2314 if (i == nseg - 1)
2315 *lptr = direction|0xD0000000|sg_dma_len(sg);
2317 reqlen = mptr - msg;
2318 *lenptr = len;
2320 if(cmd->underflow && len != cmd->underflow){
2321 printk(KERN_WARNING"Cmd len %08X Cmd underflow %08X\n",
2322 len, cmd->underflow);
2324 } else {
2325 *lenptr = len = 0;
2326 reqlen = 12;
2329 /* Stick the headers on */
2330 msg[0] = reqlen<<16 | ((reqlen > 12) ? SGL_OFFSET_12 : SGL_OFFSET_0);
2332 	// Send it on its way
2333 rcode = adpt_i2o_post_this(pHba, msg, reqlen<<2);
2334 if (rcode == 0) {
2335 return 0;
2337 return rcode;
2341 static s32 adpt_scsi_host_alloc(adpt_hba* pHba, struct scsi_host_template *sht)
2343 struct Scsi_Host *host;
2345 host = scsi_host_alloc(sht, sizeof(adpt_hba*));
2346 if (host == NULL) {
2347 		printk(KERN_WARNING "%s: scsi_host_alloc returned NULL\n", pHba->name);
2348 return -1;
2350 host->hostdata[0] = (unsigned long)pHba;
2351 pHba->host = host;
2353 host->irq = pHba->pDev->irq;
2354 /* no IO ports, so don't have to set host->io_port and
2355 * host->n_io_port
2357 host->io_port = 0;
2358 host->n_io_port = 0;
2359 /* see comments in scsi_host.h */
2360 host->max_id = 16;
2361 host->max_lun = 256;
2362 host->max_channel = pHba->top_scsi_channel + 1;
2363 host->cmd_per_lun = 1;
2364 host->unique_id = (u32)sys_tbl_pa + pHba->unit;
2365 host->sg_tablesize = pHba->sg_tablesize;
2366 host->can_queue = pHba->post_fifo_size;
2367 host->use_cmd_list = 1;
2369 return 0;
2373 static s32 adpt_i2o_to_scsi(void __iomem *reply, struct scsi_cmnd* cmd)
2375 adpt_hba* pHba;
2376 u32 hba_status;
2377 u32 dev_status;
2378 u32 reply_flags = readl(reply) & 0xff00; // Leave it shifted up 8 bits
2379 // I know this would look cleaner if I just read bytes
2380 // but the model I have been using for all the rest of the
2381 // io is in 4 byte words - so I keep that model
2382 u16 detailed_status = readl(reply+16) &0xffff;
2383 dev_status = (detailed_status & 0xff);
2384 hba_status = detailed_status >> 8;
2386 // calculate resid for sg
2387 scsi_set_resid(cmd, scsi_bufflen(cmd) - readl(reply+20));
2389 pHba = (adpt_hba*) cmd->device->host->hostdata[0];
2391 cmd->sense_buffer[0] = '\0'; // initialize sense valid flag to false
2393 if(!(reply_flags & MSG_FAIL)) {
2394 switch(detailed_status & I2O_SCSI_DSC_MASK) {
2395 case I2O_SCSI_DSC_SUCCESS:
2396 cmd->result = (DID_OK << 16);
2397 // handle underflow
2398 if (readl(reply+20) < cmd->underflow) {
2399 cmd->result = (DID_ERROR <<16);
2400 printk(KERN_WARNING"%s: SCSI CMD underflow\n",pHba->name);
2402 break;
2403 case I2O_SCSI_DSC_REQUEST_ABORTED:
2404 cmd->result = (DID_ABORT << 16);
2405 break;
2406 case I2O_SCSI_DSC_PATH_INVALID:
2407 case I2O_SCSI_DSC_DEVICE_NOT_PRESENT:
2408 case I2O_SCSI_DSC_SELECTION_TIMEOUT:
2409 case I2O_SCSI_DSC_COMMAND_TIMEOUT:
2410 case I2O_SCSI_DSC_NO_ADAPTER:
2411 case I2O_SCSI_DSC_RESOURCE_UNAVAILABLE:
2412 printk(KERN_WARNING"%s: SCSI Timeout-Device (%d,%d,%llu) hba status=0x%x, dev status=0x%x, cmd=0x%x\n",
2413 pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun, hba_status, dev_status, cmd->cmnd[0]);
2414 cmd->result = (DID_TIME_OUT << 16);
2415 break;
2416 case I2O_SCSI_DSC_ADAPTER_BUSY:
2417 case I2O_SCSI_DSC_BUS_BUSY:
2418 cmd->result = (DID_BUS_BUSY << 16);
2419 break;
2420 case I2O_SCSI_DSC_SCSI_BUS_RESET:
2421 case I2O_SCSI_DSC_BDR_MESSAGE_SENT:
2422 cmd->result = (DID_RESET << 16);
2423 break;
2424 case I2O_SCSI_DSC_PARITY_ERROR_FAILURE:
2425 printk(KERN_WARNING"%s: SCSI CMD parity error\n",pHba->name);
2426 cmd->result = (DID_PARITY << 16);
2427 break;
2428 case I2O_SCSI_DSC_UNABLE_TO_ABORT:
2429 case I2O_SCSI_DSC_COMPLETE_WITH_ERROR:
2430 case I2O_SCSI_DSC_UNABLE_TO_TERMINATE:
2431 case I2O_SCSI_DSC_MR_MESSAGE_RECEIVED:
2432 case I2O_SCSI_DSC_AUTOSENSE_FAILED:
2433 case I2O_SCSI_DSC_DATA_OVERRUN:
2434 case I2O_SCSI_DSC_UNEXPECTED_BUS_FREE:
2435 case I2O_SCSI_DSC_SEQUENCE_FAILURE:
2436 case I2O_SCSI_DSC_REQUEST_LENGTH_ERROR:
2437 case I2O_SCSI_DSC_PROVIDE_FAILURE:
2438 case I2O_SCSI_DSC_REQUEST_TERMINATED:
2439 case I2O_SCSI_DSC_IDE_MESSAGE_SENT:
2440 case I2O_SCSI_DSC_UNACKNOWLEDGED_EVENT:
2441 case I2O_SCSI_DSC_MESSAGE_RECEIVED:
2442 case I2O_SCSI_DSC_INVALID_CDB:
2443 case I2O_SCSI_DSC_LUN_INVALID:
2444 case I2O_SCSI_DSC_SCSI_TID_INVALID:
2445 case I2O_SCSI_DSC_FUNCTION_UNAVAILABLE:
2446 case I2O_SCSI_DSC_NO_NEXUS:
2447 case I2O_SCSI_DSC_CDB_RECEIVED:
2448 case I2O_SCSI_DSC_LUN_ALREADY_ENABLED:
2449 case I2O_SCSI_DSC_QUEUE_FROZEN:
2450 case I2O_SCSI_DSC_REQUEST_INVALID:
2451 default:
2452 printk(KERN_WARNING"%s: SCSI error %0x-Device(%d,%d,%llu) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
2453 pHba->name, detailed_status & I2O_SCSI_DSC_MASK, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun,
2454 hba_status, dev_status, cmd->cmnd[0]);
2455 cmd->result = (DID_ERROR << 16);
2456 break;
2459 // copy over the request sense data if it was a check
2460 // condition status
2461 if (dev_status == SAM_STAT_CHECK_CONDITION) {
2462 u32 len = min(SCSI_SENSE_BUFFERSIZE, 40);
2463 // Copy over the sense data
2464 memcpy_fromio(cmd->sense_buffer, (reply+28) , len);
2465 if(cmd->sense_buffer[0] == 0x70 /* class 7 */ &&
2466 cmd->sense_buffer[2] == DATA_PROTECT ){
2467 /* This is to handle an array failed */
2468 cmd->result = (DID_TIME_OUT << 16);
2469 printk(KERN_WARNING"%s: SCSI Data Protect-Device (%d,%d,%llu) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
2470 pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun,
2471 hba_status, dev_status, cmd->cmnd[0]);
2475 } else {
2476 	/* In this condition we could not talk to the tid:
2477 	 * the card rejected it. We should signal a retry
2478 	 * for a limited number of retries.
2480 cmd->result = (DID_TIME_OUT << 16);
2481 printk(KERN_WARNING"%s: I2O MSG_FAIL - Device (%d,%d,%llu) tid=%d, cmd=0x%x\n",
2482 pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun,
2483 ((struct adpt_device*)(cmd->device->hostdata))->tid, cmd->cmnd[0]);
2486 cmd->result |= (dev_status);
2488 if(cmd->scsi_done != NULL){
2489 cmd->scsi_done(cmd);
2491 return cmd->result;
2495 static s32 adpt_rescan(adpt_hba* pHba)
2497 s32 rcode;
2498 ulong flags = 0;
2500 if(pHba->host)
2501 spin_lock_irqsave(pHba->host->host_lock, flags);
2502 if ((rcode=adpt_i2o_lct_get(pHba)) < 0)
2503 goto out;
2504 if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0)
2505 goto out;
2506 rcode = 0;
2507 out: if(pHba->host)
2508 spin_unlock_irqrestore(pHba->host->host_lock, flags);
2509 return rcode;
2513 static s32 adpt_i2o_reparse_lct(adpt_hba* pHba)
2515 int i;
2516 int max;
2517 int tid;
2518 struct i2o_device *d;
2519 i2o_lct *lct = pHba->lct;
2520 u8 bus_no = 0;
2521 s16 scsi_id;
2522 u64 scsi_lun;
2523 u32 buf[10]; // at least 8 u32's
2524 struct adpt_device* pDev = NULL;
2525 struct i2o_device* pI2o_dev = NULL;
2527 if (lct == NULL) {
2528 printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
2529 return -1;
2532 max = lct->table_size;
2533 max -= 3;
2534 max /= 9;
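/* A sketch of the arithmetic above: it treats the LCT as a 3-dword
 * header followed by 9-dword entries, so e.g. a table_size of 30
 * dwords describes (30 - 3) / 9 = 3 devices. */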
2536 // Mark each drive as unscanned
2537 for (d = pHba->devices; d; d = d->next) {
2538 pDev =(struct adpt_device*) d->owner;
2539 if(!pDev){
2540 continue;
2542 pDev->state |= DPTI_DEV_UNSCANNED;
2545 printk(KERN_INFO "%s: LCT has %d entries.\n", pHba->name,max);
2547 for(i=0;i<max;i++) {
2548 if( lct->lct_entry[i].user_tid != 0xfff){
2549 continue;
2552 if( lct->lct_entry[i].class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
2553 lct->lct_entry[i].class_id == I2O_CLASS_SCSI_PERIPHERAL ||
2554 lct->lct_entry[i].class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
2555 tid = lct->lct_entry[i].tid;
2556 if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
2557 printk(KERN_ERR"%s: Could not query device\n",pHba->name);
2558 continue;
2560 bus_no = buf[0]>>16;
2561 if (bus_no >= MAX_CHANNEL) { /* Something wrong skip it */
2562 printk(KERN_WARNING
2563 "%s: Channel number %d out of range\n",
2564 pHba->name, bus_no);
2565 continue;
2568 scsi_id = buf[1];
2569 scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
2570 pDev = pHba->channel[bus_no].device[scsi_id];
2571 			/* walk the LUN chain for this id */
2572 while(pDev) {
2573 if(pDev->scsi_lun == scsi_lun) {
2574 break;
2576 pDev = pDev->next_lun;
2578 if(!pDev ) { // Something new add it
2579 d = kmalloc(sizeof(struct i2o_device),
2580 GFP_ATOMIC);
2581 if(d==NULL)
2583 printk(KERN_CRIT "Out of memory for I2O device data.\n");
2584 return -ENOMEM;
2587 d->controller = pHba;
2588 d->next = NULL;
2590 memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
2592 d->flags = 0;
2593 adpt_i2o_report_hba_unit(pHba, d);
2594 adpt_i2o_install_device(pHba, d);
2596 pDev = pHba->channel[bus_no].device[scsi_id];
2597 if( pDev == NULL){
2598 pDev =
2599 kzalloc(sizeof(struct adpt_device),
2600 GFP_ATOMIC);
2601 if(pDev == NULL) {
2602 return -ENOMEM;
2604 pHba->channel[bus_no].device[scsi_id] = pDev;
2605 } else {
2606 while (pDev->next_lun) {
2607 pDev = pDev->next_lun;
2609 pDev = pDev->next_lun =
2610 kzalloc(sizeof(struct adpt_device),
2611 GFP_ATOMIC);
2612 if(pDev == NULL) {
2613 return -ENOMEM;
2616 pDev->tid = d->lct_data.tid;
2617 pDev->scsi_channel = bus_no;
2618 pDev->scsi_id = scsi_id;
2619 pDev->scsi_lun = scsi_lun;
2620 pDev->pI2o_dev = d;
2621 d->owner = pDev;
2622 pDev->type = (buf[0])&0xff;
2623 pDev->flags = (buf[0]>>8)&0xff;
2624 			// Too late, the SCSI system has made up its mind, but what the hey ...
2625 if(scsi_id > pHba->top_scsi_id){
2626 pHba->top_scsi_id = scsi_id;
2628 if(scsi_lun > pHba->top_scsi_lun){
2629 pHba->top_scsi_lun = scsi_lun;
2631 continue;
2632 } // end of new i2o device
2634 // We found an old device - check it
2635 while(pDev) {
2636 if(pDev->scsi_lun == scsi_lun) {
2637 if(!scsi_device_online(pDev->pScsi_dev)) {
2638 printk(KERN_WARNING"%s: Setting device (%d,%d,%llu) back online\n",
2639 pHba->name,bus_no,scsi_id,scsi_lun);
2640 if (pDev->pScsi_dev) {
2641 scsi_device_set_state(pDev->pScsi_dev, SDEV_RUNNING);
2644 d = pDev->pI2o_dev;
2645 if(d->lct_data.tid != tid) { // something changed
2646 pDev->tid = tid;
2647 memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
2648 if (pDev->pScsi_dev) {
2649 pDev->pScsi_dev->changed = TRUE;
2650 pDev->pScsi_dev->removable = TRUE;
2653 // Found it - mark it scanned
2654 pDev->state = DPTI_DEV_ONLINE;
2655 break;
2657 pDev = pDev->next_lun;
2661 for (pI2o_dev = pHba->devices; pI2o_dev; pI2o_dev = pI2o_dev->next) {
2662 pDev =(struct adpt_device*) pI2o_dev->owner;
2663 if(!pDev){
2664 continue;
2666 // Drive offline drives that previously existed but could not be found
2667 // in the LCT table
2668 if (pDev->state & DPTI_DEV_UNSCANNED){
2669 pDev->state = DPTI_DEV_OFFLINE;
2670 printk(KERN_WARNING"%s: Device (%d,%d,%llu) offline\n",pHba->name,pDev->scsi_channel,pDev->scsi_id,pDev->scsi_lun);
2671 if (pDev->pScsi_dev) {
2672 scsi_device_set_state(pDev->pScsi_dev, SDEV_OFFLINE);
2676 return 0;
2679 static void adpt_fail_posted_scbs(adpt_hba* pHba)
2681 struct scsi_cmnd* cmd = NULL;
2682 struct scsi_device* d = NULL;
2684 shost_for_each_device(d, pHba->host) {
2685 unsigned long flags;
2686 spin_lock_irqsave(&d->list_lock, flags);
2687 list_for_each_entry(cmd, &d->cmd_list, list) {
2688 if(cmd->serial_number == 0){
2689 continue;
2691 cmd->result = (DID_OK << 16) | (QUEUE_FULL <<1);
2692 cmd->scsi_done(cmd);
2694 spin_unlock_irqrestore(&d->list_lock, flags);
2699 /*============================================================================
2700 * Routines from i2o subsystem
2701 *============================================================================
2707 * Bring an I2O controller into HOLD state. See the spec.
2709 static int adpt_i2o_activate_hba(adpt_hba* pHba)
2711 int rcode;
2713 if(pHba->initialized ) {
2714 if (adpt_i2o_status_get(pHba) < 0) {
2715 if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
2716 printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
2717 return rcode;
2719 if (adpt_i2o_status_get(pHba) < 0) {
2720 printk(KERN_INFO "HBA not responding.\n");
2721 return -1;
2725 if(pHba->status_block->iop_state == ADAPTER_STATE_FAULTED) {
2726 printk(KERN_CRIT "%s: hardware fault\n", pHba->name);
2727 return -1;
2730 if (pHba->status_block->iop_state == ADAPTER_STATE_READY ||
2731 pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL ||
2732 pHba->status_block->iop_state == ADAPTER_STATE_HOLD ||
2733 pHba->status_block->iop_state == ADAPTER_STATE_FAILED) {
2734 adpt_i2o_reset_hba(pHba);
2735 if (adpt_i2o_status_get(pHba) < 0 || pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
2736 printk(KERN_ERR "%s: Failed to initialize.\n", pHba->name);
2737 return -1;
2740 } else {
2741 if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
2742 printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
2743 return rcode;
2748 if (adpt_i2o_init_outbound_q(pHba) < 0) {
2749 return -1;
2752 /* In HOLD state */
2754 if (adpt_i2o_hrt_get(pHba) < 0) {
2755 return -1;
2758 return 0;
2762 * Bring a controller online into OPERATIONAL state.
2765 static int adpt_i2o_online_hba(adpt_hba* pHba)
2767 if (adpt_i2o_systab_send(pHba) < 0) {
2768 adpt_i2o_delete_hba(pHba);
2769 return -1;
2771 /* In READY state */
2773 if (adpt_i2o_enable_hba(pHba) < 0) {
2774 adpt_i2o_delete_hba(pHba);
2775 return -1;
2778 /* In OPERATIONAL state */
2779 return 0;
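/* Posting a NOP is how a claimed inbound message frame is handed back
 * to the IOP when there is nothing useful to put in it -- see the
 * failed-reply path in adpt_isr() and the allocation-failure path in
 * adpt_i2o_init_outbound_q(). */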
2782 static s32 adpt_send_nop(adpt_hba*pHba,u32 m)
2784 u32 __iomem *msg;
2785 ulong timeout = jiffies + 5*HZ;
2787 while(m == EMPTY_QUEUE){
2788 rmb();
2789 m = readl(pHba->post_port);
2790 if(m != EMPTY_QUEUE){
2791 break;
2793 if(time_after(jiffies,timeout)){
2794 printk(KERN_ERR "%s: Timeout waiting for message frame!\n",pHba->name);
2795 return 2;
2797 schedule_timeout_uninterruptible(1);
2799 msg = (u32 __iomem *)(pHba->msg_addr_virt + m);
2800 writel( THREE_WORD_MSG_SIZE | SGL_OFFSET_0,&msg[0]);
2801 writel( I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | 0,&msg[1]);
2802 writel( 0,&msg[2]);
2803 wmb();
2805 writel(m, pHba->post_port);
2806 wmb();
2807 return 0;
2810 static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba)
2812 u8 *status;
2813 dma_addr_t addr;
2814 u32 __iomem *msg = NULL;
2815 int i;
2816 ulong timeout = jiffies + TMOUT_INITOUTBOUND*HZ;
2817 u32 m;
2819 do {
2820 rmb();
2821 m = readl(pHba->post_port);
2822 if (m != EMPTY_QUEUE) {
2823 break;
2826 if(time_after(jiffies,timeout)){
2827 printk(KERN_WARNING"%s: Timeout waiting for message frame\n",pHba->name);
2828 return -ETIMEDOUT;
2830 schedule_timeout_uninterruptible(1);
2831 } while(m == EMPTY_QUEUE);
2833 msg=(u32 __iomem *)(pHba->msg_addr_virt+m);
2835 status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
2836 if (!status) {
2837 adpt_send_nop(pHba, m);
2838 printk(KERN_WARNING"%s: IOP reset failed - no free memory.\n",
2839 pHba->name);
2840 return -ENOMEM;
2842 memset(status, 0, 4);
2844 writel(EIGHT_WORD_MSG_SIZE| SGL_OFFSET_6, &msg[0]);
2845 writel(I2O_CMD_OUTBOUND_INIT<<24 | HOST_TID<<12 | ADAPTER_TID, &msg[1]);
2846 writel(0, &msg[2]);
2847 writel(0x0106, &msg[3]); /* Transaction context */
2848 writel(4096, &msg[4]); /* Host page frame size */
2849 writel((REPLY_FRAME_SIZE)<<16|0x80, &msg[5]); /* Outbound msg frame size and Initcode */
2850 writel(0xD0000004, &msg[6]); /* Simple SG LE, EOB */
2851 writel((u32)addr, &msg[7]);
2853 writel(m, pHba->post_port);
2854 wmb();
2856 // Wait for the reply status to come back
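/* (The IOP signals progress through the 4-byte coherent buffer posted
 * above: 0x01 while the init is still in flight, 0x04 on completion --
 * the values tested below.) */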
2857 do {
2858 if (*status) {
2859 if (*status != 0x01 /*I2O_EXEC_OUTBOUND_INIT_IN_PROGRESS*/) {
2860 break;
2863 rmb();
2864 if(time_after(jiffies,timeout)){
2865 printk(KERN_WARNING"%s: Timeout Initializing\n",pHba->name);
2866 /* We lose 4 bytes of "status" here, but we
2867 cannot free these because controller may
2868 awake and corrupt those bytes at any time */
2869 /* dma_free_coherent(&pHba->pDev->dev, 4, status, addr); */
2870 return -ETIMEDOUT;
2872 schedule_timeout_uninterruptible(1);
2873 } while (1);
2875 // If the command was successful, fill the fifo with our reply
2876 // message packets
2877 if(*status != 0x04 /*I2O_EXEC_OUTBOUND_INIT_COMPLETE*/) {
2878 dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
2879 return -2;
2881 dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
2883 if(pHba->reply_pool != NULL) {
2884 dma_free_coherent(&pHba->pDev->dev,
2885 pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
2886 pHba->reply_pool, pHba->reply_pool_pa);
2889 pHba->reply_pool = dma_alloc_coherent(&pHba->pDev->dev,
2890 pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
2891 &pHba->reply_pool_pa, GFP_KERNEL);
2892 if (!pHba->reply_pool) {
2893 printk(KERN_ERR "%s: Could not allocate reply pool\n", pHba->name);
2894 return -ENOMEM;
2896 memset(pHba->reply_pool, 0 , pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4);
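/* Prime the outbound FIFO: hand the IOP the bus address of every reply
 * frame so it has somewhere to post replies.  Frame i lives at
 * reply_pool_pa + i * REPLY_FRAME_SIZE * 4, the same arithmetic
 * adpt_isr() uses to map a posted address back into the pool. */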
2898 for(i = 0; i < pHba->reply_fifo_size; i++) {
2899 writel(pHba->reply_pool_pa + (i * REPLY_FRAME_SIZE * 4),
2900 pHba->reply_port);
2901 wmb();
2903 adpt_i2o_status_get(pHba);
2904 return 0;
2909 * I2O System Table. Contains information about
2910 * all the IOPs in the system. Used to inform IOPs
2911 * about each other's existence.
2913 * sys_tbl_ver is the CurrentChangeIndicator that is
2914 * used by IOPs to track changes.
2919 static s32 adpt_i2o_status_get(adpt_hba* pHba)
2921 ulong timeout;
2922 u32 m;
2923 u32 __iomem *msg;
2924 u8 *status_block=NULL;
2926 if(pHba->status_block == NULL) {
2927 pHba->status_block = dma_alloc_coherent(&pHba->pDev->dev,
2928 sizeof(i2o_status_block),
2929 &pHba->status_block_pa, GFP_KERNEL);
2930 if(pHba->status_block == NULL) {
2931 printk(KERN_ERR
2932 "dpti%d: Get Status Block failed; Out of memory. \n",
2933 pHba->unit);
2934 return -ENOMEM;
2937 memset(pHba->status_block, 0, sizeof(i2o_status_block));
2938 status_block = (u8*)(pHba->status_block);
2939 timeout = jiffies+TMOUT_GETSTATUS*HZ;
2940 do {
2941 rmb();
2942 m = readl(pHba->post_port);
2943 if (m != EMPTY_QUEUE) {
2944 break;
2946 if(time_after(jiffies,timeout)){
2947 printk(KERN_ERR "%s: Timeout waiting for message !\n",
2948 pHba->name);
2949 return -ETIMEDOUT;
2951 schedule_timeout_uninterruptible(1);
2952 } while(m==EMPTY_QUEUE);
2955 msg=(u32 __iomem *)(pHba->msg_addr_virt+m);
2957 writel(NINE_WORD_MSG_SIZE|SGL_OFFSET_0, &msg[0]);
2958 writel(I2O_CMD_STATUS_GET<<24|HOST_TID<<12|ADAPTER_TID, &msg[1]);
2959 writel(1, &msg[2]);
2960 writel(0, &msg[3]);
2961 writel(0, &msg[4]);
2962 writel(0, &msg[5]);
2963 writel( dma_low(pHba->status_block_pa), &msg[6]);
2964 writel( dma_high(pHba->status_block_pa), &msg[7]);
2965 writel(sizeof(i2o_status_block), &msg[8]); // 88 bytes
2967 //post message
2968 writel(m, pHba->post_port);
2969 wmb();
2971 while(status_block[87]!=0xff){
2972 if(time_after(jiffies,timeout)){
2973 printk(KERN_ERR"dpti%d: Get status timeout.\n",
2974 pHba->unit);
2975 return -ETIMEDOUT;
2977 rmb();
2978 schedule_timeout_uninterruptible(1);
2981 // Set up our number of outbound and inbound messages
2982 pHba->post_fifo_size = pHba->status_block->max_inbound_frames;
2983 if (pHba->post_fifo_size > MAX_TO_IOP_MESSAGES) {
2984 pHba->post_fifo_size = MAX_TO_IOP_MESSAGES;
2987 pHba->reply_fifo_size = pHba->status_block->max_outbound_frames;
2988 if (pHba->reply_fifo_size > MAX_FROM_IOP_MESSAGES) {
2989 pHba->reply_fifo_size = MAX_FROM_IOP_MESSAGES;
2992 // Calculate the Scatter Gather list size
2993 if (dpt_dma64(pHba)) {
2994 pHba->sg_tablesize
2995 = ((pHba->status_block->inbound_frame_size * 4
2996 - 14 * sizeof(u32))
2997 / (sizeof(struct sg_simple_element) + sizeof(u32)));
2998 } else {
2999 pHba->sg_tablesize
3000 = ((pHba->status_block->inbound_frame_size * 4
3001 - 12 * sizeof(u32))
3002 / sizeof(struct sg_simple_element));
3004 if (pHba->sg_tablesize > SG_LIST_ELEMENTS) {
3005 pHba->sg_tablesize = SG_LIST_ELEMENTS;
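/* A sketch with illustrative numbers: a 128-dword inbound frame on a
 * 32-bit card yields (128*4 - 12*4) / 8 = 58 simple SG elements before
 * the SG_LIST_ELEMENTS clamp; the 64-bit path reserves 14 header
 * dwords and pays an extra address dword per element, hence the
 * different divisor. */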
3009 #ifdef DEBUG
3010 printk("dpti%d: State = ",pHba->unit);
3011 switch(pHba->status_block->iop_state) {
3012 case 0x01:
3013 printk("INIT\n");
3014 break;
3015 case 0x02:
3016 printk("RESET\n");
3017 break;
3018 case 0x04:
3019 printk("HOLD\n");
3020 break;
3021 case 0x05:
3022 printk("READY\n");
3023 break;
3024 case 0x08:
3025 printk("OPERATIONAL\n");
3026 break;
3027 case 0x10:
3028 printk("FAILED\n");
3029 break;
3030 case 0x11:
3031 printk("FAULTED\n");
3032 break;
3033 default:
3034 printk("%x (unknown!!)\n",pHba->status_block->iop_state);
3036 #endif
3037 return 0;
3041 * Get the IOP's Logical Configuration Table
3043 static int adpt_i2o_lct_get(adpt_hba* pHba)
3045 u32 msg[8];
3046 int ret;
3047 u32 buf[16];
3049 if ((pHba->lct_size == 0) || (pHba->lct == NULL)){
3050 pHba->lct_size = pHba->status_block->expected_lct_size;
3052 do {
3053 if (pHba->lct == NULL) {
3054 pHba->lct = dma_alloc_coherent(&pHba->pDev->dev,
3055 pHba->lct_size, &pHba->lct_pa,
3056 GFP_ATOMIC);
3057 if(pHba->lct == NULL) {
3058 printk(KERN_CRIT "%s: Lct Get failed. Out of memory.\n",
3059 pHba->name);
3060 return -ENOMEM;
3063 memset(pHba->lct, 0, pHba->lct_size);
3065 msg[0] = EIGHT_WORD_MSG_SIZE|SGL_OFFSET_6;
3066 msg[1] = I2O_CMD_LCT_NOTIFY<<24 | HOST_TID<<12 | ADAPTER_TID;
3067 msg[2] = 0;
3068 msg[3] = 0;
3069 msg[4] = 0xFFFFFFFF; /* All devices */
3070 msg[5] = 0x00000000; /* Report now */
3071 msg[6] = 0xD0000000|pHba->lct_size;
3072 msg[7] = (u32)pHba->lct_pa;
3074 if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 360))) {
3075 printk(KERN_ERR "%s: LCT Get failed (status=%#10x.\n",
3076 pHba->name, ret);
3077 printk(KERN_ERR"Adaptec: Error Reading Hardware.\n");
3078 return ret;
3081 if ((pHba->lct->table_size << 2) > pHba->lct_size) {
3082 pHba->lct_size = pHba->lct->table_size << 2;
3083 dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
3084 pHba->lct, pHba->lct_pa);
3085 pHba->lct = NULL;
3087 } while (pHba->lct == NULL);
3089 PDEBUG("%s: Hardware resource table read.\n", pHba->name);
3092 // I2O_DPT_EXEC_IOP_BUFFERS_GROUP_NO;
3093 if(adpt_i2o_query_scalar(pHba, 0 , 0x8000, -1, buf, sizeof(buf))>=0) {
3094 pHba->FwDebugBufferSize = buf[1];
3095 pHba->FwDebugBuffer_P = ioremap(pHba->base_addr_phys + buf[0],
3096 pHba->FwDebugBufferSize);
3097 if (pHba->FwDebugBuffer_P) {
3098 pHba->FwDebugFlags_P = pHba->FwDebugBuffer_P +
3099 FW_DEBUG_FLAGS_OFFSET;
3100 pHba->FwDebugBLEDvalue_P = pHba->FwDebugBuffer_P +
3101 FW_DEBUG_BLED_OFFSET;
3102 pHba->FwDebugBLEDflag_P = pHba->FwDebugBLEDvalue_P + 1;
3103 pHba->FwDebugStrLength_P = pHba->FwDebugBuffer_P +
3104 FW_DEBUG_STR_LENGTH_OFFSET;
3105 pHba->FwDebugBuffer_P += buf[2];
3106 pHba->FwDebugFlags = 0;
3110 return 0;
3113 static int adpt_i2o_build_sys_table(void)
3115 adpt_hba* pHba = hba_chain;
3116 int count = 0;
3118 if (sys_tbl)
3119 dma_free_coherent(&pHba->pDev->dev, sys_tbl_len,
3120 sys_tbl, sys_tbl_pa);
3122 sys_tbl_len = sizeof(struct i2o_sys_tbl) + // Header + IOPs
3123 (hba_count) * sizeof(struct i2o_sys_tbl_entry);
3125 sys_tbl = dma_alloc_coherent(&pHba->pDev->dev,
3126 sys_tbl_len, &sys_tbl_pa, GFP_KERNEL);
3127 if (!sys_tbl) {
3128 printk(KERN_WARNING "SysTab Set failed. Out of memory.\n");
3129 return -ENOMEM;
3131 memset(sys_tbl, 0, sys_tbl_len);
3133 sys_tbl->num_entries = hba_count;
3134 sys_tbl->version = I2OVERSION;
3135 sys_tbl->change_ind = sys_tbl_ind++;
3137 for(pHba = hba_chain; pHba; pHba = pHba->next) {
3138 u64 addr;
3139 // Get updated Status Block so we have the latest information
3140 if (adpt_i2o_status_get(pHba)) {
3141 sys_tbl->num_entries--;
3142 continue; // try next one
3145 sys_tbl->iops[count].org_id = pHba->status_block->org_id;
3146 sys_tbl->iops[count].iop_id = pHba->unit + 2;
3147 sys_tbl->iops[count].seg_num = 0;
3148 sys_tbl->iops[count].i2o_version = pHba->status_block->i2o_version;
3149 sys_tbl->iops[count].iop_state = pHba->status_block->iop_state;
3150 sys_tbl->iops[count].msg_type = pHba->status_block->msg_type;
3151 sys_tbl->iops[count].frame_size = pHba->status_block->inbound_frame_size;
3152 sys_tbl->iops[count].last_changed = sys_tbl_ind - 1; // ??
3153 sys_tbl->iops[count].iop_capabilities = pHba->status_block->iop_capabilities;
3154 addr = pHba->base_addr_phys + 0x40;
3155 sys_tbl->iops[count].inbound_low = dma_low(addr);
3156 sys_tbl->iops[count].inbound_high = dma_high(addr);
3158 count++;
3161 #ifdef DEBUG
3163 u32 *table = (u32*)sys_tbl;
3164 printk(KERN_DEBUG"sys_tbl_len=%d in 32bit words\n",(sys_tbl_len >>2));
3165 for(count = 0; count < (sys_tbl_len >>2); count++) {
3166 printk(KERN_INFO "sys_tbl[%d] = %0#10x\n",
3167 count, table[count]);
3170 #endif
3172 return 0;
3177 * Dump the information block associated with a given unit (TID)
3180 static void adpt_i2o_report_hba_unit(adpt_hba* pHba, struct i2o_device *d)
3182 char buf[64];
3183 int unit = d->lct_data.tid;
3185 printk(KERN_INFO "TID %3.3d ", unit);
3187 if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 3, buf, 16)>=0)
3189 buf[16]=0;
3190 printk(" Vendor: %-12.12s", buf);
3192 if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 4, buf, 16)>=0)
3194 buf[16]=0;
3195 printk(" Device: %-12.12s", buf);
3197 if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 6, buf, 8)>=0)
3199 buf[8]=0;
3200 printk(" Rev: %-12.12s\n", buf);
3202 #ifdef DEBUG
3203 printk(KERN_INFO "\tClass: %.21s\n", adpt_i2o_get_class_name(d->lct_data.class_id));
3204 printk(KERN_INFO "\tSubclass: 0x%04X\n", d->lct_data.sub_class);
3205 printk(KERN_INFO "\tFlags: ");
3207 if(d->lct_data.device_flags&(1<<0))
3208 printk("C"); // ConfigDialog requested
3209 if(d->lct_data.device_flags&(1<<1))
3210 printk("U"); // Multi-user capable
3211 if(!(d->lct_data.device_flags&(1<<4)))
3212 printk("P"); // Peer service enabled!
3213 if(!(d->lct_data.device_flags&(1<<5)))
3214 printk("M"); // Mgmt service enabled!
3215 printk("\n");
3216 #endif
3219 #ifdef DEBUG
3221 * Do i2o class name lookup
3223 static const char *adpt_i2o_get_class_name(int class)
3225 int idx = 16;
3226 	static const char *i2o_class_name[] = {
3227 "Executive",
3228 "Device Driver Module",
3229 "Block Device",
3230 "Tape Device",
3231 "LAN Interface",
3232 "WAN Interface",
3233 "Fibre Channel Port",
3234 "Fibre Channel Device",
3235 "SCSI Device",
3236 "ATE Port",
3237 "ATE Device",
3238 "Floppy Controller",
3239 "Floppy Device",
3240 "Secondary Bus Port",
3241 "Peer Transport Agent",
3242 "Peer Transport",
3243 "Unknown"
3246 switch(class&0xFFF) {
3247 case I2O_CLASS_EXECUTIVE:
3248 idx = 0; break;
3249 case I2O_CLASS_DDM:
3250 idx = 1; break;
3251 case I2O_CLASS_RANDOM_BLOCK_STORAGE:
3252 idx = 2; break;
3253 case I2O_CLASS_SEQUENTIAL_STORAGE:
3254 idx = 3; break;
3255 case I2O_CLASS_LAN:
3256 idx = 4; break;
3257 case I2O_CLASS_WAN:
3258 idx = 5; break;
3259 case I2O_CLASS_FIBRE_CHANNEL_PORT:
3260 idx = 6; break;
3261 case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
3262 idx = 7; break;
3263 case I2O_CLASS_SCSI_PERIPHERAL:
3264 idx = 8; break;
3265 case I2O_CLASS_ATE_PORT:
3266 idx = 9; break;
3267 case I2O_CLASS_ATE_PERIPHERAL:
3268 idx = 10; break;
3269 case I2O_CLASS_FLOPPY_CONTROLLER:
3270 idx = 11; break;
3271 case I2O_CLASS_FLOPPY_DEVICE:
3272 idx = 12; break;
3273 case I2O_CLASS_BUS_ADAPTER_PORT:
3274 idx = 13; break;
3275 case I2O_CLASS_PEER_TRANSPORT_AGENT:
3276 idx = 14; break;
3277 case I2O_CLASS_PEER_TRANSPORT:
3278 idx = 15; break;
3280 return i2o_class_name[idx];
3282 #endif
3285 static s32 adpt_i2o_hrt_get(adpt_hba* pHba)
3287 u32 msg[6];
3288 int ret, size = sizeof(i2o_hrt);
3290 do {
3291 if (pHba->hrt == NULL) {
3292 pHba->hrt = dma_alloc_coherent(&pHba->pDev->dev,
3293 size, &pHba->hrt_pa, GFP_KERNEL);
3294 if (pHba->hrt == NULL) {
3295 printk(KERN_CRIT "%s: Hrt Get failed; Out of memory.\n", pHba->name);
3296 return -ENOMEM;
3300 msg[0]= SIX_WORD_MSG_SIZE| SGL_OFFSET_4;
3301 msg[1]= I2O_CMD_HRT_GET<<24 | HOST_TID<<12 | ADAPTER_TID;
3302 msg[2]= 0;
3303 msg[3]= 0;
3304 msg[4]= (0xD0000000 | size); /* Simple transaction */
3305 msg[5]= (u32)pHba->hrt_pa; /* Dump it here */
3307 if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg),20))) {
3308 printk(KERN_ERR "%s: Unable to get HRT (status=%#10x)\n", pHba->name, ret);
3309 return ret;
3312 if (pHba->hrt->num_entries * pHba->hrt->entry_len << 2 > size) {
3313 int newsize = pHba->hrt->num_entries * pHba->hrt->entry_len << 2;
3314 dma_free_coherent(&pHba->pDev->dev, size,
3315 pHba->hrt, pHba->hrt_pa);
3316 size = newsize;
3317 pHba->hrt = NULL;
3319 } while(pHba->hrt == NULL);
3320 return 0;
3324 * Query one scalar group value or a whole scalar group.
3326 static int adpt_i2o_query_scalar(adpt_hba* pHba, int tid,
3327 int group, int field, void *buf, int buflen)
3329 u16 opblk[] = { 1, 0, I2O_PARAMS_FIELD_GET, group, 1, field };
3330 u8 *opblk_va;
3331 dma_addr_t opblk_pa;
3332 u8 *resblk_va;
3333 dma_addr_t resblk_pa;
3335 int size;
3337 /* 8 bytes for header */
3338 resblk_va = dma_alloc_coherent(&pHba->pDev->dev,
3339 sizeof(u8) * (8 + buflen), &resblk_pa, GFP_KERNEL);
3340 if (resblk_va == NULL) {
3341 printk(KERN_CRIT "%s: query scalar failed; Out of memory.\n", pHba->name);
3342 return -ENOMEM;
3345 opblk_va = dma_alloc_coherent(&pHba->pDev->dev,
3346 sizeof(opblk), &opblk_pa, GFP_KERNEL);
3347 if (opblk_va == NULL) {
3348 dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3349 resblk_va, resblk_pa);
3350 		printk(KERN_CRIT "%s: query operation failed; Out of memory.\n",
3351 pHba->name);
3352 return -ENOMEM;
3354 if (field == -1) /* whole group */
3355 opblk[4] = -1;
3357 memcpy(opblk_va, opblk, sizeof(opblk));
3358 size = adpt_i2o_issue_params(I2O_CMD_UTIL_PARAMS_GET, pHba, tid,
3359 opblk_va, opblk_pa, sizeof(opblk),
3360 resblk_va, resblk_pa, sizeof(u8)*(8+buflen));
3361 dma_free_coherent(&pHba->pDev->dev, sizeof(opblk), opblk_va, opblk_pa);
3362 if (size == -ETIME) {
3363 dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3364 resblk_va, resblk_pa);
3365 printk(KERN_WARNING "%s: issue params failed; Timed out.\n", pHba->name);
3366 return -ETIME;
3367 } else if (size == -EINTR) {
3368 dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3369 resblk_va, resblk_pa);
3370 printk(KERN_WARNING "%s: issue params failed; Interrupted.\n", pHba->name);
3371 return -EINTR;
3374 memcpy(buf, resblk_va+8, buflen); /* cut off header */
3376 dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3377 resblk_va, resblk_pa);
3378 if (size < 0)
3379 return size;
3381 return buflen;
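/* Typical use of adpt_i2o_query_scalar() elsewhere in this file:
 * passing field == -1 pulls a whole scalar group, e.g. in
 * adpt_i2o_reparse_lct():
 *
 *	u32 buf[10];
 *	adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32);
 *
 * after which buf[0] packs type/flags/bus number and buf[1] the SCSI
 * id, as decoded by that caller. */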
3385 /* Issue UTIL_PARAMS_GET or UTIL_PARAMS_SET
3387 * This function can be used for all UtilParamsGet/Set operations.
3388 * The OperationBlock is given in opblk-buffer,
3389 * and results are returned in resblk-buffer.
3390 * Note that the minimum sized resblk is 8 bytes and contains
3391 * ResultCount, ErrorInfoSize, BlockStatus and BlockSize.
3393 static int adpt_i2o_issue_params(int cmd, adpt_hba* pHba, int tid,
3394 void *opblk_va, dma_addr_t opblk_pa, int oplen,
3395 void *resblk_va, dma_addr_t resblk_pa, int reslen)
3397 u32 msg[9];
3398 u32 *res = (u32 *)resblk_va;
3399 int wait_status;
3401 msg[0] = NINE_WORD_MSG_SIZE | SGL_OFFSET_5;
3402 msg[1] = cmd << 24 | HOST_TID << 12 | tid;
3403 msg[2] = 0;
3404 msg[3] = 0;
3405 msg[4] = 0;
3406 msg[5] = 0x54000000 | oplen; /* OperationBlock */
3407 msg[6] = (u32)opblk_pa;
3408 msg[7] = 0xD0000000 | reslen; /* ResultBlock */
3409 msg[8] = (u32)resblk_pa;
3411 if ((wait_status = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 20))) {
3412 printk("adpt_i2o_issue_params: post_wait failed (%p)\n", resblk_va);
3413 return wait_status; /* -DetailedStatus */
3416 if (res[1]&0x00FF0000) { /* BlockStatus != SUCCESS */
3417 printk(KERN_WARNING "%s: %s - Error:\n ErrorInfoSize = 0x%02x, "
3418 "BlockStatus = 0x%02x, BlockSize = 0x%04x\n",
3419 pHba->name,
3420 (cmd == I2O_CMD_UTIL_PARAMS_SET) ? "PARAMS_SET"
3421 : "PARAMS_GET",
3422 res[1]>>24, (res[1]>>16)&0xFF, res[1]&0xFFFF);
3423 return -((res[1] >> 16) & 0xFF); /* -BlockStatus */
3426 return 4 + ((res[1] & 0x0000FFFF) << 2); /* bytes used in resblk */
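/* Worked example of the return value above: res[1] == 0x00000002 means
 * BlockStatus 0 (success) with a BlockSize of 2 dwords, so the call
 * reports 4 + (2 << 2) = 12 bytes used in resblk. */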
3430 static s32 adpt_i2o_quiesce_hba(adpt_hba* pHba)
3432 u32 msg[4];
3433 int ret;
3435 adpt_i2o_status_get(pHba);
3437 /* SysQuiesce discarded if IOP not in READY or OPERATIONAL state */
3439 if((pHba->status_block->iop_state != ADAPTER_STATE_READY) &&
3440 (pHba->status_block->iop_state != ADAPTER_STATE_OPERATIONAL)){
3441 return 0;
3444 msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
3445 msg[1] = I2O_CMD_SYS_QUIESCE<<24|HOST_TID<<12|ADAPTER_TID;
3446 msg[2] = 0;
3447 msg[3] = 0;
3449 if((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
3450 printk(KERN_INFO"dpti%d: Unable to quiesce (status=%#x).\n",
3451 pHba->unit, -ret);
3452 } else {
3453 printk(KERN_INFO"dpti%d: Quiesced.\n",pHba->unit);
3456 adpt_i2o_status_get(pHba);
3457 return ret;
3462 * Enable IOP. Allows the IOP to resume external operations.
3464 static int adpt_i2o_enable_hba(adpt_hba* pHba)
3466 u32 msg[4];
3467 int ret;
3469 adpt_i2o_status_get(pHba);
3470 if(!pHba->status_block){
3471 return -ENOMEM;
3473 /* Enable only allowed on READY state */
3474 if(pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL)
3475 return 0;
3477 if(pHba->status_block->iop_state != ADAPTER_STATE_READY)
3478 return -EINVAL;
3480 msg[0]=FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
3481 msg[1]=I2O_CMD_SYS_ENABLE<<24|HOST_TID<<12|ADAPTER_TID;
3482 msg[2]= 0;
3483 msg[3]= 0;
3485 if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
3486 printk(KERN_WARNING"%s: Could not enable (status=%#10x).\n",
3487 pHba->name, ret);
3488 } else {
3489 PDEBUG("%s: Enabled.\n", pHba->name);
3492 adpt_i2o_status_get(pHba);
3493 return ret;
3497 static int adpt_i2o_systab_send(adpt_hba* pHba)
3499 u32 msg[12];
3500 int ret;
3502 msg[0] = I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6;
3503 msg[1] = I2O_CMD_SYS_TAB_SET<<24 | HOST_TID<<12 | ADAPTER_TID;
3504 msg[2] = 0;
3505 msg[3] = 0;
3506 msg[4] = (0<<16) | ((pHba->unit+2) << 12); /* Host 0 IOP ID (unit + 2) */
3507 msg[5] = 0; /* Segment 0 */
3510 * Provide three SGL-elements:
3511 * System table (SysTab), Private memory space declaration and
3512 * Private i/o space declaration
3514 msg[6] = 0x54000000 | sys_tbl_len;
3515 msg[7] = (u32)sys_tbl_pa;
3516 msg[8] = 0x54000000 | 0;
3517 msg[9] = 0;
3518 msg[10] = 0xD4000000 | 0;
3519 msg[11] = 0;
3521 if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 120))) {
3522 printk(KERN_INFO "%s: Unable to set SysTab (status=%#10x).\n",
3523 pHba->name, ret);
3525 #ifdef DEBUG
3526 else {
3527 PINFO("%s: SysTab set.\n", pHba->name);
3529 #endif
3531 return ret;
3535 /*============================================================================
3537 *============================================================================
3541 #ifdef UARTDELAY
3543 static void adpt_delay(int millisec)
3545 int i;
3546 for (i = 0; i < millisec; i++) {
3547 udelay(1000); /* delay for one millisecond */
3551 #endif
3553 static struct scsi_host_template driver_template = {
3554 .module = THIS_MODULE,
3555 .name = "dpt_i2o",
3556 .proc_name = "dpt_i2o",
3557 .show_info = adpt_show_info,
3558 .info = adpt_info,
3559 .queuecommand = adpt_queue,
3560 .eh_abort_handler = adpt_abort,
3561 .eh_device_reset_handler = adpt_device_reset,
3562 .eh_bus_reset_handler = adpt_bus_reset,
3563 .eh_host_reset_handler = adpt_reset,
3564 .bios_param = adpt_bios_param,
3565 .slave_configure = adpt_slave_configure,
3566 .can_queue = MAX_TO_IOP_MESSAGES,
3567 .this_id = 7,
3568 .use_clustering = ENABLE_CLUSTERING,
3571 static int __init adpt_init(void)
3573 int error;
3574 adpt_hba *pHba, *next;
3576 printk("Loading Adaptec I2O RAID: Version " DPT_I2O_VERSION "\n");
3578 error = adpt_detect(&driver_template);
3579 if (error < 0)
3580 return error;
3581 if (hba_chain == NULL)
3582 return -ENODEV;
3584 for (pHba = hba_chain; pHba; pHba = pHba->next) {
3585 error = scsi_add_host(pHba->host, &pHba->pDev->dev);
3586 if (error)
3587 goto fail;
3588 scsi_scan_host(pHba->host);
3590 return 0;
3591 fail:
3592 for (pHba = hba_chain; pHba; pHba = next) {
3593 next = pHba->next;
3594 scsi_remove_host(pHba->host);
3596 return error;
3599 static void __exit adpt_exit(void)
3601 adpt_hba *pHba, *next;
3603 for (pHba = hba_chain; pHba; pHba = pHba->next)
3604 scsi_remove_host(pHba->host);
3605 for (pHba = hba_chain; pHba; pHba = next) {
3606 next = pHba->next;
3607 adpt_release(pHba->host);
3611 module_init(adpt_init);
3612 module_exit(adpt_exit);
3614 MODULE_LICENSE("GPL");