// SPDX-License-Identifier: GPL-2.0-or-later
/***************************************************************************
                          dpti.c  -  description
                             -------------------
    begin                : Thu Sep 7 2000
    copyright            : (C) 2000 by Adaptec

                           July 30, 2001 First version being submitted
                           for inclusion in the kernel.  V2.4

    See Documentation/scsi/dpti.rst for history, notes, license info
    and credits
 ***************************************************************************/
/***************************************************************************
 * Sat Dec 20 2003 Go Taniguchi <go@turbolinux.co.jp>
 - Support 2.6 kernel and DMA-mapping
 - ioctl fix for raid tools
 - use schedule_timeout in long long loop
 **************************************************************************/

/*#define DEBUG 1 */
/*#define UARTDELAY 1 */
#include <linux/module.h>

MODULE_AUTHOR("Deanna Bonds, with _lots_ of help from Mark Salyzyn");
MODULE_DESCRIPTION("Adaptec I2O RAID Driver");

////////////////////////////////////////////////////////////////
#include <linux/ioctl.h>	/* For SCSI-Passthrough */
#include <linux/uaccess.h>

#include <linux/stat.h>
#include <linux/slab.h>		/* for kmalloc() */
#include <linux/pci.h>		/* for PCI support */
#include <linux/proc_fs.h>
#include <linux/blkdev.h>
#include <linux/delay.h>	/* for udelay */
#include <linux/interrupt.h>
#include <linux/kernel.h>	/* for printk */
#include <linux/sched.h>
#include <linux/reboot.h>
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>

#include <linux/timer.h>
#include <linux/string.h>
#include <linux/ioport.h>
#include <linux/mutex.h>

#include <asm/processor.h>	/* for boot_cpu_data */
#include <asm/pgtable.h>
#include <asm/io.h>		/* for virt_to_bus, etc. */

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>

#include "dpt/dptsig.h"
#include "dpti.h"
/*============================================================================
 * Create a binary signature - this is read by dptsig
 * Needed for our management apps
 *============================================================================
 */
static DEFINE_MUTEX(adpt_mutex);
static dpt_sig_S DPTI_sig = {
	{'d', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION,
#ifdef __i386__
	PROC_INTEL, PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM,
#elif defined(__ia64__)
	PROC_INTEL, PROC_IA64,
#elif defined(__sparc__)
	PROC_ULTRASPARC, PROC_ULTRASPARC,
#elif defined(__alpha__)
	PROC_ALPHA, PROC_ALPHA,
#else
	(-1),(-1),
#endif
	FT_HBADRVR, 0, OEM_DPT, OS_LINUX, CAP_OVERLAP, DEV_ALL,
	ADF_ALL_SC5, 0, 0, DPT_VERSION, DPT_REVISION, DPT_SUBREVISION,
	DPT_MONTH, DPT_DAY, DPT_YEAR, "Adaptec Linux I2O RAID Driver"
};
/*============================================================================
 * Globals
 *============================================================================
 */

static DEFINE_MUTEX(adpt_configuration_lock);

static struct i2o_sys_tbl *sys_tbl;
static dma_addr_t sys_tbl_pa;
static int sys_tbl_ind;
static int sys_tbl_len;

static adpt_hba* hba_chain = NULL;
static int hba_count = 0;

static struct class *adpt_sysfs_class;
static long adpt_unlocked_ioctl(struct file *, unsigned int, unsigned long);
#ifdef CONFIG_COMPAT
static long compat_adpt_ioctl(struct file *, unsigned int, unsigned long);
#endif

static const struct file_operations adpt_fops = {
	.unlocked_ioctl	= adpt_unlocked_ioctl,
	.open		= adpt_open,
	.release	= adpt_close,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= compat_adpt_ioctl,
#endif
	.llseek		= noop_llseek,
};
/* Structures and definitions for synchronous message posting.
 * See adpt_i2o_post_wait() for description
 * */
struct adpt_i2o_post_wait_data
{
	int status;
	u32 id;
	adpt_wait_queue_head_t *wq;
	struct adpt_i2o_post_wait_data *next;
};

static struct adpt_i2o_post_wait_data *adpt_post_wait_queue = NULL;
static u32 adpt_post_wait_id = 0;
static DEFINE_SPINLOCK(adpt_post_wait_lock);
/*============================================================================
 * 				Functions
 *============================================================================
 */
static inline int dpt_dma64(adpt_hba *pHba)
{
	return (sizeof(dma_addr_t) > 4 && (pHba)->dma64);
}

static inline u32 dma_high(dma_addr_t addr)
{
	return upper_32_bits(addr);
}

static inline u32 dma_low(dma_addr_t addr)
{
	return (u32)addr;
}
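/*
 * Worked example (illustrative values, not from the driver): for
 * addr == 0x0000000123456780ULL, dma_high(addr) yields 0x00000001 and
 * dma_low(addr) yields 0x23456780. The message builders below emit the
 * low word first, then the high word, when a 64-bit SGE is used.
 */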
static u8 adpt_read_blink_led(adpt_hba* host)
{
	if (host->FwDebugBLEDflag_P) {
		if( readb(host->FwDebugBLEDflag_P) == 0xbc ){
			return readb(host->FwDebugBLEDvalue_P);
		}
	}
	return 0;
}
/*============================================================================
 * Scsi host template interface functions
 *============================================================================
 */

#ifdef MODULE
static struct pci_device_id dptids[] = {
	{ PCI_DPT_VENDOR_ID, PCI_DPT_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
	{ PCI_DPT_VENDOR_ID, PCI_DPT_RAPTOR_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
	{ 0, }
};
#endif

MODULE_DEVICE_TABLE(pci,dptids);
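/*
 * Controller bring-up, as implemented in adpt_detect() below: each IOP
 * found on the PCI bus (INIT state) is activated into HOLD, given a
 * system table, brought online into OPERATIONAL, and only then has its
 * LCT read and parsed into the per-channel device tables.
 */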
static int adpt_detect(struct scsi_host_template* sht)
{
	struct pci_dev *pDev = NULL;
	adpt_hba *pHba;
	adpt_hba *next;

	PINFO("Detecting Adaptec I2O RAID controllers...\n");

	/* search for all Adaptec I2O RAID cards */
	while ((pDev = pci_get_device( PCI_DPT_VENDOR_ID, PCI_ANY_ID, pDev))) {
		if(pDev->device == PCI_DPT_DEVICE_ID ||
		   pDev->device == PCI_DPT_RAPTOR_DEVICE_ID){
			if(adpt_install_hba(sht, pDev) ){
				PERROR("Could not Init an I2O RAID device\n");
				PERROR("Will not try to detect others.\n");
				return hba_count-1;
			}
			pci_dev_get(pDev);
		}
	}

	/* In INIT state, Activate IOPs */
	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		// Activate does get status, init outbound, and get hrt
		if (adpt_i2o_activate_hba(pHba) < 0) {
			adpt_i2o_delete_hba(pHba);
		}
	}

	/* Active IOPs in HOLD state */

rebuild_sys_tab:
	if (hba_chain == NULL)
		return 0;

	/*
	 * If build_sys_table fails, we kill everything and bail
	 * as we can't init the IOPs w/o a system table
	 */
	if (adpt_i2o_build_sys_table() < 0) {
		adpt_i2o_sys_shutdown();
		return 0;
	}

	PDEBUG("HBA's in HOLD state\n");

	/* If an IOP doesn't come online, we need to rebuild the System table */
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (adpt_i2o_online_hba(pHba) < 0) {
			adpt_i2o_delete_hba(pHba);
			goto rebuild_sys_tab;
		}
	}

	/* Active IOPs now in OPERATIONAL state */
	PDEBUG("HBA's in OPERATIONAL state\n");

	printk("dpti: If you have a lot of devices this could take a few minutes.\n");
	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		printk(KERN_INFO"%s: Reading the hardware resource table.\n", pHba->name);
		if (adpt_i2o_lct_get(pHba) < 0){
			adpt_i2o_delete_hba(pHba);
			continue;
		}

		if (adpt_i2o_parse_lct(pHba) < 0){
			adpt_i2o_delete_hba(pHba);
			continue;
		}
		adpt_inquiry(pHba);
	}

	adpt_sysfs_class = class_create(THIS_MODULE, "dpt_i2o");
	if (IS_ERR(adpt_sysfs_class)) {
		printk(KERN_WARNING"dpti: unable to create dpt_i2o class\n");
		adpt_sysfs_class = NULL;
	}

	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		if (adpt_scsi_host_alloc(pHba, sht) < 0){
			adpt_i2o_delete_hba(pHba);
			continue;
		}
		pHba->initialized = TRUE;
		pHba->state &= ~DPTI_STATE_RESET;
		if (adpt_sysfs_class) {
			struct device *dev = device_create(adpt_sysfs_class,
				NULL, MKDEV(DPTI_I2O_MAJOR, pHba->unit), NULL,
				"dpti%d", pHba->unit);
			if (IS_ERR(dev)) {
				printk(KERN_WARNING"dpti%d: unable to "
					"create device in dpt_i2o class\n",
					pHba->unit);
			}
		}
	}

	// Register our control device node
	// nodes will need to be created in /dev to access this
	// the nodes can not be created from within the driver
	if (hba_count && register_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER, &adpt_fops)) {
		adpt_i2o_sys_shutdown();
		return 0;
	}
	return hba_count;
}
static void adpt_release(adpt_hba *pHba)
{
	struct Scsi_Host *shost = pHba->host;

	scsi_remove_host(shost);
//	adpt_i2o_quiesce_hba(pHba);
	adpt_i2o_delete_hba(pHba);
	scsi_host_put(shost);
}
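/*
 * adpt_inquiry() below builds a private I2O_CMD_SCSI_EXEC message with a
 * single SGE. In the SGE flag word 0xD0000000|direction|len, 0x10000000
 * marks a simple address element and 0x04000000 the data direction (the
 * same flag bits the ioctl passthrough path tests); the two top bits
 * appear to mark last-element / end-of-buffer per the usual I2O SGL
 * conventions - an inference, not stated in this file.
 */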
static void adpt_inquiry(adpt_hba* pHba)
{
	u32 msg[17];
	u32 *mptr;
	u32 *lenptr;
	int direction;
	int scsidir;
	u32 len;
	u32 reqlen;
	u8* buf;
	dma_addr_t addr;
	u8 scb[16];
	s32 rcode;

	memset(msg, 0, sizeof(msg));
	buf = dma_alloc_coherent(&pHba->pDev->dev, 80, &addr, GFP_KERNEL);
	if(!buf){
		printk(KERN_ERR"%s: Could not allocate buffer\n",pHba->name);
		return;
	}
	memset((void*)buf, 0, 36);

	len = 36;
	direction = 0x00000000;
	scsidir  =0x40000000;	// DATA IN  (iop<--dev)

	if (dpt_dma64(pHba))
		reqlen = 17;		// SINGLE SGE, 64 bit
	else
		reqlen = 14;		// SINGLE SGE, 32 bit
	/* Stick the headers on */
	msg[0] = reqlen<<16 | SGL_OFFSET_12;
	msg[1] = (0xff<<24|HOST_TID<<12|ADAPTER_TID);
	msg[2] = 0;
	msg[3] = 0;
	// Adaptec/DPT Private stuff
	msg[4] = I2O_CMD_SCSI_EXEC|DPT_ORGANIZATION_ID<<16;
	msg[5] = ADAPTER_TID | 1<<16 /* Interpret*/;
	/* Direction, disconnect ok | sense data | simple queue , CDBLen */
	// I2O_SCB_FLAG_ENABLE_DISCONNECT |
	// I2O_SCB_FLAG_SIMPLE_QUEUE_TAG |
	// I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
	msg[6] = scsidir|0x20a00000| 6 /* cmd len*/;

	mptr=msg+7;

	memset(scb, 0, sizeof(scb));
	// Write SCSI command into the message - always 16 byte block
	scb[0] = INQUIRY;
	scb[1] = 0;
	scb[2] = 0;
	scb[3] = 0;
	scb[4] = 36;
	scb[5] = 0;
	// Don't care about the rest of scb

	memcpy(mptr, scb, sizeof(scb));
	mptr+=4;
	lenptr=mptr++;		/* Remember me - fill in when we know */

	/* Now fill in the SGList and command */
	*lenptr = len;
	if (dpt_dma64(pHba)) {
		*mptr++ = (0x7C<<24)+(2<<16)+0x02; /* Enable 64 bit */
		*mptr++ = 1 << PAGE_SHIFT;
		*mptr++ = 0xD0000000|direction|len;
		*mptr++ = dma_low(addr);
		*mptr++ = dma_high(addr);
	} else {
		*mptr++ = 0xD0000000|direction|len;
		*mptr++ = addr;
	}

	// Send it on its way
	rcode = adpt_i2o_post_wait(pHba, msg, reqlen<<2, 120);
	if (rcode != 0) {
		sprintf(pHba->detail, "Adaptec I2O RAID");
		printk(KERN_INFO "%s: Inquiry Error (%d)\n",pHba->name,rcode);
		if (rcode != -ETIME && rcode != -EINTR)
			dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
	} else {
		memset(pHba->detail, 0, sizeof(pHba->detail));
		memcpy(&(pHba->detail), "Vendor: Adaptec ", 16);
		memcpy(&(pHba->detail[16]), " Model: ", 8);
		memcpy(&(pHba->detail[24]), (u8*) &buf[16], 16);
		memcpy(&(pHba->detail[40]), " FW: ", 4);
		memcpy(&(pHba->detail[44]), (u8*) &buf[32], 4);
		pHba->detail[48] = '\0';	/* precautionary */
		dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
	}
	adpt_i2o_status_get(pHba);
	return ;
}
static int adpt_slave_configure(struct scsi_device * device)
{
	struct Scsi_Host *host = device->host;
	adpt_hba* pHba;

	pHba = (adpt_hba *) host->hostdata[0];

	if (host->can_queue && device->tagged_supported) {
		scsi_change_queue_depth(device,
				host->can_queue - 1);
	}
	return 0;
}
static int adpt_queue_lck(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd *))
{
	adpt_hba* pHba = NULL;
	struct adpt_device* pDev = NULL;	/* dpt per device information */

	cmd->scsi_done = done;
	/*
	 * SCSI REQUEST_SENSE commands will be executed automatically by the
	 * Host Adapter for any errors, so they should not be executed
	 * explicitly unless the Sense Data is zero indicating that no error
	 * occurred.
	 */

	if ((cmd->cmnd[0] == REQUEST_SENSE) && (cmd->sense_buffer[0] != 0)) {
		cmd->result = (DID_OK << 16);
		cmd->scsi_done(cmd);
		return 0;
	}

	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
	if (!pHba) {
		return FAILED;
	}

	rmb();
	if ((pHba->state) & DPTI_STATE_RESET)
		return SCSI_MLQUEUE_HOST_BUSY;

	// TODO if the cmd->device is offline then we may need to issue a bus rescan
	// followed by a get_lct to see if the device is there anymore
	if((pDev = (struct adpt_device*) (cmd->device->hostdata)) == NULL) {
		/*
		 * First command request for this device.  Set up a pointer
		 * to the device structure.  This should be a TEST_UNIT_READY
		 * command from scan_scsis_single.
		 */
		if ((pDev = adpt_find_device(pHba, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun)) == NULL) {
			// TODO: if any luns are at this bus, scsi id then fake a TEST_UNIT_READY and INQUIRY response
			// with type 7F (for all luns less than the max for this bus,id) so the lun scan will continue.
			cmd->result = (DID_NO_CONNECT << 16);
			cmd->scsi_done(cmd);
			return 0;
		}
		cmd->device->hostdata = pDev;
	}
	pDev->pScsi_dev = cmd->device;

	/*
	 * If we are being called from when the device is being reset,
	 * delay processing of the command until later.
	 */
	if (pDev->state & DPTI_DEV_RESET ) {
		return FAILED;
	}
	return adpt_scsi_to_i2o(pHba, cmd, pDev);
}

static DEF_SCSI_QCMD(adpt_queue)
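/*
 * DEF_SCSI_QCMD() generates the adpt_queue() wrapper that takes the host
 * lock (irqsave) around adpt_queue_lck(), so the _lck body above runs
 * with the lock held and local interrupts disabled.
 */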
static int adpt_bios_param(struct scsi_device *sdev, struct block_device *dev,
		sector_t capacity, int geom[])
{
	int heads=-1;
	int sectors=-1;
	int cylinders=-1;

	// *** First lets set the default geometry ****

	// If the capacity is less than 0x2000
	if (capacity < 0x2000 ) {	// floppy
		heads = 18;
		sectors = 2;
	}
	// else if between 0x2000 and 0x20000
	else if (capacity < 0x20000) {
		heads = 64;
		sectors = 32;
	}
	// else if between 0x20000 and 0x40000
	else if (capacity < 0x40000) {
		heads = 65;
		sectors = 63;
	}
	// else if between 0x40000 and 0x80000
	else if (capacity < 0x80000) {
		heads = 128;
		sectors = 63;
	}
	// else if greater than 0x80000
	else {
		heads = 255;
		sectors = 63;
	}
	cylinders = sector_div(capacity, heads * sectors);

	// Special case if CDROM
	if(sdev->type == 5) {  // CDROM
		heads = 252;
		sectors = 63;
		cylinders = 1111;
	}

	geom[0] = heads;
	geom[1] = sectors;
	geom[2] = cylinders;

	PDEBUG("adpt_bios_param: exit\n");
	return 0;
}
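/*
 * Worked example (illustrative): capacity = 0x50000 (327680 sectors)
 * falls in the 0x40000..0x80000 band, so heads = 128 and sectors = 63;
 * 327680 / (128*63) gives quotient 40, remainder 5120. Note that
 * sector_div() divides its first argument in place and returns the
 * remainder, so "cylinders = sector_div(...)" above assigns the
 * remainder, not the quotient - kept as shipped, only flagged here.
 */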
static const char *adpt_info(struct Scsi_Host *host)
{
	adpt_hba* pHba;

	pHba = (adpt_hba *) host->hostdata[0];
	return (char *) (pHba->detail);
}
static int adpt_show_info(struct seq_file *m, struct Scsi_Host *host)
{
	struct adpt_device* d;
	int id;
	int chan;
	adpt_hba* pHba;
	int unit;

	// Find HBA (host bus adapter) we are looking for
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->host == host) {
			break;	/* found adapter */
		}
	}
	mutex_unlock(&adpt_configuration_lock);
	if (pHba == NULL) {
		return 0;
	}
	host = pHba->host;

	seq_printf(m, "Adaptec I2O RAID Driver Version: %s\n\n", DPT_I2O_VERSION);
	seq_printf(m, "%s\n", pHba->detail);
	seq_printf(m, "SCSI Host=scsi%d  Control Node=/dev/%s  irq=%d\n",
			pHba->host->host_no, pHba->name, host->irq);
	seq_printf(m, "\tpost fifo size  = %d\n\treply fifo size = %d\n\tsg table size   = %d\n\n",
			host->can_queue, (int) pHba->reply_fifo_size , host->sg_tablesize);

	seq_puts(m, "Devices:\n");
	for(chan = 0; chan < MAX_CHANNEL; chan++) {
		for(id = 0; id < MAX_ID; id++) {
			d = pHba->channel[chan].device[id];
			while(d) {
				seq_printf(m,"\t%-24.24s", d->pScsi_dev->vendor);
				seq_printf(m," Rev: %-8.8s\n", d->pScsi_dev->rev);

				unit = d->pI2o_dev->lct_data.tid;
				seq_printf(m, "\tTID=%d, (Channel=%d, Target=%d, Lun=%llu)  (%s)\n\n",
					unit, (int)d->scsi_channel, (int)d->scsi_id, d->scsi_lun,
					scsi_device_online(d->pScsi_dev)? "online":"offline");
				d = d->next_lun;
			}
		}
	}
	return 0;
}
/*
 *	Turn a pointer to ioctl reply data into a u32 'context'
 */
static u32 adpt_ioctl_to_context(adpt_hba * pHba, void *reply)
{
#if BITS_PER_LONG == 32
	return (u32)(unsigned long)reply;
#else
	ulong flags = 0;
	u32 nr, i;

	spin_lock_irqsave(pHba->host->host_lock, flags);
	nr = ARRAY_SIZE(pHba->ioctl_reply_context);
	for (i = 0; i < nr; i++) {
		if (pHba->ioctl_reply_context[i] == NULL) {
			pHba->ioctl_reply_context[i] = reply;
			break;
		}
	}
	spin_unlock_irqrestore(pHba->host->host_lock, flags);
	if (i >= nr) {
		printk(KERN_WARNING"%s: Too many outstanding "
				"ioctl commands\n", pHba->name);
		return (u32)-1;
	}

	return i;
#endif
}

/*
 *	Go from a u32 'context' to a pointer to ioctl reply data.
 */
static void *adpt_ioctl_from_context(adpt_hba *pHba, u32 context)
{
#if BITS_PER_LONG == 32
	return (void *)(unsigned long)context;
#else
	void *p = pHba->ioctl_reply_context[context];
	pHba->ioctl_reply_context[context] = NULL;

	return p;
#endif
}
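/*
 * Design note: a reply "context" must fit in the 32-bit transaction
 * context word of an I2O message. On 32-bit kernels the reply pointer
 * itself is used; on 64-bit kernels the pointer is parked in the
 * ioctl_reply_context[] table and the table index travels through the
 * hardware instead, being looked up (and cleared) again in the ISR.
 */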
/*===========================================================================
 * Error Handling routines
 *===========================================================================
 */

static int adpt_abort(struct scsi_cmnd * cmd)
{
	adpt_hba* pHba = NULL;	/* host bus adapter structure */
	struct adpt_device* dptdevice;	/* dpt per device information */
	u32 msg[5];
	int rcode;

	pHba = (adpt_hba*) cmd->device->host->hostdata[0];
	printk(KERN_INFO"%s: Trying to Abort\n",pHba->name);
	if ((dptdevice = (void*) (cmd->device->hostdata)) == NULL) {
		printk(KERN_ERR "%s: Unable to abort: No device in cmnd\n",pHba->name);
		return FAILED;
	}

	memset(msg, 0, sizeof(msg));
	msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = I2O_CMD_SCSI_ABORT<<24|HOST_TID<<12|dptdevice->tid;
	msg[2] = 0;
	msg[3] = 0;
	/* Add 1 to avoid firmware treating it as invalid command */
	msg[4] = cmd->request->tag + 1;
	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER);
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		if(rcode == -EOPNOTSUPP ){
			printk(KERN_INFO"%s: Abort cmd not supported\n",pHba->name);
			return FAILED;
		}
		printk(KERN_INFO"%s: Abort failed.\n",pHba->name);
		return FAILED;
	}
	printk(KERN_INFO"%s: Abort complete.\n",pHba->name);
	return SUCCESS;
}
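/*
 * Note the tag convention: the abort message carries request->tag + 1,
 * and the ISR undoes the offset with scsi_host_find_tag(..., tag - 1),
 * because the firmware would treat a tag of 0 as an invalid command.
 */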
#define I2O_DEVICE_RESET 0x27
// This is the same for BLK and SCSI devices
// NOTE this is wrong in the i2o.h definitions
// This is not currently supported by our adapter but we issue it anyway
static int adpt_device_reset(struct scsi_cmnd* cmd)
{
	adpt_hba* pHba;
	u32 msg[4];
	u32 rcode;
	int old_state;
	struct adpt_device* d = cmd->device->hostdata;

	pHba = (void*) cmd->device->host->hostdata[0];
	printk(KERN_INFO"%s: Trying to reset device\n",pHba->name);
	if (!d) {
		printk(KERN_INFO"%s: Reset Device: Device Not found\n",pHba->name);
		return FAILED;
	}
	memset(msg, 0, sizeof(msg));
	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = (I2O_DEVICE_RESET<<24|HOST_TID<<12|d->tid);
	msg[2] = 0;
	msg[3] = 0;

	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	old_state = d->state;
	d->state |= DPTI_DEV_RESET;
	rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER);
	d->state = old_state;
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		if(rcode == -EOPNOTSUPP ){
			printk(KERN_INFO"%s: Device reset not supported\n",pHba->name);
			return FAILED;
		}
		printk(KERN_INFO"%s: Device reset failed\n",pHba->name);
		return FAILED;
	} else {
		printk(KERN_INFO"%s: Device reset successful\n",pHba->name);
		return SUCCESS;
	}
}
#define I2O_HBA_BUS_RESET 0x87
// This version of bus reset is called by the eh_error handler
static int adpt_bus_reset(struct scsi_cmnd* cmd)
{
	adpt_hba* pHba;
	u32 msg[4];
	u32 rcode;

	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
	memset(msg, 0, sizeof(msg));
	printk(KERN_WARNING"%s: Bus reset: SCSI Bus %d: tid: %d\n",pHba->name, cmd->device->channel,pHba->channel[cmd->device->channel].tid );
	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = (I2O_HBA_BUS_RESET<<24|HOST_TID<<12|pHba->channel[cmd->device->channel].tid);
	msg[2] = 0;
	msg[3] = 0;
	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER);
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		printk(KERN_WARNING"%s: Bus reset failed.\n",pHba->name);
		return FAILED;
	} else {
		printk(KERN_WARNING"%s: Bus reset success.\n",pHba->name);
		return SUCCESS;
	}
}
// This version of reset is called by the eh_error_handler
static int __adpt_reset(struct scsi_cmnd* cmd)
{
	adpt_hba* pHba;
	int rcode;
	char name[32];

	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
	strncpy(name, pHba->name, sizeof(name));
	printk(KERN_WARNING"%s: Hba Reset: scsi id %d: tid: %d\n", name, cmd->device->channel, pHba->channel[cmd->device->channel].tid);
	rcode =	adpt_hba_reset(pHba);
	if(rcode == 0){
		printk(KERN_WARNING"%s: HBA reset complete\n", name);
		return SUCCESS;
	} else {
		printk(KERN_WARNING"%s: HBA reset failed (%x)\n", name, rcode);
		return FAILED;
	}
}

static int adpt_reset(struct scsi_cmnd* cmd)
{
	int rc;

	spin_lock_irq(cmd->device->host->host_lock);
	rc = __adpt_reset(cmd);
	spin_unlock_irq(cmd->device->host->host_lock);

	return rc;
}
// This version of reset is called by the ioctls and indirectly from eh_error_handler via adpt_reset
static int adpt_hba_reset(adpt_hba* pHba)
{
	int rcode;

	pHba->state |= DPTI_STATE_RESET;

	// Activate does get status, init outbound, and get hrt
	if ((rcode=adpt_i2o_activate_hba(pHba)) < 0) {
		printk(KERN_ERR "%s: Could not activate\n", pHba->name);
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}

	if ((rcode=adpt_i2o_build_sys_table()) < 0) {
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	PDEBUG("%s: in HOLD state\n",pHba->name);

	if ((rcode=adpt_i2o_online_hba(pHba)) < 0) {
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	PDEBUG("%s: in OPERATIONAL state\n",pHba->name);

	if ((rcode=adpt_i2o_lct_get(pHba)) < 0){
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}

	if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0){
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	pHba->state &= ~DPTI_STATE_RESET;

	scsi_host_complete_all_commands(pHba->host, DID_RESET);
	return 0;	/* return success */
}
/*===========================================================================
 *
 *===========================================================================
 */

static void adpt_i2o_sys_shutdown(void)
{
	adpt_hba *pHba, *pNext;
	struct adpt_i2o_post_wait_data *p1, *old;

	printk(KERN_INFO "Shutting down Adaptec I2O controllers.\n");
	printk(KERN_INFO "   This could take a few minutes if there are many devices attached\n");
	/* Delete all IOPs from the controller chain */
	/* They should have already been released by the
	 * scsi-core
	 */
	for (pHba = hba_chain; pHba; pHba = pNext) {
		pNext = pHba->next;
		adpt_i2o_delete_hba(pHba);
	}

	/* Remove any timed-out entries from the wait queue. */
//	spin_lock_irqsave(&adpt_post_wait_lock, flags);
	/* Nothing should be outstanding at this point so just
	 * free them
	 */
	for(p1 = adpt_post_wait_queue; p1;) {
		old = p1;
		p1 = p1->next;
		kfree(old);
	}
//	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
	adpt_post_wait_queue = NULL;

	printk(KERN_INFO "Adaptec I2O controllers down.\n");
}
static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev)
{
	adpt_hba* pHba = NULL;
	adpt_hba* p = NULL;
	ulong base_addr0_phys = 0;
	ulong base_addr1_phys = 0;
	u32 hba_map0_area_size = 0;
	u32 hba_map1_area_size = 0;
	void __iomem *base_addr_virt = NULL;
	void __iomem *msg_addr_virt = NULL;
	int dma64 = 0;

	int raptorFlag = FALSE;

	if(pci_enable_device(pDev)) {
		return -EINVAL;
	}

	if (pci_request_regions(pDev, "dpt_i2o")) {
		PERROR("dpti: adpt_config_hba: pci request region failed\n");
		return -EINVAL;
	}

	pci_set_master(pDev);

	/*
	 *	See if we should enable dma64 mode.
	 */
	if (sizeof(dma_addr_t) > 4 &&
	    dma_get_required_mask(&pDev->dev) > DMA_BIT_MASK(32) &&
	    dma_set_mask(&pDev->dev, DMA_BIT_MASK(64)) == 0)
		dma64 = 1;

	if (!dma64 && dma_set_mask(&pDev->dev, DMA_BIT_MASK(32)) != 0)
		return -EINVAL;

	/* adapter only supports message blocks below 4GB */
	dma_set_coherent_mask(&pDev->dev, DMA_BIT_MASK(32));

	base_addr0_phys = pci_resource_start(pDev,0);
	hba_map0_area_size = pci_resource_len(pDev,0);

	// Check if standard PCI card or single BAR Raptor
	if(pDev->device == PCI_DPT_DEVICE_ID){
		if(pDev->subsystem_device >=0xc032 && pDev->subsystem_device <= 0xc03b){
			// Raptor card with this device id needs 4M
			hba_map0_area_size = 0x400000;
		} else { // Not Raptor - it is a PCI card
			if(hba_map0_area_size > 0x100000 ){
				hba_map0_area_size = 0x100000;
			}
		}
	} else {// Raptor split BAR config
		// Use BAR1 in this configuration
		base_addr1_phys = pci_resource_start(pDev,1);
		hba_map1_area_size = pci_resource_len(pDev,1);
		raptorFlag = TRUE;
	}

#if BITS_PER_LONG == 64
	/*
	 *	The original Adaptec 64 bit driver has this comment here:
	 *	"x86_64 machines need more optimal mappings"
	 *
	 *	I assume some HBAs report ridiculously large mappings
	 *	and we need to limit them on platforms with IOMMUs.
	 */
	if (raptorFlag == TRUE) {
		if (hba_map0_area_size > 128)
			hba_map0_area_size = 128;
		if (hba_map1_area_size > 524288)
			hba_map1_area_size = 524288;
	} else {
		if (hba_map0_area_size > 524288)
			hba_map0_area_size = 524288;
	}
#endif

	base_addr_virt = ioremap(base_addr0_phys,hba_map0_area_size);
	if (!base_addr_virt) {
		pci_release_regions(pDev);
		PERROR("dpti: adpt_config_hba: io remap failed\n");
		return -EINVAL;
	}

	if(raptorFlag == TRUE) {
		msg_addr_virt = ioremap(base_addr1_phys, hba_map1_area_size );
		if (!msg_addr_virt) {
			PERROR("dpti: adpt_config_hba: io remap failed on BAR1\n");
			iounmap(base_addr_virt);
			pci_release_regions(pDev);
			return -EINVAL;
		}
	} else {
		msg_addr_virt = base_addr_virt;
	}

	// Allocate and zero the data structure
	pHba = kzalloc(sizeof(adpt_hba), GFP_KERNEL);
	if (!pHba) {
		if (msg_addr_virt != base_addr_virt)
			iounmap(msg_addr_virt);
		iounmap(base_addr_virt);
		pci_release_regions(pDev);
		return -ENOMEM;
	}

	mutex_lock(&adpt_configuration_lock);

	if(hba_chain != NULL){
		for(p = hba_chain; p->next; p = p->next);
		p->next = pHba;
	} else {
		hba_chain = pHba;
	}
	pHba->next = NULL;
	pHba->unit = hba_count;
	sprintf(pHba->name, "dpti%d", hba_count);
	hba_count++;

	mutex_unlock(&adpt_configuration_lock);

	pHba->pDev = pDev;
	pHba->base_addr_phys = base_addr0_phys;

	// Set up the Virtual Base Address of the I2O Device
	pHba->base_addr_virt = base_addr_virt;
	pHba->msg_addr_virt = msg_addr_virt;
	pHba->irq_mask = base_addr_virt+0x30;
	pHba->post_port = base_addr_virt+0x40;
	pHba->reply_port = base_addr_virt+0x44;

	pHba->hrt = NULL;
	pHba->lct = NULL;
	pHba->lct_size = 0;
	pHba->status_block = NULL;
	pHba->post_count = 0;
	pHba->state = DPTI_STATE_RESET;
	pHba->pDev = pDev;
	pHba->devices = NULL;
	pHba->dma64 = dma64;

	// Initializing the spinlocks
	spin_lock_init(&pHba->state_lock);
	spin_lock_init(&adpt_post_wait_lock);

	if(raptorFlag == 0){
		printk(KERN_INFO "Adaptec I2O RAID controller"
				 " %d at %p size=%x irq=%d%s\n",
			hba_count-1, base_addr_virt,
			hba_map0_area_size, pDev->irq,
			dma64 ? " (64-bit DMA)" : "");
	} else {
		printk(KERN_INFO"Adaptec I2O RAID controller %d irq=%d%s\n",
			hba_count-1, pDev->irq,
			dma64 ? " (64-bit DMA)" : "");
		printk(KERN_INFO"   BAR0 %p - size= %x\n",base_addr_virt,hba_map0_area_size);
		printk(KERN_INFO"   BAR1 %p - size= %x\n",msg_addr_virt,hba_map1_area_size);
	}

	if (request_irq (pDev->irq, adpt_isr, IRQF_SHARED, pHba->name, pHba)) {
		printk(KERN_ERR"%s: Couldn't register IRQ %d\n", pHba->name, pDev->irq);
		adpt_i2o_delete_hba(pHba);
		return -EINVAL;
	}

	return 0;
}
static void adpt_i2o_delete_hba(adpt_hba* pHba)
{
	adpt_hba* p1;
	adpt_hba* p2;
	struct i2o_device* d;
	struct i2o_device* next;
	int i;
	int j;
	struct adpt_device* pDev;
	struct adpt_device* pNext;


	mutex_lock(&adpt_configuration_lock);
	if(pHba->host){
		free_irq(pHba->host->irq, pHba);
	}
	p2 = NULL;
	for( p1 = hba_chain; p1; p2 = p1,p1=p1->next){
		if(p1 == pHba) {
			if(p2) {
				p2->next = p1->next;
			} else {
				hba_chain = p1->next;
			}
			break;
		}
	}

	hba_count--;
	mutex_unlock(&adpt_configuration_lock);

	iounmap(pHba->base_addr_virt);
	pci_release_regions(pHba->pDev);
	if(pHba->msg_addr_virt != pHba->base_addr_virt){
		iounmap(pHba->msg_addr_virt);
	}
	if(pHba->FwDebugBuffer_P)
		iounmap(pHba->FwDebugBuffer_P);
	if(pHba->hrt) {
		dma_free_coherent(&pHba->pDev->dev,
			pHba->hrt->num_entries * pHba->hrt->entry_len << 2,
			pHba->hrt, pHba->hrt_pa);
	}
	if(pHba->lct) {
		dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
			pHba->lct, pHba->lct_pa);
	}
	if(pHba->status_block) {
		dma_free_coherent(&pHba->pDev->dev, sizeof(i2o_status_block),
			pHba->status_block, pHba->status_block_pa);
	}
	if(pHba->reply_pool) {
		dma_free_coherent(&pHba->pDev->dev,
			pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
			pHba->reply_pool, pHba->reply_pool_pa);
	}

	for(d = pHba->devices; d ; d = next){
		next = d->next;
		kfree(d);
	}
	for(i = 0 ; i < pHba->top_scsi_channel ; i++){
		for(j = 0; j < MAX_ID; j++){
			if(pHba->channel[i].device[j] != NULL){
				for(pDev = pHba->channel[i].device[j]; pDev; pDev = pNext){
					pNext = pDev->next_lun;
					kfree(pDev);
				}
			}
		}
	}
	pci_dev_put(pHba->pDev);
	if (adpt_sysfs_class)
		device_destroy(adpt_sysfs_class,
				MKDEV(DPTI_I2O_MAJOR, pHba->unit));
	kfree(pHba);

	if(hba_count <= 0){
		unregister_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER);
		if (adpt_sysfs_class) {
			class_destroy(adpt_sysfs_class);
			adpt_sysfs_class = NULL;
		}
	}
}
static struct adpt_device* adpt_find_device(adpt_hba* pHba, u32 chan, u32 id, u64 lun)
{
	struct adpt_device* d;

	if(chan < 0 || chan >= MAX_CHANNEL)
		return NULL;

	d = pHba->channel[chan].device[id];
	if(!d || d->tid == 0) {
		return NULL;
	}

	/* If it is the only lun at that address then this should match*/
	if(d->scsi_lun == lun){
		return d;
	}

	/* else we need to look through all the luns */
	for(d=d->next_lun ; d ; d = d->next_lun){
		if(d->scsi_lun == lun){
			return d;
		}
	}
	return NULL;
}
static int adpt_i2o_post_wait(adpt_hba* pHba, u32* msg, int len, int timeout)
{
	// I used my own version of the WAIT_QUEUE_HEAD
	// to handle some version differences
	// When embedded in the kernel this could go back to the vanilla one
	ADPT_DECLARE_WAIT_QUEUE_HEAD(adpt_wq_i2o_post);
	int status = 0;
	ulong flags = 0;
	struct adpt_i2o_post_wait_data *p1, *p2;
	struct adpt_i2o_post_wait_data *wait_data =
		kmalloc(sizeof(struct adpt_i2o_post_wait_data), GFP_ATOMIC);
	DECLARE_WAITQUEUE(wait, current);

	if (!wait_data)
		return -ENOMEM;

	/*
	 * The spin locking is needed to keep anyone from playing
	 * with the queue pointers and id while we do the same
	 */
	spin_lock_irqsave(&adpt_post_wait_lock, flags);
	// TODO we need a MORE unique way of getting ids
	// to support async LCT get
	wait_data->next = adpt_post_wait_queue;
	adpt_post_wait_queue = wait_data;
	adpt_post_wait_id++;
	adpt_post_wait_id &= 0x7fff;
	wait_data->id = adpt_post_wait_id;
	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);

	wait_data->wq = &adpt_wq_i2o_post;
	wait_data->status = -ETIMEDOUT;

	add_wait_queue(&adpt_wq_i2o_post, &wait);

	msg[2] |= 0x80000000 | ((u32)wait_data->id);
	timeout *= HZ;
	if((status = adpt_i2o_post_this(pHba, msg, len)) == 0){
		set_current_state(TASK_INTERRUPTIBLE);
		if(pHba->host)
			spin_unlock_irq(pHba->host->host_lock);
		if (!timeout)
			schedule();
		else{
			timeout = schedule_timeout(timeout);
			if (timeout == 0) {
				// I/O issued, but cannot get result in
				// specified time. Freeing resources is
				// dangerous.
				status = -ETIME;
			}
		}
		if(pHba->host)
			spin_lock_irq(pHba->host->host_lock);
	}
	remove_wait_queue(&adpt_wq_i2o_post, &wait);

	if(status == -ETIMEDOUT){
		printk(KERN_INFO"dpti%d: POST WAIT TIMEOUT\n",pHba->unit);
		// We will have to free the wait_data memory during shutdown
		return status;
	}

	/* Remove the entry from the queue. */
	p2 = NULL;
	spin_lock_irqsave(&adpt_post_wait_lock, flags);
	for(p1 = adpt_post_wait_queue; p1; p2 = p1, p1 = p1->next) {
		if(p1 == wait_data) {
			if(p1->status == I2O_DETAIL_STATUS_UNSUPPORTED_FUNCTION ) {
				status = -EOPNOTSUPP;
			}
			if(p2) {
				p2->next = p1->next;
			} else {
				adpt_post_wait_queue = p1->next;
			}
			break;
		}
	}
	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);

	kfree(wait_data);

	return status;
}
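/*
 * Usage sketch (mirrors the callers above, e.g. adpt_device_reset()):
 *
 *	u32 msg[4];
 *	memset(msg, 0, sizeof(msg));
 *	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
 *	msg[1] = I2O_DEVICE_RESET<<24|HOST_TID<<12|tid;
 *	rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER);
 *
 * post_wait tags msg[2] with 0x80000000 | id so the ISR can match the
 * reply back to this waiter via adpt_i2o_post_wait_complete().
 */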
static s32 adpt_i2o_post_this(adpt_hba* pHba, u32* data, int len)
{
	u32 m = EMPTY_QUEUE;
	u32 __iomem *msg;
	ulong timeout = jiffies + 30*HZ;

	do {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}
		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"dpti%d: Timeout waiting for message frame!\n", pHba->unit);
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while(m == EMPTY_QUEUE);

	msg = pHba->msg_addr_virt + m;
	memcpy_toio(msg, data, len);
	wmb();

	//post message
	writel(m, pHba->post_port);
	wmb();

	return 0;
}
static void adpt_i2o_post_wait_complete(u32 context, int status)
{
	struct adpt_i2o_post_wait_data *p1 = NULL;
	/*
	 * We need to search through the adpt_post_wait
	 * queue to see if the given message is still
	 * outstanding.  If not, it means that the IOP
	 * took longer to respond to the message than we
	 * had allowed and timer has already expired.
	 * Not much we can do about that except log
	 * it for debug purposes, increase timeout, and recompile.
	 *
	 * Lock needed to keep anyone from moving queue pointers
	 * around while we're looking through them.
	 */

	context &= 0x7fff;

	spin_lock(&adpt_post_wait_lock);
	for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
		if(p1->id == context) {
			p1->status = status;
			spin_unlock(&adpt_post_wait_lock);
			wake_up_interruptible(p1->wq);
			return;
		}
	}
	spin_unlock(&adpt_post_wait_lock);
	// If this happens we lose commands that probably really completed
	printk(KERN_DEBUG"dpti: Could Not find task %d in wait queue\n",context);
	printk(KERN_DEBUG"      Tasks in wait queue:\n");
	for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
		printk(KERN_DEBUG"           %d\n",p1->id);
	}
	return;
}
static s32 adpt_i2o_reset_hba(adpt_hba* pHba)
{
	u32 msg[8];
	u8* status;
	dma_addr_t addr;
	u32 m = EMPTY_QUEUE ;
	ulong timeout = jiffies + (TMOUT_IOPRESET*HZ);

	if(pHba->initialized == FALSE) {	// First time reset should be quick
		timeout = jiffies + (25*HZ);
	} else {
		adpt_i2o_quiesce_hba(pHba);
	}

	do {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}
		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"Timeout waiting for message!\n");
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while (m == EMPTY_QUEUE);

	status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
	if(status == NULL) {
		adpt_send_nop(pHba, m);
		printk(KERN_ERR"IOP reset failed - no free memory.\n");
		return -ENOMEM;
	}
	memset(status,0,4);

	msg[0]=EIGHT_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1]=I2O_CMD_ADAPTER_RESET<<24|HOST_TID<<12|ADAPTER_TID;
	msg[2]=0;
	msg[3]=0;
	msg[4]=0;
	msg[5]=0;
	msg[6]=dma_low(addr);
	msg[7]=dma_high(addr);

	memcpy_toio(pHba->msg_addr_virt+m, msg, sizeof(msg));
	wmb();
	writel(m, pHba->post_port);
	wmb();

	while(*status == 0){
		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"%s: IOP Reset Timeout\n",pHba->name);
			/* We lose 4 bytes of "status" here, but we cannot
			   free these because controller may awake and corrupt
			   those bytes at any time */
			/* dma_free_coherent(&pHba->pDev->dev, 4, buf, addr); */
			return -ETIMEDOUT;
		}
		rmb();
		schedule_timeout_uninterruptible(1);
	}

	if(*status == 0x01 /*I2O_EXEC_IOP_RESET_IN_PROGRESS*/) {
		PDEBUG("%s: Reset in progress...\n", pHba->name);
		// Here we wait for a message frame to become available,
		// indicating that the reset has finished
		do {
			rmb();
			m = readl(pHba->post_port);
			if (m != EMPTY_QUEUE) {
				break;
			}
			if(time_after(jiffies,timeout)){
				printk(KERN_ERR "%s:Timeout waiting for IOP Reset.\n",pHba->name);
				/* We lose 4 bytes of "status" here, but we
				   cannot free these because controller may
				   awake and corrupt those bytes at any time */
				/* dma_free_coherent(&pHba->pDev->dev, 4, buf, addr); */
				return -ETIMEDOUT;
			}
			schedule_timeout_uninterruptible(1);
		} while (m == EMPTY_QUEUE);
		// Flush the offset
		adpt_send_nop(pHba, m);
	}
	adpt_i2o_status_get(pHba);
	if(*status == 0x02 ||
			pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
		printk(KERN_WARNING"%s: Reset reject, trying to clear\n",
				pHba->name);
	} else {
		PDEBUG("%s: Reset completed.\n", pHba->name);
	}

	dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
#ifdef UARTDELAY
	// This delay is to allow someone attached to the card through the debug UART to
	// set up the dump levels that they want before the rest of the initialization sequence
	adpt_delay(20000);
#endif
	return 0;
}
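/*
 * Reset handshake summary: the ADAPTER_RESET message points the IOP at a
 * 4-byte coherent "status" buffer, which the firmware writes while the
 * host polls it; 0x01 means reset in progress, 0x02 reset rejected. The
 * buffer is deliberately leaked on the timeout paths above because the
 * controller may still write into it after the host has given up.
 */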
static int adpt_i2o_parse_lct(adpt_hba* pHba)
{
	int i;
	int max;
	int tid;
	struct i2o_device *d;
	i2o_lct *lct = pHba->lct;
	u8 bus_no = 0;
	s16 scsi_id;
	u64 scsi_lun;
	u32 buf[10]; // larger than 7, or 8 ...
	struct adpt_device* pDev;

	if (lct == NULL) {
		printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
		return -1;
	}

	max = lct->table_size;
	max -= 3;
	max /= 9;

	for(i=0;i<max;i++) {
		if( lct->lct_entry[i].user_tid != 0xfff){
			/*
			 * If we have hidden devices, we need to inform the upper layers about
			 * the possible maximum id reference to handle device access when
			 * an array is disassembled. This code has no other purpose but to
			 * allow us future access to devices that are currently hidden
			 * behind arrays, hotspares or have not been configured (JBOD mode).
			 */
			if( lct->lct_entry[i].class_id != I2O_CLASS_RANDOM_BLOCK_STORAGE &&
			    lct->lct_entry[i].class_id != I2O_CLASS_SCSI_PERIPHERAL &&
			    lct->lct_entry[i].class_id != I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
				continue;
			}
			tid = lct->lct_entry[i].tid;
			// I2O_DPT_DEVICE_INFO_GROUP_NO;
			if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
				continue;
			}
			bus_no = buf[0]>>16;
			scsi_id = buf[1];
			scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
			if(bus_no >= MAX_CHANNEL) {	// Something wrong skip it
				printk(KERN_WARNING"%s: Channel number %d out of range \n", pHba->name, bus_no);
				continue;
			}
			if (scsi_id >= MAX_ID){
				printk(KERN_WARNING"%s: SCSI ID %d out of range \n", pHba->name, scsi_id);
				continue;
			}
			if(bus_no > pHba->top_scsi_channel){
				pHba->top_scsi_channel = bus_no;
			}
			if(scsi_id > pHba->top_scsi_id){
				pHba->top_scsi_id = scsi_id;
			}
			if(scsi_lun > pHba->top_scsi_lun){
				pHba->top_scsi_lun = scsi_lun;
			}
			continue;
		}
		d = kmalloc(sizeof(struct i2o_device), GFP_KERNEL);
		if(d==NULL)
		{
			printk(KERN_CRIT"%s: Out of memory for I2O device data.\n",pHba->name);
			return -ENOMEM;
		}

		d->controller = pHba;
		d->next = NULL;

		memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));

		d->flags = 0;
		tid = d->lct_data.tid;
		adpt_i2o_report_hba_unit(pHba, d);
		adpt_i2o_install_device(pHba, d);
	}
	bus_no = 0;
	for(d = pHba->devices; d ; d = d->next) {
		if(d->lct_data.class_id == I2O_CLASS_BUS_ADAPTER_PORT ||
		   d->lct_data.class_id == I2O_CLASS_FIBRE_CHANNEL_PORT){
			tid = d->lct_data.tid;
			// TODO get the bus_no from hrt-but for now they are in order
			//bus_no =
			if(bus_no > pHba->top_scsi_channel){
				pHba->top_scsi_channel = bus_no;
			}
			pHba->channel[bus_no].type = d->lct_data.class_id;
			pHba->channel[bus_no].tid = tid;
			if(adpt_i2o_query_scalar(pHba, tid, 0x0200, -1, buf, 28)>=0)
			{
				pHba->channel[bus_no].scsi_id = buf[1];
				PDEBUG("Bus %d - SCSI ID %d.\n", bus_no, buf[1]);
			}
			// TODO remove - this is just until we get from hrt
			bus_no++;
			if(bus_no >= MAX_CHANNEL) {	// Something wrong skip it
				printk(KERN_WARNING"%s: Channel number %d out of range - LCT\n", pHba->name, bus_no);
				break;
			}
		}
	}

	// Setup adpt_device table
	for(d = pHba->devices; d ; d = d->next) {
		if(d->lct_data.class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
		   d->lct_data.class_id == I2O_CLASS_SCSI_PERIPHERAL ||
		   d->lct_data.class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){

			tid = d->lct_data.tid;
			scsi_id = -1;
			// I2O_DPT_DEVICE_INFO_GROUP_NO;
			if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)>=0) {
				bus_no = buf[0]>>16;
				scsi_id = buf[1];
				scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
				if(bus_no >= MAX_CHANNEL) {	// Something wrong skip it
					continue;
				}
				if (scsi_id >= MAX_ID) {
					continue;
				}
				if( pHba->channel[bus_no].device[scsi_id] == NULL){
					pDev = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
					if(pDev == NULL) {
						return -ENOMEM;
					}
					pHba->channel[bus_no].device[scsi_id] = pDev;
				} else {
					for( pDev = pHba->channel[bus_no].device[scsi_id];
							pDev->next_lun; pDev = pDev->next_lun){
					}
					pDev->next_lun = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
					if(pDev->next_lun == NULL) {
						return -ENOMEM;
					}
					pDev = pDev->next_lun;
				}
				pDev->tid = tid;
				pDev->scsi_channel = bus_no;
				pDev->scsi_id = scsi_id;
				pDev->scsi_lun = scsi_lun;
				pDev->pI2o_dev = d;
				d->owner = pDev;
				pDev->type = (buf[0])&0xff;
				pDev->flags = (buf[0]>>8)&0xff;
				if(scsi_id > pHba->top_scsi_id){
					pHba->top_scsi_id = scsi_id;
				}
				if(scsi_lun > pHba->top_scsi_lun){
					pHba->top_scsi_lun = scsi_lun;
				}
			}
			if(scsi_id == -1){
				printk(KERN_WARNING"Could not find SCSI ID for %s\n",
						d->lct_data.identity_tag);
			}
		}
	}
	return 0;
}
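/*
 * Layout of the DPT device-info scalar (group 0x8000) as decoded above:
 * buf[0] bits 16-31 hold the bus number, bits 8-15 the device flags and
 * bits 0-7 the device type; buf[1] is the SCSI ID and buf[2..] the LUN
 * in SCSI-3 format (hence the scsilun_to_int() conversion).
 */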
/*
 *	Each I2O controller has a chain of devices on it - these match
 *	the useful parts of the LCT of the board.
 */

static int adpt_i2o_install_device(adpt_hba* pHba, struct i2o_device *d)
{
	mutex_lock(&adpt_configuration_lock);
	d->controller=pHba;
	d->owner=NULL;
	d->next=pHba->devices;
	d->prev=NULL;
	if (pHba->devices != NULL){
		pHba->devices->prev=d;
	}
	pHba->devices=d;
	*d->dev_name = 0;

	mutex_unlock(&adpt_configuration_lock);
	return 0;
}
static int adpt_open(struct inode *inode, struct file *file)
{
	int minor;
	adpt_hba* pHba;

	mutex_lock(&adpt_mutex);
	//TODO check for root access

	minor = iminor(inode);
	if (minor >= hba_count) {
		mutex_unlock(&adpt_mutex);
		return -ENXIO;
	}
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->unit == minor) {
			break;	/* found adapter */
		}
	}
	if (pHba == NULL) {
		mutex_unlock(&adpt_configuration_lock);
		mutex_unlock(&adpt_mutex);
		return -ENXIO;
	}

//	if(pHba->in_use){
//		mutex_unlock(&adpt_configuration_lock);
//		return -EBUSY;
//	}

	pHba->in_use = 1;
	mutex_unlock(&adpt_configuration_lock);
	mutex_unlock(&adpt_mutex);

	return 0;
}
static int adpt_close(struct inode *inode, struct file *file)
{
	int minor;
	adpt_hba* pHba;

	minor = iminor(inode);
	if (minor >= hba_count) {
		return -ENXIO;
	}
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->unit == minor) {
			break;	/* found adapter */
		}
	}
	mutex_unlock(&adpt_configuration_lock);
	if (pHba == NULL) {
		return -ENXIO;
	}

	pHba->in_use = 0;

	return 0;
}
static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
{
	u32 msg[MAX_MESSAGE_SIZE];
	u32* reply = NULL;
	u32 size = 0;
	u32 reply_size = 0;
	u32 __user *user_msg = arg;
	u32 __user * user_reply = NULL;
	void **sg_list = NULL;
	u32 sg_offset = 0;
	u32 sg_count = 0;
	int sg_index = 0;
	u32 i = 0;
	u32 rcode = 0;
	void *p = NULL;
	dma_addr_t addr;
	ulong flags = 0;

	memset(&msg, 0, MAX_MESSAGE_SIZE*4);
	// get user msg size in u32s
	if(get_user(size, &user_msg[0])){
		return -EFAULT;
	}
	size = size>>16;

	user_reply = &user_msg[size];
	if(size > MAX_MESSAGE_SIZE){
		return -EFAULT;
	}
	size *= 4; // Convert to bytes

	/* Copy in the user's I2O command */
	if(copy_from_user(msg, user_msg, size)) {
		return -EFAULT;
	}
	get_user(reply_size, &user_reply[0]);
	reply_size = reply_size>>16;
	if(reply_size > REPLY_FRAME_SIZE){
		reply_size = REPLY_FRAME_SIZE;
	}
	reply_size *= 4;
	reply = kzalloc(REPLY_FRAME_SIZE*4, GFP_KERNEL);
	if(reply == NULL) {
		printk(KERN_WARNING"%s: Could not allocate reply buffer\n",pHba->name);
		return -ENOMEM;
	}
	sg_offset = (msg[0]>>4)&0xf;
	msg[2] = 0x40000000; // IOCTL context
	msg[3] = adpt_ioctl_to_context(pHba, reply);
	if (msg[3] == (u32)-1) {
		rcode = -EBUSY;
		goto free;
	}

	sg_list = kcalloc(pHba->sg_tablesize, sizeof(*sg_list), GFP_KERNEL);
	if (!sg_list) {
		rcode = -ENOMEM;
		goto free;
	}
	if(sg_offset) {
		// TODO add 64 bit API
		struct sg_simple_element *sg = (struct sg_simple_element*) (msg+sg_offset);
		sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
		if (sg_count > pHba->sg_tablesize){
			printk(KERN_DEBUG"%s:IOCTL SG List too large (%u)\n", pHba->name,sg_count);
			rcode = -EINVAL;
			goto free;
		}

		for(i = 0; i < sg_count; i++) {
			int sg_size;

			if (!(sg[i].flag_count & 0x10000000 /*I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT*/)) {
				printk(KERN_DEBUG"%s:Bad SG element %d - not simple (%x)\n",pHba->name,i, sg[i].flag_count);
				rcode = -EINVAL;
				goto cleanup;
			}
			sg_size = sg[i].flag_count & 0xffffff;
			/* Allocate memory for the transfer */
			p = dma_alloc_coherent(&pHba->pDev->dev, sg_size, &addr, GFP_KERNEL);
			if(!p) {
				printk(KERN_DEBUG"%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
						pHba->name,sg_size,i,sg_count);
				rcode = -ENOMEM;
				goto cleanup;
			}
			sg_list[sg_index++] = p; // sglist indexed with input frame, not our internal frame.
			/* Copy in the user's SG buffer if necessary */
			if(sg[i].flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR*/) {
				// sg_simple_element API is 32 bit
				if (copy_from_user(p,(void __user *)(ulong)sg[i].addr_bus, sg_size)) {
					printk(KERN_DEBUG"%s: Could not copy SG buf %d FROM user\n",pHba->name,i);
					rcode = -EFAULT;
					goto cleanup;
				}
			}
			/* sg_simple_element API is 32 bit, but addr < 4GB */
			sg[i].addr_bus = addr;
		}
	}

	do {
		/*
		 * Stop any new commands from entering the
		 * controller while processing the ioctl
		 */
		if (pHba->host) {
			scsi_block_requests(pHba->host);
			spin_lock_irqsave(pHba->host->host_lock, flags);
		}
		rcode = adpt_i2o_post_wait(pHba, msg, size, FOREVER);
		if (rcode != 0)
			printk("adpt_i2o_passthru: post wait failed %d %p\n",
					rcode, reply);
		if (pHba->host) {
			spin_unlock_irqrestore(pHba->host->host_lock, flags);
			scsi_unblock_requests(pHba->host);
		}
	} while (rcode == -ETIMEDOUT);

	if(rcode){
		goto cleanup;
	}

	if(sg_offset) {
		/* Copy back the Scatter Gather buffers back to user space */
		u32 j;
		// TODO add 64 bit API
		struct sg_simple_element* sg;
		int sg_size;

		// re-acquire the original message to handle correctly the sg copy operation
		memset(&msg, 0, MAX_MESSAGE_SIZE*4);
		// get user msg size in u32s
		if(get_user(size, &user_msg[0])){
			rcode = -EFAULT;
			goto cleanup;
		}
		size = size>>16;
		size *= 4;
		if (size > MAX_MESSAGE_SIZE) {
			rcode = -EINVAL;
			goto cleanup;
		}
		/* Copy in the user's I2O command */
		if (copy_from_user (msg, user_msg, size)) {
			rcode = -EFAULT;
			goto cleanup;
		}
		sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);

		// TODO add 64 bit API
		sg = (struct sg_simple_element*)(msg + sg_offset);
		for (j = 0; j < sg_count; j++) {
			/* Copy out the SG list to user's buffer if necessary */
			if(! (sg[j].flag_count & 0x4000000 /*I2O_SGL_FLAGS_DIR*/)) {
				sg_size = sg[j].flag_count & 0xffffff;
				// sg_simple_element API is 32 bit
				if (copy_to_user((void __user *)(ulong)sg[j].addr_bus,sg_list[j], sg_size)) {
					printk(KERN_WARNING"%s: Could not copy %p TO user %x\n",pHba->name, sg_list[j], sg[j].addr_bus);
					rcode = -EFAULT;
					goto cleanup;
				}
			}
		}
	}

	/* Copy back the reply to user space */
	if (reply_size) {
		// we wrote our own values for context - now restore the user supplied ones
		if(copy_from_user(reply+2, user_msg+2, sizeof(u32)*2)) {
			printk(KERN_WARNING"%s: Could not copy message context FROM user\n",pHba->name);
			rcode = -EFAULT;
		}
		if(copy_to_user(user_reply, reply, reply_size)) {
			printk(KERN_WARNING"%s: Could not copy reply TO user\n",pHba->name);
			rcode = -EFAULT;
		}
	}


cleanup:
	if (rcode != -ETIME && rcode != -EINTR) {
		struct sg_simple_element *sg =
				(struct sg_simple_element*) (msg +sg_offset);
		while(sg_index) {
			if(sg_list[--sg_index]) {
				dma_free_coherent(&pHba->pDev->dev,
					sg[sg_index].flag_count & 0xffffff,
					sg_list[sg_index],
					sg[sg_index].addr_bus);
			}
		}
	}

free:
	kfree(sg_list);
	kfree(reply);
	return rcode;
}
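/*
 * User message framing, as parsed above: the high 16 bits of the first
 * word give the message length in 32-bit words (hence "size = size>>16"),
 * bits 4-7 give the SG list offset in words, and each simple SG element
 * packs its byte count in the low 24 bits of flag_count with the
 * direction bit at 0x04000000. The reply buffer immediately follows the
 * message in the user's buffer (user_reply = &user_msg[size]).
 */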
#if defined __ia64__
static void adpt_ia64_info(sysInfo_S* si)
{
	// This is all the info we need for now
	// We will add more info as our new
	// management utility requires it
	si->processorType = PROC_IA64;
}
#endif

#if defined __sparc__
static void adpt_sparc_info(sysInfo_S* si)
{
	// This is all the info we need for now
	// We will add more info as our new
	// management utility requires it
	si->processorType = PROC_ULTRASPARC;
}
#endif
#if defined __alpha__
static void adpt_alpha_info(sysInfo_S* si)
{
	// This is all the info we need for now
	// We will add more info as our new
	// management utility requires it
	si->processorType = PROC_ALPHA;
}
#endif

#if defined __i386__

#include <uapi/asm/vm86.h>

static void adpt_i386_info(sysInfo_S* si)
{
	// This is all the info we need for now
	// We will add more info as our new
	// management utility requires it
	switch (boot_cpu_data.x86) {
	case CPU_386:
		si->processorType = PROC_386;
		break;
	case CPU_486:
		si->processorType = PROC_486;
		break;
	case CPU_586:
		si->processorType = PROC_PENTIUM;
		break;
	default:  // Just in case
		si->processorType = PROC_PENTIUM;
		break;
	}
}
#endif
/*
 * This routine returns information about the system.  This does not affect
 * any logic and if the info is wrong - it doesn't matter.
 */

/* Get all the info we can not get from kernel services */
static int adpt_system_info(void __user *buffer)
{
	sysInfo_S si;

	memset(&si, 0, sizeof(si));

	si.osType = OS_LINUX;
	si.osMajorVersion = 0;
	si.osMinorVersion = 0;
	si.osRevision = 0;
	si.busType = SI_PCI_BUS;
	si.processorFamily = DPTI_sig.dsProcessorFamily;

#if defined __i386__
	adpt_i386_info(&si);
#elif defined (__ia64__)
	adpt_ia64_info(&si);
#elif defined(__sparc__)
	adpt_sparc_info(&si);
#elif defined (__alpha__)
	adpt_alpha_info(&si);
#else
	si.processorType = 0xff ;
#endif
	if (copy_to_user(buffer, &si, sizeof(si))){
		printk(KERN_WARNING"dpti: Could not copy buffer TO user\n");
		return -EFAULT;
	}

	return 0;
}
static int adpt_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg)
{
	int minor;
	int error = 0;
	adpt_hba* pHba;
	ulong flags = 0;
	void __user *argp = (void __user *)arg;

	minor = iminor(inode);
	if (minor >= DPTI_MAX_HBA){
		return -ENXIO;
	}
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->unit == minor) {
			break;	/* found adapter */
		}
	}
	mutex_unlock(&adpt_configuration_lock);
	if(pHba == NULL){
		return -ENXIO;
	}

	while((volatile u32) pHba->state & DPTI_STATE_RESET )
		schedule_timeout_uninterruptible(2);

	switch (cmd) {
	// TODO: handle 3 cases
	case DPT_SIGNATURE:
		if (copy_to_user(argp, &DPTI_sig, sizeof(DPTI_sig))) {
			return -EFAULT;
		}
		break;
	case I2OUSRCMD:
		return adpt_i2o_passthru(pHba, argp);

	case DPT_CTRLINFO:{
		drvrHBAinfo_S HbaInfo;

#define FLG_OSD_PCI_VALID 0x0001
#define FLG_OSD_DMA	  0x0002
#define FLG_OSD_I2O	  0x0004
		memset(&HbaInfo, 0, sizeof(HbaInfo));
		HbaInfo.drvrHBAnum = pHba->unit;
		HbaInfo.baseAddr = (ulong) pHba->base_addr_phys;
		HbaInfo.blinkState = adpt_read_blink_led(pHba);
		HbaInfo.pciBusNum = pHba->pDev->bus->number;
		HbaInfo.pciDeviceNum=PCI_SLOT(pHba->pDev->devfn);
		HbaInfo.Interrupt = pHba->pDev->irq;
		HbaInfo.hbaFlags = FLG_OSD_PCI_VALID | FLG_OSD_DMA | FLG_OSD_I2O;
		if(copy_to_user(argp, &HbaInfo, sizeof(HbaInfo))){
			printk(KERN_WARNING"%s: Could not copy HbaInfo TO user\n",pHba->name);
			return -EFAULT;
		}
		break;
		}
	case DPT_SYSINFO:
		return adpt_system_info(argp);
	case DPT_BLINKLED:{
		u32 value;
		value = (u32)adpt_read_blink_led(pHba);
		if (copy_to_user(argp, &value, sizeof(value))) {
			return -EFAULT;
		}
		break;
		}
	case I2ORESETCMD: {
		struct Scsi_Host *shost = pHba->host;

		if (shost)
			spin_lock_irqsave(shost->host_lock, flags);
		adpt_hba_reset(pHba);
		if (shost)
			spin_unlock_irqrestore(shost->host_lock, flags);
		break;
	}
	case I2ORESCANCMD:
		adpt_rescan(pHba);
		break;
	default:
		return -EINVAL;
	}

	return error;
}
static long adpt_unlocked_ioctl(struct file *file, uint cmd, ulong arg)
{
	struct inode *inode;
	long ret;

	inode = file_inode(file);

	mutex_lock(&adpt_mutex);
	ret = adpt_ioctl(inode, file, cmd, arg);
	mutex_unlock(&adpt_mutex);

	return ret;
}

#ifdef CONFIG_COMPAT
static long compat_adpt_ioctl(struct file *file,
				unsigned int cmd, unsigned long arg)
{
	struct inode *inode;
	long ret;

	inode = file_inode(file);

	mutex_lock(&adpt_mutex);

	switch(cmd) {
	case DPT_SIGNATURE:
	case I2OUSRCMD:
	case DPT_CTRLINFO:
	case DPT_SYSINFO:
	case DPT_BLINKLED:
	case I2ORESETCMD:
	case I2ORESCANCMD:
	case (DPT_TARGET_BUSY & 0xFFFF):
	case DPT_TARGET_BUSY:
		ret = adpt_ioctl(inode, file, cmd, arg);
		break;
	default:
		ret = -ENOIOCTLCMD;
	}

	mutex_unlock(&adpt_mutex);

	return ret;
}
#endif
2083 static irqreturn_t adpt_isr(int irq, void *dev_id)
2085 struct scsi_cmnd* cmd;
2086 adpt_hba* pHba = dev_id;
2087 u32 m;
2088 void __iomem *reply;
2089 u32 status=0;
2090 u32 context;
2091 ulong flags = 0;
2092 int handled = 0;
2094 if (pHba == NULL){
2095 printk(KERN_WARNING"adpt_isr: NULL dev_id\n");
2096 return IRQ_NONE;
2098 if(pHba->host)
2099 spin_lock_irqsave(pHba->host->host_lock, flags);
2101 while( readl(pHba->irq_mask) & I2O_INTERRUPT_PENDING_B) {
2102 m = readl(pHba->reply_port);
2103 if(m == EMPTY_QUEUE){
2104 // Try twice then give up
2105 rmb();
2106 m = readl(pHba->reply_port);
2107 if(m == EMPTY_QUEUE){
2108 // This really should not happen
2109 printk(KERN_ERR"dpti: Could not get reply frame\n");
2110 goto out;
2113 if (pHba->reply_pool_pa <= m &&
2114 m < pHba->reply_pool_pa +
2115 (pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4)) {
2116 reply = (u8 *)pHba->reply_pool +
2117 (m - pHba->reply_pool_pa);
2118 } else {
2119 /* Ick, we should *never* be here */
2120 printk(KERN_ERR "dpti: reply frame not from pool\n");
2121 reply = (u8 *)bus_to_virt(m);
2124 if (readl(reply) & MSG_FAIL) {
2125 u32 old_m = readl(reply+28);
2126 void __iomem *msg;
2127 u32 old_context;
2128 PDEBUG("%s: Failed message\n",pHba->name);
2129 if(old_m >= 0x100000){
2130 printk(KERN_ERR"%s: Bad preserved MFA (%x)- dropping frame\n",pHba->name,old_m);
2131 writel(m,pHba->reply_port);
2132 continue;
2134 // Transaction context is 0 in failed reply frame
2135 msg = pHba->msg_addr_virt + old_m;
2136 old_context = readl(msg+12);
2137 writel(old_context, reply+12);
2138 adpt_send_nop(pHba, old_m);
2140 context = readl(reply+8);
2141 if(context & 0x40000000){ // IOCTL
2142 void *p = adpt_ioctl_from_context(pHba, readl(reply+12));
2143 if( p != NULL) {
2144 memcpy_fromio(p, reply, REPLY_FRAME_SIZE * 4);
2146 // All IOCTLs will also be post wait
2148 if(context & 0x80000000){ // Post wait message
2149 status = readl(reply+16);
2150 if(status >> 24){
2151 status &= 0xffff; /* Get detail status */
2152 } else {
2153 status = I2O_POST_WAIT_OK;
2155 if(!(context & 0x40000000)) {
2156 /*
2157 * The request tag is one less than the command tag
2158 * as the firmware might treat a 0 tag as invalid
2159 */
2160 cmd = scsi_host_find_tag(pHba->host,
2161 readl(reply + 12) - 1);
2162 if(cmd != NULL) {
2163 printk(KERN_WARNING"%s: Apparent SCSI cmd in Post Wait Context - cmd=%p context=%x\n", pHba->name, cmd, context);
2166 adpt_i2o_post_wait_complete(context, status);
2167 } else { // SCSI message
2168 /*
2169 * The request tag is one less than the command tag
2170 * as the firmware might treat a 0 tag as invalid
2171 */
2172 cmd = scsi_host_find_tag(pHba->host,
2173 readl(reply + 12) - 1);
2174 if(cmd != NULL){
2175 scsi_dma_unmap(cmd);
2176 adpt_i2o_scsi_complete(reply, cmd);
2179 writel(m, pHba->reply_port);
2180 wmb();
2181 rmb();
2183 handled = 1;
2184 out: if(pHba->host)
2185 spin_unlock_irqrestore(pHba->host->host_lock, flags);
2186 return IRQ_RETVAL(handled);
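/*
 * Build and post an I2O_CMD_SCSI_EXEC request for one scsi_cmnd.
 * A sketch of the frame as assembled below (inferred from this code,
 * not quoted from the I2O spec): msg[1] function and TID, msg[3]
 * block-layer tag + 1, msg[6] SCB flags with the CDB length,
 * msg[7..10] the 16-byte CDB block, then a total-length word and the
 * scatter/gather list (two words per element, three when 64-bit
 * addressing is enabled via the 0x7C marker element).
 */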
2189 static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_device* d)
2191 int i;
2192 u32 msg[MAX_MESSAGE_SIZE];
2193 u32* mptr;
2194 u32* lptr;
2195 u32 *lenptr;
2196 int direction;
2197 int scsidir;
2198 int nseg;
2199 u32 len;
2200 u32 reqlen;
2201 s32 rcode;
2202 dma_addr_t addr;
2204 memset(msg, 0 , sizeof(msg));
2205 len = scsi_bufflen(cmd);
2206 direction = 0x00000000;
2208 scsidir = 0x00000000; // DATA NO XFER
2209 if(len) {
2210 /*
2211 * Set SCBFlags to indicate if data is being transferred
2212 * in or out, or no data transfer
2213 * Note: Do not have to verify index is less than 0 since
2214 * cmd->cmnd[0] is an unsigned char
2215 */
2216 switch(cmd->sc_data_direction){
2217 case DMA_FROM_DEVICE:
2218 scsidir =0x40000000; // DATA IN (iop<--dev)
2219 break;
2220 case DMA_TO_DEVICE:
2221 direction=0x04000000; // SGL OUT
2222 scsidir =0x80000000; // DATA OUT (iop-->dev)
2223 break;
2224 case DMA_NONE:
2225 break;
2226 case DMA_BIDIRECTIONAL:
2227 scsidir =0x40000000; // DATA IN (iop<--dev)
2228 // Assume In - and continue;
2229 break;
2230 default:
2231 printk(KERN_WARNING"%s: scsi opcode 0x%x not supported.\n",
2232 pHba->name, cmd->cmnd[0]);
2233 cmd->result = (DID_OK <<16) | (INITIATOR_ERROR << 8);
2234 cmd->scsi_done(cmd);
2235 return 0;
2238 // msg[0] is set later
2239 // I2O_CMD_SCSI_EXEC
2240 msg[1] = ((0xff<<24)|(HOST_TID<<12)|d->tid);
2241 msg[2] = 0;
2242 /* Add 1 to avoid firmware treating it as invalid command */
2243 msg[3] = cmd->request->tag + 1;
2244 // Our cards use the transaction context as the tag for queueing
2245 // Adaptec/DPT Private stuff
2246 msg[4] = I2O_CMD_SCSI_EXEC|(DPT_ORGANIZATION_ID<<16);
2247 msg[5] = d->tid;
2248 /* Direction, disconnect ok | sense data | simple queue , CDBLen */
2249 // I2O_SCB_FLAG_ENABLE_DISCONNECT |
2250 // I2O_SCB_FLAG_SIMPLE_QUEUE_TAG |
2251 // I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
2252 msg[6] = scsidir|0x20a00000|cmd->cmd_len;
2254 mptr=msg+7;
2256 // Write SCSI command into the message - always 16 byte block
2257 memset(mptr, 0, 16);
2258 memcpy(mptr, cmd->cmnd, cmd->cmd_len);
2259 mptr+=4;
2260 lenptr=mptr++; /* Remember me - fill in when we know */
2261 if (dpt_dma64(pHba)) {
2262 reqlen = 16; // SINGLE SGE
2263 *mptr++ = (0x7C<<24)+(2<<16)+0x02; /* Enable 64 bit */
2264 *mptr++ = 1 << PAGE_SHIFT;
2265 } else {
2266 reqlen = 14; // SINGLE SGE
2268 /* Now fill in the SGList and command */
2270 nseg = scsi_dma_map(cmd);
2271 BUG_ON(nseg < 0);
2272 if (nseg) {
2273 struct scatterlist *sg;
2275 len = 0;
2276 scsi_for_each_sg(cmd, sg, nseg, i) {
2277 lptr = mptr;
2278 *mptr++ = direction|0x10000000|sg_dma_len(sg);
2279 len+=sg_dma_len(sg);
2280 addr = sg_dma_address(sg);
2281 *mptr++ = dma_low(addr);
2282 if (dpt_dma64(pHba))
2283 *mptr++ = dma_high(addr);
2284 /* Make this an end of list */
2285 if (i == nseg - 1)
2286 *lptr = direction|0xD0000000|sg_dma_len(sg);
2288 reqlen = mptr - msg;
2289 *lenptr = len;
2291 if(cmd->underflow && len != cmd->underflow){
2292 printk(KERN_WARNING"Cmd len %08X Cmd underflow %08X\n",
2293 len, cmd->underflow);
2295 } else {
2296 *lenptr = len = 0;
2297 reqlen = 12;
2300 /* Stick the headers on */
2301 msg[0] = reqlen<<16 | ((reqlen > 12) ? SGL_OFFSET_12 : SGL_OFFSET_0);
2303 // Send it on its way
2304 rcode = adpt_i2o_post_this(pHba, msg, reqlen<<2);
2305 if (rcode == 0) {
2306 return 0;
2308 return rcode;
2312 static s32 adpt_scsi_host_alloc(adpt_hba* pHba, struct scsi_host_template *sht)
2314 struct Scsi_Host *host;
2316 host = scsi_host_alloc(sht, sizeof(adpt_hba*));
2317 if (host == NULL) {
2318 printk("%s: scsi_host_alloc returned NULL\n", pHba->name);
2319 return -1;
2321 host->hostdata[0] = (unsigned long)pHba;
2322 pHba->host = host;
2324 host->irq = pHba->pDev->irq;
2325 /* no IO ports, so don't have to set host->io_port and
2326 * host->n_io_port
2327 */
2328 host->io_port = 0;
2329 host->n_io_port = 0;
2330 /* see comments in scsi_host.h */
2331 host->max_id = 16;
2332 host->max_lun = 256;
2333 host->max_channel = pHba->top_scsi_channel + 1;
2334 host->cmd_per_lun = 1;
2335 host->unique_id = (u32)sys_tbl_pa + pHba->unit;
2336 host->sg_tablesize = pHba->sg_tablesize;
2337 host->can_queue = pHba->post_fifo_size;
2339 return 0;
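/*
 * Decode one SCSI reply frame. The 16-bit detailed status at
 * reply+16 splits into dev_status (low byte, the SAM status) and
 * hba_status (high byte, the I2O_SCSI_DSC_* adapter code), which
 * the switch below maps onto Linux DID_* results.
 */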
2343 static void adpt_i2o_scsi_complete(void __iomem *reply, struct scsi_cmnd *cmd)
2345 adpt_hba* pHba;
2346 u32 hba_status;
2347 u32 dev_status;
2348 u32 reply_flags = readl(reply) & 0xff00; // Leave it shifted up 8 bits
2349 // I know this would look cleaner if I just read bytes
2350 // but the model I have been using for all the rest of the
2351 // io is in 4 byte words - so I keep that model
2352 u16 detailed_status = readl(reply+16) &0xffff;
2353 dev_status = (detailed_status & 0xff);
2354 hba_status = detailed_status >> 8;
2356 // calculate resid for sg
2357 scsi_set_resid(cmd, scsi_bufflen(cmd) - readl(reply+20));
2359 pHba = (adpt_hba*) cmd->device->host->hostdata[0];
2361 cmd->sense_buffer[0] = '\0'; // initialize sense valid flag to false
2363 if(!(reply_flags & MSG_FAIL)) {
2364 switch(detailed_status & I2O_SCSI_DSC_MASK) {
2365 case I2O_SCSI_DSC_SUCCESS:
2366 cmd->result = (DID_OK << 16);
2367 // handle underflow
2368 if (readl(reply+20) < cmd->underflow) {
2369 cmd->result = (DID_ERROR <<16);
2370 printk(KERN_WARNING"%s: SCSI CMD underflow\n",pHba->name);
2372 break;
2373 case I2O_SCSI_DSC_REQUEST_ABORTED:
2374 cmd->result = (DID_ABORT << 16);
2375 break;
2376 case I2O_SCSI_DSC_PATH_INVALID:
2377 case I2O_SCSI_DSC_DEVICE_NOT_PRESENT:
2378 case I2O_SCSI_DSC_SELECTION_TIMEOUT:
2379 case I2O_SCSI_DSC_COMMAND_TIMEOUT:
2380 case I2O_SCSI_DSC_NO_ADAPTER:
2381 case I2O_SCSI_DSC_RESOURCE_UNAVAILABLE:
2382 printk(KERN_WARNING"%s: SCSI Timeout-Device (%d,%d,%llu) hba status=0x%x, dev status=0x%x, cmd=0x%x\n",
2383 pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun, hba_status, dev_status, cmd->cmnd[0]);
2384 cmd->result = (DID_TIME_OUT << 16);
2385 break;
2386 case I2O_SCSI_DSC_ADAPTER_BUSY:
2387 case I2O_SCSI_DSC_BUS_BUSY:
2388 cmd->result = (DID_BUS_BUSY << 16);
2389 break;
2390 case I2O_SCSI_DSC_SCSI_BUS_RESET:
2391 case I2O_SCSI_DSC_BDR_MESSAGE_SENT:
2392 cmd->result = (DID_RESET << 16);
2393 break;
2394 case I2O_SCSI_DSC_PARITY_ERROR_FAILURE:
2395 printk(KERN_WARNING"%s: SCSI CMD parity error\n",pHba->name);
2396 cmd->result = (DID_PARITY << 16);
2397 break;
2398 case I2O_SCSI_DSC_UNABLE_TO_ABORT:
2399 case I2O_SCSI_DSC_COMPLETE_WITH_ERROR:
2400 case I2O_SCSI_DSC_UNABLE_TO_TERMINATE:
2401 case I2O_SCSI_DSC_MR_MESSAGE_RECEIVED:
2402 case I2O_SCSI_DSC_AUTOSENSE_FAILED:
2403 case I2O_SCSI_DSC_DATA_OVERRUN:
2404 case I2O_SCSI_DSC_UNEXPECTED_BUS_FREE:
2405 case I2O_SCSI_DSC_SEQUENCE_FAILURE:
2406 case I2O_SCSI_DSC_REQUEST_LENGTH_ERROR:
2407 case I2O_SCSI_DSC_PROVIDE_FAILURE:
2408 case I2O_SCSI_DSC_REQUEST_TERMINATED:
2409 case I2O_SCSI_DSC_IDE_MESSAGE_SENT:
2410 case I2O_SCSI_DSC_UNACKNOWLEDGED_EVENT:
2411 case I2O_SCSI_DSC_MESSAGE_RECEIVED:
2412 case I2O_SCSI_DSC_INVALID_CDB:
2413 case I2O_SCSI_DSC_LUN_INVALID:
2414 case I2O_SCSI_DSC_SCSI_TID_INVALID:
2415 case I2O_SCSI_DSC_FUNCTION_UNAVAILABLE:
2416 case I2O_SCSI_DSC_NO_NEXUS:
2417 case I2O_SCSI_DSC_CDB_RECEIVED:
2418 case I2O_SCSI_DSC_LUN_ALREADY_ENABLED:
2419 case I2O_SCSI_DSC_QUEUE_FROZEN:
2420 case I2O_SCSI_DSC_REQUEST_INVALID:
2421 default:
2422 printk(KERN_WARNING"%s: SCSI error %0x-Device(%d,%d,%llu) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
2423 pHba->name, detailed_status & I2O_SCSI_DSC_MASK, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun,
2424 hba_status, dev_status, cmd->cmnd[0]);
2425 cmd->result = (DID_ERROR << 16);
2426 break;
2429 // copy over the request sense data if it was a check
2430 // condition status
2431 if (dev_status == SAM_STAT_CHECK_CONDITION) {
2432 u32 len = min(SCSI_SENSE_BUFFERSIZE, 40);
2433 // Copy over the sense data
2434 memcpy_fromio(cmd->sense_buffer, (reply+28) , len);
2435 if(cmd->sense_buffer[0] == 0x70 /* class 7 */ &&
2436 cmd->sense_buffer[2] == DATA_PROTECT ){
2437 /* This is to handle a failed array */
2438 cmd->result = (DID_TIME_OUT << 16);
2439 printk(KERN_WARNING"%s: SCSI Data Protect-Device (%d,%d,%llu) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
2440 pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun,
2441 hba_status, dev_status, cmd->cmnd[0]);
2445 } else {
2446 /* In this condition we could not talk to the tid.
2447 * The card rejected it. We should signal a retry
2448 * for a limited number of retries.
2449 */
2450 cmd->result = (DID_TIME_OUT << 16);
2451 printk(KERN_WARNING"%s: I2O MSG_FAIL - Device (%d,%d,%llu) tid=%d, cmd=0x%x\n",
2452 pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun,
2453 ((struct adpt_device*)(cmd->device->hostdata))->tid, cmd->cmnd[0]);
2456 cmd->result |= (dev_status);
2458 if(cmd->scsi_done != NULL){
2459 cmd->scsi_done(cmd);
2464 static s32 adpt_rescan(adpt_hba* pHba)
2466 s32 rcode;
2467 ulong flags = 0;
2469 if(pHba->host)
2470 spin_lock_irqsave(pHba->host->host_lock, flags);
2471 if ((rcode=adpt_i2o_lct_get(pHba)) < 0)
2472 goto out;
2473 if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0)
2474 goto out;
2475 rcode = 0;
2476 out: if(pHba->host)
2477 spin_unlock_irqrestore(pHba->host->host_lock, flags);
2478 return rcode;
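/*
 * Reconcile a freshly read LCT with the driver's device lists:
 * every known device is first marked DPTI_DEV_UNSCANNED, entries
 * still present are put back ONLINE (re-binding the tid if it
 * moved), new entries get i2o_device/adpt_device structures, and
 * whatever is still UNSCANNED afterwards is forced offline.
 */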
2482 static s32 adpt_i2o_reparse_lct(adpt_hba* pHba)
2484 int i;
2485 int max;
2486 int tid;
2487 struct i2o_device *d;
2488 i2o_lct *lct = pHba->lct;
2489 u8 bus_no = 0;
2490 s16 scsi_id;
2491 u64 scsi_lun;
2492 u32 buf[10]; // at least 8 u32's
2493 struct adpt_device* pDev = NULL;
2494 struct i2o_device* pI2o_dev = NULL;
2496 if (lct == NULL) {
2497 printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
2498 return -1;
2501 max = lct->table_size;
2502 max -= 3;
2503 max /= 9;
2505 // Mark each drive as unscanned
2506 for (d = pHba->devices; d; d = d->next) {
2507 pDev =(struct adpt_device*) d->owner;
2508 if(!pDev){
2509 continue;
2511 pDev->state |= DPTI_DEV_UNSCANNED;
2514 printk(KERN_INFO "%s: LCT has %d entries.\n", pHba->name,max);
2516 for(i=0;i<max;i++) {
2517 if( lct->lct_entry[i].user_tid != 0xfff){
2518 continue;
2521 if( lct->lct_entry[i].class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
2522 lct->lct_entry[i].class_id == I2O_CLASS_SCSI_PERIPHERAL ||
2523 lct->lct_entry[i].class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
2524 tid = lct->lct_entry[i].tid;
2525 if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
2526 printk(KERN_ERR"%s: Could not query device\n",pHba->name);
2527 continue;
2529 bus_no = buf[0]>>16;
2530 if (bus_no >= MAX_CHANNEL) { /* Something wrong skip it */
2531 printk(KERN_WARNING
2532 "%s: Channel number %d out of range\n",
2533 pHba->name, bus_no);
2534 continue;
2537 scsi_id = buf[1];
2538 scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
2539 pDev = pHba->channel[bus_no].device[scsi_id];
2540 /* walk the LUN chain for this target */
2541 while(pDev) {
2542 if(pDev->scsi_lun == scsi_lun) {
2543 break;
2545 pDev = pDev->next_lun;
2547 if(!pDev ) { // Something new add it
2548 d = kmalloc(sizeof(struct i2o_device),
2549 GFP_ATOMIC);
2550 if(d==NULL)
2551 {
2552 printk(KERN_CRIT "Out of memory for I2O device data.\n");
2553 return -ENOMEM;
2554 }
2556 d->controller = pHba;
2557 d->next = NULL;
2559 memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
2561 d->flags = 0;
2562 adpt_i2o_report_hba_unit(pHba, d);
2563 adpt_i2o_install_device(pHba, d);
2565 pDev = pHba->channel[bus_no].device[scsi_id];
2566 if( pDev == NULL){
2567 pDev =
2568 kzalloc(sizeof(struct adpt_device),
2569 GFP_ATOMIC);
2570 if(pDev == NULL) {
2571 return -ENOMEM;
2573 pHba->channel[bus_no].device[scsi_id] = pDev;
2574 } else {
2575 while (pDev->next_lun) {
2576 pDev = pDev->next_lun;
2578 pDev = pDev->next_lun =
2579 kzalloc(sizeof(struct adpt_device),
2580 GFP_ATOMIC);
2581 if(pDev == NULL) {
2582 return -ENOMEM;
2585 pDev->tid = d->lct_data.tid;
2586 pDev->scsi_channel = bus_no;
2587 pDev->scsi_id = scsi_id;
2588 pDev->scsi_lun = scsi_lun;
2589 pDev->pI2o_dev = d;
2590 d->owner = pDev;
2591 pDev->type = (buf[0])&0xff;
2592 pDev->flags = (buf[0]>>8)&0xff;
2593 // Too late, SCSI system has made up its mind, but what the hey ...
2594 if(scsi_id > pHba->top_scsi_id){
2595 pHba->top_scsi_id = scsi_id;
2597 if(scsi_lun > pHba->top_scsi_lun){
2598 pHba->top_scsi_lun = scsi_lun;
2600 continue;
2601 } // end of new i2o device
2603 // We found an old device - check it
2604 while(pDev) {
2605 if(pDev->scsi_lun == scsi_lun) {
2606 if(!scsi_device_online(pDev->pScsi_dev)) {
2607 printk(KERN_WARNING"%s: Setting device (%d,%d,%llu) back online\n",
2608 pHba->name,bus_no,scsi_id,scsi_lun);
2609 if (pDev->pScsi_dev) {
2610 scsi_device_set_state(pDev->pScsi_dev, SDEV_RUNNING);
2613 d = pDev->pI2o_dev;
2614 if(d->lct_data.tid != tid) { // something changed
2615 pDev->tid = tid;
2616 memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
2617 if (pDev->pScsi_dev) {
2618 pDev->pScsi_dev->changed = TRUE;
2619 pDev->pScsi_dev->removable = TRUE;
2622 // Found it - mark it scanned
2623 pDev->state = DPTI_DEV_ONLINE;
2624 break;
2626 pDev = pDev->next_lun;
2630 for (pI2o_dev = pHba->devices; pI2o_dev; pI2o_dev = pI2o_dev->next) {
2631 pDev =(struct adpt_device*) pI2o_dev->owner;
2632 if(!pDev){
2633 continue;
2635 // Take offline any drives that previously existed but could not be found
2636 // in the LCT table
2637 if (pDev->state & DPTI_DEV_UNSCANNED){
2638 pDev->state = DPTI_DEV_OFFLINE;
2639 printk(KERN_WARNING"%s: Device (%d,%d,%llu) offline\n",pHba->name,pDev->scsi_channel,pDev->scsi_id,pDev->scsi_lun);
2640 if (pDev->pScsi_dev) {
2641 scsi_device_set_state(pDev->pScsi_dev, SDEV_OFFLINE);
2645 return 0;
2648 /*============================================================================
2649 * Routines from i2o subsystem
2650 *============================================================================
2651 */
2655 /*
2656 * Bring an I2O controller into HOLD state. See the spec.
2657 */
2658 static int adpt_i2o_activate_hba(adpt_hba* pHba)
2660 int rcode;
2662 if(pHba->initialized ) {
2663 if (adpt_i2o_status_get(pHba) < 0) {
2664 if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
2665 printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
2666 return rcode;
2668 if (adpt_i2o_status_get(pHba) < 0) {
2669 printk(KERN_INFO "HBA not responding.\n");
2670 return -1;
2674 if(pHba->status_block->iop_state == ADAPTER_STATE_FAULTED) {
2675 printk(KERN_CRIT "%s: hardware fault\n", pHba->name);
2676 return -1;
2679 if (pHba->status_block->iop_state == ADAPTER_STATE_READY ||
2680 pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL ||
2681 pHba->status_block->iop_state == ADAPTER_STATE_HOLD ||
2682 pHba->status_block->iop_state == ADAPTER_STATE_FAILED) {
2683 adpt_i2o_reset_hba(pHba);
2684 if (adpt_i2o_status_get(pHba) < 0 || pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
2685 printk(KERN_ERR "%s: Failed to initialize.\n", pHba->name);
2686 return -1;
2689 } else {
2690 if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
2691 printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
2692 return rcode;
2697 if (adpt_i2o_init_outbound_q(pHba) < 0) {
2698 return -1;
2701 /* In HOLD state */
2703 if (adpt_i2o_hrt_get(pHba) < 0) {
2704 return -1;
2707 return 0;
2710 /*
2711 * Bring a controller online into OPERATIONAL state.
2712 */
2714 static int adpt_i2o_online_hba(adpt_hba* pHba)
2716 if (adpt_i2o_systab_send(pHba) < 0)
2717 return -1;
2718 /* In READY state */
2720 if (adpt_i2o_enable_hba(pHba) < 0)
2721 return -1;
2723 /* In OPERATIONAL state */
2724 return 0;
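/*
 * Return an unused message frame (MFA) to the IOP by filling it
 * with UTIL_NOP and posting it back; the ISR above also uses this
 * to recycle the preserved MFA of a failed message.
 */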
2727 static s32 adpt_send_nop(adpt_hba*pHba,u32 m)
2729 u32 __iomem *msg;
2730 ulong timeout = jiffies + 5*HZ;
2732 while(m == EMPTY_QUEUE){
2733 rmb();
2734 m = readl(pHba->post_port);
2735 if(m != EMPTY_QUEUE){
2736 break;
2738 if(time_after(jiffies,timeout)){
2739 printk(KERN_ERR "%s: Timeout waiting for message frame!\n",pHba->name);
2740 return 2;
2742 schedule_timeout_uninterruptible(1);
2744 msg = (u32 __iomem *)(pHba->msg_addr_virt + m);
2745 writel( THREE_WORD_MSG_SIZE | SGL_OFFSET_0,&msg[0]);
2746 writel( I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | 0,&msg[1]);
2747 writel( 0,&msg[2]);
2748 wmb();
2750 writel(m, pHba->post_port);
2751 wmb();
2752 return 0;
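/*
 * Initialize the outbound (reply) queue. The IOP reports progress
 * through the 4-byte DMA status cookie: 0x01 while the init is in
 * progress, 0x04 on completion. Only then is the reply pool
 * allocated and each frame's bus address written to reply_port to
 * prime the FIFO.
 */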
2755 static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba)
2757 u8 *status;
2758 dma_addr_t addr;
2759 u32 __iomem *msg = NULL;
2760 int i;
2761 ulong timeout = jiffies + TMOUT_INITOUTBOUND*HZ;
2762 u32 m;
2764 do {
2765 rmb();
2766 m = readl(pHba->post_port);
2767 if (m != EMPTY_QUEUE) {
2768 break;
2771 if(time_after(jiffies,timeout)){
2772 printk(KERN_WARNING"%s: Timeout waiting for message frame\n",pHba->name);
2773 return -ETIMEDOUT;
2775 schedule_timeout_uninterruptible(1);
2776 } while(m == EMPTY_QUEUE);
2778 msg=(u32 __iomem *)(pHba->msg_addr_virt+m);
2780 status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
2781 if (!status) {
2782 adpt_send_nop(pHba, m);
2783 printk(KERN_WARNING"%s: IOP reset failed - no free memory.\n",
2784 pHba->name);
2785 return -ENOMEM;
2787 memset(status, 0, 4);
2789 writel(EIGHT_WORD_MSG_SIZE| SGL_OFFSET_6, &msg[0]);
2790 writel(I2O_CMD_OUTBOUND_INIT<<24 | HOST_TID<<12 | ADAPTER_TID, &msg[1]);
2791 writel(0, &msg[2]);
2792 writel(0x0106, &msg[3]); /* Transaction context */
2793 writel(4096, &msg[4]); /* Host page frame size */
2794 writel((REPLY_FRAME_SIZE)<<16|0x80, &msg[5]); /* Outbound msg frame size and Initcode */
2795 writel(0xD0000004, &msg[6]); /* Simple SG LE, EOB */
2796 writel((u32)addr, &msg[7]);
2798 writel(m, pHba->post_port);
2799 wmb();
2801 // Wait for the reply status to come back
2802 do {
2803 if (*status) {
2804 if (*status != 0x01 /*I2O_EXEC_OUTBOUND_INIT_IN_PROGRESS*/) {
2805 break;
2808 rmb();
2809 if(time_after(jiffies,timeout)){
2810 printk(KERN_WARNING"%s: Timeout Initializing\n",pHba->name);
2811 /* We lose 4 bytes of "status" here, but we
2812 cannot free these because controller may
2813 awake and corrupt those bytes at any time */
2814 /* dma_free_coherent(&pHba->pDev->dev, 4, status, addr); */
2815 return -ETIMEDOUT;
2817 schedule_timeout_uninterruptible(1);
2818 } while (1);
2820 // If the command was successful, fill the fifo with our reply
2821 // message packets
2822 if(*status != 0x04 /*I2O_EXEC_OUTBOUND_INIT_COMPLETE*/) {
2823 dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
2824 return -2;
2826 dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
2828 if(pHba->reply_pool != NULL) {
2829 dma_free_coherent(&pHba->pDev->dev,
2830 pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
2831 pHba->reply_pool, pHba->reply_pool_pa);
2834 pHba->reply_pool = dma_alloc_coherent(&pHba->pDev->dev,
2835 pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
2836 &pHba->reply_pool_pa, GFP_KERNEL);
2837 if (!pHba->reply_pool) {
2838 printk(KERN_ERR "%s: Could not allocate reply pool\n", pHba->name);
2839 return -ENOMEM;
2841 memset(pHba->reply_pool, 0 , pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4);
2843 for(i = 0; i < pHba->reply_fifo_size; i++) {
2844 writel(pHba->reply_pool_pa + (i * REPLY_FRAME_SIZE * 4),
2845 pHba->reply_port);
2846 wmb();
2848 adpt_i2o_status_get(pHba);
2849 return 0;
2853 /*
2854 * I2O System Table. Contains information about
2855 * all the IOPs in the system. Used to inform IOPs
2856 * about each other's existence.
2857 *
2858 * sys_tbl_ver is the CurrentChangeIndicator that is
2859 * used by IOPs to track changes.
2860 */
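/*
 * Note on the status fetch below: the 88-byte status block is
 * zeroed first, and completion is detected by polling its last
 * byte (status_block[87]) until the IOP's DMA sets it to 0xff.
 */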
2864 static s32 adpt_i2o_status_get(adpt_hba* pHba)
2866 ulong timeout;
2867 u32 m;
2868 u32 __iomem *msg;
2869 u8 *status_block=NULL;
2871 if(pHba->status_block == NULL) {
2872 pHba->status_block = dma_alloc_coherent(&pHba->pDev->dev,
2873 sizeof(i2o_status_block),
2874 &pHba->status_block_pa, GFP_KERNEL);
2875 if(pHba->status_block == NULL) {
2876 printk(KERN_ERR
2877 "dpti%d: Get Status Block failed; Out of memory. \n",
2878 pHba->unit);
2879 return -ENOMEM;
2882 memset(pHba->status_block, 0, sizeof(i2o_status_block));
2883 status_block = (u8*)(pHba->status_block);
2884 timeout = jiffies+TMOUT_GETSTATUS*HZ;
2885 do {
2886 rmb();
2887 m = readl(pHba->post_port);
2888 if (m != EMPTY_QUEUE) {
2889 break;
2891 if(time_after(jiffies,timeout)){
2892 printk(KERN_ERR "%s: Timeout waiting for message !\n",
2893 pHba->name);
2894 return -ETIMEDOUT;
2896 schedule_timeout_uninterruptible(1);
2897 } while(m==EMPTY_QUEUE);
2900 msg=(u32 __iomem *)(pHba->msg_addr_virt+m);
2902 writel(NINE_WORD_MSG_SIZE|SGL_OFFSET_0, &msg[0]);
2903 writel(I2O_CMD_STATUS_GET<<24|HOST_TID<<12|ADAPTER_TID, &msg[1]);
2904 writel(1, &msg[2]);
2905 writel(0, &msg[3]);
2906 writel(0, &msg[4]);
2907 writel(0, &msg[5]);
2908 writel( dma_low(pHba->status_block_pa), &msg[6]);
2909 writel( dma_high(pHba->status_block_pa), &msg[7]);
2910 writel(sizeof(i2o_status_block), &msg[8]); // 88 bytes
2912 //post message
2913 writel(m, pHba->post_port);
2914 wmb();
2916 while(status_block[87]!=0xff){
2917 if(time_after(jiffies,timeout)){
2918 printk(KERN_ERR"dpti%d: Get status timeout.\n",
2919 pHba->unit);
2920 return -ETIMEDOUT;
2922 rmb();
2923 schedule_timeout_uninterruptible(1);
2926 // Set up our number of outbound and inbound messages
2927 pHba->post_fifo_size = pHba->status_block->max_inbound_frames;
2928 if (pHba->post_fifo_size > MAX_TO_IOP_MESSAGES) {
2929 pHba->post_fifo_size = MAX_TO_IOP_MESSAGES;
2932 pHba->reply_fifo_size = pHba->status_block->max_outbound_frames;
2933 if (pHba->reply_fifo_size > MAX_FROM_IOP_MESSAGES) {
2934 pHba->reply_fifo_size = MAX_FROM_IOP_MESSAGES;
2937 // Calculate the Scatter Gather list size
2938 if (dpt_dma64(pHba)) {
2939 pHba->sg_tablesize
2940 = ((pHba->status_block->inbound_frame_size * 4
2941 - 14 * sizeof(u32))
2942 / (sizeof(struct sg_simple_element) + sizeof(u32)));
2943 } else {
2944 pHba->sg_tablesize
2945 = ((pHba->status_block->inbound_frame_size * 4
2946 - 12 * sizeof(u32))
2947 / sizeof(struct sg_simple_element));
2949 if (pHba->sg_tablesize > SG_LIST_ELEMENTS) {
2950 pHba->sg_tablesize = SG_LIST_ELEMENTS;
2954 #ifdef DEBUG
2955 printk("dpti%d: State = ",pHba->unit);
2956 switch(pHba->status_block->iop_state) {
2957 case 0x01:
2958 printk("INIT\n");
2959 break;
2960 case 0x02:
2961 printk("RESET\n");
2962 break;
2963 case 0x04:
2964 printk("HOLD\n");
2965 break;
2966 case 0x05:
2967 printk("READY\n");
2968 break;
2969 case 0x08:
2970 printk("OPERATIONAL\n");
2971 break;
2972 case 0x10:
2973 printk("FAILED\n");
2974 break;
2975 case 0x11:
2976 printk("FAULTED\n");
2977 break;
2978 default:
2979 printk("%x (unknown!!)\n",pHba->status_block->iop_state);
2981 #endif
2982 return 0;
2985 /*
2986 * Get the IOP's Logical Configuration Table
2987 */
2988 static int adpt_i2o_lct_get(adpt_hba* pHba)
2990 u32 msg[8];
2991 int ret;
2992 u32 buf[16];
2994 if ((pHba->lct_size == 0) || (pHba->lct == NULL)){
2995 pHba->lct_size = pHba->status_block->expected_lct_size;
2997 do {
2998 if (pHba->lct == NULL) {
2999 pHba->lct = dma_alloc_coherent(&pHba->pDev->dev,
3000 pHba->lct_size, &pHba->lct_pa,
3001 GFP_ATOMIC);
3002 if(pHba->lct == NULL) {
3003 printk(KERN_CRIT "%s: Lct Get failed. Out of memory.\n",
3004 pHba->name);
3005 return -ENOMEM;
3008 memset(pHba->lct, 0, pHba->lct_size);
3010 msg[0] = EIGHT_WORD_MSG_SIZE|SGL_OFFSET_6;
3011 msg[1] = I2O_CMD_LCT_NOTIFY<<24 | HOST_TID<<12 | ADAPTER_TID;
3012 msg[2] = 0;
3013 msg[3] = 0;
3014 msg[4] = 0xFFFFFFFF; /* All devices */
3015 msg[5] = 0x00000000; /* Report now */
3016 msg[6] = 0xD0000000|pHba->lct_size;
3017 msg[7] = (u32)pHba->lct_pa;
3019 if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 360))) {
3020 printk(KERN_ERR "%s: LCT Get failed (status=%#10x).\n",
3021 pHba->name, ret);
3022 printk(KERN_ERR"Adaptec: Error Reading Hardware.\n");
3023 return ret;
3026 if ((pHba->lct->table_size << 2) > pHba->lct_size) {
3027 pHba->lct_size = pHba->lct->table_size << 2;
3028 dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
3029 pHba->lct, pHba->lct_pa);
3030 pHba->lct = NULL;
3032 } while (pHba->lct == NULL);
3034 PDEBUG("%s: Hardware resource table read.\n", pHba->name);
3037 // I2O_DPT_EXEC_IOP_BUFFERS_GROUP_NO;
3038 if(adpt_i2o_query_scalar(pHba, 0 , 0x8000, -1, buf, sizeof(buf))>=0) {
3039 pHba->FwDebugBufferSize = buf[1];
3040 pHba->FwDebugBuffer_P = ioremap(pHba->base_addr_phys + buf[0],
3041 pHba->FwDebugBufferSize);
3042 if (pHba->FwDebugBuffer_P) {
3043 pHba->FwDebugFlags_P = pHba->FwDebugBuffer_P +
3044 FW_DEBUG_FLAGS_OFFSET;
3045 pHba->FwDebugBLEDvalue_P = pHba->FwDebugBuffer_P +
3046 FW_DEBUG_BLED_OFFSET;
3047 pHba->FwDebugBLEDflag_P = pHba->FwDebugBLEDvalue_P + 1;
3048 pHba->FwDebugStrLength_P = pHba->FwDebugBuffer_P +
3049 FW_DEBUG_STR_LENGTH_OFFSET;
3050 pHba->FwDebugBuffer_P += buf[2];
3051 pHba->FwDebugFlags = 0;
3055 return 0;
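/*
 * (Re)build the global I2O system table: one i2o_sys_tbl_entry per
 * registered HBA, each advertising its inbound doorbell at
 * base_addr_phys + 0x40 so the IOPs can address one another.
 */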
3058 static int adpt_i2o_build_sys_table(void)
3060 adpt_hba* pHba = hba_chain;
3061 int count = 0;
3063 if (sys_tbl)
3064 dma_free_coherent(&pHba->pDev->dev, sys_tbl_len,
3065 sys_tbl, sys_tbl_pa);
3067 sys_tbl_len = sizeof(struct i2o_sys_tbl) + // Header + IOPs
3068 (hba_count) * sizeof(struct i2o_sys_tbl_entry);
3070 sys_tbl = dma_alloc_coherent(&pHba->pDev->dev,
3071 sys_tbl_len, &sys_tbl_pa, GFP_KERNEL);
3072 if (!sys_tbl) {
3073 printk(KERN_WARNING "SysTab Set failed. Out of memory.\n");
3074 return -ENOMEM;
3076 memset(sys_tbl, 0, sys_tbl_len);
3078 sys_tbl->num_entries = hba_count;
3079 sys_tbl->version = I2OVERSION;
3080 sys_tbl->change_ind = sys_tbl_ind++;
3082 for(pHba = hba_chain; pHba; pHba = pHba->next) {
3083 u64 addr;
3084 // Get updated Status Block so we have the latest information
3085 if (adpt_i2o_status_get(pHba)) {
3086 sys_tbl->num_entries--;
3087 continue; // try next one
3090 sys_tbl->iops[count].org_id = pHba->status_block->org_id;
3091 sys_tbl->iops[count].iop_id = pHba->unit + 2;
3092 sys_tbl->iops[count].seg_num = 0;
3093 sys_tbl->iops[count].i2o_version = pHba->status_block->i2o_version;
3094 sys_tbl->iops[count].iop_state = pHba->status_block->iop_state;
3095 sys_tbl->iops[count].msg_type = pHba->status_block->msg_type;
3096 sys_tbl->iops[count].frame_size = pHba->status_block->inbound_frame_size;
3097 sys_tbl->iops[count].last_changed = sys_tbl_ind - 1; // ??
3098 sys_tbl->iops[count].iop_capabilities = pHba->status_block->iop_capabilities;
3099 addr = pHba->base_addr_phys + 0x40;
3100 sys_tbl->iops[count].inbound_low = dma_low(addr);
3101 sys_tbl->iops[count].inbound_high = dma_high(addr);
3103 count++;
3106 #ifdef DEBUG
3108 u32 *table = (u32*)sys_tbl;
3109 printk(KERN_DEBUG"sys_tbl_len=%d in 32bit words\n",(sys_tbl_len >>2));
3110 for(count = 0; count < (sys_tbl_len >>2); count++) {
3111 printk(KERN_INFO "sys_tbl[%d] = %0#10x\n",
3112 count, table[count]);
3115 #endif
3117 return 0;
3121 /*
3122 * Dump the information block associated with a given unit (TID)
3123 */
3125 static void adpt_i2o_report_hba_unit(adpt_hba* pHba, struct i2o_device *d)
3127 char buf[64];
3128 int unit = d->lct_data.tid;
3130 printk(KERN_INFO "TID %3.3d ", unit);
3132 if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 3, buf, 16)>=0)
3133 {
3134 buf[16]=0;
3135 printk(" Vendor: %-12.12s", buf);
3136 }
3137 if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 4, buf, 16)>=0)
3138 {
3139 buf[16]=0;
3140 printk(" Device: %-12.12s", buf);
3141 }
3142 if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 6, buf, 8)>=0)
3143 {
3144 buf[8]=0;
3145 printk(" Rev: %-12.12s\n", buf);
3146 }
3147 #ifdef DEBUG
3148 printk(KERN_INFO "\tClass: %.21s\n", adpt_i2o_get_class_name(d->lct_data.class_id));
3149 printk(KERN_INFO "\tSubclass: 0x%04X\n", d->lct_data.sub_class);
3150 printk(KERN_INFO "\tFlags: ");
3152 if(d->lct_data.device_flags&(1<<0))
3153 printk("C"); // ConfigDialog requested
3154 if(d->lct_data.device_flags&(1<<1))
3155 printk("U"); // Multi-user capable
3156 if(!(d->lct_data.device_flags&(1<<4)))
3157 printk("P"); // Peer service enabled!
3158 if(!(d->lct_data.device_flags&(1<<5)))
3159 printk("M"); // Mgmt service enabled!
3160 printk("\n");
3161 #endif
3164 #ifdef DEBUG
3165 /*
3166 * Do i2o class name lookup
3167 */
3168 static const char *adpt_i2o_get_class_name(int class)
3170 int idx = 16;
3171 static char *i2o_class_name[] = {
3172 "Executive",
3173 "Device Driver Module",
3174 "Block Device",
3175 "Tape Device",
3176 "LAN Interface",
3177 "WAN Interface",
3178 "Fibre Channel Port",
3179 "Fibre Channel Device",
3180 "SCSI Device",
3181 "ATE Port",
3182 "ATE Device",
3183 "Floppy Controller",
3184 "Floppy Device",
3185 "Secondary Bus Port",
3186 "Peer Transport Agent",
3187 "Peer Transport",
3188 "Unknown"
3191 switch(class&0xFFF) {
3192 case I2O_CLASS_EXECUTIVE:
3193 idx = 0; break;
3194 case I2O_CLASS_DDM:
3195 idx = 1; break;
3196 case I2O_CLASS_RANDOM_BLOCK_STORAGE:
3197 idx = 2; break;
3198 case I2O_CLASS_SEQUENTIAL_STORAGE:
3199 idx = 3; break;
3200 case I2O_CLASS_LAN:
3201 idx = 4; break;
3202 case I2O_CLASS_WAN:
3203 idx = 5; break;
3204 case I2O_CLASS_FIBRE_CHANNEL_PORT:
3205 idx = 6; break;
3206 case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
3207 idx = 7; break;
3208 case I2O_CLASS_SCSI_PERIPHERAL:
3209 idx = 8; break;
3210 case I2O_CLASS_ATE_PORT:
3211 idx = 9; break;
3212 case I2O_CLASS_ATE_PERIPHERAL:
3213 idx = 10; break;
3214 case I2O_CLASS_FLOPPY_CONTROLLER:
3215 idx = 11; break;
3216 case I2O_CLASS_FLOPPY_DEVICE:
3217 idx = 12; break;
3218 case I2O_CLASS_BUS_ADAPTER_PORT:
3219 idx = 13; break;
3220 case I2O_CLASS_PEER_TRANSPORT_AGENT:
3221 idx = 14; break;
3222 case I2O_CLASS_PEER_TRANSPORT:
3223 idx = 15; break;
3225 return i2o_class_name[idx];
3227 #endif
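/*
 * Fetch the Hardware Resource Table. As with the LCT fetch, the
 * buffer starts at sizeof(i2o_hrt) and the request is simply
 * retried with a larger allocation whenever the IOP reports more
 * entries than fit.
 */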
3230 static s32 adpt_i2o_hrt_get(adpt_hba* pHba)
3232 u32 msg[6];
3233 int ret, size = sizeof(i2o_hrt);
3235 do {
3236 if (pHba->hrt == NULL) {
3237 pHba->hrt = dma_alloc_coherent(&pHba->pDev->dev,
3238 size, &pHba->hrt_pa, GFP_KERNEL);
3239 if (pHba->hrt == NULL) {
3240 printk(KERN_CRIT "%s: Hrt Get failed; Out of memory.\n", pHba->name);
3241 return -ENOMEM;
3245 msg[0]= SIX_WORD_MSG_SIZE| SGL_OFFSET_4;
3246 msg[1]= I2O_CMD_HRT_GET<<24 | HOST_TID<<12 | ADAPTER_TID;
3247 msg[2]= 0;
3248 msg[3]= 0;
3249 msg[4]= (0xD0000000 | size); /* Simple transaction */
3250 msg[5]= (u32)pHba->hrt_pa; /* Dump it here */
3252 if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg),20))) {
3253 printk(KERN_ERR "%s: Unable to get HRT (status=%#10x)\n", pHba->name, ret);
3254 return ret;
3257 if (pHba->hrt->num_entries * pHba->hrt->entry_len << 2 > size) {
3258 int newsize = pHba->hrt->num_entries * pHba->hrt->entry_len << 2;
3259 dma_free_coherent(&pHba->pDev->dev, size,
3260 pHba->hrt, pHba->hrt_pa);
3261 size = newsize;
3262 pHba->hrt = NULL;
3264 } while(pHba->hrt == NULL);
3265 return 0;
3268 /*
3269 * Query one scalar group value or a whole scalar group.
3270 */
3271 static int adpt_i2o_query_scalar(adpt_hba* pHba, int tid,
3272 int group, int field, void *buf, int buflen)
3274 u16 opblk[] = { 1, 0, I2O_PARAMS_FIELD_GET, group, 1, field };
3275 u8 *opblk_va;
3276 dma_addr_t opblk_pa;
3277 u8 *resblk_va;
3278 dma_addr_t resblk_pa;
3280 int size;
3282 /* 8 bytes for header */
3283 resblk_va = dma_alloc_coherent(&pHba->pDev->dev,
3284 sizeof(u8) * (8 + buflen), &resblk_pa, GFP_KERNEL);
3285 if (resblk_va == NULL) {
3286 printk(KERN_CRIT "%s: query scalar failed; Out of memory.\n", pHba->name);
3287 return -ENOMEM;
3290 opblk_va = dma_alloc_coherent(&pHba->pDev->dev,
3291 sizeof(opblk), &opblk_pa, GFP_KERNEL);
3292 if (opblk_va == NULL) {
3293 dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3294 resblk_va, resblk_pa);
3295 printk(KERN_CRIT "%s: query operation failed; Out of memory.\n",
3296 pHba->name);
3297 return -ENOMEM;
3299 if (field == -1) /* whole group */
3300 opblk[4] = -1;
3302 memcpy(opblk_va, opblk, sizeof(opblk));
3303 size = adpt_i2o_issue_params(I2O_CMD_UTIL_PARAMS_GET, pHba, tid,
3304 opblk_va, opblk_pa, sizeof(opblk),
3305 resblk_va, resblk_pa, sizeof(u8)*(8+buflen));
3306 dma_free_coherent(&pHba->pDev->dev, sizeof(opblk), opblk_va, opblk_pa);
3307 if (size == -ETIME) {
3308 dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3309 resblk_va, resblk_pa);
3310 printk(KERN_WARNING "%s: issue params failed; Timed out.\n", pHba->name);
3311 return -ETIME;
3312 } else if (size == -EINTR) {
3313 dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3314 resblk_va, resblk_pa);
3315 printk(KERN_WARNING "%s: issue params failed; Interrupted.\n", pHba->name);
3316 return -EINTR;
3319 memcpy(buf, resblk_va+8, buflen); /* cut off header */
3321 dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3322 resblk_va, resblk_pa);
3323 if (size < 0)
3324 return size;
3326 return buflen;
3330 /* Issue UTIL_PARAMS_GET or UTIL_PARAMS_SET
3332 * This function can be used for all UtilParamsGet/Set operations.
3333 * The OperationBlock is given in opblk-buffer,
3334 * and results are returned in resblk-buffer.
3335 * Note that the minimum sized resblk is 8 bytes and contains
3336 * ResultCount, ErrorInfoSize, BlockStatus and BlockSize.
3337 */
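/*
 * For reference, the OperationBlock consumed here is the u16 array
 * built by adpt_i2o_query_scalar() above: an operation count, a
 * reserved word, then per operation the opcode
 * (I2O_PARAMS_FIELD_GET), group number, field count and field
 * index, where a count of -1 requests every field in the group.
 */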
3338 static int adpt_i2o_issue_params(int cmd, adpt_hba* pHba, int tid,
3339 void *opblk_va, dma_addr_t opblk_pa, int oplen,
3340 void *resblk_va, dma_addr_t resblk_pa, int reslen)
3342 u32 msg[9];
3343 u32 *res = (u32 *)resblk_va;
3344 int wait_status;
3346 msg[0] = NINE_WORD_MSG_SIZE | SGL_OFFSET_5;
3347 msg[1] = cmd << 24 | HOST_TID << 12 | tid;
3348 msg[2] = 0;
3349 msg[3] = 0;
3350 msg[4] = 0;
3351 msg[5] = 0x54000000 | oplen; /* OperationBlock */
3352 msg[6] = (u32)opblk_pa;
3353 msg[7] = 0xD0000000 | reslen; /* ResultBlock */
3354 msg[8] = (u32)resblk_pa;
3356 if ((wait_status = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 20))) {
3357 printk("adpt_i2o_issue_params: post_wait failed (%p)\n", resblk_va);
3358 return wait_status; /* -DetailedStatus */
3361 if (res[1]&0x00FF0000) { /* BlockStatus != SUCCESS */
3362 printk(KERN_WARNING "%s: %s - Error:\n ErrorInfoSize = 0x%02x, "
3363 "BlockStatus = 0x%02x, BlockSize = 0x%04x\n",
3364 pHba->name,
3365 (cmd == I2O_CMD_UTIL_PARAMS_SET) ? "PARAMS_SET"
3366 : "PARAMS_GET",
3367 res[1]>>24, (res[1]>>16)&0xFF, res[1]&0xFFFF);
3368 return -((res[1] >> 16) & 0xFF); /* -BlockStatus */
3371 return 4 + ((res[1] & 0x0000FFFF) << 2); /* bytes used in resblk */
3375 static s32 adpt_i2o_quiesce_hba(adpt_hba* pHba)
3377 u32 msg[4];
3378 int ret;
3380 adpt_i2o_status_get(pHba);
3382 /* SysQuiesce discarded if IOP not in READY or OPERATIONAL state */
3384 if((pHba->status_block->iop_state != ADAPTER_STATE_READY) &&
3385 (pHba->status_block->iop_state != ADAPTER_STATE_OPERATIONAL)){
3386 return 0;
3389 msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
3390 msg[1] = I2O_CMD_SYS_QUIESCE<<24|HOST_TID<<12|ADAPTER_TID;
3391 msg[2] = 0;
3392 msg[3] = 0;
3394 if((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
3395 printk(KERN_INFO"dpti%d: Unable to quiesce (status=%#x).\n",
3396 pHba->unit, -ret);
3397 } else {
3398 printk(KERN_INFO"dpti%d: Quiesced.\n",pHba->unit);
3401 adpt_i2o_status_get(pHba);
3402 return ret;
3406 /*
3407 * Enable IOP. Allows the IOP to resume external operations.
3408 */
3409 static int adpt_i2o_enable_hba(adpt_hba* pHba)
3411 u32 msg[4];
3412 int ret;
3414 adpt_i2o_status_get(pHba);
3415 if(!pHba->status_block){
3416 return -ENOMEM;
3418 /* Enable only allowed on READY state */
3419 if(pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL)
3420 return 0;
3422 if(pHba->status_block->iop_state != ADAPTER_STATE_READY)
3423 return -EINVAL;
3425 msg[0]=FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
3426 msg[1]=I2O_CMD_SYS_ENABLE<<24|HOST_TID<<12|ADAPTER_TID;
3427 msg[2]= 0;
3428 msg[3]= 0;
3430 if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
3431 printk(KERN_WARNING"%s: Could not enable (status=%#10x).\n",
3432 pHba->name, ret);
3433 } else {
3434 PDEBUG("%s: Enabled.\n", pHba->name);
3437 adpt_i2o_status_get(pHba);
3438 return ret;
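/*
 * Push the table built by adpt_i2o_build_sys_table() to one IOP via
 * ExecSysTabSet; the two zero-length SGEs that follow the table
 * stand in for the unused private memory and private I/O space
 * declarations.
 */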
3442 static int adpt_i2o_systab_send(adpt_hba* pHba)
3444 u32 msg[12];
3445 int ret;
3447 msg[0] = I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6;
3448 msg[1] = I2O_CMD_SYS_TAB_SET<<24 | HOST_TID<<12 | ADAPTER_TID;
3449 msg[2] = 0;
3450 msg[3] = 0;
3451 msg[4] = (0<<16) | ((pHba->unit+2) << 12); /* Host 0 IOP ID (unit + 2) */
3452 msg[5] = 0; /* Segment 0 */
3454 /*
3455 * Provide three SGL-elements:
3456 * System table (SysTab), Private memory space declaration and
3457 * Private i/o space declaration
3458 */
3459 msg[6] = 0x54000000 | sys_tbl_len;
3460 msg[7] = (u32)sys_tbl_pa;
3461 msg[8] = 0x54000000 | 0;
3462 msg[9] = 0;
3463 msg[10] = 0xD4000000 | 0;
3464 msg[11] = 0;
3466 if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 120))) {
3467 printk(KERN_INFO "%s: Unable to set SysTab (status=%#10x).\n",
3468 pHba->name, ret);
3470 #ifdef DEBUG
3471 else {
3472 PINFO("%s: SysTab set.\n", pHba->name);
3474 #endif
3476 return ret;
3480 /*============================================================================
3482 *============================================================================
3483 */
3486 #ifdef UARTDELAY
3488 static void adpt_delay(int millisec)
3490 int i;
3491 for (i = 0; i < millisec; i++) {
3492 udelay(1000); /* delay for one millisecond */
3496 #endif
3498 static struct scsi_host_template driver_template = {
3499 .module = THIS_MODULE,
3500 .name = "dpt_i2o",
3501 .proc_name = "dpt_i2o",
3502 .show_info = adpt_show_info,
3503 .info = adpt_info,
3504 .queuecommand = adpt_queue,
3505 .eh_abort_handler = adpt_abort,
3506 .eh_device_reset_handler = adpt_device_reset,
3507 .eh_bus_reset_handler = adpt_bus_reset,
3508 .eh_host_reset_handler = adpt_reset,
3509 .bios_param = adpt_bios_param,
3510 .slave_configure = adpt_slave_configure,
3511 .can_queue = MAX_TO_IOP_MESSAGES,
3512 .this_id = 7,
3513 };
3515 static int __init adpt_init(void)
3517 int error;
3518 adpt_hba *pHba, *next;
3520 printk("Loading Adaptec I2O RAID: Version " DPT_I2O_VERSION "\n");
3522 error = adpt_detect(&driver_template);
3523 if (error < 0)
3524 return error;
3525 if (hba_chain == NULL)
3526 return -ENODEV;
3528 for (pHba = hba_chain; pHba; pHba = pHba->next) {
3529 error = scsi_add_host(pHba->host, &pHba->pDev->dev);
3530 if (error)
3531 goto fail;
3532 scsi_scan_host(pHba->host);
3534 return 0;
3535 fail:
3536 for (pHba = hba_chain; pHba; pHba = next) {
3537 next = pHba->next;
3538 scsi_remove_host(pHba->host);
3540 return error;
3543 static void __exit adpt_exit(void)
3545 adpt_hba *pHba, *next;
3547 for (pHba = hba_chain; pHba; pHba = next) {
3548 next = pHba->next;
3549 adpt_release(pHba);
3553 module_init(adpt_init);
3554 module_exit(adpt_exit);
3556 MODULE_LICENSE("GPL");