Merge branch 'for-linus' of git://oss.sgi.com/xfs/xfs
[linux/fpc-iii.git] / drivers / scsi / arcmsr / arcmsr_hba.c
blobffa54792bb330126615a49a873b79bc718f01b78
1 /*
2 *******************************************************************************
3 ** O.S : Linux
4 ** FILE NAME : arcmsr_hba.c
5 ** BY : Erich Chen
6 ** Description: SCSI RAID Device Driver for
7 ** ARECA RAID Host adapter
8 *******************************************************************************
9 ** Copyright (C) 2002 - 2005, Areca Technology Corporation All rights reserved
11 ** Web site: www.areca.com.tw
12 ** E-mail: support@areca.com.tw
14 ** This program is free software; you can redistribute it and/or modify
15 ** it under the terms of the GNU General Public License version 2 as
16 ** published by the Free Software Foundation.
17 ** This program is distributed in the hope that it will be useful,
18 ** but WITHOUT ANY WARRANTY; without even the implied warranty of
19 ** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 ** GNU General Public License for more details.
21 *******************************************************************************
22 ** Redistribution and use in source and binary forms, with or without
23 ** modification, are permitted provided that the following conditions
24 ** are met:
25 ** 1. Redistributions of source code must retain the above copyright
26 ** notice, this list of conditions and the following disclaimer.
27 ** 2. Redistributions in binary form must reproduce the above copyright
28 ** notice, this list of conditions and the following disclaimer in the
29 ** documentation and/or other materials provided with the distribution.
30 ** 3. The name of the author may not be used to endorse or promote products
31 ** derived from this software without specific prior written permission.
33 ** THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
34 ** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
35 ** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
36 ** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
37 ** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES(INCLUDING,BUT
38 ** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
39 ** DATA, OR PROFITS; OR BUSINESS INTERRUPTION)HOWEVER CAUSED AND ON ANY
40 ** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
41 ** (INCLUDING NEGLIGENCE OR OTHERWISE)ARISING IN ANY WAY OUT OF THE USE OF
42 ** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
43 *******************************************************************************
44 ** For history of changes, see Documentation/scsi/ChangeLog.arcmsr
45 ** Firmware Specification, see Documentation/scsi/arcmsr_spec.txt
46 *******************************************************************************
48 #include <linux/module.h>
49 #include <linux/reboot.h>
50 #include <linux/spinlock.h>
51 #include <linux/pci_ids.h>
52 #include <linux/interrupt.h>
53 #include <linux/moduleparam.h>
54 #include <linux/errno.h>
55 #include <linux/types.h>
56 #include <linux/delay.h>
57 #include <linux/dma-mapping.h>
58 #include <linux/timer.h>
59 #include <linux/pci.h>
60 #include <linux/aer.h>
61 #include <linux/slab.h>
62 #include <asm/dma.h>
63 #include <asm/io.h>
64 #include <asm/system.h>
65 #include <asm/uaccess.h>
66 #include <scsi/scsi_host.h>
67 #include <scsi/scsi.h>
68 #include <scsi/scsi_cmnd.h>
69 #include <scsi/scsi_tcq.h>
70 #include <scsi/scsi_device.h>
71 #include <scsi/scsi_transport.h>
72 #include <scsi/scsicam.h>
73 #include "arcmsr.h"
75 #ifdef CONFIG_SCSI_ARCMSR_RESET
76 static int sleeptime = 20;
77 static int retrycount = 12;
78 module_param(sleeptime, int, S_IRUGO|S_IWUSR);
79 MODULE_PARM_DESC(sleeptime, "The waiting period for FW ready while bus reset");
80 module_param(retrycount, int, S_IRUGO|S_IWUSR);
81 MODULE_PARM_DESC(retrycount, "The retry count for FW ready while bus reset");
82 #endif
83 MODULE_AUTHOR("Erich Chen <support@areca.com.tw>");
84 MODULE_DESCRIPTION("ARECA (ARC11xx/12xx/13xx/16xx) SATA/SAS RAID Host Bus Adapter");
85 MODULE_LICENSE("Dual BSD/GPL");
86 MODULE_VERSION(ARCMSR_DRIVER_VERSION);
88 static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
89 struct scsi_cmnd *cmd);
90 static int arcmsr_iop_confirm(struct AdapterControlBlock *acb);
91 static int arcmsr_abort(struct scsi_cmnd *);
92 static int arcmsr_bus_reset(struct scsi_cmnd *);
93 static int arcmsr_bios_param(struct scsi_device *sdev,
94 struct block_device *bdev, sector_t capacity, int *info);
95 static int arcmsr_queue_command(struct scsi_cmnd *cmd,
96 void (*done) (struct scsi_cmnd *));
97 static int arcmsr_probe(struct pci_dev *pdev,
98 const struct pci_device_id *id);
99 static void arcmsr_remove(struct pci_dev *pdev);
100 static void arcmsr_shutdown(struct pci_dev *pdev);
101 static void arcmsr_iop_init(struct AdapterControlBlock *acb);
102 static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb);
103 static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb);
104 static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb);
105 static void arcmsr_flush_hba_cache(struct AdapterControlBlock *acb);
106 static void arcmsr_flush_hbb_cache(struct AdapterControlBlock *acb);
107 static void arcmsr_request_device_map(unsigned long pacb);
108 static void arcmsr_request_hba_device_map(struct AdapterControlBlock *acb);
109 static void arcmsr_request_hbb_device_map(struct AdapterControlBlock *acb);
110 static void arcmsr_message_isr_bh_fn(struct work_struct *work);
111 static void *arcmsr_get_firmware_spec(struct AdapterControlBlock *acb, int mode);
112 static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb);
114 static const char *arcmsr_info(struct Scsi_Host *);
115 static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb);
116 static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev,
117 int queue_depth, int reason)
119 if (reason != SCSI_QDEPTH_DEFAULT)
120 return -EOPNOTSUPP;
122 if (queue_depth > ARCMSR_MAX_CMD_PERLUN)
123 queue_depth = ARCMSR_MAX_CMD_PERLUN;
124 scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth);
125 return queue_depth;
128 static struct scsi_host_template arcmsr_scsi_host_template = {
129 .module = THIS_MODULE,
130 .name = "ARCMSR ARECA SATA/SAS RAID Host Bus Adapter"
131 ARCMSR_DRIVER_VERSION,
132 .info = arcmsr_info,
133 .queuecommand = arcmsr_queue_command,
134 .eh_abort_handler = arcmsr_abort,
135 .eh_bus_reset_handler = arcmsr_bus_reset,
136 .bios_param = arcmsr_bios_param,
137 .change_queue_depth = arcmsr_adjust_disk_queue_depth,
138 .can_queue = ARCMSR_MAX_OUTSTANDING_CMD,
139 .this_id = ARCMSR_SCSI_INITIATOR_ID,
140 .sg_tablesize = ARCMSR_MAX_SG_ENTRIES,
141 .max_sectors = ARCMSR_MAX_XFER_SECTORS,
142 .cmd_per_lun = ARCMSR_MAX_CMD_PERLUN,
143 .use_clustering = ENABLE_CLUSTERING,
144 .shost_attrs = arcmsr_host_attrs,
146 static struct pci_device_id arcmsr_device_id_table[] = {
147 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1110)},
148 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1120)},
149 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1130)},
150 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1160)},
151 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1170)},
152 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1200)},
153 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1201)},
154 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1202)},
155 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1210)},
156 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1220)},
157 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1230)},
158 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1260)},
159 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1270)},
160 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1280)},
161 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1380)},
162 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1381)},
163 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1680)},
164 {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1681)},
165 {0, 0}, /* Terminating entry */
167 MODULE_DEVICE_TABLE(pci, arcmsr_device_id_table);
168 static struct pci_driver arcmsr_pci_driver = {
169 .name = "arcmsr",
170 .id_table = arcmsr_device_id_table,
171 .probe = arcmsr_probe,
172 .remove = arcmsr_remove,
173 .shutdown = arcmsr_shutdown,
176 static irqreturn_t arcmsr_do_interrupt(int irq, void *dev_id)
178 irqreturn_t handle_state;
179 struct AdapterControlBlock *acb = dev_id;
181 spin_lock(acb->host->host_lock);
182 handle_state = arcmsr_interrupt(acb);
183 spin_unlock(acb->host->host_lock);
185 return handle_state;
188 static int arcmsr_bios_param(struct scsi_device *sdev,
189 struct block_device *bdev, sector_t capacity, int *geom)
191 int ret, heads, sectors, cylinders, total_capacity;
192 unsigned char *buffer;/* return copy of block device's partition table */
194 buffer = scsi_bios_ptable(bdev);
195 if (buffer) {
196 ret = scsi_partsize(buffer, capacity, &geom[2], &geom[0], &geom[1]);
197 kfree(buffer);
198 if (ret != -1)
199 return ret;
201 total_capacity = capacity;
202 heads = 64;
203 sectors = 32;
204 cylinders = total_capacity / (heads * sectors);
205 if (cylinders > 1024) {
206 heads = 255;
207 sectors = 63;
208 cylinders = total_capacity / (heads * sectors);
210 geom[0] = heads;
211 geom[1] = sectors;
212 geom[2] = cylinders;
213 return 0;
216 static void arcmsr_define_adapter_type(struct AdapterControlBlock *acb)
218 struct pci_dev *pdev = acb->pdev;
219 u16 dev_id;
220 pci_read_config_word(pdev, PCI_DEVICE_ID, &dev_id);
221 switch (dev_id) {
222 case 0x1201 : {
223 acb->adapter_type = ACB_ADAPTER_TYPE_B;
225 break;
227 default : acb->adapter_type = ACB_ADAPTER_TYPE_A;
231 static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
234 switch (acb->adapter_type) {
236 case ACB_ADAPTER_TYPE_A: {
237 struct pci_dev *pdev = acb->pdev;
238 void *dma_coherent;
239 dma_addr_t dma_coherent_handle, dma_addr;
240 struct CommandControlBlock *ccb_tmp;
241 int i, j;
243 acb->pmuA = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
244 if (!acb->pmuA) {
245 printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n",
246 acb->host->host_no);
247 return -ENOMEM;
250 dma_coherent = dma_alloc_coherent(&pdev->dev,
251 ARCMSR_MAX_FREECCB_NUM *
252 sizeof (struct CommandControlBlock) + 0x20,
253 &dma_coherent_handle, GFP_KERNEL);
255 if (!dma_coherent) {
256 iounmap(acb->pmuA);
257 return -ENOMEM;
260 acb->dma_coherent = dma_coherent;
261 acb->dma_coherent_handle = dma_coherent_handle;
263 if (((unsigned long)dma_coherent & 0x1F)) {
264 dma_coherent = dma_coherent +
265 (0x20 - ((unsigned long)dma_coherent & 0x1F));
266 dma_coherent_handle = dma_coherent_handle +
267 (0x20 - ((unsigned long)dma_coherent_handle & 0x1F));
270 dma_addr = dma_coherent_handle;
271 ccb_tmp = (struct CommandControlBlock *)dma_coherent;
272 for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
273 ccb_tmp->cdb_shifted_phyaddr = dma_addr >> 5;
274 ccb_tmp->acb = acb;
275 acb->pccb_pool[i] = ccb_tmp;
276 list_add_tail(&ccb_tmp->list, &acb->ccb_free_list);
277 dma_addr = dma_addr + sizeof(struct CommandControlBlock);
278 ccb_tmp++;
281 acb->vir2phy_offset = (unsigned long)ccb_tmp -(unsigned long)dma_addr;
282 for (i = 0; i < ARCMSR_MAX_TARGETID; i++)
283 for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++)
284 acb->devstate[i][j] = ARECA_RAID_GONE;
286 break;
288 case ACB_ADAPTER_TYPE_B: {
290 struct pci_dev *pdev = acb->pdev;
291 struct MessageUnit_B *reg;
292 void __iomem *mem_base0, *mem_base1;
293 void *dma_coherent;
294 dma_addr_t dma_coherent_handle, dma_addr;
295 struct CommandControlBlock *ccb_tmp;
296 int i, j;
298 dma_coherent = dma_alloc_coherent(&pdev->dev,
299 ((ARCMSR_MAX_FREECCB_NUM *
300 sizeof(struct CommandControlBlock) + 0x20) +
301 sizeof(struct MessageUnit_B)),
302 &dma_coherent_handle, GFP_KERNEL);
303 if (!dma_coherent)
304 return -ENOMEM;
306 acb->dma_coherent = dma_coherent;
307 acb->dma_coherent_handle = dma_coherent_handle;
309 if (((unsigned long)dma_coherent & 0x1F)) {
310 dma_coherent = dma_coherent +
311 (0x20 - ((unsigned long)dma_coherent & 0x1F));
312 dma_coherent_handle = dma_coherent_handle +
313 (0x20 - ((unsigned long)dma_coherent_handle & 0x1F));
316 dma_addr = dma_coherent_handle;
317 ccb_tmp = (struct CommandControlBlock *)dma_coherent;
318 for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
319 ccb_tmp->cdb_shifted_phyaddr = dma_addr >> 5;
320 ccb_tmp->acb = acb;
321 acb->pccb_pool[i] = ccb_tmp;
322 list_add_tail(&ccb_tmp->list, &acb->ccb_free_list);
323 dma_addr = dma_addr + sizeof(struct CommandControlBlock);
324 ccb_tmp++;
327 reg = (struct MessageUnit_B *)(dma_coherent +
328 ARCMSR_MAX_FREECCB_NUM * sizeof(struct CommandControlBlock));
329 acb->pmuB = reg;
330 mem_base0 = ioremap(pci_resource_start(pdev, 0),
331 pci_resource_len(pdev, 0));
332 if (!mem_base0)
333 goto out;
335 mem_base1 = ioremap(pci_resource_start(pdev, 2),
336 pci_resource_len(pdev, 2));
337 if (!mem_base1) {
338 iounmap(mem_base0);
339 goto out;
342 reg->drv2iop_doorbell_reg = mem_base0 + ARCMSR_DRV2IOP_DOORBELL;
343 reg->drv2iop_doorbell_mask_reg = mem_base0 +
344 ARCMSR_DRV2IOP_DOORBELL_MASK;
345 reg->iop2drv_doorbell_reg = mem_base0 + ARCMSR_IOP2DRV_DOORBELL;
346 reg->iop2drv_doorbell_mask_reg = mem_base0 +
347 ARCMSR_IOP2DRV_DOORBELL_MASK;
348 reg->ioctl_wbuffer_reg = mem_base1 + ARCMSR_IOCTL_WBUFFER;
349 reg->ioctl_rbuffer_reg = mem_base1 + ARCMSR_IOCTL_RBUFFER;
350 reg->msgcode_rwbuffer_reg = mem_base1 + ARCMSR_MSGCODE_RWBUFFER;
352 acb->vir2phy_offset = (unsigned long)ccb_tmp -(unsigned long)dma_addr;
353 for (i = 0; i < ARCMSR_MAX_TARGETID; i++)
354 for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++)
355 acb->devstate[i][j] = ARECA_RAID_GOOD;
357 break;
359 return 0;
361 out:
362 dma_free_coherent(&acb->pdev->dev,
363 (ARCMSR_MAX_FREECCB_NUM * sizeof(struct CommandControlBlock) + 0x20 +
364 sizeof(struct MessageUnit_B)), acb->dma_coherent, acb->dma_coherent_handle);
365 return -ENOMEM;
367 static void arcmsr_message_isr_bh_fn(struct work_struct *work)
369 struct AdapterControlBlock *acb = container_of(work, struct AdapterControlBlock, arcmsr_do_message_isr_bh);
371 switch (acb->adapter_type) {
372 case ACB_ADAPTER_TYPE_A: {
374 struct MessageUnit_A __iomem *reg = acb->pmuA;
375 char *acb_dev_map = (char *)acb->device_map;
376 uint32_t __iomem *signature = (uint32_t __iomem *) (&reg->message_rwbuffer[0]);
377 char __iomem *devicemap = (char __iomem *) (&reg->message_rwbuffer[21]);
378 int target, lun;
379 struct scsi_device *psdev;
380 char diff;
382 atomic_inc(&acb->rq_map_token);
383 if (readl(signature) == ARCMSR_SIGNATURE_GET_CONFIG) {
384 for (target = 0; target < ARCMSR_MAX_TARGETID - 1; target++) {
385 diff = (*acb_dev_map)^readb(devicemap);
386 if (diff != 0) {
387 char temp;
388 *acb_dev_map = readb(devicemap);
389 temp = *acb_dev_map;
390 for (lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++) {
391 if ((temp & 0x01) == 1 && (diff & 0x01) == 1) {
392 scsi_add_device(acb->host, 0, target, lun);
393 } else if ((temp & 0x01) == 0 && (diff & 0x01) == 1) {
394 psdev = scsi_device_lookup(acb->host, 0, target, lun);
395 if (psdev != NULL) {
396 scsi_remove_device(psdev);
397 scsi_device_put(psdev);
400 temp >>= 1;
401 diff >>= 1;
404 devicemap++;
405 acb_dev_map++;
408 break;
411 case ACB_ADAPTER_TYPE_B: {
412 struct MessageUnit_B *reg = acb->pmuB;
413 char *acb_dev_map = (char *)acb->device_map;
414 uint32_t __iomem *signature = (uint32_t __iomem *)(&reg->msgcode_rwbuffer_reg[0]);
415 char __iomem *devicemap = (char __iomem *)(&reg->msgcode_rwbuffer_reg[21]);
416 int target, lun;
417 struct scsi_device *psdev;
418 char diff;
420 atomic_inc(&acb->rq_map_token);
421 if (readl(signature) == ARCMSR_SIGNATURE_GET_CONFIG) {
422 for (target = 0; target < ARCMSR_MAX_TARGETID - 1; target++) {
423 diff = (*acb_dev_map)^readb(devicemap);
424 if (diff != 0) {
425 char temp;
426 *acb_dev_map = readb(devicemap);
427 temp = *acb_dev_map;
428 for (lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++) {
429 if ((temp & 0x01) == 1 && (diff & 0x01) == 1) {
430 scsi_add_device(acb->host, 0, target, lun);
431 } else if ((temp & 0x01) == 0 && (diff & 0x01) == 1) {
432 psdev = scsi_device_lookup(acb->host, 0, target, lun);
433 if (psdev != NULL) {
434 scsi_remove_device(psdev);
435 scsi_device_put(psdev);
438 temp >>= 1;
439 diff >>= 1;
442 devicemap++;
443 acb_dev_map++;
450 static int arcmsr_probe(struct pci_dev *pdev,
451 const struct pci_device_id *id)
453 struct Scsi_Host *host;
454 struct AdapterControlBlock *acb;
455 uint8_t bus, dev_fun;
456 int error;
458 error = pci_enable_device(pdev);
459 if (error)
460 goto out;
461 pci_set_master(pdev);
463 host = scsi_host_alloc(&arcmsr_scsi_host_template,
464 sizeof(struct AdapterControlBlock));
465 if (!host) {
466 error = -ENOMEM;
467 goto out_disable_device;
469 acb = (struct AdapterControlBlock *)host->hostdata;
470 memset(acb, 0, sizeof (struct AdapterControlBlock));
472 error = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
473 if (error) {
474 error = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
475 if (error) {
476 printk(KERN_WARNING
477 "scsi%d: No suitable DMA mask available\n",
478 host->host_no);
479 goto out_host_put;
482 bus = pdev->bus->number;
483 dev_fun = pdev->devfn;
484 acb->host = host;
485 acb->pdev = pdev;
486 host->max_sectors = ARCMSR_MAX_XFER_SECTORS;
487 host->max_lun = ARCMSR_MAX_TARGETLUN;
488 host->max_id = ARCMSR_MAX_TARGETID;/*16:8*/
489 host->max_cmd_len = 16; /*this is issue of 64bit LBA, over 2T byte*/
490 host->sg_tablesize = ARCMSR_MAX_SG_ENTRIES;
491 host->can_queue = ARCMSR_MAX_FREECCB_NUM; /* max simultaneous cmds */
492 host->cmd_per_lun = ARCMSR_MAX_CMD_PERLUN;
493 host->this_id = ARCMSR_SCSI_INITIATOR_ID;
494 host->unique_id = (bus << 8) | dev_fun;
495 host->irq = pdev->irq;
496 error = pci_request_regions(pdev, "arcmsr");
497 if (error) {
498 goto out_host_put;
500 arcmsr_define_adapter_type(acb);
502 acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
503 ACB_F_MESSAGE_RQBUFFER_CLEARED |
504 ACB_F_MESSAGE_WQBUFFER_READED);
505 acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER;
506 INIT_LIST_HEAD(&acb->ccb_free_list);
507 INIT_WORK(&acb->arcmsr_do_message_isr_bh, arcmsr_message_isr_bh_fn);
508 error = arcmsr_alloc_ccb_pool(acb);
509 if (error)
510 goto out_release_regions;
512 arcmsr_iop_init(acb);
513 error = request_irq(pdev->irq, arcmsr_do_interrupt,
514 IRQF_SHARED, "arcmsr", acb);
515 if (error)
516 goto out_free_ccb_pool;
518 pci_set_drvdata(pdev, host);
519 if (strncmp(acb->firm_version, "V1.42", 5) >= 0)
520 host->max_sectors= ARCMSR_MAX_XFER_SECTORS_B;
522 error = scsi_add_host(host, &pdev->dev);
523 if (error)
524 goto out_free_irq;
526 error = arcmsr_alloc_sysfs_attr(acb);
527 if (error)
528 goto out_free_sysfs;
530 scsi_scan_host(host);
531 #ifdef CONFIG_SCSI_ARCMSR_AER
532 pci_enable_pcie_error_reporting(pdev);
533 #endif
534 atomic_set(&acb->rq_map_token, 16);
535 acb->fw_state = true;
536 init_timer(&acb->eternal_timer);
537 acb->eternal_timer.expires = jiffies + msecs_to_jiffies(10*HZ);
538 acb->eternal_timer.data = (unsigned long) acb;
539 acb->eternal_timer.function = &arcmsr_request_device_map;
540 add_timer(&acb->eternal_timer);
542 return 0;
543 out_free_sysfs:
544 out_free_irq:
545 free_irq(pdev->irq, acb);
546 out_free_ccb_pool:
547 arcmsr_free_ccb_pool(acb);
548 out_release_regions:
549 pci_release_regions(pdev);
550 out_host_put:
551 scsi_host_put(host);
552 out_disable_device:
553 pci_disable_device(pdev);
554 out:
555 return error;
558 static uint8_t arcmsr_hba_wait_msgint_ready(struct AdapterControlBlock *acb)
560 struct MessageUnit_A __iomem *reg = acb->pmuA;
561 uint32_t Index;
562 uint8_t Retries = 0x00;
564 do {
565 for (Index = 0; Index < 100; Index++) {
566 if (readl(&reg->outbound_intstatus) &
567 ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
568 writel(ARCMSR_MU_OUTBOUND_MESSAGE0_INT,
569 &reg->outbound_intstatus);
570 return 0x00;
572 msleep(10);
573 }/*max 1 seconds*/
575 } while (Retries++ < 20);/*max 20 sec*/
576 return 0xff;
579 static uint8_t arcmsr_hbb_wait_msgint_ready(struct AdapterControlBlock *acb)
581 struct MessageUnit_B *reg = acb->pmuB;
582 uint32_t Index;
583 uint8_t Retries = 0x00;
585 do {
586 for (Index = 0; Index < 100; Index++) {
587 if (readl(reg->iop2drv_doorbell_reg)
588 & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
589 writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN
590 , reg->iop2drv_doorbell_reg);
591 writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell_reg);
592 return 0x00;
594 msleep(10);
595 }/*max 1 seconds*/
597 } while (Retries++ < 20);/*max 20 sec*/
598 return 0xff;
601 static uint8_t arcmsr_abort_hba_allcmd(struct AdapterControlBlock *acb)
603 struct MessageUnit_A __iomem *reg = acb->pmuA;
605 writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, &reg->inbound_msgaddr0);
606 if (arcmsr_hba_wait_msgint_ready(acb)) {
607 printk(KERN_NOTICE
608 "arcmsr%d: wait 'abort all outstanding command' timeout \n"
609 , acb->host->host_no);
610 return 0xff;
612 return 0x00;
615 static uint8_t arcmsr_abort_hbb_allcmd(struct AdapterControlBlock *acb)
617 struct MessageUnit_B *reg = acb->pmuB;
619 writel(ARCMSR_MESSAGE_ABORT_CMD, reg->drv2iop_doorbell_reg);
620 if (arcmsr_hbb_wait_msgint_ready(acb)) {
621 printk(KERN_NOTICE
622 "arcmsr%d: wait 'abort all outstanding command' timeout \n"
623 , acb->host->host_no);
624 return 0xff;
626 return 0x00;
629 static uint8_t arcmsr_abort_allcmd(struct AdapterControlBlock *acb)
631 uint8_t rtnval = 0;
632 switch (acb->adapter_type) {
633 case ACB_ADAPTER_TYPE_A: {
634 rtnval = arcmsr_abort_hba_allcmd(acb);
636 break;
638 case ACB_ADAPTER_TYPE_B: {
639 rtnval = arcmsr_abort_hbb_allcmd(acb);
642 return rtnval;
645 static void arcmsr_pci_unmap_dma(struct CommandControlBlock *ccb)
647 struct scsi_cmnd *pcmd = ccb->pcmd;
649 scsi_dma_unmap(pcmd);
652 static void arcmsr_ccb_complete(struct CommandControlBlock *ccb, int stand_flag)
654 struct AdapterControlBlock *acb = ccb->acb;
655 struct scsi_cmnd *pcmd = ccb->pcmd;
657 arcmsr_pci_unmap_dma(ccb);
658 if (stand_flag == 1)
659 atomic_dec(&acb->ccboutstandingcount);
660 ccb->startdone = ARCMSR_CCB_DONE;
661 ccb->ccb_flags = 0;
662 list_add_tail(&ccb->list, &acb->ccb_free_list);
663 pcmd->scsi_done(pcmd);
666 static void arcmsr_flush_hba_cache(struct AdapterControlBlock *acb)
668 struct MessageUnit_A __iomem *reg = acb->pmuA;
669 int retry_count = 30;
671 writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, &reg->inbound_msgaddr0);
672 do {
673 if (!arcmsr_hba_wait_msgint_ready(acb))
674 break;
675 else {
676 retry_count--;
677 printk(KERN_NOTICE "arcmsr%d: wait 'flush adapter cache' \
678 timeout, retry count down = %d \n", acb->host->host_no, retry_count);
680 } while (retry_count != 0);
683 static void arcmsr_flush_hbb_cache(struct AdapterControlBlock *acb)
685 struct MessageUnit_B *reg = acb->pmuB;
686 int retry_count = 30;
688 writel(ARCMSR_MESSAGE_FLUSH_CACHE, reg->drv2iop_doorbell_reg);
689 do {
690 if (!arcmsr_hbb_wait_msgint_ready(acb))
691 break;
692 else {
693 retry_count--;
694 printk(KERN_NOTICE "arcmsr%d: wait 'flush adapter cache' \
695 timeout,retry count down = %d \n", acb->host->host_no, retry_count);
697 } while (retry_count != 0);
700 static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb)
702 switch (acb->adapter_type) {
704 case ACB_ADAPTER_TYPE_A: {
705 arcmsr_flush_hba_cache(acb);
707 break;
709 case ACB_ADAPTER_TYPE_B: {
710 arcmsr_flush_hbb_cache(acb);
715 static void arcmsr_report_sense_info(struct CommandControlBlock *ccb)
718 struct scsi_cmnd *pcmd = ccb->pcmd;
719 struct SENSE_DATA *sensebuffer = (struct SENSE_DATA *)pcmd->sense_buffer;
721 pcmd->result = DID_OK << 16;
722 if (sensebuffer) {
723 int sense_data_length =
724 sizeof(struct SENSE_DATA) < SCSI_SENSE_BUFFERSIZE
725 ? sizeof(struct SENSE_DATA) : SCSI_SENSE_BUFFERSIZE;
726 memset(sensebuffer, 0, SCSI_SENSE_BUFFERSIZE);
727 memcpy(sensebuffer, ccb->arcmsr_cdb.SenseData, sense_data_length);
728 sensebuffer->ErrorCode = SCSI_SENSE_CURRENT_ERRORS;
729 sensebuffer->Valid = 1;
733 static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb)
735 u32 orig_mask = 0;
736 switch (acb->adapter_type) {
738 case ACB_ADAPTER_TYPE_A : {
739 struct MessageUnit_A __iomem *reg = acb->pmuA;
740 orig_mask = readl(&reg->outbound_intmask);
741 writel(orig_mask|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE, \
742 &reg->outbound_intmask);
744 break;
746 case ACB_ADAPTER_TYPE_B : {
747 struct MessageUnit_B *reg = acb->pmuB;
748 orig_mask = readl(reg->iop2drv_doorbell_mask_reg);
749 writel(0, reg->iop2drv_doorbell_mask_reg);
751 break;
753 return orig_mask;
756 static void arcmsr_report_ccb_state(struct AdapterControlBlock *acb, \
757 struct CommandControlBlock *ccb, uint32_t flag_ccb)
760 uint8_t id, lun;
761 id = ccb->pcmd->device->id;
762 lun = ccb->pcmd->device->lun;
763 if (!(flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR)) {
764 if (acb->devstate[id][lun] == ARECA_RAID_GONE)
765 acb->devstate[id][lun] = ARECA_RAID_GOOD;
766 ccb->pcmd->result = DID_OK << 16;
767 arcmsr_ccb_complete(ccb, 1);
768 } else {
769 switch (ccb->arcmsr_cdb.DeviceStatus) {
770 case ARCMSR_DEV_SELECT_TIMEOUT: {
771 acb->devstate[id][lun] = ARECA_RAID_GONE;
772 ccb->pcmd->result = DID_NO_CONNECT << 16;
773 arcmsr_ccb_complete(ccb, 1);
775 break;
777 case ARCMSR_DEV_ABORTED:
779 case ARCMSR_DEV_INIT_FAIL: {
780 acb->devstate[id][lun] = ARECA_RAID_GONE;
781 ccb->pcmd->result = DID_BAD_TARGET << 16;
782 arcmsr_ccb_complete(ccb, 1);
784 break;
786 case ARCMSR_DEV_CHECK_CONDITION: {
787 acb->devstate[id][lun] = ARECA_RAID_GOOD;
788 arcmsr_report_sense_info(ccb);
789 arcmsr_ccb_complete(ccb, 1);
791 break;
793 default:
794 printk(KERN_NOTICE
795 "arcmsr%d: scsi id = %d lun = %d"
796 " isr get command error done, "
797 "but got unknown DeviceStatus = 0x%x \n"
798 , acb->host->host_no
799 , id
800 , lun
801 , ccb->arcmsr_cdb.DeviceStatus);
802 acb->devstate[id][lun] = ARECA_RAID_GONE;
803 ccb->pcmd->result = DID_NO_CONNECT << 16;
804 arcmsr_ccb_complete(ccb, 1);
805 break;
810 static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, uint32_t flag_ccb)
813 struct CommandControlBlock *ccb;
815 ccb = (struct CommandControlBlock *)(acb->vir2phy_offset + (flag_ccb << 5));
816 if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
817 if (ccb->startdone == ARCMSR_CCB_ABORTED) {
818 struct scsi_cmnd *abortcmd = ccb->pcmd;
819 if (abortcmd) {
820 abortcmd->result |= DID_ABORT << 16;
821 arcmsr_ccb_complete(ccb, 1);
822 printk(KERN_NOTICE "arcmsr%d: ccb ='0x%p' \
823 isr got aborted command \n", acb->host->host_no, ccb);
826 printk(KERN_NOTICE "arcmsr%d: isr get an illegal ccb command \
827 done acb = '0x%p'"
828 "ccb = '0x%p' ccbacb = '0x%p' startdone = 0x%x"
829 " ccboutstandingcount = %d \n"
830 , acb->host->host_no
831 , acb
832 , ccb
833 , ccb->acb
834 , ccb->startdone
835 , atomic_read(&acb->ccboutstandingcount));
837 else
838 arcmsr_report_ccb_state(acb, ccb, flag_ccb);
841 static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb)
843 int i = 0;
844 uint32_t flag_ccb;
846 switch (acb->adapter_type) {
848 case ACB_ADAPTER_TYPE_A: {
849 struct MessageUnit_A __iomem *reg = acb->pmuA;
850 uint32_t outbound_intstatus;
851 outbound_intstatus = readl(&reg->outbound_intstatus) &
852 acb->outbound_int_enable;
853 /*clear and abort all outbound posted Q*/
854 writel(outbound_intstatus, &reg->outbound_intstatus);/*clear interrupt*/
855 while (((flag_ccb = readl(&reg->outbound_queueport)) != 0xFFFFFFFF)
856 && (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
857 arcmsr_drain_donequeue(acb, flag_ccb);
860 break;
862 case ACB_ADAPTER_TYPE_B: {
863 struct MessageUnit_B *reg = acb->pmuB;
864 /*clear all outbound posted Q*/
865 for (i = 0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) {
866 if ((flag_ccb = readl(&reg->done_qbuffer[i])) != 0) {
867 writel(0, &reg->done_qbuffer[i]);
868 arcmsr_drain_donequeue(acb, flag_ccb);
870 writel(0, &reg->post_qbuffer[i]);
872 reg->doneq_index = 0;
873 reg->postq_index = 0;
875 break;
878 static void arcmsr_remove(struct pci_dev *pdev)
880 struct Scsi_Host *host = pci_get_drvdata(pdev);
881 struct AdapterControlBlock *acb =
882 (struct AdapterControlBlock *) host->hostdata;
883 int poll_count = 0;
884 arcmsr_free_sysfs_attr(acb);
885 scsi_remove_host(host);
886 flush_scheduled_work();
887 del_timer_sync(&acb->eternal_timer);
888 arcmsr_disable_outbound_ints(acb);
889 arcmsr_stop_adapter_bgrb(acb);
890 arcmsr_flush_adapter_cache(acb);
891 acb->acb_flags |= ACB_F_SCSISTOPADAPTER;
892 acb->acb_flags &= ~ACB_F_IOP_INITED;
894 for (poll_count = 0; poll_count < ARCMSR_MAX_OUTSTANDING_CMD; poll_count++) {
895 if (!atomic_read(&acb->ccboutstandingcount))
896 break;
897 arcmsr_interrupt(acb);/* FIXME: need spinlock */
898 msleep(25);
901 if (atomic_read(&acb->ccboutstandingcount)) {
902 int i;
904 arcmsr_abort_allcmd(acb);
905 arcmsr_done4abort_postqueue(acb);
906 for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
907 struct CommandControlBlock *ccb = acb->pccb_pool[i];
908 if (ccb->startdone == ARCMSR_CCB_START) {
909 ccb->startdone = ARCMSR_CCB_ABORTED;
910 ccb->pcmd->result = DID_ABORT << 16;
911 arcmsr_ccb_complete(ccb, 1);
916 free_irq(pdev->irq, acb);
917 arcmsr_free_ccb_pool(acb);
918 pci_release_regions(pdev);
920 scsi_host_put(host);
922 pci_disable_device(pdev);
923 pci_set_drvdata(pdev, NULL);
926 static void arcmsr_shutdown(struct pci_dev *pdev)
928 struct Scsi_Host *host = pci_get_drvdata(pdev);
929 struct AdapterControlBlock *acb =
930 (struct AdapterControlBlock *)host->hostdata;
931 del_timer_sync(&acb->eternal_timer);
932 arcmsr_disable_outbound_ints(acb);
933 flush_scheduled_work();
934 arcmsr_stop_adapter_bgrb(acb);
935 arcmsr_flush_adapter_cache(acb);
938 static int arcmsr_module_init(void)
940 int error = 0;
942 error = pci_register_driver(&arcmsr_pci_driver);
943 return error;
946 static void arcmsr_module_exit(void)
948 pci_unregister_driver(&arcmsr_pci_driver);
950 module_init(arcmsr_module_init);
951 module_exit(arcmsr_module_exit);
953 static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb,
954 u32 intmask_org)
956 u32 mask;
958 switch (acb->adapter_type) {
960 case ACB_ADAPTER_TYPE_A : {
961 struct MessageUnit_A __iomem *reg = acb->pmuA;
962 mask = intmask_org & ~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE |
963 ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE|
964 ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE);
965 writel(mask, &reg->outbound_intmask);
966 acb->outbound_int_enable = ~(intmask_org & mask) & 0x000000ff;
968 break;
970 case ACB_ADAPTER_TYPE_B : {
971 struct MessageUnit_B *reg = acb->pmuB;
972 mask = intmask_org | (ARCMSR_IOP2DRV_DATA_WRITE_OK |
973 ARCMSR_IOP2DRV_DATA_READ_OK |
974 ARCMSR_IOP2DRV_CDB_DONE |
975 ARCMSR_IOP2DRV_MESSAGE_CMD_DONE);
976 writel(mask, reg->iop2drv_doorbell_mask_reg);
977 acb->outbound_int_enable = (intmask_org | mask) & 0x0000000f;
982 static int arcmsr_build_ccb(struct AdapterControlBlock *acb,
983 struct CommandControlBlock *ccb, struct scsi_cmnd *pcmd)
985 struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;
986 int8_t *psge = (int8_t *)&arcmsr_cdb->u;
987 __le32 address_lo, address_hi;
988 int arccdbsize = 0x30;
989 int nseg;
991 ccb->pcmd = pcmd;
992 memset(arcmsr_cdb, 0, sizeof(struct ARCMSR_CDB));
993 arcmsr_cdb->Bus = 0;
994 arcmsr_cdb->TargetID = pcmd->device->id;
995 arcmsr_cdb->LUN = pcmd->device->lun;
996 arcmsr_cdb->Function = 1;
997 arcmsr_cdb->CdbLength = (uint8_t)pcmd->cmd_len;
998 arcmsr_cdb->Context = (unsigned long)arcmsr_cdb;
999 memcpy(arcmsr_cdb->Cdb, pcmd->cmnd, pcmd->cmd_len);
1001 nseg = scsi_dma_map(pcmd);
1002 if (nseg > ARCMSR_MAX_SG_ENTRIES)
1003 return FAILED;
1004 BUG_ON(nseg < 0);
1006 if (nseg) {
1007 __le32 length;
1008 int i, cdb_sgcount = 0;
1009 struct scatterlist *sg;
1011 /* map stor port SG list to our iop SG List. */
1012 scsi_for_each_sg(pcmd, sg, nseg, i) {
1013 /* Get the physical address of the current data pointer */
1014 length = cpu_to_le32(sg_dma_len(sg));
1015 address_lo = cpu_to_le32(dma_addr_lo32(sg_dma_address(sg)));
1016 address_hi = cpu_to_le32(dma_addr_hi32(sg_dma_address(sg)));
1017 if (address_hi == 0) {
1018 struct SG32ENTRY *pdma_sg = (struct SG32ENTRY *)psge;
1020 pdma_sg->address = address_lo;
1021 pdma_sg->length = length;
1022 psge += sizeof (struct SG32ENTRY);
1023 arccdbsize += sizeof (struct SG32ENTRY);
1024 } else {
1025 struct SG64ENTRY *pdma_sg = (struct SG64ENTRY *)psge;
1027 pdma_sg->addresshigh = address_hi;
1028 pdma_sg->address = address_lo;
1029 pdma_sg->length = length|cpu_to_le32(IS_SG64_ADDR);
1030 psge += sizeof (struct SG64ENTRY);
1031 arccdbsize += sizeof (struct SG64ENTRY);
1033 cdb_sgcount++;
1035 arcmsr_cdb->sgcount = (uint8_t)cdb_sgcount;
1036 arcmsr_cdb->DataLength = scsi_bufflen(pcmd);
1037 if ( arccdbsize > 256)
1038 arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_SGL_BSIZE;
1040 if (pcmd->sc_data_direction == DMA_TO_DEVICE ) {
1041 arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_WRITE;
1042 ccb->ccb_flags |= CCB_FLAG_WRITE;
1044 return SUCCESS;
1047 static void arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandControlBlock *ccb)
1049 uint32_t cdb_shifted_phyaddr = ccb->cdb_shifted_phyaddr;
1050 struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;
1051 atomic_inc(&acb->ccboutstandingcount);
1052 ccb->startdone = ARCMSR_CCB_START;
1054 switch (acb->adapter_type) {
1055 case ACB_ADAPTER_TYPE_A: {
1056 struct MessageUnit_A __iomem *reg = acb->pmuA;
1058 if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE)
1059 writel(cdb_shifted_phyaddr | ARCMSR_CCBPOST_FLAG_SGL_BSIZE,
1060 &reg->inbound_queueport);
1061 else {
1062 writel(cdb_shifted_phyaddr, &reg->inbound_queueport);
1065 break;
1067 case ACB_ADAPTER_TYPE_B: {
1068 struct MessageUnit_B *reg = acb->pmuB;
1069 uint32_t ending_index, index = reg->postq_index;
1071 ending_index = ((index + 1) % ARCMSR_MAX_HBB_POSTQUEUE);
1072 writel(0, &reg->post_qbuffer[ending_index]);
1073 if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) {
1074 writel(cdb_shifted_phyaddr | ARCMSR_CCBPOST_FLAG_SGL_BSIZE,\
1075 &reg->post_qbuffer[index]);
1077 else {
1078 writel(cdb_shifted_phyaddr, &reg->post_qbuffer[index]);
1080 index++;
1081 index %= ARCMSR_MAX_HBB_POSTQUEUE;/*if last index number set it to 0 */
1082 reg->postq_index = index;
1083 writel(ARCMSR_DRV2IOP_CDB_POSTED, reg->drv2iop_doorbell_reg);
1085 break;
1089 static void arcmsr_stop_hba_bgrb(struct AdapterControlBlock *acb)
1091 struct MessageUnit_A __iomem *reg = acb->pmuA;
1092 acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
1093 writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, &reg->inbound_msgaddr0);
1095 if (arcmsr_hba_wait_msgint_ready(acb)) {
1096 printk(KERN_NOTICE
1097 "arcmsr%d: wait 'stop adapter background rebulid' timeout \n"
1098 , acb->host->host_no);
1102 static void arcmsr_stop_hbb_bgrb(struct AdapterControlBlock *acb)
1104 struct MessageUnit_B *reg = acb->pmuB;
1105 acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
1106 writel(ARCMSR_MESSAGE_STOP_BGRB, reg->drv2iop_doorbell_reg);
1108 if (arcmsr_hbb_wait_msgint_ready(acb)) {
1109 printk(KERN_NOTICE
1110 "arcmsr%d: wait 'stop adapter background rebulid' timeout \n"
1111 , acb->host->host_no);
1115 static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb)
1117 switch (acb->adapter_type) {
1118 case ACB_ADAPTER_TYPE_A: {
1119 arcmsr_stop_hba_bgrb(acb);
1121 break;
1123 case ACB_ADAPTER_TYPE_B: {
1124 arcmsr_stop_hbb_bgrb(acb);
1126 break;
1130 static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb)
1132 switch (acb->adapter_type) {
1133 case ACB_ADAPTER_TYPE_A: {
1134 iounmap(acb->pmuA);
1135 dma_free_coherent(&acb->pdev->dev,
1136 ARCMSR_MAX_FREECCB_NUM * sizeof (struct CommandControlBlock) + 0x20,
1137 acb->dma_coherent,
1138 acb->dma_coherent_handle);
1139 break;
1141 case ACB_ADAPTER_TYPE_B: {
1142 struct MessageUnit_B *reg = acb->pmuB;
1143 iounmap((u8 *)reg->drv2iop_doorbell_reg - ARCMSR_DRV2IOP_DOORBELL);
1144 iounmap((u8 *)reg->ioctl_wbuffer_reg - ARCMSR_IOCTL_WBUFFER);
1145 dma_free_coherent(&acb->pdev->dev,
1146 (ARCMSR_MAX_FREECCB_NUM * sizeof(struct CommandControlBlock) + 0x20 +
1147 sizeof(struct MessageUnit_B)), acb->dma_coherent, acb->dma_coherent_handle);
1153 void arcmsr_iop_message_read(struct AdapterControlBlock *acb)
1155 switch (acb->adapter_type) {
1156 case ACB_ADAPTER_TYPE_A: {
1157 struct MessageUnit_A __iomem *reg = acb->pmuA;
1158 writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, &reg->inbound_doorbell);
1160 break;
1162 case ACB_ADAPTER_TYPE_B: {
1163 struct MessageUnit_B *reg = acb->pmuB;
1164 writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell_reg);
1166 break;
1170 static void arcmsr_iop_message_wrote(struct AdapterControlBlock *acb)
1172 switch (acb->adapter_type) {
1173 case ACB_ADAPTER_TYPE_A: {
1174 struct MessageUnit_A __iomem *reg = acb->pmuA;
1176 ** push inbound doorbell tell iop, driver data write ok
1177 ** and wait reply on next hwinterrupt for next Qbuffer post
1179 writel(ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK, &reg->inbound_doorbell);
1181 break;
1183 case ACB_ADAPTER_TYPE_B: {
1184 struct MessageUnit_B *reg = acb->pmuB;
1186 ** push inbound doorbell tell iop, driver data write ok
1187 ** and wait reply on next hwinterrupt for next Qbuffer post
1189 writel(ARCMSR_DRV2IOP_DATA_WRITE_OK, reg->drv2iop_doorbell_reg);
1191 break;
1195 struct QBUFFER __iomem *arcmsr_get_iop_rqbuffer(struct AdapterControlBlock *acb)
1197 struct QBUFFER __iomem *qbuffer = NULL;
1199 switch (acb->adapter_type) {
1201 case ACB_ADAPTER_TYPE_A: {
1202 struct MessageUnit_A __iomem *reg = acb->pmuA;
1203 qbuffer = (struct QBUFFER __iomem *)&reg->message_rbuffer;
1205 break;
1207 case ACB_ADAPTER_TYPE_B: {
1208 struct MessageUnit_B *reg = acb->pmuB;
1209 qbuffer = (struct QBUFFER __iomem *)reg->ioctl_rbuffer_reg;
1211 break;
1213 return qbuffer;
1216 static struct QBUFFER __iomem *arcmsr_get_iop_wqbuffer(struct AdapterControlBlock *acb)
1218 struct QBUFFER __iomem *pqbuffer = NULL;
1220 switch (acb->adapter_type) {
1222 case ACB_ADAPTER_TYPE_A: {
1223 struct MessageUnit_A __iomem *reg = acb->pmuA;
1224 pqbuffer = (struct QBUFFER __iomem *) &reg->message_wbuffer;
1226 break;
1228 case ACB_ADAPTER_TYPE_B: {
1229 struct MessageUnit_B *reg = acb->pmuB;
1230 pqbuffer = (struct QBUFFER __iomem *)reg->ioctl_wbuffer_reg;
1232 break;
1234 return pqbuffer;
1237 static void arcmsr_iop2drv_data_wrote_handle(struct AdapterControlBlock *acb)
1239 struct QBUFFER __iomem *prbuffer;
1240 struct QBUFFER *pQbuffer;
1241 uint8_t __iomem *iop_data;
1242 int32_t my_empty_len, iop_len, rqbuf_firstindex, rqbuf_lastindex;
1244 rqbuf_lastindex = acb->rqbuf_lastindex;
1245 rqbuf_firstindex = acb->rqbuf_firstindex;
1246 prbuffer = arcmsr_get_iop_rqbuffer(acb);
1247 iop_data = (uint8_t __iomem *)prbuffer->data;
1248 iop_len = prbuffer->data_len;
1249 my_empty_len = (rqbuf_firstindex - rqbuf_lastindex -1)&(ARCMSR_MAX_QBUFFER -1);
1251 if (my_empty_len >= iop_len)
1253 while (iop_len > 0) {
1254 pQbuffer = (struct QBUFFER *)&acb->rqbuffer[rqbuf_lastindex];
1255 memcpy(pQbuffer, iop_data,1);
1256 rqbuf_lastindex++;
1257 rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
1258 iop_data++;
1259 iop_len--;
1261 acb->rqbuf_lastindex = rqbuf_lastindex;
1262 arcmsr_iop_message_read(acb);
1265 else {
1266 acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
1270 static void arcmsr_iop2drv_data_read_handle(struct AdapterControlBlock *acb)
1272 acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_READED;
1273 if (acb->wqbuf_firstindex != acb->wqbuf_lastindex) {
1274 uint8_t *pQbuffer;
1275 struct QBUFFER __iomem *pwbuffer;
1276 uint8_t __iomem *iop_data;
1277 int32_t allxfer_len = 0;
1279 acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED);
1280 pwbuffer = arcmsr_get_iop_wqbuffer(acb);
1281 iop_data = (uint8_t __iomem *)pwbuffer->data;
1283 while ((acb->wqbuf_firstindex != acb->wqbuf_lastindex) && \
1284 (allxfer_len < 124)) {
1285 pQbuffer = &acb->wqbuffer[acb->wqbuf_firstindex];
1286 memcpy(iop_data, pQbuffer, 1);
1287 acb->wqbuf_firstindex++;
1288 acb->wqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
1289 iop_data++;
1290 allxfer_len++;
1292 pwbuffer->data_len = allxfer_len;
1294 arcmsr_iop_message_wrote(acb);
1297 if (acb->wqbuf_firstindex == acb->wqbuf_lastindex) {
1298 acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_CLEARED;
1302 static void arcmsr_hba_doorbell_isr(struct AdapterControlBlock *acb)
1304 uint32_t outbound_doorbell;
1305 struct MessageUnit_A __iomem *reg = acb->pmuA;
1307 outbound_doorbell = readl(&reg->outbound_doorbell);
1308 writel(outbound_doorbell, &reg->outbound_doorbell);
1309 if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK) {
1310 arcmsr_iop2drv_data_wrote_handle(acb);
1313 if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_READ_OK) {
1314 arcmsr_iop2drv_data_read_handle(acb);
1318 static void arcmsr_hba_postqueue_isr(struct AdapterControlBlock *acb)
1320 uint32_t flag_ccb;
1321 struct MessageUnit_A __iomem *reg = acb->pmuA;
1323 while ((flag_ccb = readl(&reg->outbound_queueport)) != 0xFFFFFFFF) {
1324 arcmsr_drain_donequeue(acb, flag_ccb);
1328 static void arcmsr_hbb_postqueue_isr(struct AdapterControlBlock *acb)
1330 uint32_t index;
1331 uint32_t flag_ccb;
1332 struct MessageUnit_B *reg = acb->pmuB;
1334 index = reg->doneq_index;
1336 while ((flag_ccb = readl(&reg->done_qbuffer[index])) != 0) {
1337 writel(0, &reg->done_qbuffer[index]);
1338 arcmsr_drain_donequeue(acb, flag_ccb);
1339 index++;
1340 index %= ARCMSR_MAX_HBB_POSTQUEUE;
1341 reg->doneq_index = index;
1345 **********************************************************************************
1346 ** Handle a message interrupt
1348 ** The only message interrupt we expect is in response to a query for the current adapter config.
1349 ** We want this in order to compare the drivemap so that we can detect newly-attached drives.
1350 **********************************************************************************
1352 static void arcmsr_hba_message_isr(struct AdapterControlBlock *acb)
1354 struct MessageUnit_A *reg = acb->pmuA;
1356 /*clear interrupt and message state*/
1357 writel(ARCMSR_MU_OUTBOUND_MESSAGE0_INT, &reg->outbound_intstatus);
1358 schedule_work(&acb->arcmsr_do_message_isr_bh);
1360 static void arcmsr_hbb_message_isr(struct AdapterControlBlock *acb)
1362 struct MessageUnit_B *reg = acb->pmuB;
1364 /*clear interrupt and message state*/
1365 writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN, reg->iop2drv_doorbell_reg);
1366 schedule_work(&acb->arcmsr_do_message_isr_bh);
1368 static int arcmsr_handle_hba_isr(struct AdapterControlBlock *acb)
1370 uint32_t outbound_intstatus;
1371 struct MessageUnit_A __iomem *reg = acb->pmuA;
1373 outbound_intstatus = readl(&reg->outbound_intstatus) &
1374 acb->outbound_int_enable;
1375 if (!(outbound_intstatus & ARCMSR_MU_OUTBOUND_HANDLE_INT)) {
1376 return 1;
1378 writel(outbound_intstatus, &reg->outbound_intstatus);
1379 if (outbound_intstatus & ARCMSR_MU_OUTBOUND_DOORBELL_INT) {
1380 arcmsr_hba_doorbell_isr(acb);
1382 if (outbound_intstatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT) {
1383 arcmsr_hba_postqueue_isr(acb);
1385 if (outbound_intstatus & ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
1386 /* messenger of "driver to iop commands" */
1387 arcmsr_hba_message_isr(acb);
1389 return 0;
1392 static int arcmsr_handle_hbb_isr(struct AdapterControlBlock *acb)
1394 uint32_t outbound_doorbell;
1395 struct MessageUnit_B *reg = acb->pmuB;
1397 outbound_doorbell = readl(reg->iop2drv_doorbell_reg) &
1398 acb->outbound_int_enable;
1399 if (!outbound_doorbell)
1400 return 1;
1402 writel(~outbound_doorbell, reg->iop2drv_doorbell_reg);
1403 /*in case the last action of doorbell interrupt clearance is cached,
1404 this action can push HW to write down the clear bit*/
1405 readl(reg->iop2drv_doorbell_reg);
1406 writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell_reg);
1407 if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK) {
1408 arcmsr_iop2drv_data_wrote_handle(acb);
1410 if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_READ_OK) {
1411 arcmsr_iop2drv_data_read_handle(acb);
1413 if (outbound_doorbell & ARCMSR_IOP2DRV_CDB_DONE) {
1414 arcmsr_hbb_postqueue_isr(acb);
1416 if (outbound_doorbell & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
1417 /* messenger of "driver to iop commands" */
1418 arcmsr_hbb_message_isr(acb);
1421 return 0;
1424 static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb)
1426 switch (acb->adapter_type) {
1427 case ACB_ADAPTER_TYPE_A: {
1428 if (arcmsr_handle_hba_isr(acb)) {
1429 return IRQ_NONE;
1432 break;
1434 case ACB_ADAPTER_TYPE_B: {
1435 if (arcmsr_handle_hbb_isr(acb)) {
1436 return IRQ_NONE;
1439 break;
1441 return IRQ_HANDLED;
1444 static void arcmsr_iop_parking(struct AdapterControlBlock *acb)
1446 if (acb) {
1447 /* stop adapter background rebuild */
1448 if (acb->acb_flags & ACB_F_MSG_START_BGRB) {
1449 uint32_t intmask_org;
1450 acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
1451 intmask_org = arcmsr_disable_outbound_ints(acb);
1452 arcmsr_stop_adapter_bgrb(acb);
1453 arcmsr_flush_adapter_cache(acb);
1454 arcmsr_enable_outbound_ints(acb, intmask_org);
1459 void arcmsr_post_ioctldata2iop(struct AdapterControlBlock *acb)
1461 int32_t wqbuf_firstindex, wqbuf_lastindex;
1462 uint8_t *pQbuffer;
1463 struct QBUFFER __iomem *pwbuffer;
1464 uint8_t __iomem *iop_data;
1465 int32_t allxfer_len = 0;
1467 pwbuffer = arcmsr_get_iop_wqbuffer(acb);
1468 iop_data = (uint8_t __iomem *)pwbuffer->data;
1469 if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READED) {
1470 acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED);
1471 wqbuf_firstindex = acb->wqbuf_firstindex;
1472 wqbuf_lastindex = acb->wqbuf_lastindex;
1473 while ((wqbuf_firstindex != wqbuf_lastindex) && (allxfer_len < 124)) {
1474 pQbuffer = &acb->wqbuffer[wqbuf_firstindex];
1475 memcpy(iop_data, pQbuffer, 1);
1476 wqbuf_firstindex++;
1477 wqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
1478 iop_data++;
1479 allxfer_len++;
1481 acb->wqbuf_firstindex = wqbuf_firstindex;
1482 pwbuffer->data_len = allxfer_len;
1483 arcmsr_iop_message_wrote(acb);
1487 static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
1488 struct scsi_cmnd *cmd)
1490 struct CMD_MESSAGE_FIELD *pcmdmessagefld;
1491 int retvalue = 0, transfer_len = 0;
1492 char *buffer;
1493 struct scatterlist *sg;
1494 uint32_t controlcode = (uint32_t ) cmd->cmnd[5] << 24 |
1495 (uint32_t ) cmd->cmnd[6] << 16 |
1496 (uint32_t ) cmd->cmnd[7] << 8 |
1497 (uint32_t ) cmd->cmnd[8];
1498 /* 4 bytes: Areca io control code */
1500 sg = scsi_sglist(cmd);
1501 buffer = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset;
1502 if (scsi_sg_count(cmd) > 1) {
1503 retvalue = ARCMSR_MESSAGE_FAIL;
1504 goto message_out;
1506 transfer_len += sg->length;
1508 if (transfer_len > sizeof(struct CMD_MESSAGE_FIELD)) {
1509 retvalue = ARCMSR_MESSAGE_FAIL;
1510 goto message_out;
1512 pcmdmessagefld = (struct CMD_MESSAGE_FIELD *) buffer;
1513 switch(controlcode) {
1515 case ARCMSR_MESSAGE_READ_RQBUFFER: {
1516 unsigned char *ver_addr;
1517 uint8_t *pQbuffer, *ptmpQbuffer;
1518 int32_t allxfer_len = 0;
1520 ver_addr = kmalloc(1032, GFP_ATOMIC);
1521 if (!ver_addr) {
1522 retvalue = ARCMSR_MESSAGE_FAIL;
1523 goto message_out;
1526 if (!acb->fw_state) {
1527 pcmdmessagefld->cmdmessage.ReturnCode =
1528 ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
1529 goto message_out;
1532 ptmpQbuffer = ver_addr;
1533 while ((acb->rqbuf_firstindex != acb->rqbuf_lastindex)
1534 && (allxfer_len < 1031)) {
1535 pQbuffer = &acb->rqbuffer[acb->rqbuf_firstindex];
1536 memcpy(ptmpQbuffer, pQbuffer, 1);
1537 acb->rqbuf_firstindex++;
1538 acb->rqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
1539 ptmpQbuffer++;
1540 allxfer_len++;
1542 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
1544 struct QBUFFER __iomem *prbuffer;
1545 uint8_t __iomem *iop_data;
1546 int32_t iop_len;
1548 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
1549 prbuffer = arcmsr_get_iop_rqbuffer(acb);
1550 iop_data = prbuffer->data;
1551 iop_len = readl(&prbuffer->data_len);
1552 while (iop_len > 0) {
1553 acb->rqbuffer[acb->rqbuf_lastindex] = readb(iop_data);
1554 acb->rqbuf_lastindex++;
1555 acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
1556 iop_data++;
1557 iop_len--;
1559 arcmsr_iop_message_read(acb);
1561 memcpy(pcmdmessagefld->messagedatabuffer, ver_addr, allxfer_len);
1562 pcmdmessagefld->cmdmessage.Length = allxfer_len;
1563 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
1564 kfree(ver_addr);
1566 break;
1568 case ARCMSR_MESSAGE_WRITE_WQBUFFER: {
1569 unsigned char *ver_addr;
1570 int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex;
1571 uint8_t *pQbuffer, *ptmpuserbuffer;
1573 ver_addr = kmalloc(1032, GFP_ATOMIC);
1574 if (!ver_addr) {
1575 retvalue = ARCMSR_MESSAGE_FAIL;
1576 goto message_out;
1578 if (!acb->fw_state) {
1579 pcmdmessagefld->cmdmessage.ReturnCode =
1580 ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
1581 goto message_out;
1584 ptmpuserbuffer = ver_addr;
1585 user_len = pcmdmessagefld->cmdmessage.Length;
1586 memcpy(ptmpuserbuffer, pcmdmessagefld->messagedatabuffer, user_len);
1587 wqbuf_lastindex = acb->wqbuf_lastindex;
1588 wqbuf_firstindex = acb->wqbuf_firstindex;
1589 if (wqbuf_lastindex != wqbuf_firstindex) {
1590 struct SENSE_DATA *sensebuffer =
1591 (struct SENSE_DATA *)cmd->sense_buffer;
1592 arcmsr_post_ioctldata2iop(acb);
1593 /* has error report sensedata */
1594 sensebuffer->ErrorCode = 0x70;
1595 sensebuffer->SenseKey = ILLEGAL_REQUEST;
1596 sensebuffer->AdditionalSenseLength = 0x0A;
1597 sensebuffer->AdditionalSenseCode = 0x20;
1598 sensebuffer->Valid = 1;
1599 retvalue = ARCMSR_MESSAGE_FAIL;
1600 } else {
1601 my_empty_len = (wqbuf_firstindex-wqbuf_lastindex - 1)
1602 &(ARCMSR_MAX_QBUFFER - 1);
1603 if (my_empty_len >= user_len) {
1604 while (user_len > 0) {
1605 pQbuffer =
1606 &acb->wqbuffer[acb->wqbuf_lastindex];
1607 memcpy(pQbuffer, ptmpuserbuffer, 1);
1608 acb->wqbuf_lastindex++;
1609 acb->wqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
1610 ptmpuserbuffer++;
1611 user_len--;
1613 if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) {
1614 acb->acb_flags &=
1615 ~ACB_F_MESSAGE_WQBUFFER_CLEARED;
1616 arcmsr_post_ioctldata2iop(acb);
1618 } else {
1619 /* has error report sensedata */
1620 struct SENSE_DATA *sensebuffer =
1621 (struct SENSE_DATA *)cmd->sense_buffer;
1622 sensebuffer->ErrorCode = 0x70;
1623 sensebuffer->SenseKey = ILLEGAL_REQUEST;
1624 sensebuffer->AdditionalSenseLength = 0x0A;
1625 sensebuffer->AdditionalSenseCode = 0x20;
1626 sensebuffer->Valid = 1;
1627 retvalue = ARCMSR_MESSAGE_FAIL;
1630 kfree(ver_addr);
1632 break;
1634 case ARCMSR_MESSAGE_CLEAR_RQBUFFER: {
1635 uint8_t *pQbuffer = acb->rqbuffer;
1636 if (!acb->fw_state) {
1637 pcmdmessagefld->cmdmessage.ReturnCode =
1638 ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
1639 goto message_out;
1642 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
1643 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
1644 arcmsr_iop_message_read(acb);
1646 acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
1647 acb->rqbuf_firstindex = 0;
1648 acb->rqbuf_lastindex = 0;
1649 memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
1650 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
1652 break;
1654 case ARCMSR_MESSAGE_CLEAR_WQBUFFER: {
1655 uint8_t *pQbuffer = acb->wqbuffer;
1656 if (!acb->fw_state) {
1657 pcmdmessagefld->cmdmessage.ReturnCode =
1658 ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
1659 goto message_out;
1662 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
1663 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
1664 arcmsr_iop_message_read(acb);
1666 acb->acb_flags |=
1667 (ACB_F_MESSAGE_WQBUFFER_CLEARED |
1668 ACB_F_MESSAGE_WQBUFFER_READED);
1669 acb->wqbuf_firstindex = 0;
1670 acb->wqbuf_lastindex = 0;
1671 memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
1672 pcmdmessagefld->cmdmessage.ReturnCode =
1673 ARCMSR_MESSAGE_RETURNCODE_OK;
1675 break;
1677 case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: {
1678 uint8_t *pQbuffer;
1679 if (!acb->fw_state) {
1680 pcmdmessagefld->cmdmessage.ReturnCode =
1681 ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
1682 goto message_out;
1685 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
1686 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
1687 arcmsr_iop_message_read(acb);
1689 acb->acb_flags |=
1690 (ACB_F_MESSAGE_WQBUFFER_CLEARED
1691 | ACB_F_MESSAGE_RQBUFFER_CLEARED
1692 | ACB_F_MESSAGE_WQBUFFER_READED);
1693 acb->rqbuf_firstindex = 0;
1694 acb->rqbuf_lastindex = 0;
1695 acb->wqbuf_firstindex = 0;
1696 acb->wqbuf_lastindex = 0;
1697 pQbuffer = acb->rqbuffer;
1698 memset(pQbuffer, 0, sizeof(struct QBUFFER));
1699 pQbuffer = acb->wqbuffer;
1700 memset(pQbuffer, 0, sizeof(struct QBUFFER));
1701 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
1703 break;
1705 case ARCMSR_MESSAGE_RETURN_CODE_3F: {
1706 if (!acb->fw_state) {
1707 pcmdmessagefld->cmdmessage.ReturnCode =
1708 ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
1709 goto message_out;
1711 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_3F;
1713 break;
1715 case ARCMSR_MESSAGE_SAY_HELLO: {
1716 int8_t *hello_string = "Hello! I am ARCMSR";
1717 if (!acb->fw_state) {
1718 pcmdmessagefld->cmdmessage.ReturnCode =
1719 ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
1720 goto message_out;
1722 memcpy(pcmdmessagefld->messagedatabuffer, hello_string
1723 , (int16_t)strlen(hello_string));
1724 pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
1726 break;
1728 case ARCMSR_MESSAGE_SAY_GOODBYE:
1729 if (!acb->fw_state) {
1730 pcmdmessagefld->cmdmessage.ReturnCode =
1731 ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
1732 goto message_out;
1734 arcmsr_iop_parking(acb);
1735 break;
1737 case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE:
1738 if (!acb->fw_state) {
1739 pcmdmessagefld->cmdmessage.ReturnCode =
1740 ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
1741 goto message_out;
1743 arcmsr_flush_adapter_cache(acb);
1744 break;
1746 default:
1747 retvalue = ARCMSR_MESSAGE_FAIL;
1749 message_out:
1750 sg = scsi_sglist(cmd);
1751 kunmap_atomic(buffer - sg->offset, KM_IRQ0);
1752 return retvalue;
1755 static struct CommandControlBlock *arcmsr_get_freeccb(struct AdapterControlBlock *acb)
1757 struct list_head *head = &acb->ccb_free_list;
1758 struct CommandControlBlock *ccb = NULL;
1760 if (!list_empty(head)) {
1761 ccb = list_entry(head->next, struct CommandControlBlock, list);
1762 list_del(head->next);
1764 return ccb;
1767 static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb,
1768 struct scsi_cmnd *cmd)
1770 switch (cmd->cmnd[0]) {
1771 case INQUIRY: {
1772 unsigned char inqdata[36];
1773 char *buffer;
1774 struct scatterlist *sg;
1776 if (cmd->device->lun) {
1777 cmd->result = (DID_TIME_OUT << 16);
1778 cmd->scsi_done(cmd);
1779 return;
1781 inqdata[0] = TYPE_PROCESSOR;
1782 /* Periph Qualifier & Periph Dev Type */
1783 inqdata[1] = 0;
1784 /* rem media bit & Dev Type Modifier */
1785 inqdata[2] = 0;
1786 /* ISO, ECMA, & ANSI versions */
1787 inqdata[4] = 31;
1788 /* length of additional data */
1789 strncpy(&inqdata[8], "Areca ", 8);
1790 /* Vendor Identification */
1791 strncpy(&inqdata[16], "RAID controller ", 16);
1792 /* Product Identification */
1793 strncpy(&inqdata[32], "R001", 4); /* Product Revision */
1795 sg = scsi_sglist(cmd);
1796 buffer = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset;
1798 memcpy(buffer, inqdata, sizeof(inqdata));
1799 sg = scsi_sglist(cmd);
1800 kunmap_atomic(buffer - sg->offset, KM_IRQ0);
1802 cmd->scsi_done(cmd);
1804 break;
1805 case WRITE_BUFFER:
1806 case READ_BUFFER: {
1807 if (arcmsr_iop_message_xfer(acb, cmd))
1808 cmd->result = (DID_ERROR << 16);
1809 cmd->scsi_done(cmd);
1811 break;
1812 default:
1813 cmd->scsi_done(cmd);
1817 static int arcmsr_queue_command(struct scsi_cmnd *cmd,
1818 void (* done)(struct scsi_cmnd *))
1820 struct Scsi_Host *host = cmd->device->host;
1821 struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
1822 struct CommandControlBlock *ccb;
1823 int target = cmd->device->id;
1824 int lun = cmd->device->lun;
1825 uint8_t scsicmd = cmd->cmnd[0];
1826 cmd->scsi_done = done;
1827 cmd->host_scribble = NULL;
1828 cmd->result = 0;
1830 if ((scsicmd == SYNCHRONIZE_CACHE) || (scsicmd == SEND_DIAGNOSTIC)) {
1831 if (acb->devstate[target][lun] == ARECA_RAID_GONE) {
1832 cmd->result = (DID_NO_CONNECT << 16);
1834 cmd->scsi_done(cmd);
1835 return 0;
1838 if (acb->acb_flags & ACB_F_BUS_RESET) {
1839 switch (acb->adapter_type) {
1840 case ACB_ADAPTER_TYPE_A: {
1841 struct MessageUnit_A __iomem *reg = acb->pmuA;
1842 uint32_t intmask_org, outbound_doorbell;
1844 if ((readl(&reg->outbound_msgaddr1) &
1845 ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0) {
1846 printk(KERN_NOTICE "arcmsr%d: bus reset and return busy\n",
1847 acb->host->host_no);
1848 return SCSI_MLQUEUE_HOST_BUSY;
1851 acb->acb_flags &= ~ACB_F_FIRMWARE_TRAP;
1852 printk(KERN_NOTICE "arcmsr%d: hardware bus reset and reset ok\n",
1853 acb->host->host_no);
1854 /* disable all outbound interrupt */
1855 intmask_org = arcmsr_disable_outbound_ints(acb);
1856 arcmsr_get_firmware_spec(acb, 1);
1857 /*start background rebuild*/
1858 arcmsr_start_adapter_bgrb(acb);
1859 /* clear Qbuffer if door bell ringed */
1860 outbound_doorbell = readl(&reg->outbound_doorbell);
1861 /*clear interrupt */
1862 writel(outbound_doorbell, &reg->outbound_doorbell);
1863 writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK,
1864 &reg->inbound_doorbell);
1865 /* enable outbound Post Queue,outbound doorbell Interrupt */
1866 arcmsr_enable_outbound_ints(acb, intmask_org);
1867 acb->acb_flags |= ACB_F_IOP_INITED;
1868 acb->acb_flags &= ~ACB_F_BUS_RESET;
1870 break;
1871 case ACB_ADAPTER_TYPE_B: {
1876 if (target == 16) {
1877 /* virtual device for iop message transfer */
1878 arcmsr_handle_virtual_command(acb, cmd);
1879 return 0;
1881 if (acb->devstate[target][lun] == ARECA_RAID_GONE) {
1882 uint8_t block_cmd;
1884 block_cmd = cmd->cmnd[0] & 0x0f;
1885 if (block_cmd == 0x08 || block_cmd == 0x0a) {
1886 printk(KERN_NOTICE
1887 "arcmsr%d: block 'read/write'"
1888 "command with gone raid volume"
1889 " Cmd = %2x, TargetId = %d, Lun = %d \n"
1890 , acb->host->host_no
1891 , cmd->cmnd[0]
1892 , target, lun);
1893 cmd->result = (DID_NO_CONNECT << 16);
1894 cmd->scsi_done(cmd);
1895 return 0;
1898 if (atomic_read(&acb->ccboutstandingcount) >=
1899 ARCMSR_MAX_OUTSTANDING_CMD)
1900 return SCSI_MLQUEUE_HOST_BUSY;
1902 ccb = arcmsr_get_freeccb(acb);
1903 if (!ccb)
1904 return SCSI_MLQUEUE_HOST_BUSY;
1905 if ( arcmsr_build_ccb( acb, ccb, cmd ) == FAILED ) {
1906 cmd->result = (DID_ERROR << 16) | (RESERVATION_CONFLICT << 1);
1907 cmd->scsi_done(cmd);
1908 return 0;
1910 arcmsr_post_ccb(acb, ccb);
1911 return 0;
1914 static void *arcmsr_get_hba_config(struct AdapterControlBlock *acb, int mode)
1916 struct MessageUnit_A __iomem *reg = acb->pmuA;
1917 char *acb_firm_model = acb->firm_model;
1918 char *acb_firm_version = acb->firm_version;
1919 char *acb_device_map = acb->device_map;
1920 char __iomem *iop_firm_model = (char __iomem *)(&reg->message_rwbuffer[15]);
1921 char __iomem *iop_firm_version = (char __iomem *)(&reg->message_rwbuffer[17]);
1922 char __iomem *iop_device_map = (char __iomem *) (&reg->message_rwbuffer[21]);
1923 int count;
1925 writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
1926 if (arcmsr_hba_wait_msgint_ready(acb)) {
1927 printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \
1928 miscellaneous data' timeout \n", acb->host->host_no);
1929 return NULL;
1932 if (mode == 1) {
1933 count = 8;
1934 while (count) {
1935 *acb_firm_model = readb(iop_firm_model);
1936 acb_firm_model++;
1937 iop_firm_model++;
1938 count--;
1941 count = 16;
1942 while (count) {
1943 *acb_firm_version = readb(iop_firm_version);
1944 acb_firm_version++;
1945 iop_firm_version++;
1946 count--;
1949 count = 16;
1950 while (count) {
1951 *acb_device_map = readb(iop_device_map);
1952 acb_device_map++;
1953 iop_device_map++;
1954 count--;
1957 printk(KERN_INFO "ARECA RAID ADAPTER%d: FIRMWARE VERSION %s \n"
1958 , acb->host->host_no
1959 , acb->firm_version);
1960 acb->signature = readl(&reg->message_rwbuffer[0]);
1961 acb->firm_request_len = readl(&reg->message_rwbuffer[1]);
1962 acb->firm_numbers_queue = readl(&reg->message_rwbuffer[2]);
1963 acb->firm_sdram_size = readl(&reg->message_rwbuffer[3]);
1964 acb->firm_hd_channels = readl(&reg->message_rwbuffer[4]);
1966 return reg->message_rwbuffer;
1968 static void __iomem *arcmsr_get_hbb_config(struct AdapterControlBlock *acb, int mode)
1970 struct MessageUnit_B *reg = acb->pmuB;
1971 uint32_t __iomem *lrwbuffer = reg->msgcode_rwbuffer_reg;
1972 char *acb_firm_model = acb->firm_model;
1973 char *acb_firm_version = acb->firm_version;
1974 char *acb_device_map = acb->device_map;
1975 char __iomem *iop_firm_model = (char __iomem *)(&lrwbuffer[15]);
1976 /*firm_model,15,60-67*/
1977 char __iomem *iop_firm_version = (char __iomem *)(&lrwbuffer[17]);
1978 /*firm_version,17,68-83*/
1979 char __iomem *iop_device_map = (char __iomem *) (&lrwbuffer[21]);
1980 /*firm_version,21,84-99*/
1981 int count;
1983 writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell_reg);
1984 if (arcmsr_hbb_wait_msgint_ready(acb)) {
1985 printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \
1986 miscellaneous data' timeout \n", acb->host->host_no);
1987 return NULL;
1990 if (mode == 1) {
1991 count = 8;
1992 while (count)
1994 *acb_firm_model = readb(iop_firm_model);
1995 acb_firm_model++;
1996 iop_firm_model++;
1997 count--;
2000 count = 16;
2001 while (count)
2003 *acb_firm_version = readb(iop_firm_version);
2004 acb_firm_version++;
2005 iop_firm_version++;
2006 count--;
2009 count = 16;
2010 while (count) {
2011 *acb_device_map = readb(iop_device_map);
2012 acb_device_map++;
2013 iop_device_map++;
2014 count--;
2017 printk(KERN_INFO "ARECA RAID ADAPTER%d: FIRMWARE VERSION %s \n",
2018 acb->host->host_no,
2019 acb->firm_version);
2021 acb->signature = readl(lrwbuffer++);
2022 /*firm_signature,1,00-03*/
2023 acb->firm_request_len = readl(lrwbuffer++);
2024 /*firm_request_len,1,04-07*/
2025 acb->firm_numbers_queue = readl(lrwbuffer++);
2026 /*firm_numbers_queue,2,08-11*/
2027 acb->firm_sdram_size = readl(lrwbuffer++);
2028 /*firm_sdram_size,3,12-15*/
2029 acb->firm_hd_channels = readl(lrwbuffer);
2030 /*firm_ide_channels,4,16-19*/
2032 return reg->msgcode_rwbuffer_reg;
2034 static void *arcmsr_get_firmware_spec(struct AdapterControlBlock *acb, int mode)
2036 void *rtnval = 0;
2037 switch (acb->adapter_type) {
2038 case ACB_ADAPTER_TYPE_A: {
2039 rtnval = arcmsr_get_hba_config(acb, mode);
2041 break;
2043 case ACB_ADAPTER_TYPE_B: {
2044 rtnval = arcmsr_get_hbb_config(acb, mode);
2046 break;
2048 return rtnval;
/*
 * arcmsr_polling_hba_ccbdone - poll a type-A adapter's outbound reply
 * queue until @poll_ccb is observed finished (normally an aborted
 * command), completing every other finished CCB drained on the way.
 *
 * Runs with outbound interrupts disabled (see arcmsr_abort_one_cmd),
 * so completions must be reaped by polling instead of the ISR.
 * Gives up after roughly 100 retries of 25 ms each on an empty FIFO.
 */
2051 static void arcmsr_polling_hba_ccbdone(struct AdapterControlBlock *acb,
2052 struct CommandControlBlock *poll_ccb)
2054 struct MessageUnit_A __iomem *reg = acb->pmuA;
2055 struct CommandControlBlock *ccb;
2056 uint32_t flag_ccb, outbound_intstatus, poll_ccb_done = 0, poll_count = 0;
2058 polling_hba_ccb_retry:
2059 poll_count++;
/* acknowledge any pending outbound interrupts before draining the FIFO */
2060 outbound_intstatus = readl(&reg->outbound_intstatus) & acb->outbound_int_enable;
2061 writel(outbound_intstatus, &reg->outbound_intstatus);/*clear interrupt*/
2062 while (1) {
/* 0xFFFFFFFF from the queue port means the reply FIFO is empty */
2063 if ((flag_ccb = readl(&reg->outbound_queueport)) == 0xFFFFFFFF) {
2064 if (poll_ccb_done)
2065 break;
2066 else {
2067 msleep(25);
2068 if (poll_count > 100)
2069 break;
2070 goto polling_hba_ccb_retry;
/* flag_ccb carries the CCB frame address >> 5 (frames are 32-byte aligned) */
2073 ccb = (struct CommandControlBlock *)(acb->vir2phy_offset + (flag_ccb << 5));
2074 poll_ccb_done = (ccb == poll_ccb) ? 1:0;
/* a CCB not in START state needs special handling before reporting */
2075 if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
2076 if ((ccb->startdone == ARCMSR_CCB_ABORTED) || (ccb == poll_ccb)) {
2077 printk(KERN_NOTICE "arcmsr%d: scsi id = %d lun = %d ccb = '0x%p'"
2078 " poll command abort successfully \n"
2079 , acb->host->host_no
2080 , ccb->pcmd->device->id
2081 , ccb->pcmd->device->lun
2082 , ccb);
/* finish the aborted command with DID_ABORT so the midlayer retries */
2083 ccb->pcmd->result = DID_ABORT << 16;
2084 arcmsr_ccb_complete(ccb, 1);
2085 poll_ccb_done = 1;
2086 continue;
/* neither aborted nor ours: log the stray completion and skip it */
2088 printk(KERN_NOTICE "arcmsr%d: polling get an illegal ccb"
2089 " command done ccb = '0x%p'"
2090 "ccboutstandingcount = %d \n"
2091 , acb->host->host_no
2092 , ccb
2093 , atomic_read(&acb->ccboutstandingcount));
2094 continue;
/* normal completion: translate adapter status for the SCSI midlayer */
2096 arcmsr_report_ccb_state(acb, ccb, flag_ccb);
/*
 * arcmsr_polling_hbb_ccbdone - type-B counterpart of the polling
 * drain above: walk the memory-resident done-queue ring until
 * @poll_ccb is seen finished, completing other finished CCBs found.
 * Gives up after roughly 100 retries of 25 ms each on an empty ring.
 */
2100 static void arcmsr_polling_hbb_ccbdone(struct AdapterControlBlock *acb,
2101 struct CommandControlBlock *poll_ccb)
2103 struct MessageUnit_B *reg = acb->pmuB;
2104 struct CommandControlBlock *ccb;
2105 uint32_t flag_ccb, poll_ccb_done = 0, poll_count = 0;
2106 int index;
2108 polling_hbb_ccb_retry:
2109 poll_count++;
2110 /* clear doorbell interrupt */
2111 writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell_reg);
2112 while (1) {
2113 index = reg->doneq_index;
/* a zero entry means the done queue has no completion at this slot */
2114 if ((flag_ccb = readl(&reg->done_qbuffer[index])) == 0) {
2115 if (poll_ccb_done)
2116 break;
2117 else {
2118 msleep(25);
2119 if (poll_count > 100)
2120 break;
2121 goto polling_hbb_ccb_retry;
/* consume the slot and advance the ring index */
2124 writel(0, &reg->done_qbuffer[index]);
2125 index++;
2126 /*if last index number set it to 0 */
2127 index %= ARCMSR_MAX_HBB_POSTQUEUE;
2128 reg->doneq_index = index;
2129 /* check ifcommand done with no error*/
2130 ccb = (struct CommandControlBlock *)\
2131 (acb->vir2phy_offset + (flag_ccb << 5));/*frame must be 32 bytes aligned*/
2132 poll_ccb_done = (ccb == poll_ccb) ? 1:0;
2133 if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
2134 if ((ccb->startdone == ARCMSR_CCB_ABORTED) || (ccb == poll_ccb)) {
2135 printk(KERN_NOTICE "arcmsr%d: \
2136 scsi id = %d lun = %d ccb = '0x%p' poll command abort successfully \n"
2137 ,acb->host->host_no
2138 ,ccb->pcmd->device->id
2139 ,ccb->pcmd->device->lun
2140 ,ccb);
/* finish the aborted command with DID_ABORT status */
2141 ccb->pcmd->result = DID_ABORT << 16;
2142 arcmsr_ccb_complete(ccb, 1);
2143 continue;
/* stray completion that belongs to no live command: log and skip */
2145 printk(KERN_NOTICE "arcmsr%d: polling get an illegal ccb"
2146 " command done ccb = '0x%p'"
2147 "ccboutstandingcount = %d \n"
2148 , acb->host->host_no
2149 , ccb
2150 , atomic_read(&acb->ccboutstandingcount));
2151 continue;
/* normal completion: translate adapter status for the SCSI midlayer */
2153 arcmsr_report_ccb_state(acb, ccb, flag_ccb);
2154 } /*drain reply FIFO*/
2157 static void arcmsr_polling_ccbdone(struct AdapterControlBlock *acb,
2158 struct CommandControlBlock *poll_ccb)
2160 switch (acb->adapter_type) {
2162 case ACB_ADAPTER_TYPE_A: {
2163 arcmsr_polling_hba_ccbdone(acb,poll_ccb);
2165 break;
2167 case ACB_ADAPTER_TYPE_B: {
2168 arcmsr_polling_hbb_ccbdone(acb,poll_ccb);
/*
 * arcmsr_iop_confirm - tell the IOP where the driver's CCB pool lives.
 *
 * Type A: only needed when the DMA area's high 32 address bits are
 * non-zero; hands the high part to firmware via the message rwbuffer.
 * Type B: additionally programs the post/done queue window addresses
 * and switches the IOP into driver mode.
 *
 * Returns 0 on success, 1 if any message-interrupt handshake times out.
 */
2173 static int arcmsr_iop_confirm(struct AdapterControlBlock *acb)
2175 uint32_t cdb_phyaddr, ccb_phyaddr_hi32;
2176 dma_addr_t dma_coherent_handle;
2178 ********************************************************************
2179 ** here we need to tell iop 331 our freeccb.HighPart
2180 ** if freeccb.HighPart is not zero
2181 ********************************************************************
2183 dma_coherent_handle = acb->dma_coherent_handle;
2184 cdb_phyaddr = (uint32_t)(dma_coherent_handle);
/* double 16-bit shift avoids undefined behavior when dma_addr_t is 32-bit */
2185 ccb_phyaddr_hi32 = (uint32_t)((cdb_phyaddr >> 16) >> 16);
2187 ***********************************************************************
2188 ** if adapter type B, set window of "post command Q"
2189 ***********************************************************************
2191 switch (acb->adapter_type) {
2193 case ACB_ADAPTER_TYPE_A: {
2194 if (ccb_phyaddr_hi32 != 0) {
2195 struct MessageUnit_A __iomem *reg = acb->pmuA;
2196 uint32_t intmask_org;
2197 intmask_org = arcmsr_disable_outbound_ints(acb);
/* rwbuffer[0] = signature, rwbuffer[1] = high address bits */
2198 writel(ARCMSR_SIGNATURE_SET_CONFIG, \
2199 &reg->message_rwbuffer[0]);
2200 writel(ccb_phyaddr_hi32, &reg->message_rwbuffer[1]);
2201 writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, \
2202 &reg->inbound_msgaddr0);
2203 if (arcmsr_hba_wait_msgint_ready(acb)) {
2204 printk(KERN_NOTICE "arcmsr%d: ""set ccb high \
2205 part physical address timeout\n",
2206 acb->host->host_no);
2207 return 1;
2209 arcmsr_enable_outbound_ints(acb, intmask_org);
2212 break;
2214 case ACB_ADAPTER_TYPE_B: {
2215 unsigned long post_queue_phyaddr;
2216 uint32_t __iomem *rwbuffer;
2218 struct MessageUnit_B *reg = acb->pmuB;
2219 uint32_t intmask_org;
2220 intmask_org = arcmsr_disable_outbound_ints(acb);
/* reset both ring indices before handing the window to the IOP */
2221 reg->postq_index = 0;
2222 reg->doneq_index = 0;
2223 writel(ARCMSR_MESSAGE_SET_POST_WINDOW, reg->drv2iop_doorbell_reg);
2224 if (arcmsr_hbb_wait_msgint_ready(acb)) {
2225 printk(KERN_NOTICE "arcmsr%d:can not set diver mode\n", \
2226 acb->host->host_no);
2227 return 1;
/* post queue sits right after the CCB pool inside the coherent area */
2229 post_queue_phyaddr = cdb_phyaddr + ARCMSR_MAX_FREECCB_NUM * \
2230 sizeof(struct CommandControlBlock) + offsetof(struct MessageUnit_B, post_qbuffer) ;
2231 rwbuffer = reg->msgcode_rwbuffer_reg;
2232 /* driver "set config" signature */
2233 writel(ARCMSR_SIGNATURE_SET_CONFIG, rwbuffer++);
2234 /* normal should be zero */
2235 writel(ccb_phyaddr_hi32, rwbuffer++);
2236 /* postQ size (256 + 8)*4 */
2237 writel(post_queue_phyaddr, rwbuffer++);
2238 /* doneQ size (256 + 8)*4 */
2239 writel(post_queue_phyaddr + 1056, rwbuffer++);
2240 /* ccb maxQ size must be --> [(256 + 8)*4]*/
2241 writel(1056, rwbuffer);
2243 writel(ARCMSR_MESSAGE_SET_CONFIG, reg->drv2iop_doorbell_reg);
2244 if (arcmsr_hbb_wait_msgint_ready(acb)) {
2245 printk(KERN_NOTICE "arcmsr%d: 'set command Q window' \
2246 timeout \n",acb->host->host_no);
2247 return 1;
2250 writel(ARCMSR_MESSAGE_START_DRIVER_MODE, reg->drv2iop_doorbell_reg);
2251 if (arcmsr_hbb_wait_msgint_ready(acb)) {
2252 printk(KERN_NOTICE "arcmsr%d: 'can not set diver mode \n"\
2253 ,acb->host->host_no);
2254 return 1;
2256 arcmsr_enable_outbound_ints(acb, intmask_org);
2258 break;
2260 return 0;
/*
 * arcmsr_wait_firmware_ready - spin until the IOP firmware signals it
 * is up.  NOTE(review): both loops busy-wait with no timeout and no
 * cpu_relax()/sleep; a wedged controller hangs here — verify this is
 * acceptable in the init path.
 */
2263 static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb)
2265 uint32_t firmware_state = 0;
2267 switch (acb->adapter_type) {
2269 case ACB_ADAPTER_TYPE_A: {
2270 struct MessageUnit_A __iomem *reg = acb->pmuA;
/* poll outbound message register for the FIRMWARE_OK bit */
2271 do {
2272 firmware_state = readl(&reg->outbound_msgaddr1);
2273 } while ((firmware_state & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0);
2275 break;
2277 case ACB_ADAPTER_TYPE_B: {
2278 struct MessageUnit_B *reg = acb->pmuB;
/* poll the iop-to-driver doorbell for the FIRMWARE_OK bit */
2279 do {
2280 firmware_state = readl(reg->iop2drv_doorbell_reg);
2281 } while ((firmware_state & ARCMSR_MESSAGE_FIRMWARE_OK) == 0);
/* type B requires an explicit end-of-interrupt acknowledgement */
2282 writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell_reg);
2284 break;
/*
 * arcmsr_request_hba_device_map - periodic (6 s) timer work for type-A
 * adapters: decrement the firmware-alive token and, when it stalls or
 * runs out, re-issue a GET_CONFIG message; always re-arms the timer.
 */
2288 static void arcmsr_request_hba_device_map(struct AdapterControlBlock *acb)
2290 struct MessageUnit_A __iomem *reg = acb->pmuA;
/* token hitting zero means firmware stopped answering: mark it down */
2292 if (unlikely(atomic_read(&acb->rq_map_token) == 0)) {
2293 acb->fw_state = false;
2294 } else {
2295 /*to prevent rq_map_token from changing by other interrupt, then
2296 avoid the dead-lock*/
2297 acb->fw_state = true;
2298 atomic_dec(&acb->rq_map_token);
/* NOTE(review): fw_state was set true just above, so the !fw_state
 * half of this condition looks always-false here — verify intent. */
2299 if (!(acb->fw_state) ||
2300 (acb->ante_token_value == atomic_read(&acb->rq_map_token))) {
2301 atomic_set(&acb->rq_map_token, 16);
2303 acb->ante_token_value = atomic_read(&acb->rq_map_token);
/* ask the firmware for a fresh device map */
2304 writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
2306 mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6000));
2307 return;
/*
 * arcmsr_request_hbb_device_map - type-B twin of the function above:
 * same token bookkeeping, but GET_CONFIG is requested through the
 * driver-to-IOP doorbell instead of the inbound message register.
 */
2310 static void arcmsr_request_hbb_device_map(struct AdapterControlBlock *acb)
2312 struct MessageUnit_B __iomem *reg = acb->pmuB;
2314 if (unlikely(atomic_read(&acb->rq_map_token) == 0)) {
2315 acb->fw_state = false;
2316 } else {
2317 /*to prevent rq_map_token from changing by other interrupt, then
2318 avoid the dead-lock*/
2319 acb->fw_state = true;
2320 atomic_dec(&acb->rq_map_token);
/* NOTE(review): fw_state was set true just above, so the !fw_state
 * half of this condition looks always-false here — verify intent. */
2321 if (!(acb->fw_state) ||
2322 (acb->ante_token_value == atomic_read(&acb->rq_map_token))) {
2323 atomic_set(&acb->rq_map_token, 16);
2325 acb->ante_token_value = atomic_read(&acb->rq_map_token);
2326 writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell_reg);
2328 mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6000));
2329 return;
2332 static void arcmsr_request_device_map(unsigned long pacb)
2334 struct AdapterControlBlock *acb = (struct AdapterControlBlock *)pacb;
2336 switch (acb->adapter_type) {
2337 case ACB_ADAPTER_TYPE_A: {
2338 arcmsr_request_hba_device_map(acb);
2340 break;
2341 case ACB_ADAPTER_TYPE_B: {
2342 arcmsr_request_hbb_device_map(acb);
2344 break;
2348 static void arcmsr_start_hba_bgrb(struct AdapterControlBlock *acb)
2350 struct MessageUnit_A __iomem *reg = acb->pmuA;
2351 acb->acb_flags |= ACB_F_MSG_START_BGRB;
2352 writel(ARCMSR_INBOUND_MESG0_START_BGRB, &reg->inbound_msgaddr0);
2353 if (arcmsr_hba_wait_msgint_ready(acb)) {
2354 printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \
2355 rebulid' timeout \n", acb->host->host_no);
2359 static void arcmsr_start_hbb_bgrb(struct AdapterControlBlock *acb)
2361 struct MessageUnit_B *reg = acb->pmuB;
2362 acb->acb_flags |= ACB_F_MSG_START_BGRB;
2363 writel(ARCMSR_MESSAGE_START_BGRB, reg->drv2iop_doorbell_reg);
2364 if (arcmsr_hbb_wait_msgint_ready(acb)) {
2365 printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \
2366 rebulid' timeout \n",acb->host->host_no);
2370 static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb)
2372 switch (acb->adapter_type) {
2373 case ACB_ADAPTER_TYPE_A:
2374 arcmsr_start_hba_bgrb(acb);
2375 break;
2376 case ACB_ADAPTER_TYPE_B:
2377 arcmsr_start_hbb_bgrb(acb);
2378 break;
/*
 * arcmsr_clear_doorbell_queue_buffer - discard any stale doorbell
 * traffic left over from before init/reset, then acknowledge the data
 * so the IOP does not keep the doorbell asserted.
 */
2382 static void arcmsr_clear_doorbell_queue_buffer(struct AdapterControlBlock *acb)
2384 switch (acb->adapter_type) {
2385 case ACB_ADAPTER_TYPE_A: {
2386 struct MessageUnit_A __iomem *reg = acb->pmuA;
2387 uint32_t outbound_doorbell;
2388 /* empty doorbell Qbuffer if door bell ringed */
2389 outbound_doorbell = readl(&reg->outbound_doorbell);
2390 /*clear doorbell interrupt */
/* writing the value back acknowledges the pending doorbell bits */
2391 writel(outbound_doorbell, &reg->outbound_doorbell);
2392 writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, &reg->inbound_doorbell);
2394 break;
2396 case ACB_ADAPTER_TYPE_B: {
2397 struct MessageUnit_B *reg = acb->pmuB;
2398 /*clear interrupt and message state*/
2399 writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN, reg->iop2drv_doorbell_reg);
2400 writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell_reg);
2401 /* let IOP know data has been read */
2403 break;
/*
 * arcmsr_enable_eoi_mode - enable explicit end-of-interrupt mode.
 * Type A needs nothing; type B sends ACTIVE_EOI_MODE through the
 * doorbell and only logs if the handshake times out.
 */
2407 static void arcmsr_enable_eoi_mode(struct AdapterControlBlock *acb)
2409 switch (acb->adapter_type) {
2410 case ACB_ADAPTER_TYPE_A:
/* type A controllers have no EOI mode to enable */
2411 return;
2412 case ACB_ADAPTER_TYPE_B:
2414 struct MessageUnit_B *reg = acb->pmuB;
2415 writel(ARCMSR_MESSAGE_ACTIVE_EOI_MODE, reg->drv2iop_doorbell_reg);
2416 if(arcmsr_hbb_wait_msgint_ready(acb)) {
2417 printk(KERN_NOTICE "ARCMSR IOP enables EOI_MODE TIMEOUT");
2418 return;
2421 break;
2423 return;
2426 static void arcmsr_hardware_reset(struct AdapterControlBlock *acb)
2428 uint8_t value[64];
2429 int i;
2431 /* backup pci config data */
2432 for (i = 0; i < 64; i++) {
2433 pci_read_config_byte(acb->pdev, i, &value[i]);
2435 /* hardware reset signal */
2436 pci_write_config_byte(acb->pdev, 0x84, 0x20);
2437 msleep(1000);
2438 /* write back pci config data */
2439 for (i = 0; i < 64; i++) {
2440 pci_write_config_byte(acb->pdev, i, value[i]);
2442 msleep(1000);
2443 return;
2446 ****************************************************************************
2447 ****************************************************************************
2449 #ifdef CONFIG_SCSI_ARCMSR_RESET
/*
 * arcmsr_sleep_for_bus_reset - sleep for the module-level `sleeptime`
 * seconds (presumably a module parameter; declared elsewhere — TODO
 * confirm) after a hardware bus reset, dropping the host lock so the
 * midlayer is not blocked, and logging progress once per 10 s chunk.
 * Always returns 0.
 */
2450 int arcmsr_sleep_for_bus_reset(struct scsi_cmnd *cmd)
2452 struct Scsi_Host *shost = NULL;
2453 spinlock_t *host_lock = NULL;
2454 int i, isleep;
2456 shost = cmd->device->host;
2457 host_lock = shost->host_lock;
2459 printk(KERN_NOTICE "Host %d bus reset over, sleep %d seconds (busy %d, can queue %d) ...........\n",
2460 shost->host_no, sleeptime, shost->host_busy, shost->can_queue);
/* whole 10-second chunks first, then the remainder */
2461 isleep = sleeptime / 10;
/* release the host lock while sleeping; re-taken before returning */
2462 spin_unlock_irq(host_lock);
2463 if (isleep > 0) {
2464 for (i = 0; i < isleep; i++) {
2465 msleep(10000);
2466 printk(KERN_NOTICE "^%d^\n", i);
2470 isleep = sleeptime % 10;
2471 if (isleep > 0) {
2472 msleep(isleep * 1000);
2473 printk(KERN_NOTICE "^v^\n");
2475 spin_lock_irq(host_lock);
2476 printk(KERN_NOTICE "***** wake up *****\n");
2477 return 0;
2479 #endif
/*
 * arcmsr_iop_init - one-shot adapter bring-up sequence.  The order is
 * significant: interrupts off -> firmware ready -> confirm CCB window
 * -> read firmware spec -> start rebuild -> flush doorbells -> EOI
 * mode -> interrupts back on -> mark initialized.
 */
2480 static void arcmsr_iop_init(struct AdapterControlBlock *acb)
2482 uint32_t intmask_org;
2484 /* disable all outbound interrupt */
2485 intmask_org = arcmsr_disable_outbound_ints(acb);
2486 arcmsr_wait_firmware_ready(acb);
/* NOTE(review): arcmsr_iop_confirm() can return 1 on handshake
 * timeout but the result is ignored here — verify intent. */
2487 arcmsr_iop_confirm(acb);
2488 arcmsr_get_firmware_spec(acb, 1);
2489 /*start background rebuild*/
2490 arcmsr_start_adapter_bgrb(acb);
2491 /* empty doorbell Qbuffer if door bell ringed */
2492 arcmsr_clear_doorbell_queue_buffer(acb);
2493 arcmsr_enable_eoi_mode(acb);
2494 /* enable outbound Post Queue,outbound doorbell Interrupt */
2495 arcmsr_enable_outbound_ints(acb, intmask_org);
2496 acb->acb_flags |= ACB_F_IOP_INITED;
/*
 * arcmsr_iop_reset - abort all outstanding commands on the IOP and
 * complete their CCBs back to the midlayer.  No-op when nothing is
 * outstanding.  Returns arcmsr_abort_allcmd()'s status (non-zero =
 * the abort-all message itself failed), 0 otherwise.
 */
2499 static uint8_t arcmsr_iop_reset(struct AdapterControlBlock *acb)
2501 struct CommandControlBlock *ccb;
2502 uint32_t intmask_org;
2503 uint8_t rtnval = 0x00;
2504 int i = 0;
2506 if (atomic_read(&acb->ccboutstandingcount) != 0) {
2507 /* disable all outbound interrupt */
2508 intmask_org = arcmsr_disable_outbound_ints(acb);
2509 /* talk to iop 331 outstanding command aborted */
2510 rtnval = arcmsr_abort_allcmd(acb);
2511 /* wait for 3 sec for all command aborted*/
2512 ssleep(3);
2513 /* clear all outbound posted Q */
2514 arcmsr_done4abort_postqueue(acb);
/* complete every CCB still marked in-flight so the midlayer gets
 * its commands back before the counter is force-cleared */
2515 for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
2516 ccb = acb->pccb_pool[i];
2517 if (ccb->startdone == ARCMSR_CCB_START) {
2518 arcmsr_ccb_complete(ccb, 1);
2521 atomic_set(&acb->ccboutstandingcount, 0);
2522 /* enable all outbound interrupt */
2523 arcmsr_enable_outbound_ints(acb, intmask_org);
2524 return rtnval;
2526 return rtnval;
/*
 * arcmsr_bus_reset - SCSI EH bus-reset handler.
 *
 * Drains pending completions, asks the IOP to abort everything, and —
 * when that fails and CONFIG_SCSI_ARCMSR_RESET is set — performs a
 * full hardware reset of a type-A adapter, waiting (with retries) for
 * firmware to come back before re-running the init sequence.
 * Always returns SUCCESS to the midlayer.
 */
2529 static int arcmsr_bus_reset(struct scsi_cmnd *cmd)
2531 struct AdapterControlBlock *acb =
2532 (struct AdapterControlBlock *)cmd->device->host->hostdata;
2533 int retry = 0;
/* a reset is already in flight: don't start a second one */
2535 if (acb->acb_flags & ACB_F_BUS_RESET)
2536 return SUCCESS;
2538 printk(KERN_NOTICE "arcmsr%d: bus reset ..... \n", acb->adapter_index);
2539 acb->acb_flags |= ACB_F_BUS_RESET;
2540 acb->num_resets++;
/* service the ISR by hand up to 4 times to reap straggling completions */
2541 while (atomic_read(&acb->ccboutstandingcount) != 0 && retry < 4) {
2542 arcmsr_interrupt(acb);
2543 retry++;
/* non-zero means the abort-all message failed: escalate */
2546 if (arcmsr_iop_reset(acb)) {
2547 switch (acb->adapter_type) {
2548 case ACB_ADAPTER_TYPE_A: {
2549 printk(KERN_NOTICE "arcmsr%d: do hardware bus reset, num_resets = %d num_aborts = %d \n",
2550 acb->adapter_index, acb->num_resets, acb->num_aborts);
2551 arcmsr_hardware_reset(acb);
2552 acb->acb_flags |= ACB_F_FIRMWARE_TRAP;
2553 acb->acb_flags &= ~ACB_F_IOP_INITED;
2554 #ifdef CONFIG_SCSI_ARCMSR_RESET
/* NOTE(review): declarations after statements inside a case block
 * violate C90/kernel style — confirm against build flags */
2555 struct MessageUnit_A __iomem *reg = acb->pmuA;
2556 uint32_t intmask_org, outbound_doorbell;
2557 int retry_count = 0;
2558 sleep_again:
2559 arcmsr_sleep_for_bus_reset(cmd);
/* firmware not back yet: sleep-and-retry up to `retrycount` times */
2560 if ((readl(&reg->outbound_msgaddr1) &
2561 ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0) {
2562 printk(KERN_NOTICE "arcmsr%d: hardware bus reset and return busy, retry=%d \n",
2563 acb->host->host_no, retry_count);
2564 if (retry_count > retrycount) {
2565 printk(KERN_NOTICE "arcmsr%d: hardware bus reset and return busy, retry aborted \n",
2566 acb->host->host_no);
2567 return SUCCESS;
2569 retry_count++;
2570 goto sleep_again;
/* firmware is back: clear trap state and redo adapter init */
2572 acb->acb_flags &= ~ACB_F_FIRMWARE_TRAP;
2573 acb->acb_flags |= ACB_F_IOP_INITED;
2574 acb->acb_flags &= ~ACB_F_BUS_RESET;
2575 printk(KERN_NOTICE "arcmsr%d: hardware bus reset and reset ok \n",
2576 acb->host->host_no);
2577 /* disable all outbound interrupt */
2578 intmask_org = arcmsr_disable_outbound_ints(acb);
2579 arcmsr_get_firmware_spec(acb, 1);
2580 /*start background rebuild*/
2581 arcmsr_start_adapter_bgrb(acb);
2582 /* clear Qbuffer if door bell ringed */
2583 outbound_doorbell = readl(&reg->outbound_doorbell);
2584 writel(outbound_doorbell, &reg->outbound_doorbell); /*clear interrupt */
2585 writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, &reg->inbound_doorbell);
2586 /* enable outbound Post Queue,outbound doorbell Interrupt */
2587 arcmsr_enable_outbound_ints(acb, intmask_org);
2588 atomic_set(&acb->rq_map_token, 16);
/* restart the periodic device-map timer */
2589 init_timer(&acb->eternal_timer);
/* NOTE(review): msecs_to_jiffies(20*HZ) treats 20*HZ as milliseconds;
 * a plain `jiffies + 20*HZ` (20 s) was likely intended — verify */
2590 acb->eternal_timer.expires = jiffies + msecs_to_jiffies(20*HZ);
2591 acb->eternal_timer.data = (unsigned long) acb;
2592 acb->eternal_timer.function = &arcmsr_request_device_map;
2593 add_timer(&acb->eternal_timer);
2594 #endif
2596 break;
2597 case ACB_ADAPTER_TYPE_B: {
2600 } else {
/* soft reset succeeded: just drop the in-progress flag */
2601 acb->acb_flags &= ~ACB_F_BUS_RESET;
2603 return SUCCESS;
/*
 * arcmsr_abort_one_cmd - mark one in-flight CCB aborted, give the
 * adapter 3 s to finish it, then drain completions by polling with
 * outbound interrupts masked (restored before returning).
 */
2606 static void arcmsr_abort_one_cmd(struct AdapterControlBlock *acb,
2607 struct CommandControlBlock *ccb)
2609 u32 intmask;
/* flag the CCB so the polling drain recognizes it as aborted */
2611 ccb->startdone = ARCMSR_CCB_ABORTED;
2614 ** Wait for 3 sec for all command done.
2616 ssleep(3);
2618 intmask = arcmsr_disable_outbound_ints(acb);
2619 arcmsr_polling_ccbdone(acb, ccb);
2620 arcmsr_enable_outbound_ints(acb, intmask);
2623 static int arcmsr_abort(struct scsi_cmnd *cmd)
2625 struct AdapterControlBlock *acb =
2626 (struct AdapterControlBlock *)cmd->device->host->hostdata;
2627 int i = 0;
2629 printk(KERN_NOTICE
2630 "arcmsr%d: abort device command of scsi id = %d lun = %d \n",
2631 acb->host->host_no, cmd->device->id, cmd->device->lun);
2632 acb->num_aborts++;
2634 ************************************************
2635 ** the all interrupt service routine is locked
2636 ** we need to handle it as soon as possible and exit
2637 ************************************************
2639 if (!atomic_read(&acb->ccboutstandingcount))
2640 return SUCCESS;
2642 for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
2643 struct CommandControlBlock *ccb = acb->pccb_pool[i];
2644 if (ccb->startdone == ARCMSR_CCB_START && ccb->pcmd == cmd) {
2645 arcmsr_abort_one_cmd(acb, ccb);
2646 break;
2650 return SUCCESS;
/*
 * arcmsr_info - SCSI host template .info callback: build a one-line
 * description ("Areca SATA/SAS Host Adapter ...") keyed off the PCI
 * device ID.  Uses a static buffer, which is the usual pattern for
 * .info handlers (single caller, not reentrant).
 */
2653 static const char *arcmsr_info(struct Scsi_Host *host)
2655 struct AdapterControlBlock *acb =
2656 (struct AdapterControlBlock *) host->hostdata;
2657 static char buf[256];
2658 char *type;
2659 int raid6 = 1;
2661 switch (acb->pdev->device) {
/* these four models lack RAID6 support; they fall through to SATA */
2662 case PCI_DEVICE_ID_ARECA_1110:
2663 case PCI_DEVICE_ID_ARECA_1200:
2664 case PCI_DEVICE_ID_ARECA_1202:
2665 case PCI_DEVICE_ID_ARECA_1210:
2666 raid6 = 0;
2667 /*FALLTHRU*/
2668 case PCI_DEVICE_ID_ARECA_1120:
2669 case PCI_DEVICE_ID_ARECA_1130:
2670 case PCI_DEVICE_ID_ARECA_1160:
2671 case PCI_DEVICE_ID_ARECA_1170:
2672 case PCI_DEVICE_ID_ARECA_1201:
2673 case PCI_DEVICE_ID_ARECA_1220:
2674 case PCI_DEVICE_ID_ARECA_1230:
2675 case PCI_DEVICE_ID_ARECA_1260:
2676 case PCI_DEVICE_ID_ARECA_1270:
2677 case PCI_DEVICE_ID_ARECA_1280:
2678 type = "SATA";
2679 break;
2680 case PCI_DEVICE_ID_ARECA_1380:
2681 case PCI_DEVICE_ID_ARECA_1381:
2682 case PCI_DEVICE_ID_ARECA_1680:
2683 case PCI_DEVICE_ID_ARECA_1681:
2684 type = "SAS";
2685 break;
2686 default:
2687 type = "X-TYPE";
2688 break;
2690 sprintf(buf, "Areca %s Host Adapter RAID Controller%s\n %s",
2691 type, raid6 ? "( RAID6 capable)" : "",
2692 ARCMSR_DRIVER_VERSION);
2693 return buf;