/*
*******************************************************************************
**        O.S   : Linux
**   FILE NAME  : arcmsr_hba.c
**        BY    : Nick Cheng, C.L. Huang
**   Description: SCSI RAID Device Driver for Areca RAID Controller
*******************************************************************************
** Copyright (C) 2002 - 2014, Areca Technology Corporation All rights reserved
**
**     Web site: www.areca.com.tw
**       E-mail: support@areca.com.tw
**
** This program is free software; you can redistribute it and/or modify
** it under the terms of the GNU General Public License version 2 as
** published by the Free Software Foundation.
** This program is distributed in the hope that it will be useful,
** but WITHOUT ANY WARRANTY; without even the implied warranty of
** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
** GNU General Public License for more details.
*******************************************************************************
** Redistribution and use in source and binary forms, with or without
** modification, are permitted provided that the following conditions
** are met:
** 1. Redistributions of source code must retain the above copyright
**    notice, this list of conditions and the following disclaimer.
** 2. Redistributions in binary form must reproduce the above copyright
**    notice, this list of conditions and the following disclaimer in the
**    documentation and/or other materials provided with the distribution.
** 3. The name of the author may not be used to endorse or promote products
**    derived from this software without specific prior written permission.
**
** THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES(INCLUDING,BUT
** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
** DATA, OR PROFITS; OR BUSINESS INTERRUPTION)HOWEVER CAUSED AND ON ANY
** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
** (INCLUDING NEGLIGENCE OR OTHERWISE)ARISING IN ANY WAY OUT OF THE USE OF
** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*******************************************************************************
** For history of changes, see Documentation/scsi/ChangeLog.arcmsr
**     Firmware Specification, see Documentation/scsi/arcmsr_spec.rst
*******************************************************************************
*/
#include <linux/module.h>
#include <linux/reboot.h>
#include <linux/spinlock.h>
#include <linux/pci_ids.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/circ_buf.h>
#include <linux/uaccess.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsicam.h>
#include "arcmsr.h"
MODULE_AUTHOR("Nick Cheng, C.L. Huang <support@areca.com.tw>");
MODULE_DESCRIPTION("Areca ARC11xx/12xx/16xx/188x SAS/SATA RAID Controller Driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(ARCMSR_DRIVER_VERSION);

static int msix_enable = 1;
module_param(msix_enable, int, S_IRUGO);
MODULE_PARM_DESC(msix_enable, "Enable MSI-X interrupt(0 ~ 1), msix_enable=1(enable), =0(disable)");

static int msi_enable = 1;
module_param(msi_enable, int, S_IRUGO);
MODULE_PARM_DESC(msi_enable, "Enable MSI interrupt(0 ~ 1), msi_enable=1(enable), =0(disable)");

static int host_can_queue = ARCMSR_DEFAULT_OUTSTANDING_CMD;
module_param(host_can_queue, int, S_IRUGO);
MODULE_PARM_DESC(host_can_queue, " adapter queue depth(32 ~ 1024), default is 128");

static int cmd_per_lun = ARCMSR_DEFAULT_CMD_PERLUN;
module_param(cmd_per_lun, int, S_IRUGO);
MODULE_PARM_DESC(cmd_per_lun, " device queue depth(1 ~ 128), default is 32");

static int dma_mask_64 = 0;
module_param(dma_mask_64, int, S_IRUGO);
MODULE_PARM_DESC(dma_mask_64, " set DMA mask to 64 bits(0 ~ 1), dma_mask_64=1(64 bits), =0(32 bits)");

static int set_date_time = 0;
module_param(set_date_time, int, S_IRUGO);
MODULE_PARM_DESC(set_date_time, " send date, time to iop(0 ~ 1), set_date_time=1(enable), default(=0) is disable");

static int cmd_timeout = ARCMSR_DEFAULT_TIMEOUT;
module_param(cmd_timeout, int, S_IRUGO);
MODULE_PARM_DESC(cmd_timeout, " scsi cmd timeout(0 ~ 120 sec.), default is 90");
#define ARCMSR_SLEEPTIME	10
#define ARCMSR_RETRYCOUNT	12

static wait_queue_head_t wait_q;
static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
	struct scsi_cmnd *cmd);
static int arcmsr_iop_confirm(struct AdapterControlBlock *acb);
static int arcmsr_abort(struct scsi_cmnd *);
static int arcmsr_bus_reset(struct scsi_cmnd *);
static int arcmsr_bios_param(struct scsi_device *sdev,
		struct block_device *bdev, sector_t capacity, int *info);
static int arcmsr_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
static int arcmsr_probe(struct pci_dev *pdev,
				const struct pci_device_id *id);
static int __maybe_unused arcmsr_suspend(struct device *dev);
static int __maybe_unused arcmsr_resume(struct device *dev);
static void arcmsr_remove(struct pci_dev *pdev);
static void arcmsr_shutdown(struct pci_dev *pdev);
static void arcmsr_iop_init(struct AdapterControlBlock *acb);
static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb);
static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb);
static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb,
	u32 intmask_org);
static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb);
static void arcmsr_hbaA_flush_cache(struct AdapterControlBlock *acb);
static void arcmsr_hbaB_flush_cache(struct AdapterControlBlock *acb);
static void arcmsr_request_device_map(struct timer_list *t);
static void arcmsr_message_isr_bh_fn(struct work_struct *work);
static bool arcmsr_get_firmware_spec(struct AdapterControlBlock *acb);
static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb);
static void arcmsr_hbaC_message_isr(struct AdapterControlBlock *pACB);
static void arcmsr_hbaD_message_isr(struct AdapterControlBlock *acb);
static void arcmsr_hbaE_message_isr(struct AdapterControlBlock *acb);
static void arcmsr_hbaE_postqueue_isr(struct AdapterControlBlock *acb);
static void arcmsr_hbaF_postqueue_isr(struct AdapterControlBlock *acb);
static void arcmsr_hardware_reset(struct AdapterControlBlock *acb);
static const char *arcmsr_info(struct Scsi_Host *);
static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb);
static void arcmsr_free_irq(struct pci_dev *, struct AdapterControlBlock *);
static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb);
static void arcmsr_set_iop_datetime(struct timer_list *);
static int arcmsr_slave_config(struct scsi_device *sdev);
static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev, int queue_depth)
{
	if (queue_depth > ARCMSR_MAX_CMD_PERLUN)
		queue_depth = ARCMSR_MAX_CMD_PERLUN;
	return scsi_change_queue_depth(sdev, queue_depth);
}
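
/*
 * Editor's note (not part of the original source): this is the
 * ->change_queue_depth hook wired into arcmsr_scsi_host_template below, so a
 * sysfs request such as the (hypothetical) example
 *
 *	echo 64 > /sys/block/sdX/device/queue_depth
 *
 * is capped at ARCMSR_MAX_CMD_PERLUN before scsi_change_queue_depth() sees it.
 */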
static struct scsi_host_template arcmsr_scsi_host_template = {
	.module			= THIS_MODULE,
	.name			= "Areca SAS/SATA RAID driver",
	.info			= arcmsr_info,
	.queuecommand		= arcmsr_queue_command,
	.eh_abort_handler	= arcmsr_abort,
	.eh_bus_reset_handler	= arcmsr_bus_reset,
	.bios_param		= arcmsr_bios_param,
	.slave_configure	= arcmsr_slave_config,
	.change_queue_depth	= arcmsr_adjust_disk_queue_depth,
	.can_queue		= ARCMSR_DEFAULT_OUTSTANDING_CMD,
	.this_id		= ARCMSR_SCSI_INITIATOR_ID,
	.sg_tablesize		= ARCMSR_DEFAULT_SG_ENTRIES,
	.max_sectors		= ARCMSR_MAX_XFER_SECTORS_C,
	.cmd_per_lun		= ARCMSR_DEFAULT_CMD_PERLUN,
	.shost_attrs		= arcmsr_host_attrs,
	.no_write_same		= 1,
};
static struct pci_device_id arcmsr_device_id_table[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1110),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1120),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1130),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1160),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1170),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1200),
		.driver_data = ACB_ADAPTER_TYPE_B},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1201),
		.driver_data = ACB_ADAPTER_TYPE_B},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1202),
		.driver_data = ACB_ADAPTER_TYPE_B},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1203),
		.driver_data = ACB_ADAPTER_TYPE_B},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1210),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1214),
		.driver_data = ACB_ADAPTER_TYPE_D},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1220),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1230),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1260),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1270),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1280),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1380),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1381),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1680),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1681),
		.driver_data = ACB_ADAPTER_TYPE_A},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1880),
		.driver_data = ACB_ADAPTER_TYPE_C},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1884),
		.driver_data = ACB_ADAPTER_TYPE_E},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1886),
		.driver_data = ACB_ADAPTER_TYPE_F},
	{0, 0}, /* Terminating entry */
};
MODULE_DEVICE_TABLE(pci, arcmsr_device_id_table);
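
/*
 * Editor's note (not part of the original source): .driver_data carries the
 * ACB_ADAPTER_TYPE_* code for each PCI ID; arcmsr_probe() copies it into
 * acb->adapter_type, and nearly every register access below switches on that
 * value to select the matching MessageUnit_A..F layout.
 */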
static SIMPLE_DEV_PM_OPS(arcmsr_pm_ops, arcmsr_suspend, arcmsr_resume);

static struct pci_driver arcmsr_pci_driver = {
	.name			= "arcmsr",
	.id_table		= arcmsr_device_id_table,
	.probe			= arcmsr_probe,
	.remove			= arcmsr_remove,
	.driver.pm		= &arcmsr_pm_ops,
	.shutdown		= arcmsr_shutdown,
};
/*
****************************************************************************
****************************************************************************
*/
static void arcmsr_free_io_queue(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_B:
	case ACB_ADAPTER_TYPE_D:
	case ACB_ADAPTER_TYPE_E:
	case ACB_ADAPTER_TYPE_F:
		dma_free_coherent(&acb->pdev->dev, acb->ioqueue_size,
			acb->dma_coherent2, acb->dma_coherent_handle2);
		break;
	}
}
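
/*
 * Editor's note (not part of the original source): only adapter types B, D,
 * E and F keep their message unit / completion queue in the separately
 * allocated dma_coherent2 buffer (see arcmsr_alloc_io_queue()), which is why
 * the other types fall through the switch above without freeing anything.
 */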
static bool arcmsr_remap_pciregion(struct AdapterControlBlock *acb)
{
	struct pci_dev *pdev = acb->pdev;

	switch (acb->adapter_type){
	case ACB_ADAPTER_TYPE_A:{
		acb->pmuA = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
		if (!acb->pmuA) {
			printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no);
			return false;
		}
		break;
	}
	case ACB_ADAPTER_TYPE_B:{
		void __iomem *mem_base0, *mem_base1;

		mem_base0 = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
		if (!mem_base0) {
			printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no);
			return false;
		}
		mem_base1 = ioremap(pci_resource_start(pdev, 2), pci_resource_len(pdev, 2));
		if (!mem_base1) {
			iounmap(mem_base0);
			printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no);
			return false;
		}
		acb->mem_base0 = mem_base0;
		acb->mem_base1 = mem_base1;
		break;
	}
	case ACB_ADAPTER_TYPE_C:{
		acb->pmuC = ioremap(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1));
		if (!acb->pmuC) {
			printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no);
			return false;
		}
		if (readl(&acb->pmuC->outbound_doorbell) & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
			writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR, &acb->pmuC->outbound_doorbell_clear);/*clear interrupt*/
			return true;
		}
		break;
	}
	case ACB_ADAPTER_TYPE_D: {
		void __iomem *mem_base0;
		unsigned long addr, range;

		addr = (unsigned long)pci_resource_start(pdev, 0);
		range = pci_resource_len(pdev, 0);
		mem_base0 = ioremap(addr, range);
		if (!mem_base0) {
			pr_notice("arcmsr%d: memory mapping region fail\n",
				acb->host->host_no);
			return false;
		}
		acb->mem_base0 = mem_base0;
		break;
	}
	case ACB_ADAPTER_TYPE_E: {
		acb->pmuE = ioremap(pci_resource_start(pdev, 1),
			pci_resource_len(pdev, 1));
		if (!acb->pmuE) {
			pr_notice("arcmsr%d: memory mapping region fail \n",
				acb->host->host_no);
			return false;
		}
		writel(0, &acb->pmuE->host_int_status); /*clear interrupt*/
		writel(ARCMSR_HBEMU_DOORBELL_SYNC, &acb->pmuE->iobound_doorbell);	/* synchronize doorbell to 0 */
		acb->in_doorbell = 0;
		acb->out_doorbell = 0;
		break;
	}
	case ACB_ADAPTER_TYPE_F: {
		acb->pmuF = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
		if (!acb->pmuF) {
			pr_notice("arcmsr%d: memory mapping region fail\n",
				acb->host->host_no);
			return false;
		}
		writel(0, &acb->pmuF->host_int_status); /* clear interrupt */
		writel(ARCMSR_HBFMU_DOORBELL_SYNC, &acb->pmuF->iobound_doorbell);
		acb->in_doorbell = 0;
		acb->out_doorbell = 0;
		break;
	}
	}
	return true;
}
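
/*
 * Editor's note (not part of the original source): the BAR carrying the
 * message unit differs per adapter family in the switch above - types A, D
 * and F map PCI resource 0, type B maps resources 0 and 2, and types C and E
 * map resource 1.
 */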
static void arcmsr_unmap_pciregion(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A:
		iounmap(acb->pmuA);
		break;
	case ACB_ADAPTER_TYPE_B:
		iounmap(acb->mem_base0);
		iounmap(acb->mem_base1);
		break;
	case ACB_ADAPTER_TYPE_C:
		iounmap(acb->pmuC);
		break;
	case ACB_ADAPTER_TYPE_D:
		iounmap(acb->mem_base0);
		break;
	case ACB_ADAPTER_TYPE_E:
		iounmap(acb->pmuE);
		break;
	case ACB_ADAPTER_TYPE_F:
		iounmap(acb->pmuF);
		break;
	}
}
static irqreturn_t arcmsr_do_interrupt(int irq, void *dev_id)
{
	irqreturn_t handle_state;
	struct AdapterControlBlock *acb = dev_id;

	handle_state = arcmsr_interrupt(acb);
	return handle_state;
}
static int arcmsr_bios_param(struct scsi_device *sdev,
		struct block_device *bdev, sector_t capacity, int *geom)
{
	int heads, sectors, cylinders, total_capacity;

	if (scsi_partsize(bdev, capacity, geom))
		return 0;

	total_capacity = capacity;
	heads = 64;
	sectors = 32;
	cylinders = total_capacity / (heads * sectors);
	if (cylinders > 1024) {
		heads = 255;
		sectors = 63;
		cylinders = total_capacity / (heads * sectors);
	}
	geom[0] = heads;
	geom[1] = sectors;
	geom[2] = cylinders;
	return 0;
}
static uint8_t arcmsr_hbaA_wait_msgint_ready(struct AdapterControlBlock *acb)
{
	struct MessageUnit_A __iomem *reg = acb->pmuA;
	int i;

	for (i = 0; i < 2000; i++) {
		if (readl(&reg->outbound_intstatus) &
				ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
			writel(ARCMSR_MU_OUTBOUND_MESSAGE0_INT,
				&reg->outbound_intstatus);
			return true;
		}
		msleep(10);
	} /* max 20 seconds */

	return false;
}
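
/*
 * Editor's note (not part of the original source): the *_wait_msgint_ready()
 * helpers below all follow the same pattern - poll the IOP-to-driver doorbell
 * up to 2000 times with a short sleep per iteration (hence the "max 20
 * seconds" comments), acknowledge the interrupt once it is seen, and report
 * success or failure to the caller instead of blocking forever.
 */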
static uint8_t arcmsr_hbaB_wait_msgint_ready(struct AdapterControlBlock *acb)
{
	struct MessageUnit_B *reg = acb->pmuB;
	int i;

	for (i = 0; i < 2000; i++) {
		if (readl(reg->iop2drv_doorbell)
			& ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
			writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN,
				reg->iop2drv_doorbell);
			writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT,
				reg->drv2iop_doorbell);
			return true;
		}
		msleep(10);
	} /* max 20 seconds */

	return false;
}
static uint8_t arcmsr_hbaC_wait_msgint_ready(struct AdapterControlBlock *pACB)
{
	struct MessageUnit_C __iomem *phbcmu = pACB->pmuC;
	int i;

	for (i = 0; i < 2000; i++) {
		if (readl(&phbcmu->outbound_doorbell)
				& ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
			writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR,
				&phbcmu->outbound_doorbell_clear); /*clear interrupt*/
			return true;
		}
		msleep(10);
	} /* max 20 seconds */

	return false;
}
static bool arcmsr_hbaD_wait_msgint_ready(struct AdapterControlBlock *pACB)
{
	struct MessageUnit_D *reg = pACB->pmuD;
	int i;

	for (i = 0; i < 2000; i++) {
		if (readl(reg->outbound_doorbell)
			& ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE) {
			writel(ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE,
				reg->outbound_doorbell);
			return true;
		}
		msleep(10);
	} /* max 20 seconds */
	return false;
}
static bool arcmsr_hbaE_wait_msgint_ready(struct AdapterControlBlock *pACB)
{
	int i;
	uint32_t read_doorbell;
	struct MessageUnit_E __iomem *phbcmu = pACB->pmuE;

	for (i = 0; i < 2000; i++) {
		read_doorbell = readl(&phbcmu->iobound_doorbell);
		if ((read_doorbell ^ pACB->in_doorbell) & ARCMSR_HBEMU_IOP2DRV_MESSAGE_CMD_DONE) {
			writel(0, &phbcmu->host_int_status); /*clear interrupt*/
			pACB->in_doorbell = read_doorbell;
			return true;
		}
		msleep(10);
	} /* max 20 seconds */
	return false;
}
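
/*
 * Editor's note (not part of the original source): unlike types A-D, which
 * poll a level status bit and clear it, types E and F signal message
 * completion by toggling ARCMSR_HBEMU_IOP2DRV_MESSAGE_CMD_DONE, so the helper
 * above XORs the latest doorbell value against pACB->in_doorbell and then
 * caches it for the next comparison.
 */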
static void arcmsr_hbaA_flush_cache(struct AdapterControlBlock *acb)
{
	struct MessageUnit_A __iomem *reg = acb->pmuA;
	int retry_count = 30;

	writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, &reg->inbound_msgaddr0);
	do {
		if (arcmsr_hbaA_wait_msgint_ready(acb))
			break;

		retry_count--;
		printk(KERN_NOTICE "arcmsr%d: wait 'flush adapter cache' \
		timeout, retry count down = %d \n", acb->host->host_no, retry_count);
	} while (retry_count != 0);
}
static void arcmsr_hbaB_flush_cache(struct AdapterControlBlock *acb)
{
	struct MessageUnit_B *reg = acb->pmuB;
	int retry_count = 30;

	writel(ARCMSR_MESSAGE_FLUSH_CACHE, reg->drv2iop_doorbell);
	do {
		if (arcmsr_hbaB_wait_msgint_ready(acb))
			break;

		retry_count--;
		printk(KERN_NOTICE "arcmsr%d: wait 'flush adapter cache' \
		timeout,retry count down = %d \n", acb->host->host_no, retry_count);
	} while (retry_count != 0);
}
static void arcmsr_hbaC_flush_cache(struct AdapterControlBlock *pACB)
{
	struct MessageUnit_C __iomem *reg = pACB->pmuC;
	int retry_count = 30;	/* enlarge wait flush adapter cache time: 10 minute */

	writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, &reg->inbound_msgaddr0);
	writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
	do {
		if (arcmsr_hbaC_wait_msgint_ready(pACB))
			break;

		retry_count--;
		printk(KERN_NOTICE "arcmsr%d: wait 'flush adapter cache' \
		timeout,retry count down = %d \n", pACB->host->host_no, retry_count);
	} while (retry_count != 0);
}
static void arcmsr_hbaD_flush_cache(struct AdapterControlBlock *pACB)
{
	int retry_count = 15;
	struct MessageUnit_D *reg = pACB->pmuD;

	writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, reg->inbound_msgaddr0);
	do {
		if (arcmsr_hbaD_wait_msgint_ready(pACB))
			break;

		retry_count--;
		pr_notice("arcmsr%d: wait 'flush adapter "
			"cache' timeout, retry count down = %d\n",
			pACB->host->host_no, retry_count);
	} while (retry_count != 0);
}
static void arcmsr_hbaE_flush_cache(struct AdapterControlBlock *pACB)
{
	int retry_count = 30;
	struct MessageUnit_E __iomem *reg = pACB->pmuE;

	writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, &reg->inbound_msgaddr0);
	pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
	writel(pACB->out_doorbell, &reg->iobound_doorbell);
	do {
		if (arcmsr_hbaE_wait_msgint_ready(pACB))
			break;

		retry_count--;
		pr_notice("arcmsr%d: wait 'flush adapter "
			"cache' timeout, retry count down = %d\n",
			pACB->host->host_no, retry_count);
	} while (retry_count != 0);
}
static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A:
		arcmsr_hbaA_flush_cache(acb);
		break;
	case ACB_ADAPTER_TYPE_B:
		arcmsr_hbaB_flush_cache(acb);
		break;
	case ACB_ADAPTER_TYPE_C:
		arcmsr_hbaC_flush_cache(acb);
		break;
	case ACB_ADAPTER_TYPE_D:
		arcmsr_hbaD_flush_cache(acb);
		break;
	case ACB_ADAPTER_TYPE_E:
	case ACB_ADAPTER_TYPE_F:
		arcmsr_hbaE_flush_cache(acb);
		break;
	}
}
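
/*
 * Editor's note (not part of the original source): adapter types E and F are
 * dispatched together here because they use the same doorbell handshake for
 * message-unit commands; the same E/F pairing shows up again in the interrupt
 * masking and postqueue dispatchers later in this file.
 */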
596 static void arcmsr_hbaB_assign_regAddr(struct AdapterControlBlock
*acb
)
598 struct MessageUnit_B
*reg
= acb
->pmuB
;
600 if (acb
->pdev
->device
== PCI_DEVICE_ID_ARECA_1203
) {
601 reg
->drv2iop_doorbell
= MEM_BASE0(ARCMSR_DRV2IOP_DOORBELL_1203
);
602 reg
->drv2iop_doorbell_mask
= MEM_BASE0(ARCMSR_DRV2IOP_DOORBELL_MASK_1203
);
603 reg
->iop2drv_doorbell
= MEM_BASE0(ARCMSR_IOP2DRV_DOORBELL_1203
);
604 reg
->iop2drv_doorbell_mask
= MEM_BASE0(ARCMSR_IOP2DRV_DOORBELL_MASK_1203
);
606 reg
->drv2iop_doorbell
= MEM_BASE0(ARCMSR_DRV2IOP_DOORBELL
);
607 reg
->drv2iop_doorbell_mask
= MEM_BASE0(ARCMSR_DRV2IOP_DOORBELL_MASK
);
608 reg
->iop2drv_doorbell
= MEM_BASE0(ARCMSR_IOP2DRV_DOORBELL
);
609 reg
->iop2drv_doorbell_mask
= MEM_BASE0(ARCMSR_IOP2DRV_DOORBELL_MASK
);
611 reg
->message_wbuffer
= MEM_BASE1(ARCMSR_MESSAGE_WBUFFER
);
612 reg
->message_rbuffer
= MEM_BASE1(ARCMSR_MESSAGE_RBUFFER
);
613 reg
->message_rwbuffer
= MEM_BASE1(ARCMSR_MESSAGE_RWBUFFER
);
616 static void arcmsr_hbaD_assign_regAddr(struct AdapterControlBlock
*acb
)
618 struct MessageUnit_D
*reg
= acb
->pmuD
;
620 reg
->chip_id
= MEM_BASE0(ARCMSR_ARC1214_CHIP_ID
);
621 reg
->cpu_mem_config
= MEM_BASE0(ARCMSR_ARC1214_CPU_MEMORY_CONFIGURATION
);
622 reg
->i2o_host_interrupt_mask
= MEM_BASE0(ARCMSR_ARC1214_I2_HOST_INTERRUPT_MASK
);
623 reg
->sample_at_reset
= MEM_BASE0(ARCMSR_ARC1214_SAMPLE_RESET
);
624 reg
->reset_request
= MEM_BASE0(ARCMSR_ARC1214_RESET_REQUEST
);
625 reg
->host_int_status
= MEM_BASE0(ARCMSR_ARC1214_MAIN_INTERRUPT_STATUS
);
626 reg
->pcief0_int_enable
= MEM_BASE0(ARCMSR_ARC1214_PCIE_F0_INTERRUPT_ENABLE
);
627 reg
->inbound_msgaddr0
= MEM_BASE0(ARCMSR_ARC1214_INBOUND_MESSAGE0
);
628 reg
->inbound_msgaddr1
= MEM_BASE0(ARCMSR_ARC1214_INBOUND_MESSAGE1
);
629 reg
->outbound_msgaddr0
= MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_MESSAGE0
);
630 reg
->outbound_msgaddr1
= MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_MESSAGE1
);
631 reg
->inbound_doorbell
= MEM_BASE0(ARCMSR_ARC1214_INBOUND_DOORBELL
);
632 reg
->outbound_doorbell
= MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_DOORBELL
);
633 reg
->outbound_doorbell_enable
= MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_DOORBELL_ENABLE
);
634 reg
->inboundlist_base_low
= MEM_BASE0(ARCMSR_ARC1214_INBOUND_LIST_BASE_LOW
);
635 reg
->inboundlist_base_high
= MEM_BASE0(ARCMSR_ARC1214_INBOUND_LIST_BASE_HIGH
);
636 reg
->inboundlist_write_pointer
= MEM_BASE0(ARCMSR_ARC1214_INBOUND_LIST_WRITE_POINTER
);
637 reg
->outboundlist_base_low
= MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_LIST_BASE_LOW
);
638 reg
->outboundlist_base_high
= MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_LIST_BASE_HIGH
);
639 reg
->outboundlist_copy_pointer
= MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_LIST_COPY_POINTER
);
640 reg
->outboundlist_read_pointer
= MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_LIST_READ_POINTER
);
641 reg
->outboundlist_interrupt_cause
= MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_INTERRUPT_CAUSE
);
642 reg
->outboundlist_interrupt_enable
= MEM_BASE0(ARCMSR_ARC1214_OUTBOUND_INTERRUPT_ENABLE
);
643 reg
->message_wbuffer
= MEM_BASE0(ARCMSR_ARC1214_MESSAGE_WBUFFER
);
644 reg
->message_rbuffer
= MEM_BASE0(ARCMSR_ARC1214_MESSAGE_RBUFFER
);
645 reg
->msgcode_rwbuffer
= MEM_BASE0(ARCMSR_ARC1214_MESSAGE_RWBUFFER
);
648 static void arcmsr_hbaF_assign_regAddr(struct AdapterControlBlock
*acb
)
650 dma_addr_t host_buffer_dma
;
651 struct MessageUnit_F __iomem
*pmuF
;
653 memset(acb
->dma_coherent2
, 0xff, acb
->completeQ_size
);
654 acb
->message_wbuffer
= (uint32_t *)round_up((unsigned long)acb
->dma_coherent2
+
655 acb
->completeQ_size
, 4);
656 acb
->message_rbuffer
= ((void *)acb
->message_wbuffer
) + 0x100;
657 acb
->msgcode_rwbuffer
= ((void *)acb
->message_wbuffer
) + 0x200;
658 memset((void *)acb
->message_wbuffer
, 0, MESG_RW_BUFFER_SIZE
);
659 host_buffer_dma
= round_up(acb
->dma_coherent_handle2
+ acb
->completeQ_size
, 4);
661 /* host buffer low address, bit0:1 all buffer active */
662 writel(lower_32_bits(host_buffer_dma
| 1), &pmuF
->inbound_msgaddr0
);
663 /* host buffer high address */
664 writel(upper_32_bits(host_buffer_dma
), &pmuF
->inbound_msgaddr1
);
665 /* set host buffer physical address */
666 writel(ARCMSR_HBFMU_DOORBELL_SYNC1
, &pmuF
->iobound_doorbell
);
669 static bool arcmsr_alloc_io_queue(struct AdapterControlBlock
*acb
)
673 dma_addr_t dma_coherent_handle
;
674 struct pci_dev
*pdev
= acb
->pdev
;
676 switch (acb
->adapter_type
) {
677 case ACB_ADAPTER_TYPE_B
: {
678 acb
->ioqueue_size
= roundup(sizeof(struct MessageUnit_B
), 32);
679 dma_coherent
= dma_alloc_coherent(&pdev
->dev
, acb
->ioqueue_size
,
680 &dma_coherent_handle
, GFP_KERNEL
);
682 pr_notice("arcmsr%d: DMA allocation failed\n", acb
->host
->host_no
);
685 acb
->dma_coherent_handle2
= dma_coherent_handle
;
686 acb
->dma_coherent2
= dma_coherent
;
687 acb
->pmuB
= (struct MessageUnit_B
*)dma_coherent
;
688 arcmsr_hbaB_assign_regAddr(acb
);
691 case ACB_ADAPTER_TYPE_D
: {
692 acb
->ioqueue_size
= roundup(sizeof(struct MessageUnit_D
), 32);
693 dma_coherent
= dma_alloc_coherent(&pdev
->dev
, acb
->ioqueue_size
,
694 &dma_coherent_handle
, GFP_KERNEL
);
696 pr_notice("arcmsr%d: DMA allocation failed\n", acb
->host
->host_no
);
699 acb
->dma_coherent_handle2
= dma_coherent_handle
;
700 acb
->dma_coherent2
= dma_coherent
;
701 acb
->pmuD
= (struct MessageUnit_D
*)dma_coherent
;
702 arcmsr_hbaD_assign_regAddr(acb
);
705 case ACB_ADAPTER_TYPE_E
: {
706 uint32_t completeQ_size
;
707 completeQ_size
= sizeof(struct deliver_completeQ
) * ARCMSR_MAX_HBE_DONEQUEUE
+ 128;
708 acb
->ioqueue_size
= roundup(completeQ_size
, 32);
709 dma_coherent
= dma_alloc_coherent(&pdev
->dev
, acb
->ioqueue_size
,
710 &dma_coherent_handle
, GFP_KERNEL
);
712 pr_notice("arcmsr%d: DMA allocation failed\n", acb
->host
->host_no
);
715 acb
->dma_coherent_handle2
= dma_coherent_handle
;
716 acb
->dma_coherent2
= dma_coherent
;
717 acb
->pCompletionQ
= dma_coherent
;
718 acb
->completionQ_entry
= acb
->ioqueue_size
/ sizeof(struct deliver_completeQ
);
719 acb
->doneq_index
= 0;
722 case ACB_ADAPTER_TYPE_F
: {
724 uint32_t depthTbl
[] = {256, 512, 1024, 128, 64, 32};
726 arcmsr_wait_firmware_ready(acb
);
727 QueueDepth
= depthTbl
[readl(&acb
->pmuF
->outbound_msgaddr1
) & 7];
728 acb
->completeQ_size
= sizeof(struct deliver_completeQ
) * QueueDepth
+ 128;
729 acb
->ioqueue_size
= roundup(acb
->completeQ_size
+ MESG_RW_BUFFER_SIZE
, 32);
730 dma_coherent
= dma_alloc_coherent(&pdev
->dev
, acb
->ioqueue_size
,
731 &dma_coherent_handle
, GFP_KERNEL
);
733 pr_notice("arcmsr%d: DMA allocation failed\n", acb
->host
->host_no
);
736 acb
->dma_coherent_handle2
= dma_coherent_handle
;
737 acb
->dma_coherent2
= dma_coherent
;
738 acb
->pCompletionQ
= dma_coherent
;
739 acb
->completionQ_entry
= acb
->completeQ_size
/ sizeof(struct deliver_completeQ
);
740 acb
->doneq_index
= 0;
741 arcmsr_hbaF_assign_regAddr(acb
);
750 static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock
*acb
)
752 struct pci_dev
*pdev
= acb
->pdev
;
754 dma_addr_t dma_coherent_handle
;
755 struct CommandControlBlock
*ccb_tmp
;
757 unsigned long cdb_phyaddr
, next_ccb_phy
;
758 unsigned long roundup_ccbsize
;
759 unsigned long max_xfer_len
;
760 unsigned long max_sg_entrys
;
761 uint32_t firm_config_version
, curr_phy_upper32
;
763 for (i
= 0; i
< ARCMSR_MAX_TARGETID
; i
++)
764 for (j
= 0; j
< ARCMSR_MAX_TARGETLUN
; j
++)
765 acb
->devstate
[i
][j
] = ARECA_RAID_GONE
;
767 max_xfer_len
= ARCMSR_MAX_XFER_LEN
;
768 max_sg_entrys
= ARCMSR_DEFAULT_SG_ENTRIES
;
769 firm_config_version
= acb
->firm_cfg_version
;
770 if((firm_config_version
& 0xFF) >= 3){
771 max_xfer_len
= (ARCMSR_CDB_SG_PAGE_LENGTH
<< ((firm_config_version
>> 8) & 0xFF)) * 1024;/* max 4M byte */
772 max_sg_entrys
= (max_xfer_len
/4096);
774 acb
->host
->max_sectors
= max_xfer_len
/512;
775 acb
->host
->sg_tablesize
= max_sg_entrys
;
776 roundup_ccbsize
= roundup(sizeof(struct CommandControlBlock
) + (max_sg_entrys
- 1) * sizeof(struct SG64ENTRY
), 32);
777 acb
->uncache_size
= roundup_ccbsize
* acb
->maxFreeCCB
;
778 if (acb
->adapter_type
!= ACB_ADAPTER_TYPE_F
)
779 acb
->uncache_size
+= acb
->ioqueue_size
;
780 dma_coherent
= dma_alloc_coherent(&pdev
->dev
, acb
->uncache_size
, &dma_coherent_handle
, GFP_KERNEL
);
782 printk(KERN_NOTICE
"arcmsr%d: dma_alloc_coherent got error\n", acb
->host
->host_no
);
785 acb
->dma_coherent
= dma_coherent
;
786 acb
->dma_coherent_handle
= dma_coherent_handle
;
787 memset(dma_coherent
, 0, acb
->uncache_size
);
788 acb
->ccbsize
= roundup_ccbsize
;
789 ccb_tmp
= dma_coherent
;
790 curr_phy_upper32
= upper_32_bits(dma_coherent_handle
);
791 acb
->vir2phy_offset
= (unsigned long)dma_coherent
- (unsigned long)dma_coherent_handle
;
792 for(i
= 0; i
< acb
->maxFreeCCB
; i
++){
793 cdb_phyaddr
= (unsigned long)dma_coherent_handle
+ offsetof(struct CommandControlBlock
, arcmsr_cdb
);
794 switch (acb
->adapter_type
) {
795 case ACB_ADAPTER_TYPE_A
:
796 case ACB_ADAPTER_TYPE_B
:
797 ccb_tmp
->cdb_phyaddr
= cdb_phyaddr
>> 5;
799 case ACB_ADAPTER_TYPE_C
:
800 case ACB_ADAPTER_TYPE_D
:
801 case ACB_ADAPTER_TYPE_E
:
802 case ACB_ADAPTER_TYPE_F
:
803 ccb_tmp
->cdb_phyaddr
= cdb_phyaddr
;
806 acb
->pccb_pool
[i
] = ccb_tmp
;
808 ccb_tmp
->smid
= (u32
)i
<< 16;
809 INIT_LIST_HEAD(&ccb_tmp
->list
);
810 next_ccb_phy
= dma_coherent_handle
+ roundup_ccbsize
;
811 if (upper_32_bits(next_ccb_phy
) != curr_phy_upper32
) {
813 acb
->host
->can_queue
= i
;
817 list_add_tail(&ccb_tmp
->list
, &acb
->ccb_free_list
);
818 ccb_tmp
= (struct CommandControlBlock
*)((unsigned long)ccb_tmp
+ roundup_ccbsize
);
819 dma_coherent_handle
= next_ccb_phy
;
821 if (acb
->adapter_type
!= ACB_ADAPTER_TYPE_F
) {
822 acb
->dma_coherent_handle2
= dma_coherent_handle
;
823 acb
->dma_coherent2
= ccb_tmp
;
825 switch (acb
->adapter_type
) {
826 case ACB_ADAPTER_TYPE_B
:
827 acb
->pmuB
= (struct MessageUnit_B
*)acb
->dma_coherent2
;
828 arcmsr_hbaB_assign_regAddr(acb
);
830 case ACB_ADAPTER_TYPE_D
:
831 acb
->pmuD
= (struct MessageUnit_D
*)acb
->dma_coherent2
;
832 arcmsr_hbaD_assign_regAddr(acb
);
834 case ACB_ADAPTER_TYPE_E
:
835 acb
->pCompletionQ
= acb
->dma_coherent2
;
836 acb
->completionQ_entry
= acb
->ioqueue_size
/ sizeof(struct deliver_completeQ
);
837 acb
->doneq_index
= 0;
843 static void arcmsr_message_isr_bh_fn(struct work_struct
*work
)
845 struct AdapterControlBlock
*acb
= container_of(work
,
846 struct AdapterControlBlock
, arcmsr_do_message_isr_bh
);
847 char *acb_dev_map
= (char *)acb
->device_map
;
848 uint32_t __iomem
*signature
= NULL
;
849 char __iomem
*devicemap
= NULL
;
851 struct scsi_device
*psdev
;
854 switch (acb
->adapter_type
) {
855 case ACB_ADAPTER_TYPE_A
: {
856 struct MessageUnit_A __iomem
*reg
= acb
->pmuA
;
858 signature
= (uint32_t __iomem
*)(®
->message_rwbuffer
[0]);
859 devicemap
= (char __iomem
*)(®
->message_rwbuffer
[21]);
862 case ACB_ADAPTER_TYPE_B
: {
863 struct MessageUnit_B
*reg
= acb
->pmuB
;
865 signature
= (uint32_t __iomem
*)(®
->message_rwbuffer
[0]);
866 devicemap
= (char __iomem
*)(®
->message_rwbuffer
[21]);
869 case ACB_ADAPTER_TYPE_C
: {
870 struct MessageUnit_C __iomem
*reg
= acb
->pmuC
;
872 signature
= (uint32_t __iomem
*)(®
->msgcode_rwbuffer
[0]);
873 devicemap
= (char __iomem
*)(®
->msgcode_rwbuffer
[21]);
876 case ACB_ADAPTER_TYPE_D
: {
877 struct MessageUnit_D
*reg
= acb
->pmuD
;
879 signature
= (uint32_t __iomem
*)(®
->msgcode_rwbuffer
[0]);
880 devicemap
= (char __iomem
*)(®
->msgcode_rwbuffer
[21]);
883 case ACB_ADAPTER_TYPE_E
: {
884 struct MessageUnit_E __iomem
*reg
= acb
->pmuE
;
886 signature
= (uint32_t __iomem
*)(®
->msgcode_rwbuffer
[0]);
887 devicemap
= (char __iomem
*)(®
->msgcode_rwbuffer
[21]);
890 case ACB_ADAPTER_TYPE_F
: {
891 signature
= (uint32_t __iomem
*)(&acb
->msgcode_rwbuffer
[0]);
892 devicemap
= (char __iomem
*)(&acb
->msgcode_rwbuffer
[21]);
896 if (readl(signature
) != ARCMSR_SIGNATURE_GET_CONFIG
)
898 for (target
= 0; target
< ARCMSR_MAX_TARGETID
- 1;
900 temp
= readb(devicemap
);
901 diff
= (*acb_dev_map
) ^ temp
;
904 for (lun
= 0; lun
< ARCMSR_MAX_TARGETLUN
;
906 if ((diff
& 0x01) == 1 &&
907 (temp
& 0x01) == 1) {
908 scsi_add_device(acb
->host
,
910 } else if ((diff
& 0x01) == 1
911 && (temp
& 0x01) == 0) {
912 psdev
= scsi_device_lookup(acb
->host
,
915 scsi_remove_device(psdev
);
916 scsi_device_put(psdev
);
926 acb
->acb_flags
&= ~ACB_F_MSG_GET_CONFIG
;
930 arcmsr_request_irq(struct pci_dev
*pdev
, struct AdapterControlBlock
*acb
)
935 if (msix_enable
== 0)
937 nvec
= pci_alloc_irq_vectors(pdev
, 1, ARCMST_NUM_MSIX_VECTORS
,
940 pr_info("arcmsr%d: msi-x enabled\n", acb
->host
->host_no
);
944 if (msi_enable
== 1) {
945 nvec
= pci_alloc_irq_vectors(pdev
, 1, 1, PCI_IRQ_MSI
);
947 dev_info(&pdev
->dev
, "msi enabled\n");
951 nvec
= pci_alloc_irq_vectors(pdev
, 1, 1, PCI_IRQ_LEGACY
);
958 acb
->vector_count
= nvec
;
959 for (i
= 0; i
< nvec
; i
++) {
960 if (request_irq(pci_irq_vector(pdev
, i
), arcmsr_do_interrupt
,
961 flags
, "arcmsr", acb
)) {
962 pr_warn("arcmsr%d: request_irq =%d failed!\n",
963 acb
->host
->host_no
, pci_irq_vector(pdev
, i
));
971 free_irq(pci_irq_vector(pdev
, i
), acb
);
972 pci_free_irq_vectors(pdev
);
976 static void arcmsr_init_get_devmap_timer(struct AdapterControlBlock
*pacb
)
978 INIT_WORK(&pacb
->arcmsr_do_message_isr_bh
, arcmsr_message_isr_bh_fn
);
979 pacb
->fw_flag
= FW_NORMAL
;
980 timer_setup(&pacb
->eternal_timer
, arcmsr_request_device_map
, 0);
981 pacb
->eternal_timer
.expires
= jiffies
+ msecs_to_jiffies(6 * HZ
);
982 add_timer(&pacb
->eternal_timer
);
985 static void arcmsr_init_set_datetime_timer(struct AdapterControlBlock
*pacb
)
987 timer_setup(&pacb
->refresh_timer
, arcmsr_set_iop_datetime
, 0);
988 pacb
->refresh_timer
.expires
= jiffies
+ msecs_to_jiffies(60 * 1000);
989 add_timer(&pacb
->refresh_timer
);
992 static int arcmsr_set_dma_mask(struct AdapterControlBlock
*acb
)
994 struct pci_dev
*pcidev
= acb
->pdev
;
997 if (((acb
->adapter_type
== ACB_ADAPTER_TYPE_A
) && !dma_mask_64
) ||
998 dma_set_mask(&pcidev
->dev
, DMA_BIT_MASK(64)))
1000 if (dma_set_coherent_mask(&pcidev
->dev
, DMA_BIT_MASK(64)) ||
1001 dma_set_mask_and_coherent(&pcidev
->dev
, DMA_BIT_MASK(64))) {
1002 printk("arcmsr: set DMA 64 mask failed\n");
1007 if (dma_set_mask(&pcidev
->dev
, DMA_BIT_MASK(32)) ||
1008 dma_set_coherent_mask(&pcidev
->dev
, DMA_BIT_MASK(32)) ||
1009 dma_set_mask_and_coherent(&pcidev
->dev
, DMA_BIT_MASK(32))) {
1010 printk("arcmsr: set DMA 32-bit mask failed\n");
1017 static int arcmsr_probe(struct pci_dev
*pdev
, const struct pci_device_id
*id
)
1019 struct Scsi_Host
*host
;
1020 struct AdapterControlBlock
*acb
;
1021 uint8_t bus
,dev_fun
;
1023 error
= pci_enable_device(pdev
);
1027 host
= scsi_host_alloc(&arcmsr_scsi_host_template
, sizeof(struct AdapterControlBlock
));
1029 goto pci_disable_dev
;
1031 init_waitqueue_head(&wait_q
);
1032 bus
= pdev
->bus
->number
;
1033 dev_fun
= pdev
->devfn
;
1034 acb
= (struct AdapterControlBlock
*) host
->hostdata
;
1035 memset(acb
,0,sizeof(struct AdapterControlBlock
));
1037 acb
->adapter_type
= id
->driver_data
;
1038 if (arcmsr_set_dma_mask(acb
))
1039 goto scsi_host_release
;
1041 host
->max_lun
= ARCMSR_MAX_TARGETLUN
;
1042 host
->max_id
= ARCMSR_MAX_TARGETID
; /*16:8*/
1043 host
->max_cmd_len
= 16; /*this is issue of 64bit LBA ,over 2T byte*/
1044 if ((host_can_queue
< ARCMSR_MIN_OUTSTANDING_CMD
) || (host_can_queue
> ARCMSR_MAX_OUTSTANDING_CMD
))
1045 host_can_queue
= ARCMSR_DEFAULT_OUTSTANDING_CMD
;
1046 host
->can_queue
= host_can_queue
; /* max simultaneous cmds */
1047 if ((cmd_per_lun
< ARCMSR_MIN_CMD_PERLUN
) || (cmd_per_lun
> ARCMSR_MAX_CMD_PERLUN
))
1048 cmd_per_lun
= ARCMSR_DEFAULT_CMD_PERLUN
;
1049 host
->cmd_per_lun
= cmd_per_lun
;
1050 host
->this_id
= ARCMSR_SCSI_INITIATOR_ID
;
1051 host
->unique_id
= (bus
<< 8) | dev_fun
;
1052 pci_set_drvdata(pdev
, host
);
1053 pci_set_master(pdev
);
1054 error
= pci_request_regions(pdev
, "arcmsr");
1056 goto scsi_host_release
;
1058 spin_lock_init(&acb
->eh_lock
);
1059 spin_lock_init(&acb
->ccblist_lock
);
1060 spin_lock_init(&acb
->postq_lock
);
1061 spin_lock_init(&acb
->doneq_lock
);
1062 spin_lock_init(&acb
->rqbuffer_lock
);
1063 spin_lock_init(&acb
->wqbuffer_lock
);
1064 acb
->acb_flags
|= (ACB_F_MESSAGE_WQBUFFER_CLEARED
|
1065 ACB_F_MESSAGE_RQBUFFER_CLEARED
|
1066 ACB_F_MESSAGE_WQBUFFER_READED
);
1067 acb
->acb_flags
&= ~ACB_F_SCSISTOPADAPTER
;
1068 INIT_LIST_HEAD(&acb
->ccb_free_list
);
1069 error
= arcmsr_remap_pciregion(acb
);
1071 goto pci_release_regs
;
1073 error
= arcmsr_alloc_io_queue(acb
);
1075 goto unmap_pci_region
;
1076 error
= arcmsr_get_firmware_spec(acb
);
1080 if (acb
->adapter_type
!= ACB_ADAPTER_TYPE_F
)
1081 arcmsr_free_io_queue(acb
);
1082 error
= arcmsr_alloc_ccb_pool(acb
);
1084 goto unmap_pci_region
;
1086 error
= scsi_add_host(host
, &pdev
->dev
);
1090 if (arcmsr_request_irq(pdev
, acb
) == FAILED
)
1091 goto scsi_host_remove
;
1092 arcmsr_iop_init(acb
);
1093 arcmsr_init_get_devmap_timer(acb
);
1095 arcmsr_init_set_datetime_timer(acb
);
1096 if(arcmsr_alloc_sysfs_attr(acb
))
1097 goto out_free_sysfs
;
1098 scsi_scan_host(host
);
1102 del_timer_sync(&acb
->refresh_timer
);
1103 del_timer_sync(&acb
->eternal_timer
);
1104 flush_work(&acb
->arcmsr_do_message_isr_bh
);
1105 arcmsr_stop_adapter_bgrb(acb
);
1106 arcmsr_flush_adapter_cache(acb
);
1107 arcmsr_free_irq(pdev
, acb
);
1109 scsi_remove_host(host
);
1111 arcmsr_free_ccb_pool(acb
);
1112 goto unmap_pci_region
;
1114 arcmsr_free_io_queue(acb
);
1116 arcmsr_unmap_pciregion(acb
);
1118 pci_release_regions(pdev
);
1120 scsi_host_put(host
);
1122 pci_disable_device(pdev
);
1126 static void arcmsr_free_irq(struct pci_dev
*pdev
,
1127 struct AdapterControlBlock
*acb
)
1131 for (i
= 0; i
< acb
->vector_count
; i
++)
1132 free_irq(pci_irq_vector(pdev
, i
), acb
);
1133 pci_free_irq_vectors(pdev
);
1136 static int __maybe_unused
arcmsr_suspend(struct device
*dev
)
1138 struct pci_dev
*pdev
= to_pci_dev(dev
);
1139 struct Scsi_Host
*host
= pci_get_drvdata(pdev
);
1140 struct AdapterControlBlock
*acb
=
1141 (struct AdapterControlBlock
*)host
->hostdata
;
1143 arcmsr_disable_outbound_ints(acb
);
1144 arcmsr_free_irq(pdev
, acb
);
1145 del_timer_sync(&acb
->eternal_timer
);
1147 del_timer_sync(&acb
->refresh_timer
);
1148 flush_work(&acb
->arcmsr_do_message_isr_bh
);
1149 arcmsr_stop_adapter_bgrb(acb
);
1150 arcmsr_flush_adapter_cache(acb
);
1154 static int __maybe_unused
arcmsr_resume(struct device
*dev
)
1156 struct pci_dev
*pdev
= to_pci_dev(dev
);
1157 struct Scsi_Host
*host
= pci_get_drvdata(pdev
);
1158 struct AdapterControlBlock
*acb
=
1159 (struct AdapterControlBlock
*)host
->hostdata
;
1161 if (arcmsr_set_dma_mask(acb
))
1162 goto controller_unregister
;
1163 if (arcmsr_request_irq(pdev
, acb
) == FAILED
)
1164 goto controller_stop
;
1165 switch (acb
->adapter_type
) {
1166 case ACB_ADAPTER_TYPE_B
: {
1167 struct MessageUnit_B
*reg
= acb
->pmuB
;
1169 for (i
= 0; i
< ARCMSR_MAX_HBB_POSTQUEUE
; i
++) {
1170 reg
->post_qbuffer
[i
] = 0;
1171 reg
->done_qbuffer
[i
] = 0;
1173 reg
->postq_index
= 0;
1174 reg
->doneq_index
= 0;
1177 case ACB_ADAPTER_TYPE_E
:
1178 writel(0, &acb
->pmuE
->host_int_status
);
1179 writel(ARCMSR_HBEMU_DOORBELL_SYNC
, &acb
->pmuE
->iobound_doorbell
);
1180 acb
->in_doorbell
= 0;
1181 acb
->out_doorbell
= 0;
1182 acb
->doneq_index
= 0;
1184 case ACB_ADAPTER_TYPE_F
:
1185 writel(0, &acb
->pmuF
->host_int_status
);
1186 writel(ARCMSR_HBFMU_DOORBELL_SYNC
, &acb
->pmuF
->iobound_doorbell
);
1187 acb
->in_doorbell
= 0;
1188 acb
->out_doorbell
= 0;
1189 acb
->doneq_index
= 0;
1190 arcmsr_hbaF_assign_regAddr(acb
);
1193 arcmsr_iop_init(acb
);
1194 arcmsr_init_get_devmap_timer(acb
);
1196 arcmsr_init_set_datetime_timer(acb
);
1199 arcmsr_stop_adapter_bgrb(acb
);
1200 arcmsr_flush_adapter_cache(acb
);
1201 controller_unregister
:
1202 scsi_remove_host(host
);
1203 arcmsr_free_ccb_pool(acb
);
1204 if (acb
->adapter_type
== ACB_ADAPTER_TYPE_F
)
1205 arcmsr_free_io_queue(acb
);
1206 arcmsr_unmap_pciregion(acb
);
1207 scsi_host_put(host
);
1211 static uint8_t arcmsr_hbaA_abort_allcmd(struct AdapterControlBlock
*acb
)
1213 struct MessageUnit_A __iomem
*reg
= acb
->pmuA
;
1214 writel(ARCMSR_INBOUND_MESG0_ABORT_CMD
, ®
->inbound_msgaddr0
);
1215 if (!arcmsr_hbaA_wait_msgint_ready(acb
)) {
1217 "arcmsr%d: wait 'abort all outstanding command' timeout\n"
1218 , acb
->host
->host_no
);
1224 static uint8_t arcmsr_hbaB_abort_allcmd(struct AdapterControlBlock
*acb
)
1226 struct MessageUnit_B
*reg
= acb
->pmuB
;
1228 writel(ARCMSR_MESSAGE_ABORT_CMD
, reg
->drv2iop_doorbell
);
1229 if (!arcmsr_hbaB_wait_msgint_ready(acb
)) {
1231 "arcmsr%d: wait 'abort all outstanding command' timeout\n"
1232 , acb
->host
->host_no
);
1237 static uint8_t arcmsr_hbaC_abort_allcmd(struct AdapterControlBlock
*pACB
)
1239 struct MessageUnit_C __iomem
*reg
= pACB
->pmuC
;
1240 writel(ARCMSR_INBOUND_MESG0_ABORT_CMD
, ®
->inbound_msgaddr0
);
1241 writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE
, ®
->inbound_doorbell
);
1242 if (!arcmsr_hbaC_wait_msgint_ready(pACB
)) {
1244 "arcmsr%d: wait 'abort all outstanding command' timeout\n"
1245 , pACB
->host
->host_no
);
1251 static uint8_t arcmsr_hbaD_abort_allcmd(struct AdapterControlBlock
*pACB
)
1253 struct MessageUnit_D
*reg
= pACB
->pmuD
;
1255 writel(ARCMSR_INBOUND_MESG0_ABORT_CMD
, reg
->inbound_msgaddr0
);
1256 if (!arcmsr_hbaD_wait_msgint_ready(pACB
)) {
1257 pr_notice("arcmsr%d: wait 'abort all outstanding "
1258 "command' timeout\n", pACB
->host
->host_no
);
1264 static uint8_t arcmsr_hbaE_abort_allcmd(struct AdapterControlBlock
*pACB
)
1266 struct MessageUnit_E __iomem
*reg
= pACB
->pmuE
;
1268 writel(ARCMSR_INBOUND_MESG0_ABORT_CMD
, ®
->inbound_msgaddr0
);
1269 pACB
->out_doorbell
^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE
;
1270 writel(pACB
->out_doorbell
, ®
->iobound_doorbell
);
1271 if (!arcmsr_hbaE_wait_msgint_ready(pACB
)) {
1272 pr_notice("arcmsr%d: wait 'abort all outstanding "
1273 "command' timeout\n", pACB
->host
->host_no
);
1279 static uint8_t arcmsr_abort_allcmd(struct AdapterControlBlock
*acb
)
1282 switch (acb
->adapter_type
) {
1283 case ACB_ADAPTER_TYPE_A
:
1284 rtnval
= arcmsr_hbaA_abort_allcmd(acb
);
1286 case ACB_ADAPTER_TYPE_B
:
1287 rtnval
= arcmsr_hbaB_abort_allcmd(acb
);
1289 case ACB_ADAPTER_TYPE_C
:
1290 rtnval
= arcmsr_hbaC_abort_allcmd(acb
);
1292 case ACB_ADAPTER_TYPE_D
:
1293 rtnval
= arcmsr_hbaD_abort_allcmd(acb
);
1295 case ACB_ADAPTER_TYPE_E
:
1296 case ACB_ADAPTER_TYPE_F
:
1297 rtnval
= arcmsr_hbaE_abort_allcmd(acb
);
1303 static void arcmsr_pci_unmap_dma(struct CommandControlBlock
*ccb
)
1305 struct scsi_cmnd
*pcmd
= ccb
->pcmd
;
1307 scsi_dma_unmap(pcmd
);
1310 static void arcmsr_ccb_complete(struct CommandControlBlock
*ccb
)
1312 struct AdapterControlBlock
*acb
= ccb
->acb
;
1313 struct scsi_cmnd
*pcmd
= ccb
->pcmd
;
1314 unsigned long flags
;
1315 atomic_dec(&acb
->ccboutstandingcount
);
1316 arcmsr_pci_unmap_dma(ccb
);
1317 ccb
->startdone
= ARCMSR_CCB_DONE
;
1318 spin_lock_irqsave(&acb
->ccblist_lock
, flags
);
1319 list_add_tail(&ccb
->list
, &acb
->ccb_free_list
);
1320 spin_unlock_irqrestore(&acb
->ccblist_lock
, flags
);
1321 pcmd
->scsi_done(pcmd
);
1324 static void arcmsr_report_sense_info(struct CommandControlBlock
*ccb
)
1327 struct scsi_cmnd
*pcmd
= ccb
->pcmd
;
1328 struct SENSE_DATA
*sensebuffer
= (struct SENSE_DATA
*)pcmd
->sense_buffer
;
1329 pcmd
->result
= (DID_OK
<< 16) | (CHECK_CONDITION
<< 1);
1331 int sense_data_length
=
1332 sizeof(struct SENSE_DATA
) < SCSI_SENSE_BUFFERSIZE
1333 ? sizeof(struct SENSE_DATA
) : SCSI_SENSE_BUFFERSIZE
;
1334 memset(sensebuffer
, 0, SCSI_SENSE_BUFFERSIZE
);
1335 memcpy(sensebuffer
, ccb
->arcmsr_cdb
.SenseData
, sense_data_length
);
1336 sensebuffer
->ErrorCode
= SCSI_SENSE_CURRENT_ERRORS
;
1337 sensebuffer
->Valid
= 1;
1338 pcmd
->result
|= (DRIVER_SENSE
<< 24);
1342 static u32
arcmsr_disable_outbound_ints(struct AdapterControlBlock
*acb
)
1345 switch (acb
->adapter_type
) {
1346 case ACB_ADAPTER_TYPE_A
: {
1347 struct MessageUnit_A __iomem
*reg
= acb
->pmuA
;
1348 orig_mask
= readl(®
->outbound_intmask
);
1349 writel(orig_mask
|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE
, \
1350 ®
->outbound_intmask
);
1353 case ACB_ADAPTER_TYPE_B
: {
1354 struct MessageUnit_B
*reg
= acb
->pmuB
;
1355 orig_mask
= readl(reg
->iop2drv_doorbell_mask
);
1356 writel(0, reg
->iop2drv_doorbell_mask
);
1359 case ACB_ADAPTER_TYPE_C
:{
1360 struct MessageUnit_C __iomem
*reg
= acb
->pmuC
;
1361 /* disable all outbound interrupt */
1362 orig_mask
= readl(®
->host_int_mask
); /* disable outbound message0 int */
1363 writel(orig_mask
|ARCMSR_HBCMU_ALL_INTMASKENABLE
, ®
->host_int_mask
);
1366 case ACB_ADAPTER_TYPE_D
: {
1367 struct MessageUnit_D
*reg
= acb
->pmuD
;
1368 /* disable all outbound interrupt */
1369 writel(ARCMSR_ARC1214_ALL_INT_DISABLE
, reg
->pcief0_int_enable
);
1372 case ACB_ADAPTER_TYPE_E
:
1373 case ACB_ADAPTER_TYPE_F
: {
1374 struct MessageUnit_E __iomem
*reg
= acb
->pmuE
;
1375 orig_mask
= readl(®
->host_int_mask
);
1376 writel(orig_mask
| ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR
| ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR
, ®
->host_int_mask
);
1377 readl(®
->host_int_mask
); /* Dummy readl to force pci flush */
1384 static void arcmsr_report_ccb_state(struct AdapterControlBlock
*acb
,
1385 struct CommandControlBlock
*ccb
, bool error
)
1388 id
= ccb
->pcmd
->device
->id
;
1389 lun
= ccb
->pcmd
->device
->lun
;
1391 if (acb
->devstate
[id
][lun
] == ARECA_RAID_GONE
)
1392 acb
->devstate
[id
][lun
] = ARECA_RAID_GOOD
;
1393 ccb
->pcmd
->result
= DID_OK
<< 16;
1394 arcmsr_ccb_complete(ccb
);
1396 switch (ccb
->arcmsr_cdb
.DeviceStatus
) {
1397 case ARCMSR_DEV_SELECT_TIMEOUT
: {
1398 acb
->devstate
[id
][lun
] = ARECA_RAID_GONE
;
1399 ccb
->pcmd
->result
= DID_NO_CONNECT
<< 16;
1400 arcmsr_ccb_complete(ccb
);
1404 case ARCMSR_DEV_ABORTED
:
1406 case ARCMSR_DEV_INIT_FAIL
: {
1407 acb
->devstate
[id
][lun
] = ARECA_RAID_GONE
;
1408 ccb
->pcmd
->result
= DID_BAD_TARGET
<< 16;
1409 arcmsr_ccb_complete(ccb
);
1413 case ARCMSR_DEV_CHECK_CONDITION
: {
1414 acb
->devstate
[id
][lun
] = ARECA_RAID_GOOD
;
1415 arcmsr_report_sense_info(ccb
);
1416 arcmsr_ccb_complete(ccb
);
1422 "arcmsr%d: scsi id = %d lun = %d isr get command error done, \
1423 but got unknown DeviceStatus = 0x%x \n"
1424 , acb
->host
->host_no
1427 , ccb
->arcmsr_cdb
.DeviceStatus
);
1428 acb
->devstate
[id
][lun
] = ARECA_RAID_GONE
;
1429 ccb
->pcmd
->result
= DID_NO_CONNECT
<< 16;
1430 arcmsr_ccb_complete(ccb
);
1436 static void arcmsr_drain_donequeue(struct AdapterControlBlock
*acb
, struct CommandControlBlock
*pCCB
, bool error
)
1438 if ((pCCB
->acb
!= acb
) || (pCCB
->startdone
!= ARCMSR_CCB_START
)) {
1439 if (pCCB
->startdone
== ARCMSR_CCB_ABORTED
) {
1440 struct scsi_cmnd
*abortcmd
= pCCB
->pcmd
;
1442 abortcmd
->result
|= DID_ABORT
<< 16;
1443 arcmsr_ccb_complete(pCCB
);
1444 printk(KERN_NOTICE
"arcmsr%d: pCCB ='0x%p' isr got aborted command \n",
1445 acb
->host
->host_no
, pCCB
);
1449 printk(KERN_NOTICE
"arcmsr%d: isr get an illegal ccb command \
1451 "ccb = '0x%p' ccbacb = '0x%p' startdone = 0x%x"
1452 " ccboutstandingcount = %d \n"
1453 , acb
->host
->host_no
1458 , atomic_read(&acb
->ccboutstandingcount
));
1461 arcmsr_report_ccb_state(acb
, pCCB
, error
);
1464 static void arcmsr_done4abort_postqueue(struct AdapterControlBlock
*acb
)
1468 struct ARCMSR_CDB
*pARCMSR_CDB
;
1470 struct CommandControlBlock
*pCCB
;
1471 unsigned long ccb_cdb_phy
;
1473 switch (acb
->adapter_type
) {
1475 case ACB_ADAPTER_TYPE_A
: {
1476 struct MessageUnit_A __iomem
*reg
= acb
->pmuA
;
1477 uint32_t outbound_intstatus
;
1478 outbound_intstatus
= readl(®
->outbound_intstatus
) &
1479 acb
->outbound_int_enable
;
1480 /*clear and abort all outbound posted Q*/
1481 writel(outbound_intstatus
, ®
->outbound_intstatus
);/*clear interrupt*/
1482 while(((flag_ccb
= readl(®
->outbound_queueport
)) != 0xFFFFFFFF)
1483 && (i
++ < acb
->maxOutstanding
)) {
1484 ccb_cdb_phy
= (flag_ccb
<< 5) & 0xffffffff;
1485 if (acb
->cdb_phyadd_hipart
)
1486 ccb_cdb_phy
= ccb_cdb_phy
| acb
->cdb_phyadd_hipart
;
1487 pARCMSR_CDB
= (struct ARCMSR_CDB
*)(acb
->vir2phy_offset
+ ccb_cdb_phy
);
1488 pCCB
= container_of(pARCMSR_CDB
, struct CommandControlBlock
, arcmsr_cdb
);
1489 error
= (flag_ccb
& ARCMSR_CCBREPLY_FLAG_ERROR_MODE0
) ? true : false;
1490 arcmsr_drain_donequeue(acb
, pCCB
, error
);
1495 case ACB_ADAPTER_TYPE_B
: {
1496 struct MessageUnit_B
*reg
= acb
->pmuB
;
1497 /*clear all outbound posted Q*/
1498 writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN
, reg
->iop2drv_doorbell
); /* clear doorbell interrupt */
1499 for (i
= 0; i
< ARCMSR_MAX_HBB_POSTQUEUE
; i
++) {
1500 flag_ccb
= reg
->done_qbuffer
[i
];
1501 if (flag_ccb
!= 0) {
1502 reg
->done_qbuffer
[i
] = 0;
1503 ccb_cdb_phy
= (flag_ccb
<< 5) & 0xffffffff;
1504 if (acb
->cdb_phyadd_hipart
)
1505 ccb_cdb_phy
= ccb_cdb_phy
| acb
->cdb_phyadd_hipart
;
1506 pARCMSR_CDB
= (struct ARCMSR_CDB
*)(acb
->vir2phy_offset
+ ccb_cdb_phy
);
1507 pCCB
= container_of(pARCMSR_CDB
, struct CommandControlBlock
, arcmsr_cdb
);
1508 error
= (flag_ccb
& ARCMSR_CCBREPLY_FLAG_ERROR_MODE0
) ? true : false;
1509 arcmsr_drain_donequeue(acb
, pCCB
, error
);
1511 reg
->post_qbuffer
[i
] = 0;
1513 reg
->doneq_index
= 0;
1514 reg
->postq_index
= 0;
1517 case ACB_ADAPTER_TYPE_C
: {
1518 struct MessageUnit_C __iomem
*reg
= acb
->pmuC
;
1519 while ((readl(®
->host_int_status
) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR
) && (i
++ < acb
->maxOutstanding
)) {
1521 flag_ccb
= readl(®
->outbound_queueport_low
);
1522 ccb_cdb_phy
= (flag_ccb
& 0xFFFFFFF0);
1523 if (acb
->cdb_phyadd_hipart
)
1524 ccb_cdb_phy
= ccb_cdb_phy
| acb
->cdb_phyadd_hipart
;
1525 pARCMSR_CDB
= (struct ARCMSR_CDB
*)(acb
->vir2phy_offset
+ ccb_cdb_phy
);
1526 pCCB
= container_of(pARCMSR_CDB
, struct CommandControlBlock
, arcmsr_cdb
);
1527 error
= (flag_ccb
& ARCMSR_CCBREPLY_FLAG_ERROR_MODE1
) ? true : false;
1528 arcmsr_drain_donequeue(acb
, pCCB
, error
);
1532 case ACB_ADAPTER_TYPE_D
: {
1533 struct MessageUnit_D
*pmu
= acb
->pmuD
;
1534 uint32_t outbound_write_pointer
;
1535 uint32_t doneq_index
, index_stripped
, addressLow
, residual
, toggle
;
1536 unsigned long flags
;
1538 residual
= atomic_read(&acb
->ccboutstandingcount
);
1539 for (i
= 0; i
< residual
; i
++) {
1540 spin_lock_irqsave(&acb
->doneq_lock
, flags
);
1541 outbound_write_pointer
=
1542 pmu
->done_qbuffer
[0].addressLow
+ 1;
1543 doneq_index
= pmu
->doneq_index
;
1544 if ((doneq_index
& 0xFFF) !=
1545 (outbound_write_pointer
& 0xFFF)) {
1546 toggle
= doneq_index
& 0x4000;
1547 index_stripped
= (doneq_index
& 0xFFF) + 1;
1548 index_stripped
%= ARCMSR_MAX_ARC1214_DONEQUEUE
;
1549 pmu
->doneq_index
= index_stripped
? (index_stripped
| toggle
) :
1550 ((toggle
^ 0x4000) + 1);
1551 doneq_index
= pmu
->doneq_index
;
1552 spin_unlock_irqrestore(&acb
->doneq_lock
, flags
);
1553 addressLow
= pmu
->done_qbuffer
[doneq_index
&
1555 ccb_cdb_phy
= (addressLow
& 0xFFFFFFF0);
1556 if (acb
->cdb_phyadd_hipart
)
1557 ccb_cdb_phy
= ccb_cdb_phy
| acb
->cdb_phyadd_hipart
;
1558 pARCMSR_CDB
= (struct ARCMSR_CDB
*)
1559 (acb
->vir2phy_offset
+ ccb_cdb_phy
);
1560 pCCB
= container_of(pARCMSR_CDB
,
1561 struct CommandControlBlock
, arcmsr_cdb
);
1562 error
= (addressLow
&
1563 ARCMSR_CCBREPLY_FLAG_ERROR_MODE1
) ?
1565 arcmsr_drain_donequeue(acb
, pCCB
, error
);
1567 pmu
->outboundlist_read_pointer
);
1569 spin_unlock_irqrestore(&acb
->doneq_lock
, flags
);
1573 pmu
->postq_index
= 0;
1574 pmu
->doneq_index
= 0x40FF;
1577 case ACB_ADAPTER_TYPE_E
:
1578 arcmsr_hbaE_postqueue_isr(acb
);
1580 case ACB_ADAPTER_TYPE_F
:
1581 arcmsr_hbaF_postqueue_isr(acb
);
1586 static void arcmsr_remove_scsi_devices(struct AdapterControlBlock
*acb
)
1588 char *acb_dev_map
= (char *)acb
->device_map
;
1590 struct scsi_device
*psdev
;
1591 struct CommandControlBlock
*ccb
;
1594 for (i
= 0; i
< acb
->maxFreeCCB
; i
++) {
1595 ccb
= acb
->pccb_pool
[i
];
1596 if (ccb
->startdone
== ARCMSR_CCB_START
) {
1597 ccb
->pcmd
->result
= DID_NO_CONNECT
<< 16;
1598 arcmsr_pci_unmap_dma(ccb
);
1599 ccb
->pcmd
->scsi_done(ccb
->pcmd
);
1602 for (target
= 0; target
< ARCMSR_MAX_TARGETID
; target
++) {
1603 temp
= *acb_dev_map
;
1605 for (lun
= 0; lun
< ARCMSR_MAX_TARGETLUN
; lun
++) {
1607 psdev
= scsi_device_lookup(acb
->host
,
1609 if (psdev
!= NULL
) {
1610 scsi_remove_device(psdev
);
1611 scsi_device_put(psdev
);
1622 static void arcmsr_free_pcidev(struct AdapterControlBlock
*acb
)
1624 struct pci_dev
*pdev
;
1625 struct Scsi_Host
*host
;
1628 arcmsr_free_sysfs_attr(acb
);
1629 scsi_remove_host(host
);
1630 flush_work(&acb
->arcmsr_do_message_isr_bh
);
1631 del_timer_sync(&acb
->eternal_timer
);
1633 del_timer_sync(&acb
->refresh_timer
);
1635 arcmsr_free_irq(pdev
, acb
);
1636 arcmsr_free_ccb_pool(acb
);
1637 if (acb
->adapter_type
== ACB_ADAPTER_TYPE_F
)
1638 arcmsr_free_io_queue(acb
);
1639 arcmsr_unmap_pciregion(acb
);
1640 pci_release_regions(pdev
);
1641 scsi_host_put(host
);
1642 pci_disable_device(pdev
);
1645 static void arcmsr_remove(struct pci_dev
*pdev
)
1647 struct Scsi_Host
*host
= pci_get_drvdata(pdev
);
1648 struct AdapterControlBlock
*acb
=
1649 (struct AdapterControlBlock
*) host
->hostdata
;
1653 pci_read_config_word(pdev
, PCI_DEVICE_ID
, &dev_id
);
1654 if (dev_id
== 0xffff) {
1655 acb
->acb_flags
&= ~ACB_F_IOP_INITED
;
1656 acb
->acb_flags
|= ACB_F_ADAPTER_REMOVED
;
1657 arcmsr_remove_scsi_devices(acb
);
1658 arcmsr_free_pcidev(acb
);
1661 arcmsr_free_sysfs_attr(acb
);
1662 scsi_remove_host(host
);
1663 flush_work(&acb
->arcmsr_do_message_isr_bh
);
1664 del_timer_sync(&acb
->eternal_timer
);
1666 del_timer_sync(&acb
->refresh_timer
);
1667 arcmsr_disable_outbound_ints(acb
);
1668 arcmsr_stop_adapter_bgrb(acb
);
1669 arcmsr_flush_adapter_cache(acb
);
1670 acb
->acb_flags
|= ACB_F_SCSISTOPADAPTER
;
1671 acb
->acb_flags
&= ~ACB_F_IOP_INITED
;
1673 for (poll_count
= 0; poll_count
< acb
->maxOutstanding
; poll_count
++){
1674 if (!atomic_read(&acb
->ccboutstandingcount
))
1676 arcmsr_interrupt(acb
);/* FIXME: need spinlock */
1680 if (atomic_read(&acb
->ccboutstandingcount
)) {
1683 arcmsr_abort_allcmd(acb
);
1684 arcmsr_done4abort_postqueue(acb
);
1685 for (i
= 0; i
< acb
->maxFreeCCB
; i
++) {
1686 struct CommandControlBlock
*ccb
= acb
->pccb_pool
[i
];
1687 if (ccb
->startdone
== ARCMSR_CCB_START
) {
1688 ccb
->startdone
= ARCMSR_CCB_ABORTED
;
1689 ccb
->pcmd
->result
= DID_ABORT
<< 16;
1690 arcmsr_ccb_complete(ccb
);
1694 arcmsr_free_irq(pdev
, acb
);
1695 arcmsr_free_ccb_pool(acb
);
1696 if (acb
->adapter_type
== ACB_ADAPTER_TYPE_F
)
1697 arcmsr_free_io_queue(acb
);
1698 arcmsr_unmap_pciregion(acb
);
1699 pci_release_regions(pdev
);
1700 scsi_host_put(host
);
1701 pci_disable_device(pdev
);
1704 static void arcmsr_shutdown(struct pci_dev
*pdev
)
1706 struct Scsi_Host
*host
= pci_get_drvdata(pdev
);
1707 struct AdapterControlBlock
*acb
=
1708 (struct AdapterControlBlock
*)host
->hostdata
;
1709 if (acb
->acb_flags
& ACB_F_ADAPTER_REMOVED
)
1711 del_timer_sync(&acb
->eternal_timer
);
1713 del_timer_sync(&acb
->refresh_timer
);
1714 arcmsr_disable_outbound_ints(acb
);
1715 arcmsr_free_irq(pdev
, acb
);
1716 flush_work(&acb
->arcmsr_do_message_isr_bh
);
1717 arcmsr_stop_adapter_bgrb(acb
);
1718 arcmsr_flush_adapter_cache(acb
);
1721 static int arcmsr_module_init(void)
1724 error
= pci_register_driver(&arcmsr_pci_driver
);
1728 static void arcmsr_module_exit(void)
1730 pci_unregister_driver(&arcmsr_pci_driver
);
1732 module_init(arcmsr_module_init
);
1733 module_exit(arcmsr_module_exit
);
static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb,
	u32 intmask_org)
{
	u32 mask;

	switch (acb->adapter_type) {

	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;
		mask = intmask_org & ~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE |
			     ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE|
			     ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE);
		writel(mask, &reg->outbound_intmask);
		acb->outbound_int_enable = ~(intmask_org & mask) & 0x000000ff;
		}
		break;

	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;
		mask = intmask_org | (ARCMSR_IOP2DRV_DATA_WRITE_OK |
			ARCMSR_IOP2DRV_DATA_READ_OK |
			ARCMSR_IOP2DRV_CDB_DONE |
			ARCMSR_IOP2DRV_MESSAGE_CMD_DONE);
		writel(mask, reg->iop2drv_doorbell_mask);
		acb->outbound_int_enable = (intmask_org | mask) & 0x0000000f;
		}
		break;

	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C __iomem *reg = acb->pmuC;
		mask = ~(ARCMSR_HBCMU_UTILITY_A_ISR_MASK | ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR_MASK|ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR_MASK);
		writel(intmask_org & mask, &reg->host_int_mask);
		acb->outbound_int_enable = ~(intmask_org & mask) & 0x0000000f;
		}
		break;

	case ACB_ADAPTER_TYPE_D: {
		struct MessageUnit_D *reg = acb->pmuD;

		mask = ARCMSR_ARC1214_ALL_INT_ENABLE;
		writel(intmask_org | mask, reg->pcief0_int_enable);
		break;
		}
	case ACB_ADAPTER_TYPE_E:
	case ACB_ADAPTER_TYPE_F: {
		struct MessageUnit_E __iomem *reg = acb->pmuE;

		mask = ~(ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR | ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR);
		writel(intmask_org & mask, &reg->host_int_mask);
		break;
		}
	}
}
static int arcmsr_build_ccb(struct AdapterControlBlock *acb,
	struct CommandControlBlock *ccb, struct scsi_cmnd *pcmd)
{
	struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;
	int8_t *psge = (int8_t *)&arcmsr_cdb->u;
	__le32 address_lo, address_hi;
	int arccdbsize = 0x30;
	__le32 length = 0;
	int i, nseg;
	struct scatterlist *sg;

	ccb->pcmd = pcmd;
	memset(arcmsr_cdb, 0, sizeof(struct ARCMSR_CDB));
	arcmsr_cdb->TargetID = pcmd->device->id;
	arcmsr_cdb->LUN = pcmd->device->lun;
	arcmsr_cdb->Function = 1;
	arcmsr_cdb->msgContext = 0;
	memcpy(arcmsr_cdb->Cdb, pcmd->cmnd, pcmd->cmd_len);

	nseg = scsi_dma_map(pcmd);
	if (unlikely(nseg > acb->host->sg_tablesize || nseg < 0))
		return FAILED;
	scsi_for_each_sg(pcmd, sg, nseg, i) {
		/* Get the physical address of the current data pointer */
		length = cpu_to_le32(sg_dma_len(sg));
		address_lo = cpu_to_le32(dma_addr_lo32(sg_dma_address(sg)));
		address_hi = cpu_to_le32(dma_addr_hi32(sg_dma_address(sg)));
		if (address_hi == 0) {
			struct SG32ENTRY *pdma_sg = (struct SG32ENTRY *)psge;

			pdma_sg->address = address_lo;
			pdma_sg->length = length;
			psge += sizeof (struct SG32ENTRY);
			arccdbsize += sizeof (struct SG32ENTRY);
		} else {
			struct SG64ENTRY *pdma_sg = (struct SG64ENTRY *)psge;

			pdma_sg->addresshigh = address_hi;
			pdma_sg->address = address_lo;
			pdma_sg->length = length|cpu_to_le32(IS_SG64_ADDR);
			psge += sizeof (struct SG64ENTRY);
			arccdbsize += sizeof (struct SG64ENTRY);
		}
	}
	arcmsr_cdb->sgcount = (uint8_t)nseg;
	arcmsr_cdb->DataLength = scsi_bufflen(pcmd);
	arcmsr_cdb->msgPages = arccdbsize/0x100 + (arccdbsize % 0x100 ? 1 : 0);
	if ( arccdbsize > 256)
		arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_SGL_BSIZE;
	if (pcmd->sc_data_direction == DMA_TO_DEVICE)
		arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_WRITE;
	ccb->arc_cdb_size = arccdbsize;
	return SUCCESS;
}
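
/*
** arcmsr_post_ccb() hands a built CCB to the IOP. Type A/C/E/F
** adapters post the CCB physical address (plus size/flag bits)
** through inbound queue port registers, while type B and D write
** into a host-resident post queue and then ring a doorbell.
*/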
static void arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandControlBlock *ccb)
{
	uint32_t cdb_phyaddr = ccb->cdb_phyaddr;
	struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;
	atomic_inc(&acb->ccboutstandingcount);
	ccb->startdone = ARCMSR_CCB_START;
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;

		if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE)
			writel(cdb_phyaddr | ARCMSR_CCBPOST_FLAG_SGL_BSIZE,
				&reg->inbound_queueport);
		else
			writel(cdb_phyaddr, &reg->inbound_queueport);
		break;
	}

	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;
		uint32_t ending_index, index = reg->postq_index;

		ending_index = ((index + 1) % ARCMSR_MAX_HBB_POSTQUEUE);
		reg->post_qbuffer[ending_index] = 0;
		if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) {
			reg->post_qbuffer[index] =
				cdb_phyaddr | ARCMSR_CCBPOST_FLAG_SGL_BSIZE;
		} else {
			reg->post_qbuffer[index] = cdb_phyaddr;
		}
		index++;
		index %= ARCMSR_MAX_HBB_POSTQUEUE;/*if last index number set it to 0 */
		reg->postq_index = index;
		writel(ARCMSR_DRV2IOP_CDB_POSTED, reg->drv2iop_doorbell);
		}
		break;
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C __iomem *phbcmu = acb->pmuC;
		uint32_t ccb_post_stamp, arc_cdb_size;

		arc_cdb_size = (ccb->arc_cdb_size > 0x300) ? 0x300 : ccb->arc_cdb_size;
		ccb_post_stamp = (cdb_phyaddr | ((arc_cdb_size - 1) >> 6) | 1);
		writel(upper_32_bits(ccb->cdb_phyaddr), &phbcmu->inbound_queueport_high);
		writel(ccb_post_stamp, &phbcmu->inbound_queueport_low);
		}
		break;
	case ACB_ADAPTER_TYPE_D: {
		struct MessageUnit_D *pmu = acb->pmuD;
		u16 index_stripped;
		u16 postq_index, toggle;
		unsigned long flags;
		struct InBound_SRB *pinbound_srb;

		spin_lock_irqsave(&acb->postq_lock, flags);
		postq_index = pmu->postq_index;
		pinbound_srb = (struct InBound_SRB *)&(pmu->post_qbuffer[postq_index & 0xFF]);
		pinbound_srb->addressHigh = upper_32_bits(ccb->cdb_phyaddr);
		pinbound_srb->addressLow = cdb_phyaddr;
		pinbound_srb->length = ccb->arc_cdb_size >> 2;
		arcmsr_cdb->msgContext = dma_addr_lo32(cdb_phyaddr);
		toggle = postq_index & 0x4000;
		index_stripped = postq_index + 1;
		index_stripped &= (ARCMSR_MAX_ARC1214_POSTQUEUE - 1);
		pmu->postq_index = index_stripped ? (index_stripped | toggle) :
			(toggle ^ 0x4000);
		writel(postq_index, pmu->inboundlist_write_pointer);
		spin_unlock_irqrestore(&acb->postq_lock, flags);
		break;
		}
	case ACB_ADAPTER_TYPE_E: {
		struct MessageUnit_E __iomem *pmu = acb->pmuE;
		u32 ccb_post_stamp, arc_cdb_size;

		arc_cdb_size = (ccb->arc_cdb_size > 0x300) ? 0x300 : ccb->arc_cdb_size;
		ccb_post_stamp = (ccb->smid | ((arc_cdb_size - 1) >> 6));
		writel(0, &pmu->inbound_queueport_high);
		writel(ccb_post_stamp, &pmu->inbound_queueport_low);
		break;
		}
	case ACB_ADAPTER_TYPE_F: {
		struct MessageUnit_F __iomem *pmu = acb->pmuF;
		u32 ccb_post_stamp, arc_cdb_size;

		if (ccb->arc_cdb_size <= 0x300)
			arc_cdb_size = (ccb->arc_cdb_size - 1) >> 6 | 1;
		else
			arc_cdb_size = (((ccb->arc_cdb_size + 0xff) >> 8) + 2) << 1 | 1;
		ccb_post_stamp = (ccb->smid | arc_cdb_size);
		writel(0, &pmu->inbound_queueport_high);
		writel(ccb_post_stamp, &pmu->inbound_queueport_low);
		break;
		}
	}
}
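
/*
** The arcmsr_hbaX_stop_bgrb() helpers send the "stop background
** rebuild" message to the firmware and wait for the message-interrupt
** acknowledgement; arcmsr_stop_adapter_bgrb() dispatches on the
** adapter type.
*/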
static void arcmsr_hbaA_stop_bgrb(struct AdapterControlBlock *acb)
{
	struct MessageUnit_A __iomem *reg = acb->pmuA;
	acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
	writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, &reg->inbound_msgaddr0);
	if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
		printk(KERN_NOTICE
			"arcmsr%d: wait 'stop adapter background rebuild' timeout\n"
			, acb->host->host_no);
	}
}

static void arcmsr_hbaB_stop_bgrb(struct AdapterControlBlock *acb)
{
	struct MessageUnit_B *reg = acb->pmuB;
	acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
	writel(ARCMSR_MESSAGE_STOP_BGRB, reg->drv2iop_doorbell);

	if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
		printk(KERN_NOTICE
			"arcmsr%d: wait 'stop adapter background rebuild' timeout\n"
			, acb->host->host_no);
	}
}

static void arcmsr_hbaC_stop_bgrb(struct AdapterControlBlock *pACB)
{
	struct MessageUnit_C __iomem *reg = pACB->pmuC;
	pACB->acb_flags &= ~ACB_F_MSG_START_BGRB;
	writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, &reg->inbound_msgaddr0);
	writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
	if (!arcmsr_hbaC_wait_msgint_ready(pACB)) {
		printk(KERN_NOTICE
			"arcmsr%d: wait 'stop adapter background rebuild' timeout\n"
			, pACB->host->host_no);
	}
}

static void arcmsr_hbaD_stop_bgrb(struct AdapterControlBlock *pACB)
{
	struct MessageUnit_D *reg = pACB->pmuD;

	pACB->acb_flags &= ~ACB_F_MSG_START_BGRB;
	writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, reg->inbound_msgaddr0);
	if (!arcmsr_hbaD_wait_msgint_ready(pACB))
		pr_notice("arcmsr%d: wait 'stop adapter background rebuild' "
			"timeout\n", pACB->host->host_no);
}

static void arcmsr_hbaE_stop_bgrb(struct AdapterControlBlock *pACB)
{
	struct MessageUnit_E __iomem *reg = pACB->pmuE;

	pACB->acb_flags &= ~ACB_F_MSG_START_BGRB;
	writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, &reg->inbound_msgaddr0);
	pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
	writel(pACB->out_doorbell, &reg->iobound_doorbell);
	if (!arcmsr_hbaE_wait_msgint_ready(pACB)) {
		pr_notice("arcmsr%d: wait 'stop adapter background rebuild' "
			"timeout\n", pACB->host->host_no);
	}
}

static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A:
		arcmsr_hbaA_stop_bgrb(acb);
		break;
	case ACB_ADAPTER_TYPE_B:
		arcmsr_hbaB_stop_bgrb(acb);
		break;
	case ACB_ADAPTER_TYPE_C:
		arcmsr_hbaC_stop_bgrb(acb);
		break;
	case ACB_ADAPTER_TYPE_D:
		arcmsr_hbaD_stop_bgrb(acb);
		break;
	case ACB_ADAPTER_TYPE_E:
	case ACB_ADAPTER_TYPE_F:
		arcmsr_hbaE_stop_bgrb(acb);
		break;
	}
}
static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb)
{
	dma_free_coherent(&acb->pdev->dev, acb->uncache_size, acb->dma_coherent, acb->dma_coherent_handle);
}
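
/*
** arcmsr_iop_message_read() rings the per-adapter inbound doorbell to
** tell the IOP that the driver has consumed the current Qbuffer, so
** the firmware may post the next one.
*/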
static void arcmsr_iop_message_read(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;
		writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, &reg->inbound_doorbell);
		}
		break;
	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;
		writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell);
		}
		break;
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C __iomem *reg = acb->pmuC;

		writel(ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK, &reg->inbound_doorbell);
		}
		break;
	case ACB_ADAPTER_TYPE_D: {
		struct MessageUnit_D *reg = acb->pmuD;
		writel(ARCMSR_ARC1214_DRV2IOP_DATA_OUT_READ,
			reg->inbound_doorbell);
		}
		break;
	case ACB_ADAPTER_TYPE_E:
	case ACB_ADAPTER_TYPE_F: {
		struct MessageUnit_E __iomem *reg = acb->pmuE;
		acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_DATA_READ_OK;
		writel(acb->out_doorbell, &reg->iobound_doorbell);
		}
		break;
	}
}
static void arcmsr_iop_message_wrote(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;
		/*
		** push inbound doorbell tell iop, driver data write ok
		** and wait reply on next hwinterrupt for next Qbuffer post
		*/
		writel(ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK, &reg->inbound_doorbell);
		}
		break;

	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;
		/*
		** push inbound doorbell tell iop, driver data write ok
		** and wait reply on next hwinterrupt for next Qbuffer post
		*/
		writel(ARCMSR_DRV2IOP_DATA_WRITE_OK, reg->drv2iop_doorbell);
		}
		break;
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C __iomem *reg = acb->pmuC;
		/*
		** push inbound doorbell tell iop, driver data write ok
		** and wait reply on next hwinterrupt for next Qbuffer post
		*/
		writel(ARCMSR_HBCMU_DRV2IOP_DATA_WRITE_OK, &reg->inbound_doorbell);
		}
		break;
	case ACB_ADAPTER_TYPE_D: {
		struct MessageUnit_D *reg = acb->pmuD;
		writel(ARCMSR_ARC1214_DRV2IOP_DATA_IN_READY,
			reg->inbound_doorbell);
		}
		break;
	case ACB_ADAPTER_TYPE_E:
	case ACB_ADAPTER_TYPE_F: {
		struct MessageUnit_E __iomem *reg = acb->pmuE;
		acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_DATA_WRITE_OK;
		writel(acb->out_doorbell, &reg->iobound_doorbell);
		}
		break;
	}
}
struct QBUFFER __iomem *arcmsr_get_iop_rqbuffer(struct AdapterControlBlock *acb)
{
	struct QBUFFER __iomem *qbuffer = NULL;
	switch (acb->adapter_type) {

	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;
		qbuffer = (struct QBUFFER __iomem *)&reg->message_rbuffer;
		}
		break;
	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;
		qbuffer = (struct QBUFFER __iomem *)reg->message_rbuffer;
		}
		break;
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C __iomem *phbcmu = acb->pmuC;
		qbuffer = (struct QBUFFER __iomem *)&phbcmu->message_rbuffer;
		}
		break;
	case ACB_ADAPTER_TYPE_D: {
		struct MessageUnit_D *reg = acb->pmuD;
		qbuffer = (struct QBUFFER __iomem *)reg->message_rbuffer;
		}
		break;
	case ACB_ADAPTER_TYPE_E: {
		struct MessageUnit_E __iomem *reg = acb->pmuE;
		qbuffer = (struct QBUFFER __iomem *)&reg->message_rbuffer;
		}
		break;
	case ACB_ADAPTER_TYPE_F: {
		qbuffer = (struct QBUFFER __iomem *)acb->message_rbuffer;
		}
		break;
	}
	return qbuffer;
}
static struct QBUFFER __iomem *arcmsr_get_iop_wqbuffer(struct AdapterControlBlock *acb)
{
	struct QBUFFER __iomem *pqbuffer = NULL;
	switch (acb->adapter_type) {

	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;
		pqbuffer = (struct QBUFFER __iomem *) &reg->message_wbuffer;
		}
		break;
	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;
		pqbuffer = (struct QBUFFER __iomem *)reg->message_wbuffer;
		}
		break;
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C __iomem *reg = acb->pmuC;
		pqbuffer = (struct QBUFFER __iomem *)&reg->message_wbuffer;
		}
		break;
	case ACB_ADAPTER_TYPE_D: {
		struct MessageUnit_D *reg = acb->pmuD;
		pqbuffer = (struct QBUFFER __iomem *)reg->message_wbuffer;
		}
		break;
	case ACB_ADAPTER_TYPE_E: {
		struct MessageUnit_E __iomem *reg = acb->pmuE;
		pqbuffer = (struct QBUFFER __iomem *)&reg->message_wbuffer;
		}
		break;
	case ACB_ADAPTER_TYPE_F:
		pqbuffer = (struct QBUFFER __iomem *)acb->message_wbuffer;
		break;
	}
	return pqbuffer;
}
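
/*
** arcmsr_Read_iop_rqbuffer_in_DWORD() copies the IOP's request buffer
** into acb->rqbuffer using 32-bit MMIO reads (readl) and a small
** bounce buffer, advancing the circular put index as it goes.
*/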
static uint32_t
arcmsr_Read_iop_rqbuffer_in_DWORD(struct AdapterControlBlock *acb,
		struct QBUFFER __iomem *prbuffer)
{
	uint8_t *pQbuffer;
	uint8_t *buf1 = NULL;
	uint32_t __iomem *iop_data;
	uint32_t iop_len, data_len, *buf2 = NULL;

	iop_data = (uint32_t __iomem *)prbuffer->data;
	iop_len = readl(&prbuffer->data_len);
	if (iop_len > 0) {
		buf1 = kmalloc(128, GFP_ATOMIC);
		buf2 = (uint32_t *)buf1;
		if (buf1 == NULL)
			return 0;
		data_len = iop_len;
		while (data_len >= 4) {
			*buf2++ = readl(iop_data);
			iop_data++;
			data_len -= 4;
		}
		if (data_len)
			*buf2 = readl(iop_data);
		buf2 = (uint32_t *)buf1;
	}
	while (iop_len > 0) {
		pQbuffer = &acb->rqbuffer[acb->rqbuf_putIndex];
		*pQbuffer = *buf1;
		acb->rqbuf_putIndex++;
		/* if last, index number set it to 0 */
		acb->rqbuf_putIndex %= ARCMSR_MAX_QBUFFER;
		buf1++;
		iop_len--;
	}
	kfree(buf2);
	/* let IOP know data has been read */
	arcmsr_iop_message_read(acb);
	return 1;
}
static uint32_t
arcmsr_Read_iop_rqbuffer_data(struct AdapterControlBlock *acb,
	struct QBUFFER __iomem *prbuffer) {

	uint8_t *pQbuffer;
	uint8_t __iomem *iop_data;
	uint32_t iop_len;

	if (acb->adapter_type > ACB_ADAPTER_TYPE_B)
		return arcmsr_Read_iop_rqbuffer_in_DWORD(acb, prbuffer);
	iop_data = (uint8_t __iomem *)prbuffer->data;
	iop_len = readl(&prbuffer->data_len);
	while (iop_len > 0) {
		pQbuffer = &acb->rqbuffer[acb->rqbuf_putIndex];
		*pQbuffer = readb(iop_data);
		acb->rqbuf_putIndex++;
		acb->rqbuf_putIndex %= ARCMSR_MAX_QBUFFER;
		iop_data++;
		iop_len--;
	}
	arcmsr_iop_message_read(acb);
	return 1;
}
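
/*
** arcmsr_iop2drv_data_wrote_handle() runs when the IOP signals that it
** wrote new ioctl data: it drains the IOP request buffer into the
** driver's circular rqbuffer, or sets ACB_F_IOPDATA_OVERFLOW when
** there is not enough free room.
*/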
static void arcmsr_iop2drv_data_wrote_handle(struct AdapterControlBlock *acb)
{
	unsigned long flags;
	struct QBUFFER __iomem *prbuffer;
	int32_t buf_empty_len;

	spin_lock_irqsave(&acb->rqbuffer_lock, flags);
	prbuffer = arcmsr_get_iop_rqbuffer(acb);
	buf_empty_len = (acb->rqbuf_putIndex - acb->rqbuf_getIndex - 1) &
		(ARCMSR_MAX_QBUFFER - 1);
	if (buf_empty_len >= readl(&prbuffer->data_len)) {
		if (arcmsr_Read_iop_rqbuffer_data(acb, prbuffer) == 0)
			acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
	} else
		acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
	spin_unlock_irqrestore(&acb->rqbuffer_lock, flags);
}
static void arcmsr_write_ioctldata2iop_in_DWORD(struct AdapterControlBlock *acb)
{
	uint8_t *pQbuffer;
	struct QBUFFER __iomem *pwbuffer;
	uint8_t *buf1 = NULL;
	uint32_t __iomem *iop_data;
	uint32_t allxfer_len = 0, data_len, *buf2 = NULL, data;

	if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READED) {
		buf1 = kmalloc(128, GFP_ATOMIC);
		buf2 = (uint32_t *)buf1;
		if (buf1 == NULL)
			return;

		acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED);
		pwbuffer = arcmsr_get_iop_wqbuffer(acb);
		iop_data = (uint32_t __iomem *)pwbuffer->data;
		while ((acb->wqbuf_getIndex != acb->wqbuf_putIndex)
			&& (allxfer_len < 124)) {
			pQbuffer = &acb->wqbuffer[acb->wqbuf_getIndex];
			*buf1 = *pQbuffer;
			acb->wqbuf_getIndex++;
			acb->wqbuf_getIndex %= ARCMSR_MAX_QBUFFER;
			buf1++;
			allxfer_len++;
		}
		data_len = allxfer_len;
		buf1 = (uint8_t *)buf2;
		while (data_len >= 4) {
			data = *buf2++;
			writel(data, iop_data);
			iop_data++;
			data_len -= 4;
		}
		if (data_len) {
			data = *buf2;
			writel(data, iop_data);
		}
		writel(allxfer_len, &pwbuffer->data_len);
		kfree(buf1);
		arcmsr_iop_message_wrote(acb);
	}
}
static void
arcmsr_write_ioctldata2iop(struct AdapterControlBlock *acb)
{
	uint8_t *pQbuffer;
	struct QBUFFER __iomem *pwbuffer;
	uint8_t __iomem *iop_data;
	int32_t allxfer_len = 0;

	if (acb->adapter_type > ACB_ADAPTER_TYPE_B) {
		arcmsr_write_ioctldata2iop_in_DWORD(acb);
		return;
	}
	if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READED) {
		acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED);
		pwbuffer = arcmsr_get_iop_wqbuffer(acb);
		iop_data = (uint8_t __iomem *)pwbuffer->data;
		while ((acb->wqbuf_getIndex != acb->wqbuf_putIndex)
			&& (allxfer_len < 124)) {
			pQbuffer = &acb->wqbuffer[acb->wqbuf_getIndex];
			writeb(*pQbuffer, iop_data);
			acb->wqbuf_getIndex++;
			acb->wqbuf_getIndex %= ARCMSR_MAX_QBUFFER;
			iop_data++;
			allxfer_len++;
		}
		writel(allxfer_len, &pwbuffer->data_len);
		arcmsr_iop_message_wrote(acb);
	}
}
static void arcmsr_iop2drv_data_read_handle(struct AdapterControlBlock *acb)
{
	unsigned long flags;

	spin_lock_irqsave(&acb->wqbuffer_lock, flags);
	acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_READED;
	if (acb->wqbuf_getIndex != acb->wqbuf_putIndex)
		arcmsr_write_ioctldata2iop(acb);
	if (acb->wqbuf_getIndex == acb->wqbuf_putIndex)
		acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_CLEARED;
	spin_unlock_irqrestore(&acb->wqbuffer_lock, flags);
}
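
/*
** Doorbell interrupt handlers: each arcmsr_hbaX_doorbell_isr() below
** acknowledges the outbound doorbell bits and dispatches data-write,
** data-read and (where supported) message-done events, looping until
** no doorbell bits remain set.
*/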
static void arcmsr_hbaA_doorbell_isr(struct AdapterControlBlock *acb)
{
	uint32_t outbound_doorbell;
	struct MessageUnit_A __iomem *reg = acb->pmuA;
	outbound_doorbell = readl(&reg->outbound_doorbell);
	do {
		writel(outbound_doorbell, &reg->outbound_doorbell);
		if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK)
			arcmsr_iop2drv_data_wrote_handle(acb);
		if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_READ_OK)
			arcmsr_iop2drv_data_read_handle(acb);
		outbound_doorbell = readl(&reg->outbound_doorbell);
	} while (outbound_doorbell & (ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK
		| ARCMSR_OUTBOUND_IOP331_DATA_READ_OK));
}
static void arcmsr_hbaC_doorbell_isr(struct AdapterControlBlock *pACB)
{
	uint32_t outbound_doorbell;
	struct MessageUnit_C __iomem *reg = pACB->pmuC;
	/*
	*******************************************************************
	**  Maybe here we need to check wrqbuffer_lock is lock or not
	**  DOORBELL: din! don!
	**  check if there are any mail need to pack from firmware
	*******************************************************************
	*/
	outbound_doorbell = readl(&reg->outbound_doorbell);
	do {
		writel(outbound_doorbell, &reg->outbound_doorbell_clear);
		readl(&reg->outbound_doorbell_clear);
		if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK)
			arcmsr_iop2drv_data_wrote_handle(pACB);
		if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_READ_OK)
			arcmsr_iop2drv_data_read_handle(pACB);
		if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE)
			arcmsr_hbaC_message_isr(pACB);
		outbound_doorbell = readl(&reg->outbound_doorbell);
	} while (outbound_doorbell & (ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK
		| ARCMSR_HBCMU_IOP2DRV_DATA_READ_OK
		| ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE));
}
static void arcmsr_hbaD_doorbell_isr(struct AdapterControlBlock *pACB)
{
	uint32_t outbound_doorbell;
	struct MessageUnit_D *pmu = pACB->pmuD;

	outbound_doorbell = readl(pmu->outbound_doorbell);
	do {
		writel(outbound_doorbell, pmu->outbound_doorbell);
		if (outbound_doorbell & ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE)
			arcmsr_hbaD_message_isr(pACB);
		if (outbound_doorbell & ARCMSR_ARC1214_IOP2DRV_DATA_WRITE_OK)
			arcmsr_iop2drv_data_wrote_handle(pACB);
		if (outbound_doorbell & ARCMSR_ARC1214_IOP2DRV_DATA_READ_OK)
			arcmsr_iop2drv_data_read_handle(pACB);
		outbound_doorbell = readl(pmu->outbound_doorbell);
	} while (outbound_doorbell & (ARCMSR_ARC1214_IOP2DRV_DATA_WRITE_OK
		| ARCMSR_ARC1214_IOP2DRV_DATA_READ_OK
		| ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE));
}
static void arcmsr_hbaE_doorbell_isr(struct AdapterControlBlock *pACB)
{
	uint32_t outbound_doorbell, in_doorbell, tmp;
	struct MessageUnit_E __iomem *reg = pACB->pmuE;

	in_doorbell = readl(&reg->iobound_doorbell);
	outbound_doorbell = in_doorbell ^ pACB->in_doorbell;
	do {
		writel(0, &reg->host_int_status); /* clear interrupt */
		if (outbound_doorbell & ARCMSR_HBEMU_IOP2DRV_DATA_WRITE_OK) {
			arcmsr_iop2drv_data_wrote_handle(pACB);
		}
		if (outbound_doorbell & ARCMSR_HBEMU_IOP2DRV_DATA_READ_OK) {
			arcmsr_iop2drv_data_read_handle(pACB);
		}
		if (outbound_doorbell & ARCMSR_HBEMU_IOP2DRV_MESSAGE_CMD_DONE) {
			arcmsr_hbaE_message_isr(pACB);
		}
		tmp = in_doorbell;
		in_doorbell = readl(&reg->iobound_doorbell);
		outbound_doorbell = tmp ^ in_doorbell;
	} while (outbound_doorbell & (ARCMSR_HBEMU_IOP2DRV_DATA_WRITE_OK
		| ARCMSR_HBEMU_IOP2DRV_DATA_READ_OK
		| ARCMSR_HBEMU_IOP2DRV_MESSAGE_CMD_DONE));
	pACB->in_doorbell = in_doorbell;
}
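
/*
** Post-queue (command completion) interrupt handlers: each
** arcmsr_hbaX_postqueue_isr() walks the adapter's done queue, converts
** the reported CCB physical address (or SMID) back to the owning
** CommandControlBlock and completes it via arcmsr_drain_donequeue().
*/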
static void arcmsr_hbaA_postqueue_isr(struct AdapterControlBlock *acb)
{
	bool error;
	struct MessageUnit_A __iomem *reg = acb->pmuA;
	struct ARCMSR_CDB *pARCMSR_CDB;
	struct CommandControlBlock *pCCB;
	uint32_t flag_ccb;
	unsigned long cdb_phy_addr;

	while ((flag_ccb = readl(&reg->outbound_queueport)) != 0xFFFFFFFF) {
		cdb_phy_addr = (flag_ccb << 5) & 0xffffffff;
		if (acb->cdb_phyadd_hipart)
			cdb_phy_addr = cdb_phy_addr | acb->cdb_phyadd_hipart;
		pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + cdb_phy_addr);
		pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
		error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
		arcmsr_drain_donequeue(acb, pCCB, error);
	}
}
static void arcmsr_hbaB_postqueue_isr(struct AdapterControlBlock *acb)
{
	uint32_t index;
	uint32_t flag_ccb;
	struct MessageUnit_B *reg = acb->pmuB;
	struct ARCMSR_CDB *pARCMSR_CDB;
	struct CommandControlBlock *pCCB;
	bool error;
	unsigned long cdb_phy_addr;

	index = reg->doneq_index;
	while ((flag_ccb = reg->done_qbuffer[index]) != 0) {
		cdb_phy_addr = (flag_ccb << 5) & 0xffffffff;
		if (acb->cdb_phyadd_hipart)
			cdb_phy_addr = cdb_phy_addr | acb->cdb_phyadd_hipart;
		pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + cdb_phy_addr);
		pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
		error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
		arcmsr_drain_donequeue(acb, pCCB, error);
		reg->done_qbuffer[index] = 0;
		index++;
		index %= ARCMSR_MAX_HBB_POSTQUEUE;
		reg->doneq_index = index;
	}
}
static void arcmsr_hbaC_postqueue_isr(struct AdapterControlBlock *acb)
{
	struct MessageUnit_C __iomem *phbcmu;
	struct ARCMSR_CDB *arcmsr_cdb;
	struct CommandControlBlock *ccb;
	uint32_t flag_ccb, throttling = 0;
	unsigned long ccb_cdb_phy;
	bool error;

	phbcmu = acb->pmuC;
	/* areca cdb command done */
	/* Use correct offset and size for syncing */

	while ((flag_ccb = readl(&phbcmu->outbound_queueport_low)) !=
			0xFFFFFFFF) {
		ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
		if (acb->cdb_phyadd_hipart)
			ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
		arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset
			+ ccb_cdb_phy);
		ccb = container_of(arcmsr_cdb, struct CommandControlBlock,
			arcmsr_cdb);
		error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1)
			? true : false;
		/* check if command done with no error */
		arcmsr_drain_donequeue(acb, ccb, error);
		throttling++;
		if (throttling == ARCMSR_HBC_ISR_THROTTLING_LEVEL) {
			writel(ARCMSR_HBCMU_DRV2IOP_POSTQUEUE_THROTTLING,
				&phbcmu->inbound_doorbell);
			throttling = 0;
		}
	}
}
static void arcmsr_hbaD_postqueue_isr(struct AdapterControlBlock *acb)
{
	u32 outbound_write_pointer, doneq_index, index_stripped, toggle;
	uint32_t addressLow;
	int error;
	struct MessageUnit_D *pmu;
	struct ARCMSR_CDB *arcmsr_cdb;
	struct CommandControlBlock *ccb;
	unsigned long flags, ccb_cdb_phy;

	spin_lock_irqsave(&acb->doneq_lock, flags);
	pmu = acb->pmuD;
	outbound_write_pointer = pmu->done_qbuffer[0].addressLow + 1;
	doneq_index = pmu->doneq_index;
	if ((doneq_index & 0xFFF) != (outbound_write_pointer & 0xFFF)) {
		do {
			toggle = doneq_index & 0x4000;
			index_stripped = (doneq_index & 0xFFF) + 1;
			index_stripped %= ARCMSR_MAX_ARC1214_DONEQUEUE;
			pmu->doneq_index = index_stripped ? (index_stripped | toggle) :
				((toggle ^ 0x4000) + 1);
			doneq_index = pmu->doneq_index;
			addressLow = pmu->done_qbuffer[doneq_index &
				0xFFF].addressLow;
			ccb_cdb_phy = (addressLow & 0xFFFFFFF0);
			if (acb->cdb_phyadd_hipart)
				ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
			arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset
				+ ccb_cdb_phy);
			ccb = container_of(arcmsr_cdb,
				struct CommandControlBlock, arcmsr_cdb);
			error = (addressLow & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1)
				? true : false;
			arcmsr_drain_donequeue(acb, ccb, error);
			writel(doneq_index, pmu->outboundlist_read_pointer);
		} while ((doneq_index & 0xFFF) !=
			(outbound_write_pointer & 0xFFF));
	}
	writel(ARCMSR_ARC1214_OUTBOUND_LIST_INTERRUPT_CLEAR,
		pmu->outboundlist_interrupt_cause);
	readl(pmu->outboundlist_interrupt_cause);
	spin_unlock_irqrestore(&acb->doneq_lock, flags);
}
static void arcmsr_hbaE_postqueue_isr(struct AdapterControlBlock *acb)
{
	uint32_t doneq_index;
	uint16_t cmdSMID;
	int error;
	struct MessageUnit_E __iomem *pmu;
	struct CommandControlBlock *ccb;
	unsigned long flags;

	spin_lock_irqsave(&acb->doneq_lock, flags);
	doneq_index = acb->doneq_index;
	pmu = acb->pmuE;
	while ((readl(&pmu->reply_post_producer_index) & 0xFFFF) != doneq_index) {
		cmdSMID = acb->pCompletionQ[doneq_index].cmdSMID;
		ccb = acb->pccb_pool[cmdSMID];
		error = (acb->pCompletionQ[doneq_index].cmdFlag
			& ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false;
		arcmsr_drain_donequeue(acb, ccb, error);
		doneq_index++;
		if (doneq_index >= acb->completionQ_entry)
			doneq_index = 0;
	}
	acb->doneq_index = doneq_index;
	writel(doneq_index, &pmu->reply_post_consumer_index);
	spin_unlock_irqrestore(&acb->doneq_lock, flags);
}
static void arcmsr_hbaF_postqueue_isr(struct AdapterControlBlock *acb)
{
	uint32_t doneq_index;
	uint16_t cmdSMID;
	int error;
	struct MessageUnit_F __iomem *phbcmu;
	struct CommandControlBlock *ccb;
	unsigned long flags;

	spin_lock_irqsave(&acb->doneq_lock, flags);
	doneq_index = acb->doneq_index;
	phbcmu = acb->pmuF;
	while (1) {
		cmdSMID = acb->pCompletionQ[doneq_index].cmdSMID;
		if (cmdSMID == 0xffff)
			break;
		ccb = acb->pccb_pool[cmdSMID];
		error = (acb->pCompletionQ[doneq_index].cmdFlag &
			ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false;
		arcmsr_drain_donequeue(acb, ccb, error);
		acb->pCompletionQ[doneq_index].cmdSMID = 0xffff;
		doneq_index++;
		if (doneq_index >= acb->completionQ_entry)
			doneq_index = 0;
	}
	acb->doneq_index = doneq_index;
	writel(doneq_index, &phbcmu->reply_post_consumer_index);
	spin_unlock_irqrestore(&acb->doneq_lock, flags);
}
/*
**********************************************************************************
** Handle a message interrupt
**
** The only message interrupt we expect is in response to a query for the current adapter config.
** We want this in order to compare the drivemap so that we can detect newly-attached drives.
**********************************************************************************
*/
static void arcmsr_hbaA_message_isr(struct AdapterControlBlock *acb)
{
	struct MessageUnit_A __iomem *reg = acb->pmuA;
	/*clear interrupt and message state*/
	writel(ARCMSR_MU_OUTBOUND_MESSAGE0_INT, &reg->outbound_intstatus);
	if (acb->acb_flags & ACB_F_MSG_GET_CONFIG)
		schedule_work(&acb->arcmsr_do_message_isr_bh);
}

static void arcmsr_hbaB_message_isr(struct AdapterControlBlock *acb)
{
	struct MessageUnit_B *reg = acb->pmuB;

	/*clear interrupt and message state*/
	writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
	if (acb->acb_flags & ACB_F_MSG_GET_CONFIG)
		schedule_work(&acb->arcmsr_do_message_isr_bh);
}
/*
**********************************************************************************
** Handle a message interrupt
**
** The only message interrupt we expect is in response to a query for the
** current adapter config.
** We want this in order to compare the drivemap so that we can detect newly-attached drives.
**********************************************************************************
*/
static void arcmsr_hbaC_message_isr(struct AdapterControlBlock *acb)
{
	struct MessageUnit_C __iomem *reg = acb->pmuC;
	/*clear interrupt and message state*/
	writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR, &reg->outbound_doorbell_clear);
	if (acb->acb_flags & ACB_F_MSG_GET_CONFIG)
		schedule_work(&acb->arcmsr_do_message_isr_bh);
}

static void arcmsr_hbaD_message_isr(struct AdapterControlBlock *acb)
{
	struct MessageUnit_D *reg = acb->pmuD;

	writel(ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE, reg->outbound_doorbell);
	readl(reg->outbound_doorbell);
	if (acb->acb_flags & ACB_F_MSG_GET_CONFIG)
		schedule_work(&acb->arcmsr_do_message_isr_bh);
}

static void arcmsr_hbaE_message_isr(struct AdapterControlBlock *acb)
{
	struct MessageUnit_E __iomem *reg = acb->pmuE;

	writel(0, &reg->host_int_status);
	if (acb->acb_flags & ACB_F_MSG_GET_CONFIG)
		schedule_work(&acb->arcmsr_do_message_isr_bh);
}
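
/*
** Top-level per-adapter interrupt handlers: read the host interrupt
** status, dispatch doorbell / post-queue / message events, and loop
** until the controller reports no further pending causes.
*/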
static int arcmsr_hbaA_handle_isr(struct AdapterControlBlock *acb)
{
	uint32_t outbound_intstatus;
	struct MessageUnit_A __iomem *reg = acb->pmuA;
	outbound_intstatus = readl(&reg->outbound_intstatus) &
		acb->outbound_int_enable;
	if (!(outbound_intstatus & ARCMSR_MU_OUTBOUND_HANDLE_INT))
		return IRQ_NONE;
	do {
		writel(outbound_intstatus, &reg->outbound_intstatus);
		if (outbound_intstatus & ARCMSR_MU_OUTBOUND_DOORBELL_INT)
			arcmsr_hbaA_doorbell_isr(acb);
		if (outbound_intstatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT)
			arcmsr_hbaA_postqueue_isr(acb);
		if (outbound_intstatus & ARCMSR_MU_OUTBOUND_MESSAGE0_INT)
			arcmsr_hbaA_message_isr(acb);
		outbound_intstatus = readl(&reg->outbound_intstatus) &
			acb->outbound_int_enable;
	} while (outbound_intstatus & (ARCMSR_MU_OUTBOUND_DOORBELL_INT
		| ARCMSR_MU_OUTBOUND_POSTQUEUE_INT
		| ARCMSR_MU_OUTBOUND_MESSAGE0_INT));
	return IRQ_HANDLED;
}
static int arcmsr_hbaB_handle_isr(struct AdapterControlBlock *acb)
{
	uint32_t outbound_doorbell;
	struct MessageUnit_B *reg = acb->pmuB;
	outbound_doorbell = readl(reg->iop2drv_doorbell) &
		acb->outbound_int_enable;
	if (!outbound_doorbell)
		return IRQ_NONE;
	do {
		writel(~outbound_doorbell, reg->iop2drv_doorbell);
		writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell);
		if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK)
			arcmsr_iop2drv_data_wrote_handle(acb);
		if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_READ_OK)
			arcmsr_iop2drv_data_read_handle(acb);
		if (outbound_doorbell & ARCMSR_IOP2DRV_CDB_DONE)
			arcmsr_hbaB_postqueue_isr(acb);
		if (outbound_doorbell & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE)
			arcmsr_hbaB_message_isr(acb);
		outbound_doorbell = readl(reg->iop2drv_doorbell) &
			acb->outbound_int_enable;
	} while (outbound_doorbell & (ARCMSR_IOP2DRV_DATA_WRITE_OK
		| ARCMSR_IOP2DRV_DATA_READ_OK
		| ARCMSR_IOP2DRV_CDB_DONE
		| ARCMSR_IOP2DRV_MESSAGE_CMD_DONE));
	return IRQ_HANDLED;
}
static int arcmsr_hbaC_handle_isr(struct AdapterControlBlock *pACB)
{
	uint32_t host_interrupt_status;
	struct MessageUnit_C __iomem *phbcmu = pACB->pmuC;
	/*
	*********************************************
	**   check outbound intstatus
	*********************************************
	*/
	host_interrupt_status = readl(&phbcmu->host_int_status) &
		(ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR |
		ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR);
	if (!host_interrupt_status)
		return IRQ_NONE;
	do {
		if (host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR)
			arcmsr_hbaC_doorbell_isr(pACB);
		/* MU post queue interrupts*/
		if (host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR)
			arcmsr_hbaC_postqueue_isr(pACB);
		host_interrupt_status = readl(&phbcmu->host_int_status);
	} while (host_interrupt_status & (ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR |
		ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR));
	return IRQ_HANDLED;
}
static irqreturn_t arcmsr_hbaD_handle_isr(struct AdapterControlBlock *pACB)
{
	u32 host_interrupt_status;
	struct MessageUnit_D *pmu = pACB->pmuD;

	host_interrupt_status = readl(pmu->host_int_status) &
		(ARCMSR_ARC1214_OUTBOUND_POSTQUEUE_ISR |
		ARCMSR_ARC1214_OUTBOUND_DOORBELL_ISR);
	if (!host_interrupt_status)
		return IRQ_NONE;
	do {
		/* MU post queue interrupts*/
		if (host_interrupt_status &
			ARCMSR_ARC1214_OUTBOUND_POSTQUEUE_ISR)
			arcmsr_hbaD_postqueue_isr(pACB);
		if (host_interrupt_status &
			ARCMSR_ARC1214_OUTBOUND_DOORBELL_ISR)
			arcmsr_hbaD_doorbell_isr(pACB);
		host_interrupt_status = readl(pmu->host_int_status);
	} while (host_interrupt_status &
		(ARCMSR_ARC1214_OUTBOUND_POSTQUEUE_ISR |
		ARCMSR_ARC1214_OUTBOUND_DOORBELL_ISR));
	return IRQ_HANDLED;
}
static irqreturn_t arcmsr_hbaE_handle_isr(struct AdapterControlBlock *pACB)
{
	uint32_t host_interrupt_status;
	struct MessageUnit_E __iomem *pmu = pACB->pmuE;

	host_interrupt_status = readl(&pmu->host_int_status) &
		(ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR |
		ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR);
	if (!host_interrupt_status)
		return IRQ_NONE;
	do {
		/* MU ioctl transfer doorbell interrupts*/
		if (host_interrupt_status & ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR) {
			arcmsr_hbaE_doorbell_isr(pACB);
		}
		/* MU post queue interrupts*/
		if (host_interrupt_status & ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR) {
			arcmsr_hbaE_postqueue_isr(pACB);
		}
		host_interrupt_status = readl(&pmu->host_int_status);
	} while (host_interrupt_status & (ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR |
		ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR));
	return IRQ_HANDLED;
}
static irqreturn_t arcmsr_hbaF_handle_isr(struct AdapterControlBlock *pACB)
{
	uint32_t host_interrupt_status;
	struct MessageUnit_F __iomem *phbcmu = pACB->pmuF;

	host_interrupt_status = readl(&phbcmu->host_int_status) &
		(ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR |
		ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR);
	if (!host_interrupt_status)
		return IRQ_NONE;
	do {
		/* MU post queue interrupts*/
		if (host_interrupt_status & ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR)
			arcmsr_hbaF_postqueue_isr(pACB);

		/* MU ioctl transfer doorbell interrupts*/
		if (host_interrupt_status & ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR)
			arcmsr_hbaE_doorbell_isr(pACB);

		host_interrupt_status = readl(&phbcmu->host_int_status);
	} while (host_interrupt_status & (ARCMSR_HBEMU_OUTBOUND_POSTQUEUE_ISR |
		ARCMSR_HBEMU_OUTBOUND_DOORBELL_ISR));
	return IRQ_HANDLED;
}
static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A:
		return arcmsr_hbaA_handle_isr(acb);
	case ACB_ADAPTER_TYPE_B:
		return arcmsr_hbaB_handle_isr(acb);
	case ACB_ADAPTER_TYPE_C:
		return arcmsr_hbaC_handle_isr(acb);
	case ACB_ADAPTER_TYPE_D:
		return arcmsr_hbaD_handle_isr(acb);
	case ACB_ADAPTER_TYPE_E:
		return arcmsr_hbaE_handle_isr(acb);
	case ACB_ADAPTER_TYPE_F:
		return arcmsr_hbaF_handle_isr(acb);
	default:
		return IRQ_NONE;
	}
}
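
/*
** arcmsr_iop_parking() is used on the "goodbye" ioctl path: when
** background rebuild is active it masks outbound interrupts, stops the
** rebuild, flushes the adapter cache and restores the interrupt mask.
*/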
static void arcmsr_iop_parking(struct AdapterControlBlock *acb)
{
	if (acb) {
		/* stop adapter background rebuild */
		if (acb->acb_flags & ACB_F_MSG_START_BGRB) {
			uint32_t intmask_org;
			acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
			intmask_org = arcmsr_disable_outbound_ints(acb);
			arcmsr_stop_adapter_bgrb(acb);
			arcmsr_flush_adapter_cache(acb);
			arcmsr_enable_outbound_ints(acb, intmask_org);
		}
	}
}
void arcmsr_clear_iop2drv_rqueue_buffer(struct AdapterControlBlock *acb)
{
	uint32_t i;

	if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
		for (i = 0; i < 15; i++) {
			if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
				acb->rqbuf_getIndex = 0;
				acb->rqbuf_putIndex = 0;
				arcmsr_iop_message_read(acb);
				mdelay(30);
			} else if (acb->rqbuf_getIndex !=
				   acb->rqbuf_putIndex) {
				acb->rqbuf_getIndex = 0;
				acb->rqbuf_putIndex = 0;
				mdelay(30);
			} else
				break;
		}
	}
}
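
/*
** arcmsr_iop_message_xfer() implements the pass-through ioctl protocol
** carried in WRITE_BUFFER/READ_BUFFER commands to the virtual device:
** the control code taken from CDB bytes 5..8 selects read/write/clear
** of the driver's circular message buffers or a simple handshake reply.
*/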
static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
		struct scsi_cmnd *cmd)
{
	char *buffer;
	unsigned short use_sg;
	int retvalue = 0, transfer_len = 0;
	unsigned long flags;
	struct CMD_MESSAGE_FIELD *pcmdmessagefld;
	uint32_t controlcode = (uint32_t)cmd->cmnd[5] << 24 |
		(uint32_t)cmd->cmnd[6] << 16 |
		(uint32_t)cmd->cmnd[7] << 8 |
		(uint32_t)cmd->cmnd[8];
	struct scatterlist *sg;

	use_sg = scsi_sg_count(cmd);
	sg = scsi_sglist(cmd);
	buffer = kmap_atomic(sg_page(sg)) + sg->offset;
	if (use_sg > 1) {
		retvalue = ARCMSR_MESSAGE_FAIL;
		goto message_out;
	}
	transfer_len += sg->length;
	if (transfer_len > sizeof(struct CMD_MESSAGE_FIELD)) {
		retvalue = ARCMSR_MESSAGE_FAIL;
		pr_info("%s: ARCMSR_MESSAGE_FAIL!\n", __func__);
		goto message_out;
	}
	pcmdmessagefld = (struct CMD_MESSAGE_FIELD *)buffer;
	switch (controlcode) {
	case ARCMSR_MESSAGE_READ_RQBUFFER: {
		unsigned char *ver_addr;
		uint8_t *ptmpQbuffer;
		uint32_t allxfer_len = 0;
		ver_addr = kmalloc(ARCMSR_API_DATA_BUFLEN, GFP_ATOMIC);
		if (!ver_addr) {
			retvalue = ARCMSR_MESSAGE_FAIL;
			pr_info("%s: memory not enough!\n", __func__);
			goto message_out;
		}
		ptmpQbuffer = ver_addr;
		spin_lock_irqsave(&acb->rqbuffer_lock, flags);
		if (acb->rqbuf_getIndex != acb->rqbuf_putIndex) {
			unsigned int tail = acb->rqbuf_getIndex;
			unsigned int head = acb->rqbuf_putIndex;
			unsigned int cnt_to_end = CIRC_CNT_TO_END(head, tail, ARCMSR_MAX_QBUFFER);

			allxfer_len = CIRC_CNT(head, tail, ARCMSR_MAX_QBUFFER);
			if (allxfer_len > ARCMSR_API_DATA_BUFLEN)
				allxfer_len = ARCMSR_API_DATA_BUFLEN;

			if (allxfer_len <= cnt_to_end)
				memcpy(ptmpQbuffer, acb->rqbuffer + tail, allxfer_len);
			else {
				memcpy(ptmpQbuffer, acb->rqbuffer + tail, cnt_to_end);
				memcpy(ptmpQbuffer + cnt_to_end, acb->rqbuffer, allxfer_len - cnt_to_end);
			}
			acb->rqbuf_getIndex = (acb->rqbuf_getIndex + allxfer_len) % ARCMSR_MAX_QBUFFER;
		}
		memcpy(pcmdmessagefld->messagedatabuffer, ver_addr,
			allxfer_len);
		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
			struct QBUFFER __iomem *prbuffer;
			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
			prbuffer = arcmsr_get_iop_rqbuffer(acb);
			if (arcmsr_Read_iop_rqbuffer_data(acb, prbuffer) == 0)
				acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
		}
		spin_unlock_irqrestore(&acb->rqbuffer_lock, flags);
		kfree(ver_addr);
		pcmdmessagefld->cmdmessage.Length = allxfer_len;
		if (acb->fw_flag == FW_DEADLOCK)
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
		else
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_OK;
		break;
	}
	case ARCMSR_MESSAGE_WRITE_WQBUFFER: {
		unsigned char *ver_addr;
		uint32_t user_len;
		int32_t cnt2end;
		uint8_t *pQbuffer, *ptmpuserbuffer;

		user_len = pcmdmessagefld->cmdmessage.Length;
		if (user_len > ARCMSR_API_DATA_BUFLEN) {
			retvalue = ARCMSR_MESSAGE_FAIL;
			goto message_out;
		}

		ver_addr = kmalloc(ARCMSR_API_DATA_BUFLEN, GFP_ATOMIC);
		if (!ver_addr) {
			retvalue = ARCMSR_MESSAGE_FAIL;
			goto message_out;
		}
		ptmpuserbuffer = ver_addr;

		memcpy(ptmpuserbuffer,
			pcmdmessagefld->messagedatabuffer, user_len);
		spin_lock_irqsave(&acb->wqbuffer_lock, flags);
		if (acb->wqbuf_putIndex != acb->wqbuf_getIndex) {
			struct SENSE_DATA *sensebuffer =
				(struct SENSE_DATA *)cmd->sense_buffer;
			arcmsr_write_ioctldata2iop(acb);
			/* has error report sensedata */
			sensebuffer->ErrorCode = SCSI_SENSE_CURRENT_ERRORS;
			sensebuffer->SenseKey = ILLEGAL_REQUEST;
			sensebuffer->AdditionalSenseLength = 0x0A;
			sensebuffer->AdditionalSenseCode = 0x20;
			sensebuffer->Valid = 1;
			retvalue = ARCMSR_MESSAGE_FAIL;
		} else {
			pQbuffer = &acb->wqbuffer[acb->wqbuf_putIndex];
			cnt2end = ARCMSR_MAX_QBUFFER - acb->wqbuf_putIndex;
			if (user_len > cnt2end) {
				memcpy(pQbuffer, ptmpuserbuffer, cnt2end);
				ptmpuserbuffer += cnt2end;
				user_len -= cnt2end;
				acb->wqbuf_putIndex = 0;
				pQbuffer = acb->wqbuffer;
			}
			memcpy(pQbuffer, ptmpuserbuffer, user_len);
			acb->wqbuf_putIndex += user_len;
			acb->wqbuf_putIndex %= ARCMSR_MAX_QBUFFER;
			if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) {
				acb->acb_flags &=
					~ACB_F_MESSAGE_WQBUFFER_CLEARED;
				arcmsr_write_ioctldata2iop(acb);
			}
		}
		spin_unlock_irqrestore(&acb->wqbuffer_lock, flags);
		kfree(ver_addr);
		if (acb->fw_flag == FW_DEADLOCK)
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
		else
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_OK;
		break;
	}
	case ARCMSR_MESSAGE_CLEAR_RQBUFFER: {
		uint8_t *pQbuffer = acb->rqbuffer;

		arcmsr_clear_iop2drv_rqueue_buffer(acb);
		spin_lock_irqsave(&acb->rqbuffer_lock, flags);
		acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
		acb->rqbuf_getIndex = 0;
		acb->rqbuf_putIndex = 0;
		memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
		spin_unlock_irqrestore(&acb->rqbuffer_lock, flags);
		if (acb->fw_flag == FW_DEADLOCK)
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
		else
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_OK;
		break;
	}
	case ARCMSR_MESSAGE_CLEAR_WQBUFFER: {
		uint8_t *pQbuffer = acb->wqbuffer;
		spin_lock_irqsave(&acb->wqbuffer_lock, flags);
		acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
			ACB_F_MESSAGE_WQBUFFER_READED);
		acb->wqbuf_getIndex = 0;
		acb->wqbuf_putIndex = 0;
		memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
		spin_unlock_irqrestore(&acb->wqbuffer_lock, flags);
		if (acb->fw_flag == FW_DEADLOCK)
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
		else
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_OK;
		break;
	}
	case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: {
		uint8_t *pQbuffer;
		arcmsr_clear_iop2drv_rqueue_buffer(acb);
		spin_lock_irqsave(&acb->rqbuffer_lock, flags);
		acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
		acb->rqbuf_getIndex = 0;
		acb->rqbuf_putIndex = 0;
		pQbuffer = acb->rqbuffer;
		memset(pQbuffer, 0, sizeof(struct QBUFFER));
		spin_unlock_irqrestore(&acb->rqbuffer_lock, flags);
		spin_lock_irqsave(&acb->wqbuffer_lock, flags);
		acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
			ACB_F_MESSAGE_WQBUFFER_READED);
		acb->wqbuf_getIndex = 0;
		acb->wqbuf_putIndex = 0;
		pQbuffer = acb->wqbuffer;
		memset(pQbuffer, 0, sizeof(struct QBUFFER));
		spin_unlock_irqrestore(&acb->wqbuffer_lock, flags);
		if (acb->fw_flag == FW_DEADLOCK)
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
		else
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_OK;
		break;
	}
	case ARCMSR_MESSAGE_RETURN_CODE_3F: {
		if (acb->fw_flag == FW_DEADLOCK)
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
		else
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_3F;
		break;
	}
	case ARCMSR_MESSAGE_SAY_HELLO: {
		int8_t *hello_string = "Hello! I am ARCMSR";
		if (acb->fw_flag == FW_DEADLOCK)
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
		else
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_OK;
		memcpy(pcmdmessagefld->messagedatabuffer,
			hello_string, (int16_t)strlen(hello_string));
		break;
	}
	case ARCMSR_MESSAGE_SAY_GOODBYE: {
		if (acb->fw_flag == FW_DEADLOCK)
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
		else
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_OK;
		arcmsr_iop_parking(acb);
		break;
	}
	case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE: {
		if (acb->fw_flag == FW_DEADLOCK)
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
		else
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_OK;
		arcmsr_flush_adapter_cache(acb);
		break;
	}
	default:
		retvalue = ARCMSR_MESSAGE_FAIL;
		pr_info("%s: unknown controlcode!\n", __func__);
	}
message_out:
	if (use_sg) {
		struct scatterlist *sg = scsi_sglist(cmd);
		kunmap_atomic(buffer - sg->offset);
	}
	return retvalue;
}
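
/*
** arcmsr_get_freeccb() pops a CommandControlBlock from the free list
** under ccblist_lock, returning NULL when the pool is exhausted so the
** caller can push back to the midlayer (SCSI_MLQUEUE_HOST_BUSY).
*/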
static struct CommandControlBlock *arcmsr_get_freeccb(struct AdapterControlBlock *acb)
{
	struct list_head *head;
	struct CommandControlBlock *ccb = NULL;
	unsigned long flags;

	spin_lock_irqsave(&acb->ccblist_lock, flags);
	head = &acb->ccb_free_list;
	if (!list_empty(head)) {
		ccb = list_entry(head->next, struct CommandControlBlock, list);
		list_del_init(&ccb->list);
	} else {
		spin_unlock_irqrestore(&acb->ccblist_lock, flags);
		return NULL;
	}
	spin_unlock_irqrestore(&acb->ccblist_lock, flags);
	return ccb;
}
static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb,
	struct scsi_cmnd *cmd)
{
	switch (cmd->cmnd[0]) {
	case INQUIRY: {
		unsigned char inqdata[36];
		char *buffer;
		struct scatterlist *sg;

		if (cmd->device->lun) {
			cmd->result = (DID_TIME_OUT << 16);
			cmd->scsi_done(cmd);
			return;
		}
		inqdata[0] = TYPE_PROCESSOR;
		/* Periph Qualifier & Periph Dev Type */
		inqdata[1] = 0;
		/* rem media bit & Dev Type Modifier */
		inqdata[2] = 0;
		/* ISO, ECMA, & ANSI versions */
		inqdata[4] = 31;
		/* length of additional data */
		memcpy(&inqdata[8], "Areca   ", 8);
		/* Vendor Identification */
		memcpy(&inqdata[16], "RAID controller ", 16);
		/* Product Identification */
		memcpy(&inqdata[32], "R001", 4); /* Product Revision */

		sg = scsi_sglist(cmd);
		buffer = kmap_atomic(sg_page(sg)) + sg->offset;

		memcpy(buffer, inqdata, sizeof(inqdata));
		sg = scsi_sglist(cmd);
		kunmap_atomic(buffer - sg->offset);

		cmd->scsi_done(cmd);
	}
	break;
	case WRITE_BUFFER:
	case READ_BUFFER: {
		if (arcmsr_iop_message_xfer(acb, cmd))
			cmd->result = (DID_ERROR << 16);
		cmd->scsi_done(cmd);
	}
	break;
	default:
		cmd->scsi_done(cmd);
	}
}
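
/*
** arcmsr_queue_command_lck() is the queuecommand entry point: commands
** addressed to the driver's virtual message target are routed to
** arcmsr_handle_virtual_command(), everything else gets a free CCB,
** is translated by arcmsr_build_ccb() and posted with arcmsr_post_ccb().
*/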
static int arcmsr_queue_command_lck(struct scsi_cmnd *cmd,
	void (* done)(struct scsi_cmnd *))
{
	struct Scsi_Host *host = cmd->device->host;
	struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
	struct CommandControlBlock *ccb;
	int target = cmd->device->id;

	if (acb->acb_flags & ACB_F_ADAPTER_REMOVED) {
		cmd->result = (DID_NO_CONNECT << 16);
		cmd->scsi_done(cmd);
		return 0;
	}
	cmd->scsi_done = done;
	cmd->host_scribble = NULL;
	cmd->result = 0;
	if (target == 16) {
		/* virtual device for iop message transfer */
		arcmsr_handle_virtual_command(acb, cmd);
		return 0;
	}
	ccb = arcmsr_get_freeccb(acb);
	if (!ccb)
		return SCSI_MLQUEUE_HOST_BUSY;
	if (arcmsr_build_ccb( acb, ccb, cmd ) == FAILED) {
		cmd->result = (DID_ERROR << 16) | (RESERVATION_CONFLICT << 1);
		cmd->scsi_done(cmd);
		return 0;
	}
	arcmsr_post_ccb(acb, ccb);
	return 0;
}

static DEF_SCSI_QCMD(arcmsr_queue_command)
static int arcmsr_slave_config(struct scsi_device *sdev)
{
	unsigned int dev_timeout;

	dev_timeout = sdev->request_queue->rq_timeout;
	if ((cmd_timeout > 0) && ((cmd_timeout * HZ) > dev_timeout))
		blk_queue_rq_timeout(sdev->request_queue, cmd_timeout * HZ);
	return 0;
}
static void arcmsr_get_adapter_config(struct AdapterControlBlock *pACB, uint32_t *rwbuffer)
{
	int count;
	uint32_t *acb_firm_model = (uint32_t *)pACB->firm_model;
	uint32_t *acb_firm_version = (uint32_t *)pACB->firm_version;
	uint32_t *acb_device_map = (uint32_t *)pACB->device_map;
	uint32_t *firm_model = &rwbuffer[15];
	uint32_t *firm_version = &rwbuffer[17];
	uint32_t *device_map = &rwbuffer[21];

	count = 2;
	while (count) {
		*acb_firm_model = readl(firm_model);
		acb_firm_model++;
		firm_model++;
		count--;
	}
	count = 4;
	while (count) {
		*acb_firm_version = readl(firm_version);
		acb_firm_version++;
		firm_version++;
		count--;
	}
	count = 4;
	while (count) {
		*acb_device_map = readl(device_map);
		acb_device_map++;
		device_map++;
		count--;
	}
	pACB->signature = readl(&rwbuffer[0]);
	pACB->firm_request_len = readl(&rwbuffer[1]);
	pACB->firm_numbers_queue = readl(&rwbuffer[2]);
	pACB->firm_sdram_size = readl(&rwbuffer[3]);
	pACB->firm_hd_channels = readl(&rwbuffer[4]);
	pACB->firm_cfg_version = readl(&rwbuffer[25]);
	pr_notice("Areca RAID Controller%d: Model %s, F/W %s\n",
		pACB->host->host_no,
		pACB->firm_model,
		pACB->firm_version);
}
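
/*
** The arcmsr_hbaX_get_config() helpers post the "get config" message,
** wait for the firmware acknowledgement and then parse model, version
** and queue-depth information with arcmsr_get_adapter_config().
*/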
static bool arcmsr_hbaA_get_config(struct AdapterControlBlock *acb)
{
	struct MessageUnit_A __iomem *reg = acb->pmuA;

	arcmsr_wait_firmware_ready(acb);
	writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
	if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
		printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \
			miscellaneous data' timeout \n", acb->host->host_no);
		return false;
	}
	arcmsr_get_adapter_config(acb, reg->message_rwbuffer);
	return true;
}
static bool arcmsr_hbaB_get_config(struct AdapterControlBlock *acb)
{
	struct MessageUnit_B *reg = acb->pmuB;

	arcmsr_wait_firmware_ready(acb);
	writel(ARCMSR_MESSAGE_START_DRIVER_MODE, reg->drv2iop_doorbell);
	if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
		printk(KERN_ERR "arcmsr%d: can't set driver mode.\n", acb->host->host_no);
		return false;
	}
	writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell);
	if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
		printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \
			miscellaneous data' timeout \n", acb->host->host_no);
		return false;
	}
	arcmsr_get_adapter_config(acb, reg->message_rwbuffer);
	return true;
}
static bool arcmsr_hbaC_get_config(struct AdapterControlBlock *pACB)
{
	uint32_t intmask_org;
	struct MessageUnit_C __iomem *reg = pACB->pmuC;

	/* disable all outbound interrupt */
	intmask_org = readl(&reg->host_int_mask); /* disable outbound message0 int */
	writel(intmask_org|ARCMSR_HBCMU_ALL_INTMASKENABLE, &reg->host_int_mask);
	/* wait firmware ready */
	arcmsr_wait_firmware_ready(pACB);
	/* post "get config" instruction */
	writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
	writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
	/* wait message ready */
	if (!arcmsr_hbaC_wait_msgint_ready(pACB)) {
		printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \
			miscellaneous data' timeout \n", pACB->host->host_no);
		return false;
	}
	arcmsr_get_adapter_config(pACB, reg->msgcode_rwbuffer);
	return true;
}
static bool arcmsr_hbaD_get_config(struct AdapterControlBlock *acb)
{
	struct MessageUnit_D *reg = acb->pmuD;

	if (readl(acb->pmuD->outbound_doorbell) &
		ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE) {
		writel(ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE,
			acb->pmuD->outbound_doorbell);/*clear interrupt*/
	}
	arcmsr_wait_firmware_ready(acb);
	/* post "get config" instruction */
	writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, reg->inbound_msgaddr0);
	/* wait message ready */
	if (!arcmsr_hbaD_wait_msgint_ready(acb)) {
		pr_notice("arcmsr%d: wait get adapter firmware "
			"miscellaneous data timeout\n", acb->host->host_no);
		return false;
	}
	arcmsr_get_adapter_config(acb, reg->msgcode_rwbuffer);
	return true;
}
static bool arcmsr_hbaE_get_config(struct AdapterControlBlock *pACB)
{
	struct MessageUnit_E __iomem *reg = pACB->pmuE;
	uint32_t intmask_org;

	/* disable all outbound interrupt */
	intmask_org = readl(&reg->host_int_mask); /* disable outbound message0 int */
	writel(intmask_org | ARCMSR_HBEMU_ALL_INTMASKENABLE, &reg->host_int_mask);
	/* wait firmware ready */
	arcmsr_wait_firmware_ready(pACB);
	/* post "get config" instruction */
	writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);

	pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
	writel(pACB->out_doorbell, &reg->iobound_doorbell);
	/* wait message ready */
	if (!arcmsr_hbaE_wait_msgint_ready(pACB)) {
		pr_notice("arcmsr%d: wait get adapter firmware "
			"miscellaneous data timeout\n", pACB->host->host_no);
		return false;
	}
	arcmsr_get_adapter_config(pACB, reg->msgcode_rwbuffer);
	return true;
}
static bool arcmsr_hbaF_get_config(struct AdapterControlBlock *pACB)
{
	struct MessageUnit_F __iomem *reg = pACB->pmuF;
	uint32_t intmask_org;

	/* disable all outbound interrupt */
	intmask_org = readl(&reg->host_int_mask); /* disable outbound message0 int */
	writel(intmask_org | ARCMSR_HBEMU_ALL_INTMASKENABLE, &reg->host_int_mask);
	/* wait firmware ready */
	arcmsr_wait_firmware_ready(pACB);
	/* post "get config" instruction */
	writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);

	pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
	writel(pACB->out_doorbell, &reg->iobound_doorbell);
	/* wait message ready */
	if (!arcmsr_hbaE_wait_msgint_ready(pACB)) {
		pr_notice("arcmsr%d: wait get adapter firmware miscellaneous data timeout\n",
			pACB->host->host_no);
		return false;
	}
	arcmsr_get_adapter_config(pACB, pACB->msgcode_rwbuffer);
	return true;
}
static bool arcmsr_get_firmware_spec(struct AdapterControlBlock *acb)
{
	bool rtn = true;

	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A:
		rtn = arcmsr_hbaA_get_config(acb);
		break;
	case ACB_ADAPTER_TYPE_B:
		rtn = arcmsr_hbaB_get_config(acb);
		break;
	case ACB_ADAPTER_TYPE_C:
		rtn = arcmsr_hbaC_get_config(acb);
		break;
	case ACB_ADAPTER_TYPE_D:
		rtn = arcmsr_hbaD_get_config(acb);
		break;
	case ACB_ADAPTER_TYPE_E:
		rtn = arcmsr_hbaE_get_config(acb);
		break;
	case ACB_ADAPTER_TYPE_F:
		rtn = arcmsr_hbaF_get_config(acb);
		break;
	default:
		break;
	}
	acb->maxOutstanding = acb->firm_numbers_queue - 1;
	if (acb->host->can_queue >= acb->firm_numbers_queue)
		acb->host->can_queue = acb->maxOutstanding;
	else
		acb->maxOutstanding = acb->host->can_queue;
	acb->maxFreeCCB = acb->host->can_queue;
	if (acb->maxFreeCCB < ARCMSR_MAX_FREECCB_NUM)
		acb->maxFreeCCB += 64;
	return rtn;
}
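
/*
** The arcmsr_hbaX_polling_ccbdone() helpers are used on the abort path:
** with interrupts disabled they poll the adapter's done queue until the
** CCB being aborted is reported back (or a retry budget is exhausted),
** completing any other CCBs they encounter along the way.
*/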
static int arcmsr_hbaA_polling_ccbdone(struct AdapterControlBlock *acb,
	struct CommandControlBlock *poll_ccb)
{
	struct MessageUnit_A __iomem *reg = acb->pmuA;
	struct CommandControlBlock *ccb;
	struct ARCMSR_CDB *arcmsr_cdb;
	uint32_t flag_ccb, outbound_intstatus, poll_ccb_done = 0, poll_count = 0;
	int rtn;
	bool error;
	unsigned long ccb_cdb_phy;

polling_hba_ccb_retry:
	poll_count++;
	outbound_intstatus = readl(&reg->outbound_intstatus) & acb->outbound_int_enable;
	writel(outbound_intstatus, &reg->outbound_intstatus);/*clear interrupt*/
	while (1) {
		if ((flag_ccb = readl(&reg->outbound_queueport)) == 0xFFFFFFFF) {
			if (poll_ccb_done){
				rtn = SUCCESS;
				break;
			} else {
				msleep(25);
				if (poll_count > 100){
					rtn = FAILED;
					break;
				}
				goto polling_hba_ccb_retry;
			}
		}
		ccb_cdb_phy = (flag_ccb << 5) & 0xffffffff;
		if (acb->cdb_phyadd_hipart)
			ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
		arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);
		ccb = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb);
		poll_ccb_done |= (ccb == poll_ccb) ? 1 : 0;
		if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
			if ((ccb->startdone == ARCMSR_CCB_ABORTED) || (ccb == poll_ccb)) {
				printk(KERN_NOTICE "arcmsr%d: scsi id = %d lun = %d ccb = '0x%p'"
					" poll command abort successfully \n"
					, acb->host->host_no
					, ccb->pcmd->device->id
					, (u32)ccb->pcmd->device->lun
					, ccb);
				ccb->pcmd->result = DID_ABORT << 16;
				arcmsr_ccb_complete(ccb);
				continue;
			}
			printk(KERN_NOTICE "arcmsr%d: polling get an illegal ccb"
				" command done ccb = '0x%p'"
				"ccboutstandingcount = %d \n"
				, acb->host->host_no
				, ccb
				, atomic_read(&acb->ccboutstandingcount));
			continue;
		}
		error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
		arcmsr_report_ccb_state(acb, ccb, error);
	}
	return rtn;
}
static int arcmsr_hbaB_polling_ccbdone(struct AdapterControlBlock *acb,
	struct CommandControlBlock *poll_ccb)
{
	struct MessageUnit_B *reg = acb->pmuB;
	struct ARCMSR_CDB *arcmsr_cdb;
	struct CommandControlBlock *ccb;
	uint32_t flag_ccb, poll_ccb_done = 0, poll_count = 0;
	int index, rtn;
	bool error;
	unsigned long ccb_cdb_phy;

polling_hbb_ccb_retry:
	poll_count++;
	/* clear doorbell interrupt */
	writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
	while (1) {
		index = reg->doneq_index;
		flag_ccb = reg->done_qbuffer[index];
		if (flag_ccb == 0) {
			if (poll_ccb_done){
				rtn = SUCCESS;
				break;
			} else {
				msleep(25);
				if (poll_count > 100){
					rtn = FAILED;
					break;
				}
				goto polling_hbb_ccb_retry;
			}
		}
		reg->done_qbuffer[index] = 0;
		index++;
		/*if last index number set it to 0 */
		index %= ARCMSR_MAX_HBB_POSTQUEUE;
		reg->doneq_index = index;
		/* check if command done with no error*/
		ccb_cdb_phy = (flag_ccb << 5) & 0xffffffff;
		if (acb->cdb_phyadd_hipart)
			ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
		arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);
		ccb = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb);
		poll_ccb_done |= (ccb == poll_ccb) ? 1 : 0;
		if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
			if ((ccb->startdone == ARCMSR_CCB_ABORTED) || (ccb == poll_ccb)) {
				printk(KERN_NOTICE "arcmsr%d: scsi id = %d lun = %d ccb = '0x%p'"
					" poll command abort successfully \n"
					,acb->host->host_no
					,ccb->pcmd->device->id
					,(u32)ccb->pcmd->device->lun
					,ccb);
				ccb->pcmd->result = DID_ABORT << 16;
				arcmsr_ccb_complete(ccb);
				continue;
			}
			printk(KERN_NOTICE "arcmsr%d: polling get an illegal ccb"
				" command done ccb = '0x%p'"
				"ccboutstandingcount = %d \n"
				, acb->host->host_no
				, ccb
				, atomic_read(&acb->ccboutstandingcount));
			continue;
		}
		error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
		arcmsr_report_ccb_state(acb, ccb, error);
	}
	return rtn;
}
static int arcmsr_hbaC_polling_ccbdone(struct AdapterControlBlock *acb,
					struct CommandControlBlock *poll_ccb)
{
	struct MessageUnit_C __iomem *reg = acb->pmuC;
	struct ARCMSR_CDB *arcmsr_cdb;
	struct CommandControlBlock *pCCB;
	uint32_t flag_ccb, poll_ccb_done = 0, poll_count = 0;
	int rtn;
	bool error;
	unsigned long ccb_cdb_phy;

polling_hbc_ccb_retry:
	poll_count++;
	while (1) {
		if ((readl(&reg->host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) == 0) {
			if (poll_ccb_done) {
				rtn = SUCCESS;
				break;
			}
			msleep(25);
			if (poll_count > 100) {
				rtn = FAILED;
				break;
			}
			goto polling_hbc_ccb_retry;
		}
		flag_ccb = readl(&reg->outbound_queueport_low);
		ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
		if (acb->cdb_phyadd_hipart)
			ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
		arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);
		pCCB = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb);
		poll_ccb_done |= (pCCB == poll_ccb) ? 1 : 0;
		/* check if command done with no error */
		if ((pCCB->acb != acb) || (pCCB->startdone != ARCMSR_CCB_START)) {
			if (pCCB->startdone == ARCMSR_CCB_ABORTED) {
				printk(KERN_NOTICE "arcmsr%d: scsi id = %d lun = %d ccb = '0x%p'"
					" poll command abort successfully\n",
					acb->host->host_no,
					pCCB->pcmd->device->id,
					(u32)pCCB->pcmd->device->lun,
					pCCB);
				pCCB->pcmd->result = DID_ABORT << 16;
				arcmsr_ccb_complete(pCCB);
				continue;
			}
			printk(KERN_NOTICE "arcmsr%d: polling got an illegal ccb"
				" command done ccb = '0x%p'"
				" ccboutstandingcount = %d\n",
				acb->host->host_no,
				pCCB,
				atomic_read(&acb->ccboutstandingcount));
			continue;
		}
		error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false;
		arcmsr_report_ccb_state(acb, pCCB, error);
	}
	return rtn;
}
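
/*
** Type D keeps a toggle bit (0x4000) in doneq_index to detect wrap of the
** done queue, so the index arithmetic below preserves that bit while
** stepping through the ring.
*/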
static int arcmsr_hbaD_polling_ccbdone(struct AdapterControlBlock *acb,
				struct CommandControlBlock *poll_ccb)
{
	uint32_t poll_ccb_done = 0, poll_count = 0, flag_ccb;
	int rtn, doneq_index, index_stripped, outbound_write_pointer, toggle;
	unsigned long flags, ccb_cdb_phy;
	bool error;
	struct ARCMSR_CDB *arcmsr_cdb;
	struct CommandControlBlock *pCCB;
	struct MessageUnit_D *pmu = acb->pmuD;

polling_hbaD_ccb_retry:
	poll_count++;
	while (1) {
		spin_lock_irqsave(&acb->doneq_lock, flags);
		outbound_write_pointer = pmu->done_qbuffer[0].addressLow + 1;
		doneq_index = pmu->doneq_index;
		if ((outbound_write_pointer & 0xFFF) == (doneq_index & 0xFFF)) {
			spin_unlock_irqrestore(&acb->doneq_lock, flags);
			if (poll_ccb_done) {
				rtn = SUCCESS;
				break;
			}
			msleep(25);
			if (poll_count > 40) {
				rtn = FAILED;
				break;
			}
			goto polling_hbaD_ccb_retry;
		}
		toggle = doneq_index & 0x4000;
		index_stripped = (doneq_index & 0xFFF) + 1;
		index_stripped %= ARCMSR_MAX_ARC1214_DONEQUEUE;
		pmu->doneq_index = index_stripped ? (index_stripped | toggle) :
			((toggle ^ 0x4000) + 1);
		doneq_index = pmu->doneq_index;
		spin_unlock_irqrestore(&acb->doneq_lock, flags);
		flag_ccb = pmu->done_qbuffer[doneq_index & 0xFFF].addressLow;
		ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
		if (acb->cdb_phyadd_hipart)
			ccb_cdb_phy = ccb_cdb_phy | acb->cdb_phyadd_hipart;
		arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset +
			ccb_cdb_phy);
		pCCB = container_of(arcmsr_cdb, struct CommandControlBlock,
			arcmsr_cdb);
		poll_ccb_done |= (pCCB == poll_ccb) ? 1 : 0;
		if ((pCCB->acb != acb) ||
			(pCCB->startdone != ARCMSR_CCB_START)) {
			if (pCCB->startdone == ARCMSR_CCB_ABORTED) {
				pr_notice("arcmsr%d: scsi id = %d "
					"lun = %d ccb = '0x%p' poll command "
					"abort successfully\n",
					acb->host->host_no,
					pCCB->pcmd->device->id,
					(u32)pCCB->pcmd->device->lun,
					pCCB);
				pCCB->pcmd->result = DID_ABORT << 16;
				arcmsr_ccb_complete(pCCB);
				continue;
			}
			pr_notice("arcmsr%d: polling an illegal "
				"ccb command done ccb = '0x%p' "
				"ccboutstandingcount = %d\n",
				acb->host->host_no,
				pCCB,
				atomic_read(&acb->ccboutstandingcount));
			continue;
		}
		error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1)
			? true : false;
		arcmsr_report_ccb_state(acb, pCCB, error);
	}
	return rtn;
}
static int arcmsr_hbaE_polling_ccbdone(struct AdapterControlBlock *acb,
				struct CommandControlBlock *poll_ccb)
{
	uint32_t poll_ccb_done = 0, poll_count = 0, doneq_index;
	uint16_t cmdSMID;
	unsigned long flags;
	int rtn;
	bool error;
	struct CommandControlBlock *pCCB;
	struct MessageUnit_E __iomem *reg = acb->pmuE;

polling_hbaC_ccb_retry:
	poll_count++;
	while (1) {
		spin_lock_irqsave(&acb->doneq_lock, flags);
		doneq_index = acb->doneq_index;
		if ((readl(&reg->reply_post_producer_index) & 0xFFFF) ==
				doneq_index) {
			spin_unlock_irqrestore(&acb->doneq_lock, flags);
			if (poll_ccb_done) {
				rtn = SUCCESS;
				break;
			}
			msleep(25);
			if (poll_count > 40) {
				rtn = FAILED;
				break;
			}
			goto polling_hbaC_ccb_retry;
		}
		cmdSMID = acb->pCompletionQ[doneq_index].cmdSMID;
		doneq_index++;
		if (doneq_index >= acb->completionQ_entry)
			doneq_index = 0;
		acb->doneq_index = doneq_index;
		spin_unlock_irqrestore(&acb->doneq_lock, flags);
		pCCB = acb->pccb_pool[cmdSMID];
		poll_ccb_done |= (pCCB == poll_ccb) ? 1 : 0;
		/* check if command done with no error */
		if ((pCCB->acb != acb) || (pCCB->startdone != ARCMSR_CCB_START)) {
			if (pCCB->startdone == ARCMSR_CCB_ABORTED) {
				pr_notice("arcmsr%d: scsi id = %d "
					"lun = %d ccb = '0x%p' poll command "
					"abort successfully\n",
					acb->host->host_no,
					pCCB->pcmd->device->id,
					(u32)pCCB->pcmd->device->lun,
					pCCB);
				pCCB->pcmd->result = DID_ABORT << 16;
				arcmsr_ccb_complete(pCCB);
				continue;
			}
			pr_notice("arcmsr%d: polling an illegal "
				"ccb command done ccb = '0x%p' "
				"ccboutstandingcount = %d\n",
				acb->host->host_no,
				pCCB,
				atomic_read(&acb->ccboutstandingcount));
			continue;
		}
		error = (acb->pCompletionQ[doneq_index].cmdFlag &
			ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false;
		arcmsr_report_ccb_state(acb, pCCB, error);
	}
	writel(doneq_index, &reg->reply_post_consumer_index);
	return rtn;
}
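
/*
** Dispatch to the adapter-specific polling routine; type E and F share the
** same completion queue layout and therefore the same handler.
*/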
static int arcmsr_polling_ccbdone(struct AdapterControlBlock *acb,
					struct CommandControlBlock *poll_ccb)
{
	int rtn = 0;

	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A:
		rtn = arcmsr_hbaA_polling_ccbdone(acb, poll_ccb);
		break;
	case ACB_ADAPTER_TYPE_B:
		rtn = arcmsr_hbaB_polling_ccbdone(acb, poll_ccb);
		break;
	case ACB_ADAPTER_TYPE_C:
		rtn = arcmsr_hbaC_polling_ccbdone(acb, poll_ccb);
		break;
	case ACB_ADAPTER_TYPE_D:
		rtn = arcmsr_hbaD_polling_ccbdone(acb, poll_ccb);
		break;
	case ACB_ADAPTER_TYPE_E:
	case ACB_ADAPTER_TYPE_F:
		rtn = arcmsr_hbaE_polling_ccbdone(acb, poll_ccb);
		break;
	}
	return rtn;
}
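
/*
** Timer callback that pushes the current wall-clock time down to the IOP
** firmware, packed as two 32-bit words, then re-arms itself.
*/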
static void arcmsr_set_iop_datetime(struct timer_list *t)
{
	struct AdapterControlBlock *pacb = from_timer(pacb, t, refresh_timer);
	unsigned int next_time;
	struct tm tm;

	union {
		struct {
			uint16_t	signature;
			uint8_t		year;
			uint8_t		month;
			uint8_t		date;
			uint8_t		hour;
			uint8_t		minute;
			uint8_t		second;
		} a;
		struct {
			uint32_t	msg_time[2];
		} b;
	} datetime;

	time64_to_tm(ktime_get_real_seconds(), -sys_tz.tz_minuteswest * 60, &tm);

	datetime.a.signature = 0x55AA;
	datetime.a.year = tm.tm_year - 100; /* base 2000 instead of 1900 */
	datetime.a.month = tm.tm_mon;
	datetime.a.date = tm.tm_mday;
	datetime.a.hour = tm.tm_hour;
	datetime.a.minute = tm.tm_min;
	datetime.a.second = tm.tm_sec;

	switch (pacb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = pacb->pmuA;

		writel(datetime.b.msg_time[0], &reg->message_rwbuffer[0]);
		writel(datetime.b.msg_time[1], &reg->message_rwbuffer[1]);
		writel(ARCMSR_INBOUND_MESG0_SYNC_TIMER, &reg->inbound_msgaddr0);
		break;
	}
	case ACB_ADAPTER_TYPE_B: {
		uint32_t __iomem *rwbuffer;
		struct MessageUnit_B *reg = pacb->pmuB;

		rwbuffer = reg->message_rwbuffer;
		writel(datetime.b.msg_time[0], rwbuffer++);
		writel(datetime.b.msg_time[1], rwbuffer++);
		writel(ARCMSR_MESSAGE_SYNC_TIMER, reg->drv2iop_doorbell);
		break;
	}
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C __iomem *reg = pacb->pmuC;

		writel(datetime.b.msg_time[0], &reg->msgcode_rwbuffer[0]);
		writel(datetime.b.msg_time[1], &reg->msgcode_rwbuffer[1]);
		writel(ARCMSR_INBOUND_MESG0_SYNC_TIMER, &reg->inbound_msgaddr0);
		writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
		break;
	}
	case ACB_ADAPTER_TYPE_D: {
		uint32_t __iomem *rwbuffer;
		struct MessageUnit_D *reg = pacb->pmuD;

		rwbuffer = reg->msgcode_rwbuffer;
		writel(datetime.b.msg_time[0], rwbuffer++);
		writel(datetime.b.msg_time[1], rwbuffer++);
		writel(ARCMSR_INBOUND_MESG0_SYNC_TIMER, reg->inbound_msgaddr0);
		break;
	}
	case ACB_ADAPTER_TYPE_E: {
		struct MessageUnit_E __iomem *reg = pacb->pmuE;

		writel(datetime.b.msg_time[0], &reg->msgcode_rwbuffer[0]);
		writel(datetime.b.msg_time[1], &reg->msgcode_rwbuffer[1]);
		writel(ARCMSR_INBOUND_MESG0_SYNC_TIMER, &reg->inbound_msgaddr0);
		pacb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
		writel(pacb->out_doorbell, &reg->iobound_doorbell);
		break;
	}
	case ACB_ADAPTER_TYPE_F: {
		struct MessageUnit_F __iomem *reg = pacb->pmuF;

		pacb->msgcode_rwbuffer[0] = datetime.b.msg_time[0];
		pacb->msgcode_rwbuffer[1] = datetime.b.msg_time[1];
		writel(ARCMSR_INBOUND_MESG0_SYNC_TIMER, &reg->inbound_msgaddr0);
		pacb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
		writel(pacb->out_doorbell, &reg->iobound_doorbell);
		break;
	}
	}
	if (sys_tz.tz_minuteswest)
		next_time = ARCMSR_HOURS;
	else
		next_time = ARCMSR_MINUTES;
	mod_timer(&pacb->refresh_timer, jiffies + msecs_to_jiffies(next_time));
}
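
/*
** Report the CCB pool's physical address (and, per adapter family, the post
** and done queue layout) to the IOP so it can DMA command blocks directly.
** Returns 0 on success, 1 if the firmware handshake times out.
*/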
static int arcmsr_iop_confirm(struct AdapterControlBlock *acb)
{
	uint32_t cdb_phyaddr, cdb_phyaddr_hi32;
	dma_addr_t dma_coherent_handle;

	/*
	********************************************************************
	** here we need to tell iop 331 our freeccb.HighPart
	** if freeccb.HighPart is not zero
	********************************************************************
	*/
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_B:
	case ACB_ADAPTER_TYPE_D:
		dma_coherent_handle = acb->dma_coherent_handle2;
		break;
	case ACB_ADAPTER_TYPE_E:
	case ACB_ADAPTER_TYPE_F:
		dma_coherent_handle = acb->dma_coherent_handle +
			offsetof(struct CommandControlBlock, arcmsr_cdb);
		break;
	default:
		dma_coherent_handle = acb->dma_coherent_handle;
		break;
	}
	cdb_phyaddr = lower_32_bits(dma_coherent_handle);
	cdb_phyaddr_hi32 = upper_32_bits(dma_coherent_handle);
	acb->cdb_phyaddr_hi32 = cdb_phyaddr_hi32;
	acb->cdb_phyadd_hipart = ((uint64_t)cdb_phyaddr_hi32) << 32;
	/*
	***********************************************************************
	** if adapter type B, set window of "post command Q"
	***********************************************************************
	*/
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		if (cdb_phyaddr_hi32 != 0) {
			struct MessageUnit_A __iomem *reg = acb->pmuA;

			writel(ARCMSR_SIGNATURE_SET_CONFIG,
				&reg->message_rwbuffer[0]);
			writel(cdb_phyaddr_hi32, &reg->message_rwbuffer[1]);
			writel(ARCMSR_INBOUND_MESG0_SET_CONFIG,
				&reg->inbound_msgaddr0);
			if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
				printk(KERN_NOTICE "arcmsr%d: 'set ccb high "
					"part physical address' timeout\n",
					acb->host->host_no);
				return 1;
			}
		}
		break;
	}
	case ACB_ADAPTER_TYPE_B: {
		uint32_t __iomem *rwbuffer;
		struct MessageUnit_B *reg = acb->pmuB;

		reg->postq_index = 0;
		reg->doneq_index = 0;
		writel(ARCMSR_MESSAGE_SET_POST_WINDOW, reg->drv2iop_doorbell);
		if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
			printk(KERN_NOTICE "arcmsr%d: cannot set driver mode\n",
				acb->host->host_no);
			return 1;
		}
		rwbuffer = reg->message_rwbuffer;
		/* driver "set config" signature */
		writel(ARCMSR_SIGNATURE_SET_CONFIG, rwbuffer++);
		/* normal should be zero */
		writel(cdb_phyaddr_hi32, rwbuffer++);
		/* postQ size (256 + 8)*4 */
		writel(cdb_phyaddr, rwbuffer++);
		/* doneQ size (256 + 8)*4 */
		writel(cdb_phyaddr + 1056, rwbuffer++);
		/* ccb maxQ size must be --> [(256 + 8)*4] */
		writel(1056, rwbuffer);

		writel(ARCMSR_MESSAGE_SET_CONFIG, reg->drv2iop_doorbell);
		if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
			printk(KERN_NOTICE "arcmsr%d: 'set command Q window' timeout\n",
				acb->host->host_no);
			return 1;
		}
		writel(ARCMSR_MESSAGE_START_DRIVER_MODE, reg->drv2iop_doorbell);
		if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
			pr_err("arcmsr%d: can't set driver mode.\n",
				acb->host->host_no);
			return 1;
		}
		break;
	}
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C __iomem *reg = acb->pmuC;

		printk(KERN_NOTICE "arcmsr%d: cdb_phyaddr_hi32=0x%x\n",
			acb->adapter_index, cdb_phyaddr_hi32);
		writel(ARCMSR_SIGNATURE_SET_CONFIG, &reg->msgcode_rwbuffer[0]);
		writel(cdb_phyaddr_hi32, &reg->msgcode_rwbuffer[1]);
		writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, &reg->inbound_msgaddr0);
		writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
		if (!arcmsr_hbaC_wait_msgint_ready(acb)) {
			printk(KERN_NOTICE "arcmsr%d: 'set command Q window' timeout\n",
				acb->host->host_no);
			return 1;
		}
		break;
	}
	case ACB_ADAPTER_TYPE_D: {
		uint32_t __iomem *rwbuffer;
		struct MessageUnit_D *reg = acb->pmuD;

		reg->postq_index = 0;
		reg->doneq_index = 0;
		rwbuffer = reg->msgcode_rwbuffer;
		writel(ARCMSR_SIGNATURE_SET_CONFIG, rwbuffer++);
		writel(cdb_phyaddr_hi32, rwbuffer++);
		writel(cdb_phyaddr, rwbuffer++);
		writel(cdb_phyaddr + (ARCMSR_MAX_ARC1214_POSTQUEUE *
			sizeof(struct InBound_SRB)), rwbuffer++);
		writel(0x100, rwbuffer);
		writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, reg->inbound_msgaddr0);
		if (!arcmsr_hbaD_wait_msgint_ready(acb)) {
			pr_notice("arcmsr%d: 'set command Q window' timeout\n",
				acb->host->host_no);
			return 1;
		}
		break;
	}
	case ACB_ADAPTER_TYPE_E: {
		struct MessageUnit_E __iomem *reg = acb->pmuE;

		writel(ARCMSR_SIGNATURE_SET_CONFIG, &reg->msgcode_rwbuffer[0]);
		writel(ARCMSR_SIGNATURE_1884, &reg->msgcode_rwbuffer[1]);
		writel(cdb_phyaddr, &reg->msgcode_rwbuffer[2]);
		writel(cdb_phyaddr_hi32, &reg->msgcode_rwbuffer[3]);
		writel(acb->ccbsize, &reg->msgcode_rwbuffer[4]);
		writel(lower_32_bits(acb->dma_coherent_handle2), &reg->msgcode_rwbuffer[5]);
		writel(upper_32_bits(acb->dma_coherent_handle2), &reg->msgcode_rwbuffer[6]);
		writel(acb->ioqueue_size, &reg->msgcode_rwbuffer[7]);
		writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, &reg->inbound_msgaddr0);
		acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
		writel(acb->out_doorbell, &reg->iobound_doorbell);
		if (!arcmsr_hbaE_wait_msgint_ready(acb)) {
			pr_notice("arcmsr%d: 'set command Q window' timeout\n",
				acb->host->host_no);
			return 1;
		}
		break;
	}
	case ACB_ADAPTER_TYPE_F: {
		struct MessageUnit_F __iomem *reg = acb->pmuF;

		acb->msgcode_rwbuffer[0] = ARCMSR_SIGNATURE_SET_CONFIG;
		acb->msgcode_rwbuffer[1] = ARCMSR_SIGNATURE_1886;
		acb->msgcode_rwbuffer[2] = cdb_phyaddr;
		acb->msgcode_rwbuffer[3] = cdb_phyaddr_hi32;
		acb->msgcode_rwbuffer[4] = acb->ccbsize;
		acb->msgcode_rwbuffer[5] = lower_32_bits(acb->dma_coherent_handle2);
		acb->msgcode_rwbuffer[6] = upper_32_bits(acb->dma_coherent_handle2);
		acb->msgcode_rwbuffer[7] = acb->completeQ_size;
		writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, &reg->inbound_msgaddr0);
		acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
		writel(acb->out_doorbell, &reg->iobound_doorbell);
		if (!arcmsr_hbaE_wait_msgint_ready(acb)) {
			pr_notice("arcmsr%d: 'set command Q window' timeout\n",
				acb->host->host_no);
			return 1;
		}
		break;
	}
	}
	return 0;
}
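
/*
** Busy-wait (with short sleeps) until the firmware reports itself ready in
** the adapter's outbound message register.
*/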
static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb)
{
	uint32_t firmware_state = 0;

	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;

		do {
			if (!(acb->acb_flags & ACB_F_IOP_INITED))
				msleep(20);
			firmware_state = readl(&reg->outbound_msgaddr1);
		} while ((firmware_state & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0);
		break;
	}
	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;

		do {
			if (!(acb->acb_flags & ACB_F_IOP_INITED))
				msleep(20);
			firmware_state = readl(reg->iop2drv_doorbell);
		} while ((firmware_state & ARCMSR_MESSAGE_FIRMWARE_OK) == 0);
		writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell);
		break;
	}
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C __iomem *reg = acb->pmuC;

		do {
			if (!(acb->acb_flags & ACB_F_IOP_INITED))
				msleep(20);
			firmware_state = readl(&reg->outbound_msgaddr1);
		} while ((firmware_state & ARCMSR_HBCMU_MESSAGE_FIRMWARE_OK) == 0);
		break;
	}
	case ACB_ADAPTER_TYPE_D: {
		struct MessageUnit_D *reg = acb->pmuD;

		do {
			if (!(acb->acb_flags & ACB_F_IOP_INITED))
				msleep(20);
			firmware_state = readl(reg->outbound_msgaddr1);
		} while ((firmware_state &
			ARCMSR_ARC1214_MESSAGE_FIRMWARE_OK) == 0);
		break;
	}
	case ACB_ADAPTER_TYPE_E:
	case ACB_ADAPTER_TYPE_F: {
		struct MessageUnit_E __iomem *reg = acb->pmuE;

		do {
			if (!(acb->acb_flags & ACB_F_IOP_INITED))
				msleep(20);
			firmware_state = readl(&reg->outbound_msgaddr1);
		} while ((firmware_state & ARCMSR_HBEMU_MESSAGE_FIRMWARE_OK) == 0);
		break;
	}
	}
}
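
/*
** Periodic (6 second) timer callback that asks the firmware for its device
** map unless a reset, abort or previous GET_CONFIG is still in flight.
*/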
static void arcmsr_request_device_map(struct timer_list *t)
{
	struct AdapterControlBlock *acb = from_timer(acb, t, eternal_timer);

	if (acb->acb_flags & (ACB_F_MSG_GET_CONFIG | ACB_F_BUS_RESET | ACB_F_ABORT)) {
		mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
	} else {
		acb->fw_flag = FW_NORMAL;
		switch (acb->adapter_type) {
		case ACB_ADAPTER_TYPE_A: {
			struct MessageUnit_A __iomem *reg = acb->pmuA;

			writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
			break;
		}
		case ACB_ADAPTER_TYPE_B: {
			struct MessageUnit_B *reg = acb->pmuB;

			writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell);
			break;
		}
		case ACB_ADAPTER_TYPE_C: {
			struct MessageUnit_C __iomem *reg = acb->pmuC;

			writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
			writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
			break;
		}
		case ACB_ADAPTER_TYPE_D: {
			struct MessageUnit_D *reg = acb->pmuD;

			writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, reg->inbound_msgaddr0);
			break;
		}
		case ACB_ADAPTER_TYPE_E: {
			struct MessageUnit_E __iomem *reg = acb->pmuE;

			writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
			acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
			writel(acb->out_doorbell, &reg->iobound_doorbell);
			break;
		}
		case ACB_ADAPTER_TYPE_F: {
			struct MessageUnit_F __iomem *reg = acb->pmuF;
			uint32_t outMsg1 = readl(&reg->outbound_msgaddr1);

			if (!(outMsg1 & ARCMSR_HBFMU_MESSAGE_FIRMWARE_OK) ||
				(outMsg1 & ARCMSR_HBFMU_MESSAGE_NO_VOLUME_CHANGE))
				goto nxt6s;
			writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
			acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
			writel(acb->out_doorbell, &reg->iobound_doorbell);
			break;
		}
		default:
			return;
		}
		acb->acb_flags |= ACB_F_MSG_GET_CONFIG;
nxt6s:
		mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
	}
}
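
/*
** The following helpers tell the firmware to start background rebuild on
** the respective adapter families; arcmsr_start_adapter_bgrb() below picks
** the right one.
*/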
static void arcmsr_hbaA_start_bgrb(struct AdapterControlBlock *acb)
{
	struct MessageUnit_A __iomem *reg = acb->pmuA;

	acb->acb_flags |= ACB_F_MSG_START_BGRB;
	writel(ARCMSR_INBOUND_MESG0_START_BGRB, &reg->inbound_msgaddr0);
	if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
		printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background "
			"rebuild' timeout\n", acb->host->host_no);
	}
}
static void arcmsr_hbaB_start_bgrb(struct AdapterControlBlock *acb)
{
	struct MessageUnit_B *reg = acb->pmuB;

	acb->acb_flags |= ACB_F_MSG_START_BGRB;
	writel(ARCMSR_MESSAGE_START_BGRB, reg->drv2iop_doorbell);
	if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
		printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background "
			"rebuild' timeout\n", acb->host->host_no);
	}
}
static void arcmsr_hbaC_start_bgrb(struct AdapterControlBlock *pACB)
{
	struct MessageUnit_C __iomem *phbcmu = pACB->pmuC;

	pACB->acb_flags |= ACB_F_MSG_START_BGRB;
	writel(ARCMSR_INBOUND_MESG0_START_BGRB, &phbcmu->inbound_msgaddr0);
	writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &phbcmu->inbound_doorbell);
	if (!arcmsr_hbaC_wait_msgint_ready(pACB)) {
		printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background "
			"rebuild' timeout\n", pACB->host->host_no);
	}
}
static void arcmsr_hbaD_start_bgrb(struct AdapterControlBlock *pACB)
{
	struct MessageUnit_D *pmu = pACB->pmuD;

	pACB->acb_flags |= ACB_F_MSG_START_BGRB;
	writel(ARCMSR_INBOUND_MESG0_START_BGRB, pmu->inbound_msgaddr0);
	if (!arcmsr_hbaD_wait_msgint_ready(pACB)) {
		pr_notice("arcmsr%d: wait 'start adapter "
			"background rebuild' timeout\n", pACB->host->host_no);
	}
}
static void arcmsr_hbaE_start_bgrb(struct AdapterControlBlock *pACB)
{
	struct MessageUnit_E __iomem *pmu = pACB->pmuE;

	pACB->acb_flags |= ACB_F_MSG_START_BGRB;
	writel(ARCMSR_INBOUND_MESG0_START_BGRB, &pmu->inbound_msgaddr0);
	pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
	writel(pACB->out_doorbell, &pmu->iobound_doorbell);
	if (!arcmsr_hbaE_wait_msgint_ready(pACB)) {
		pr_notice("arcmsr%d: wait 'start adapter "
			"background rebuild' timeout\n", pACB->host->host_no);
	}
}
static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A:
		arcmsr_hbaA_start_bgrb(acb);
		break;
	case ACB_ADAPTER_TYPE_B:
		arcmsr_hbaB_start_bgrb(acb);
		break;
	case ACB_ADAPTER_TYPE_C:
		arcmsr_hbaC_start_bgrb(acb);
		break;
	case ACB_ADAPTER_TYPE_D:
		arcmsr_hbaD_start_bgrb(acb);
		break;
	case ACB_ADAPTER_TYPE_E:
	case ACB_ADAPTER_TYPE_F:
		arcmsr_hbaE_start_bgrb(acb);
		break;
	}
}
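
/*
** Drain any pending doorbell/Qbuffer data left over from before the reset
** so the driver and IOP start from a clean handshake state.
*/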
static void arcmsr_clear_doorbell_queue_buffer(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;
		uint32_t outbound_doorbell;

		/* empty doorbell Qbuffer if door bell ringed */
		outbound_doorbell = readl(&reg->outbound_doorbell);
		/* clear doorbell interrupt */
		writel(outbound_doorbell, &reg->outbound_doorbell);
		writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, &reg->inbound_doorbell);
		break;
	}
	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;
		uint32_t outbound_doorbell, i;

		writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
		writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell);
		/* let IOP know data has been read */
		for (i = 0; i < 200; i++) {
			msleep(20);
			outbound_doorbell = readl(reg->iop2drv_doorbell);
			if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK) {
				writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
				writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell);
			} else
				break;
		}
		break;
	}
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C __iomem *reg = acb->pmuC;
		uint32_t outbound_doorbell, i;

		/* empty doorbell Qbuffer if door bell ringed */
		outbound_doorbell = readl(&reg->outbound_doorbell);
		writel(outbound_doorbell, &reg->outbound_doorbell_clear);
		writel(ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK, &reg->inbound_doorbell);
		for (i = 0; i < 200; i++) {
			msleep(20);
			outbound_doorbell = readl(&reg->outbound_doorbell);
			if (outbound_doorbell &
				ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK) {
				writel(outbound_doorbell,
					&reg->outbound_doorbell_clear);
				writel(ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK,
					&reg->inbound_doorbell);
			} else
				break;
		}
		break;
	}
	case ACB_ADAPTER_TYPE_D: {
		struct MessageUnit_D *reg = acb->pmuD;
		uint32_t outbound_doorbell, i;

		/* empty doorbell Qbuffer if door bell ringed */
		outbound_doorbell = readl(reg->outbound_doorbell);
		writel(outbound_doorbell, reg->outbound_doorbell);
		writel(ARCMSR_ARC1214_DRV2IOP_DATA_OUT_READ,
			reg->inbound_doorbell);
		for (i = 0; i < 200; i++) {
			msleep(20);
			outbound_doorbell = readl(reg->outbound_doorbell);
			if (outbound_doorbell &
				ARCMSR_ARC1214_IOP2DRV_DATA_WRITE_OK) {
				writel(outbound_doorbell,
					reg->outbound_doorbell);
				writel(ARCMSR_ARC1214_DRV2IOP_DATA_OUT_READ,
					reg->inbound_doorbell);
			} else
				break;
		}
		break;
	}
	case ACB_ADAPTER_TYPE_E:
	case ACB_ADAPTER_TYPE_F: {
		struct MessageUnit_E __iomem *reg = acb->pmuE;
		uint32_t i, tmp;

		acb->in_doorbell = readl(&reg->iobound_doorbell);
		writel(0, &reg->host_int_status); /* clear interrupt */
		acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_DATA_READ_OK;
		writel(acb->out_doorbell, &reg->iobound_doorbell);
		for (i = 0; i < 200; i++) {
			msleep(20);
			tmp = acb->in_doorbell;
			acb->in_doorbell = readl(&reg->iobound_doorbell);
			if ((tmp ^ acb->in_doorbell) & ARCMSR_HBEMU_IOP2DRV_DATA_WRITE_OK) {
				writel(0, &reg->host_int_status); /* clear interrupt */
				acb->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_DATA_READ_OK;
				writel(acb->out_doorbell, &reg->iobound_doorbell);
			} else
				break;
		}
		break;
	}
	}
}
static void arcmsr_enable_eoi_mode(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A:
		return;
	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;

		writel(ARCMSR_MESSAGE_ACTIVE_EOI_MODE, reg->drv2iop_doorbell);
		if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
			printk(KERN_NOTICE "ARCMSR IOP enables EOI_MODE TIMEOUT");
			return;
		}
		break;
	}
	case ACB_ADAPTER_TYPE_C:
		return;
	}
	return;
}
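
/*
** Issue a chip-level reset: back up the first 64 bytes of PCI config space,
** trigger the family-specific reset sequence, then restore the config
** space afterwards.
*/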
static void arcmsr_hardware_reset(struct AdapterControlBlock *acb)
{
	uint8_t value[64];
	int i, count = 0;
	struct MessageUnit_A __iomem *pmuA = acb->pmuA;
	struct MessageUnit_C __iomem *pmuC = acb->pmuC;
	struct MessageUnit_D *pmuD = acb->pmuD;

	/* backup pci config data */
	printk(KERN_NOTICE "arcmsr%d: executing hw bus reset .....\n", acb->host->host_no);
	for (i = 0; i < 64; i++) {
		pci_read_config_byte(acb->pdev, i, &value[i]);
	}
	/* hardware reset signal */
	if (acb->dev_id == 0x1680) {
		writel(ARCMSR_ARC1680_BUS_RESET, &pmuA->reserved1[0]);
	} else if (acb->dev_id == 0x1880) {
		do {
			count++;
			writel(0xF, &pmuC->write_sequence);
			writel(0x4, &pmuC->write_sequence);
			writel(0xB, &pmuC->write_sequence);
			writel(0x2, &pmuC->write_sequence);
			writel(0x7, &pmuC->write_sequence);
			writel(0xD, &pmuC->write_sequence);
		} while (((readl(&pmuC->host_diagnostic) & ARCMSR_ARC1880_DiagWrite_ENABLE) == 0) && (count < 5));
		writel(ARCMSR_ARC1880_RESET_ADAPTER, &pmuC->host_diagnostic);
	} else if (acb->dev_id == 0x1884) {
		struct MessageUnit_E __iomem *pmuE = acb->pmuE;

		do {
			count++;
			writel(0x4, &pmuE->write_sequence_3xxx);
			writel(0xB, &pmuE->write_sequence_3xxx);
			writel(0x2, &pmuE->write_sequence_3xxx);
			writel(0x7, &pmuE->write_sequence_3xxx);
			writel(0xD, &pmuE->write_sequence_3xxx);
		} while (((readl(&pmuE->host_diagnostic_3xxx) &
			ARCMSR_ARC1884_DiagWrite_ENABLE) == 0) && (count < 5));
		writel(ARCMSR_ARC188X_RESET_ADAPTER, &pmuE->host_diagnostic_3xxx);
	} else if (acb->dev_id == 0x1214) {
		writel(0x20, pmuD->reset_request);
	} else {
		pci_write_config_byte(acb->pdev, 0x84, 0x20);
	}
	msleep(2000);
	/* write back pci config data */
	for (i = 0; i < 64; i++) {
		pci_write_config_byte(acb->pdev, i, value[i]);
	}
	msleep(1000);
	return;
}
static bool arcmsr_reset_in_progress(struct AdapterControlBlock *acb)
{
	bool rtn = true;

	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;

		rtn = ((readl(&reg->outbound_msgaddr1) &
			ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0) ? true : false;
		break;
	}
	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;

		rtn = ((readl(reg->iop2drv_doorbell) &
			ARCMSR_MESSAGE_FIRMWARE_OK) == 0) ? true : false;
		break;
	}
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C __iomem *reg = acb->pmuC;

		rtn = (readl(&reg->host_diagnostic) & 0x04) ? true : false;
		break;
	}
	case ACB_ADAPTER_TYPE_D: {
		struct MessageUnit_D *reg = acb->pmuD;

		rtn = ((readl(reg->sample_at_reset) & 0x80) == 0) ?
			true : false;
		break;
	}
	case ACB_ADAPTER_TYPE_E:
	case ACB_ADAPTER_TYPE_F: {
		struct MessageUnit_E __iomem *reg = acb->pmuE;

		rtn = (readl(&reg->host_diagnostic_3xxx) &
			ARCMSR_ARC188X_RESET_ADAPTER) ? true : false;
		break;
	}
	}
	return rtn;
}
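
/*
** Bring the IOP to an operational state: wait for firmware, confirm the
** CCB addresses, start background rebuild, drain doorbells and finally
** re-enable outbound interrupts.
*/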
static void arcmsr_iop_init(struct AdapterControlBlock *acb)
{
	uint32_t intmask_org;

	/* disable all outbound interrupt */
	intmask_org = arcmsr_disable_outbound_ints(acb);
	arcmsr_wait_firmware_ready(acb);
	arcmsr_iop_confirm(acb);
	/* start background rebuild */
	arcmsr_start_adapter_bgrb(acb);
	/* empty doorbell Qbuffer if door bell ringed */
	arcmsr_clear_doorbell_queue_buffer(acb);
	arcmsr_enable_eoi_mode(acb);
	/* enable outbound Post Queue, outbound doorbell Interrupt */
	arcmsr_enable_outbound_ints(acb, intmask_org);
	acb->acb_flags |= ACB_F_IOP_INITED;
}
static uint8_t arcmsr_iop_reset(struct AdapterControlBlock *acb)
{
	struct CommandControlBlock *ccb;
	uint32_t intmask_org;
	uint8_t rtnval = 0x00;
	int i;
	unsigned long flags;

	if (atomic_read(&acb->ccboutstandingcount) != 0) {
		/* disable all outbound interrupt */
		intmask_org = arcmsr_disable_outbound_ints(acb);
		/* talk to iop 331 outstanding command aborted */
		rtnval = arcmsr_abort_allcmd(acb);
		/* clear all outbound posted Q */
		arcmsr_done4abort_postqueue(acb);
		for (i = 0; i < acb->maxFreeCCB; i++) {
			ccb = acb->pccb_pool[i];
			if (ccb->startdone == ARCMSR_CCB_START) {
				scsi_dma_unmap(ccb->pcmd);
				ccb->startdone = ARCMSR_CCB_DONE;
				spin_lock_irqsave(&acb->ccblist_lock, flags);
				list_add_tail(&ccb->list, &acb->ccb_free_list);
				spin_unlock_irqrestore(&acb->ccblist_lock, flags);
			}
		}
		atomic_set(&acb->ccboutstandingcount, 0);
		/* enable all outbound interrupt */
		arcmsr_enable_outbound_ints(acb, intmask_org);
	}
	return rtnval;
}
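
/*
** SCSI error-handler bus reset entry point: tries to quiesce outstanding
** commands, performs a hardware reset when needed, and waits (with a retry
** limit) for the controller to come back before re-initializing the IOP.
*/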
static int arcmsr_bus_reset(struct scsi_cmnd *cmd)
{
	struct AdapterControlBlock *acb;
	int retry_count = 0;
	int rtn = FAILED;

	acb = (struct AdapterControlBlock *) cmd->device->host->hostdata;
	if (acb->acb_flags & ACB_F_ADAPTER_REMOVED)
		return SUCCESS;
	pr_notice("arcmsr: executing bus reset eh.....num_resets = %d,"
		" num_aborts = %d\n", acb->num_resets, acb->num_aborts);
	acb->num_resets++;

	if (acb->acb_flags & ACB_F_BUS_RESET) {
		long timeout;

		pr_notice("arcmsr: there is a bus reset eh proceeding...\n");
		timeout = wait_event_timeout(wait_q, (acb->acb_flags
			& ACB_F_BUS_RESET) == 0, 220 * HZ);
		if (timeout)
			return SUCCESS;
	}
	acb->acb_flags |= ACB_F_BUS_RESET;
	if (!arcmsr_iop_reset(acb)) {
		arcmsr_hardware_reset(acb);
		acb->acb_flags &= ~ACB_F_IOP_INITED;
wait_reset_done:
		ssleep(ARCMSR_SLEEPTIME);
		if (arcmsr_reset_in_progress(acb)) {
			if (retry_count > ARCMSR_RETRYCOUNT) {
				acb->fw_flag = FW_DEADLOCK;
				pr_notice("arcmsr%d: waiting for hw bus reset"
					" return, RETRY TERMINATED!!\n",
					acb->host->host_no);
				return FAILED;
			}
			retry_count++;
			goto wait_reset_done;
		}
		arcmsr_iop_init(acb);
		acb->fw_flag = FW_NORMAL;
		mod_timer(&acb->eternal_timer, jiffies +
			msecs_to_jiffies(6 * HZ));
		acb->acb_flags &= ~ACB_F_BUS_RESET;
		rtn = SUCCESS;
		pr_notice("arcmsr: scsi bus reset eh returns with success\n");
	} else {
		acb->acb_flags &= ~ACB_F_BUS_RESET;
		acb->fw_flag = FW_NORMAL;
		mod_timer(&acb->eternal_timer, jiffies +
			msecs_to_jiffies(6 * HZ));
		rtn = SUCCESS;
	}
	return rtn;
}
static int arcmsr_abort_one_cmd(struct AdapterControlBlock *acb,
		struct CommandControlBlock *ccb)
{
	int rtn;

	rtn = arcmsr_polling_ccbdone(acb, ccb);
	return rtn;
}
static int arcmsr_abort(struct scsi_cmnd *cmd)
{
	struct AdapterControlBlock *acb =
		(struct AdapterControlBlock *)cmd->device->host->hostdata;
	int i = 0;
	int rtn = FAILED;
	uint32_t intmask_org;

	if (acb->acb_flags & ACB_F_ADAPTER_REMOVED)
		return SUCCESS;
	printk(KERN_NOTICE
		"arcmsr%d: abort device command of scsi id = %d lun = %d\n",
		acb->host->host_no, cmd->device->id, (u32)cmd->device->lun);
	acb->acb_flags |= ACB_F_ABORT;
	acb->num_aborts++;
	/*
	************************************************
	** the all interrupt service routine is locked
	** we need to handle it as soon as possible and exit
	************************************************
	*/
	if (!atomic_read(&acb->ccboutstandingcount)) {
		acb->acb_flags &= ~ACB_F_ABORT;
		return rtn;
	}

	intmask_org = arcmsr_disable_outbound_ints(acb);
	for (i = 0; i < acb->maxFreeCCB; i++) {
		struct CommandControlBlock *ccb = acb->pccb_pool[i];

		if (ccb->startdone == ARCMSR_CCB_START && ccb->pcmd == cmd) {
			ccb->startdone = ARCMSR_CCB_ABORTED;
			rtn = arcmsr_abort_one_cmd(acb, ccb);
			break;
		}
	}
	acb->acb_flags &= ~ACB_F_ABORT;
	arcmsr_enable_outbound_ints(acb, intmask_org);
	return rtn;
}
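
/*
** .info handler for the SCSI host template: builds a description string
** from the PCI device ID (controller family and RAID6 capability).
*/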
static const char *arcmsr_info(struct Scsi_Host *host)
{
	struct AdapterControlBlock *acb =
		(struct AdapterControlBlock *) host->hostdata;
	static char buf[256];
	char *type;
	int raid6 = 1;

	switch (acb->pdev->device) {
	case PCI_DEVICE_ID_ARECA_1110:
	case PCI_DEVICE_ID_ARECA_1200:
	case PCI_DEVICE_ID_ARECA_1202:
	case PCI_DEVICE_ID_ARECA_1210:
		raid6 = 0;
		fallthrough;
	case PCI_DEVICE_ID_ARECA_1120:
	case PCI_DEVICE_ID_ARECA_1130:
	case PCI_DEVICE_ID_ARECA_1160:
	case PCI_DEVICE_ID_ARECA_1170:
	case PCI_DEVICE_ID_ARECA_1201:
	case PCI_DEVICE_ID_ARECA_1203:
	case PCI_DEVICE_ID_ARECA_1220:
	case PCI_DEVICE_ID_ARECA_1230:
	case PCI_DEVICE_ID_ARECA_1260:
	case PCI_DEVICE_ID_ARECA_1270:
	case PCI_DEVICE_ID_ARECA_1280:
		type = "SATA";
		break;
	case PCI_DEVICE_ID_ARECA_1214:
	case PCI_DEVICE_ID_ARECA_1380:
	case PCI_DEVICE_ID_ARECA_1381:
	case PCI_DEVICE_ID_ARECA_1680:
	case PCI_DEVICE_ID_ARECA_1681:
	case PCI_DEVICE_ID_ARECA_1880:
	case PCI_DEVICE_ID_ARECA_1884:
		type = "SAS/SATA";
		break;
	case PCI_DEVICE_ID_ARECA_1886:
		type = "NVMe/SAS/SATA";
		break;
	default:
		type = "unknown";
		raid6 = 0;
		break;
	}
	sprintf(buf, "Areca %s RAID Controller %s\narcmsr version %s\n",
		type, raid6 ? "(RAID6 capable)" : "", ARCMSR_DRIVER_VERSION);
	return buf;
}