// SPDX-License-Identifier: GPL-2.0-only
/*
 * HighPoint RR3xxx/4xxx controller driver for Linux
 * Copyright (C) 2006-2015 HighPoint Technologies, Inc. All Rights Reserved.
 *
 * Please report bugs/comments/suggestions to linux@highpoint-tech.com
 *
 * For more information, visit http://www.highpoint-tech.com
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/div64.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_host.h>

#include "hptiop.h"

MODULE_AUTHOR("HighPoint Technologies, Inc.");
MODULE_DESCRIPTION("HighPoint RocketRAID 3xxx/4xxx Controller Driver");
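/*
 * The driver handles three generations of RocketRAID hardware, dispatched
 * through struct hptiop_adapter_ops: ITL (Intel IOP based), MV (Marvell
 * based) and MVFrey (later Marvell based).  Each family has its own
 * register layout and request-queue scheme; the shared code below only
 * talks to the hardware through the ops table.
 */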
static char driver_name[] = "hptiop";
static const char driver_name_long[] = "RocketRAID 3xxx/4xxx Controller driver";
static const char driver_ver[] = "v1.10.0";

static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec);
static void hptiop_finish_scsi_req(struct hptiop_hba *hba, u32 tag,
				struct hpt_iop_request_scsi_command *req);
static void hptiop_host_request_callback_itl(struct hptiop_hba *hba, u32 tag);
static void hptiop_iop_request_callback_itl(struct hptiop_hba *hba, u32 tag);
static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg);
static int iop_wait_ready_itl(struct hptiop_hba *hba, u32 millisec)
{
	u32 req = 0;
	int i;

	for (i = 0; i < millisec; i++) {
		req = readl(&hba->u.itl.iop->inbound_queue);
		if (req != IOPMU_QUEUE_EMPTY)
			break;
		msleep(1);
	}

	if (req != IOPMU_QUEUE_EMPTY) {
		writel(req, &hba->u.itl.iop->outbound_queue);
		readl(&hba->u.itl.iop->outbound_intstatus);
		return 0;
	}

	return -1;
}

static int iop_wait_ready_mv(struct hptiop_hba *hba, u32 millisec)
{
	return iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec);
}

static int iop_wait_ready_mvfrey(struct hptiop_hba *hba, u32 millisec)
{
	return iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec);
}

static void hptiop_request_callback_itl(struct hptiop_hba *hba, u32 tag)
{
	if (tag & IOPMU_QUEUE_ADDR_HOST_BIT)
		hptiop_host_request_callback_itl(hba,
				tag & ~IOPMU_QUEUE_ADDR_HOST_BIT);
	else
		hptiop_iop_request_callback_itl(hba, tag);
}
static void hptiop_drain_outbound_queue_itl(struct hptiop_hba *hba)
{
	u32 req;

	while ((req = readl(&hba->u.itl.iop->outbound_queue)) !=
			IOPMU_QUEUE_EMPTY) {

		if (req & IOPMU_QUEUE_MASK_HOST_BITS)
			hptiop_request_callback_itl(hba, req);
		else {
			struct hpt_iop_request_header __iomem * p;

			p = (struct hpt_iop_request_header __iomem *)
				((char __iomem *)hba->u.itl.iop + req);

			if (readl(&p->flags) & IOP_REQUEST_FLAG_SYNC_REQUEST) {
				if (readl(&p->context))
					hptiop_request_callback_itl(hba, req);
				else
					writel(1, &p->context);
			}
			else
				hptiop_request_callback_itl(hba, req);
		}
	}
}

static int iop_intr_itl(struct hptiop_hba *hba)
{
	struct hpt_iopmu_itl __iomem *iop = hba->u.itl.iop;
	void __iomem *plx = hba->u.itl.plx;
	u32 status;
	int ret = 0;

	if (plx && readl(plx + 0x11C5C) & 0xf)
		writel(1, plx + 0x11C60);

	status = readl(&iop->outbound_intstatus);

	if (status & IOPMU_OUTBOUND_INT_MSG0) {
		u32 msg = readl(&iop->outbound_msgaddr0);

		dprintk("received outbound msg %x\n", msg);
		writel(IOPMU_OUTBOUND_INT_MSG0, &iop->outbound_intstatus);
		hptiop_message_callback(hba, msg);
		ret = 1;
	}

	if (status & IOPMU_OUTBOUND_INT_POSTQUEUE) {
		hptiop_drain_outbound_queue_itl(hba);
		ret = 1;
	}

	return ret;
}
static u64 mv_outbound_read(struct hpt_iopmu_mv __iomem *mu)
{
	u32 outbound_tail = readl(&mu->outbound_tail);
	u32 outbound_head = readl(&mu->outbound_head);

	if (outbound_tail != outbound_head) {
		u64 p;

		memcpy_fromio(&p, &mu->outbound_q[mu->outbound_tail], 8);
		outbound_tail++;

		if (outbound_tail == MVIOP_QUEUE_LEN)
			outbound_tail = 0;
		writel(outbound_tail, &mu->outbound_tail);
		return p;
	} else
		return 0;
}

static void mv_inbound_write(u64 p, struct hptiop_hba *hba)
{
	u32 inbound_head = readl(&hba->u.mv.mu->inbound_head);
	u32 head = inbound_head + 1;

	if (head == MVIOP_QUEUE_LEN)
		head = 0;

	memcpy_toio(&hba->u.mv.mu->inbound_q[inbound_head], &p, 8);
	writel(head, &hba->u.mv.mu->inbound_head);
	writel(MVIOP_MU_INBOUND_INT_POSTQUEUE,
		&hba->u.mv.regs->inbound_doorbell);
}
static void hptiop_request_callback_mv(struct hptiop_hba *hba, u64 tag)
{
	u32 req_type = (tag >> 5) & 0x7;
	struct hpt_iop_request_scsi_command *req;

	dprintk("hptiop_request_callback_mv: tag=%llx\n", tag);

	BUG_ON((tag & MVIOP_MU_QUEUE_REQUEST_RETURN_CONTEXT) == 0);

	switch (req_type) {
	case IOP_REQUEST_TYPE_GET_CONFIG:
	case IOP_REQUEST_TYPE_SET_CONFIG:
		hba->msg_done = 1;
		break;

	case IOP_REQUEST_TYPE_SCSI_COMMAND:
		req = hba->reqs[tag >> 8].req_virt;
		if (likely(tag & MVIOP_MU_QUEUE_REQUEST_RESULT_BIT))
			req->header.result = cpu_to_le32(IOP_RESULT_SUCCESS);

		hptiop_finish_scsi_req(hba, tag>>8, req);
		break;

	default:
		break;
	}
}

static int iop_intr_mv(struct hptiop_hba *hba)
{
	u32 status;
	int ret = 0;

	status = readl(&hba->u.mv.regs->outbound_doorbell);
	writel(~status, &hba->u.mv.regs->outbound_doorbell);

	if (status & MVIOP_MU_OUTBOUND_INT_MSG) {
		u32 msg;
		msg = readl(&hba->u.mv.mu->outbound_msg);
		dprintk("received outbound msg %x\n", msg);
		hptiop_message_callback(hba, msg);
		ret = 1;
	}

	if (status & MVIOP_MU_OUTBOUND_INT_POSTQUEUE) {
		u64 tag;

		while ((tag = mv_outbound_read(hba->u.mv.mu)))
			hptiop_request_callback_mv(hba, tag);
		ret = 1;
	}

	return ret;
}

static void hptiop_request_callback_mvfrey(struct hptiop_hba *hba, u32 _tag)
{
	u32 req_type = _tag & 0xf;
	struct hpt_iop_request_scsi_command *req;

	switch (req_type) {
	case IOP_REQUEST_TYPE_GET_CONFIG:
	case IOP_REQUEST_TYPE_SET_CONFIG:
		hba->msg_done = 1;
		break;

	case IOP_REQUEST_TYPE_SCSI_COMMAND:
		req = hba->reqs[(_tag >> 4) & 0xff].req_virt;
		if (likely(_tag & IOPMU_QUEUE_REQUEST_RESULT_BIT))
			req->header.result = IOP_RESULT_SUCCESS;
		hptiop_finish_scsi_req(hba, (_tag >> 4) & 0xff, req);
		break;

	default:
		break;
	}
}
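/*
 * MVFrey completions arrive through an outbound list in host memory: the
 * firmware publishes its current position in *outlist_cptr (a shadow copy
 * pointer), and the handler below walks the list from outlist_rptr up to
 * that position, re-reading the shadow pointer until it stops moving.
 */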
static int iop_intr_mvfrey(struct hptiop_hba *hba)
{
	u32 _tag, status, cptr, cur_rptr;
	int ret = 0;

	if (hba->initialized)
		writel(0, &(hba->u.mvfrey.mu->pcie_f0_int_enable));

	status = readl(&(hba->u.mvfrey.mu->f0_doorbell));
	if (status) {
		writel(status, &(hba->u.mvfrey.mu->f0_doorbell));
		if (status & CPU_TO_F0_DRBL_MSG_BIT) {
			u32 msg = readl(&(hba->u.mvfrey.mu->cpu_to_f0_msg_a));
			dprintk("received outbound msg %x\n", msg);
			hptiop_message_callback(hba, msg);
		}
		ret = 1;
	}

	status = readl(&(hba->u.mvfrey.mu->isr_cause));
	if (status) {
		writel(status, &(hba->u.mvfrey.mu->isr_cause));
		do {
			cptr = *hba->u.mvfrey.outlist_cptr & 0xff;
			cur_rptr = hba->u.mvfrey.outlist_rptr;
			while (cur_rptr != cptr) {
				cur_rptr++;
				if (cur_rptr == hba->u.mvfrey.list_count)
					cur_rptr = 0;

				_tag = hba->u.mvfrey.outlist[cur_rptr].val;
				BUG_ON(!(_tag & IOPMU_QUEUE_MASK_HOST_BITS));
				hptiop_request_callback_mvfrey(hba, _tag);
				ret = 1;
			}
			hba->u.mvfrey.outlist_rptr = cur_rptr;
		} while (cptr != (*hba->u.mvfrey.outlist_cptr & 0xff));
	}

	if (hba->initialized)
		writel(0x1010, &(hba->u.mvfrey.mu->pcie_f0_int_enable));

	return ret;
}
static int iop_send_sync_request_itl(struct hptiop_hba *hba,
					void __iomem *_req, u32 millisec)
{
	struct hpt_iop_request_header __iomem *req = _req;
	u32 i;

	writel(readl(&req->flags) | IOP_REQUEST_FLAG_SYNC_REQUEST, &req->flags);
	writel(0, &req->context);
	writel((unsigned long)req - (unsigned long)hba->u.itl.iop,
			&hba->u.itl.iop->inbound_queue);
	readl(&hba->u.itl.iop->outbound_intstatus);

	for (i = 0; i < millisec; i++) {
		iop_intr_itl(hba);
		if (readl(&req->context))
			return 0;
		msleep(1);
	}

	return -1;
}

static int iop_send_sync_request_mv(struct hptiop_hba *hba,
					u32 size_bits, u32 millisec)
{
	struct hpt_iop_request_header *reqhdr = hba->u.mv.internal_req;
	u32 i;

	hba->msg_done = 0;
	reqhdr->flags |= cpu_to_le32(IOP_REQUEST_FLAG_SYNC_REQUEST);
	mv_inbound_write(hba->u.mv.internal_req_phy |
			MVIOP_MU_QUEUE_ADDR_HOST_BIT | size_bits, hba);

	for (i = 0; i < millisec; i++) {
		iop_intr_mv(hba);
		if (hba->msg_done)
			break;
		msleep(1);
	}

	return hba->msg_done ? 0 : -1;
}

static int iop_send_sync_request_mvfrey(struct hptiop_hba *hba,
					u32 size_bits, u32 millisec)
{
	struct hpt_iop_request_header *reqhdr =
		hba->u.mvfrey.internal_req.req_virt;
	u32 i;

	hba->msg_done = 0;
	reqhdr->flags |= cpu_to_le32(IOP_REQUEST_FLAG_SYNC_REQUEST);
	hba->ops->post_req(hba, &(hba->u.mvfrey.internal_req));

	for (i = 0; i < millisec; i++) {
		iop_intr_mvfrey(hba);
		if (hba->msg_done)
			break;
		msleep(1);
	}
	return hba->msg_done ? 0 : -1;
}

static void hptiop_post_msg_itl(struct hptiop_hba *hba, u32 msg)
{
	writel(msg, &hba->u.itl.iop->inbound_msgaddr0);
	readl(&hba->u.itl.iop->outbound_intstatus);
}

static void hptiop_post_msg_mv(struct hptiop_hba *hba, u32 msg)
{
	writel(msg, &hba->u.mv.mu->inbound_msg);
	writel(MVIOP_MU_INBOUND_INT_MSG, &hba->u.mv.regs->inbound_doorbell);
	readl(&hba->u.mv.regs->inbound_doorbell);
}

static void hptiop_post_msg_mvfrey(struct hptiop_hba *hba, u32 msg)
{
	writel(msg, &(hba->u.mvfrey.mu->f0_to_cpu_msg_a));
	readl(&(hba->u.mvfrey.mu->f0_to_cpu_msg_a));
}
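/*
 * Synchronous messages are sent with interrupts disabled and completed by
 * polling: the per-family iop_intr() handler is invoked by hand (under
 * host_lock) once per millisecond until the firmware acknowledges the
 * message or the timeout expires.
 */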
static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec)
{
	u32 i;

	hba->msg_done = 0;
	hba->ops->disable_intr(hba);
	hba->ops->post_msg(hba, msg);

	for (i = 0; i < millisec; i++) {
		spin_lock_irq(hba->host->host_lock);
		hba->ops->iop_intr(hba);
		spin_unlock_irq(hba->host->host_lock);
		if (hba->msg_done)
			break;
		msleep(1);
	}

	hba->ops->enable_intr(hba);
	return hba->msg_done ? 0 : -1;
}
static int iop_get_config_itl(struct hptiop_hba *hba,
				struct hpt_iop_request_get_config *config)
{
	u32 req32;
	struct hpt_iop_request_get_config __iomem *req;

	req32 = readl(&hba->u.itl.iop->inbound_queue);
	if (req32 == IOPMU_QUEUE_EMPTY)
		return -1;

	req = (struct hpt_iop_request_get_config __iomem *)
			((unsigned long)hba->u.itl.iop + req32);

	writel(0, &req->header.flags);
	writel(IOP_REQUEST_TYPE_GET_CONFIG, &req->header.type);
	writel(sizeof(struct hpt_iop_request_get_config), &req->header.size);
	writel(IOP_RESULT_PENDING, &req->header.result);

	if (iop_send_sync_request_itl(hba, req, 20000)) {
		dprintk("Get config send cmd failed\n");
		return -1;
	}

	memcpy_fromio(config, req, sizeof(*config));
	writel(req32, &hba->u.itl.iop->outbound_queue);
	return 0;
}

static int iop_get_config_mv(struct hptiop_hba *hba,
				struct hpt_iop_request_get_config *config)
{
	struct hpt_iop_request_get_config *req = hba->u.mv.internal_req;

	req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
	req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_GET_CONFIG);
	req->header.size =
		cpu_to_le32(sizeof(struct hpt_iop_request_get_config));
	req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
	req->header.context = cpu_to_le32(IOP_REQUEST_TYPE_GET_CONFIG<<5);
	req->header.context_hi32 = 0;

	if (iop_send_sync_request_mv(hba, 0, 20000)) {
		dprintk("Get config send cmd failed\n");
		return -1;
	}

	memcpy(config, req, sizeof(struct hpt_iop_request_get_config));
	return 0;
}

static int iop_get_config_mvfrey(struct hptiop_hba *hba,
				struct hpt_iop_request_get_config *config)
{
	struct hpt_iop_request_get_config *info = hba->u.mvfrey.config;

	if (info->header.size != sizeof(struct hpt_iop_request_get_config) ||
	    info->header.type != IOP_REQUEST_TYPE_GET_CONFIG)
		return -1;

	config->interface_version = info->interface_version;
	config->firmware_version = info->firmware_version;
	config->max_requests = info->max_requests;
	config->request_size = info->request_size;
	config->max_sg_count = info->max_sg_count;
	config->data_transfer_length = info->data_transfer_length;
	config->alignment_mask = info->alignment_mask;
	config->max_devices = info->max_devices;
	config->sdram_size = info->sdram_size;

	return 0;
}
static int iop_set_config_itl(struct hptiop_hba *hba,
				struct hpt_iop_request_set_config *config)
{
	u32 req32;
	struct hpt_iop_request_set_config __iomem *req;

	req32 = readl(&hba->u.itl.iop->inbound_queue);
	if (req32 == IOPMU_QUEUE_EMPTY)
		return -1;

	req = (struct hpt_iop_request_set_config __iomem *)
			((unsigned long)hba->u.itl.iop + req32);

	memcpy_toio((u8 __iomem *)req + sizeof(struct hpt_iop_request_header),
		(u8 *)config + sizeof(struct hpt_iop_request_header),
		sizeof(struct hpt_iop_request_set_config) -
			sizeof(struct hpt_iop_request_header));

	writel(0, &req->header.flags);
	writel(IOP_REQUEST_TYPE_SET_CONFIG, &req->header.type);
	writel(sizeof(struct hpt_iop_request_set_config), &req->header.size);
	writel(IOP_RESULT_PENDING, &req->header.result);

	if (iop_send_sync_request_itl(hba, req, 20000)) {
		dprintk("Set config send cmd failed\n");
		return -1;
	}

	writel(req32, &hba->u.itl.iop->outbound_queue);
	return 0;
}

static int iop_set_config_mv(struct hptiop_hba *hba,
				struct hpt_iop_request_set_config *config)
{
	struct hpt_iop_request_set_config *req = hba->u.mv.internal_req;

	memcpy(req, config, sizeof(struct hpt_iop_request_set_config));
	req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
	req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_SET_CONFIG);
	req->header.size =
		cpu_to_le32(sizeof(struct hpt_iop_request_set_config));
	req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
	req->header.context = cpu_to_le32(IOP_REQUEST_TYPE_SET_CONFIG<<5);
	req->header.context_hi32 = 0;

	if (iop_send_sync_request_mv(hba, 0, 20000)) {
		dprintk("Set config send cmd failed\n");
		return -1;
	}

	return 0;
}

static int iop_set_config_mvfrey(struct hptiop_hba *hba,
				struct hpt_iop_request_set_config *config)
{
	struct hpt_iop_request_set_config *req =
		hba->u.mvfrey.internal_req.req_virt;

	memcpy(req, config, sizeof(struct hpt_iop_request_set_config));
	req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
	req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_SET_CONFIG);
	req->header.size =
		cpu_to_le32(sizeof(struct hpt_iop_request_set_config));
	req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
	req->header.context = cpu_to_le32(IOP_REQUEST_TYPE_SET_CONFIG<<5);
	req->header.context_hi32 = 0;

	if (iop_send_sync_request_mvfrey(hba, 0, 20000)) {
		dprintk("Set config send cmd failed\n");
		return -1;
	}

	return 0;
}
static void hptiop_enable_intr_itl(struct hptiop_hba *hba)
{
	writel(~(IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0),
		&hba->u.itl.iop->outbound_intmask);
}

static void hptiop_enable_intr_mv(struct hptiop_hba *hba)
{
	writel(MVIOP_MU_OUTBOUND_INT_POSTQUEUE | MVIOP_MU_OUTBOUND_INT_MSG,
		&hba->u.mv.regs->outbound_intmask);
}

static void hptiop_enable_intr_mvfrey(struct hptiop_hba *hba)
{
	writel(CPU_TO_F0_DRBL_MSG_BIT, &(hba->u.mvfrey.mu->f0_doorbell_enable));
	writel(0x1, &(hba->u.mvfrey.mu->isr_enable));
	writel(0x1010, &(hba->u.mvfrey.mu->pcie_f0_int_enable));
}

static int hptiop_initialize_iop(struct hptiop_hba *hba)
{
	/* enable interrupts */
	hba->ops->enable_intr(hba);

	hba->initialized = 1;

	/* start background tasks */
	if (iop_send_sync_msg(hba,
			IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000)) {
		printk(KERN_ERR "scsi%d: fail to start background task\n",
			hba->host->host_no);
		return -1;
	}
	return 0;
}
static void __iomem *hptiop_map_pci_bar(struct hptiop_hba *hba, int index)
{
	u32 mem_base_phy, length;
	void __iomem *mem_base_virt;

	struct pci_dev *pcidev = hba->pcidev;


	if (!(pci_resource_flags(pcidev, index) & IORESOURCE_MEM)) {
		printk(KERN_ERR "scsi%d: pci resource invalid\n",
				hba->host->host_no);
		return NULL;
	}

	mem_base_phy = pci_resource_start(pcidev, index);
	length = pci_resource_len(pcidev, index);
	mem_base_virt = ioremap(mem_base_phy, length);

	if (!mem_base_virt) {
		printk(KERN_ERR "scsi%d: Fail to ioremap memory space\n",
				hba->host->host_no);
		return NULL;
	}
	return mem_base_virt;
}

static int hptiop_map_pci_bar_itl(struct hptiop_hba *hba)
{
	struct pci_dev *pcidev = hba->pcidev;
	hba->u.itl.iop = hptiop_map_pci_bar(hba, 0);
	if (hba->u.itl.iop == NULL)
		return -1;
	if ((pcidev->device & 0xff00) == 0x4400) {
		hba->u.itl.plx = hba->u.itl.iop;
		hba->u.itl.iop = hptiop_map_pci_bar(hba, 2);
		if (hba->u.itl.iop == NULL) {
			iounmap(hba->u.itl.plx);
			return -1;
		}
	}
	return 0;
}

static void hptiop_unmap_pci_bar_itl(struct hptiop_hba *hba)
{
	if (hba->u.itl.plx)
		iounmap(hba->u.itl.plx);
	iounmap(hba->u.itl.iop);
}

static int hptiop_map_pci_bar_mv(struct hptiop_hba *hba)
{
	hba->u.mv.regs = hptiop_map_pci_bar(hba, 0);
	if (hba->u.mv.regs == NULL)
		return -1;

	hba->u.mv.mu = hptiop_map_pci_bar(hba, 2);
	if (hba->u.mv.mu == NULL) {
		iounmap(hba->u.mv.regs);
		return -1;
	}

	return 0;
}

static int hptiop_map_pci_bar_mvfrey(struct hptiop_hba *hba)
{
	hba->u.mvfrey.config = hptiop_map_pci_bar(hba, 0);
	if (hba->u.mvfrey.config == NULL)
		return -1;

	hba->u.mvfrey.mu = hptiop_map_pci_bar(hba, 2);
	if (hba->u.mvfrey.mu == NULL) {
		iounmap(hba->u.mvfrey.config);
		return -1;
	}

	return 0;
}

static void hptiop_unmap_pci_bar_mv(struct hptiop_hba *hba)
{
	iounmap(hba->u.mv.regs);
	iounmap(hba->u.mv.mu);
}

static void hptiop_unmap_pci_bar_mvfrey(struct hptiop_hba *hba)
{
	iounmap(hba->u.mvfrey.config);
	iounmap(hba->u.mvfrey.mu);
}
static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg)
{
	dprintk("iop message 0x%x\n", msg);

	if (msg == IOPMU_INBOUND_MSG0_NOP ||
		msg == IOPMU_INBOUND_MSG0_RESET_COMM)
		hba->msg_done = 1;

	if (!hba->initialized)
		return;

	if (msg == IOPMU_INBOUND_MSG0_RESET) {
		atomic_set(&hba->resetting, 0);
		wake_up(&hba->reset_wq);
	}
	else if (msg <= IOPMU_INBOUND_MSG0_MAX)
		hba->msg_done = 1;
}

static struct hptiop_request *get_req(struct hptiop_hba *hba)
{
	struct hptiop_request *ret;

	dprintk("get_req : req=%p\n", hba->req_list);

	ret = hba->req_list;
	if (ret)
		hba->req_list = ret->next;

	return ret;
}

static void free_req(struct hptiop_hba *hba, struct hptiop_request *req)
{
	dprintk("free_req(%d, %p)\n", req->index, req);
	req->next = hba->req_list;
	hba->req_list = req;
}
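/*
 * Translate a firmware completion into a SCSI midlayer result: map the
 * IOP_RESULT_* code to a DID_* host byte, set the residual transfer count,
 * copy sense data on check conditions, then complete the command and
 * return the request slot to the free list.
 */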
static void hptiop_finish_scsi_req(struct hptiop_hba *hba, u32 tag,
				struct hpt_iop_request_scsi_command *req)
{
	struct scsi_cmnd *scp;

	dprintk("hptiop_finish_scsi_req: req=%p, type=%d, "
			"result=%d, context=0x%x tag=%d\n",
			req, req->header.type, req->header.result,
			req->header.context, tag);

	BUG_ON(!req->header.result);
	BUG_ON(req->header.type != cpu_to_le32(IOP_REQUEST_TYPE_SCSI_COMMAND));

	scp = hba->reqs[tag].scp;

	if (HPT_SCP(scp)->mapped)
		scsi_dma_unmap(scp);

	switch (le32_to_cpu(req->header.result)) {
	case IOP_RESULT_SUCCESS:
		scsi_set_resid(scp,
			scsi_bufflen(scp) - le32_to_cpu(req->dataxfer_length));
		scp->result = (DID_OK<<16);
		break;
	case IOP_RESULT_BAD_TARGET:
		scp->result = (DID_BAD_TARGET<<16);
		break;
	case IOP_RESULT_BUSY:
		scp->result = (DID_BUS_BUSY<<16);
		break;
	case IOP_RESULT_RESET:
		scp->result = (DID_RESET<<16);
		break;
	case IOP_RESULT_FAIL:
		scp->result = (DID_ERROR<<16);
		break;
	case IOP_RESULT_INVALID_REQUEST:
		scp->result = (DID_ABORT<<16);
		break;
	case IOP_RESULT_CHECK_CONDITION:
		scsi_set_resid(scp,
			scsi_bufflen(scp) - le32_to_cpu(req->dataxfer_length));
		scp->result = SAM_STAT_CHECK_CONDITION;
		memcpy(scp->sense_buffer, &req->sg_list, SCSI_SENSE_BUFFERSIZE);
		goto skip_resid;

	default:
		scp->result = DRIVER_INVALID << 24 | DID_ABORT << 16;
		break;
	}

	scsi_set_resid(scp,
		scsi_bufflen(scp) - le32_to_cpu(req->dataxfer_length));

skip_resid:
	dprintk("scsi_done(%p)\n", scp);
	scp->scsi_done(scp);
	free_req(hba, &hba->reqs[tag]);
}
static void hptiop_host_request_callback_itl(struct hptiop_hba *hba, u32 _tag)
{
	struct hpt_iop_request_scsi_command *req;
	u32 tag;

	if (hba->iopintf_v2) {
		tag = _tag & ~IOPMU_QUEUE_REQUEST_RESULT_BIT;
		req = hba->reqs[tag].req_virt;
		if (likely(_tag & IOPMU_QUEUE_REQUEST_RESULT_BIT))
			req->header.result = cpu_to_le32(IOP_RESULT_SUCCESS);
	} else {
		tag = _tag;
		req = hba->reqs[tag].req_virt;
	}

	hptiop_finish_scsi_req(hba, tag, req);
}

static void hptiop_iop_request_callback_itl(struct hptiop_hba *hba, u32 tag)
{
	struct hpt_iop_request_header __iomem *req;
	struct hpt_iop_request_ioctl_command __iomem *p;
	struct hpt_ioctl_k *arg;

	req = (struct hpt_iop_request_header __iomem *)
		((unsigned long)hba->u.itl.iop + tag);
	dprintk("hptiop_iop_request_callback_itl: req=%p, type=%d, "
			"result=%d, context=0x%x tag=%d\n",
			req, readl(&req->type), readl(&req->result),
			readl(&req->context), tag);

	BUG_ON(!readl(&req->result));
	BUG_ON(readl(&req->type) != IOP_REQUEST_TYPE_IOCTL_COMMAND);

	p = (struct hpt_iop_request_ioctl_command __iomem *)req;
	arg = (struct hpt_ioctl_k *)(unsigned long)
		(readl(&req->context) |
			((u64)readl(&req->context_hi32)<<32));

	if (readl(&req->result) == IOP_RESULT_SUCCESS) {
		arg->result = HPT_IOCTL_RESULT_OK;

		if (arg->outbuf_size)
			memcpy_fromio(arg->outbuf,
				&p->buf[(readl(&p->inbuf_size) + 3)& ~3],
				arg->outbuf_size);

		if (arg->bytes_returned)
			*arg->bytes_returned = arg->outbuf_size;
	}
	else
		arg->result = HPT_IOCTL_RESULT_FAILED;

	arg->done(arg);
	writel(tag, &hba->u.itl.iop->outbound_queue);
}
static irqreturn_t hptiop_intr(int irq, void *dev_id)
{
	struct hptiop_hba  *hba = dev_id;
	int  handled;
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);
	handled = hba->ops->iop_intr(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	return handled;
}
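/*
 * Map the command's scatter/gather list into the controller's hpt_iopsg
 * format.  Returns the number of S/G descriptors built, or 0 for commands
 * without a data transfer.
 */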
static int hptiop_buildsgl(struct scsi_cmnd *scp, struct hpt_iopsg *psg)
{
	struct Scsi_Host *host = scp->device->host;
	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
	struct scatterlist *sg;
	int idx, nseg;

	nseg = scsi_dma_map(scp);
	BUG_ON(nseg < 0);
	if (!nseg)
		return 0;

	HPT_SCP(scp)->sgcnt = nseg;
	HPT_SCP(scp)->mapped = 1;

	BUG_ON(HPT_SCP(scp)->sgcnt > hba->max_sg_descriptors);

	scsi_for_each_sg(scp, sg, HPT_SCP(scp)->sgcnt, idx) {
		psg[idx].pci_address = cpu_to_le64(sg_dma_address(sg)) |
			hba->ops->host_phy_flag;
		psg[idx].size = cpu_to_le32(sg_dma_len(sg));
		psg[idx].eot = (idx == HPT_SCP(scp)->sgcnt - 1) ?
			cpu_to_le32(1) : 0;
	}
	return HPT_SCP(scp)->sgcnt;
}
static void hptiop_post_req_itl(struct hptiop_hba *hba,
					struct hptiop_request *_req)
{
	struct hpt_iop_request_header *reqhdr = _req->req_virt;

	reqhdr->context = cpu_to_le32(IOPMU_QUEUE_ADDR_HOST_BIT |
							(u32)_req->index);
	reqhdr->context_hi32 = 0;

	if (hba->iopintf_v2) {
		u32 size, size_bits;

		size = le32_to_cpu(reqhdr->size);
		if (size < 256)
			size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT;
		else if (size < 512)
			size_bits = IOPMU_QUEUE_ADDR_HOST_BIT;
		else
			size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT |
						IOPMU_QUEUE_ADDR_HOST_BIT;
		writel(_req->req_shifted_phy | size_bits,
			&hba->u.itl.iop->inbound_queue);
	} else
		writel(_req->req_shifted_phy | IOPMU_QUEUE_ADDR_HOST_BIT,
					&hba->u.itl.iop->inbound_queue);
}

static void hptiop_post_req_mv(struct hptiop_hba *hba,
					struct hptiop_request *_req)
{
	struct hpt_iop_request_header *reqhdr = _req->req_virt;
	u32 size, size_bit;

	reqhdr->context = cpu_to_le32(_req->index<<8 |
					IOP_REQUEST_TYPE_SCSI_COMMAND<<5);
	reqhdr->context_hi32 = 0;
	size = le32_to_cpu(reqhdr->size);

	if (size <= 256)
		size_bit = 0;
	else if (size <= 256*2)
		size_bit = 1;
	else if (size <= 256*3)
		size_bit = 2;
	else
		size_bit = 3;

	mv_inbound_write((_req->req_shifted_phy << 5) |
		MVIOP_MU_QUEUE_ADDR_HOST_BIT | size_bit, hba);
}
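/*
 * MVFrey requests are posted through an inbound list in host memory: the
 * low 14 bits of inlist_wptr index the list and wrap at list_count, and
 * CL_POINTER_TOGGLE is flipped on each wrap so the firmware can tell a
 * full list from an empty one.
 */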
static void hptiop_post_req_mvfrey(struct hptiop_hba *hba,
					struct hptiop_request *_req)
{
	struct hpt_iop_request_header *reqhdr = _req->req_virt;
	u32 index;

	reqhdr->flags |= cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT |
			IOP_REQUEST_FLAG_ADDR_BITS |
			((_req->req_shifted_phy >> 11) & 0xffff0000));
	reqhdr->context = cpu_to_le32(IOPMU_QUEUE_ADDR_HOST_BIT |
			(_req->index << 4) | reqhdr->type);
	reqhdr->context_hi32 = cpu_to_le32((_req->req_shifted_phy << 5) &
			0xffffffff);

	hba->u.mvfrey.inlist_wptr++;
	index = hba->u.mvfrey.inlist_wptr & 0x3fff;

	if (index == hba->u.mvfrey.list_count) {
		index = 0;
		hba->u.mvfrey.inlist_wptr &= ~0x3fff;
		hba->u.mvfrey.inlist_wptr ^= CL_POINTER_TOGGLE;
	}

	hba->u.mvfrey.inlist[index].addr =
			(dma_addr_t)_req->req_shifted_phy << 5;
	hba->u.mvfrey.inlist[index].intrfc_len = (reqhdr->size + 3) / 4;
	writel(hba->u.mvfrey.inlist_wptr,
		&(hba->u.mvfrey.mu->inbound_write_ptr));
	readl(&(hba->u.mvfrey.mu->inbound_write_ptr));
}
static int hptiop_reset_comm_itl(struct hptiop_hba *hba)
{
	return 0;
}

static int hptiop_reset_comm_mv(struct hptiop_hba *hba)
{
	return 0;
}

static int hptiop_reset_comm_mvfrey(struct hptiop_hba *hba)
{
	u32 list_count = hba->u.mvfrey.list_count;

	if (iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET_COMM, 3000))
		return -1;

	/* wait 100ms for MCU ready */
	msleep(100);

	writel(cpu_to_le32(hba->u.mvfrey.inlist_phy & 0xffffffff),
			&(hba->u.mvfrey.mu->inbound_base));
	writel(cpu_to_le32((hba->u.mvfrey.inlist_phy >> 16) >> 16),
			&(hba->u.mvfrey.mu->inbound_base_high));

	writel(cpu_to_le32(hba->u.mvfrey.outlist_phy & 0xffffffff),
			&(hba->u.mvfrey.mu->outbound_base));
	writel(cpu_to_le32((hba->u.mvfrey.outlist_phy >> 16) >> 16),
			&(hba->u.mvfrey.mu->outbound_base_high));

	writel(cpu_to_le32(hba->u.mvfrey.outlist_cptr_phy & 0xffffffff),
			&(hba->u.mvfrey.mu->outbound_shadow_base));
	writel(cpu_to_le32((hba->u.mvfrey.outlist_cptr_phy >> 16) >> 16),
			&(hba->u.mvfrey.mu->outbound_shadow_base_high));

	hba->u.mvfrey.inlist_wptr = (list_count - 1) | CL_POINTER_TOGGLE;
	*hba->u.mvfrey.outlist_cptr = (list_count - 1) | CL_POINTER_TOGGLE;
	hba->u.mvfrey.outlist_rptr = list_count - 1;
	return 0;
}
static int hptiop_queuecommand_lck(struct scsi_cmnd *scp,
				void (*done)(struct scsi_cmnd *))
{
	struct Scsi_Host *host = scp->device->host;
	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
	struct hpt_iop_request_scsi_command *req;
	int sg_count = 0;
	struct hptiop_request *_req;

	BUG_ON(!done);
	scp->scsi_done = done;

	_req = get_req(hba);
	if (_req == NULL) {
		dprintk("hptiop_queuecmd : no free req\n");
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	_req->scp = scp;

	dprintk("hptiop_queuecmd(scp=%p) %d/%d/%d/%llu cdb=(%08x-%08x-%08x-%08x) "
			"req_index=%d, req=%p\n",
			scp,
			host->host_no, scp->device->channel,
			scp->device->id, scp->device->lun,
			cpu_to_be32(((u32 *)scp->cmnd)[0]),
			cpu_to_be32(((u32 *)scp->cmnd)[1]),
			cpu_to_be32(((u32 *)scp->cmnd)[2]),
			cpu_to_be32(((u32 *)scp->cmnd)[3]),
			_req->index, _req->req_virt);

	scp->result = 0;

	if (scp->device->channel ||
			(scp->device->id > hba->max_devices) ||
			((scp->device->id == (hba->max_devices-1)) && scp->device->lun)) {
		scp->result = DID_BAD_TARGET << 16;
		free_req(hba, _req);
		goto cmd_done;
	}

	req = _req->req_virt;

	/* build S/G table */
	sg_count = hptiop_buildsgl(scp, req->sg_list);
	if (!sg_count)
		HPT_SCP(scp)->mapped = 0;

	req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
	req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_SCSI_COMMAND);
	req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
	req->dataxfer_length = cpu_to_le32(scsi_bufflen(scp));
	req->channel = scp->device->channel;
	req->target = scp->device->id;
	req->lun = scp->device->lun;
	req->header.size = cpu_to_le32(
				sizeof(struct hpt_iop_request_scsi_command)
				 - sizeof(struct hpt_iopsg)
				 + sg_count * sizeof(struct hpt_iopsg));

	memcpy(req->cdb, scp->cmnd, sizeof(req->cdb));
	hba->ops->post_req(hba, _req);
	return 0;

cmd_done:
	dprintk("scsi_done(scp=%p)\n", scp);
	scp->scsi_done(scp);
	return 0;
}

static DEF_SCSI_QCMD(hptiop_queuecommand)

static const char *hptiop_info(struct Scsi_Host *host)
{
	return driver_name_long;
}
static int hptiop_reset_hba(struct hptiop_hba *hba)
{
	if (atomic_xchg(&hba->resetting, 1) == 0) {
		atomic_inc(&hba->reset_count);
		hba->ops->post_msg(hba, IOPMU_INBOUND_MSG0_RESET);
	}

	wait_event_timeout(hba->reset_wq,
			atomic_read(&hba->resetting) == 0, 60 * HZ);

	if (atomic_read(&hba->resetting)) {
		/* IOP is in unknown state, abort reset */
		printk(KERN_ERR "scsi%d: reset failed\n", hba->host->host_no);
		return -1;
	}

	if (iop_send_sync_msg(hba,
		IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000)) {
		dprintk("scsi%d: fail to start background task\n",
				hba->host->host_no);
	}

	return 0;
}

static int hptiop_reset(struct scsi_cmnd *scp)
{
	struct hptiop_hba * hba = (struct hptiop_hba *)scp->device->host->hostdata;

	printk(KERN_WARNING "hptiop_reset(%d/%d/%d)\n",
	       scp->device->host->host_no, -1, -1);

	return hptiop_reset_hba(hba)? FAILED : SUCCESS;
}

static int hptiop_adjust_disk_queue_depth(struct scsi_device *sdev,
					  int queue_depth)
{
	struct hptiop_hba *hba = (struct hptiop_hba *)sdev->host->hostdata;

	if (queue_depth > hba->max_requests)
		queue_depth = hba->max_requests;
	return scsi_change_queue_depth(sdev, queue_depth);
}
static ssize_t hptiop_show_version(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%s\n", driver_ver);
}

static ssize_t hptiop_show_fw_version(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *host = class_to_shost(dev);
	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;

	return snprintf(buf, PAGE_SIZE, "%d.%d.%d.%d\n",
				hba->firmware_version >> 24,
				(hba->firmware_version >> 16) & 0xff,
				(hba->firmware_version >> 8) & 0xff,
				hba->firmware_version & 0xff);
}

static struct device_attribute hptiop_attr_version = {
	.attr = {
		.name = "driver-version",
		.mode = S_IRUGO,
	},
	.show = hptiop_show_version,
};

static struct device_attribute hptiop_attr_fw_version = {
	.attr = {
		.name = "firmware-version",
		.mode = S_IRUGO,
	},
	.show = hptiop_show_fw_version,
};

static struct device_attribute *hptiop_attrs[] = {
	&hptiop_attr_version,
	&hptiop_attr_fw_version,
	NULL
};

static int hptiop_slave_config(struct scsi_device *sdev)
{
	if (sdev->type == TYPE_TAPE)
		blk_queue_max_hw_sectors(sdev->request_queue, 8192);

	return 0;
}

static struct scsi_host_template driver_template = {
	.module                     = THIS_MODULE,
	.name                       = driver_name,
	.queuecommand               = hptiop_queuecommand,
	.eh_host_reset_handler      = hptiop_reset,
	.info                       = hptiop_info,
	.emulated                   = 0,
	.proc_name                  = driver_name,
	.shost_attrs                = hptiop_attrs,
	.slave_configure            = hptiop_slave_config,
	.this_id                    = -1,
	.change_queue_depth         = hptiop_adjust_disk_queue_depth,
};
static int hptiop_internal_memalloc_itl(struct hptiop_hba *hba)
{
	return 0;
}

static int hptiop_internal_memalloc_mv(struct hptiop_hba *hba)
{
	hba->u.mv.internal_req = dma_alloc_coherent(&hba->pcidev->dev,
			0x800, &hba->u.mv.internal_req_phy, GFP_KERNEL);
	if (hba->u.mv.internal_req)
		return 0;
	else
		return -1;
}

static int hptiop_internal_memalloc_mvfrey(struct hptiop_hba *hba)
{
	u32 list_count = readl(&hba->u.mvfrey.mu->inbound_conf_ctl);
	char *p;
	dma_addr_t phy;

	BUG_ON(hba->max_request_size == 0);

	if (list_count == 0) {
		BUG_ON(1);
		return -1;
	}

	list_count >>= 16;

	hba->u.mvfrey.list_count = list_count;
	hba->u.mvfrey.internal_mem_size = 0x800 +
			list_count * sizeof(struct mvfrey_inlist_entry) +
			list_count * sizeof(struct mvfrey_outlist_entry) +
			sizeof(int);

	p = dma_alloc_coherent(&hba->pcidev->dev,
			hba->u.mvfrey.internal_mem_size, &phy, GFP_KERNEL);
	if (!p)
		return -1;

	hba->u.mvfrey.internal_req.req_virt = p;
	hba->u.mvfrey.internal_req.req_shifted_phy = phy >> 5;
	hba->u.mvfrey.internal_req.scp = NULL;
	hba->u.mvfrey.internal_req.next = NULL;

	p += 0x800;
	phy += 0x800;

	hba->u.mvfrey.inlist = (struct mvfrey_inlist_entry *)p;
	hba->u.mvfrey.inlist_phy = phy;

	p += list_count * sizeof(struct mvfrey_inlist_entry);
	phy += list_count * sizeof(struct mvfrey_inlist_entry);

	hba->u.mvfrey.outlist = (struct mvfrey_outlist_entry *)p;
	hba->u.mvfrey.outlist_phy = phy;

	p += list_count * sizeof(struct mvfrey_outlist_entry);
	phy += list_count * sizeof(struct mvfrey_outlist_entry);

	hba->u.mvfrey.outlist_cptr = (__le32 *)p;
	hba->u.mvfrey.outlist_cptr_phy = phy;

	return 0;
}

static int hptiop_internal_memfree_itl(struct hptiop_hba *hba)
{
	return 0;
}

static int hptiop_internal_memfree_mv(struct hptiop_hba *hba)
{
	if (hba->u.mv.internal_req) {
		dma_free_coherent(&hba->pcidev->dev, 0x800,
			hba->u.mv.internal_req, hba->u.mv.internal_req_phy);
		return 0;
	} else
		return -1;
}

static int hptiop_internal_memfree_mvfrey(struct hptiop_hba *hba)
{
	if (hba->u.mvfrey.internal_req.req_virt) {
		dma_free_coherent(&hba->pcidev->dev,
				hba->u.mvfrey.internal_mem_size,
				hba->u.mvfrey.internal_req.req_virt,
				(dma_addr_t)
				hba->u.mvfrey.internal_req.req_shifted_phy << 5);
		return 0;
	} else
		return -1;
}
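/*
 * Probe sequence: enable the PCI device, set the DMA mask advertised by
 * the adapter ops, map the BARs, wait for firmware, exchange get_config /
 * set_config requests, carve out the 32-byte aligned request slots, then
 * register with the SCSI midlayer.  Each failure path unwinds the steps
 * taken so far.
 */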
static int hptiop_probe(struct pci_dev *pcidev, const struct pci_device_id *id)
{
	struct Scsi_Host *host = NULL;
	struct hptiop_hba *hba;
	struct hptiop_adapter_ops *iop_ops;
	struct hpt_iop_request_get_config iop_config;
	struct hpt_iop_request_set_config set_config;
	dma_addr_t start_phy;
	void *start_virt;
	u32 offset, i, req_size;
	int rc;

	dprintk("hptiop_probe(%p)\n", pcidev);

	if (pci_enable_device(pcidev)) {
		printk(KERN_ERR "hptiop: fail to enable pci device\n");
		return -ENODEV;
	}

	printk(KERN_INFO "adapter at PCI %d:%d:%d, IRQ %d\n",
		pcidev->bus->number, pcidev->devfn >> 3, pcidev->devfn & 7,
		pcidev->irq);

	pci_set_master(pcidev);

	/* Enable 64bit DMA if possible */
	iop_ops = (struct hptiop_adapter_ops *)id->driver_data;
	rc = dma_set_mask(&pcidev->dev,
			  DMA_BIT_MASK(iop_ops->hw_dma_bit_mask));
	if (rc)
		rc = dma_set_mask(&pcidev->dev, DMA_BIT_MASK(32));

	if (rc) {
		printk(KERN_ERR "hptiop: fail to set dma_mask\n");
		goto disable_pci_device;
	}

	if (pci_request_regions(pcidev, driver_name)) {
		printk(KERN_ERR "hptiop: pci_request_regions failed\n");
		goto disable_pci_device;
	}

	host = scsi_host_alloc(&driver_template, sizeof(struct hptiop_hba));
	if (!host) {
		printk(KERN_ERR "hptiop: fail to alloc scsi host\n");
		goto free_pci_regions;
	}

	hba = (struct hptiop_hba *)host->hostdata;
	memset(hba, 0, sizeof(struct hptiop_hba));

	hba->ops = iop_ops;
	hba->pcidev = pcidev;
	hba->host = host;
	hba->initialized = 0;
	hba->iopintf_v2 = 0;

	atomic_set(&hba->resetting, 0);
	atomic_set(&hba->reset_count, 0);

	init_waitqueue_head(&hba->reset_wq);
	init_waitqueue_head(&hba->ioctl_wq);

	host->max_lun = 128;
	host->max_channel = 0;
	host->io_port = 0;
	host->n_io_port = 0;
	host->irq = pcidev->irq;

	if (hba->ops->map_pci_bar(hba))
		goto free_scsi_host;

	if (hba->ops->iop_wait_ready(hba, 20000)) {
		printk(KERN_ERR "scsi%d: firmware not ready\n",
				hba->host->host_no);
		goto unmap_pci_bar;
	}

	if (hba->ops->family == MV_BASED_IOP) {
		if (hba->ops->internal_memalloc(hba)) {
			printk(KERN_ERR "scsi%d: internal_memalloc failed\n",
				hba->host->host_no);
			goto unmap_pci_bar;
		}
	}

	if (hba->ops->get_config(hba, &iop_config)) {
		printk(KERN_ERR "scsi%d: get config failed\n",
				hba->host->host_no);
		goto unmap_pci_bar;
	}

	hba->max_requests = min(le32_to_cpu(iop_config.max_requests),
				HPTIOP_MAX_REQUESTS);
	hba->max_devices = le32_to_cpu(iop_config.max_devices);
	hba->max_request_size = le32_to_cpu(iop_config.request_size);
	hba->max_sg_descriptors = le32_to_cpu(iop_config.max_sg_count);
	hba->firmware_version = le32_to_cpu(iop_config.firmware_version);
	hba->interface_version = le32_to_cpu(iop_config.interface_version);
	hba->sdram_size = le32_to_cpu(iop_config.sdram_size);

	if (hba->ops->family == MVFREY_BASED_IOP) {
		if (hba->ops->internal_memalloc(hba)) {
			printk(KERN_ERR "scsi%d: internal_memalloc failed\n",
				hba->host->host_no);
			goto unmap_pci_bar;
		}
		if (hba->ops->reset_comm(hba)) {
			printk(KERN_ERR "scsi%d: reset comm failed\n",
					hba->host->host_no);
			goto unmap_pci_bar;
		}
	}

	if (hba->firmware_version > 0x01020000 ||
			hba->interface_version > 0x01020000)
		hba->iopintf_v2 = 1;

	host->max_sectors = le32_to_cpu(iop_config.data_transfer_length) >> 9;
	host->max_id = le32_to_cpu(iop_config.max_devices);
	host->sg_tablesize = le32_to_cpu(iop_config.max_sg_count);
	host->can_queue = le32_to_cpu(iop_config.max_requests);
	host->cmd_per_lun = le32_to_cpu(iop_config.max_requests);
	host->max_cmd_len = 16;

	req_size = sizeof(struct hpt_iop_request_scsi_command)
		+ sizeof(struct hpt_iopsg) * (hba->max_sg_descriptors - 1);
	if ((req_size & 0x1f) != 0)
		req_size = (req_size + 0x1f) & ~0x1f;

	memset(&set_config, 0, sizeof(struct hpt_iop_request_set_config));
	set_config.iop_id = cpu_to_le32(host->host_no);
	set_config.vbus_id = cpu_to_le16(host->host_no);
	set_config.max_host_request_size = cpu_to_le16(req_size);

	if (hba->ops->set_config(hba, &set_config)) {
		printk(KERN_ERR "scsi%d: set config failed\n",
				hba->host->host_no);
		goto unmap_pci_bar;
	}

	pci_set_drvdata(pcidev, host);

	if (request_irq(pcidev->irq, hptiop_intr, IRQF_SHARED,
					driver_name, hba)) {
		printk(KERN_ERR "scsi%d: request irq %d failed\n",
					hba->host->host_no, pcidev->irq);
		goto unmap_pci_bar;
	}

	/* Allocate request mem */

	dprintk("req_size=%d, max_requests=%d\n", req_size, hba->max_requests);

	hba->req_size = req_size;
	hba->req_list = NULL;

	for (i = 0; i < hba->max_requests; i++) {
		start_virt = dma_alloc_coherent(&pcidev->dev,
					hba->req_size + 0x20,
					&start_phy, GFP_KERNEL);

		if (!start_virt) {
			printk(KERN_ERR "scsi%d: fail to alloc request mem\n",
						hba->host->host_no);
			goto free_request_mem;
		}

		hba->dma_coherent[i] = start_virt;
		hba->dma_coherent_handle[i] = start_phy;

		if ((start_phy & 0x1f) != 0) {
			offset = ((start_phy + 0x1f) & ~0x1f) - start_phy;
			start_phy += offset;
			start_virt += offset;
		}

		hba->reqs[i].next = NULL;
		hba->reqs[i].req_virt = start_virt;
		hba->reqs[i].req_shifted_phy = start_phy >> 5;
		hba->reqs[i].index = i;
		free_req(hba, &hba->reqs[i]);
	}

	/* Enable Interrupt and start background task */
	if (hptiop_initialize_iop(hba))
		goto free_request_mem;

	if (scsi_add_host(host, &pcidev->dev)) {
		printk(KERN_ERR "scsi%d: scsi_add_host failed\n",
					hba->host->host_no);
		goto free_request_mem;
	}

	scsi_scan_host(host);

	dprintk("scsi%d: hptiop_probe successfully\n", hba->host->host_no);
	return 0;

free_request_mem:
	for (i = 0; i < hba->max_requests; i++) {
		if (hba->dma_coherent[i] && hba->dma_coherent_handle[i])
			dma_free_coherent(&hba->pcidev->dev,
				hba->req_size + 0x20,
				hba->dma_coherent[i],
				hba->dma_coherent_handle[i]);
		else
			break;
	}

	free_irq(hba->pcidev->irq, hba);

unmap_pci_bar:
	hba->ops->internal_memfree(hba);

	hba->ops->unmap_pci_bar(hba);

free_scsi_host:
	scsi_host_put(host);

free_pci_regions:
	pci_release_regions(pcidev);

disable_pci_device:
	pci_disable_device(pcidev);

	dprintk("scsi%d: hptiop_probe fail\n", host ? host->host_no : 0);
	return -ENODEV;
}
static void hptiop_shutdown(struct pci_dev *pcidev)
{
	struct Scsi_Host *host = pci_get_drvdata(pcidev);
	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;

	dprintk("hptiop_shutdown(%p)\n", hba);

	/* stop the iop */
	if (iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_SHUTDOWN, 60000))
		printk(KERN_ERR "scsi%d: shutdown the iop timeout\n",
					hba->host->host_no);

	/* disable all outbound interrupts */
	hba->ops->disable_intr(hba);
}

static void hptiop_disable_intr_itl(struct hptiop_hba *hba)
{
	u32 int_mask;

	int_mask = readl(&hba->u.itl.iop->outbound_intmask);
	writel(int_mask |
		IOPMU_OUTBOUND_INT_MSG0 | IOPMU_OUTBOUND_INT_POSTQUEUE,
		&hba->u.itl.iop->outbound_intmask);
	readl(&hba->u.itl.iop->outbound_intmask);
}

static void hptiop_disable_intr_mv(struct hptiop_hba *hba)
{
	writel(0, &hba->u.mv.regs->outbound_intmask);
	readl(&hba->u.mv.regs->outbound_intmask);
}

static void hptiop_disable_intr_mvfrey(struct hptiop_hba *hba)
{
	writel(0, &(hba->u.mvfrey.mu->f0_doorbell_enable));
	readl(&(hba->u.mvfrey.mu->f0_doorbell_enable));
	writel(0, &(hba->u.mvfrey.mu->isr_enable));
	readl(&(hba->u.mvfrey.mu->isr_enable));
	writel(0, &(hba->u.mvfrey.mu->pcie_f0_int_enable));
	readl(&(hba->u.mvfrey.mu->pcie_f0_int_enable));
}
static void hptiop_remove(struct pci_dev *pcidev)
{
	struct Scsi_Host *host = pci_get_drvdata(pcidev);
	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
	u32 i;

	dprintk("scsi%d: hptiop_remove\n", hba->host->host_no);

	scsi_remove_host(host);

	hptiop_shutdown(pcidev);

	free_irq(hba->pcidev->irq, hba);

	for (i = 0; i < hba->max_requests; i++) {
		if (hba->dma_coherent[i] && hba->dma_coherent_handle[i])
			dma_free_coherent(&hba->pcidev->dev,
				hba->req_size + 0x20,
				hba->dma_coherent[i],
				hba->dma_coherent_handle[i]);
		else
			break;
	}

	hba->ops->internal_memfree(hba);

	hba->ops->unmap_pci_bar(hba);

	pci_release_regions(hba->pcidev);
	pci_set_drvdata(hba->pcidev, NULL);
	pci_disable_device(hba->pcidev);

	scsi_host_put(host);
}
static struct hptiop_adapter_ops hptiop_itl_ops = {
	.family            = INTEL_BASED_IOP,
	.iop_wait_ready    = iop_wait_ready_itl,
	.internal_memalloc = hptiop_internal_memalloc_itl,
	.internal_memfree  = hptiop_internal_memfree_itl,
	.map_pci_bar       = hptiop_map_pci_bar_itl,
	.unmap_pci_bar     = hptiop_unmap_pci_bar_itl,
	.enable_intr       = hptiop_enable_intr_itl,
	.disable_intr      = hptiop_disable_intr_itl,
	.get_config        = iop_get_config_itl,
	.set_config        = iop_set_config_itl,
	.iop_intr          = iop_intr_itl,
	.post_msg          = hptiop_post_msg_itl,
	.post_req          = hptiop_post_req_itl,
	.hw_dma_bit_mask   = 64,
	.reset_comm        = hptiop_reset_comm_itl,
	.host_phy_flag     = cpu_to_le64(0),
};

static struct hptiop_adapter_ops hptiop_mv_ops = {
	.family            = MV_BASED_IOP,
	.iop_wait_ready    = iop_wait_ready_mv,
	.internal_memalloc = hptiop_internal_memalloc_mv,
	.internal_memfree  = hptiop_internal_memfree_mv,
	.map_pci_bar       = hptiop_map_pci_bar_mv,
	.unmap_pci_bar     = hptiop_unmap_pci_bar_mv,
	.enable_intr       = hptiop_enable_intr_mv,
	.disable_intr      = hptiop_disable_intr_mv,
	.get_config        = iop_get_config_mv,
	.set_config        = iop_set_config_mv,
	.iop_intr          = iop_intr_mv,
	.post_msg          = hptiop_post_msg_mv,
	.post_req          = hptiop_post_req_mv,
	.hw_dma_bit_mask   = 33,
	.reset_comm        = hptiop_reset_comm_mv,
	.host_phy_flag     = cpu_to_le64(0),
};

static struct hptiop_adapter_ops hptiop_mvfrey_ops = {
	.family            = MVFREY_BASED_IOP,
	.iop_wait_ready    = iop_wait_ready_mvfrey,
	.internal_memalloc = hptiop_internal_memalloc_mvfrey,
	.internal_memfree  = hptiop_internal_memfree_mvfrey,
	.map_pci_bar       = hptiop_map_pci_bar_mvfrey,
	.unmap_pci_bar     = hptiop_unmap_pci_bar_mvfrey,
	.enable_intr       = hptiop_enable_intr_mvfrey,
	.disable_intr      = hptiop_disable_intr_mvfrey,
	.get_config        = iop_get_config_mvfrey,
	.set_config        = iop_set_config_mvfrey,
	.iop_intr          = iop_intr_mvfrey,
	.post_msg          = hptiop_post_msg_mvfrey,
	.post_req          = hptiop_post_req_mvfrey,
	.hw_dma_bit_mask   = 64,
	.reset_comm        = hptiop_reset_comm_mvfrey,
	.host_phy_flag     = cpu_to_le64(1),
};
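/*
 * PCI ID table: each entry carries a pointer to the adapter ops for that
 * device in driver_data, so hptiop_probe() can bind the right family
 * without further probing.
 */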
static struct pci_device_id hptiop_id_table[] = {
	{ PCI_VDEVICE(TTI, 0x3220), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3320), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3410), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3510), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3511), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3520), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3521), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3522), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3530), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3540), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3560), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x4210), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x4211), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x4310), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x4311), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x4320), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x4321), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x4322), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x4400), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3120), (kernel_ulong_t)&hptiop_mv_ops },
	{ PCI_VDEVICE(TTI, 0x3122), (kernel_ulong_t)&hptiop_mv_ops },
	{ PCI_VDEVICE(TTI, 0x3020), (kernel_ulong_t)&hptiop_mv_ops },
	{ PCI_VDEVICE(TTI, 0x4520), (kernel_ulong_t)&hptiop_mvfrey_ops },
	{ PCI_VDEVICE(TTI, 0x4522), (kernel_ulong_t)&hptiop_mvfrey_ops },
	{ PCI_VDEVICE(TTI, 0x3610), (kernel_ulong_t)&hptiop_mvfrey_ops },
	{ PCI_VDEVICE(TTI, 0x3611), (kernel_ulong_t)&hptiop_mvfrey_ops },
	{ PCI_VDEVICE(TTI, 0x3620), (kernel_ulong_t)&hptiop_mvfrey_ops },
	{ PCI_VDEVICE(TTI, 0x3622), (kernel_ulong_t)&hptiop_mvfrey_ops },
	{ PCI_VDEVICE(TTI, 0x3640), (kernel_ulong_t)&hptiop_mvfrey_ops },
	{ PCI_VDEVICE(TTI, 0x3660), (kernel_ulong_t)&hptiop_mvfrey_ops },
	{ PCI_VDEVICE(TTI, 0x3680), (kernel_ulong_t)&hptiop_mvfrey_ops },
	{ PCI_VDEVICE(TTI, 0x3690), (kernel_ulong_t)&hptiop_mvfrey_ops },
	{},
};

MODULE_DEVICE_TABLE(pci, hptiop_id_table);
static struct pci_driver hptiop_pci_driver = {
	.name = driver_name,
	.id_table = hptiop_id_table,
	.probe = hptiop_probe,
	.remove = hptiop_remove,
	.shutdown = hptiop_shutdown,
};

static int __init hptiop_module_init(void)
{
	printk(KERN_INFO "%s %s\n", driver_name_long, driver_ver);
	return pci_register_driver(&hptiop_pci_driver);
}

static void __exit hptiop_module_exit(void)
{
	pci_unregister_driver(&hptiop_pci_driver);
}

module_init(hptiop_module_init);
module_exit(hptiop_module_exit);

MODULE_LICENSE("GPL");