// SPDX-License-Identifier: GPL-2.0-only
/*
 * Marvell UMI driver
 *
 * Copyright 2011 Marvell. <jyli@marvell.com>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/ktime.h>
#include <linux/blkdev.h>
#include <linux/io.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_eh.h>
#include <linux/uaccess.h>
#include <linux/kthread.h>

#include "mvumi.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("jyli@marvell.com");
MODULE_DESCRIPTION("Marvell UMI Driver");

static const struct pci_device_id mvumi_pci_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, PCI_DEVICE_ID_MARVELL_MV9143) },
	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, PCI_DEVICE_ID_MARVELL_MV9580) },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, mvumi_pci_table);
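
/*
 * Per-adapter command tags are kept in a simple LIFO stack (struct
 * mvumi_tag): tag_init() pre-fills the stack with every tag value,
 * tag_get_one() pops a free tag for an inbound frame and tag_release_one()
 * pushes it back when the command completes.
 */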
static void tag_init(struct mvumi_tag *st, unsigned short size)
{
	unsigned short i;

	BUG_ON(size != st->size);
	st->top = size;
	for (i = 0; i < size; i++)
		st->stack[i] = size - 1 - i;
}
static unsigned short tag_get_one(struct mvumi_hba *mhba, struct mvumi_tag *st)
{
	return st->stack[--st->top];
}
static void tag_release_one(struct mvumi_hba *mhba, struct mvumi_tag *st,
						unsigned short tag)
{
	BUG_ON(st->top >= st->size);
	st->stack[st->top++] = tag;
}
static bool tag_is_empty(struct mvumi_tag *st)
{
	return st->top == 0;
}
static void mvumi_unmap_pci_addr(struct pci_dev *dev, void **addr_array)
{
	int i;

	for (i = 0; i < MAX_BASE_ADDRESS; i++)
		if ((pci_resource_flags(dev, i) & IORESOURCE_MEM) &&
								addr_array[i])
			pci_iounmap(dev, addr_array[i]);
}
static int mvumi_map_pci_addr(struct pci_dev *dev, void **addr_array)
{
	int i;

	for (i = 0; i < MAX_BASE_ADDRESS; i++) {
		if (pci_resource_flags(dev, i) & IORESOURCE_MEM) {
			addr_array[i] = pci_iomap(dev, i, 0);
			if (!addr_array[i]) {
				dev_err(&dev->dev, "failed to map Bar[%d]\n",
									i);
				mvumi_unmap_pci_addr(dev, addr_array);
				return -ENOMEM;
			}
		} else
			addr_array[i] = NULL;

		dev_dbg(&dev->dev, "Bar %d : %p.\n", i, addr_array[i]);
	}

	return 0;
}
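
/*
 * Only memory BARs are iomapped above; a failure on any BAR unwinds the
 * mappings already made via mvumi_unmap_pci_addr().
 */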
static struct mvumi_res *mvumi_alloc_mem_resource(struct mvumi_hba *mhba,
				enum resource_type type, unsigned int size)
{
	struct mvumi_res *res = kzalloc(sizeof(*res), GFP_ATOMIC);

	if (!res) {
		dev_err(&mhba->pdev->dev,
			"Failed to allocate memory for resource manager.\n");
		return NULL;
	}

	switch (type) {
	case RESOURCE_CACHED_MEMORY:
		res->virt_addr = kzalloc(size, GFP_ATOMIC);
		if (!res->virt_addr) {
			dev_err(&mhba->pdev->dev,
				"unable to allocate memory,size = %d.\n", size);
			kfree(res);
			return NULL;
		}
		break;

	case RESOURCE_UNCACHED_MEMORY:
		size = round_up(size, 8);
		res->virt_addr = dma_alloc_coherent(&mhba->pdev->dev, size,
						    &res->bus_addr, GFP_ATOMIC);
		if (!res->virt_addr) {
			dev_err(&mhba->pdev->dev,
					"unable to allocate consistent mem,"
							"size = %d.\n", size);
			kfree(res);
			return NULL;
		}
		break;

	default:
		dev_err(&mhba->pdev->dev, "unknown resource type %d.\n", type);
		kfree(res);
		return NULL;
	}

	res->type = type;
	res->size = size;
	INIT_LIST_HEAD(&res->entry);
	list_add_tail(&res->entry, &mhba->res_list);

	return res;
}
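
/*
 * Every allocation made through mvumi_alloc_mem_resource() is tracked on
 * mhba->res_list so that mvumi_release_mem_resource() below can free the
 * cached (kzalloc) and uncached (dma_alloc_coherent) buffers in one pass.
 */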
static void mvumi_release_mem_resource(struct mvumi_hba *mhba)
{
	struct mvumi_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, &mhba->res_list, entry) {
		switch (res->type) {
		case RESOURCE_UNCACHED_MEMORY:
			dma_free_coherent(&mhba->pdev->dev, res->size,
						res->virt_addr, res->bus_addr);
			break;
		case RESOURCE_CACHED_MEMORY:
			kfree(res->virt_addr);
			break;
		default:
			dev_err(&mhba->pdev->dev,
				"unknown resource type %d\n", res->type);
			break;
		}
		list_del(&res->entry);
		kfree(res);
	}
	mhba->fw_flag &= ~MVUMI_FW_ALLOC;
}
/**
 * mvumi_make_sgl -	Prepares SGL
 * @mhba:		Adapter soft state
 * @scmd:		SCSI command from the mid-layer
 * @sgl_p:		SGL to be filled in
 * @sg_count:		return the number of SG elements
 *
 * If successful, this function returns 0. otherwise, it returns -1.
 */
static int mvumi_make_sgl(struct mvumi_hba *mhba, struct scsi_cmnd *scmd,
					void *sgl_p, unsigned char *sg_count)
{
	struct scatterlist *sg;
	struct mvumi_sgl *m_sg = (struct mvumi_sgl *) sgl_p;
	unsigned int i;
	unsigned int sgnum = scsi_sg_count(scmd);
	dma_addr_t busaddr;

	*sg_count = dma_map_sg(&mhba->pdev->dev, scsi_sglist(scmd), sgnum,
			       scmd->sc_data_direction);
	if (*sg_count > mhba->max_sge) {
		dev_err(&mhba->pdev->dev,
			"sg count[0x%x] is bigger than max sg[0x%x].\n",
			*sg_count, mhba->max_sge);
		dma_unmap_sg(&mhba->pdev->dev, scsi_sglist(scmd), sgnum,
			     scmd->sc_data_direction);
		return -1;
	}
	scsi_for_each_sg(scmd, sg, *sg_count, i) {
		busaddr = sg_dma_address(sg);
		m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(busaddr));
		m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(busaddr));
		m_sg->flags = 0;
		sgd_setsz(mhba, m_sg, cpu_to_le32(sg_dma_len(sg)));
		if ((i + 1) == *sg_count)
			m_sg->flags |= 1U << mhba->eot_flag;

		sgd_inc(mhba, m_sg);
	}

	return 0;
}
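
/*
 * The last scatter-gather descriptor is marked with an end-of-table bit;
 * its position (mhba->eot_flag) depends on whether the firmware reported
 * HS_CAPABILITY_SUPPORT_COMPACT_SG during handshake.
 */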
static int mvumi_internal_cmd_sgl(struct mvumi_hba *mhba, struct mvumi_cmd *cmd,
							unsigned int size)
{
	struct mvumi_sgl *m_sg;
	void *virt_addr;
	dma_addr_t phy_addr;

	if (size == 0)
		return 0;

	virt_addr = dma_alloc_coherent(&mhba->pdev->dev, size, &phy_addr,
				       GFP_KERNEL);
	if (!virt_addr)
		return -1;

	m_sg = (struct mvumi_sgl *) &cmd->frame->payload[0];
	cmd->frame->sg_counts = 1;
	cmd->data_buf = virt_addr;

	m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(phy_addr));
	m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(phy_addr));
	m_sg->flags = 1U << mhba->eot_flag;
	sgd_setsz(mhba, m_sg, cpu_to_le32(size));

	return 0;
}
static struct mvumi_cmd *mvumi_create_internal_cmd(struct mvumi_hba *mhba,
						unsigned int buf_size)
{
	struct mvumi_cmd *cmd;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (!cmd) {
		dev_err(&mhba->pdev->dev, "failed to create a internal cmd\n");
		return NULL;
	}
	INIT_LIST_HEAD(&cmd->queue_pointer);

	cmd->frame = dma_alloc_coherent(&mhba->pdev->dev, mhba->ib_max_size,
			&cmd->frame_phys, GFP_KERNEL);
	if (!cmd->frame) {
		dev_err(&mhba->pdev->dev, "failed to allocate memory for FW"
			" frame,size = %d.\n", mhba->ib_max_size);
		kfree(cmd);
		return NULL;
	}

	if (buf_size) {
		if (mvumi_internal_cmd_sgl(mhba, cmd, buf_size)) {
			dev_err(&mhba->pdev->dev, "failed to allocate memory"
						" for internal frame\n");
			dma_free_coherent(&mhba->pdev->dev, mhba->ib_max_size,
					cmd->frame, cmd->frame_phys);
			kfree(cmd);
			return NULL;
		}
	} else
		cmd->frame->sg_counts = 0;

	return cmd;
}
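
/*
 * Internal (driver-generated) commands carry at most one SG entry: the
 * DMA-coherent data buffer set up by mvumi_internal_cmd_sgl() above.
 */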
static void mvumi_delete_internal_cmd(struct mvumi_hba *mhba,
						struct mvumi_cmd *cmd)
{
	struct mvumi_sgl *m_sg;
	unsigned int size;
	dma_addr_t phy_addr;

	if (cmd && cmd->frame) {
		if (cmd->frame->sg_counts) {
			m_sg = (struct mvumi_sgl *) &cmd->frame->payload[0];
			sgd_getsz(mhba, m_sg, size);

			phy_addr = (dma_addr_t) m_sg->baseaddr_l |
				(dma_addr_t) ((m_sg->baseaddr_h << 16) << 16);

			dma_free_coherent(&mhba->pdev->dev, size, cmd->data_buf,
								phy_addr);
		}
		dma_free_coherent(&mhba->pdev->dev, mhba->ib_max_size,
					cmd->frame, cmd->frame_phys);
		kfree(cmd);
	}
}
/**
 * mvumi_get_cmd -	Get a command from the free pool
 * @mhba:		Adapter soft state
 *
 * Returns a free command from the pool
 */
static struct mvumi_cmd *mvumi_get_cmd(struct mvumi_hba *mhba)
{
	struct mvumi_cmd *cmd = NULL;

	if (likely(!list_empty(&mhba->cmd_pool))) {
		cmd = list_entry((&mhba->cmd_pool)->next,
				struct mvumi_cmd, queue_pointer);
		list_del_init(&cmd->queue_pointer);
	} else
		dev_warn(&mhba->pdev->dev, "command pool is empty!\n");

	return cmd;
}
/**
 * mvumi_return_cmd -	Return a cmd to free command pool
 * @mhba:		Adapter soft state
 * @cmd:		Command packet to be returned to free command pool
 */
static inline void mvumi_return_cmd(struct mvumi_hba *mhba,
						struct mvumi_cmd *cmd)
{
	list_add_tail(&cmd->queue_pointer, &mhba->cmd_pool);
}
/**
 * mvumi_free_cmds -	Free all the cmds in the free cmd pool
 * @mhba:		Adapter soft state
 */
static void mvumi_free_cmds(struct mvumi_hba *mhba)
{
	struct mvumi_cmd *cmd;

	while (!list_empty(&mhba->cmd_pool)) {
		cmd = list_first_entry(&mhba->cmd_pool, struct mvumi_cmd,
							queue_pointer);
		list_del(&cmd->queue_pointer);
		if (!(mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC))
			kfree(cmd->frame);
		kfree(cmd);
	}
}
/**
 * mvumi_alloc_cmds -	Allocates the command packets
 * @mhba:		Adapter soft state
 */
static int mvumi_alloc_cmds(struct mvumi_hba *mhba)
{
	unsigned int i;
	struct mvumi_cmd *cmd;

	for (i = 0; i < mhba->max_io; i++) {
		cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
		if (!cmd)
			goto err_exit;

		INIT_LIST_HEAD(&cmd->queue_pointer);
		list_add_tail(&cmd->queue_pointer, &mhba->cmd_pool);
		if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
			cmd->frame = mhba->ib_frame + i * mhba->ib_max_size;
			cmd->frame_phys = mhba->ib_frame_phys
						+ i * mhba->ib_max_size;
		} else
			cmd->frame = kzalloc(mhba->ib_max_size, GFP_KERNEL);
		if (!cmd->frame)
			goto err_exit;
	}
	return 0;

err_exit:
	dev_err(&mhba->pdev->dev,
			"failed to allocate memory for cmd[0x%x].\n", i);
	while (!list_empty(&mhba->cmd_pool)) {
		cmd = list_first_entry(&mhba->cmd_pool, struct mvumi_cmd,
						queue_pointer);
		list_del(&cmd->queue_pointer);
		if (!(mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC))
			kfree(cmd->frame);
		kfree(cmd);
	}
	return -ENOMEM;
}
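
/*
 * With HS_CAPABILITY_SUPPORT_DYN_SRC the message frames live in one
 * pre-allocated DMA region (mhba->ib_frame) and each command simply points
 * into it; otherwise every command gets its own kzalloc'd frame.
 */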
static unsigned int mvumi_check_ib_list_9143(struct mvumi_hba *mhba)
{
	unsigned int ib_rp_reg;
	struct mvumi_hw_regs *regs = mhba->regs;

	ib_rp_reg = ioread32(mhba->regs->inb_read_pointer);

	if (unlikely(((ib_rp_reg & regs->cl_slot_num_mask) ==
			(mhba->ib_cur_slot & regs->cl_slot_num_mask)) &&
			((ib_rp_reg & regs->cl_pointer_toggle)
			!= (mhba->ib_cur_slot & regs->cl_pointer_toggle)))) {
		dev_warn(&mhba->pdev->dev, "no free slot to use.\n");
		return 0;
	}
	if (atomic_read(&mhba->fw_outstanding) >= mhba->max_io) {
		dev_warn(&mhba->pdev->dev, "firmware io overflow.\n");
		return 0;
	}
	return mhba->max_io - atomic_read(&mhba->fw_outstanding);
}
static unsigned int mvumi_check_ib_list_9580(struct mvumi_hba *mhba)
{
	unsigned int count;

	if (atomic_read(&mhba->fw_outstanding) >= (mhba->max_io - 1))
		return 0;
	count = ioread32(mhba->ib_shadow);
	if (count == 0xffff)
		return 0;
	return count;
}
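
/*
 * check_ib_list reports how many inbound list slots are currently free:
 * the 9143 derives it from the firmware read pointer and the outstanding
 * I/O count, while the 9580 reads the count from the inbound shadow
 * register directly.
 */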
static void mvumi_get_ib_list_entry(struct mvumi_hba *mhba, void **ib_entry)
{
	unsigned int cur_ib_entry;

	cur_ib_entry = mhba->ib_cur_slot & mhba->regs->cl_slot_num_mask;
	cur_ib_entry++;
	if (cur_ib_entry >= mhba->list_num_io) {
		cur_ib_entry -= mhba->list_num_io;
		mhba->ib_cur_slot ^= mhba->regs->cl_pointer_toggle;
	}
	mhba->ib_cur_slot &= ~mhba->regs->cl_slot_num_mask;
	mhba->ib_cur_slot |= (cur_ib_entry & mhba->regs->cl_slot_num_mask);
	if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
		*ib_entry = mhba->ib_list + cur_ib_entry *
				sizeof(struct mvumi_dyn_list_entry);
	} else {
		*ib_entry = mhba->ib_list + cur_ib_entry * mhba->ib_max_size;
	}
	atomic_inc(&mhba->fw_outstanding);
}
static void mvumi_send_ib_list_entry(struct mvumi_hba *mhba)
{
	iowrite32(0xffff, mhba->ib_shadow);
	iowrite32(mhba->ib_cur_slot, mhba->regs->inb_write_pointer);
}
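
/*
 * ib_cur_slot/ob_cur_slot encode both the slot index (cl_slot_num_mask)
 * and a wrap toggle bit (cl_pointer_toggle), so producer and consumer can
 * tell a full ring from an empty one.
 */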
static char mvumi_check_ob_frame(struct mvumi_hba *mhba,
		unsigned int cur_obf, struct mvumi_rsp_frame *p_outb_frame)
{
	unsigned short tag, request_id;

	p_outb_frame = mhba->ob_list + cur_obf * mhba->ob_max_size;
	request_id = p_outb_frame->request_id;
	tag = p_outb_frame->tag;
	if (tag > mhba->tag_pool.size) {
		dev_err(&mhba->pdev->dev, "ob frame data error\n");
		return -1;
	}
	if (mhba->tag_cmd[tag] == NULL) {
		dev_err(&mhba->pdev->dev, "tag[0x%x] with NO command\n", tag);
		return -1;
	} else if (mhba->tag_cmd[tag]->request_id != request_id &&
						mhba->request_id_enabled) {
		dev_err(&mhba->pdev->dev, "request ID from FW:0x%x,"
			"cmd request ID:0x%x\n", request_id,
			mhba->tag_cmd[tag]->request_id);
		return -1;
	}

	return 0;
}
static int mvumi_check_ob_list_9143(struct mvumi_hba *mhba,
			unsigned int *cur_obf, unsigned int *assign_obf_end)
{
	unsigned int ob_write, ob_write_shadow;
	struct mvumi_hw_regs *regs = mhba->regs;

	do {
		ob_write = ioread32(regs->outb_copy_pointer);
		ob_write_shadow = ioread32(mhba->ob_shadow);
	} while ((ob_write & regs->cl_slot_num_mask) != ob_write_shadow);

	*cur_obf = mhba->ob_cur_slot & mhba->regs->cl_slot_num_mask;
	*assign_obf_end = ob_write & mhba->regs->cl_slot_num_mask;

	if ((ob_write & regs->cl_pointer_toggle) !=
			(mhba->ob_cur_slot & regs->cl_pointer_toggle)) {
		*assign_obf_end += mhba->list_num_io;
	}
	return 0;
}
static int mvumi_check_ob_list_9580(struct mvumi_hba *mhba,
			unsigned int *cur_obf, unsigned int *assign_obf_end)
{
	unsigned int ob_write;
	struct mvumi_hw_regs *regs = mhba->regs;

	ob_write = ioread32(regs->outb_read_pointer);
	ob_write = ioread32(regs->outb_copy_pointer);
	*cur_obf = mhba->ob_cur_slot & mhba->regs->cl_slot_num_mask;
	*assign_obf_end = ob_write & mhba->regs->cl_slot_num_mask;
	if (*assign_obf_end < *cur_obf)
		*assign_obf_end += mhba->list_num_io;
	else if (*assign_obf_end == *cur_obf)
		return -1;
	return 0;
}
static void mvumi_receive_ob_list_entry(struct mvumi_hba *mhba)
{
	unsigned int cur_obf, assign_obf_end, i;
	struct mvumi_ob_data *ob_data;
	struct mvumi_rsp_frame *p_outb_frame;
	struct mvumi_hw_regs *regs = mhba->regs;

	if (mhba->instancet->check_ob_list(mhba, &cur_obf, &assign_obf_end))
		return;

	for (i = (assign_obf_end - cur_obf); i != 0; i--) {
		cur_obf++;
		if (cur_obf >= mhba->list_num_io) {
			cur_obf -= mhba->list_num_io;
			mhba->ob_cur_slot ^= regs->cl_pointer_toggle;
		}

		p_outb_frame = mhba->ob_list + cur_obf * mhba->ob_max_size;

		/* Copy pointer may point to entry in outbound list
		 * before entry has valid data
		 */
		if (unlikely(p_outb_frame->tag > mhba->tag_pool.size ||
			mhba->tag_cmd[p_outb_frame->tag] == NULL ||
			p_outb_frame->request_id !=
				mhba->tag_cmd[p_outb_frame->tag]->request_id))
			if (mvumi_check_ob_frame(mhba, cur_obf, p_outb_frame))
				continue;

		if (!list_empty(&mhba->ob_data_list)) {
			ob_data = (struct mvumi_ob_data *)
				list_first_entry(&mhba->ob_data_list,
					struct mvumi_ob_data, list);
			list_del_init(&ob_data->list);
		} else {
			ob_data = NULL;
			if (cur_obf == 0) {
				cur_obf = mhba->list_num_io - 1;
				mhba->ob_cur_slot ^= regs->cl_pointer_toggle;
			} else
				cur_obf -= 1;
			break;
		}

		memcpy(ob_data->data, p_outb_frame, mhba->ob_max_size);
		p_outb_frame->tag = 0xff;

		list_add_tail(&ob_data->list, &mhba->free_ob_list);
	}
	mhba->ob_cur_slot &= ~regs->cl_slot_num_mask;
	mhba->ob_cur_slot |= (cur_obf & regs->cl_slot_num_mask);
	iowrite32(mhba->ob_cur_slot, regs->outb_read_pointer);
}
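
/*
 * Outbound responses are copied out of the hardware list into ob_data
 * buffers and queued on free_ob_list; mvumi_handle_clob() later matches
 * them to commands by tag and completes them.
 */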
static void mvumi_reset(struct mvumi_hba *mhba)
{
	struct mvumi_hw_regs *regs = mhba->regs;

	iowrite32(0, regs->enpointa_mask_reg);
	if (ioread32(regs->arm_to_pciea_msg1) != HANDSHAKE_DONESTATE)
		return;

	iowrite32(DRBL_SOFT_RESET, regs->pciea_to_arm_drbl_reg);
}
static unsigned char mvumi_start(struct mvumi_hba *mhba);

static int mvumi_wait_for_outstanding(struct mvumi_hba *mhba)
{
	mhba->fw_state = FW_STATE_ABORT;

	if (mvumi_start(mhba))
		return FAILED;
	else
		return SUCCESS;
}
static int mvumi_wait_for_fw(struct mvumi_hba *mhba)
{
	struct mvumi_hw_regs *regs = mhba->regs;
	u32 tmp;
	unsigned long before;

	before = jiffies;
	iowrite32(0, regs->enpointa_mask_reg);
	tmp = ioread32(regs->arm_to_pciea_msg1);
	while (tmp != HANDSHAKE_READYSTATE) {
		iowrite32(DRBL_MU_RESET, regs->pciea_to_arm_drbl_reg);
		if (time_after(jiffies, before + FW_MAX_DELAY * HZ)) {
			dev_err(&mhba->pdev->dev,
				"FW reset failed [0x%x].\n", tmp);
			return FAILED;
		}

		tmp = ioread32(regs->arm_to_pciea_msg1);
	}

	return SUCCESS;
}
static void mvumi_backup_bar_addr(struct mvumi_hba *mhba)
{
	unsigned int i;

	for (i = 0; i < MAX_BASE_ADDRESS; i++) {
		pci_read_config_dword(mhba->pdev, 0x10 + i * 4,
						&mhba->pci_base[i]);
	}
}
static void mvumi_restore_bar_addr(struct mvumi_hba *mhba)
{
	unsigned int i;

	for (i = 0; i < MAX_BASE_ADDRESS; i++) {
		if (mhba->pci_base[i])
			pci_write_config_dword(mhba->pdev, 0x10 + i * 4,
						mhba->pci_base[i]);
	}
}
static int mvumi_pci_set_master(struct pci_dev *pdev)
{
	int ret = 0;

	pci_set_master(pdev);

	if (IS_DMA64) {
		if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
			ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
	} else
		ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));

	return ret;
}
static int mvumi_reset_host_9580(struct mvumi_hba *mhba)
{
	mhba->fw_state = FW_STATE_ABORT;

	iowrite32(0, mhba->regs->reset_enable);
	iowrite32(0xf, mhba->regs->reset_request);

	iowrite32(0x10, mhba->regs->reset_enable);
	iowrite32(0x10, mhba->regs->reset_request);
	pci_disable_device(mhba->pdev);

	if (pci_enable_device(mhba->pdev)) {
		dev_err(&mhba->pdev->dev, "enable device failed\n");
		return FAILED;
	}
	if (mvumi_pci_set_master(mhba->pdev)) {
		dev_err(&mhba->pdev->dev, "set master failed\n");
		return FAILED;
	}
	mvumi_restore_bar_addr(mhba);
	if (mvumi_wait_for_fw(mhba) == FAILED)
		return FAILED;

	return mvumi_wait_for_outstanding(mhba);
}
static int mvumi_reset_host_9143(struct mvumi_hba *mhba)
{
	return mvumi_wait_for_outstanding(mhba);
}
static int mvumi_host_reset(struct scsi_cmnd *scmd)
{
	struct mvumi_hba *mhba;

	mhba = (struct mvumi_hba *) scmd->device->host->hostdata;

	scmd_printk(KERN_NOTICE, scmd, "RESET -%u cmd=%x retries=%x\n",
			scsi_cmd_to_rq(scmd)->tag, scmd->cmnd[0], scmd->retries);

	return mhba->instancet->reset_host(mhba);
}
static int mvumi_issue_blocked_cmd(struct mvumi_hba *mhba,
					struct mvumi_cmd *cmd)
{
	unsigned long flags;

	cmd->cmd_status = REQ_STATUS_PENDING;

	if (atomic_read(&cmd->sync_cmd)) {
		dev_err(&mhba->pdev->dev,
			"last blocked cmd not finished, sync_cmd = %d\n",
						atomic_read(&cmd->sync_cmd));
		return -1;
	}
	atomic_inc(&cmd->sync_cmd);
	spin_lock_irqsave(mhba->shost->host_lock, flags);
	mhba->instancet->fire_cmd(mhba, cmd);
	spin_unlock_irqrestore(mhba->shost->host_lock, flags);

	wait_event_timeout(mhba->int_cmd_wait_q,
		(cmd->cmd_status != REQ_STATUS_PENDING),
		MVUMI_INTERNAL_CMD_WAIT_TIME * HZ);

	/* command timeout */
	if (atomic_read(&cmd->sync_cmd)) {
		spin_lock_irqsave(mhba->shost->host_lock, flags);
		atomic_dec(&cmd->sync_cmd);
		if (mhba->tag_cmd[cmd->frame->tag]) {
			mhba->tag_cmd[cmd->frame->tag] = NULL;
			dev_warn(&mhba->pdev->dev, "TIMEOUT:release tag [%d]\n",
							cmd->frame->tag);
			tag_release_one(mhba, &mhba->tag_pool, cmd->frame->tag);
		}
		if (!list_empty(&cmd->queue_pointer)) {
			dev_warn(&mhba->pdev->dev,
				"TIMEOUT:A internal command doesn't send!\n");
			list_del_init(&cmd->queue_pointer);
		} else
			atomic_dec(&mhba->fw_outstanding);

		spin_unlock_irqrestore(mhba->shost->host_lock, flags);
	}
	return 0;
}
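
/*
 * Blocked (synchronous) internal commands wait on int_cmd_wait_q; if the
 * firmware never answers within MVUMI_INTERNAL_CMD_WAIT_TIME the tag is
 * reclaimed here so the slot can be reused.
 */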
static void mvumi_release_fw(struct mvumi_hba *mhba)
{
	mvumi_free_cmds(mhba);
	mvumi_release_mem_resource(mhba);
	mvumi_unmap_pci_addr(mhba->pdev, mhba->base_addr);
	dma_free_coherent(&mhba->pdev->dev, HSP_MAX_SIZE,
		mhba->handshake_page, mhba->handshake_page_phys);
	pci_release_regions(mhba->pdev);
}
static unsigned char mvumi_flush_cache(struct mvumi_hba *mhba)
{
	struct mvumi_cmd *cmd;
	struct mvumi_msg_frame *frame;
	unsigned char device_id, retry = 0;
	unsigned char bitcount = sizeof(unsigned char) * 8;

	for (device_id = 0; device_id < mhba->max_target_id; device_id++) {
		if (!(mhba->target_map[device_id / bitcount] &
				(1 << (device_id % bitcount))))
			continue;
get_cmd:	cmd = mvumi_create_internal_cmd(mhba, 0);
		if (!cmd) {
			if (retry++ >= 5) {
				dev_err(&mhba->pdev->dev, "failed to get memory"
					" for internal flush cache cmd for "
					"device %d", device_id);
				retry = 0;
				continue;
			} else
				goto get_cmd;
		}
		cmd->cmd_status = REQ_STATUS_PENDING;
		atomic_set(&cmd->sync_cmd, 0);
		frame = cmd->frame;
		frame->req_function = CL_FUN_SCSI_CMD;
		frame->device_id = device_id;
		frame->cmd_flag = CMD_FLAG_NON_DATA;
		frame->data_transfer_length = 0;
		frame->cdb_length = MAX_COMMAND_SIZE;
		memset(frame->cdb, 0, MAX_COMMAND_SIZE);
		frame->cdb[0] = SCSI_CMD_MARVELL_SPECIFIC;
		frame->cdb[1] = CDB_CORE_MODULE;
		frame->cdb[2] = CDB_CORE_SHUTDOWN;

		mvumi_issue_blocked_cmd(mhba, cmd);
		if (cmd->cmd_status != SAM_STAT_GOOD) {
			dev_err(&mhba->pdev->dev,
				"device %d flush cache failed, status=0x%x.\n",
				device_id, cmd->cmd_status);
		}

		mvumi_delete_internal_cmd(mhba, cmd);
	}
	return 0;
}
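
/*
 * The flush above is issued per target with a Marvell vendor-specific CDB
 * (SCSI_CMD_MARVELL_SPECIFIC / CDB_CORE_SHUTDOWN) before the controller is
 * shut down or reset.
 */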
815 mvumi_calculate_checksum(struct mvumi_hs_header
*p_header
,
819 unsigned char ret
= 0, i
;
821 ptr
= (unsigned char *) p_header
->frame_content
;
822 for (i
= 0; i
< len
; i
++) {
830 static void mvumi_hs_build_page(struct mvumi_hba
*mhba
,
831 struct mvumi_hs_header
*hs_header
)
833 struct mvumi_hs_page2
*hs_page2
;
834 struct mvumi_hs_page4
*hs_page4
;
835 struct mvumi_hs_page3
*hs_page3
;
839 switch (hs_header
->page_code
) {
840 case HS_PAGE_HOST_INFO
:
841 hs_page2
= (struct mvumi_hs_page2
*) hs_header
;
842 hs_header
->frame_length
= sizeof(*hs_page2
) - 4;
843 memset(hs_header
->frame_content
, 0, hs_header
->frame_length
);
844 hs_page2
->host_type
= 3; /* 3 mean linux*/
845 if (mhba
->hba_capability
& HS_CAPABILITY_SUPPORT_DYN_SRC
)
846 hs_page2
->host_cap
= 0x08;/* host dynamic source mode */
847 hs_page2
->host_ver
.ver_major
= VER_MAJOR
;
848 hs_page2
->host_ver
.ver_minor
= VER_MINOR
;
849 hs_page2
->host_ver
.ver_oem
= VER_OEM
;
850 hs_page2
->host_ver
.ver_build
= VER_BUILD
;
851 hs_page2
->system_io_bus
= 0;
852 hs_page2
->slot_number
= 0;
853 hs_page2
->intr_level
= 0;
854 hs_page2
->intr_vector
= 0;
855 time
= ktime_get_real_seconds();
856 local_time
= (time
- (sys_tz
.tz_minuteswest
* 60));
857 hs_page2
->seconds_since1970
= local_time
;
858 hs_header
->checksum
= mvumi_calculate_checksum(hs_header
,
859 hs_header
->frame_length
);
862 case HS_PAGE_FIRM_CTL
:
863 hs_page3
= (struct mvumi_hs_page3
*) hs_header
;
864 hs_header
->frame_length
= sizeof(*hs_page3
) - 4;
865 memset(hs_header
->frame_content
, 0, hs_header
->frame_length
);
866 hs_header
->checksum
= mvumi_calculate_checksum(hs_header
,
867 hs_header
->frame_length
);
870 case HS_PAGE_CL_INFO
:
871 hs_page4
= (struct mvumi_hs_page4
*) hs_header
;
872 hs_header
->frame_length
= sizeof(*hs_page4
) - 4;
873 memset(hs_header
->frame_content
, 0, hs_header
->frame_length
);
874 hs_page4
->ib_baseaddr_l
= lower_32_bits(mhba
->ib_list_phys
);
875 hs_page4
->ib_baseaddr_h
= upper_32_bits(mhba
->ib_list_phys
);
877 hs_page4
->ob_baseaddr_l
= lower_32_bits(mhba
->ob_list_phys
);
878 hs_page4
->ob_baseaddr_h
= upper_32_bits(mhba
->ob_list_phys
);
879 hs_page4
->ib_entry_size
= mhba
->ib_max_size_setting
;
880 hs_page4
->ob_entry_size
= mhba
->ob_max_size_setting
;
881 if (mhba
->hba_capability
882 & HS_CAPABILITY_NEW_PAGE_IO_DEPTH_DEF
) {
883 hs_page4
->ob_depth
= find_first_bit((unsigned long *)
886 hs_page4
->ib_depth
= find_first_bit((unsigned long *)
890 hs_page4
->ob_depth
= (u8
) mhba
->list_num_io
;
891 hs_page4
->ib_depth
= (u8
) mhba
->list_num_io
;
893 hs_header
->checksum
= mvumi_calculate_checksum(hs_header
,
894 hs_header
->frame_length
);
898 dev_err(&mhba
->pdev
->dev
, "cannot build page, code[0x%x]\n",
899 hs_header
->page_code
);
905 * mvumi_init_data - Initialize requested date for FW
906 * @mhba: Adapter soft state
908 static int mvumi_init_data(struct mvumi_hba
*mhba
)
910 struct mvumi_ob_data
*ob_pool
;
911 struct mvumi_res
*res_mgnt
;
912 unsigned int tmp_size
, offset
, i
;
916 if (mhba
->fw_flag
& MVUMI_FW_ALLOC
)
919 tmp_size
= mhba
->ib_max_size
* mhba
->max_io
;
920 if (mhba
->hba_capability
& HS_CAPABILITY_SUPPORT_DYN_SRC
)
921 tmp_size
+= sizeof(struct mvumi_dyn_list_entry
) * mhba
->max_io
;
923 tmp_size
+= 128 + mhba
->ob_max_size
* mhba
->max_io
;
924 tmp_size
+= 8 + sizeof(u32
)*2 + 16;
926 res_mgnt
= mvumi_alloc_mem_resource(mhba
,
927 RESOURCE_UNCACHED_MEMORY
, tmp_size
);
929 dev_err(&mhba
->pdev
->dev
,
930 "failed to allocate memory for inbound list\n");
931 goto fail_alloc_dma_buf
;
934 p
= res_mgnt
->bus_addr
;
935 v
= res_mgnt
->virt_addr
;
937 offset
= round_up(p
, 128) - p
;
941 mhba
->ib_list_phys
= p
;
942 if (mhba
->hba_capability
& HS_CAPABILITY_SUPPORT_DYN_SRC
) {
943 v
+= sizeof(struct mvumi_dyn_list_entry
) * mhba
->max_io
;
944 p
+= sizeof(struct mvumi_dyn_list_entry
) * mhba
->max_io
;
946 mhba
->ib_frame_phys
= p
;
948 v
+= mhba
->ib_max_size
* mhba
->max_io
;
949 p
+= mhba
->ib_max_size
* mhba
->max_io
;
952 offset
= round_up(p
, 8) - p
;
956 mhba
->ib_shadow_phys
= p
;
960 if (mhba
->pdev
->device
== PCI_DEVICE_ID_MARVELL_MV9580
) {
961 offset
= round_up(p
, 8) - p
;
965 mhba
->ob_shadow_phys
= p
;
969 offset
= round_up(p
, 4) - p
;
973 mhba
->ob_shadow_phys
= p
;
979 offset
= round_up(p
, 128) - p
;
984 mhba
->ob_list_phys
= p
;
987 tmp_size
= mhba
->max_io
* (mhba
->ob_max_size
+ sizeof(*ob_pool
));
988 tmp_size
= round_up(tmp_size
, 8);
990 res_mgnt
= mvumi_alloc_mem_resource(mhba
,
991 RESOURCE_CACHED_MEMORY
, tmp_size
);
993 dev_err(&mhba
->pdev
->dev
,
994 "failed to allocate memory for outbound data buffer\n");
995 goto fail_alloc_dma_buf
;
997 virmem
= res_mgnt
->virt_addr
;
999 for (i
= mhba
->max_io
; i
!= 0; i
--) {
1000 ob_pool
= (struct mvumi_ob_data
*) virmem
;
1001 list_add_tail(&ob_pool
->list
, &mhba
->ob_data_list
);
1002 virmem
+= mhba
->ob_max_size
+ sizeof(*ob_pool
);
1005 tmp_size
= sizeof(unsigned short) * mhba
->max_io
+
1006 sizeof(struct mvumi_cmd
*) * mhba
->max_io
;
1007 tmp_size
+= round_up(mhba
->max_target_id
, sizeof(unsigned char) * 8) /
1008 (sizeof(unsigned char) * 8);
1010 res_mgnt
= mvumi_alloc_mem_resource(mhba
,
1011 RESOURCE_CACHED_MEMORY
, tmp_size
);
1013 dev_err(&mhba
->pdev
->dev
,
1014 "failed to allocate memory for tag and target map\n");
1015 goto fail_alloc_dma_buf
;
1018 virmem
= res_mgnt
->virt_addr
;
1019 mhba
->tag_pool
.stack
= virmem
;
1020 mhba
->tag_pool
.size
= mhba
->max_io
;
1021 tag_init(&mhba
->tag_pool
, mhba
->max_io
);
1022 virmem
+= sizeof(unsigned short) * mhba
->max_io
;
1024 mhba
->tag_cmd
= virmem
;
1025 virmem
+= sizeof(struct mvumi_cmd
*) * mhba
->max_io
;
1027 mhba
->target_map
= virmem
;
1029 mhba
->fw_flag
|= MVUMI_FW_ALLOC
;
1033 mvumi_release_mem_resource(mhba
);
1037 static int mvumi_hs_process_page(struct mvumi_hba
*mhba
,
1038 struct mvumi_hs_header
*hs_header
)
1040 struct mvumi_hs_page1
*hs_page1
;
1041 unsigned char page_checksum
;
1043 page_checksum
= mvumi_calculate_checksum(hs_header
,
1044 hs_header
->frame_length
);
1045 if (page_checksum
!= hs_header
->checksum
) {
1046 dev_err(&mhba
->pdev
->dev
, "checksum error\n");
1050 switch (hs_header
->page_code
) {
1051 case HS_PAGE_FIRM_CAP
:
1052 hs_page1
= (struct mvumi_hs_page1
*) hs_header
;
1054 mhba
->max_io
= hs_page1
->max_io_support
;
1055 mhba
->list_num_io
= hs_page1
->cl_inout_list_depth
;
1056 mhba
->max_transfer_size
= hs_page1
->max_transfer_size
;
1057 mhba
->max_target_id
= hs_page1
->max_devices_support
;
1058 mhba
->hba_capability
= hs_page1
->capability
;
1059 mhba
->ib_max_size_setting
= hs_page1
->cl_in_max_entry_size
;
1060 mhba
->ib_max_size
= (1 << hs_page1
->cl_in_max_entry_size
) << 2;
1062 mhba
->ob_max_size_setting
= hs_page1
->cl_out_max_entry_size
;
1063 mhba
->ob_max_size
= (1 << hs_page1
->cl_out_max_entry_size
) << 2;
1065 dev_dbg(&mhba
->pdev
->dev
, "FW version:%d\n",
1066 hs_page1
->fw_ver
.ver_build
);
1068 if (mhba
->hba_capability
& HS_CAPABILITY_SUPPORT_COMPACT_SG
)
1069 mhba
->eot_flag
= 22;
1071 mhba
->eot_flag
= 27;
1072 if (mhba
->hba_capability
& HS_CAPABILITY_NEW_PAGE_IO_DEPTH_DEF
)
1073 mhba
->list_num_io
= 1 << hs_page1
->cl_inout_list_depth
;
1076 dev_err(&mhba
->pdev
->dev
, "handshake: page code error\n");
1083 * mvumi_handshake - Move the FW to READY state
1084 * @mhba: Adapter soft state
1086 * During the initialization, FW passes can potentially be in any one of
1087 * several possible states. If the FW in operational, waiting-for-handshake
1088 * states, driver must take steps to bring it to ready state. Otherwise, it
1089 * has to wait for the ready state.
1091 static int mvumi_handshake(struct mvumi_hba
*mhba
)
1093 unsigned int hs_state
, tmp
, hs_fun
;
1094 struct mvumi_hs_header
*hs_header
;
1095 struct mvumi_hw_regs
*regs
= mhba
->regs
;
1097 if (mhba
->fw_state
== FW_STATE_STARTING
)
1098 hs_state
= HS_S_START
;
1100 tmp
= ioread32(regs
->arm_to_pciea_msg0
);
1101 hs_state
= HS_GET_STATE(tmp
);
1102 dev_dbg(&mhba
->pdev
->dev
, "handshake state[0x%x].\n", hs_state
);
1103 if (HS_GET_STATUS(tmp
) != HS_STATUS_OK
) {
1104 mhba
->fw_state
= FW_STATE_STARTING
;
1112 mhba
->fw_state
= FW_STATE_HANDSHAKING
;
1113 HS_SET_STATUS(hs_fun
, HS_STATUS_OK
);
1114 HS_SET_STATE(hs_fun
, HS_S_RESET
);
1115 iowrite32(HANDSHAKE_SIGNATURE
, regs
->pciea_to_arm_msg1
);
1116 iowrite32(hs_fun
, regs
->pciea_to_arm_msg0
);
1117 iowrite32(DRBL_HANDSHAKE
, regs
->pciea_to_arm_drbl_reg
);
1121 iowrite32(lower_32_bits(mhba
->handshake_page_phys
),
1122 regs
->pciea_to_arm_msg1
);
1123 iowrite32(upper_32_bits(mhba
->handshake_page_phys
),
1124 regs
->arm_to_pciea_msg1
);
1125 HS_SET_STATUS(hs_fun
, HS_STATUS_OK
);
1126 HS_SET_STATE(hs_fun
, HS_S_PAGE_ADDR
);
1127 iowrite32(hs_fun
, regs
->pciea_to_arm_msg0
);
1128 iowrite32(DRBL_HANDSHAKE
, regs
->pciea_to_arm_drbl_reg
);
1131 case HS_S_PAGE_ADDR
:
1132 case HS_S_QUERY_PAGE
:
1133 case HS_S_SEND_PAGE
:
1134 hs_header
= (struct mvumi_hs_header
*) mhba
->handshake_page
;
1135 if (hs_header
->page_code
== HS_PAGE_FIRM_CAP
) {
1136 mhba
->hba_total_pages
=
1137 ((struct mvumi_hs_page1
*) hs_header
)->total_pages
;
1139 if (mhba
->hba_total_pages
== 0)
1140 mhba
->hba_total_pages
= HS_PAGE_TOTAL
-1;
1143 if (hs_state
== HS_S_QUERY_PAGE
) {
1144 if (mvumi_hs_process_page(mhba
, hs_header
)) {
1145 HS_SET_STATE(hs_fun
, HS_S_ABORT
);
1148 if (mvumi_init_data(mhba
)) {
1149 HS_SET_STATE(hs_fun
, HS_S_ABORT
);
1152 } else if (hs_state
== HS_S_PAGE_ADDR
) {
1153 hs_header
->page_code
= 0;
1154 mhba
->hba_total_pages
= HS_PAGE_TOTAL
-1;
1157 if ((hs_header
->page_code
+ 1) <= mhba
->hba_total_pages
) {
1158 hs_header
->page_code
++;
1159 if (hs_header
->page_code
!= HS_PAGE_FIRM_CAP
) {
1160 mvumi_hs_build_page(mhba
, hs_header
);
1161 HS_SET_STATE(hs_fun
, HS_S_SEND_PAGE
);
1163 HS_SET_STATE(hs_fun
, HS_S_QUERY_PAGE
);
1165 HS_SET_STATE(hs_fun
, HS_S_END
);
1167 HS_SET_STATUS(hs_fun
, HS_STATUS_OK
);
1168 iowrite32(hs_fun
, regs
->pciea_to_arm_msg0
);
1169 iowrite32(DRBL_HANDSHAKE
, regs
->pciea_to_arm_drbl_reg
);
1173 /* Set communication list ISR */
1174 tmp
= ioread32(regs
->enpointa_mask_reg
);
1175 tmp
|= regs
->int_comaout
| regs
->int_comaerr
;
1176 iowrite32(tmp
, regs
->enpointa_mask_reg
);
1177 iowrite32(mhba
->list_num_io
, mhba
->ib_shadow
);
1178 /* Set InBound List Available count shadow */
1179 iowrite32(lower_32_bits(mhba
->ib_shadow_phys
),
1180 regs
->inb_aval_count_basel
);
1181 iowrite32(upper_32_bits(mhba
->ib_shadow_phys
),
1182 regs
->inb_aval_count_baseh
);
1184 if (mhba
->pdev
->device
== PCI_DEVICE_ID_MARVELL_MV9143
) {
1185 /* Set OutBound List Available count shadow */
1186 iowrite32((mhba
->list_num_io
-1) |
1187 regs
->cl_pointer_toggle
,
1189 iowrite32(lower_32_bits(mhba
->ob_shadow_phys
),
1190 regs
->outb_copy_basel
);
1191 iowrite32(upper_32_bits(mhba
->ob_shadow_phys
),
1192 regs
->outb_copy_baseh
);
1195 mhba
->ib_cur_slot
= (mhba
->list_num_io
- 1) |
1196 regs
->cl_pointer_toggle
;
1197 mhba
->ob_cur_slot
= (mhba
->list_num_io
- 1) |
1198 regs
->cl_pointer_toggle
;
1199 mhba
->fw_state
= FW_STATE_STARTED
;
1203 dev_err(&mhba
->pdev
->dev
, "unknown handshake state [0x%x].\n",
1210 static unsigned char mvumi_handshake_event(struct mvumi_hba
*mhba
)
1212 unsigned int isr_status
;
1213 unsigned long before
;
1216 mvumi_handshake(mhba
);
1218 isr_status
= mhba
->instancet
->read_fw_status_reg(mhba
);
1220 if (mhba
->fw_state
== FW_STATE_STARTED
)
1222 if (time_after(jiffies
, before
+ FW_MAX_DELAY
* HZ
)) {
1223 dev_err(&mhba
->pdev
->dev
,
1224 "no handshake response at state 0x%x.\n",
1226 dev_err(&mhba
->pdev
->dev
,
1227 "isr : global=0x%x,status=0x%x.\n",
1228 mhba
->global_isr
, isr_status
);
1232 usleep_range(1000, 2000);
1233 } while (!(isr_status
& DRBL_HANDSHAKE_ISR
));
1238 static unsigned char mvumi_check_handshake(struct mvumi_hba
*mhba
)
1241 unsigned long before
;
1244 tmp
= ioread32(mhba
->regs
->arm_to_pciea_msg1
);
1245 while ((tmp
!= HANDSHAKE_READYSTATE
) && (tmp
!= HANDSHAKE_DONESTATE
)) {
1246 if (tmp
!= HANDSHAKE_READYSTATE
)
1247 iowrite32(DRBL_MU_RESET
,
1248 mhba
->regs
->pciea_to_arm_drbl_reg
);
1249 if (time_after(jiffies
, before
+ FW_MAX_DELAY
* HZ
)) {
1250 dev_err(&mhba
->pdev
->dev
,
1251 "invalid signature [0x%x].\n", tmp
);
1254 usleep_range(1000, 2000);
1256 tmp
= ioread32(mhba
->regs
->arm_to_pciea_msg1
);
1259 mhba
->fw_state
= FW_STATE_STARTING
;
1260 dev_dbg(&mhba
->pdev
->dev
, "start firmware handshake...\n");
1262 if (mvumi_handshake_event(mhba
)) {
1263 dev_err(&mhba
->pdev
->dev
,
1264 "handshake failed at state 0x%x.\n",
1268 } while (mhba
->fw_state
!= FW_STATE_STARTED
);
1270 dev_dbg(&mhba
->pdev
->dev
, "firmware handshake done\n");
1275 static unsigned char mvumi_start(struct mvumi_hba
*mhba
)
1278 struct mvumi_hw_regs
*regs
= mhba
->regs
;
1280 /* clear Door bell */
1281 tmp
= ioread32(regs
->arm_to_pciea_drbl_reg
);
1282 iowrite32(tmp
, regs
->arm_to_pciea_drbl_reg
);
1284 iowrite32(regs
->int_drbl_int_mask
, regs
->arm_to_pciea_mask_reg
);
1285 tmp
= ioread32(regs
->enpointa_mask_reg
) | regs
->int_dl_cpu2pciea
;
1286 iowrite32(tmp
, regs
->enpointa_mask_reg
);
1288 if (mvumi_check_handshake(mhba
))
1295 * mvumi_complete_cmd - Completes a command
1296 * @mhba: Adapter soft state
1297 * @cmd: Command to be completed
1298 * @ob_frame: Command response
1300 static void mvumi_complete_cmd(struct mvumi_hba
*mhba
, struct mvumi_cmd
*cmd
,
1301 struct mvumi_rsp_frame
*ob_frame
)
1303 struct scsi_cmnd
*scmd
= cmd
->scmd
;
1305 mvumi_priv(cmd
->scmd
)->cmd_priv
= NULL
;
1306 scmd
->result
= ob_frame
->req_status
;
1308 switch (ob_frame
->req_status
) {
1310 scmd
->result
|= DID_OK
<< 16;
1313 scmd
->result
|= DID_BUS_BUSY
<< 16;
1315 case SAM_STAT_CHECK_CONDITION
:
1316 scmd
->result
|= (DID_OK
<< 16);
1317 if (ob_frame
->rsp_flag
& CL_RSP_FLAG_SENSEDATA
) {
1318 memcpy(cmd
->scmd
->sense_buffer
, ob_frame
->payload
,
1319 sizeof(struct mvumi_sense_data
));
1323 scmd
->result
|= (DID_ABORT
<< 16);
1327 if (scsi_bufflen(scmd
))
1328 dma_unmap_sg(&mhba
->pdev
->dev
, scsi_sglist(scmd
),
1329 scsi_sg_count(scmd
),
1330 scmd
->sc_data_direction
);
1332 mvumi_return_cmd(mhba
, cmd
);
1335 static void mvumi_complete_internal_cmd(struct mvumi_hba
*mhba
,
1336 struct mvumi_cmd
*cmd
,
1337 struct mvumi_rsp_frame
*ob_frame
)
1339 if (atomic_read(&cmd
->sync_cmd
)) {
1340 cmd
->cmd_status
= ob_frame
->req_status
;
1342 if ((ob_frame
->req_status
== SAM_STAT_CHECK_CONDITION
) &&
1343 (ob_frame
->rsp_flag
& CL_RSP_FLAG_SENSEDATA
) &&
1345 memcpy(cmd
->data_buf
, ob_frame
->payload
,
1346 sizeof(struct mvumi_sense_data
));
1348 atomic_dec(&cmd
->sync_cmd
);
1349 wake_up(&mhba
->int_cmd_wait_q
);
1353 static void mvumi_show_event(struct mvumi_hba
*mhba
,
1354 struct mvumi_driver_event
*ptr
)
1358 dev_warn(&mhba
->pdev
->dev
,
1359 "Event[0x%x] id[0x%x] severity[0x%x] device id[0x%x]\n",
1360 ptr
->sequence_no
, ptr
->event_id
, ptr
->severity
, ptr
->device_id
);
1361 if (ptr
->param_count
) {
1362 printk(KERN_WARNING
"Event param(len 0x%x): ",
1364 for (i
= 0; i
< ptr
->param_count
; i
++)
1365 printk(KERN_WARNING
"0x%x ", ptr
->params
[i
]);
1367 printk(KERN_WARNING
"\n");
1370 if (ptr
->sense_data_length
) {
1371 printk(KERN_WARNING
"Event sense data(len 0x%x): ",
1372 ptr
->sense_data_length
);
1373 for (i
= 0; i
< ptr
->sense_data_length
; i
++)
1374 printk(KERN_WARNING
"0x%x ", ptr
->sense_data
[i
]);
1375 printk(KERN_WARNING
"\n");
1379 static int mvumi_handle_hotplug(struct mvumi_hba
*mhba
, u16 devid
, int status
)
1381 struct scsi_device
*sdev
;
1384 if (status
== DEVICE_OFFLINE
) {
1385 sdev
= scsi_device_lookup(mhba
->shost
, 0, devid
, 0);
1387 dev_dbg(&mhba
->pdev
->dev
, "remove disk %d-%d-%d.\n", 0,
1389 scsi_remove_device(sdev
);
1390 scsi_device_put(sdev
);
1393 dev_err(&mhba
->pdev
->dev
, " no disk[%d] to remove\n",
1395 } else if (status
== DEVICE_ONLINE
) {
1396 sdev
= scsi_device_lookup(mhba
->shost
, 0, devid
, 0);
1398 scsi_add_device(mhba
->shost
, 0, devid
, 0);
1399 dev_dbg(&mhba
->pdev
->dev
, " add disk %d-%d-%d.\n", 0,
1403 dev_err(&mhba
->pdev
->dev
, " don't add disk %d-%d-%d.\n",
1405 scsi_device_put(sdev
);
1411 static u64
mvumi_inquiry(struct mvumi_hba
*mhba
,
1412 unsigned int id
, struct mvumi_cmd
*cmd
)
1414 struct mvumi_msg_frame
*frame
;
1417 int data_buf_len
= 64;
1420 cmd
= mvumi_create_internal_cmd(mhba
, data_buf_len
);
1426 memset(cmd
->data_buf
, 0, data_buf_len
);
1429 cmd
->cmd_status
= REQ_STATUS_PENDING
;
1430 atomic_set(&cmd
->sync_cmd
, 0);
1432 frame
->device_id
= (u16
) id
;
1433 frame
->cmd_flag
= CMD_FLAG_DATA_IN
;
1434 frame
->req_function
= CL_FUN_SCSI_CMD
;
1435 frame
->cdb_length
= 6;
1436 frame
->data_transfer_length
= MVUMI_INQUIRY_LENGTH
;
1437 memset(frame
->cdb
, 0, frame
->cdb_length
);
1438 frame
->cdb
[0] = INQUIRY
;
1439 frame
->cdb
[4] = frame
->data_transfer_length
;
1441 mvumi_issue_blocked_cmd(mhba
, cmd
);
1443 if (cmd
->cmd_status
== SAM_STAT_GOOD
) {
1444 if (mhba
->pdev
->device
== PCI_DEVICE_ID_MARVELL_MV9143
)
1447 memcpy((void *)&wwid
,
1448 (cmd
->data_buf
+ MVUMI_INQUIRY_UUID_OFF
),
1449 MVUMI_INQUIRY_UUID_LEN
);
1450 dev_dbg(&mhba
->pdev
->dev
,
1451 "inquiry device(0:%d:0) wwid(%llx)\n", id
, wwid
);
1456 mvumi_delete_internal_cmd(mhba
, cmd
);
1461 static void mvumi_detach_devices(struct mvumi_hba
*mhba
)
1463 struct mvumi_device
*mv_dev
= NULL
, *dev_next
;
1464 struct scsi_device
*sdev
= NULL
;
1466 mutex_lock(&mhba
->device_lock
);
1468 /* detach Hard Disk */
1469 list_for_each_entry_safe(mv_dev
, dev_next
,
1470 &mhba
->shost_dev_list
, list
) {
1471 mvumi_handle_hotplug(mhba
, mv_dev
->id
, DEVICE_OFFLINE
);
1472 list_del_init(&mv_dev
->list
);
1473 dev_dbg(&mhba
->pdev
->dev
, "release device(0:%d:0) wwid(%llx)\n",
1474 mv_dev
->id
, mv_dev
->wwid
);
1477 list_for_each_entry_safe(mv_dev
, dev_next
, &mhba
->mhba_dev_list
, list
) {
1478 list_del_init(&mv_dev
->list
);
1479 dev_dbg(&mhba
->pdev
->dev
, "release device(0:%d:0) wwid(%llx)\n",
1480 mv_dev
->id
, mv_dev
->wwid
);
1484 /* detach virtual device */
1485 if (mhba
->pdev
->device
== PCI_DEVICE_ID_MARVELL_MV9580
)
1486 sdev
= scsi_device_lookup(mhba
->shost
, 0,
1487 mhba
->max_target_id
- 1, 0);
1490 scsi_remove_device(sdev
);
1491 scsi_device_put(sdev
);
1494 mutex_unlock(&mhba
->device_lock
);
1497 static void mvumi_rescan_devices(struct mvumi_hba
*mhba
, int id
)
1499 struct scsi_device
*sdev
;
1501 sdev
= scsi_device_lookup(mhba
->shost
, 0, id
, 0);
1503 scsi_rescan_device(sdev
);
1504 scsi_device_put(sdev
);
1508 static int mvumi_match_devices(struct mvumi_hba
*mhba
, int id
, u64 wwid
)
1510 struct mvumi_device
*mv_dev
= NULL
;
1512 list_for_each_entry(mv_dev
, &mhba
->shost_dev_list
, list
) {
1513 if (mv_dev
->wwid
== wwid
) {
1514 if (mv_dev
->id
!= id
) {
1515 dev_err(&mhba
->pdev
->dev
,
1516 "%s has same wwid[%llx] ,"
1517 " but different id[%d %d]\n",
1518 __func__
, mv_dev
->wwid
, mv_dev
->id
, id
);
1521 if (mhba
->pdev
->device
==
1522 PCI_DEVICE_ID_MARVELL_MV9143
)
1523 mvumi_rescan_devices(mhba
, id
);
1531 static void mvumi_remove_devices(struct mvumi_hba
*mhba
, int id
)
1533 struct mvumi_device
*mv_dev
= NULL
, *dev_next
;
1535 list_for_each_entry_safe(mv_dev
, dev_next
,
1536 &mhba
->shost_dev_list
, list
) {
1537 if (mv_dev
->id
== id
) {
1538 dev_dbg(&mhba
->pdev
->dev
,
1539 "detach device(0:%d:0) wwid(%llx) from HOST\n",
1540 mv_dev
->id
, mv_dev
->wwid
);
1541 mvumi_handle_hotplug(mhba
, mv_dev
->id
, DEVICE_OFFLINE
);
1542 list_del_init(&mv_dev
->list
);
1548 static int mvumi_probe_devices(struct mvumi_hba
*mhba
)
1552 struct mvumi_device
*mv_dev
= NULL
;
1553 struct mvumi_cmd
*cmd
= NULL
;
1556 cmd
= mvumi_create_internal_cmd(mhba
, 64);
1560 if (mhba
->pdev
->device
== PCI_DEVICE_ID_MARVELL_MV9143
)
1561 maxid
= mhba
->max_target_id
;
1563 maxid
= mhba
->max_target_id
- 1;
1565 for (id
= 0; id
< maxid
; id
++) {
1566 wwid
= mvumi_inquiry(mhba
, id
, cmd
);
1568 /* device no response, remove it */
1569 mvumi_remove_devices(mhba
, id
);
1571 /* device response, add it */
1572 found
= mvumi_match_devices(mhba
, id
, wwid
);
1574 mvumi_remove_devices(mhba
, id
);
1575 mv_dev
= kzalloc(sizeof(struct mvumi_device
),
1578 dev_err(&mhba
->pdev
->dev
,
1579 "%s alloc mv_dev failed\n",
1584 mv_dev
->wwid
= wwid
;
1585 mv_dev
->sdev
= NULL
;
1586 INIT_LIST_HEAD(&mv_dev
->list
);
1587 list_add_tail(&mv_dev
->list
,
1588 &mhba
->mhba_dev_list
);
1589 dev_dbg(&mhba
->pdev
->dev
,
1590 "probe a new device(0:%d:0)"
1591 " wwid(%llx)\n", id
, mv_dev
->wwid
);
1592 } else if (found
== -1)
1600 mvumi_delete_internal_cmd(mhba
, cmd
);
1605 static int mvumi_rescan_bus(void *data
)
1608 struct mvumi_hba
*mhba
= (struct mvumi_hba
*) data
;
1609 struct mvumi_device
*mv_dev
= NULL
, *dev_next
;
1611 while (!kthread_should_stop()) {
1613 set_current_state(TASK_INTERRUPTIBLE
);
1614 if (!atomic_read(&mhba
->pnp_count
))
1617 atomic_set(&mhba
->pnp_count
, 0);
1618 __set_current_state(TASK_RUNNING
);
1620 mutex_lock(&mhba
->device_lock
);
1621 ret
= mvumi_probe_devices(mhba
);
1623 list_for_each_entry_safe(mv_dev
, dev_next
,
1624 &mhba
->mhba_dev_list
, list
) {
1625 if (mvumi_handle_hotplug(mhba
, mv_dev
->id
,
1627 dev_err(&mhba
->pdev
->dev
,
1628 "%s add device(0:%d:0) failed"
1629 "wwid(%llx) has exist\n",
1631 mv_dev
->id
, mv_dev
->wwid
);
1632 list_del_init(&mv_dev
->list
);
1635 list_move_tail(&mv_dev
->list
,
1636 &mhba
->shost_dev_list
);
1640 mutex_unlock(&mhba
->device_lock
);
1645 static void mvumi_proc_msg(struct mvumi_hba
*mhba
,
1646 struct mvumi_hotplug_event
*param
)
1648 u16 size
= param
->size
;
1649 const unsigned long *ar_bitmap
;
1650 const unsigned long *re_bitmap
;
1653 if (mhba
->fw_flag
& MVUMI_FW_ATTACH
) {
1655 ar_bitmap
= (const unsigned long *) param
->bitmap
;
1656 re_bitmap
= (const unsigned long *) ¶m
->bitmap
[size
>> 3];
1658 mutex_lock(&mhba
->sas_discovery_mutex
);
1660 index
= find_next_zero_bit(ar_bitmap
, size
, index
+ 1);
1663 mvumi_handle_hotplug(mhba
, index
, DEVICE_ONLINE
);
1668 index
= find_next_zero_bit(re_bitmap
, size
, index
+ 1);
1671 mvumi_handle_hotplug(mhba
, index
, DEVICE_OFFLINE
);
1673 mutex_unlock(&mhba
->sas_discovery_mutex
);
1677 static void mvumi_notification(struct mvumi_hba
*mhba
, u8 msg
, void *buffer
)
1679 if (msg
== APICDB1_EVENT_GETEVENT
) {
1681 struct mvumi_driver_event
*param
= NULL
;
1682 struct mvumi_event_req
*er
= buffer
;
1684 if (count
> MAX_EVENTS_RETURNED
) {
1685 dev_err(&mhba
->pdev
->dev
, "event count[0x%x] is bigger"
1686 " than max event count[0x%x].\n",
1687 count
, MAX_EVENTS_RETURNED
);
1690 for (i
= 0; i
< count
; i
++) {
1691 param
= &er
->events
[i
];
1692 mvumi_show_event(mhba
, param
);
1694 } else if (msg
== APICDB1_HOST_GETEVENT
) {
1695 mvumi_proc_msg(mhba
, buffer
);
1699 static int mvumi_get_event(struct mvumi_hba
*mhba
, unsigned char msg
)
1701 struct mvumi_cmd
*cmd
;
1702 struct mvumi_msg_frame
*frame
;
1704 cmd
= mvumi_create_internal_cmd(mhba
, 512);
1708 cmd
->cmd_status
= REQ_STATUS_PENDING
;
1709 atomic_set(&cmd
->sync_cmd
, 0);
1711 frame
->device_id
= 0;
1712 frame
->cmd_flag
= CMD_FLAG_DATA_IN
;
1713 frame
->req_function
= CL_FUN_SCSI_CMD
;
1714 frame
->cdb_length
= MAX_COMMAND_SIZE
;
1715 frame
->data_transfer_length
= sizeof(struct mvumi_event_req
);
1716 memset(frame
->cdb
, 0, MAX_COMMAND_SIZE
);
1717 frame
->cdb
[0] = APICDB0_EVENT
;
1718 frame
->cdb
[1] = msg
;
1719 mvumi_issue_blocked_cmd(mhba
, cmd
);
1721 if (cmd
->cmd_status
!= SAM_STAT_GOOD
)
1722 dev_err(&mhba
->pdev
->dev
, "get event failed, status=0x%x.\n",
1725 mvumi_notification(mhba
, cmd
->frame
->cdb
[1], cmd
->data_buf
);
1727 mvumi_delete_internal_cmd(mhba
, cmd
);
1731 static void mvumi_scan_events(struct work_struct
*work
)
1733 struct mvumi_events_wq
*mu_ev
=
1734 container_of(work
, struct mvumi_events_wq
, work_q
);
1736 mvumi_get_event(mu_ev
->mhba
, mu_ev
->event
);
1740 static void mvumi_launch_events(struct mvumi_hba
*mhba
, u32 isr_status
)
1742 struct mvumi_events_wq
*mu_ev
;
1744 while (isr_status
& (DRBL_BUS_CHANGE
| DRBL_EVENT_NOTIFY
)) {
1745 if (isr_status
& DRBL_BUS_CHANGE
) {
1746 atomic_inc(&mhba
->pnp_count
);
1747 wake_up_process(mhba
->dm_thread
);
1748 isr_status
&= ~(DRBL_BUS_CHANGE
);
1752 mu_ev
= kzalloc(sizeof(*mu_ev
), GFP_ATOMIC
);
1754 INIT_WORK(&mu_ev
->work_q
, mvumi_scan_events
);
1756 mu_ev
->event
= APICDB1_EVENT_GETEVENT
;
1757 isr_status
&= ~(DRBL_EVENT_NOTIFY
);
1758 mu_ev
->param
= NULL
;
1759 schedule_work(&mu_ev
->work_q
);
1764 static void mvumi_handle_clob(struct mvumi_hba
*mhba
)
1766 struct mvumi_rsp_frame
*ob_frame
;
1767 struct mvumi_cmd
*cmd
;
1768 struct mvumi_ob_data
*pool
;
1770 while (!list_empty(&mhba
->free_ob_list
)) {
1771 pool
= list_first_entry(&mhba
->free_ob_list
,
1772 struct mvumi_ob_data
, list
);
1773 list_del_init(&pool
->list
);
1774 list_add_tail(&pool
->list
, &mhba
->ob_data_list
);
1776 ob_frame
= (struct mvumi_rsp_frame
*) &pool
->data
[0];
1777 cmd
= mhba
->tag_cmd
[ob_frame
->tag
];
1779 atomic_dec(&mhba
->fw_outstanding
);
1780 mhba
->tag_cmd
[ob_frame
->tag
] = NULL
;
1781 tag_release_one(mhba
, &mhba
->tag_pool
, ob_frame
->tag
);
1783 mvumi_complete_cmd(mhba
, cmd
, ob_frame
);
1785 mvumi_complete_internal_cmd(mhba
, cmd
, ob_frame
);
1787 mhba
->instancet
->fire_cmd(mhba
, NULL
);
1790 static irqreturn_t
mvumi_isr_handler(int irq
, void *devp
)
1792 struct mvumi_hba
*mhba
= (struct mvumi_hba
*) devp
;
1793 unsigned long flags
;
1795 spin_lock_irqsave(mhba
->shost
->host_lock
, flags
);
1796 if (unlikely(mhba
->instancet
->clear_intr(mhba
) || !mhba
->global_isr
)) {
1797 spin_unlock_irqrestore(mhba
->shost
->host_lock
, flags
);
1801 if (mhba
->global_isr
& mhba
->regs
->int_dl_cpu2pciea
) {
1802 if (mhba
->isr_status
& (DRBL_BUS_CHANGE
| DRBL_EVENT_NOTIFY
))
1803 mvumi_launch_events(mhba
, mhba
->isr_status
);
1804 if (mhba
->isr_status
& DRBL_HANDSHAKE_ISR
) {
1805 dev_warn(&mhba
->pdev
->dev
, "enter handshake again!\n");
1806 mvumi_handshake(mhba
);
1811 if (mhba
->global_isr
& mhba
->regs
->int_comaout
)
1812 mvumi_receive_ob_list_entry(mhba
);
1814 mhba
->global_isr
= 0;
1815 mhba
->isr_status
= 0;
1816 if (mhba
->fw_state
== FW_STATE_STARTED
)
1817 mvumi_handle_clob(mhba
);
1818 spin_unlock_irqrestore(mhba
->shost
->host_lock
, flags
);
1822 static enum mvumi_qc_result
mvumi_send_command(struct mvumi_hba
*mhba
,
1823 struct mvumi_cmd
*cmd
)
1826 struct mvumi_msg_frame
*ib_frame
;
1827 unsigned int frame_len
;
1829 ib_frame
= cmd
->frame
;
1830 if (unlikely(mhba
->fw_state
!= FW_STATE_STARTED
)) {
1831 dev_dbg(&mhba
->pdev
->dev
, "firmware not ready.\n");
1832 return MV_QUEUE_COMMAND_RESULT_NO_RESOURCE
;
1834 if (tag_is_empty(&mhba
->tag_pool
)) {
1835 dev_dbg(&mhba
->pdev
->dev
, "no free tag.\n");
1836 return MV_QUEUE_COMMAND_RESULT_NO_RESOURCE
;
1838 mvumi_get_ib_list_entry(mhba
, &ib_entry
);
1840 cmd
->frame
->tag
= tag_get_one(mhba
, &mhba
->tag_pool
);
1841 cmd
->frame
->request_id
= mhba
->io_seq
++;
1842 cmd
->request_id
= cmd
->frame
->request_id
;
1843 mhba
->tag_cmd
[cmd
->frame
->tag
] = cmd
;
1844 frame_len
= sizeof(*ib_frame
) +
1845 ib_frame
->sg_counts
* sizeof(struct mvumi_sgl
);
1846 if (mhba
->hba_capability
& HS_CAPABILITY_SUPPORT_DYN_SRC
) {
1847 struct mvumi_dyn_list_entry
*dle
;
1850 cpu_to_le32(lower_32_bits(cmd
->frame_phys
));
1851 dle
->src_high_addr
=
1852 cpu_to_le32(upper_32_bits(cmd
->frame_phys
));
1853 dle
->if_length
= (frame_len
>> 2) & 0xFFF;
1855 memcpy(ib_entry
, ib_frame
, frame_len
);
1857 return MV_QUEUE_COMMAND_RESULT_SENT
;
1860 static void mvumi_fire_cmd(struct mvumi_hba
*mhba
, struct mvumi_cmd
*cmd
)
1862 unsigned short num_of_cl_sent
= 0;
1864 enum mvumi_qc_result result
;
1867 list_add_tail(&cmd
->queue_pointer
, &mhba
->waiting_req_list
);
1868 count
= mhba
->instancet
->check_ib_list(mhba
);
1869 if (list_empty(&mhba
->waiting_req_list
) || !count
)
1873 cmd
= list_first_entry(&mhba
->waiting_req_list
,
1874 struct mvumi_cmd
, queue_pointer
);
1875 list_del_init(&cmd
->queue_pointer
);
1876 result
= mvumi_send_command(mhba
, cmd
);
1878 case MV_QUEUE_COMMAND_RESULT_SENT
:
1881 case MV_QUEUE_COMMAND_RESULT_NO_RESOURCE
:
1882 list_add(&cmd
->queue_pointer
, &mhba
->waiting_req_list
);
1883 if (num_of_cl_sent
> 0)
1884 mvumi_send_ib_list_entry(mhba
);
1888 } while (!list_empty(&mhba
->waiting_req_list
) && count
--);
1890 if (num_of_cl_sent
> 0)
1891 mvumi_send_ib_list_entry(mhba
);
1895 * mvumi_enable_intr - Enables interrupts
1896 * @mhba: Adapter soft state
1898 static void mvumi_enable_intr(struct mvumi_hba
*mhba
)
1901 struct mvumi_hw_regs
*regs
= mhba
->regs
;
1903 iowrite32(regs
->int_drbl_int_mask
, regs
->arm_to_pciea_mask_reg
);
1904 mask
= ioread32(regs
->enpointa_mask_reg
);
1905 mask
|= regs
->int_dl_cpu2pciea
| regs
->int_comaout
| regs
->int_comaerr
;
1906 iowrite32(mask
, regs
->enpointa_mask_reg
);
1910 * mvumi_disable_intr -Disables interrupt
1911 * @mhba: Adapter soft state
1913 static void mvumi_disable_intr(struct mvumi_hba
*mhba
)
1916 struct mvumi_hw_regs
*regs
= mhba
->regs
;
1918 iowrite32(0, regs
->arm_to_pciea_mask_reg
);
1919 mask
= ioread32(regs
->enpointa_mask_reg
);
1920 mask
&= ~(regs
->int_dl_cpu2pciea
| regs
->int_comaout
|
1922 iowrite32(mask
, regs
->enpointa_mask_reg
);
1925 static int mvumi_clear_intr(void *extend
)
1927 struct mvumi_hba
*mhba
= (struct mvumi_hba
*) extend
;
1928 unsigned int status
, isr_status
= 0, tmp
= 0;
1929 struct mvumi_hw_regs
*regs
= mhba
->regs
;
1931 status
= ioread32(regs
->main_int_cause_reg
);
1932 if (!(status
& regs
->int_mu
) || status
== 0xFFFFFFFF)
1934 if (unlikely(status
& regs
->int_comaerr
)) {
1935 tmp
= ioread32(regs
->outb_isr_cause
);
1936 if (mhba
->pdev
->device
== PCI_DEVICE_ID_MARVELL_MV9580
) {
1937 if (tmp
& regs
->clic_out_err
) {
1938 iowrite32(tmp
& regs
->clic_out_err
,
1939 regs
->outb_isr_cause
);
1942 if (tmp
& (regs
->clic_in_err
| regs
->clic_out_err
))
1943 iowrite32(tmp
& (regs
->clic_in_err
|
1944 regs
->clic_out_err
),
1945 regs
->outb_isr_cause
);
1947 status
^= mhba
->regs
->int_comaerr
;
1948 /* inbound or outbound parity error, command will timeout */
1950 if (status
& regs
->int_comaout
) {
1951 tmp
= ioread32(regs
->outb_isr_cause
);
1952 if (tmp
& regs
->clic_irq
)
1953 iowrite32(tmp
& regs
->clic_irq
, regs
->outb_isr_cause
);
1955 if (status
& regs
->int_dl_cpu2pciea
) {
1956 isr_status
= ioread32(regs
->arm_to_pciea_drbl_reg
);
1958 iowrite32(isr_status
, regs
->arm_to_pciea_drbl_reg
);
1961 mhba
->global_isr
= status
;
1962 mhba
->isr_status
= isr_status
;
1968 * mvumi_read_fw_status_reg - returns the current FW status value
1969 * @mhba: Adapter soft state
1971 static unsigned int mvumi_read_fw_status_reg(struct mvumi_hba
*mhba
)
1973 unsigned int status
;
1975 status
= ioread32(mhba
->regs
->arm_to_pciea_drbl_reg
);
1977 iowrite32(status
, mhba
->regs
->arm_to_pciea_drbl_reg
);
1981 static struct mvumi_instance_template mvumi_instance_9143
= {
1982 .fire_cmd
= mvumi_fire_cmd
,
1983 .enable_intr
= mvumi_enable_intr
,
1984 .disable_intr
= mvumi_disable_intr
,
1985 .clear_intr
= mvumi_clear_intr
,
1986 .read_fw_status_reg
= mvumi_read_fw_status_reg
,
1987 .check_ib_list
= mvumi_check_ib_list_9143
,
1988 .check_ob_list
= mvumi_check_ob_list_9143
,
1989 .reset_host
= mvumi_reset_host_9143
,
1992 static struct mvumi_instance_template mvumi_instance_9580
= {
1993 .fire_cmd
= mvumi_fire_cmd
,
1994 .enable_intr
= mvumi_enable_intr
,
1995 .disable_intr
= mvumi_disable_intr
,
1996 .clear_intr
= mvumi_clear_intr
,
1997 .read_fw_status_reg
= mvumi_read_fw_status_reg
,
1998 .check_ib_list
= mvumi_check_ib_list_9580
,
1999 .check_ob_list
= mvumi_check_ob_list_9580
,
2000 .reset_host
= mvumi_reset_host_9580
,
2003 static int mvumi_slave_configure(struct scsi_device
*sdev
)
2005 struct mvumi_hba
*mhba
;
2006 unsigned char bitcount
= sizeof(unsigned char) * 8;
2008 mhba
= (struct mvumi_hba
*) sdev
->host
->hostdata
;
2009 if (sdev
->id
>= mhba
->max_target_id
)
2012 mhba
->target_map
[sdev
->id
/ bitcount
] |= (1 << (sdev
->id
% bitcount
));
2017 * mvumi_build_frame - Prepares a direct cdb (DCDB) command
2018 * @mhba: Adapter soft state
2019 * @scmd: SCSI command
2020 * @cmd: Command to be prepared in
2022 * This function prepares CDB commands. These are typcially pass-through
2023 * commands to the devices.
2025 static unsigned char mvumi_build_frame(struct mvumi_hba
*mhba
,
2026 struct scsi_cmnd
*scmd
, struct mvumi_cmd
*cmd
)
2028 struct mvumi_msg_frame
*pframe
;
2031 cmd
->cmd_status
= REQ_STATUS_PENDING
;
2032 pframe
= cmd
->frame
;
2033 pframe
->device_id
= ((unsigned short) scmd
->device
->id
) |
2034 (((unsigned short) scmd
->device
->lun
) << 8);
2035 pframe
->cmd_flag
= 0;
2037 switch (scmd
->sc_data_direction
) {
2039 pframe
->cmd_flag
|= CMD_FLAG_NON_DATA
;
2041 case DMA_FROM_DEVICE
:
2042 pframe
->cmd_flag
|= CMD_FLAG_DATA_IN
;
2045 pframe
->cmd_flag
|= CMD_FLAG_DATA_OUT
;
2047 case DMA_BIDIRECTIONAL
:
2049 dev_warn(&mhba
->pdev
->dev
, "unexpected data direction[%d] "
2050 "cmd[0x%x]\n", scmd
->sc_data_direction
, scmd
->cmnd
[0]);
2054 pframe
->cdb_length
= scmd
->cmd_len
;
2055 memcpy(pframe
->cdb
, scmd
->cmnd
, pframe
->cdb_length
);
2056 pframe
->req_function
= CL_FUN_SCSI_CMD
;
2057 if (scsi_bufflen(scmd
)) {
2058 if (mvumi_make_sgl(mhba
, scmd
, &pframe
->payload
[0],
2059 &pframe
->sg_counts
))
2062 pframe
->data_transfer_length
= scsi_bufflen(scmd
);
2064 pframe
->sg_counts
= 0;
2065 pframe
->data_transfer_length
= 0;
2070 scsi_build_sense(scmd
, 0, ILLEGAL_REQUEST
, 0x24, 0);
2075 * mvumi_queue_command - Queue entry point
2076 * @shost: Scsi host to queue command on
2077 * @scmd: SCSI command to be queued
2079 static int mvumi_queue_command(struct Scsi_Host
*shost
,
2080 struct scsi_cmnd
*scmd
)
2082 struct mvumi_cmd
*cmd
;
2083 struct mvumi_hba
*mhba
;
2084 unsigned long irq_flags
;
2086 spin_lock_irqsave(shost
->host_lock
, irq_flags
);
2088 mhba
= (struct mvumi_hba
*) shost
->hostdata
;
2090 cmd
= mvumi_get_cmd(mhba
);
2091 if (unlikely(!cmd
)) {
2092 spin_unlock_irqrestore(shost
->host_lock
, irq_flags
);
2093 return SCSI_MLQUEUE_HOST_BUSY
;
2096 if (unlikely(mvumi_build_frame(mhba
, scmd
, cmd
)))
2097 goto out_return_cmd
;
2100 mvumi_priv(scmd
)->cmd_priv
= cmd
;
2101 mhba
->instancet
->fire_cmd(mhba
, cmd
);
2102 spin_unlock_irqrestore(shost
->host_lock
, irq_flags
);
2106 mvumi_return_cmd(mhba
, cmd
);
2108 spin_unlock_irqrestore(shost
->host_lock
, irq_flags
);
2112 static enum scsi_timeout_action
mvumi_timed_out(struct scsi_cmnd
*scmd
)
2114 struct mvumi_cmd
*cmd
= mvumi_priv(scmd
)->cmd_priv
;
2115 struct Scsi_Host
*host
= scmd
->device
->host
;
2116 struct mvumi_hba
*mhba
= shost_priv(host
);
2117 unsigned long flags
;
2119 spin_lock_irqsave(mhba
->shost
->host_lock
, flags
);
2121 if (mhba
->tag_cmd
[cmd
->frame
->tag
]) {
2122 mhba
->tag_cmd
[cmd
->frame
->tag
] = NULL
;
2123 tag_release_one(mhba
, &mhba
->tag_pool
, cmd
->frame
->tag
);
2125 if (!list_empty(&cmd
->queue_pointer
))
2126 list_del_init(&cmd
->queue_pointer
);
2128 atomic_dec(&mhba
->fw_outstanding
);
2130 scmd
->result
= (DID_ABORT
<< 16);
2131 mvumi_priv(scmd
)->cmd_priv
= NULL
;
2132 if (scsi_bufflen(scmd
)) {
2133 dma_unmap_sg(&mhba
->pdev
->dev
, scsi_sglist(scmd
),
2134 scsi_sg_count(scmd
),
2135 scmd
->sc_data_direction
);
2137 mvumi_return_cmd(mhba
, cmd
);
2138 spin_unlock_irqrestore(mhba
->shost
->host_lock
, flags
);
2140 return SCSI_EH_NOT_HANDLED
;
2144 mvumi_bios_param(struct scsi_device
*sdev
, struct block_device
*bdev
,
2145 sector_t capacity
, int geom
[])
2153 tmp
= heads
* sectors
;
2154 cylinders
= capacity
;
2155 sector_div(cylinders
, tmp
);
2157 if (capacity
>= 0x200000) {
2160 tmp
= heads
* sectors
;
2161 cylinders
= capacity
;
2162 sector_div(cylinders
, tmp
);
2166 geom
[2] = cylinders
;
2171 static const struct scsi_host_template mvumi_template
= {
2173 .module
= THIS_MODULE
,
2174 .name
= "Marvell Storage Controller",
2175 .slave_configure
= mvumi_slave_configure
,
2176 .queuecommand
= mvumi_queue_command
,
2177 .eh_timed_out
= mvumi_timed_out
,
2178 .eh_host_reset_handler
= mvumi_host_reset
,
2179 .bios_param
= mvumi_bios_param
,
2180 .dma_boundary
= PAGE_SIZE
- 1,
2182 .cmd_size
= sizeof(struct mvumi_cmd_priv
),
static int mvumi_cfg_hw_reg(struct mvumi_hba *mhba)
{
	void *base = NULL;
	struct mvumi_hw_regs *regs;

	switch (mhba->pdev->device) {
	case PCI_DEVICE_ID_MARVELL_MV9143:
		mhba->mmio = mhba->base_addr[0];
		base = mhba->mmio;
		if (!mhba->regs) {
			mhba->regs = kzalloc(sizeof(*regs), GFP_KERNEL);
			if (mhba->regs == NULL)
				return -ENOMEM;
		}
		regs = mhba->regs;

		regs->ctrl_sts_reg		= base + 0x20104;
		regs->rstoutn_mask_reg		= base + 0x20108;
		regs->sys_soft_rst_reg		= base + 0x2010C;
		regs->main_int_cause_reg	= base + 0x20200;
		regs->enpointa_mask_reg		= base + 0x2020C;
		regs->rstoutn_en_reg		= base + 0xF1400;

		regs->pciea_to_arm_drbl_reg	= base + 0x20400;
		regs->arm_to_pciea_drbl_reg	= base + 0x20408;
		regs->arm_to_pciea_mask_reg	= base + 0x2040C;
		regs->pciea_to_arm_msg0	= base + 0x20430;
		regs->pciea_to_arm_msg1	= base + 0x20434;
		regs->arm_to_pciea_msg0	= base + 0x20438;
		regs->arm_to_pciea_msg1	= base + 0x2043C;

		/* For Message Unit */
		regs->inb_aval_count_basel	= base + 0x508;
		regs->inb_aval_count_baseh	= base + 0x50C;
		regs->inb_write_pointer	= base + 0x518;
		regs->inb_read_pointer		= base + 0x51C;
		regs->outb_coal_cfg		= base + 0x568;
		regs->outb_copy_basel		= base + 0x5B0;
		regs->outb_copy_baseh		= base + 0x5B4;
		regs->outb_copy_pointer	= base + 0x544;
		regs->outb_read_pointer	= base + 0x548;
		regs->outb_isr_cause		= base + 0x560;
		regs->outb_coal_cfg		= base + 0x568;
		/* Bit setting for HW */
		regs->int_comaout		= 1 << 8;
		regs->int_comaerr		= 1 << 6;
		regs->int_dl_cpu2pciea		= 1 << 1;
		regs->cl_pointer_toggle	= 1 << 12;
		regs->clic_irq			= 1 << 1;
		regs->clic_in_err		= 1 << 8;
		regs->clic_out_err		= 1 << 12;
		regs->cl_slot_num_mask		= 0xFFF;
		regs->int_drbl_int_mask	= 0x3FFFFFFF;
		regs->int_mu = regs->int_dl_cpu2pciea | regs->int_comaout |
							regs->int_comaerr;
		break;
	case PCI_DEVICE_ID_MARVELL_MV9580:
		mhba->mmio = mhba->base_addr[2];
		base = mhba->mmio;
		if (!mhba->regs) {
			mhba->regs = kzalloc(sizeof(*regs), GFP_KERNEL);
			if (mhba->regs == NULL)
				return -ENOMEM;
		}
		regs = mhba->regs;

		regs->ctrl_sts_reg		= base + 0x20104;
		regs->rstoutn_mask_reg		= base + 0x1010C;
		regs->sys_soft_rst_reg		= base + 0x10108;
		regs->main_int_cause_reg	= base + 0x10200;
		regs->enpointa_mask_reg	= base + 0x1020C;
		regs->rstoutn_en_reg		= base + 0xF1400;

		regs->pciea_to_arm_drbl_reg	= base + 0x10460;
		regs->arm_to_pciea_drbl_reg	= base + 0x10480;
		regs->arm_to_pciea_mask_reg	= base + 0x10484;
		regs->pciea_to_arm_msg0	= base + 0x10400;
		regs->pciea_to_arm_msg1	= base + 0x10404;
		regs->arm_to_pciea_msg0	= base + 0x10420;
		regs->arm_to_pciea_msg1	= base + 0x10424;

		regs->reset_request		= base + 0x10108;
		regs->reset_enable		= base + 0x1010c;

		/* For Message Unit */
		regs->inb_aval_count_basel	= base + 0x4008;
		regs->inb_aval_count_baseh	= base + 0x400C;
		regs->inb_write_pointer	= base + 0x4018;
		regs->inb_read_pointer		= base + 0x401C;
		regs->outb_copy_basel		= base + 0x4058;
		regs->outb_copy_baseh		= base + 0x405C;
		regs->outb_copy_pointer	= base + 0x406C;
		regs->outb_read_pointer	= base + 0x4070;
		regs->outb_coal_cfg		= base + 0x4080;
		regs->outb_isr_cause		= base + 0x4088;
		/* Bit setting for HW */
		regs->int_comaout		= 1 << 4;
		regs->int_dl_cpu2pciea		= 1 << 12;
		regs->int_comaerr		= 1 << 29;
		regs->cl_pointer_toggle	= 1 << 14;
		regs->cl_slot_num_mask		= 0x3FFF;
		regs->clic_irq			= 1 << 0;
		regs->clic_out_err		= 1 << 1;
		regs->int_drbl_int_mask	= 0x3FFFFFFF;
		regs->int_mu = regs->int_dl_cpu2pciea | regs->int_comaout;
		break;
	default:
		return -1;
	}

	return 0;
}

/**
 * mvumi_init_fw -	Initializes the FW
 * @mhba:		Adapter soft state
 *
 * This is the main function for initializing firmware.
 */
static int mvumi_init_fw(struct mvumi_hba *mhba)
{
	int ret = 0;

	if (pci_request_regions(mhba->pdev, MV_DRIVER_NAME)) {
		dev_err(&mhba->pdev->dev, "IO memory region busy!\n");
		return -EBUSY;
	}
	ret = mvumi_map_pci_addr(mhba->pdev, mhba->base_addr);
	if (ret)
		goto fail_ioremap;

	switch (mhba->pdev->device) {
	case PCI_DEVICE_ID_MARVELL_MV9143:
		mhba->instancet = &mvumi_instance_9143;
		mhba->max_sge = MVUMI_MAX_SG_ENTRY;
		mhba->request_id_enabled = 1;
		break;
	case PCI_DEVICE_ID_MARVELL_MV9580:
		mhba->instancet = &mvumi_instance_9580;
		mhba->max_sge = MVUMI_MAX_SG_ENTRY;
		break;
	default:
		dev_err(&mhba->pdev->dev, "device 0x%x not supported!\n",
						mhba->pdev->device);
		mhba->instancet = NULL;
		ret = -EINVAL;
		goto fail_alloc_mem;
	}
	dev_dbg(&mhba->pdev->dev, "device id : %04X is found.\n",
						mhba->pdev->device);
	ret = mvumi_cfg_hw_reg(mhba);
	if (ret) {
		dev_err(&mhba->pdev->dev,
			"failed to allocate memory for reg\n");
		ret = -ENOMEM;
		goto fail_alloc_mem;
	}
	mhba->handshake_page = dma_alloc_coherent(&mhba->pdev->dev,
		HSP_MAX_SIZE, &mhba->handshake_page_phys, GFP_KERNEL);
	if (!mhba->handshake_page) {
		dev_err(&mhba->pdev->dev,
			"failed to allocate memory for handshake\n");
		ret = -ENOMEM;
		goto fail_alloc_page;
	}

	if (mvumi_start(mhba)) {
		ret = -EINVAL;
		goto fail_ready_state;
	}
	ret = mvumi_alloc_cmds(mhba);
	if (ret)
		goto fail_ready_state;

	return 0;

fail_ready_state:
	mvumi_release_mem_resource(mhba);
	dma_free_coherent(&mhba->pdev->dev, HSP_MAX_SIZE,
		mhba->handshake_page, mhba->handshake_page_phys);
fail_alloc_page:
fail_alloc_mem:
	mvumi_unmap_pci_addr(mhba->pdev, mhba->base_addr);
fail_ioremap:
	pci_release_regions(mhba->pdev);

	return ret;
}

/**
 * mvumi_io_attach -	Attaches this driver to SCSI mid-layer
 * @mhba:		Adapter soft state
 */
static int mvumi_io_attach(struct mvumi_hba *mhba)
{
	struct Scsi_Host *host = mhba->shost;
	struct scsi_device *sdev = NULL;
	int ret;
	unsigned int max_sg = (mhba->ib_max_size -
		sizeof(struct mvumi_msg_frame)) / sizeof(struct mvumi_sgl);

	host->irq = mhba->pdev->irq;
	host->unique_id = mhba->unique_id;
	host->can_queue = (mhba->max_io - 1) ? (mhba->max_io - 1) : 1;
	host->sg_tablesize = mhba->max_sge > max_sg ? max_sg : mhba->max_sge;
	host->max_sectors = mhba->max_transfer_size / 512;
	host->cmd_per_lun = (mhba->max_io - 1) ? (mhba->max_io - 1) : 1;
	host->max_id = mhba->max_target_id;
	host->max_cmd_len = MAX_COMMAND_SIZE;

	ret = scsi_add_host(host, &mhba->pdev->dev);
	if (ret) {
		dev_err(&mhba->pdev->dev, "scsi_add_host failed\n");
		return ret;
	}
	mhba->fw_flag |= MVUMI_FW_ATTACH;

	mutex_lock(&mhba->sas_discovery_mutex);
	if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580)
		ret = scsi_add_device(host, 0, mhba->max_target_id - 1, 0);
	else
		ret = 0;
	if (ret) {
		dev_err(&mhba->pdev->dev, "add virtual device failed\n");
		mutex_unlock(&mhba->sas_discovery_mutex);
		goto fail_add_device;
	}

	mhba->dm_thread = kthread_create(mvumi_rescan_bus,
						mhba, "mvumi_scanthread");
	if (IS_ERR(mhba->dm_thread)) {
		dev_err(&mhba->pdev->dev,
			"failed to create device scan thread\n");
		ret = PTR_ERR(mhba->dm_thread);
		mutex_unlock(&mhba->sas_discovery_mutex);
		goto fail_create_thread;
	}
	atomic_set(&mhba->pnp_count, 1);
	wake_up_process(mhba->dm_thread);

	mutex_unlock(&mhba->sas_discovery_mutex);
	return 0;

fail_create_thread:
	if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580)
		sdev = scsi_device_lookup(mhba->shost, 0,
						mhba->max_target_id - 1, 0);
	if (sdev) {
		scsi_remove_device(sdev);
		scsi_device_put(sdev);
	}
fail_add_device:
	scsi_remove_host(mhba->shost);
	return ret;
}

/**
 * mvumi_probe_one -	PCI hotplug entry point
 * @pdev:		PCI device structure
 * @id:			PCI ids of supported hotplugged adapter
 */
static int mvumi_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct Scsi_Host *host;
	struct mvumi_hba *mhba;
	int ret;

	dev_dbg(&pdev->dev, " %#4.04x:%#4.04x:%#4.04x:%#4.04x: ",
			pdev->vendor, pdev->device, pdev->subsystem_vendor,
			pdev->subsystem_device);

	ret = pci_enable_device(pdev);
	if (ret)
		return ret;

	ret = mvumi_pci_set_master(pdev);
	if (ret)
		goto fail_set_dma_mask;

	host = scsi_host_alloc(&mvumi_template, sizeof(*mhba));
	if (!host) {
		dev_err(&pdev->dev, "scsi_host_alloc failed\n");
		ret = -ENOMEM;
		goto fail_alloc_instance;
	}
	mhba = shost_priv(host);

	INIT_LIST_HEAD(&mhba->cmd_pool);
	INIT_LIST_HEAD(&mhba->ob_data_list);
	INIT_LIST_HEAD(&mhba->free_ob_list);
	INIT_LIST_HEAD(&mhba->res_list);
	INIT_LIST_HEAD(&mhba->waiting_req_list);
	mutex_init(&mhba->device_lock);
	INIT_LIST_HEAD(&mhba->mhba_dev_list);
	INIT_LIST_HEAD(&mhba->shost_dev_list);
	atomic_set(&mhba->fw_outstanding, 0);
	init_waitqueue_head(&mhba->int_cmd_wait_q);
	mutex_init(&mhba->sas_discovery_mutex);

	mhba->pdev = pdev;
	mhba->shost = host;
	mhba->unique_id = pci_dev_id(pdev);

	ret = mvumi_init_fw(mhba);
	if (ret)
		goto fail_init_fw;

	ret = request_irq(mhba->pdev->irq, mvumi_isr_handler, IRQF_SHARED,
				"mvumi", mhba);
	if (ret) {
		dev_err(&pdev->dev, "failed to register IRQ\n");
		goto fail_init_irq;
	}

	mhba->instancet->enable_intr(mhba);
	pci_set_drvdata(pdev, mhba);

	ret = mvumi_io_attach(mhba);
	if (ret)
		goto fail_io_attach;

	mvumi_backup_bar_addr(mhba);
	dev_dbg(&pdev->dev, "probe mvumi driver successfully.\n");

	return 0;

fail_io_attach:
	mhba->instancet->disable_intr(mhba);
	free_irq(mhba->pdev->irq, mhba);
fail_init_irq:
	mvumi_release_fw(mhba);
fail_init_fw:
	scsi_host_put(host);

fail_alloc_instance:
fail_set_dma_mask:
	pci_disable_device(pdev);

	return ret;
}

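/*
 * PCI remove callback: stop the scan thread, detach exported devices,
 * flush the controller cache and release IRQ, firmware and PCI resources
 * in the reverse order of mvumi_probe_one().
 */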
static void mvumi_detach_one(struct pci_dev *pdev)
{
	struct Scsi_Host *host;
	struct mvumi_hba *mhba;

	mhba = pci_get_drvdata(pdev);
	if (mhba->dm_thread) {
		kthread_stop(mhba->dm_thread);
		mhba->dm_thread = NULL;
	}

	mvumi_detach_devices(mhba);
	host = mhba->shost;
	scsi_remove_host(mhba->shost);
	mvumi_flush_cache(mhba);

	mhba->instancet->disable_intr(mhba);
	free_irq(mhba->pdev->irq, mhba);
	mvumi_release_fw(mhba);
	scsi_host_put(host);
	pci_disable_device(pdev);
	dev_dbg(&pdev->dev, "driver is removed!\n");
}

/**
 * mvumi_shutdown -	Shutdown entry point
 * @pdev:		PCI device structure
 */
static void mvumi_shutdown(struct pci_dev *pdev)
{
	struct mvumi_hba *mhba = pci_get_drvdata(pdev);

	mvumi_flush_cache(mhba);
}

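/*
 * Power-management hooks: suspend flushes the controller cache and quiesces
 * interrupts before the BARs are unmapped; resume remaps the BARs, rebuilds
 * the register map and restarts the firmware via mvumi_start().
 */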
static int __maybe_unused mvumi_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct mvumi_hba *mhba = pci_get_drvdata(pdev);

	mvumi_flush_cache(mhba);

	mhba->instancet->disable_intr(mhba);
	mvumi_unmap_pci_addr(pdev, mhba->base_addr);

	return 0;
}

static int __maybe_unused mvumi_resume(struct device *dev)
{
	int ret;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct mvumi_hba *mhba = pci_get_drvdata(pdev);

	ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	ret = mvumi_map_pci_addr(mhba->pdev, mhba->base_addr);
	if (ret)
		goto release_regions;

	if (mvumi_cfg_hw_reg(mhba)) {
		ret = -EINVAL;
		goto unmap_pci_addr;
	}

	mhba->mmio = mhba->base_addr[0];

	if (mvumi_start(mhba)) {
		ret = -EINVAL;
		goto unmap_pci_addr;
	}

	mhba->instancet->enable_intr(mhba);

	return 0;

unmap_pci_addr:
	mvumi_unmap_pci_addr(pdev, mhba->base_addr);
release_regions:
	pci_release_regions(pdev);

	return ret;
}

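/* Driver registration glue: PM ops and the PCI driver/device table hookup. */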
static SIMPLE_DEV_PM_OPS(mvumi_pm_ops, mvumi_suspend, mvumi_resume);

static struct pci_driver mvumi_pci_driver = {

	.name = MV_DRIVER_NAME,
	.id_table = mvumi_pci_table,
	.probe = mvumi_probe_one,
	.remove = mvumi_detach_one,
	.shutdown = mvumi_shutdown,
	.driver.pm = &mvumi_pm_ops,
};

module_pci_driver(mvumi_pci_driver);