// SPDX-License-Identifier: GPL-2.0-only
/*
 * Marvell 88SE64xx/88SE94xx pci init
 *
 * Copyright 2007 Red Hat, Inc.
 * Copyright 2008 Marvell. <kewei@marvell.com>
 * Copyright 2009-2011 Marvell. <yuxiangl@marvell.com>
 */

#include "mv_sas.h"

int interrupt_coalescing = 0x80;

static struct scsi_transport_template *mvs_stt;

static const struct mvs_chip_info mvs_chips[] = {
        [chip_6320] = { 1, 2, 0x400, 17, 16, 6,  9, &mvs_64xx_dispatch, },
        [chip_6440] = { 1, 4, 0x400, 17, 16, 6,  9, &mvs_64xx_dispatch, },
        [chip_6485] = { 1, 8, 0x800, 33, 32, 6, 10, &mvs_64xx_dispatch, },
        [chip_9180] = { 2, 4, 0x800, 17, 64, 8,  9, &mvs_94xx_dispatch, },
        [chip_9480] = { 2, 4, 0x800, 17, 64, 8,  9, &mvs_94xx_dispatch, },
        [chip_9445] = { 1, 4, 0x800, 17, 64, 8, 11, &mvs_94xx_dispatch, },
        [chip_9485] = { 2, 4, 0x800, 17, 64, 8, 11, &mvs_94xx_dispatch, },
        [chip_1300] = { 1, 4, 0x400, 17, 16, 6,  9, &mvs_64xx_dispatch, },
        [chip_1320] = { 2, 4, 0x800, 17, 64, 8,  9, &mvs_94xx_dispatch, },
};

static const struct attribute_group *mvst_host_groups[];
static const struct attribute_group *mvst_sdev_groups[];

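/* SCSI host template shared by all supported chip variants. */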
static const struct scsi_host_template mvs_sht = {
        LIBSAS_SHT_BASE
        .scan_finished          = mvs_scan_finished,
        .scan_start             = mvs_scan_start,
        .sg_tablesize           = SG_ALL,
        .shost_groups           = mvst_host_groups,
        .sdev_groups            = mvst_sdev_groups,
        .track_queue_depth      = 1,
};

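/*
 * libsas domain callbacks: discovery, task submission, resets and GPIO
 * requests are routed to the driver's mvs_* handlers.
 */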
static struct sas_domain_function_template mvs_transport_ops = {
        .lldd_dev_found         = mvs_dev_found,
        .lldd_dev_gone          = mvs_dev_gone,
        .lldd_execute_task      = mvs_queue_command,
        .lldd_control_phy       = mvs_phy_control,

        .lldd_abort_task        = mvs_abort_task,
        .lldd_abort_task_set    = sas_abort_task_set,
        .lldd_clear_task_set    = sas_clear_task_set,
        .lldd_I_T_nexus_reset   = mvs_I_T_nexus_reset,
        .lldd_lu_reset          = mvs_lu_reset,
        .lldd_query_task        = mvs_query_task,
        .lldd_port_formed       = mvs_port_formed,
        .lldd_port_deformed     = mvs_port_deformed,

        .lldd_write_gpio        = mvs_gpio_write,
};

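/* Initialize the libsas view of one phy before the host is registered. */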
static void mvs_phy_init(struct mvs_info *mvi, int phy_id)
{
        struct mvs_phy *phy = &mvi->phy[phy_id];
        struct asd_sas_phy *sas_phy = &phy->sas_phy;

        phy->mvi = mvi;
        phy->port = NULL;
        timer_setup(&phy->timer, NULL, 0);
        sas_phy->enabled = (phy_id < mvi->chip->n_phy) ? 1 : 0;
        sas_phy->iproto = SAS_PROTOCOL_ALL;
        sas_phy->tproto = 0;
        sas_phy->role = PHY_ROLE_INITIATOR;
        sas_phy->oob_mode = OOB_NOT_CONNECTED;
        sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;

        sas_phy->id = phy_id;
        sas_phy->sas_addr = &mvi->sas_addr[0];
        sas_phy->frame_rcvd = &phy->frame_rcvd[0];
        sas_phy->ha = (struct sas_ha_struct *)mvi->shost->hostdata;
        sas_phy->lldd_phy = phy;
}

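/* Undo mvs_alloc()/mvs_pci_alloc(): free the DMA rings, trash buckets and per-host bookkeeping. */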
static void mvs_free(struct mvs_info *mvi)
{
        struct mvs_wq *mwq;
        int slot_nr;

        if (!mvi)
                return;

        if (mvi->flags & MVF_FLAG_SOC)
                slot_nr = MVS_SOC_SLOTS;
        else
                slot_nr = MVS_CHIP_SLOT_SZ;

        dma_pool_destroy(mvi->dma_pool);

        if (mvi->tx)
                dma_free_coherent(mvi->dev,
                                  sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ,
                                  mvi->tx, mvi->tx_dma);
        if (mvi->rx_fis)
                dma_free_coherent(mvi->dev, MVS_RX_FISL_SZ,
                                  mvi->rx_fis, mvi->rx_fis_dma);
        if (mvi->rx)
                dma_free_coherent(mvi->dev,
                                  sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1),
                                  mvi->rx, mvi->rx_dma);
        if (mvi->slot)
                dma_free_coherent(mvi->dev,
                                  sizeof(*mvi->slot) * slot_nr,
                                  mvi->slot, mvi->slot_dma);

        if (mvi->bulk_buffer)
                dma_free_coherent(mvi->dev, TRASH_BUCKET_SIZE,
                                  mvi->bulk_buffer, mvi->bulk_buffer_dma);
        if (mvi->bulk_buffer1)
                dma_free_coherent(mvi->dev, TRASH_BUCKET_SIZE,
                                  mvi->bulk_buffer1, mvi->bulk_buffer_dma1);

        MVS_CHIP_DISP->chip_iounmap(mvi);
        if (mvi->shost)
                scsi_host_put(mvi->shost);
        list_for_each_entry(mwq, &mvi->wq_list, entry)
                cancel_delayed_work(&mwq->work_q);
        kfree(mvi->rsvd_tags);
        kfree(mvi);
}

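/* Deferred (tasklet) interrupt handling, used when CONFIG_SCSI_MVSAS_TASKLET is set. */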
#ifdef CONFIG_SCSI_MVSAS_TASKLET
static void mvs_tasklet(unsigned long opaque)
{
        u32 stat;
        u16 core_nr, i = 0;

        struct mvs_info *mvi;
        struct sas_ha_struct *sha = (struct sas_ha_struct *)opaque;

        core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
        mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0];

        if (unlikely(!mvi))
                BUG_ON(1);

        stat = MVS_CHIP_DISP->isr_status(mvi, mvi->pdev->irq);
        if (!stat)
                goto out;

        for (i = 0; i < core_nr; i++) {
                mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i];
                MVS_CHIP_DISP->isr(mvi, mvi->pdev->irq, stat);
        }
out:
        MVS_CHIP_DISP->interrupt_enable(mvi);
}
#endif

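/*
 * Hard IRQ handler: read the interrupt status once, then either kick the
 * tasklet or service each host core directly.
 */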
static irqreturn_t mvs_interrupt(int irq, void *opaque)
{
        u32 stat;
        struct mvs_info *mvi;
        struct sas_ha_struct *sha = opaque;
#ifndef CONFIG_SCSI_MVSAS_TASKLET
        u32 i;
        u32 core_nr;

        core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
#endif

        mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0];

        if (unlikely(!mvi))
                return IRQ_NONE;
#ifdef CONFIG_SCSI_MVSAS_TASKLET
        MVS_CHIP_DISP->interrupt_disable(mvi);
#endif

        stat = MVS_CHIP_DISP->isr_status(mvi, irq);
        if (!stat) {
#ifdef CONFIG_SCSI_MVSAS_TASKLET
                MVS_CHIP_DISP->interrupt_enable(mvi);
#endif
                return IRQ_NONE;
        }

#ifdef CONFIG_SCSI_MVSAS_TASKLET
        tasklet_schedule(&((struct mvs_prv_info *)sha->lldd_ha)->mv_tasklet);
#else
        for (i = 0; i < core_nr; i++) {
                mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i];
                MVS_CHIP_DISP->isr(mvi, irq, stat);
        }
#endif

        return IRQ_HANDLED;
}

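/*
 * Allocate per-host state and the coherent DMA areas (TX/RX rings, FIS
 * buffer, command slots) plus the slot DMA pool.
 */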
static int mvs_alloc(struct mvs_info *mvi, struct Scsi_Host *shost)
{
        int i = 0, slot_nr;
        char pool_name[32];

        if (mvi->flags & MVF_FLAG_SOC)
                slot_nr = MVS_SOC_SLOTS;
        else
                slot_nr = MVS_CHIP_SLOT_SZ;

        spin_lock_init(&mvi->lock);
        for (i = 0; i < mvi->chip->n_phy; i++) {
                mvs_phy_init(mvi, i);
                mvi->port[i].wide_port_phymap = 0;
                mvi->port[i].port_attached = 0;
                INIT_LIST_HEAD(&mvi->port[i].list);
        }
        for (i = 0; i < MVS_MAX_DEVICES; i++) {
                mvi->devices[i].taskfileset = MVS_ID_NOT_MAPPED;
                mvi->devices[i].dev_type = SAS_PHY_UNUSED;
                mvi->devices[i].device_id = i;
                mvi->devices[i].dev_status = MVS_DEV_NORMAL;
        }

        /*
         * alloc and init our DMA areas
         */
        mvi->tx = dma_alloc_coherent(mvi->dev,
                                     sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ,
                                     &mvi->tx_dma, GFP_KERNEL);
        if (!mvi->tx)
                goto err_out;
        mvi->rx_fis = dma_alloc_coherent(mvi->dev, MVS_RX_FISL_SZ,
                                         &mvi->rx_fis_dma, GFP_KERNEL);
        if (!mvi->rx_fis)
                goto err_out;

        mvi->rx = dma_alloc_coherent(mvi->dev,
                                     sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1),
                                     &mvi->rx_dma, GFP_KERNEL);
        if (!mvi->rx)
                goto err_out;
        mvi->rx[0] = cpu_to_le32(0xfff);
        mvi->rx_cons = 0xfff;

        mvi->slot = dma_alloc_coherent(mvi->dev,
                                       sizeof(*mvi->slot) * slot_nr,
                                       &mvi->slot_dma, GFP_KERNEL);
        if (!mvi->slot)
                goto err_out;

        mvi->bulk_buffer = dma_alloc_coherent(mvi->dev,
                                              TRASH_BUCKET_SIZE,
                                              &mvi->bulk_buffer_dma, GFP_KERNEL);
        if (!mvi->bulk_buffer)
                goto err_out;

        mvi->bulk_buffer1 = dma_alloc_coherent(mvi->dev,
                                               TRASH_BUCKET_SIZE,
                                               &mvi->bulk_buffer_dma1, GFP_KERNEL);
        if (!mvi->bulk_buffer1)
                goto err_out;

        sprintf(pool_name, "%s%d", "mvs_dma_pool", mvi->id);
        mvi->dma_pool = dma_pool_create(pool_name, &mvi->pdev->dev,
                                        MVS_SLOT_BUF_SZ, 16, 0);
        if (!mvi->dma_pool) {
                printk(KERN_DEBUG "failed to create dma pool %s.\n", pool_name);
                goto err_out;
        }

        return 0;
err_out:
        return 1;
}

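/* Map the chip's register BARs; the extended BAR is optional on some parts. */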
int mvs_ioremap(struct mvs_info *mvi, int bar, int bar_ex)
{
        unsigned long res_start, res_len, res_flag_ex = 0;
        struct pci_dev *pdev = mvi->pdev;

        if (bar_ex != -1) {
                /*
                 * ioremap main and peripheral registers
                 */
                res_start = pci_resource_start(pdev, bar_ex);
                res_len = pci_resource_len(pdev, bar_ex);
                if (!res_start || !res_len)
                        goto err_out;

                res_flag_ex = pci_resource_flags(pdev, bar_ex);
                if (res_flag_ex & IORESOURCE_MEM)
                        mvi->regs_ex = ioremap(res_start, res_len);
                else
                        mvi->regs_ex = (void *)res_start;
                if (!mvi->regs_ex)
                        goto err_out;
        }

        res_start = pci_resource_start(pdev, bar);
        res_len = pci_resource_len(pdev, bar);
        if (!res_start || !res_len) {
                iounmap(mvi->regs_ex);
                mvi->regs_ex = NULL;
                goto err_out;
        }

        mvi->regs = ioremap(res_start, res_len);

        if (!mvi->regs) {
                if (mvi->regs_ex && (res_flag_ex & IORESOURCE_MEM))
                        iounmap(mvi->regs_ex);
                mvi->regs_ex = NULL;
                goto err_out;
        }

        return 0;
err_out:
        return -1;
}

void mvs_iounmap(void __iomem *regs)
{
        iounmap(regs);
}

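/* Allocate and set up one mvs_info (one host core) hanging off the shared Scsi_Host. */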
static struct mvs_info *mvs_pci_alloc(struct pci_dev *pdev,
                                const struct pci_device_id *ent,
                                struct Scsi_Host *shost, unsigned int id)
{
        struct mvs_info *mvi = NULL;
        struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);

        mvi = kzalloc(sizeof(*mvi) +
                (1L << mvs_chips[ent->driver_data].slot_width) *
                sizeof(struct mvs_slot_info), GFP_KERNEL);
        if (!mvi)
                return NULL;

        mvi->pdev = pdev;
        mvi->dev = &pdev->dev;
        mvi->chip_id = ent->driver_data;
        mvi->chip = &mvs_chips[mvi->chip_id];
        INIT_LIST_HEAD(&mvi->wq_list);

        ((struct mvs_prv_info *)sha->lldd_ha)->mvi[id] = mvi;
        ((struct mvs_prv_info *)sha->lldd_ha)->n_phy = mvi->chip->n_phy;

        mvi->id = id;
        mvi->sas = sha;
        mvi->shost = shost;

        mvi->rsvd_tags = bitmap_zalloc(MVS_RSVD_SLOTS, GFP_KERNEL);
        if (!mvi->rsvd_tags)
                goto err_out;

        if (MVS_CHIP_DISP->chip_ioremap(mvi))
                goto err_out;
        if (!mvs_alloc(mvi, shost))
                return mvi;
err_out:
        mvs_free(mvi);
        return NULL;
}

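/* Prefer 64-bit DMA addressing and fall back to 32-bit if that fails. */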
static int pci_go_64(struct pci_dev *pdev)
{
        int rc;

        rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
        if (rc) {
                rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
                if (rc) {
                        dev_printk(KERN_ERR, &pdev->dev,
                                   "32-bit DMA enable failed\n");
                        return rc;
                }
        }

        return rc;
}

static int mvs_prep_sas_ha_init(struct Scsi_Host *shost,
                                const struct mvs_chip_info *chip_info)
{
        int phy_nr, port_nr; unsigned short core_nr;
        struct asd_sas_phy **arr_phy;
        struct asd_sas_port **arr_port;
        struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);

        core_nr = chip_info->n_host;
        phy_nr = core_nr * chip_info->n_phy;
        port_nr = phy_nr;

        memset(sha, 0x00, sizeof(struct sas_ha_struct));
        arr_phy = kcalloc(phy_nr, sizeof(void *), GFP_KERNEL);
        arr_port = kcalloc(port_nr, sizeof(void *), GFP_KERNEL);
        if (!arr_phy || !arr_port)
                goto exit_free;

        sha->sas_phy = arr_phy;
        sha->sas_port = arr_port;

        sha->lldd_ha = kzalloc(sizeof(struct mvs_prv_info), GFP_KERNEL);
        if (!sha->lldd_ha)
                goto exit_free;

        ((struct mvs_prv_info *)sha->lldd_ha)->n_host = core_nr;

        shost->transportt = mvs_stt;
        shost->max_id = MVS_MAX_DEVICES;
        shost->max_lun = ~0;
        shost->max_channel = 1;
        shost->max_cmd_len = 16;

        return 0;
exit_free:
        kfree(arr_phy);
        kfree(arr_port);
        return -1;
}

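/*
 * Second-stage sas_ha setup: point libsas at every core's phys/ports and
 * size the host queue.
 */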
static void mvs_post_sas_ha_init(struct Scsi_Host *shost,
                                 const struct mvs_chip_info *chip_info)
{
        int can_queue, i = 0, j = 0;
        struct mvs_info *mvi = NULL;
        struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
        unsigned short nr_core = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;

        for (j = 0; j < nr_core; j++) {
                mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[j];
                for (i = 0; i < chip_info->n_phy; i++) {
                        sha->sas_phy[j * chip_info->n_phy + i] =
                                &mvi->phy[i].sas_phy;
                        sha->sas_port[j * chip_info->n_phy + i] =
                                &mvi->port[i].sas_port;
                }
        }

        sha->sas_ha_name = DRV_NAME;
        sha->dev = mvi->dev;
        sha->sas_addr = &mvi->sas_addr[0];

        sha->num_phys = nr_core * chip_info->n_phy;

        if (mvi->flags & MVF_FLAG_SOC)
                can_queue = MVS_SOC_CAN_QUEUE;
        else
                can_queue = MVS_CHIP_SLOT_SZ;

        can_queue -= MVS_RSVD_SLOTS;

        shost->sg_tablesize = min_t(u16, SG_ALL, MVS_MAX_SG);
        shost->can_queue = can_queue;
        mvi->shost->cmd_per_lun = MVS_QUEUE_SIZE;
        sha->shost = mvi->shost;
}

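/* Give every phy a default (hard-coded) SAS address and copy it to the HA-wide address. */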
static void mvs_init_sas_add(struct mvs_info *mvi)
{
        u8 i;

        for (i = 0; i < mvi->chip->n_phy; i++) {
                mvi->phy[i].dev_sas_addr = 0x5005043011ab0000ULL;
                mvi->phy[i].dev_sas_addr =
                        cpu_to_be64((u64)(*(u64 *)&mvi->phy[i].dev_sas_addr));
        }

        memcpy(mvi->sas_addr, &mvi->phy[0].dev_sas_addr, SAS_ADDR_SIZE);
}

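/*
 * PCI probe: enable the device, allocate one mvs_info per host core, then
 * register with the SCSI midlayer and libsas and enable interrupts.
 */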
static int mvs_pci_init(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        unsigned int rc, nhost = 0;
        struct mvs_info *mvi;
        irq_handler_t irq_handler = mvs_interrupt;
        struct Scsi_Host *shost = NULL;
        const struct mvs_chip_info *chip;

        dev_printk(KERN_INFO, &pdev->dev,
                   "mvsas: driver version %s\n", DRV_VERSION);
        rc = pci_enable_device(pdev);
        if (rc)
                goto err_out_enable;

        pci_set_master(pdev);

        rc = pci_request_regions(pdev, DRV_NAME);
        if (rc)
                goto err_out_disable;

        rc = pci_go_64(pdev);
        if (rc)
                goto err_out_regions;

        shost = scsi_host_alloc(&mvs_sht, sizeof(void *));
        if (!shost) {
                rc = -ENOMEM;
                goto err_out_regions;
        }

        chip = &mvs_chips[ent->driver_data];
        SHOST_TO_SAS_HA(shost) =
                kcalloc(1, sizeof(struct sas_ha_struct), GFP_KERNEL);
        if (!SHOST_TO_SAS_HA(shost)) {
                scsi_host_put(shost);
                rc = -ENOMEM;
                goto err_out_regions;
        }

        rc = mvs_prep_sas_ha_init(shost, chip);
        if (rc) {
                scsi_host_put(shost);
                rc = -ENOMEM;
                goto err_out_regions;
        }

        pci_set_drvdata(pdev, SHOST_TO_SAS_HA(shost));

        do {
                mvi = mvs_pci_alloc(pdev, ent, shost, nhost);
                if (!mvi) {
                        rc = -ENOMEM;
                        goto err_out_regions;
                }

                memset(&mvi->hba_info_param, 0xFF,
                       sizeof(struct hba_info_page));

                mvs_init_sas_add(mvi);

                mvi->instance = nhost;
                rc = MVS_CHIP_DISP->chip_init(mvi);
                if (rc) {
                        mvs_free(mvi);
                        goto err_out_regions;
                }
                nhost++;
        } while (nhost < chip->n_host);
#ifdef CONFIG_SCSI_MVSAS_TASKLET
        {
                struct mvs_prv_info *mpi = SHOST_TO_SAS_HA(shost)->lldd_ha;

                tasklet_init(&(mpi->mv_tasklet), mvs_tasklet,
                             (unsigned long)SHOST_TO_SAS_HA(shost));
        }
#endif

        mvs_post_sas_ha_init(shost, chip);

        rc = scsi_add_host(shost, &pdev->dev);
        if (rc)
                goto err_out_shost;

        rc = sas_register_ha(SHOST_TO_SAS_HA(shost));
        if (rc)
                goto err_out_shost;
        rc = request_irq(pdev->irq, irq_handler, IRQF_SHARED,
                         DRV_NAME, SHOST_TO_SAS_HA(shost));
        if (rc)
                goto err_not_sas;

        MVS_CHIP_DISP->interrupt_enable(mvi);

        scsi_scan_host(mvi->shost);

        return 0;

err_not_sas:
        sas_unregister_ha(SHOST_TO_SAS_HA(shost));
err_out_shost:
        scsi_remove_host(mvi->shost);
err_out_regions:
        pci_release_regions(pdev);
err_out_disable:
        pci_disable_device(pdev);
err_out_enable:
        return rc;
}

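/* PCI remove: unwind mvs_pci_init() in reverse order. */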
static void mvs_pci_remove(struct pci_dev *pdev)
{
        unsigned short core_nr, i = 0;
        struct sas_ha_struct *sha = pci_get_drvdata(pdev);
        struct mvs_info *mvi = NULL;

        core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
        mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0];

#ifdef CONFIG_SCSI_MVSAS_TASKLET
        tasklet_kill(&((struct mvs_prv_info *)sha->lldd_ha)->mv_tasklet);
#endif

        sas_unregister_ha(sha);
        sas_remove_host(mvi->shost);

        MVS_CHIP_DISP->interrupt_disable(mvi);
        free_irq(mvi->pdev->irq, sha);
        for (i = 0; i < core_nr; i++) {
                mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i];
                mvs_free(mvi);
        }
        kfree(sha->sas_phy);
        kfree(sha->sas_port);
        kfree(sha);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
}

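/*
 * PCI IDs bound to this driver, including Areca, Adaptec, HighPoint (TTI)
 * and OCZ boards built around these chips.
 */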
static struct pci_device_id mvs_pci_table[] = {
        { PCI_VDEVICE(MARVELL, 0x6320), chip_6320 },
        { PCI_VDEVICE(MARVELL, 0x6340), chip_6440 },
        {
                .vendor         = PCI_VENDOR_ID_MARVELL,
                .device         = 0x6440,
                .subvendor      = PCI_ANY_ID,
                .subdevice      = 0x6480,
                .class          = 0,
                .class_mask     = 0,
                .driver_data    = chip_6485,
        },
        { PCI_VDEVICE(MARVELL, 0x6440), chip_6440 },
        { PCI_VDEVICE(MARVELL, 0x6485), chip_6485 },
        { PCI_VDEVICE(MARVELL, 0x9480), chip_9480 },
        { PCI_VDEVICE(MARVELL, 0x9180), chip_9180 },
        { PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1300), chip_1300 },
        { PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1320), chip_1320 },
        { PCI_VDEVICE(ADAPTEC2, 0x0450), chip_6440 },
        { PCI_VDEVICE(TTI, 0x2640), chip_6440 },
        { PCI_VDEVICE(TTI, 0x2710), chip_9480 },
        { PCI_VDEVICE(TTI, 0x2720), chip_9480 },
        { PCI_VDEVICE(TTI, 0x2721), chip_9480 },
        { PCI_VDEVICE(TTI, 0x2722), chip_9480 },
        { PCI_VDEVICE(TTI, 0x2740), chip_9480 },
        { PCI_VDEVICE(TTI, 0x2744), chip_9480 },
        { PCI_VDEVICE(TTI, 0x2760), chip_9480 },
        {
                .vendor         = PCI_VENDOR_ID_MARVELL_EXT,
                .device         = 0x9480,
                .subvendor      = PCI_ANY_ID,
                .subdevice      = 0x9480,
                .class          = 0,
                .class_mask     = 0,
                .driver_data    = chip_9480,
        },
        {
                .vendor         = PCI_VENDOR_ID_MARVELL_EXT,
                .device         = 0x9445,
                .subvendor      = PCI_ANY_ID,
                .subdevice      = 0x9480,
                .class          = 0,
                .class_mask     = 0,
                .driver_data    = chip_9445,
        },
        { PCI_VDEVICE(MARVELL_EXT, 0x9485), chip_9485 }, /* Marvell 9480/9485 (any vendor/model) */
        { PCI_VDEVICE(OCZ, 0x1021), chip_9485 }, /* OCZ RevoDrive3 */
        { PCI_VDEVICE(OCZ, 0x1022), chip_9485 }, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */
        { PCI_VDEVICE(OCZ, 0x1040), chip_9485 }, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */
        { PCI_VDEVICE(OCZ, 0x1041), chip_9485 }, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */
        { PCI_VDEVICE(OCZ, 0x1042), chip_9485 }, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */
        { PCI_VDEVICE(OCZ, 0x1043), chip_9485 }, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */
        { PCI_VDEVICE(OCZ, 0x1044), chip_9485 }, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */
        { PCI_VDEVICE(OCZ, 0x1080), chip_9485 }, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */
        { PCI_VDEVICE(OCZ, 0x1083), chip_9485 }, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */
        { PCI_VDEVICE(OCZ, 0x1084), chip_9485 }, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */

        { }     /* terminate list */
};

static struct pci_driver mvs_pci_driver = {
        .name           = DRV_NAME,
        .id_table       = mvs_pci_table,
        .probe          = mvs_pci_init,
        .remove         = mvs_pci_remove,
};

static DEVICE_STRING_ATTR_RO(driver_version, 0444, DRV_VERSION);

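/*
 * sysfs store: update the global coalescing value and push it to every host
 * core that supports interrupt tuning.
 */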
static ssize_t interrupt_coalescing_store(struct device *cdev,
                                          struct device_attribute *attr,
                                          const char *buffer, size_t size)
{
        unsigned int val = 0;
        struct mvs_info *mvi = NULL;
        struct Scsi_Host *shost = class_to_shost(cdev);
        struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
        u8 i, core_nr;

        if (buffer == NULL)
                return size;

        if (sscanf(buffer, "%u", &val) != 1)
                return -EINVAL;

        if (val >= 0x10000) {
                mv_dprintk("interrupt coalescing timer %d us is"
                           " too long\n", val);
                return strlen(buffer);
        }

        interrupt_coalescing = val;

        core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
        mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0];

        if (unlikely(!mvi))
                return -EINVAL;

        for (i = 0; i < core_nr; i++) {
                mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i];
                if (MVS_CHIP_DISP->tune_interrupt)
                        MVS_CHIP_DISP->tune_interrupt(mvi,
                                interrupt_coalescing);
        }
        mv_dprintk("set interrupt coalescing time to %d us\n",
                   interrupt_coalescing);
        return strlen(buffer);
}

static ssize_t interrupt_coalescing_show(struct device *cdev,
                                         struct device_attribute *attr,
                                         char *buffer)
{
        return sysfs_emit(buffer, "%d\n", interrupt_coalescing);
}

static DEVICE_ATTR_RW(interrupt_coalescing);

static int __init mvs_init(void)
{
        int rc;

        mvs_stt = sas_domain_attach_transport(&mvs_transport_ops);
        if (!mvs_stt)
                return -ENOMEM;

        rc = pci_register_driver(&mvs_pci_driver);
        if (rc)
                goto err_out;

        return 0;

err_out:
        sas_release_transport(mvs_stt);
        return rc;
}

static void __exit mvs_exit(void)
{
        pci_unregister_driver(&mvs_pci_driver);
        sas_release_transport(mvs_stt);
}

static struct attribute *mvst_host_attrs[] = {
        &dev_attr_driver_version.attr.attr,
        &dev_attr_interrupt_coalescing.attr,
        NULL,
};

ATTRIBUTE_GROUPS(mvst_host);

static const struct attribute_group *mvst_sdev_groups[] = {
        &sas_ata_sdev_attr_group,
        NULL,
};

module_init(mvs_init);
module_exit(mvs_exit);

MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
MODULE_DESCRIPTION("Marvell 88SE6440 SAS/SATA controller driver");
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");

MODULE_DEVICE_TABLE(pci, mvs_pci_table);