/*
 * Marvell 88SE64xx/88SE94xx pci init
 *
 * Copyright 2007 Red Hat, Inc.
 * Copyright 2008 Marvell. <kewei@marvell.com>
 *
 * This file is licensed under GPLv2.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; version 2 of the
 * License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
 * USA
 */

#include "mv_sas.h"

static struct scsi_transport_template *mvs_stt;
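
/*
 * Per-chip parameters, indexed by the chip_XXXX ids used in mvs_pci_table
 * below.  Field order is assumed to match struct mvs_chip_info as defined
 * in mv_sas.h (not part of this file): n_host, n_phy, fis_offs, fis_count,
 * srs_sz, slot_width, dispatch.
 */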
static const struct mvs_chip_info mvs_chips[] = {
	[chip_6320] =	{ 1, 2, 0x400, 17, 16,  9, &mvs_64xx_dispatch, },
	[chip_6440] =	{ 1, 4, 0x400, 17, 16,  9, &mvs_64xx_dispatch, },
	[chip_6485] =	{ 1, 8, 0x800, 33, 32, 10, &mvs_64xx_dispatch, },
	[chip_9180] =	{ 2, 4, 0x800, 17, 64,  9, &mvs_94xx_dispatch, },
	[chip_9480] =	{ 2, 4, 0x800, 17, 64,  9, &mvs_94xx_dispatch, },
	[chip_1300] =	{ 1, 4, 0x400, 17, 16,  9, &mvs_64xx_dispatch, },
	[chip_1320] =	{ 2, 4, 0x800, 17, 64,  9, &mvs_94xx_dispatch, },
};
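
/*
 * SCSI host template.  Most handlers are the generic libsas entry points;
 * only the slave init hooks and the scan callbacks are mvs-specific.
 */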
static struct scsi_host_template mvs_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.queuecommand		= sas_queuecommand,
	.target_alloc		= sas_target_alloc,
	.slave_configure	= mvs_slave_configure,
	.slave_destroy		= sas_slave_destroy,
	.scan_finished		= mvs_scan_finished,
	.scan_start		= mvs_scan_start,
	.change_queue_depth	= sas_change_queue_depth,
	.change_queue_type	= sas_change_queue_type,
	.bios_param		= sas_bios_param,
	.can_queue		= 1,
	.cmd_per_lun		= 1,
	.this_id		= -1,
	.sg_tablesize		= SG_ALL,
	.max_sectors		= SCSI_DEFAULT_MAX_SECTORS,
	.use_clustering		= ENABLE_CLUSTERING,
	.eh_device_reset_handler = sas_eh_device_reset_handler,
	.eh_bus_reset_handler	= sas_eh_bus_reset_handler,
	.slave_alloc		= mvs_slave_alloc,
	.target_destroy		= sas_target_destroy,
	.ioctl			= sas_ioctl,
};
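
/*
 * Domain function template: the callbacks libsas uses to drive this LLDD
 * for device discovery, task execution, phy control and error handling.
 */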
static struct sas_domain_function_template mvs_transport_ops = {
	.lldd_dev_found		= mvs_dev_found,
	.lldd_dev_gone		= mvs_dev_gone,

	.lldd_execute_task	= mvs_queue_command,
	.lldd_control_phy	= mvs_phy_control,

	.lldd_abort_task	= mvs_abort_task,
	.lldd_abort_task_set	= mvs_abort_task_set,
	.lldd_clear_aca		= mvs_clear_aca,
	.lldd_clear_task_set	= mvs_clear_task_set,
	.lldd_I_T_nexus_reset	= mvs_I_T_nexus_reset,
	.lldd_lu_reset		= mvs_lu_reset,
	.lldd_query_task	= mvs_query_task,

	.lldd_port_formed	= mvs_port_formed,
	.lldd_port_deformed	= mvs_port_deformed,
};
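
/*
 * Set up one phy: enable it only if the chip actually has that many phys,
 * and wire the embedded asd_sas_phy back to libsas (SAS address, receive
 * buffer, host adapter, and an opaque pointer back to our mvs_phy).
 */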
static void __devinit mvs_phy_init(struct mvs_info *mvi, int phy_id)
{
	struct mvs_phy *phy = &mvi->phy[phy_id];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;

	phy->mvi = mvi;
	init_timer(&phy->timer);
	sas_phy->enabled = (phy_id < mvi->chip->n_phy) ? 1 : 0;
	sas_phy->class = SAS;
	sas_phy->iproto = SAS_PROTOCOL_ALL;
	sas_phy->tproto = 0;
	sas_phy->type = PHY_TYPE_PHYSICAL;
	sas_phy->role = PHY_ROLE_INITIATOR;
	sas_phy->oob_mode = OOB_NOT_CONNECTED;
	sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;

	sas_phy->id = phy_id;
	sas_phy->sas_addr = &mvi->sas_addr[0];
	sas_phy->frame_rcvd = &phy->frame_rcvd[0];
	sas_phy->ha = (struct sas_ha_struct *)mvi->shost->hostdata;
	sas_phy->lldd_phy = phy;
}
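
/*
 * Release everything mvs_alloc() and mvs_pci_alloc() set up: per-slot
 * command buffers, the TX/RX rings, the slot table, the optional bulk
 * (trash) buffer, the register mappings and finally the mvs_info itself.
 */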
static void mvs_free(struct mvs_info *mvi)
{
	int i;
	int slot_nr;
	struct mvs_wq *mwq;

	if (!mvi)
		return;

	if (mvi->flags & MVF_FLAG_SOC)
		slot_nr = MVS_SOC_SLOTS;
	else
		slot_nr = MVS_SLOTS;

	for (i = 0; i < mvi->tags_num; i++) {
		struct mvs_slot_info *slot = &mvi->slot_info[i];
		if (slot->buf)
			dma_free_coherent(mvi->dev, MVS_SLOT_BUF_SZ,
					  slot->buf, slot->buf_dma);
	}

	if (mvi->tx)
		dma_free_coherent(mvi->dev,
				  sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ,
				  mvi->tx, mvi->tx_dma);
	if (mvi->rx_fis)
		dma_free_coherent(mvi->dev, MVS_RX_FISL_SZ,
				  mvi->rx_fis, mvi->rx_fis_dma);
	if (mvi->rx)
		dma_free_coherent(mvi->dev,
				  sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1),
				  mvi->rx, mvi->rx_dma);
	if (mvi->slot)
		dma_free_coherent(mvi->dev,
				  sizeof(*mvi->slot) * slot_nr,
				  mvi->slot, mvi->slot_dma);
#ifndef DISABLE_HOTPLUG_DMA_FIX
	if (mvi->bulk_buffer)
		dma_free_coherent(mvi->dev, TRASH_BUCKET_SIZE,
				  mvi->bulk_buffer, mvi->bulk_buffer_dma);
#endif

	MVS_CHIP_DISP->chip_iounmap(mvi);
	if (mvi->shost)
		scsi_host_put(mvi->shost);
	list_for_each_entry(mwq, &mvi->wq_list, entry)
		cancel_delayed_work(&mwq->work_q);
	kfree(mvi);
}
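
/*
 * Optional bottom half: with MVS_USE_TASKLET defined, mvs_interrupt() only
 * acknowledges the hardware and schedules this tasklet, and the per-core
 * interrupt service routines run here in softirq context instead.
 */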
#ifdef MVS_USE_TASKLET
struct tasklet_struct mv_tasklet;
static void mvs_tasklet(unsigned long opaque)
{
	u32 stat;
	u16 core_nr, i = 0;
	struct mvs_info *mvi;
	struct sas_ha_struct *sha = (struct sas_ha_struct *)opaque;

	core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
	mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0];

	if (unlikely(!mvi))
		BUG_ON(1);

	for (i = 0; i < core_nr; i++) {
		mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i];
		stat = MVS_CHIP_DISP->isr_status(mvi, mvi->irq);
		if (stat)
			MVS_CHIP_DISP->isr(mvi, mvi->irq, stat);
	}
}
#endif
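
/*
 * Shared interrupt handler.  One interrupt line may serve several cores;
 * read the status once, claim the interrupt only if something is pending,
 * then either kick the tasklet or service every core inline.
 */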
static irqreturn_t mvs_interrupt(int irq, void *opaque)
{
	u32 core_nr, i = 0;
	u32 stat;
	struct mvs_info *mvi;
	struct sas_ha_struct *sha = opaque;

	core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
	mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0];

	if (unlikely(!mvi))
		return IRQ_NONE;

	stat = MVS_CHIP_DISP->isr_status(mvi, irq);
	if (!stat)
		return IRQ_NONE;

#ifdef MVS_USE_TASKLET
	tasklet_schedule(&mv_tasklet);
#else
	for (i = 0; i < core_nr; i++) {
		mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i];
		MVS_CHIP_DISP->isr(mvi, irq, stat);
	}
#endif
	return IRQ_HANDLED;
}
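
/*
 * Allocate the DMA areas one core needs: the TX command ring, the received
 * FIS area, the RX completion ring (sized MVS_RX_RING_SZ + 1; entry 0
 * tracks the last-produced index), the slot table and one command buffer
 * per slot.  Returns 0 on success, nonzero on failure; the caller cleans
 * up through mvs_free().
 */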
static int __devinit mvs_alloc(struct mvs_info *mvi, struct Scsi_Host *shost)
{
	int i = 0, slot_nr;

	if (mvi->flags & MVF_FLAG_SOC)
		slot_nr = MVS_SOC_SLOTS;
	else
		slot_nr = MVS_SLOTS;

	spin_lock_init(&mvi->lock);
	for (i = 0; i < mvi->chip->n_phy; i++) {
		mvs_phy_init(mvi, i);
		mvi->port[i].wide_port_phymap = 0;
		mvi->port[i].port_attached = 0;
		INIT_LIST_HEAD(&mvi->port[i].list);
	}
	for (i = 0; i < MVS_MAX_DEVICES; i++) {
		mvi->devices[i].taskfileset = MVS_ID_NOT_MAPPED;
		mvi->devices[i].dev_type = NO_DEVICE;
		mvi->devices[i].device_id = i;
		mvi->devices[i].dev_status = MVS_DEV_NORMAL;
	}

	/*
	 * alloc and init our DMA areas
	 */
	mvi->tx = dma_alloc_coherent(mvi->dev,
				     sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ,
				     &mvi->tx_dma, GFP_KERNEL);
	if (!mvi->tx)
		goto err_out;
	memset(mvi->tx, 0, sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ);
	mvi->rx_fis = dma_alloc_coherent(mvi->dev, MVS_RX_FISL_SZ,
					 &mvi->rx_fis_dma, GFP_KERNEL);
	if (!mvi->rx_fis)
		goto err_out;
	memset(mvi->rx_fis, 0, MVS_RX_FISL_SZ);

	mvi->rx = dma_alloc_coherent(mvi->dev,
				     sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1),
				     &mvi->rx_dma, GFP_KERNEL);
	if (!mvi->rx)
		goto err_out;
	memset(mvi->rx, 0, sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1));
	mvi->rx[0] = cpu_to_le32(0xfff);
	mvi->rx_cons = 0xfff;

	mvi->slot = dma_alloc_coherent(mvi->dev,
				       sizeof(*mvi->slot) * slot_nr,
				       &mvi->slot_dma, GFP_KERNEL);
	if (!mvi->slot)
		goto err_out;
	memset(mvi->slot, 0, sizeof(*mvi->slot) * slot_nr);

#ifndef DISABLE_HOTPLUG_DMA_FIX
	mvi->bulk_buffer = dma_alloc_coherent(mvi->dev,
					      TRASH_BUCKET_SIZE,
					      &mvi->bulk_buffer_dma,
					      GFP_KERNEL);
	if (!mvi->bulk_buffer)
		goto err_out;
#endif
	for (i = 0; i < slot_nr; i++) {
		struct mvs_slot_info *slot = &mvi->slot_info[i];

		slot->buf = dma_alloc_coherent(mvi->dev, MVS_SLOT_BUF_SZ,
					       &slot->buf_dma, GFP_KERNEL);
		if (!slot->buf) {
			printk(KERN_DEBUG "failed to allocate slot->buf.\n");
			goto err_out;
		}
		memset(slot->buf, 0, MVS_SLOT_BUF_SZ);
	}
	/* Initialize tags */
	mvs_tag_init(mvi);
	return 0;
err_out:
	return 1;
}
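
/*
 * Map the chip's register BARs.  bar is the main register window; bar_ex,
 * when not -1, is an extended window (it may be exposed as I/O space, in
 * which case the raw resource address is used instead of an ioremap).
 */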
int mvs_ioremap(struct mvs_info *mvi, int bar, int bar_ex)
{
	unsigned long res_start, res_len, res_flag, res_flag_ex = 0;
	struct pci_dev *pdev = mvi->pdev;

	if (bar_ex != -1) {
		/*
		 * ioremap main and peripheral registers
		 */
		res_start = pci_resource_start(pdev, bar_ex);
		res_len = pci_resource_len(pdev, bar_ex);
		if (!res_start || !res_len)
			goto err_out;

		res_flag_ex = pci_resource_flags(pdev, bar_ex);
		if (res_flag_ex & IORESOURCE_MEM) {
			if (res_flag_ex & IORESOURCE_CACHEABLE)
				mvi->regs_ex = ioremap(res_start, res_len);
			else
				mvi->regs_ex = ioremap_nocache(res_start,
							       res_len);
		} else
			mvi->regs_ex = (void *)res_start;
		if (!mvi->regs_ex)
			goto err_out;
	}

	res_start = pci_resource_start(pdev, bar);
	res_len = pci_resource_len(pdev, bar);
	if (!res_start || !res_len)
		goto err_out;

	res_flag = pci_resource_flags(pdev, bar);
	if (res_flag & IORESOURCE_CACHEABLE)
		mvi->regs = ioremap(res_start, res_len);
	else
		mvi->regs = ioremap_nocache(res_start, res_len);
	if (!mvi->regs) {
		if (mvi->regs_ex && (res_flag_ex & IORESOURCE_MEM))
			iounmap(mvi->regs_ex);
		mvi->regs_ex = NULL;
		goto err_out;
	}

	return 0;
err_out:
	return -1;
}

void mvs_iounmap(void __iomem *regs)
{
	iounmap(regs);
}
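
/*
 * Allocate and initialize one mvs_info (one core) and register it in the
 * shared mvs_prv_info slot "id".  On failure everything allocated so far
 * is released through mvs_free().
 */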
static struct mvs_info *__devinit mvs_pci_alloc(struct pci_dev *pdev,
				const struct pci_device_id *ent,
				struct Scsi_Host *shost, unsigned int id)
{
	struct mvs_info *mvi;
	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);

	mvi = kzalloc(sizeof(*mvi) + MVS_SLOTS * sizeof(struct mvs_slot_info),
		      GFP_KERNEL);
	if (!mvi)
		return NULL;

	mvi->pdev = pdev;
	mvi->dev = &pdev->dev;
	mvi->chip_id = ent->driver_data;
	mvi->chip = &mvs_chips[mvi->chip_id];
	INIT_LIST_HEAD(&mvi->wq_list);
	mvi->irq = pdev->irq;

	((struct mvs_prv_info *)sha->lldd_ha)->mvi[id] = mvi;
	((struct mvs_prv_info *)sha->lldd_ha)->n_phy = mvi->chip->n_phy;

	mvi->id = id;
	mvi->sas = sha;
	mvi->shost = shost;
#ifdef MVS_USE_TASKLET
	tasklet_init(&mv_tasklet, mvs_tasklet, (unsigned long)sha);
#endif

	if (MVS_CHIP_DISP->chip_ioremap(mvi))
		goto err_out;
	if (!mvs_alloc(mvi, shost))
		return mvi;
err_out:
	mvs_free(mvi);
	return NULL;
}
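
/*
 * Prefer a 64-bit DMA mask and fall back to 32 bits, for both the
 * streaming and the consistent mask; fail the probe only if even 32-bit
 * DMA cannot be enabled.
 */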
/* move to PCI layer or libata core? */
static int pci_go_64(struct pci_dev *pdev)
{
	int rc;

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (rc) {
			rc = pci_set_consistent_dma_mask(pdev,
							 DMA_BIT_MASK(32));
			if (rc) {
				dev_printk(KERN_ERR, &pdev->dev,
					   "64-bit DMA enable failed\n");
				return rc;
			}
		}
	} else {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit DMA enable failed\n");
			return rc;
		}
		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit consistent DMA enable failed\n");
			return rc;
		}
	}

	return rc;
}
static int __devinit mvs_prep_sas_ha_init(struct Scsi_Host *shost,
				const struct mvs_chip_info *chip_info)
{
	int phy_nr, port_nr;
	unsigned short core_nr;
	struct asd_sas_phy **arr_phy;
	struct asd_sas_port **arr_port;
	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);

	core_nr = chip_info->n_host;
	phy_nr = core_nr * chip_info->n_phy;
	port_nr = phy_nr;

	memset(sha, 0x00, sizeof(struct sas_ha_struct));
	arr_phy = kcalloc(phy_nr, sizeof(void *), GFP_KERNEL);
	arr_port = kcalloc(port_nr, sizeof(void *), GFP_KERNEL);
	if (!arr_phy || !arr_port)
		goto exit_free;

	sha->sas_phy = arr_phy;
	sha->sas_port = arr_port;

	sha->lldd_ha = kzalloc(sizeof(struct mvs_prv_info), GFP_KERNEL);
	if (!sha->lldd_ha)
		goto exit_free;

	((struct mvs_prv_info *)sha->lldd_ha)->n_host = core_nr;

	shost->transportt = mvs_stt;
	shost->max_channel = 1;
	shost->max_cmd_len = 16;

	return 0;
exit_free:
	kfree(arr_phy);
	kfree(arr_port);
	return -1;
}
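
/*
 * Second-stage host setup, done after every core has been allocated: point
 * the sas_ha phy/port arrays at each core's embedded structures and size
 * the queues from the (SOC or PCI) slot count.
 */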
static void __devinit mvs_post_sas_ha_init(struct Scsi_Host *shost,
				const struct mvs_chip_info *chip_info)
{
	int can_queue, i = 0, j = 0;
	struct mvs_info *mvi = NULL;
	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
	unsigned short nr_core = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;

	for (j = 0; j < nr_core; j++) {
		mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[j];
		for (i = 0; i < chip_info->n_phy; i++) {
			sha->sas_phy[j * chip_info->n_phy + i] =
				&mvi->phy[i].sas_phy;
			sha->sas_port[j * chip_info->n_phy + i] =
				&mvi->port[i].sas_port;
		}
	}

	sha->sas_ha_name = DRV_NAME;
	sha->dev = mvi->dev;
	sha->lldd_module = THIS_MODULE;
	sha->sas_addr = &mvi->sas_addr[0];

	sha->num_phys = nr_core * chip_info->n_phy;

	sha->lldd_max_execute_num = 1;

	if (mvi->flags & MVF_FLAG_SOC)
		can_queue = MVS_SOC_CAN_QUEUE;
	else
		can_queue = MVS_CAN_QUEUE;

	sha->lldd_queue_size = can_queue;
	shost->can_queue = can_queue;
	mvi->shost->cmd_per_lun = MVS_SLOTS / sha->num_phys;
	sha->core.shost = mvi->shost;
}
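
/*
 * Give every phy the same hard-coded default WWN (0x5005043011ab0000), in
 * big-endian form, and use phy 0's address as the host SAS address.
 */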
static void mvs_init_sas_add(struct mvs_info *mvi)
{
	u8 i;

	for (i = 0; i < mvi->chip->n_phy; i++) {
		mvi->phy[i].dev_sas_addr = 0x5005043011ab0000ULL;
		mvi->phy[i].dev_sas_addr =
			cpu_to_be64((u64)(*(u64 *)&mvi->phy[i].dev_sas_addr));
	}

	memcpy(mvi->sas_addr, &mvi->phy[0].dev_sas_addr, SAS_ADDR_SIZE);
}
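
/*
 * PCI probe: enable the device, request regions, pick a DMA mask, allocate
 * the Scsi_Host plus the shared sas_ha, bring up each core in turn, then
 * register with the SCSI and SAS layers, hook the IRQ and start scanning.
 */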
static int __devinit mvs_pci_init(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	unsigned int rc, nhost = 0;
	struct mvs_info *mvi;
	irq_handler_t irq_handler = mvs_interrupt;
	struct Scsi_Host *shost = NULL;
	const struct mvs_chip_info *chip;

	dev_printk(KERN_INFO, &pdev->dev,
		   "mvsas: driver version %s\n", DRV_VERSION);
	rc = pci_enable_device(pdev);
	if (rc)
		goto err_out_enable;

	pci_set_master(pdev);

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto err_out_disable;

	rc = pci_go_64(pdev);
	if (rc)
		goto err_out_regions;

	shost = scsi_host_alloc(&mvs_sht, sizeof(void *));
	if (!shost) {
		rc = -ENOMEM;
		goto err_out_regions;
	}

	chip = &mvs_chips[ent->driver_data];
	SHOST_TO_SAS_HA(shost) =
		kcalloc(1, sizeof(struct sas_ha_struct), GFP_KERNEL);
	if (!SHOST_TO_SAS_HA(shost)) {
		scsi_host_put(shost);
		rc = -ENOMEM;
		goto err_out_regions;
	}

	rc = mvs_prep_sas_ha_init(shost, chip);
	if (rc) {
		scsi_host_put(shost);
		goto err_out_regions;
	}

	pci_set_drvdata(pdev, SHOST_TO_SAS_HA(shost));

	do {
		mvi = mvs_pci_alloc(pdev, ent, shost, nhost);
		if (!mvi) {
			rc = -ENOMEM;
			goto err_out_regions;
		}

		mvs_init_sas_add(mvi);

		mvi->instance = nhost;
		rc = MVS_CHIP_DISP->chip_init(mvi);
		if (rc) {
			mvs_free(mvi);
			goto err_out_regions;
		}
		nhost++;
	} while (nhost < chip->n_host);

	mvs_post_sas_ha_init(shost, chip);

	rc = scsi_add_host(shost, &pdev->dev);
	if (rc)
		goto err_out_shost;

	rc = sas_register_ha(SHOST_TO_SAS_HA(shost));
	if (rc)
		goto err_out_shost;

	rc = request_irq(pdev->irq, irq_handler, IRQF_SHARED,
			 DRV_NAME, SHOST_TO_SAS_HA(shost));
	if (rc)
		goto err_not_sas;

	MVS_CHIP_DISP->interrupt_enable(mvi);

	scsi_scan_host(mvi->shost);

	return 0;

err_not_sas:
	sas_unregister_ha(SHOST_TO_SAS_HA(shost));
err_out_shost:
	scsi_remove_host(mvi->shost);
err_out_regions:
	pci_release_regions(pdev);
err_out_disable:
	pci_disable_device(pdev);
err_out_enable:
	return rc;
}
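
/*
 * PCI remove: tear down in reverse probe order, then free every core and
 * the shared sas_ha bookkeeping.
 */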
static void __devexit mvs_pci_remove(struct pci_dev *pdev)
{
	unsigned short core_nr, i = 0;
	struct sas_ha_struct *sha = pci_get_drvdata(pdev);
	struct mvs_info *mvi = NULL;

	core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
	mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0];

#ifdef MVS_USE_TASKLET
	tasklet_kill(&mv_tasklet);
#endif

	pci_set_drvdata(pdev, NULL);
	sas_unregister_ha(sha);
	sas_remove_host(mvi->shost);
	scsi_remove_host(mvi->shost);

	MVS_CHIP_DISP->interrupt_disable(mvi);
	free_irq(mvi->irq, sha);
	for (i = 0; i < core_nr; i++) {
		mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i];
		mvs_free(mvi);
	}
	kfree(sha->sas_phy);
	kfree(sha->sas_port);
	kfree(sha);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

static struct pci_device_id __devinitdata mvs_pci_table[] = {
	{ PCI_VDEVICE(MARVELL, 0x6320), chip_6320 },
	{ PCI_VDEVICE(MARVELL, 0x6340), chip_6440 },
	{
		/* 6440 variant with subdevice 0x6480: drive it as a 6485 */
		.vendor		= PCI_VENDOR_ID_MARVELL,
		.device		= 0x6440,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= 0x6480,
		.class		= 0,
		.class_mask	= 0,
		.driver_data	= chip_6485,
	},
	{ PCI_VDEVICE(MARVELL, 0x6440), chip_6440 },
	{ PCI_VDEVICE(MARVELL, 0x6485), chip_6485 },
	{ PCI_VDEVICE(MARVELL, 0x9480), chip_9480 },
	{ PCI_VDEVICE(MARVELL, 0x9180), chip_9180 },
	{ PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1300), chip_1300 },
	{ PCI_VDEVICE(ARECA, PCI_DEVICE_ID_ARECA_1320), chip_1320 },
	{ PCI_VDEVICE(ADAPTEC2, 0x0450), chip_6440 },

	{ }	/* terminate list */
};

static struct pci_driver mvs_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= mvs_pci_table,
	.probe		= mvs_pci_init,
	.remove		= __devexit_p(mvs_pci_remove),
};

struct task_struct *mvs_th;
static int __init mvs_init(void)
{
	int rc;

	mvs_stt = sas_domain_attach_transport(&mvs_transport_ops);
	if (!mvs_stt)
		return -ENOMEM;

	rc = pci_register_driver(&mvs_pci_driver);
	if (rc)
		goto err_out;

	return 0;

err_out:
	sas_release_transport(mvs_stt);
	return rc;
}

static void __exit mvs_exit(void)
{
	pci_unregister_driver(&mvs_pci_driver);
	sas_release_transport(mvs_stt);
}

module_init(mvs_init);
module_exit(mvs_exit);

MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
MODULE_DESCRIPTION("Marvell 88SE6440 SAS/SATA controller driver");
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");

MODULE_DEVICE_TABLE(pci, mvs_pci_table);