/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2005: EMC Corporation, all rights reserved.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/dma-mapping.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"0.12"
enum {
	/* BAR's are enumerated in terms of pci_resource_start() terms */
	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */

	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */

	MV_IRQ_COAL_REG_BASE	= 0x18000,	/* 6xxx part only */
	MV_SATAHC0_REG_BASE	= 0x20000,

	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,		/* arbiter */
	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,

	MV_DMA_BOUNDARY		= 0xffffffffU,
	SATAHC_MASK		= (~(MV_SATAHC_REG_SZ - 1)),

	MV_PORTS_PER_HC		= 4,
	/* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
	MV_PORT_HC_SHIFT	= 2,
	/* == (port % MV_PORTS_PER_HC) to determine port from 0-7 port */
	MV_PORT_MASK		= 3,

	/* Host Flags */
	MV_FLAG_DUAL_HC		= (1 << 30),	/* two SATA Host Controllers */
	MV_FLAG_IRQ_COALESCE	= (1 << 29),	/* IRQ coalescing capability */
	MV_FLAG_BDMA		= (1 << 28),	/* Basic DMA */

	chip_504x		= 0,
	chip_508x		= 1,
	chip_604x		= 2,
	chip_608x		= 3,

	/* PCI interface registers */
	PCI_MAIN_CMD_STS_OFS	= 0xd30,
	STOP_PCI_MASTER		= (1 << 2),
	PCI_MASTER_EMPTY	= (1 << 3),
	GLOB_SFT_RST		= (1 << 4),

	PCI_IRQ_CAUSE_OFS	= 0x1d58,
	PCI_IRQ_MASK_OFS	= 0x1d5c,
	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */

	HC_MAIN_IRQ_CAUSE_OFS	= 0x1d60,
	HC_MAIN_IRQ_MASK_OFS	= 0x1d64,
	PORT0_ERR		= (1 << 0),	/* shift by port # */
	PORT0_DONE		= (1 << 1),	/* shift by port # */
	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
	PCI_ERR			= (1 << 18),
	TRAN_LO_DONE		= (1 << 19),	/* 6xxx: IRQ coalescing */
	TRAN_HI_DONE		= (1 << 20),	/* 6xxx: IRQ coalescing */
	PORTS_0_7_COAL_DONE	= (1 << 21),	/* 6xxx: IRQ coalescing */
	GPIO_INT		= (1 << 22),
	SELF_INT		= (1 << 23),
	TWSI_INT		= (1 << 24),
	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
	HC_MAIN_MASKED_IRQS	= (TRAN_LO_DONE | TRAN_HI_DONE |
				   PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
				   HC_MAIN_RSVD),

	/* SATAHC registers */
	HC_CFG_OFS		= 0,

	HC_IRQ_CAUSE_OFS	= 0x14,
	CRBP_DMA_DONE		= (1 << 0),	/* shift by port # */
	HC_IRQ_COAL		= (1 << 4),	/* IRQ coalescing */
	DEV_IRQ			= (1 << 8),	/* shift by port # */

	/* Shadow block registers */
	SHD_PIO_DATA_OFS	= 0x100,
	SHD_FEA_ERR_OFS		= 0x104,
	SHD_SECT_CNT_OFS	= 0x108,
	SHD_LBA_L_OFS		= 0x10C,
	SHD_LBA_M_OFS		= 0x110,
	SHD_LBA_H_OFS		= 0x114,
	SHD_DEV_HD_OFS		= 0x118,
	SHD_CMD_STA_OFS		= 0x11C,
	SHD_CTL_AST_OFS		= 0x120,

	/* SATA registers */
	SATA_STATUS_OFS		= 0x300,	/* ctrl, err regs follow status */
	SATA_ACTIVE_OFS		= 0x350,

	/* EDMA registers */
	EDMA_CFG_OFS		= 0,

	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
	EDMA_ERR_D_PAR		= (1 << 0),
	EDMA_ERR_PRD_PAR	= (1 << 1),
	EDMA_ERR_DEV		= (1 << 2),
	EDMA_ERR_DEV_DCON	= (1 << 3),
	EDMA_ERR_DEV_CON	= (1 << 4),
	EDMA_ERR_SERR		= (1 << 5),
	EDMA_ERR_SELF_DIS	= (1 << 7),
	EDMA_ERR_BIST_ASYNC	= (1 << 8),
	EDMA_ERR_CRBQ_PAR	= (1 << 9),
	EDMA_ERR_CRPB_PAR	= (1 << 10),
	EDMA_ERR_INTRL_PAR	= (1 << 11),
	EDMA_ERR_IORDY		= (1 << 12),
	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),
	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),
	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),
	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),
	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),
	EDMA_ERR_TRANS_PROTO	= (1 << 31),
	EDMA_ERR_FATAL		= (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
				   EDMA_ERR_DEV_DCON | EDMA_ERR_CRBQ_PAR |
				   EDMA_ERR_CRPB_PAR | EDMA_ERR_INTRL_PAR |
				   EDMA_ERR_IORDY | EDMA_ERR_LNK_CTRL_RX_2 |
				   EDMA_ERR_LNK_DATA_RX |
				   EDMA_ERR_LNK_DATA_TX |
				   EDMA_ERR_TRANS_PROTO),

	/* EDMA command register */
	EDMA_CMD_OFS		= 0x28,
	EDMA_EN			= (1 << 0),
	EDMA_DS			= (1 << 1),
	ATA_RST			= (1 << 2),

	/* BDMA is 6xxx part only */
	BDMA_CMD_OFS		= 0x224,
	BDMA_START		= (1 << 0),

	MV_UNDEF		= 0,
};
struct mv_port_priv {

};

struct mv_host_priv {

};
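/* Both private-data structures are still empty; hpriv is allocated and
 * wired up in mv_init_one() below, presumably so per-host state has
 * somewhere to grow once real EDMA support lands.
 */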
static void mv_irq_clear(struct ata_port *ap);
static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static void mv_phy_reset(struct ata_port *ap);
static int mv_master_reset(void __iomem *mmio_base);
static irqreturn_t mv_interrupt(int irq, void *dev_instance,
				struct pt_regs *regs);
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
static Scsi_Host_Template mv_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.eh_strategy_handler	= ata_scsi_error,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= MV_UNDEF,
	.max_sectors		= ATA_MAX_SECTORS,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= MV_UNDEF,
	.proc_name		= DRV_NAME,
	.dma_boundary		= MV_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.bios_param		= ata_std_bios_param,
};
static struct ata_port_operations mv_ops = {
	.port_disable		= ata_port_disable,

	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.phy_reset		= mv_phy_reset,

	.qc_prep		= ata_qc_prep,
	.qc_issue		= ata_qc_issue_prot,

	.eng_timeout		= ata_eng_timeout,

	.irq_handler		= mv_interrupt,
	.irq_clear		= mv_irq_clear,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= ata_port_start,
	.port_stop		= ata_port_stop,
	.host_stop		= ata_host_stop,
};
static struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.sht		= &mv_sht,
		.host_flags	= (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				   ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO),
		.pio_mask	= 0x1f,	/* pio4-0 */
		.udma_mask	= 0,	/* 0x7f (udma6-0 disabled for now) */
		.port_ops	= &mv_ops,
	},
	{  /* chip_508x */
		.sht		= &mv_sht,
		.host_flags	= (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				   ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO |
				   MV_FLAG_DUAL_HC),
		.pio_mask	= 0x1f,	/* pio4-0 */
		.udma_mask	= 0,	/* 0x7f (udma6-0 disabled for now) */
		.port_ops	= &mv_ops,
	},
	{  /* chip_604x */
		.sht		= &mv_sht,
		.host_flags	= (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				   ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO |
				   MV_FLAG_IRQ_COALESCE | MV_FLAG_BDMA),
		.pio_mask	= 0x1f,	/* pio4-0 */
		.udma_mask	= 0,	/* 0x7f (udma6-0 disabled for now) */
		.port_ops	= &mv_ops,
	},
	{  /* chip_608x */
		.sht		= &mv_sht,
		.host_flags	= (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				   ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO |
				   MV_FLAG_IRQ_COALESCE | MV_FLAG_DUAL_HC |
				   MV_FLAG_BDMA),
		.pio_mask	= 0x1f,	/* pio4-0 */
		.udma_mask	= 0,	/* 0x7f (udma6-0 disabled for now) */
		.port_ops	= &mv_ops,
	},
};
static struct pci_device_id mv_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5040), 0, 0, chip_504x},
	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5041), 0, 0, chip_504x},
	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5080), 0, 0, chip_508x},
	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5081), 0, 0, chip_508x},

	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6040), 0, 0, chip_604x},
	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6041), 0, 0, chip_604x},
	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6080), 0, 0, chip_608x},
	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6081), 0, 0, chip_608x},
	{}			/* terminate list */
};
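/* The last field of each entry above (driver_data) carries one of the
 * chip_5xx/6xx indices from the enum; mv_init_one() reads it back as
 * board_idx to pick the matching mv_port_info[] entry.
 */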
static struct pci_driver mv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= mv_pci_tbl,
	.probe			= mv_init_one,
	.remove			= ata_pci_remove_one,
};
static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}
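/* writelfl() ("write long, with flush") is used below wherever a register
 * write must reach the chip before execution continues -- for instance when
 * clearing IRQ cause registers.  The dummy readl() forces the posted PCI
 * write out to the device before returning.
 */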
static inline void __iomem *mv_port_addr_to_hc_base(void __iomem *port_mmio)
{
	return ((void __iomem *)((unsigned long)port_mmio &
				 (unsigned long)SATAHC_MASK));
}
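/* Masking with SATAHC_MASK recovers the enclosing SATAHC block because the
 * blocks are 64KB-sized and 64KB-aligned.  This presumes mmio_base itself
 * is 64KB-aligned, which should hold since PCI BARs are size-aligned and
 * this BAR covers at least the full SATAHC range.
 */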
static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}
static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
	return (mv_hc_base(base, port >> MV_PORT_HC_SHIFT) +
		MV_SATAHC_ARBTR_REG_SZ +
		((port & MV_PORT_MASK) * MV_PORT_REG_SZ));
}
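/* Worked example of the address math above: global port 5 lives on HC1
 * (5 >> 2 == 1) as hard port 1 (5 & 3 == 1), so its register block starts
 * at base + 0x20000 + 1*0x10000 (HC1) + 0x2000 (arbiter) + 1*0x2000,
 * i.e. base + 0x34000.
 */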
static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(ap->host_set->mmio_base, ap->port_no);
}
static inline int mv_get_hc_count(unsigned long flags)
{
	return ((flags & MV_FLAG_DUAL_HC) ? 2 : 1);
}
static inline int mv_is_edma_active(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	return (EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
}
static inline int mv_port_bdma_capable(struct ata_port *ap)
{
	return (ap->flags & MV_FLAG_BDMA);
}
static void mv_irq_clear(struct ata_port *ap)
{
}
static unsigned int mv_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_CONTROL:
	case SCR_ERROR:
		ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
		break;
	case SCR_ACTIVE:
		ofs = SATA_ACTIVE_OFS;	/* active is not with the others */
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}
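/* With libata's SCR index ordering (SCR_STATUS == 0, SCR_ERROR == 1,
 * SCR_CONTROL == 2), the shared case above maps SStatus/SError/SControl to
 * port offsets 0x300/0x304/0x308 -- matching the "ctrl, err regs follow
 * status" note on SATA_STATUS_OFS.
 */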
static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (0xffffffffU != ofs) {
		return readl(mv_ap_base(ap) + ofs);
	} else {
		return (u32) ofs;
	}
}
static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (0xffffffffU != ofs) {
		writelfl(val, mv_ap_base(ap) + ofs);
	}
}
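/* These two callbacks are how the rest of libata talks to the PHY: for
 * example, __sata_phy_reset() (used from mv_phy_reset() below) issues its
 * link reset by writing SControl through .scr_write and then polls SStatus
 * through .scr_read until the link reports an attached device.
 */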
static int mv_master_reset(void __iomem *mmio_base)
{
	void __iomem *reg = mmio_base + PCI_MAIN_CMD_STS_OFS;
	int i, rc = 0;
	u32 t;

	VPRINTK("ENTER\n");

	/* Following procedure defined in PCI "main command and status
	 * register" table.
	 */
	t = readl(reg);
	writel(t | STOP_PCI_MASTER, reg);

	for (i = 0; i < 100; i++) {
		udelay(100);
		t = readl(reg);
		if (PCI_MASTER_EMPTY & t) {
			break;
		}
	}
	if (!(PCI_MASTER_EMPTY & t)) {
		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
		rc = 1;		/* broken HW? */
		goto done;
	}

	/* set reset */
	i = 5;
	do {
		writel(t | GLOB_SFT_RST, reg);
		t = readl(reg);
		udelay(1);
	} while (!(GLOB_SFT_RST & t) && (i-- > 0));

	if (!(GLOB_SFT_RST & t)) {
		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
		rc = 1;		/* broken HW? */
		goto done;
	}

	/* clear reset */
	i = 5;
	do {
		writel(t & ~GLOB_SFT_RST, reg);
		t = readl(reg);
		udelay(1);
	} while ((GLOB_SFT_RST & t) && (i-- > 0));

	if (GLOB_SFT_RST & t) {
		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
		rc = 1;		/* broken HW? */
	}
done:
	VPRINTK("EXIT, rc = %i\n", rc);
	return rc;
}
static void mv_err_intr(struct ata_port *ap)
{
	void __iomem *port_mmio;
	u32 edma_err_cause, serr = 0;

	/* bug here b/c we got an err int on a port we don't know about,
	 * so there's no way to clear it
	 */
	BUG_ON(NULL == ap);
	port_mmio = mv_ap_base(ap);

	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	if (EDMA_ERR_SERR & edma_err_cause) {
		serr = scr_read(ap, SCR_ERROR);
		scr_write_flush(ap, SCR_ERROR, serr);
	}
	DPRINTK("port %u error; EDMA err cause: 0x%08x SERR: 0x%08x\n",
		ap->port_no, edma_err_cause, serr);

	/* Clear EDMA now that SERR cleanup done */
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* check for fatal here and recover if needed */
	if (EDMA_ERR_FATAL & edma_err_cause) {
		mv_phy_reset(ap);
	}
}
/* Handle any outstanding interrupts in a single SATAHC
 */
static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
			 unsigned int hc)
{
	void __iomem *mmio = host_set->mmio_base;
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	struct ata_port *ap;
	struct ata_queued_cmd *qc;
	u32 hc_irq_cause;
	int shift, port, port0, hard_port;
	u8 ata_status;

	if (hc == 0) {
		port0 = 0;
	} else {
		port0 = MV_PORTS_PER_HC;
	}

	/* we'll need the HC success int register in most cases */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	if (hc_irq_cause) {
		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
	}

	VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
		hc, relevant, hc_irq_cause);

	for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
		ap = host_set->ports[port];
		hard_port = port & MV_PORT_MASK;	/* range 0-3 */
		ata_status = 0xffU;

		if (((CRBP_DMA_DONE | DEV_IRQ) << hard_port) & hc_irq_cause) {
			BUG_ON(NULL == ap);
			/* rcv'd new resp, basic DMA complete, or ATA IRQ */
			/* This is needed to clear the ATA INTRQ.
			 * FIXME: don't read the status reg in EDMA mode!
			 */
			ata_status = readb((void __iomem *)
					   ap->ioaddr.status_addr);
		}

		shift = port << 1;		/* (port * 2) */
		if (port >= MV_PORTS_PER_HC) {
			shift++;	/* skip bit 8 in the HC Main IRQ reg */
		}
		if ((PORT0_ERR << shift) & relevant) {
			mv_err_intr(ap);
			/* FIXME: smart to OR in ATA_ERR? */
			ata_status = readb((void __iomem *)
					   ap->ioaddr.status_addr) | ATA_ERR;
		}

		if (ap) {
			qc = ata_qc_from_tag(ap, ap->active_tag);
			if (NULL != qc) {
				VPRINTK("port %u IRQ found for qc, "
					"ata_status 0x%x\n", port, ata_status);
				BUG_ON(0xffU == ata_status);
				/* mark qc status appropriately */
				ata_qc_complete(qc, ata_status);
			}
		}
	}
	VPRINTK("EXIT\n");
}
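/* Main IRQ cause register layout, as the constants above imply: bits 0-7
 * carry HC0's per-port bits (error at 2*port, done at 2*port+1), bit 8
 * rounds out HC0's 0x1ff pending mask, bits 9-17 repeat the pattern for
 * HC1, and PCI_ERR sits at bit 18.  That is why mv_host_intr() computes
 * shift = port << 1 and adds one for ports 4-7 to step over bit 8; e.g.
 * global port 5's error bit is (5 << 1) + 1 = bit 11.
 */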
static irqreturn_t mv_interrupt(int irq, void *dev_instance,
				struct pt_regs *regs)
{
	struct ata_host_set *host_set = dev_instance;
	unsigned int hc, handled = 0, n_hcs;
	void __iomem *mmio;
	u32 irq_stat;

	mmio = host_set->mmio_base;
	irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);
	n_hcs = mv_get_hc_count(host_set->ports[0]->flags);

	/* check the cases where we either have nothing pending or have read
	 * a bogus register value which can indicate HW removal or PCI fault
	 */
	if (!irq_stat || (0xffffffffU == irq_stat)) {
		return IRQ_NONE;
	}

	spin_lock(&host_set->lock);

	for (hc = 0; hc < n_hcs; hc++) {
		u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
		if (relevant) {
			mv_host_intr(host_set, relevant, hc);
			handled++;
		}
	}
	if (PCI_ERR & irq_stat) {
		/* FIXME: these are all masked by default, but still need
		 * to recover from them properly.
		 */
	}
	spin_unlock(&host_set->lock);

	return IRQ_RETVAL(handled);
}
static void mv_phy_reset(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct ata_taskfile tf;
	struct ata_device *dev = &ap->device[0];
	u32 edma = 0, bdma;

	VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);

	edma = readl(port_mmio + EDMA_CMD_OFS);
	if (EDMA_EN & edma) {
		/* disable EDMA if active */
		edma &= ~EDMA_EN;
		writelfl(edma | EDMA_DS, port_mmio + EDMA_CMD_OFS);
		udelay(1);
	} else if (mv_port_bdma_capable(ap) &&
		   (bdma = readl(port_mmio + BDMA_CMD_OFS)) & BDMA_START) {
		/* disable BDMA if active */
		writelfl(bdma & ~BDMA_START, port_mmio + BDMA_CMD_OFS);
	}

	writelfl(edma | ATA_RST, port_mmio + EDMA_CMD_OFS);
	udelay(25);		/* allow reset propagation */

	/* Spec never mentions clearing the bit.  Marvell's driver does
	 * clear the bit, however.
	 */
	writelfl(edma & ~ATA_RST, port_mmio + EDMA_CMD_OFS);

	VPRINTK("Done.  Now calling __sata_phy_reset()\n");

	/* proceed to init communications via the scr_control reg */
	__sata_phy_reset(ap);

	if (ap->flags & ATA_FLAG_PORT_DISABLED) {
		VPRINTK("Port disabled pre-sig.  Exiting.\n");
		return;
	}

	tf.lbah = readb((void __iomem *) ap->ioaddr.lbah_addr);
	tf.lbam = readb((void __iomem *) ap->ioaddr.lbam_addr);
	tf.lbal = readb((void __iomem *) ap->ioaddr.lbal_addr);
	tf.nsect = readb((void __iomem *) ap->ioaddr.nsect_addr);

	dev->class = ata_dev_classify(&tf);
	if (!ata_dev_present(dev)) {
		VPRINTK("Port disabled post-sig: No device present.\n");
		ata_port_disable(ap);
	}
	VPRINTK("EXIT\n");
}
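/* The four shadow-register reads above collect the signature the device
 * presents after reset; ata_dev_classify() keys off it (lbam/lbah of
 * 0x00/0x00 indicating ATA, 0x14/0xEB indicating ATAPI, anything else
 * unknown) to set dev->class.
 */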
static void mv_port_init(struct ata_ioports *port, unsigned long base)
{
	/* PIO related setup */
	port->data_addr = base + SHD_PIO_DATA_OFS;
	port->error_addr = port->feature_addr = base + SHD_FEA_ERR_OFS;
	port->nsect_addr = base + SHD_SECT_CNT_OFS;
	port->lbal_addr = base + SHD_LBA_L_OFS;
	port->lbam_addr = base + SHD_LBA_M_OFS;
	port->lbah_addr = base + SHD_LBA_H_OFS;
	port->device_addr = base + SHD_DEV_HD_OFS;
	port->status_addr = port->command_addr = base + SHD_CMD_STA_OFS;
	port->altstatus_addr = port->ctl_addr = base + SHD_CTL_AST_OFS;
	/* unused */
	port->cmd_addr = port->bmdma_addr = port->scr_addr = 0;

	/* unmask all EDMA error interrupts */
	writel(~0, (void __iomem *)base + EDMA_ERR_IRQ_MASK_OFS);

	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
		readl((void __iomem *)base + EDMA_CFG_OFS),
		readl((void __iomem *)base + EDMA_ERR_IRQ_CAUSE_OFS),
		readl((void __iomem *)base + EDMA_ERR_IRQ_MASK_OFS));
}
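/* Net effect of the assignments above: the standard ATA taskfile registers
 * all live in the port's shadow block at fixed 4-byte strides (data at
 * 0x100 through ctl/altstatus at 0x120), so libata's generic PIO routines
 * can drive the chip once these addresses are filled in.  There is no
 * legacy cmd/bmdma block, and SCR access goes through mv_scr_read/write
 * rather than scr_addr.
 */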
static int mv_host_init(struct ata_probe_ent *probe_ent)
{
	int rc = 0, n_hc, port, hc;
	void __iomem *mmio = probe_ent->mmio_base;
	void __iomem *port_mmio;

	if (mv_master_reset(probe_ent->mmio_base)) {
		rc = 1;
		goto done;
	}

	n_hc = mv_get_hc_count(probe_ent->host_flags);
	probe_ent->n_ports = MV_PORTS_PER_HC * n_hc;

	for (port = 0; port < probe_ent->n_ports; port++) {
		port_mmio = mv_port_base(mmio, port);
		mv_port_init(&probe_ent->port[port], (unsigned long)port_mmio);
	}

	for (hc = 0; hc < n_hc; hc++) {
		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause=0x%08x\n", hc,
			readl(mv_hc_base(mmio, hc) + HC_CFG_OFS),
			readl(mv_hc_base(mmio, hc) + HC_IRQ_CAUSE_OFS));
	}

	writel(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);
	writel(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS);

	VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
		"PCI int cause/mask=0x%08x/0x%08x\n",
		readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
		readl(mmio + HC_MAIN_IRQ_MASK_OFS),
		readl(mmio + PCI_IRQ_CAUSE_OFS),
		readl(mmio + PCI_IRQ_MASK_OFS));
done:
	return rc;
}
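/* Bring-up order matters here: global soft reset first, then per-port
 * address setup and EDMA error unmasking, and only then the two top-level
 * unmask writes that let interrupts through.  Everything listed in
 * HC_MAIN_MASKED_IRQS (the 6xxx coalescing, GPIO, TWSI and reserved bits)
 * stays masked.
 */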
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version = 0;
	struct ata_probe_ent *probe_ent = NULL;
	struct mv_host_priv *hpriv;
	unsigned int board_idx = (unsigned int)ent->driver_data;
	void __iomem *mmio_base;
	int pci_dev_busy = 0;
	int rc;

	if (!printed_version++) {
		printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n");
	}
	VPRINTK("ENTER for PCI Bus:Slot.Func=%u:%u.%u\n", pdev->bus->number,
		PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));

	rc = pci_enable_device(pdev);
	if (rc) {
		return rc;
	}

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc) {
		pci_dev_busy = 1;
		goto err_out;
	}

	probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
	if (probe_ent == NULL) {
		rc = -ENOMEM;
		goto err_out_regions;
	}

	memset(probe_ent, 0, sizeof(*probe_ent));
	probe_ent->dev = pci_dev_to_dev(pdev);
	INIT_LIST_HEAD(&probe_ent->node);

	mmio_base = ioremap_nocache(pci_resource_start(pdev, MV_PRIMARY_BAR),
				    pci_resource_len(pdev, MV_PRIMARY_BAR));
	if (mmio_base == NULL) {
		rc = -ENOMEM;
		goto err_out_free_ent;
	}

	hpriv = kmalloc(sizeof(*hpriv), GFP_KERNEL);
	if (!hpriv) {
		rc = -ENOMEM;
		goto err_out_iounmap;
	}
	memset(hpriv, 0, sizeof(*hpriv));

	probe_ent->sht = mv_port_info[board_idx].sht;
	probe_ent->host_flags = mv_port_info[board_idx].host_flags;
	probe_ent->pio_mask = mv_port_info[board_idx].pio_mask;
	probe_ent->udma_mask = mv_port_info[board_idx].udma_mask;
	probe_ent->port_ops = mv_port_info[board_idx].port_ops;

	probe_ent->irq = pdev->irq;
	probe_ent->irq_flags = SA_SHIRQ;
	probe_ent->mmio_base = mmio_base;
	probe_ent->private_data = hpriv;

	/* initialize adapter */
	rc = mv_host_init(probe_ent);
	if (rc) {
		goto err_out_hpriv;
	}
/*	mv_print_info(probe_ent); */

	{
		int b, w;
		u32 dw[4];	/* hold a line of 16 bytes */
		VPRINTK("PCI config space:\n");
		for (b = 0; b < 0x40; ) {
			for (w = 0; w < 4; w++) {
				(void) pci_read_config_dword(pdev, b, &dw[w]);
				b += sizeof(u32);
			}
			VPRINTK("%08x %08x %08x %08x\n",
				dw[0], dw[1], dw[2], dw[3]);
		}
	}

	/* FIXME: check ata_device_add return value */
	ata_device_add(probe_ent);
	kfree(probe_ent);

	return 0;

err_out_hpriv:
	kfree(hpriv);
err_out_iounmap:
	iounmap(mmio_base);
err_out_free_ent:
	kfree(probe_ent);
err_out_regions:
	pci_release_regions(pdev);
err_out:
	if (!pci_dev_busy) {
		pci_disable_device(pdev);
	}
	return rc;
}
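/* probe_ent is only a carrier: ata_device_add() copies what it needs into
 * the ata_host_set it creates, so the structure is freed on both the
 * success and the error paths, while hpriv lives on as the host's
 * private_data for the life of the host.
 */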
static int __init mv_init(void)
{
	return pci_module_init(&mv_pci_driver);
}

static void __exit mv_exit(void)
{
	pci_unregister_driver(&mv_pci_driver);
}
MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

module_init(mv_init);
module_exit(mv_exit);