1 // SPDX-License-Identifier: GPL-2.0-only
3 * sata_mv.c - Marvell SATA support
5 * Copyright 2008-2009: Marvell Corporation, all rights reserved.
6 * Copyright 2005: EMC Corporation, all rights reserved.
7 * Copyright 2005 Red Hat, Inc. All rights reserved.
9 * Originally written by Brett Russ.
10 * Extensive overhaul and enhancement by Mark Lord <mlord@pobox.com>.
12 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
18 * --> Develop a low-power-consumption strategy, and implement it.
20 * --> Add sysfs attributes for per-chip / per-HC IRQ coalescing thresholds.
22 * --> [Experiment, Marvell value added] Is it possible to use target
23 * mode to cross-connect two Linux boxes with Marvell cards? If so,
24 * creating LibATA target mode support would be very interesting.
26 * Target mode, for those without docs, is the ability to directly
27 * connect two SATA ports.
31 * 80x1-B2 errata PCI#11:
33 * Users of the 6041/6081 Rev.B2 chips (current is C0)
34 * should be careful to insert those cards only onto PCI-X bus #0,
35 * and only in device slots 0..7, not higher. The chips may not
36 * work correctly otherwise (note: this is a pretty rare condition).
39 #include <linux/kernel.h>
40 #include <linux/module.h>
41 #include <linux/pci.h>
42 #include <linux/init.h>
43 #include <linux/blkdev.h>
44 #include <linux/delay.h>
45 #include <linux/interrupt.h>
46 #include <linux/dmapool.h>
47 #include <linux/dma-mapping.h>
48 #include <linux/device.h>
49 #include <linux/clk.h>
50 #include <linux/phy/phy.h>
51 #include <linux/platform_device.h>
52 #include <linux/ata_platform.h>
53 #include <linux/mbus.h>
54 #include <linux/bitops.h>
55 #include <linux/gfp.h>
57 #include <linux/of_irq.h>
58 #include <scsi/scsi_host.h>
59 #include <scsi/scsi_cmnd.h>
60 #include <scsi/scsi_device.h>
61 #include <linux/libata.h>
63 #define DRV_NAME "sata_mv"
64 #define DRV_VERSION "1.28"
72 module_param(msi
, int, S_IRUGO
);
73 MODULE_PARM_DESC(msi
, "Enable use of PCI MSI (0=off, 1=on)");
76 static int irq_coalescing_io_count
;
77 module_param(irq_coalescing_io_count
, int, S_IRUGO
);
78 MODULE_PARM_DESC(irq_coalescing_io_count
,
79 "IRQ coalescing I/O count threshold (0..255)");
81 static int irq_coalescing_usecs
;
82 module_param(irq_coalescing_usecs
, int, S_IRUGO
);
83 MODULE_PARM_DESC(irq_coalescing_usecs
,
84 "IRQ coalescing time threshold in usecs");
87 /* BAR's are enumerated in terms of pci_resource_start() terms */
88 MV_PRIMARY_BAR
= 0, /* offset 0x10: memory space */
89 MV_IO_BAR
= 2, /* offset 0x18: IO space */
90 MV_MISC_BAR
= 3, /* offset 0x1c: FLASH, NVRAM, SRAM */
92 MV_MAJOR_REG_AREA_SZ
= 0x10000, /* 64KB */
93 MV_MINOR_REG_AREA_SZ
= 0x2000, /* 8KB */
95 /* For use with both IRQ coalescing methods ("all ports" or "per-HC" */
96 COAL_CLOCKS_PER_USEC
= 150, /* for calculating COAL_TIMEs */
97 MAX_COAL_TIME_THRESHOLD
= ((1 << 24) - 1), /* internal clocks count */
98 MAX_COAL_IO_COUNT
= 255, /* completed I/O count */
103 * Per-chip ("all ports") interrupt coalescing feature.
104 * This is only for GEN_II / GEN_IIE hardware.
106 * Coalescing defers the interrupt until either the IO_THRESHOLD
107 * (count of completed I/Os) is met, or the TIME_THRESHOLD is met.
109 COAL_REG_BASE
= 0x18000,
110 IRQ_COAL_CAUSE
= (COAL_REG_BASE
+ 0x08),
111 ALL_PORTS_COAL_IRQ
= (1 << 4), /* all ports irq event */
113 IRQ_COAL_IO_THRESHOLD
= (COAL_REG_BASE
+ 0xcc),
114 IRQ_COAL_TIME_THRESHOLD
= (COAL_REG_BASE
+ 0xd0),
117 * Registers for the (unused here) transaction coalescing feature:
119 TRAN_COAL_CAUSE_LO
= (COAL_REG_BASE
+ 0x88),
120 TRAN_COAL_CAUSE_HI
= (COAL_REG_BASE
+ 0x8c),
122 SATAHC0_REG_BASE
= 0x20000,
124 GPIO_PORT_CTL
= 0x104f0,
127 MV_PCI_REG_SZ
= MV_MAJOR_REG_AREA_SZ
,
128 MV_SATAHC_REG_SZ
= MV_MAJOR_REG_AREA_SZ
,
129 MV_SATAHC_ARBTR_REG_SZ
= MV_MINOR_REG_AREA_SZ
, /* arbiter */
130 MV_PORT_REG_SZ
= MV_MINOR_REG_AREA_SZ
,
133 MV_MAX_Q_DEPTH_MASK
= MV_MAX_Q_DEPTH
- 1,
135 /* CRQB needs alignment on a 1KB boundary. Size == 1KB
136 * CRPB needs alignment on a 256B boundary. Size == 256B
137 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
139 MV_CRQB_Q_SZ
= (32 * MV_MAX_Q_DEPTH
),
140 MV_CRPB_Q_SZ
= (8 * MV_MAX_Q_DEPTH
),
142 MV_SG_TBL_SZ
= (16 * MV_MAX_SG_CT
),
144 /* Determine hc from 0-7 port: hc = port >> MV_PORT_HC_SHIFT */
145 MV_PORT_HC_SHIFT
= 2,
146 MV_PORTS_PER_HC
= (1 << MV_PORT_HC_SHIFT
), /* 4 */
147 /* Determine hc port from 0-7 port: hardport = port & MV_PORT_MASK */
148 MV_PORT_MASK
= (MV_PORTS_PER_HC
- 1), /* 3 */
151 MV_FLAG_DUAL_HC
= (1 << 30), /* two SATA Host Controllers */
153 MV_COMMON_FLAGS
= ATA_FLAG_SATA
| ATA_FLAG_PIO_POLLING
,
155 MV_GEN_I_FLAGS
= MV_COMMON_FLAGS
| ATA_FLAG_NO_ATAPI
,
157 MV_GEN_II_FLAGS
= MV_COMMON_FLAGS
| ATA_FLAG_NCQ
|
158 ATA_FLAG_PMP
| ATA_FLAG_ACPI_SATA
,
160 MV_GEN_IIE_FLAGS
= MV_GEN_II_FLAGS
| ATA_FLAG_AN
,
162 CRQB_FLAG_READ
= (1 << 0),
164 CRQB_IOID_SHIFT
= 6, /* CRQB Gen-II/IIE IO Id shift */
165 CRQB_PMP_SHIFT
= 12, /* CRQB Gen-II/IIE PMP shift */
166 CRQB_HOSTQ_SHIFT
= 17, /* CRQB Gen-II/IIE HostQueTag shift */
167 CRQB_CMD_ADDR_SHIFT
= 8,
168 CRQB_CMD_CS
= (0x2 << 11),
169 CRQB_CMD_LAST
= (1 << 15),
171 CRPB_FLAG_STATUS_SHIFT
= 8,
172 CRPB_IOID_SHIFT_6
= 5, /* CRPB Gen-II IO Id shift */
173 CRPB_IOID_SHIFT_7
= 7, /* CRPB Gen-IIE IO Id shift */
175 EPRD_FLAG_END_OF_TBL
= (1 << 31),
177 /* PCI interface registers */
179 MV_PCI_COMMAND
= 0xc00,
180 MV_PCI_COMMAND_MWRCOM
= (1 << 4), /* PCI Master Write Combining */
181 MV_PCI_COMMAND_MRDTRIG
= (1 << 7), /* PCI Master Read Trigger */
183 PCI_MAIN_CMD_STS
= 0xd30,
184 STOP_PCI_MASTER
= (1 << 2),
185 PCI_MASTER_EMPTY
= (1 << 3),
186 GLOB_SFT_RST
= (1 << 4),
189 MV_PCI_MODE_MASK
= 0x30,
191 MV_PCI_EXP_ROM_BAR_CTL
= 0xd2c,
192 MV_PCI_DISC_TIMER
= 0xd04,
193 MV_PCI_MSI_TRIGGER
= 0xc38,
194 MV_PCI_SERR_MASK
= 0xc28,
195 MV_PCI_XBAR_TMOUT
= 0x1d04,
196 MV_PCI_ERR_LOW_ADDRESS
= 0x1d40,
197 MV_PCI_ERR_HIGH_ADDRESS
= 0x1d44,
198 MV_PCI_ERR_ATTRIBUTE
= 0x1d48,
199 MV_PCI_ERR_COMMAND
= 0x1d50,
201 PCI_IRQ_CAUSE
= 0x1d58,
202 PCI_IRQ_MASK
= 0x1d5c,
203 PCI_UNMASK_ALL_IRQS
= 0x7fffff, /* bits 22-0 */
205 PCIE_IRQ_CAUSE
= 0x1900,
206 PCIE_IRQ_MASK
= 0x1910,
207 PCIE_UNMASK_ALL_IRQS
= 0x40a, /* assorted bits */
209 /* Host Controller Main Interrupt Cause/Mask registers (1 per-chip) */
210 PCI_HC_MAIN_IRQ_CAUSE
= 0x1d60,
211 PCI_HC_MAIN_IRQ_MASK
= 0x1d64,
212 SOC_HC_MAIN_IRQ_CAUSE
= 0x20020,
213 SOC_HC_MAIN_IRQ_MASK
= 0x20024,
214 ERR_IRQ
= (1 << 0), /* shift by (2 * port #) */
215 DONE_IRQ
= (1 << 1), /* shift by (2 * port #) */
216 HC0_IRQ_PEND
= 0x1ff, /* bits 0-8 = HC0's ports */
217 HC_SHIFT
= 9, /* bits 9-17 = HC1's ports */
218 DONE_IRQ_0_3
= 0x000000aa, /* DONE_IRQ ports 0,1,2,3 */
219 DONE_IRQ_4_7
= (DONE_IRQ_0_3
<< HC_SHIFT
), /* 4,5,6,7 */
221 TRAN_COAL_LO_DONE
= (1 << 19), /* transaction coalescing */
222 TRAN_COAL_HI_DONE
= (1 << 20), /* transaction coalescing */
223 PORTS_0_3_COAL_DONE
= (1 << 8), /* HC0 IRQ coalescing */
224 PORTS_4_7_COAL_DONE
= (1 << 17), /* HC1 IRQ coalescing */
225 ALL_PORTS_COAL_DONE
= (1 << 21), /* GEN_II(E) IRQ coalescing */
226 GPIO_INT
= (1 << 22),
227 SELF_INT
= (1 << 23),
228 TWSI_INT
= (1 << 24),
229 HC_MAIN_RSVD
= (0x7f << 25), /* bits 31-25 */
230 HC_MAIN_RSVD_5
= (0x1fff << 19), /* bits 31-19 */
231 HC_MAIN_RSVD_SOC
= (0x3fffffb << 6), /* bits 31-9, 7-6 */
233 /* SATAHC registers */
237 DMA_IRQ
= (1 << 0), /* shift by port # */
238 HC_COAL_IRQ
= (1 << 4), /* IRQ coalescing */
239 DEV_IRQ
= (1 << 8), /* shift by port # */
242 * Per-HC (Host-Controller) interrupt coalescing feature.
243 * This is present on all chip generations.
245 * Coalescing defers the interrupt until either the IO_THRESHOLD
246 * (count of completed I/Os) is met, or the TIME_THRESHOLD is met.
248 HC_IRQ_COAL_IO_THRESHOLD
= 0x000c,
249 HC_IRQ_COAL_TIME_THRESHOLD
= 0x0010,
252 SOC_LED_CTRL_BLINK
= (1 << 0), /* Active LED blink */
253 SOC_LED_CTRL_ACT_PRESENCE
= (1 << 2), /* Multiplex dev presence */
254 /* with dev activity LED */
256 /* Shadow block registers */
258 SHD_CTL_AST
= 0x20, /* ofs from SHD_BLK */
261 SATA_STATUS
= 0x300, /* ctrl, err regs follow status */
263 FIS_IRQ_CAUSE
= 0x364,
264 FIS_IRQ_CAUSE_AN
= (1 << 9), /* async notification */
266 LTMODE
= 0x30c, /* requires read-after-write */
267 LTMODE_BIT8
= (1 << 8), /* unknown, but necessary */
272 PHY_MODE4
= 0x314, /* requires read-after-write */
273 PHY_MODE4_CFG_MASK
= 0x00000003, /* phy internal config field */
274 PHY_MODE4_CFG_VALUE
= 0x00000001, /* phy internal config field */
275 PHY_MODE4_RSVD_ZEROS
= 0x5de3fffa, /* Gen2e always write zeros */
276 PHY_MODE4_RSVD_ONES
= 0x00000005, /* Gen2e always write ones */
279 SATA_TESTCTL
= 0x348,
281 VENDOR_UNIQUE_FIS
= 0x35c,
284 FISCFG_WAIT_DEV_ERR
= (1 << 8), /* wait for host on DevErr */
285 FISCFG_SINGLE_SYNC
= (1 << 16), /* SYNC on DMA activation */
287 PHY_MODE9_GEN2
= 0x398,
288 PHY_MODE9_GEN1
= 0x39c,
289 PHYCFG_OFS
= 0x3a0, /* only in 65n devices */
296 LP_PHY_CTL_PIN_PU_PLL
= (1 << 0),
297 LP_PHY_CTL_PIN_PU_RX
= (1 << 1),
298 LP_PHY_CTL_PIN_PU_TX
= (1 << 2),
299 LP_PHY_CTL_GEN_TX_3G
= (1 << 5),
300 LP_PHY_CTL_GEN_RX_3G
= (1 << 9),
302 MV_M2_PREAMP_MASK
= 0x7e0,
306 EDMA_CFG_Q_DEPTH
= 0x1f, /* max device queue depth */
307 EDMA_CFG_NCQ
= (1 << 5), /* for R/W FPDMA queued */
308 EDMA_CFG_NCQ_GO_ON_ERR
= (1 << 14), /* continue on error */
309 EDMA_CFG_RD_BRST_EXT
= (1 << 11), /* read burst 512B */
310 EDMA_CFG_WR_BUFF_LEN
= (1 << 13), /* write buffer 512B */
311 EDMA_CFG_EDMA_FBS
= (1 << 16), /* EDMA FIS-Based Switching */
312 EDMA_CFG_FBS
= (1 << 26), /* FIS-Based Switching */
314 EDMA_ERR_IRQ_CAUSE
= 0x8,
315 EDMA_ERR_IRQ_MASK
= 0xc,
316 EDMA_ERR_D_PAR
= (1 << 0), /* UDMA data parity err */
317 EDMA_ERR_PRD_PAR
= (1 << 1), /* UDMA PRD parity err */
318 EDMA_ERR_DEV
= (1 << 2), /* device error */
319 EDMA_ERR_DEV_DCON
= (1 << 3), /* device disconnect */
320 EDMA_ERR_DEV_CON
= (1 << 4), /* device connected */
321 EDMA_ERR_SERR
= (1 << 5), /* SError bits [WBDST] raised */
322 EDMA_ERR_SELF_DIS
= (1 << 7), /* Gen II/IIE self-disable */
323 EDMA_ERR_SELF_DIS_5
= (1 << 8), /* Gen I self-disable */
324 EDMA_ERR_BIST_ASYNC
= (1 << 8), /* BIST FIS or Async Notify */
325 EDMA_ERR_TRANS_IRQ_7
= (1 << 8), /* Gen IIE transprt layer irq */
326 EDMA_ERR_CRQB_PAR
= (1 << 9), /* CRQB parity error */
327 EDMA_ERR_CRPB_PAR
= (1 << 10), /* CRPB parity error */
328 EDMA_ERR_INTRL_PAR
= (1 << 11), /* internal parity error */
329 EDMA_ERR_IORDY
= (1 << 12), /* IORdy timeout */
331 EDMA_ERR_LNK_CTRL_RX
= (0xf << 13), /* link ctrl rx error */
332 EDMA_ERR_LNK_CTRL_RX_0
= (1 << 13), /* transient: CRC err */
333 EDMA_ERR_LNK_CTRL_RX_1
= (1 << 14), /* transient: FIFO err */
334 EDMA_ERR_LNK_CTRL_RX_2
= (1 << 15), /* fatal: caught SYNC */
335 EDMA_ERR_LNK_CTRL_RX_3
= (1 << 16), /* transient: FIS rx err */
337 EDMA_ERR_LNK_DATA_RX
= (0xf << 17), /* link data rx error */
339 EDMA_ERR_LNK_CTRL_TX
= (0x1f << 21), /* link ctrl tx error */
340 EDMA_ERR_LNK_CTRL_TX_0
= (1 << 21), /* transient: CRC err */
341 EDMA_ERR_LNK_CTRL_TX_1
= (1 << 22), /* transient: FIFO err */
342 EDMA_ERR_LNK_CTRL_TX_2
= (1 << 23), /* transient: caught SYNC */
343 EDMA_ERR_LNK_CTRL_TX_3
= (1 << 24), /* transient: caught DMAT */
344 EDMA_ERR_LNK_CTRL_TX_4
= (1 << 25), /* transient: FIS collision */
346 EDMA_ERR_LNK_DATA_TX
= (0x1f << 26), /* link data tx error */
348 EDMA_ERR_TRANS_PROTO
= (1 << 31), /* transport protocol error */
349 EDMA_ERR_OVERRUN_5
= (1 << 5),
350 EDMA_ERR_UNDERRUN_5
= (1 << 6),
352 EDMA_ERR_IRQ_TRANSIENT
= EDMA_ERR_LNK_CTRL_RX_0
|
353 EDMA_ERR_LNK_CTRL_RX_1
|
354 EDMA_ERR_LNK_CTRL_RX_3
|
355 EDMA_ERR_LNK_CTRL_TX
,
357 EDMA_EH_FREEZE
= EDMA_ERR_D_PAR
|
367 EDMA_ERR_LNK_CTRL_RX_2
|
368 EDMA_ERR_LNK_DATA_RX
|
369 EDMA_ERR_LNK_DATA_TX
|
370 EDMA_ERR_TRANS_PROTO
,
372 EDMA_EH_FREEZE_5
= EDMA_ERR_D_PAR
|
377 EDMA_ERR_UNDERRUN_5
|
378 EDMA_ERR_SELF_DIS_5
|
384 EDMA_REQ_Q_BASE_HI
= 0x10,
385 EDMA_REQ_Q_IN_PTR
= 0x14, /* also contains BASE_LO */
387 EDMA_REQ_Q_OUT_PTR
= 0x18,
388 EDMA_REQ_Q_PTR_SHIFT
= 5,
390 EDMA_RSP_Q_BASE_HI
= 0x1c,
391 EDMA_RSP_Q_IN_PTR
= 0x20,
392 EDMA_RSP_Q_OUT_PTR
= 0x24, /* also contains BASE_LO */
393 EDMA_RSP_Q_PTR_SHIFT
= 3,
395 EDMA_CMD
= 0x28, /* EDMA command register */
396 EDMA_EN
= (1 << 0), /* enable EDMA */
397 EDMA_DS
= (1 << 1), /* disable EDMA; self-negated */
398 EDMA_RESET
= (1 << 2), /* reset eng/trans/link/phy */
400 EDMA_STATUS
= 0x30, /* EDMA engine status */
401 EDMA_STATUS_CACHE_EMPTY
= (1 << 6), /* GenIIe command cache empty */
402 EDMA_STATUS_IDLE
= (1 << 7), /* GenIIe EDMA enabled/idle */
404 EDMA_IORDY_TMOUT
= 0x34,
407 EDMA_HALTCOND
= 0x60, /* GenIIe halt conditions */
408 EDMA_UNKNOWN_RSVD
= 0x6C, /* GenIIe unknown/reserved */
410 BMDMA_CMD
= 0x224, /* bmdma command register */
411 BMDMA_STATUS
= 0x228, /* bmdma status register */
412 BMDMA_PRD_LOW
= 0x22c, /* bmdma PRD addr 31:0 */
413 BMDMA_PRD_HIGH
= 0x230, /* bmdma PRD addr 63:32 */
415 /* Host private flags (hp_flags) */
416 MV_HP_FLAG_MSI
= (1 << 0),
417 MV_HP_ERRATA_50XXB0
= (1 << 1),
418 MV_HP_ERRATA_50XXB2
= (1 << 2),
419 MV_HP_ERRATA_60X1B2
= (1 << 3),
420 MV_HP_ERRATA_60X1C0
= (1 << 4),
421 MV_HP_GEN_I
= (1 << 6), /* Generation I: 50xx */
422 MV_HP_GEN_II
= (1 << 7), /* Generation II: 60xx */
423 MV_HP_GEN_IIE
= (1 << 8), /* Generation IIE: 6042/7042 */
424 MV_HP_PCIE
= (1 << 9), /* PCIe bus/regs: 7042 */
425 MV_HP_CUT_THROUGH
= (1 << 10), /* can use EDMA cut-through */
426 MV_HP_FLAG_SOC
= (1 << 11), /* SystemOnChip, no PCI */
427 MV_HP_QUIRK_LED_BLINK_EN
= (1 << 12), /* is led blinking enabled? */
428 MV_HP_FIX_LP_PHY_CTL
= (1 << 13), /* fix speed in LP_PHY_CTL ? */
430 /* Port private flags (pp_flags) */
431 MV_PP_FLAG_EDMA_EN
= (1 << 0), /* is EDMA engine enabled? */
432 MV_PP_FLAG_NCQ_EN
= (1 << 1), /* is EDMA set up for NCQ? */
433 MV_PP_FLAG_FBS_EN
= (1 << 2), /* is EDMA set up for FBS? */
434 MV_PP_FLAG_DELAYED_EH
= (1 << 3), /* delayed dev err handling */
435 MV_PP_FLAG_FAKE_ATA_BUSY
= (1 << 4), /* ignore initial ATA_DRDY */
438 #define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
439 #define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
440 #define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
441 #define IS_PCIE(hpriv) ((hpriv)->hp_flags & MV_HP_PCIE)
442 #define IS_SOC(hpriv) ((hpriv)->hp_flags & MV_HP_FLAG_SOC)
444 #define WINDOW_CTRL(i) (0x20030 + ((i) << 4))
445 #define WINDOW_BASE(i) (0x20034 + ((i) << 4))
448 /* DMA boundary 0xffff is required by the s/g splitting
449 * we need on /length/ in mv_fill-sg().
451 MV_DMA_BOUNDARY
= 0xffffU
,
453 /* mask of register bits containing lower 32 bits
454 * of EDMA request queue DMA address
456 EDMA_REQ_Q_BASE_LO_MASK
= 0xfffffc00U
,
458 /* ditto, for response queue */
459 EDMA_RSP_Q_BASE_LO_MASK
= 0xffffff00U
,
473 /* Command ReQuest Block: 32B */
489 /* Command ResPonse Block: 8B */
496 /* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
505 * We keep a local cache of a few frequently accessed port
506 * registers here, to avoid having to read them (very slow)
507 * when switching between EDMA and non-EDMA modes.
509 struct mv_cached_regs
{
516 struct mv_port_priv
{
517 struct mv_crqb
*crqb
;
519 struct mv_crpb
*crpb
;
521 struct mv_sg
*sg_tbl
[MV_MAX_Q_DEPTH
];
522 dma_addr_t sg_tbl_dma
[MV_MAX_Q_DEPTH
];
524 unsigned int req_idx
;
525 unsigned int resp_idx
;
528 struct mv_cached_regs cached
;
529 unsigned int delayed_eh_pmp_map
;
532 struct mv_port_signal
{
537 struct mv_host_priv
{
539 unsigned int board_idx
;
541 struct mv_port_signal signal
[8];
542 const struct mv_hw_ops
*ops
;
545 void __iomem
*main_irq_cause_addr
;
546 void __iomem
*main_irq_mask_addr
;
547 u32 irq_cause_offset
;
552 * Needed on some devices that require their clocks to be enabled.
553 * These are optional: if the platform device does not have any
554 * clocks, they won't be used. Also, if the underlying hardware
555 * does not support the common clock framework (CONFIG_HAVE_CLK=n),
556 * all the clock operations become no-ops (see clk.h).
559 struct clk
**port_clks
;
561 * Some devices have a SATA PHY which can be enabled/disabled
562 * in order to save power. These are optional: if the platform
563 * devices does not have any phy, they won't be used.
565 struct phy
**port_phys
;
567 * These consistent DMA memory pools give us guaranteed
568 * alignment for hardware-accessed data structures,
569 * and less memory waste in accomplishing the alignment.
571 struct dma_pool
*crqb_pool
;
572 struct dma_pool
*crpb_pool
;
573 struct dma_pool
*sg_tbl_pool
;
577 void (*phy_errata
)(struct mv_host_priv
*hpriv
, void __iomem
*mmio
,
579 void (*enable_leds
)(struct mv_host_priv
*hpriv
, void __iomem
*mmio
);
580 void (*read_preamp
)(struct mv_host_priv
*hpriv
, int idx
,
582 int (*reset_hc
)(struct ata_host
*host
, void __iomem
*mmio
,
584 void (*reset_flash
)(struct mv_host_priv
*hpriv
, void __iomem
*mmio
);
585 void (*reset_bus
)(struct ata_host
*host
, void __iomem
*mmio
);
588 static int mv_scr_read(struct ata_link
*link
, unsigned int sc_reg_in
, u32
*val
);
589 static int mv_scr_write(struct ata_link
*link
, unsigned int sc_reg_in
, u32 val
);
590 static int mv5_scr_read(struct ata_link
*link
, unsigned int sc_reg_in
, u32
*val
);
591 static int mv5_scr_write(struct ata_link
*link
, unsigned int sc_reg_in
, u32 val
);
592 static int mv_port_start(struct ata_port
*ap
);
593 static void mv_port_stop(struct ata_port
*ap
);
594 static int mv_qc_defer(struct ata_queued_cmd
*qc
);
595 static enum ata_completion_errors
mv_qc_prep(struct ata_queued_cmd
*qc
);
596 static enum ata_completion_errors
mv_qc_prep_iie(struct ata_queued_cmd
*qc
);
597 static unsigned int mv_qc_issue(struct ata_queued_cmd
*qc
);
598 static int mv_hardreset(struct ata_link
*link
, unsigned int *class,
599 unsigned long deadline
);
600 static void mv_eh_freeze(struct ata_port
*ap
);
601 static void mv_eh_thaw(struct ata_port
*ap
);
602 static void mv6_dev_config(struct ata_device
*dev
);
604 static void mv5_phy_errata(struct mv_host_priv
*hpriv
, void __iomem
*mmio
,
606 static void mv5_enable_leds(struct mv_host_priv
*hpriv
, void __iomem
*mmio
);
607 static void mv5_read_preamp(struct mv_host_priv
*hpriv
, int idx
,
609 static int mv5_reset_hc(struct ata_host
*host
, void __iomem
*mmio
,
611 static void mv5_reset_flash(struct mv_host_priv
*hpriv
, void __iomem
*mmio
);
612 static void mv5_reset_bus(struct ata_host
*host
, void __iomem
*mmio
);
614 static void mv6_phy_errata(struct mv_host_priv
*hpriv
, void __iomem
*mmio
,
616 static void mv6_enable_leds(struct mv_host_priv
*hpriv
, void __iomem
*mmio
);
617 static void mv6_read_preamp(struct mv_host_priv
*hpriv
, int idx
,
619 static int mv6_reset_hc(struct ata_host
*host
, void __iomem
*mmio
,
621 static void mv6_reset_flash(struct mv_host_priv
*hpriv
, void __iomem
*mmio
);
622 static void mv_soc_enable_leds(struct mv_host_priv
*hpriv
,
624 static void mv_soc_read_preamp(struct mv_host_priv
*hpriv
, int idx
,
626 static int mv_soc_reset_hc(struct ata_host
*host
,
627 void __iomem
*mmio
, unsigned int n_hc
);
628 static void mv_soc_reset_flash(struct mv_host_priv
*hpriv
,
630 static void mv_soc_reset_bus(struct ata_host
*host
, void __iomem
*mmio
);
631 static void mv_soc_65n_phy_errata(struct mv_host_priv
*hpriv
,
632 void __iomem
*mmio
, unsigned int port
);
633 static void mv_reset_pci_bus(struct ata_host
*host
, void __iomem
*mmio
);
634 static void mv_reset_channel(struct mv_host_priv
*hpriv
, void __iomem
*mmio
,
635 unsigned int port_no
);
636 static int mv_stop_edma(struct ata_port
*ap
);
637 static int mv_stop_edma_engine(void __iomem
*port_mmio
);
638 static void mv_edma_cfg(struct ata_port
*ap
, int want_ncq
, int want_edma
);
640 static void mv_pmp_select(struct ata_port
*ap
, int pmp
);
641 static int mv_pmp_hardreset(struct ata_link
*link
, unsigned int *class,
642 unsigned long deadline
);
643 static int mv_softreset(struct ata_link
*link
, unsigned int *class,
644 unsigned long deadline
);
645 static void mv_pmp_error_handler(struct ata_port
*ap
);
646 static void mv_process_crpb_entries(struct ata_port
*ap
,
647 struct mv_port_priv
*pp
);
649 static void mv_sff_irq_clear(struct ata_port
*ap
);
650 static int mv_check_atapi_dma(struct ata_queued_cmd
*qc
);
651 static void mv_bmdma_setup(struct ata_queued_cmd
*qc
);
652 static void mv_bmdma_start(struct ata_queued_cmd
*qc
);
653 static void mv_bmdma_stop(struct ata_queued_cmd
*qc
);
654 static u8
mv_bmdma_status(struct ata_port
*ap
);
655 static u8
mv_sff_check_status(struct ata_port
*ap
);
657 /* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
658 * because we have to allow room for worst case splitting of
659 * PRDs for 64K boundaries in mv_fill_sg().
662 static const struct scsi_host_template mv5_sht
= {
663 ATA_BASE_SHT(DRV_NAME
),
664 .sg_tablesize
= MV_MAX_SG_CT
/ 2,
665 .dma_boundary
= MV_DMA_BOUNDARY
,
668 static const struct scsi_host_template mv6_sht
= {
669 __ATA_BASE_SHT(DRV_NAME
),
670 .can_queue
= MV_MAX_Q_DEPTH
- 1,
671 .sg_tablesize
= MV_MAX_SG_CT
/ 2,
672 .dma_boundary
= MV_DMA_BOUNDARY
,
673 .sdev_groups
= ata_ncq_sdev_groups
,
674 .change_queue_depth
= ata_scsi_change_queue_depth
,
675 .tag_alloc_policy
= BLK_TAG_ALLOC_RR
,
676 .device_configure
= ata_scsi_device_configure
679 static struct ata_port_operations mv5_ops
= {
680 .inherits
= &ata_sff_port_ops
,
682 .lost_interrupt
= ATA_OP_NULL
,
684 .qc_defer
= mv_qc_defer
,
685 .qc_prep
= mv_qc_prep
,
686 .qc_issue
= mv_qc_issue
,
688 .freeze
= mv_eh_freeze
,
690 .hardreset
= mv_hardreset
,
692 .scr_read
= mv5_scr_read
,
693 .scr_write
= mv5_scr_write
,
695 .port_start
= mv_port_start
,
696 .port_stop
= mv_port_stop
,
699 static struct ata_port_operations mv6_ops
= {
700 .inherits
= &ata_bmdma_port_ops
,
702 .lost_interrupt
= ATA_OP_NULL
,
704 .qc_defer
= mv_qc_defer
,
705 .qc_prep
= mv_qc_prep
,
706 .qc_issue
= mv_qc_issue
,
708 .dev_config
= mv6_dev_config
,
710 .freeze
= mv_eh_freeze
,
712 .hardreset
= mv_hardreset
,
713 .softreset
= mv_softreset
,
714 .pmp_hardreset
= mv_pmp_hardreset
,
715 .pmp_softreset
= mv_softreset
,
716 .error_handler
= mv_pmp_error_handler
,
718 .scr_read
= mv_scr_read
,
719 .scr_write
= mv_scr_write
,
721 .sff_check_status
= mv_sff_check_status
,
722 .sff_irq_clear
= mv_sff_irq_clear
,
723 .check_atapi_dma
= mv_check_atapi_dma
,
724 .bmdma_setup
= mv_bmdma_setup
,
725 .bmdma_start
= mv_bmdma_start
,
726 .bmdma_stop
= mv_bmdma_stop
,
727 .bmdma_status
= mv_bmdma_status
,
729 .port_start
= mv_port_start
,
730 .port_stop
= mv_port_stop
,
733 static struct ata_port_operations mv_iie_ops
= {
734 .inherits
= &mv6_ops
,
735 .dev_config
= ATA_OP_NULL
,
736 .qc_prep
= mv_qc_prep_iie
,
739 static const struct ata_port_info mv_port_info
[] = {
741 .flags
= MV_GEN_I_FLAGS
,
742 .pio_mask
= ATA_PIO4
,
743 .udma_mask
= ATA_UDMA6
,
744 .port_ops
= &mv5_ops
,
747 .flags
= MV_GEN_I_FLAGS
| MV_FLAG_DUAL_HC
,
748 .pio_mask
= ATA_PIO4
,
749 .udma_mask
= ATA_UDMA6
,
750 .port_ops
= &mv5_ops
,
753 .flags
= MV_GEN_I_FLAGS
| MV_FLAG_DUAL_HC
,
754 .pio_mask
= ATA_PIO4
,
755 .udma_mask
= ATA_UDMA6
,
756 .port_ops
= &mv5_ops
,
759 .flags
= MV_GEN_II_FLAGS
,
760 .pio_mask
= ATA_PIO4
,
761 .udma_mask
= ATA_UDMA6
,
762 .port_ops
= &mv6_ops
,
765 .flags
= MV_GEN_II_FLAGS
| MV_FLAG_DUAL_HC
,
766 .pio_mask
= ATA_PIO4
,
767 .udma_mask
= ATA_UDMA6
,
768 .port_ops
= &mv6_ops
,
771 .flags
= MV_GEN_IIE_FLAGS
,
772 .pio_mask
= ATA_PIO4
,
773 .udma_mask
= ATA_UDMA6
,
774 .port_ops
= &mv_iie_ops
,
777 .flags
= MV_GEN_IIE_FLAGS
,
778 .pio_mask
= ATA_PIO4
,
779 .udma_mask
= ATA_UDMA6
,
780 .port_ops
= &mv_iie_ops
,
783 .flags
= MV_GEN_IIE_FLAGS
,
784 .pio_mask
= ATA_PIO4
,
785 .udma_mask
= ATA_UDMA6
,
786 .port_ops
= &mv_iie_ops
,
790 static const struct mv_hw_ops mv5xxx_ops
= {
791 .phy_errata
= mv5_phy_errata
,
792 .enable_leds
= mv5_enable_leds
,
793 .read_preamp
= mv5_read_preamp
,
794 .reset_hc
= mv5_reset_hc
,
795 .reset_flash
= mv5_reset_flash
,
796 .reset_bus
= mv5_reset_bus
,
799 static const struct mv_hw_ops mv6xxx_ops
= {
800 .phy_errata
= mv6_phy_errata
,
801 .enable_leds
= mv6_enable_leds
,
802 .read_preamp
= mv6_read_preamp
,
803 .reset_hc
= mv6_reset_hc
,
804 .reset_flash
= mv6_reset_flash
,
805 .reset_bus
= mv_reset_pci_bus
,
808 static const struct mv_hw_ops mv_soc_ops
= {
809 .phy_errata
= mv6_phy_errata
,
810 .enable_leds
= mv_soc_enable_leds
,
811 .read_preamp
= mv_soc_read_preamp
,
812 .reset_hc
= mv_soc_reset_hc
,
813 .reset_flash
= mv_soc_reset_flash
,
814 .reset_bus
= mv_soc_reset_bus
,
817 static const struct mv_hw_ops mv_soc_65n_ops
= {
818 .phy_errata
= mv_soc_65n_phy_errata
,
819 .enable_leds
= mv_soc_enable_leds
,
820 .reset_hc
= mv_soc_reset_hc
,
821 .reset_flash
= mv_soc_reset_flash
,
822 .reset_bus
= mv_soc_reset_bus
,
829 static inline void writelfl(unsigned long data
, void __iomem
*addr
)
832 (void) readl(addr
); /* flush to avoid PCI posted write */
835 static inline unsigned int mv_hc_from_port(unsigned int port
)
837 return port
>> MV_PORT_HC_SHIFT
;
840 static inline unsigned int mv_hardport_from_port(unsigned int port
)
842 return port
& MV_PORT_MASK
;
846 * Consolidate some rather tricky bit shift calculations.
847 * This is hot-path stuff, so not a function.
848 * Simple code, with two return values, so macro rather than inline.
850 * port is the sole input, in range 0..7.
851 * shift is one output, for use with main_irq_cause / main_irq_mask registers.
852 * hardport is the other output, in range 0..3.
854 * Note that port and hardport may be the same variable in some cases.
856 #define MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport) \
858 shift = mv_hc_from_port(port) * HC_SHIFT; \
859 hardport = mv_hardport_from_port(port); \
860 shift += hardport * 2; \
863 static inline void __iomem
*mv_hc_base(void __iomem
*base
, unsigned int hc
)
865 return (base
+ SATAHC0_REG_BASE
+ (hc
* MV_SATAHC_REG_SZ
));
868 static inline void __iomem
*mv_hc_base_from_port(void __iomem
*base
,
871 return mv_hc_base(base
, mv_hc_from_port(port
));
874 static inline void __iomem
*mv_port_base(void __iomem
*base
, unsigned int port
)
876 return mv_hc_base_from_port(base
, port
) +
877 MV_SATAHC_ARBTR_REG_SZ
+
878 (mv_hardport_from_port(port
) * MV_PORT_REG_SZ
);
881 static void __iomem
*mv5_phy_base(void __iomem
*mmio
, unsigned int port
)
883 void __iomem
*hc_mmio
= mv_hc_base_from_port(mmio
, port
);
884 unsigned long ofs
= (mv_hardport_from_port(port
) + 1) * 0x100UL
;
886 return hc_mmio
+ ofs
;
889 static inline void __iomem
*mv_host_base(struct ata_host
*host
)
891 struct mv_host_priv
*hpriv
= host
->private_data
;
895 static inline void __iomem
*mv_ap_base(struct ata_port
*ap
)
897 return mv_port_base(mv_host_base(ap
->host
), ap
->port_no
);
900 static inline int mv_get_hc_count(unsigned long port_flags
)
902 return ((port_flags
& MV_FLAG_DUAL_HC
) ? 2 : 1);
906 * mv_save_cached_regs - (re-)initialize cached port registers
907 * @ap: the port whose registers we are caching
909 * Initialize the local cache of port registers,
910 * so that reading them over and over again can
911 * be avoided on the hotter paths of this driver.
912 * This saves a few microseconds each time we switch
913 * to/from EDMA mode to perform (eg.) a drive cache flush.
915 static void mv_save_cached_regs(struct ata_port
*ap
)
917 void __iomem
*port_mmio
= mv_ap_base(ap
);
918 struct mv_port_priv
*pp
= ap
->private_data
;
920 pp
->cached
.fiscfg
= readl(port_mmio
+ FISCFG
);
921 pp
->cached
.ltmode
= readl(port_mmio
+ LTMODE
);
922 pp
->cached
.haltcond
= readl(port_mmio
+ EDMA_HALTCOND
);
923 pp
->cached
.unknown_rsvd
= readl(port_mmio
+ EDMA_UNKNOWN_RSVD
);
927 * mv_write_cached_reg - write to a cached port register
928 * @addr: hardware address of the register
929 * @old: pointer to cached value of the register
930 * @new: new value for the register
932 * Write a new value to a cached register,
933 * but only if the value is different from before.
935 static inline void mv_write_cached_reg(void __iomem
*addr
, u32
*old
, u32
new)
941 * Workaround for 88SX60x1-B2 FEr SATA#13:
942 * Read-after-write is needed to prevent generating 64-bit
943 * write cycles on the PCI bus for SATA interface registers
944 * at offsets ending in 0x4 or 0xc.
946 * Looks like a lot of fuss, but it avoids an unnecessary
947 * +1 usec read-after-write delay for unaffected registers.
949 laddr
= (unsigned long)addr
& 0xffff;
950 if (laddr
>= 0x300 && laddr
<= 0x33c) {
952 if (laddr
== 0x4 || laddr
== 0xc) {
953 writelfl(new, addr
); /* read after write */
957 writel(new, addr
); /* unaffected by the errata */
961 static void mv_set_edma_ptrs(void __iomem
*port_mmio
,
962 struct mv_host_priv
*hpriv
,
963 struct mv_port_priv
*pp
)
968 * initialize request queue
970 pp
->req_idx
&= MV_MAX_Q_DEPTH_MASK
; /* paranoia */
971 index
= pp
->req_idx
<< EDMA_REQ_Q_PTR_SHIFT
;
973 WARN_ON(pp
->crqb_dma
& 0x3ff);
974 writel((pp
->crqb_dma
>> 16) >> 16, port_mmio
+ EDMA_REQ_Q_BASE_HI
);
975 writelfl((pp
->crqb_dma
& EDMA_REQ_Q_BASE_LO_MASK
) | index
,
976 port_mmio
+ EDMA_REQ_Q_IN_PTR
);
977 writelfl(index
, port_mmio
+ EDMA_REQ_Q_OUT_PTR
);
980 * initialize response queue
982 pp
->resp_idx
&= MV_MAX_Q_DEPTH_MASK
; /* paranoia */
983 index
= pp
->resp_idx
<< EDMA_RSP_Q_PTR_SHIFT
;
985 WARN_ON(pp
->crpb_dma
& 0xff);
986 writel((pp
->crpb_dma
>> 16) >> 16, port_mmio
+ EDMA_RSP_Q_BASE_HI
);
987 writelfl(index
, port_mmio
+ EDMA_RSP_Q_IN_PTR
);
988 writelfl((pp
->crpb_dma
& EDMA_RSP_Q_BASE_LO_MASK
) | index
,
989 port_mmio
+ EDMA_RSP_Q_OUT_PTR
);
992 static void mv_write_main_irq_mask(u32 mask
, struct mv_host_priv
*hpriv
)
995 * When writing to the main_irq_mask in hardware,
996 * we must ensure exclusivity between the interrupt coalescing bits
997 * and the corresponding individual port DONE_IRQ bits.
999 * Note that this register is really an "IRQ enable" register,
1000 * not an "IRQ mask" register as Marvell's naming might suggest.
1002 if (mask
& (ALL_PORTS_COAL_DONE
| PORTS_0_3_COAL_DONE
))
1003 mask
&= ~DONE_IRQ_0_3
;
1004 if (mask
& (ALL_PORTS_COAL_DONE
| PORTS_4_7_COAL_DONE
))
1005 mask
&= ~DONE_IRQ_4_7
;
1006 writelfl(mask
, hpriv
->main_irq_mask_addr
);
1009 static void mv_set_main_irq_mask(struct ata_host
*host
,
1010 u32 disable_bits
, u32 enable_bits
)
1012 struct mv_host_priv
*hpriv
= host
->private_data
;
1013 u32 old_mask
, new_mask
;
1015 old_mask
= hpriv
->main_irq_mask
;
1016 new_mask
= (old_mask
& ~disable_bits
) | enable_bits
;
1017 if (new_mask
!= old_mask
) {
1018 hpriv
->main_irq_mask
= new_mask
;
1019 mv_write_main_irq_mask(new_mask
, hpriv
);
1023 static void mv_enable_port_irqs(struct ata_port
*ap
,
1024 unsigned int port_bits
)
1026 unsigned int shift
, hardport
, port
= ap
->port_no
;
1027 u32 disable_bits
, enable_bits
;
1029 MV_PORT_TO_SHIFT_AND_HARDPORT(port
, shift
, hardport
);
1031 disable_bits
= (DONE_IRQ
| ERR_IRQ
) << shift
;
1032 enable_bits
= port_bits
<< shift
;
1033 mv_set_main_irq_mask(ap
->host
, disable_bits
, enable_bits
);
1036 static void mv_clear_and_enable_port_irqs(struct ata_port
*ap
,
1037 void __iomem
*port_mmio
,
1038 unsigned int port_irqs
)
1040 struct mv_host_priv
*hpriv
= ap
->host
->private_data
;
1041 int hardport
= mv_hardport_from_port(ap
->port_no
);
1042 void __iomem
*hc_mmio
= mv_hc_base_from_port(
1043 mv_host_base(ap
->host
), ap
->port_no
);
1046 /* clear EDMA event indicators, if any */
1047 writelfl(0, port_mmio
+ EDMA_ERR_IRQ_CAUSE
);
1049 /* clear pending irq events */
1050 hc_irq_cause
= ~((DEV_IRQ
| DMA_IRQ
) << hardport
);
1051 writelfl(hc_irq_cause
, hc_mmio
+ HC_IRQ_CAUSE
);
1053 /* clear FIS IRQ Cause */
1054 if (IS_GEN_IIE(hpriv
))
1055 writelfl(0, port_mmio
+ FIS_IRQ_CAUSE
);
1057 mv_enable_port_irqs(ap
, port_irqs
);
1060 static void mv_set_irq_coalescing(struct ata_host
*host
,
1061 unsigned int count
, unsigned int usecs
)
1063 struct mv_host_priv
*hpriv
= host
->private_data
;
1064 void __iomem
*mmio
= hpriv
->base
, *hc_mmio
;
1065 u32 coal_enable
= 0;
1066 unsigned long flags
;
1067 unsigned int clks
, is_dual_hc
= hpriv
->n_ports
> MV_PORTS_PER_HC
;
1068 const u32 coal_disable
= PORTS_0_3_COAL_DONE
| PORTS_4_7_COAL_DONE
|
1069 ALL_PORTS_COAL_DONE
;
1071 /* Disable IRQ coalescing if either threshold is zero */
1072 if (!usecs
|| !count
) {
1075 /* Respect maximum limits of the hardware */
1076 clks
= usecs
* COAL_CLOCKS_PER_USEC
;
1077 if (clks
> MAX_COAL_TIME_THRESHOLD
)
1078 clks
= MAX_COAL_TIME_THRESHOLD
;
1079 if (count
> MAX_COAL_IO_COUNT
)
1080 count
= MAX_COAL_IO_COUNT
;
1083 spin_lock_irqsave(&host
->lock
, flags
);
1084 mv_set_main_irq_mask(host
, coal_disable
, 0);
1086 if (is_dual_hc
&& !IS_GEN_I(hpriv
)) {
1088 * GEN_II/GEN_IIE with dual host controllers:
1089 * one set of global thresholds for the entire chip.
1091 writel(clks
, mmio
+ IRQ_COAL_TIME_THRESHOLD
);
1092 writel(count
, mmio
+ IRQ_COAL_IO_THRESHOLD
);
1093 /* clear leftover coal IRQ bit */
1094 writel(~ALL_PORTS_COAL_IRQ
, mmio
+ IRQ_COAL_CAUSE
);
1096 coal_enable
= ALL_PORTS_COAL_DONE
;
1097 clks
= count
= 0; /* force clearing of regular regs below */
1101 * All chips: independent thresholds for each HC on the chip.
1103 hc_mmio
= mv_hc_base_from_port(mmio
, 0);
1104 writel(clks
, hc_mmio
+ HC_IRQ_COAL_TIME_THRESHOLD
);
1105 writel(count
, hc_mmio
+ HC_IRQ_COAL_IO_THRESHOLD
);
1106 writel(~HC_COAL_IRQ
, hc_mmio
+ HC_IRQ_CAUSE
);
1108 coal_enable
|= PORTS_0_3_COAL_DONE
;
1110 hc_mmio
= mv_hc_base_from_port(mmio
, MV_PORTS_PER_HC
);
1111 writel(clks
, hc_mmio
+ HC_IRQ_COAL_TIME_THRESHOLD
);
1112 writel(count
, hc_mmio
+ HC_IRQ_COAL_IO_THRESHOLD
);
1113 writel(~HC_COAL_IRQ
, hc_mmio
+ HC_IRQ_CAUSE
);
1115 coal_enable
|= PORTS_4_7_COAL_DONE
;
1118 mv_set_main_irq_mask(host
, 0, coal_enable
);
1119 spin_unlock_irqrestore(&host
->lock
, flags
);
1123 * mv_start_edma - Enable eDMA engine
1124 * @pp: port private data
1126 * Verify the local cache of the eDMA state is accurate with a
1130 * Inherited from caller.
1132 static void mv_start_edma(struct ata_port
*ap
, void __iomem
*port_mmio
,
1133 struct mv_port_priv
*pp
, u8 protocol
)
1135 int want_ncq
= (protocol
== ATA_PROT_NCQ
);
1137 if (pp
->pp_flags
& MV_PP_FLAG_EDMA_EN
) {
1138 int using_ncq
= ((pp
->pp_flags
& MV_PP_FLAG_NCQ_EN
) != 0);
1139 if (want_ncq
!= using_ncq
)
1142 if (!(pp
->pp_flags
& MV_PP_FLAG_EDMA_EN
)) {
1143 struct mv_host_priv
*hpriv
= ap
->host
->private_data
;
1145 mv_edma_cfg(ap
, want_ncq
, 1);
1147 mv_set_edma_ptrs(port_mmio
, hpriv
, pp
);
1148 mv_clear_and_enable_port_irqs(ap
, port_mmio
, DONE_IRQ
|ERR_IRQ
);
1150 writelfl(EDMA_EN
, port_mmio
+ EDMA_CMD
);
1151 pp
->pp_flags
|= MV_PP_FLAG_EDMA_EN
;
1155 static void mv_wait_for_edma_empty_idle(struct ata_port
*ap
)
1157 void __iomem
*port_mmio
= mv_ap_base(ap
);
1158 const u32 empty_idle
= (EDMA_STATUS_CACHE_EMPTY
| EDMA_STATUS_IDLE
);
1159 const int per_loop
= 5, timeout
= (15 * 1000 / per_loop
);
1163 * Wait for the EDMA engine to finish transactions in progress.
1164 * No idea what a good "timeout" value might be, but measurements
1165 * indicate that it often requires hundreds of microseconds
1166 * with two drives in-use. So we use the 15msec value above
1167 * as a rough guess at what even more drives might require.
1169 for (i
= 0; i
< timeout
; ++i
) {
1170 u32 edma_stat
= readl(port_mmio
+ EDMA_STATUS
);
1171 if ((edma_stat
& empty_idle
) == empty_idle
)
1175 /* ata_port_info(ap, "%s: %u+ usecs\n", __func__, i); */
1179 * mv_stop_edma_engine - Disable eDMA engine
1180 * @port_mmio: io base address
1183 * Inherited from caller.
1185 static int mv_stop_edma_engine(void __iomem
*port_mmio
)
1189 /* Disable eDMA. The disable bit auto clears. */
1190 writelfl(EDMA_DS
, port_mmio
+ EDMA_CMD
);
1192 /* Wait for the chip to confirm eDMA is off. */
1193 for (i
= 10000; i
> 0; i
--) {
1194 u32 reg
= readl(port_mmio
+ EDMA_CMD
);
1195 if (!(reg
& EDMA_EN
))
1202 static int mv_stop_edma(struct ata_port
*ap
)
1204 void __iomem
*port_mmio
= mv_ap_base(ap
);
1205 struct mv_port_priv
*pp
= ap
->private_data
;
1208 if (!(pp
->pp_flags
& MV_PP_FLAG_EDMA_EN
))
1210 pp
->pp_flags
&= ~MV_PP_FLAG_EDMA_EN
;
1211 mv_wait_for_edma_empty_idle(ap
);
1212 if (mv_stop_edma_engine(port_mmio
)) {
1213 ata_port_err(ap
, "Unable to stop eDMA\n");
1216 mv_edma_cfg(ap
, 0, 0);
1220 static void mv_dump_mem(struct device
*dev
, void __iomem
*start
, unsigned bytes
)
1223 unsigned char linebuf
[38];
1225 for (b
= 0; b
< bytes
; ) {
1226 for (w
= 0, o
= 0; b
< bytes
&& w
< 4; w
++) {
1227 o
+= scnprintf(linebuf
+ o
, sizeof(linebuf
) - o
,
1228 "%08x ", readl(start
+ b
));
1231 dev_dbg(dev
, "%s: %p: %s\n",
1232 __func__
, start
+ b
, linebuf
);
1236 static void mv_dump_pci_cfg(struct pci_dev
*pdev
, unsigned bytes
)
1240 unsigned char linebuf
[38];
1242 for (b
= 0; b
< bytes
; ) {
1243 for (w
= 0, o
= 0; b
< bytes
&& w
< 4; w
++) {
1244 (void) pci_read_config_dword(pdev
, b
, &dw
);
1245 o
+= snprintf(linebuf
+ o
, sizeof(linebuf
) - o
,
1249 dev_dbg(&pdev
->dev
, "%s: %02x: %s\n",
1250 __func__
, b
, linebuf
);
1254 static void mv_dump_all_regs(void __iomem
*mmio_base
,
1255 struct pci_dev
*pdev
)
1257 void __iomem
*hc_base
;
1258 void __iomem
*port_base
;
1259 int start_port
, num_ports
, p
, start_hc
, num_hcs
, hc
;
1261 start_hc
= start_port
= 0;
1262 num_ports
= 8; /* should be benign for 4 port devs */
1265 "%s: All registers for port(s) %u-%u:\n", __func__
,
1266 start_port
, num_ports
> 1 ? num_ports
- 1 : start_port
);
1268 dev_dbg(&pdev
->dev
, "%s: PCI config space regs:\n", __func__
);
1269 mv_dump_pci_cfg(pdev
, 0x68);
1271 dev_dbg(&pdev
->dev
, "%s: PCI regs:\n", __func__
);
1272 mv_dump_mem(&pdev
->dev
, mmio_base
+0xc00, 0x3c);
1273 mv_dump_mem(&pdev
->dev
, mmio_base
+0xd00, 0x34);
1274 mv_dump_mem(&pdev
->dev
, mmio_base
+0xf00, 0x4);
1275 mv_dump_mem(&pdev
->dev
, mmio_base
+0x1d00, 0x6c);
1276 for (hc
= start_hc
; hc
< start_hc
+ num_hcs
; hc
++) {
1277 hc_base
= mv_hc_base(mmio_base
, hc
);
1278 dev_dbg(&pdev
->dev
, "%s: HC regs (HC %i):\n", __func__
, hc
);
1279 mv_dump_mem(&pdev
->dev
, hc_base
, 0x1c);
1281 for (p
= start_port
; p
< start_port
+ num_ports
; p
++) {
1282 port_base
= mv_port_base(mmio_base
, p
);
1283 dev_dbg(&pdev
->dev
, "%s: EDMA regs (port %i):\n", __func__
, p
);
1284 mv_dump_mem(&pdev
->dev
, port_base
, 0x54);
1285 dev_dbg(&pdev
->dev
, "%s: SATA regs (port %i):\n", __func__
, p
);
1286 mv_dump_mem(&pdev
->dev
, port_base
+0x300, 0x60);
1290 static unsigned int mv_scr_offset(unsigned int sc_reg_in
)
1294 switch (sc_reg_in
) {
1298 ofs
= SATA_STATUS
+ (sc_reg_in
* sizeof(u32
));
1301 ofs
= SATA_ACTIVE
; /* active is not with the others */
1310 static int mv_scr_read(struct ata_link
*link
, unsigned int sc_reg_in
, u32
*val
)
1312 unsigned int ofs
= mv_scr_offset(sc_reg_in
);
1314 if (ofs
!= 0xffffffffU
) {
1315 *val
= readl(mv_ap_base(link
->ap
) + ofs
);
1321 static int mv_scr_write(struct ata_link
*link
, unsigned int sc_reg_in
, u32 val
)
1323 unsigned int ofs
= mv_scr_offset(sc_reg_in
);
1325 if (ofs
!= 0xffffffffU
) {
1326 void __iomem
*addr
= mv_ap_base(link
->ap
) + ofs
;
1327 struct mv_host_priv
*hpriv
= link
->ap
->host
->private_data
;
1328 if (sc_reg_in
== SCR_CONTROL
) {
1330 * Workaround for 88SX60x1 FEr SATA#26:
1332 * COMRESETs have to take care not to accidentally
1333 * put the drive to sleep when writing SCR_CONTROL.
1334 * Setting bits 12..15 prevents this problem.
1336 * So if we see an outbound COMMRESET, set those bits.
1337 * Ditto for the followup write that clears the reset.
1339 * The proprietary driver does this for
1340 * all chip versions, and so do we.
1342 if ((val
& 0xf) == 1 || (readl(addr
) & 0xf) == 1)
1345 if (hpriv
->hp_flags
& MV_HP_FIX_LP_PHY_CTL
) {
1346 void __iomem
*lp_phy_addr
=
1347 mv_ap_base(link
->ap
) + LP_PHY_CTL
;
1349 * Set PHY speed according to SControl speed.
1352 LP_PHY_CTL_PIN_PU_PLL
|
1353 LP_PHY_CTL_PIN_PU_RX
|
1354 LP_PHY_CTL_PIN_PU_TX
;
1356 if ((val
& 0xf0) != 0x10)
1358 LP_PHY_CTL_GEN_TX_3G
|
1359 LP_PHY_CTL_GEN_RX_3G
;
1361 writelfl(lp_phy_val
, lp_phy_addr
);
1364 writelfl(val
, addr
);
1370 static void mv6_dev_config(struct ata_device
*adev
)
1373 * Deal with Gen-II ("mv6") hardware quirks/restrictions:
1375 * Gen-II does not support NCQ over a port multiplier
1376 * (no FIS-based switching).
1378 if (adev
->flags
& ATA_DFLAG_NCQ
) {
1379 if (sata_pmp_attached(adev
->link
->ap
)) {
1380 adev
->flags
&= ~ATA_DFLAG_NCQ
;
1382 "NCQ disabled for command-based switching\n");
1387 static int mv_qc_defer(struct ata_queued_cmd
*qc
)
1389 struct ata_link
*link
= qc
->dev
->link
;
1390 struct ata_port
*ap
= link
->ap
;
1391 struct mv_port_priv
*pp
= ap
->private_data
;
1394 * Don't allow new commands if we're in a delayed EH state
1395 * for NCQ and/or FIS-based switching.
1397 if (pp
->pp_flags
& MV_PP_FLAG_DELAYED_EH
)
1398 return ATA_DEFER_PORT
;
1400 /* PIO commands need exclusive link: no other commands [DMA or PIO]
1401 * can run concurrently.
1402 * set excl_link when we want to send a PIO command in DMA mode
1403 * or a non-NCQ command in NCQ mode.
1404 * When we receive a command from that link, and there are no
1405 * outstanding commands, mark a flag to clear excl_link and let
1406 * the command go through.
1408 if (unlikely(ap
->excl_link
)) {
1409 if (link
== ap
->excl_link
) {
1410 if (ap
->nr_active_links
)
1411 return ATA_DEFER_PORT
;
1412 qc
->flags
|= ATA_QCFLAG_CLEAR_EXCL
;
1415 return ATA_DEFER_PORT
;
1419 * If the port is completely idle, then allow the new qc.
1421 if (ap
->nr_active_links
== 0)
1425 * The port is operating in host queuing mode (EDMA) with NCQ
1426 * enabled, allow multiple NCQ commands. EDMA also allows
1427 * queueing multiple DMA commands but libata core currently
1430 if ((pp
->pp_flags
& MV_PP_FLAG_EDMA_EN
) &&
1431 (pp
->pp_flags
& MV_PP_FLAG_NCQ_EN
)) {
1432 if (ata_is_ncq(qc
->tf
.protocol
))
1435 ap
->excl_link
= link
;
1436 return ATA_DEFER_PORT
;
1440 return ATA_DEFER_PORT
;
1443 static void mv_config_fbs(struct ata_port
*ap
, int want_ncq
, int want_fbs
)
1445 struct mv_port_priv
*pp
= ap
->private_data
;
1446 void __iomem
*port_mmio
;
1448 u32 fiscfg
, *old_fiscfg
= &pp
->cached
.fiscfg
;
1449 u32 ltmode
, *old_ltmode
= &pp
->cached
.ltmode
;
1450 u32 haltcond
, *old_haltcond
= &pp
->cached
.haltcond
;
1452 ltmode
= *old_ltmode
& ~LTMODE_BIT8
;
1453 haltcond
= *old_haltcond
| EDMA_ERR_DEV
;
1456 fiscfg
= *old_fiscfg
| FISCFG_SINGLE_SYNC
;
1457 ltmode
= *old_ltmode
| LTMODE_BIT8
;
1459 haltcond
&= ~EDMA_ERR_DEV
;
1461 fiscfg
|= FISCFG_WAIT_DEV_ERR
;
1463 fiscfg
= *old_fiscfg
& ~(FISCFG_SINGLE_SYNC
| FISCFG_WAIT_DEV_ERR
);
1466 port_mmio
= mv_ap_base(ap
);
1467 mv_write_cached_reg(port_mmio
+ FISCFG
, old_fiscfg
, fiscfg
);
1468 mv_write_cached_reg(port_mmio
+ LTMODE
, old_ltmode
, ltmode
);
1469 mv_write_cached_reg(port_mmio
+ EDMA_HALTCOND
, old_haltcond
, haltcond
);
1472 static void mv_60x1_errata_sata25(struct ata_port
*ap
, int want_ncq
)
1474 struct mv_host_priv
*hpriv
= ap
->host
->private_data
;
1477 /* workaround for 88SX60x1 FEr SATA#25 (part 1) */
1478 old
= readl(hpriv
->base
+ GPIO_PORT_CTL
);
1480 new = old
| (1 << 22);
1482 new = old
& ~(1 << 22);
1484 writel(new, hpriv
->base
+ GPIO_PORT_CTL
);
1488 * mv_bmdma_enable - set a magic bit on GEN_IIE to allow bmdma
1489 * @ap: Port being initialized
1491 * There are two DMA modes on these chips: basic DMA, and EDMA.
1493 * Bit-0 of the "EDMA RESERVED" register enables/disables use
1494 * of basic DMA on the GEN_IIE versions of the chips.
1496 * This bit survives EDMA resets, and must be set for basic DMA
1497 * to function, and should be cleared when EDMA is active.
1499 static void mv_bmdma_enable_iie(struct ata_port
*ap
, int enable_bmdma
)
1501 struct mv_port_priv
*pp
= ap
->private_data
;
1502 u32
new, *old
= &pp
->cached
.unknown_rsvd
;
1508 mv_write_cached_reg(mv_ap_base(ap
) + EDMA_UNKNOWN_RSVD
, old
, new);
1512 * SOC chips have an issue whereby the HDD LEDs don't always blink
1513 * during I/O when NCQ is enabled. Enabling a special "LED blink" mode
1514 * of the SOC takes care of it, generating a steady blink rate when
1515 * any drive on the chip is active.
1517 * Unfortunately, the blink mode is a global hardware setting for the SOC,
1518 * so we must use it whenever at least one port on the SOC has NCQ enabled.
1520 * We turn "LED blink" off when NCQ is not in use anywhere, because the normal
1521 * LED operation works then, and provides better (more accurate) feedback.
1523 * Note that this code assumes that an SOC never has more than one HC onboard.
1525 static void mv_soc_led_blink_enable(struct ata_port
*ap
)
1527 struct ata_host
*host
= ap
->host
;
1528 struct mv_host_priv
*hpriv
= host
->private_data
;
1529 void __iomem
*hc_mmio
;
1532 if (hpriv
->hp_flags
& MV_HP_QUIRK_LED_BLINK_EN
)
1534 hpriv
->hp_flags
|= MV_HP_QUIRK_LED_BLINK_EN
;
1535 hc_mmio
= mv_hc_base_from_port(mv_host_base(host
), ap
->port_no
);
1536 led_ctrl
= readl(hc_mmio
+ SOC_LED_CTRL
);
1537 writel(led_ctrl
| SOC_LED_CTRL_BLINK
, hc_mmio
+ SOC_LED_CTRL
);
1540 static void mv_soc_led_blink_disable(struct ata_port
*ap
)
1542 struct ata_host
*host
= ap
->host
;
1543 struct mv_host_priv
*hpriv
= host
->private_data
;
1544 void __iomem
*hc_mmio
;
1548 if (!(hpriv
->hp_flags
& MV_HP_QUIRK_LED_BLINK_EN
))
1551 /* disable led-blink only if no ports are using NCQ */
1552 for (port
= 0; port
< hpriv
->n_ports
; port
++) {
1553 struct ata_port
*this_ap
= host
->ports
[port
];
1554 struct mv_port_priv
*pp
= this_ap
->private_data
;
1556 if (pp
->pp_flags
& MV_PP_FLAG_NCQ_EN
)
1560 hpriv
->hp_flags
&= ~MV_HP_QUIRK_LED_BLINK_EN
;
1561 hc_mmio
= mv_hc_base_from_port(mv_host_base(host
), ap
->port_no
);
1562 led_ctrl
= readl(hc_mmio
+ SOC_LED_CTRL
);
1563 writel(led_ctrl
& ~SOC_LED_CTRL_BLINK
, hc_mmio
+ SOC_LED_CTRL
);
1566 static void mv_edma_cfg(struct ata_port
*ap
, int want_ncq
, int want_edma
)
1569 struct mv_port_priv
*pp
= ap
->private_data
;
1570 struct mv_host_priv
*hpriv
= ap
->host
->private_data
;
1571 void __iomem
*port_mmio
= mv_ap_base(ap
);
1573 /* set up non-NCQ EDMA configuration */
1574 cfg
= EDMA_CFG_Q_DEPTH
; /* always 0x1f for *all* chips */
1576 ~(MV_PP_FLAG_FBS_EN
| MV_PP_FLAG_NCQ_EN
| MV_PP_FLAG_FAKE_ATA_BUSY
);
1578 if (IS_GEN_I(hpriv
))
1579 cfg
|= (1 << 8); /* enab config burst size mask */
1581 else if (IS_GEN_II(hpriv
)) {
1582 cfg
|= EDMA_CFG_RD_BRST_EXT
| EDMA_CFG_WR_BUFF_LEN
;
1583 mv_60x1_errata_sata25(ap
, want_ncq
);
1585 } else if (IS_GEN_IIE(hpriv
)) {
1586 int want_fbs
= sata_pmp_attached(ap
);
1588 * Possible future enhancement:
1590 * The chip can use FBS with non-NCQ, if we allow it,
1591 * But first we need to have the error handling in place
1592 * for this mode (datasheet section 7.3.15.4.2.3).
1593 * So disallow non-NCQ FBS for now.
1595 want_fbs
&= want_ncq
;
1597 mv_config_fbs(ap
, want_ncq
, want_fbs
);
1600 pp
->pp_flags
|= MV_PP_FLAG_FBS_EN
;
1601 cfg
|= EDMA_CFG_EDMA_FBS
; /* FIS-based switching */
1604 cfg
|= (1 << 23); /* do not mask PM field in rx'd FIS */
1606 cfg
|= (1 << 22); /* enab 4-entry host queue cache */
1608 cfg
|= (1 << 18); /* enab early completion */
1610 if (hpriv
->hp_flags
& MV_HP_CUT_THROUGH
)
1611 cfg
|= (1 << 17); /* enab cut-thru (dis stor&forwrd) */
1612 mv_bmdma_enable_iie(ap
, !want_edma
);
1614 if (IS_SOC(hpriv
)) {
1616 mv_soc_led_blink_enable(ap
);
1618 mv_soc_led_blink_disable(ap
);
1623 cfg
|= EDMA_CFG_NCQ
;
1624 pp
->pp_flags
|= MV_PP_FLAG_NCQ_EN
;
1627 writelfl(cfg
, port_mmio
+ EDMA_CFG
);
1630 static void mv_port_free_dma_mem(struct ata_port
*ap
)
1632 struct mv_host_priv
*hpriv
= ap
->host
->private_data
;
1633 struct mv_port_priv
*pp
= ap
->private_data
;
1637 dma_pool_free(hpriv
->crqb_pool
, pp
->crqb
, pp
->crqb_dma
);
1641 dma_pool_free(hpriv
->crpb_pool
, pp
->crpb
, pp
->crpb_dma
);
1645 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
1646 * For later hardware, we have one unique sg_tbl per NCQ tag.
1648 for (tag
= 0; tag
< MV_MAX_Q_DEPTH
; ++tag
) {
1649 if (pp
->sg_tbl
[tag
]) {
1650 if (tag
== 0 || !IS_GEN_I(hpriv
))
1651 dma_pool_free(hpriv
->sg_tbl_pool
,
1653 pp
->sg_tbl_dma
[tag
]);
1654 pp
->sg_tbl
[tag
] = NULL
;
1660 * mv_port_start - Port specific init/start routine.
1661 * @ap: ATA channel to manipulate
1663 * Allocate and point to DMA memory, init port private memory,
1667 * Inherited from caller.
1669 static int mv_port_start(struct ata_port
*ap
)
1671 struct device
*dev
= ap
->host
->dev
;
1672 struct mv_host_priv
*hpriv
= ap
->host
->private_data
;
1673 struct mv_port_priv
*pp
;
1674 unsigned long flags
;
1677 pp
= devm_kzalloc(dev
, sizeof(*pp
), GFP_KERNEL
);
1680 ap
->private_data
= pp
;
1682 pp
->crqb
= dma_pool_zalloc(hpriv
->crqb_pool
, GFP_KERNEL
, &pp
->crqb_dma
);
1686 pp
->crpb
= dma_pool_zalloc(hpriv
->crpb_pool
, GFP_KERNEL
, &pp
->crpb_dma
);
1688 goto out_port_free_dma_mem
;
1690 /* 6041/6081 Rev. "C0" (and newer) are okay with async notify */
1691 if (hpriv
->hp_flags
& MV_HP_ERRATA_60X1C0
)
1692 ap
->flags
|= ATA_FLAG_AN
;
1694 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
1695 * For later hardware, we need one unique sg_tbl per NCQ tag.
1697 for (tag
= 0; tag
< MV_MAX_Q_DEPTH
; ++tag
) {
1698 if (tag
== 0 || !IS_GEN_I(hpriv
)) {
1699 pp
->sg_tbl
[tag
] = dma_pool_alloc(hpriv
->sg_tbl_pool
,
1700 GFP_KERNEL
, &pp
->sg_tbl_dma
[tag
]);
1701 if (!pp
->sg_tbl
[tag
])
1702 goto out_port_free_dma_mem
;
1704 pp
->sg_tbl
[tag
] = pp
->sg_tbl
[0];
1705 pp
->sg_tbl_dma
[tag
] = pp
->sg_tbl_dma
[0];
1709 spin_lock_irqsave(ap
->lock
, flags
);
1710 mv_save_cached_regs(ap
);
1711 mv_edma_cfg(ap
, 0, 0);
1712 spin_unlock_irqrestore(ap
->lock
, flags
);
1716 out_port_free_dma_mem
:
1717 mv_port_free_dma_mem(ap
);
1722 * mv_port_stop - Port specific cleanup/stop routine.
1723 * @ap: ATA channel to manipulate
1725 * Stop DMA, cleanup port memory.
1728 * This routine uses the host lock to protect the DMA stop.
1730 static void mv_port_stop(struct ata_port
*ap
)
1732 unsigned long flags
;
1734 spin_lock_irqsave(ap
->lock
, flags
);
1736 mv_enable_port_irqs(ap
, 0);
1737 spin_unlock_irqrestore(ap
->lock
, flags
);
1738 mv_port_free_dma_mem(ap
);
1742 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
1743 * @qc: queued command whose SG list to source from
1745 * Populate the SG list and mark the last entry.
1748 * Inherited from caller.
1750 static void mv_fill_sg(struct ata_queued_cmd
*qc
)
1752 struct mv_port_priv
*pp
= qc
->ap
->private_data
;
1753 struct scatterlist
*sg
;
1754 struct mv_sg
*mv_sg
, *last_sg
= NULL
;
1757 mv_sg
= pp
->sg_tbl
[qc
->hw_tag
];
1758 for_each_sg(qc
->sg
, sg
, qc
->n_elem
, si
) {
1759 dma_addr_t addr
= sg_dma_address(sg
);
1760 u32 sg_len
= sg_dma_len(sg
);
1763 u32 offset
= addr
& 0xffff;
1766 if (offset
+ len
> 0x10000)
1767 len
= 0x10000 - offset
;
1769 mv_sg
->addr
= cpu_to_le32(addr
& 0xffffffff);
1770 mv_sg
->addr_hi
= cpu_to_le32((addr
>> 16) >> 16);
1771 mv_sg
->flags_size
= cpu_to_le32(len
& 0xffff);
1772 mv_sg
->reserved
= 0;
1782 if (likely(last_sg
))
1783 last_sg
->flags_size
|= cpu_to_le32(EPRD_FLAG_END_OF_TBL
);
1784 mb(); /* ensure data structure is visible to the chipset */
1787 static void mv_crqb_pack_cmd(__le16
*cmdw
, u8 data
, u8 addr
, unsigned last
)
1789 u16 tmp
= data
| (addr
<< CRQB_CMD_ADDR_SHIFT
) | CRQB_CMD_CS
|
1790 (last
? CRQB_CMD_LAST
: 0);
1791 *cmdw
= cpu_to_le16(tmp
);
1795 * mv_sff_irq_clear - Clear hardware interrupt after DMA.
1796 * @ap: Port associated with this ATA transaction.
1798 * We need this only for ATAPI bmdma transactions,
1799 * as otherwise we experience spurious interrupts
1800 * after libata-sff handles the bmdma interrupts.
1802 static void mv_sff_irq_clear(struct ata_port
*ap
)
1804 mv_clear_and_enable_port_irqs(ap
, mv_ap_base(ap
), ERR_IRQ
);
1808 * mv_check_atapi_dma - Filter ATAPI cmds which are unsuitable for DMA.
1809 * @qc: queued command to check for chipset/DMA compatibility.
1811 * The bmdma engines cannot handle speculative data sizes
1812 * (bytecount under/over flow). So only allow DMA for
1813 * data transfer commands with known data sizes.
1816 * Inherited from caller.
1818 static int mv_check_atapi_dma(struct ata_queued_cmd
*qc
)
1820 struct scsi_cmnd
*scmd
= qc
->scsicmd
;
1823 switch (scmd
->cmnd
[0]) {
1831 case GPCMD_SEND_DVD_STRUCTURE
:
1832 case GPCMD_SEND_CUE_SHEET
:
1833 return 0; /* DMA is safe */
1836 return -EOPNOTSUPP
; /* use PIO instead */
1840 * mv_bmdma_setup - Set up BMDMA transaction
1841 * @qc: queued command to prepare DMA for.
1844 * Inherited from caller.
1846 static void mv_bmdma_setup(struct ata_queued_cmd
*qc
)
1848 struct ata_port
*ap
= qc
->ap
;
1849 void __iomem
*port_mmio
= mv_ap_base(ap
);
1850 struct mv_port_priv
*pp
= ap
->private_data
;
1854 /* clear all DMA cmd bits */
1855 writel(0, port_mmio
+ BMDMA_CMD
);
1857 /* load PRD table addr. */
1858 writel((pp
->sg_tbl_dma
[qc
->hw_tag
] >> 16) >> 16,
1859 port_mmio
+ BMDMA_PRD_HIGH
);
1860 writelfl(pp
->sg_tbl_dma
[qc
->hw_tag
],
1861 port_mmio
+ BMDMA_PRD_LOW
);
1863 /* issue r/w command */
1864 ap
->ops
->sff_exec_command(ap
, &qc
->tf
);
1868 * mv_bmdma_start - Start a BMDMA transaction
1869 * @qc: queued command to start DMA on.
1872 * Inherited from caller.
1874 static void mv_bmdma_start(struct ata_queued_cmd
*qc
)
1876 struct ata_port
*ap
= qc
->ap
;
1877 void __iomem
*port_mmio
= mv_ap_base(ap
);
1878 unsigned int rw
= (qc
->tf
.flags
& ATA_TFLAG_WRITE
);
1879 u32 cmd
= (rw
? 0 : ATA_DMA_WR
) | ATA_DMA_START
;
1881 /* start host DMA transaction */
1882 writelfl(cmd
, port_mmio
+ BMDMA_CMD
);
1886 * mv_bmdma_stop_ap - Stop BMDMA transfer
1889 * Clears the ATA_DMA_START flag in the bmdma control register
1892 * Inherited from caller.
1894 static void mv_bmdma_stop_ap(struct ata_port
*ap
)
1896 void __iomem
*port_mmio
= mv_ap_base(ap
);
1899 /* clear start/stop bit */
1900 cmd
= readl(port_mmio
+ BMDMA_CMD
);
1901 if (cmd
& ATA_DMA_START
) {
1902 cmd
&= ~ATA_DMA_START
;
1903 writelfl(cmd
, port_mmio
+ BMDMA_CMD
);
1905 /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
1906 ata_sff_dma_pause(ap
);
1910 static void mv_bmdma_stop(struct ata_queued_cmd
*qc
)
1912 mv_bmdma_stop_ap(qc
->ap
);
1916 * mv_bmdma_status - Read BMDMA status
1917 * @ap: port for which to retrieve DMA status.
1919 * Read and return equivalent of the sff BMDMA status register.
1922 * Inherited from caller.
1924 static u8
mv_bmdma_status(struct ata_port
*ap
)
1926 void __iomem
*port_mmio
= mv_ap_base(ap
);
1930 * Other bits are valid only if ATA_DMA_ACTIVE==0,
1931 * and the ATA_DMA_INTR bit doesn't exist.
1933 reg
= readl(port_mmio
+ BMDMA_STATUS
);
1934 if (reg
& ATA_DMA_ACTIVE
)
1935 status
= ATA_DMA_ACTIVE
;
1936 else if (reg
& ATA_DMA_ERR
)
1937 status
= (reg
& ATA_DMA_ERR
) | ATA_DMA_INTR
;
1940 * Just because DMA_ACTIVE is 0 (DMA completed),
1941 * this does _not_ mean the device is "done".
1942 * So we should not yet be signalling ATA_DMA_INTR
1943 * in some cases. Eg. DSM/TRIM, and perhaps others.
1945 mv_bmdma_stop_ap(ap
);
1946 if (ioread8(ap
->ioaddr
.altstatus_addr
) & ATA_BUSY
)
1949 status
= ATA_DMA_INTR
;
1954 static void mv_rw_multi_errata_sata24(struct ata_queued_cmd
*qc
)
1956 struct ata_taskfile
*tf
= &qc
->tf
;
1958 * Workaround for 88SX60x1 FEr SATA#24.
1960 * Chip may corrupt WRITEs if multi_count >= 4kB.
1961 * Note that READs are unaffected.
1963 * It's not clear if this errata really means "4K bytes",
1964 * or if it always happens for multi_count > 7
1965 * regardless of device sector_size.
1967 * So, for safety, any write with multi_count > 7
1968 * gets converted here into a regular PIO write instead:
1970 if ((tf
->flags
& ATA_TFLAG_WRITE
) && is_multi_taskfile(tf
)) {
1971 if (qc
->dev
->multi_count
> 7) {
1972 switch (tf
->command
) {
1973 case ATA_CMD_WRITE_MULTI
:
1974 tf
->command
= ATA_CMD_PIO_WRITE
;
1976 case ATA_CMD_WRITE_MULTI_FUA_EXT
:
1977 tf
->flags
&= ~ATA_TFLAG_FUA
; /* ugh */
1979 case ATA_CMD_WRITE_MULTI_EXT
:
1980 tf
->command
= ATA_CMD_PIO_WRITE_EXT
;
/**
 *	mv_qc_prep - Host specific command preparation.
 *	@qc: queued command to prepare
 *
 *	This routine simply redirects to the general purpose routine
 *	if command is not DMA.  Else, it handles prep of the CRQB
 *	(command request block), does some sanity checking, and calls
 *	the SG load routine.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static enum ata_completion_errors mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf = &qc->tf;
	u16 flags = 0;
	unsigned in_index;

	switch (tf->protocol) {
	case ATA_PROT_DMA:
		if (tf->command == ATA_CMD_DSM)
			return AC_ERR_OK;
		fallthrough;
	case ATA_PROT_NCQ:
		break;	/* continue below */
	case ATA_PROT_PIO:
		mv_rw_multi_errata_sata24(qc);
		return AC_ERR_OK;
	default:
		return AC_ERR_OK;
	}

	/* Fill in command request block
	 */
	if (!(tf->flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->hw_tag);
	flags |= qc->hw_tag << CRQB_TAG_SHIFT;
	flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx;

	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma[qc->hw_tag] & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma[qc->hw_tag] >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];

	/* Sadly, the CRQB cannot accommodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect, which is not needed there
	 * (nsect is used only for the tag; feat/hob_feat hold true nsect).
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 */
		ata_port_err(ap, "%s: unsupported command: %.2x\n", __func__,
				tf->command);
		return AC_ERR_INVALID;
	}
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return AC_ERR_OK;
	mv_fill_sg(qc);

	return AC_ERR_OK;
}
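/*
 * Note on the "(x >> 16) >> 16" idiom used for sg_addr_hi above:
 * dma_addr_t may be only 32 bits wide on some configurations, where a
 * single "x >> 32" shift would be undefined behaviour (and draws a
 * compiler warning).  Shifting twice by 16 yields the high dword when
 * dma_addr_t is 64-bit, and a well-defined 0 when it is 32-bit.
 */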
/**
 *	mv_qc_prep_iie - Host specific command preparation.
 *	@qc: queued command to prepare
 *
 *	This routine simply redirects to the general purpose routine
 *	if command is not DMA.  Else, it handles prep of the CRQB
 *	(command request block), does some sanity checking, and calls
 *	the SG load routine.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static enum ata_completion_errors mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf = &qc->tf;
	unsigned in_index;
	u32 flags = 0;

	if ((tf->protocol != ATA_PROT_DMA) &&
	    (tf->protocol != ATA_PROT_NCQ))
		return AC_ERR_OK;
	if (tf->command == ATA_CMD_DSM)
		return AC_ERR_OK;  /* use bmdma for this */

	/* Fill in Gen IIE command request block */
	if (!(tf->flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->hw_tag);
	flags |= qc->hw_tag << CRQB_TAG_SHIFT;
	flags |= qc->hw_tag << CRQB_HOSTQ_SHIFT;
	flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx;

	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->hw_tag] & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->hw_tag] >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return AC_ERR_OK;
	mv_fill_sg(qc);

	return AC_ERR_OK;
}
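/*
 * Layout sketch of the Gen IIE CRQB filled in above, derived from the
 * shifts in the code: unlike the Gen II CRQB, which stores a sequence of
 * (register, value) pairs packed by mv_crqb_pack_cmd(), the IIE variant
 * stores the raw taskfile in four little-endian words:
 *
 *	ata_cmd[0]: command << 16 | feature << 24
 *	ata_cmd[1]: lbal | lbam << 8 | lbah << 16 | device << 24
 *	ata_cmd[2]: hob_lbal | hob_lbam << 8 | hob_lbah << 16 | hob_feature << 24
 *	ata_cmd[3]: nsect | hob_nsect << 8
 */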
/**
 *	mv_sff_check_status - fetch device status, if valid
 *	@ap: ATA port to fetch status from
 *
 *	When using command issue via mv_qc_issue_fis(),
 *	the initial ATA_BUSY state does not show up in the
 *	ATA status (shadow) register.  This can confuse libata!
 *
 *	So we have a hook here to fake ATA_BUSY for that situation,
 *	until the first time a BUSY, DRQ, or ERR bit is seen.
 *
 *	The rest of the time, it simply returns the ATA status register.
 */
static u8 mv_sff_check_status(struct ata_port *ap)
{
	u8 stat = ioread8(ap->ioaddr.status_addr);
	struct mv_port_priv *pp = ap->private_data;

	if (pp->pp_flags & MV_PP_FLAG_FAKE_ATA_BUSY) {
		if (stat & (ATA_BUSY | ATA_DRQ | ATA_ERR))
			pp->pp_flags &= ~MV_PP_FLAG_FAKE_ATA_BUSY;
		else
			stat = ATA_BUSY;
	}
	return stat;
}
/**
 *	mv_send_fis - Send a FIS, using the "Vendor-Unique FIS" register
 *	@ap: ATA port to send a FIS
 *	@fis: fis to be sent
 *	@nwords: number of 32-bit words in the fis
 */
static unsigned int mv_send_fis(struct ata_port *ap, u32 *fis, int nwords)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 ifctl, old_ifctl, ifstat;
	int i, timeout = 200, final_word = nwords - 1;

	/* Initiate FIS transmission mode */
	old_ifctl = readl(port_mmio + SATA_IFCTL);
	ifctl = 0x100 | (old_ifctl & 0xf);
	writelfl(ifctl, port_mmio + SATA_IFCTL);

	/* Send all words of the FIS except for the final word */
	for (i = 0; i < final_word; ++i)
		writel(fis[i], port_mmio + VENDOR_UNIQUE_FIS);

	/* Flag end-of-transmission, and then send the final word */
	writelfl(ifctl | 0x200, port_mmio + SATA_IFCTL);
	writelfl(fis[final_word], port_mmio + VENDOR_UNIQUE_FIS);

	/*
	 * Wait for FIS transmission to complete.
	 * This typically takes just a single iteration.
	 */
	do {
		ifstat = readl(port_mmio + SATA_IFSTAT);
	} while (!(ifstat & 0x1000) && --timeout);

	/* Restore original port configuration */
	writelfl(old_ifctl, port_mmio + SATA_IFCTL);

	/* See if it worked */
	if ((ifstat & 0x3000) != 0x1000) {
		ata_port_warn(ap, "%s transmission error, ifstat=%08x\n",
			      __func__, ifstat);
		return AC_ERR_OTHER;
	}
	return 0;
}
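/*
 * Interpretation of the SATA_IFSTAT bits tested above (inferred from the
 * checks in this function, not from a datasheet): bit 12 (0x1000) appears
 * to signal "FIS transmission done" and bit 13 (0x2000) a transmission
 * error, hence the "(ifstat & 0x3000) != 0x1000" success test.
 */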
/**
 *	mv_qc_issue_fis - Issue a command directly as a FIS
 *	@qc: queued command to start
 *
 *	Note that the ATA shadow registers are not updated
 *	after command issue, so the device will appear "READY"
 *	if polled, even while it is BUSY processing the command.
 *
 *	So we use a status hook to fake ATA_BUSY until the drive changes state.
 *
 *	Note: we don't get updated shadow regs on *completion*
 *	of non-data commands.  So avoid sending them via this function,
 *	as they will appear to have completed immediately.
 *
 *	GEN_IIE has special registers that we could get the result tf from,
 *	but earlier chipsets do not.  For now, we ignore those registers.
 */
static unsigned int mv_qc_issue_fis(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct ata_link *link = qc->dev->link;
	u32 fis[5];
	int err = 0;

	ata_tf_to_fis(&qc->tf, link->pmp, 1, (void *)fis);
	err = mv_send_fis(ap, fis, ARRAY_SIZE(fis));
	if (err)
		return err;

	switch (qc->tf.protocol) {
	case ATAPI_PROT_PIO:
		pp->pp_flags |= MV_PP_FLAG_FAKE_ATA_BUSY;
		fallthrough;
	case ATAPI_PROT_NODATA:
		ap->hsm_task_state = HSM_ST_FIRST;
		break;
	case ATA_PROT_PIO:
		pp->pp_flags |= MV_PP_FLAG_FAKE_ATA_BUSY;
		if (qc->tf.flags & ATA_TFLAG_WRITE)
			ap->hsm_task_state = HSM_ST_FIRST;
		else
			ap->hsm_task_state = HSM_ST;
		break;
	default:
		ap->hsm_task_state = HSM_ST_LAST;
		break;
	}

	if (qc->tf.flags & ATA_TFLAG_POLLING)
		ata_sff_queue_pio_task(link, 0);
	return 0;
}
/**
 *	mv_qc_issue - Initiate a command to the host
 *	@qc: queued command to start
 *
 *	This routine simply redirects to the general purpose routine
 *	if command is not DMA.  Else, it sanity checks our local
 *	caches of the request producer/consumer indices then enables
 *	DMA and bumps the request producer index.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	static int limit_warnings = 10;
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	u32 in_index;
	unsigned int port_irqs;

	pp->pp_flags &= ~MV_PP_FLAG_FAKE_ATA_BUSY; /* paranoia */

	switch (qc->tf.protocol) {
	case ATA_PROT_DMA:
		if (qc->tf.command == ATA_CMD_DSM) {
			if (!ap->ops->bmdma_setup)  /* no bmdma on GEN_I */
				return AC_ERR_OTHER;
			break;  /* use bmdma for this */
		}
		fallthrough;
	case ATA_PROT_NCQ:
		mv_start_edma(ap, port_mmio, pp, qc->tf.protocol);
		pp->req_idx = (pp->req_idx + 1) & MV_MAX_Q_DEPTH_MASK;
		in_index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;

		/* Write the request in pointer to kick the EDMA to life */
		writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
					port_mmio + EDMA_REQ_Q_IN_PTR);
		return 0;

	case ATA_PROT_PIO:
		/*
		 * Errata SATA#16, SATA#24: warn if multiple DRQs expected.
		 *
		 * Someday, we might implement special polling workarounds
		 * for these, but it all seems rather unnecessary since we
		 * normally use only DMA for commands which transfer more
		 * than a single block of data.
		 *
		 * Much of the time, this could just work regardless.
		 * So for now, just log the incident, and allow the attempt.
		 */
		if (limit_warnings > 0 && (qc->nbytes / qc->sect_size) > 1) {
			--limit_warnings;
			ata_link_warn(qc->dev->link, DRV_NAME
				      ": attempting PIO w/multiple DRQ: "
				      "this may fail due to h/w errata\n");
		}
		fallthrough;
	case ATA_PROT_NODATA:
	case ATAPI_PROT_PIO:
	case ATAPI_PROT_NODATA:
		if (ap->flags & ATA_FLAG_PIO_POLLING)
			qc->tf.flags |= ATA_TFLAG_POLLING;
		break;
	}

	if (qc->tf.flags & ATA_TFLAG_POLLING)
		port_irqs = ERR_IRQ;	/* mask device interrupt when polling */
	else
		port_irqs = ERR_IRQ | DONE_IRQ;	/* unmask all interrupts */

	/*
	 * We're about to send a non-EDMA capable command to the
	 * port.  Turn off EDMA so there won't be problems accessing
	 * shadow block, etc registers.
	 */
	mv_stop_edma(ap);
	mv_clear_and_enable_port_irqs(ap, mv_ap_base(ap), port_irqs);
	mv_pmp_select(ap, qc->dev->link->pmp);

	if (qc->tf.command == ATA_CMD_READ_LOG_EXT) {
		struct mv_host_priv *hpriv = ap->host->private_data;
		/*
		 * Workaround for 88SX60x1 FEr SATA#25 (part 2).
		 *
		 * After any NCQ error, the READ_LOG_EXT command
		 * from libata-eh *must* use mv_qc_issue_fis().
		 * Otherwise it might fail, due to chip errata.
		 *
		 * Rather than special-case it, we'll just *always*
		 * use this method here for READ_LOG_EXT, making for
		 * easier testing.
		 */
		if (IS_GEN_II(hpriv))
			return mv_qc_issue_fis(qc);
	}
	return ata_bmdma_qc_issue(qc);
}
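/*
 * Doorbell arithmetic sketch for the EDMA path above, assuming the
 * 32-entry request ring this driver uses (MV_MAX_Q_DEPTH_MASK == 0x1f):
 * with req_idx == 31, "(pp->req_idx + 1) & MV_MAX_Q_DEPTH_MASK" wraps the
 * index to 0; the new index is then shifted into the in-pointer field of
 * EDMA_REQ_Q_IN_PTR while the register's low bits keep carrying the CRQB
 * ring's base address.
 */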
static struct ata_queued_cmd *mv_get_active_qc(struct ata_port *ap)
{
	struct mv_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;

	if (pp->pp_flags & MV_PP_FLAG_NCQ_EN)
		return NULL;
	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	if (qc && !(qc->tf.flags & ATA_TFLAG_POLLING))
		return qc;
	return NULL;
}
static void mv_pmp_error_handler(struct ata_port *ap)
{
	unsigned int pmp, pmp_map;
	struct mv_port_priv *pp = ap->private_data;

	if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH) {
		/*
		 * Perform NCQ error analysis on failed PMPs
		 * before we freeze the port entirely.
		 *
		 * The failed PMPs are marked earlier by mv_pmp_eh_prep().
		 */
		pmp_map = pp->delayed_eh_pmp_map;
		pp->pp_flags &= ~MV_PP_FLAG_DELAYED_EH;
		for (pmp = 0; pmp_map != 0; pmp++) {
			unsigned int this_pmp = (1 << pmp);
			if (pmp_map & this_pmp) {
				struct ata_link *link = &ap->pmp_link[pmp];
				pmp_map &= ~this_pmp;
				ata_eh_analyze_ncq_error(link);
			}
		}
		ata_port_freeze(ap);
	}
	sata_pmp_error_handler(ap);
}
static unsigned int mv_get_err_pmp_map(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);

	return readl(port_mmio + SATA_TESTCTL) >> 16;
}
static void mv_pmp_eh_prep(struct ata_port *ap, unsigned int pmp_map)
{
	unsigned int pmp;

	/*
	 * Initialize EH info for PMPs which saw device errors
	 */
	for (pmp = 0; pmp_map != 0; pmp++) {
		unsigned int this_pmp = (1 << pmp);
		if (pmp_map & this_pmp) {
			struct ata_link *link = &ap->pmp_link[pmp];
			struct ata_eh_info *ehi = &link->eh_info;

			pmp_map &= ~this_pmp;
			ata_ehi_clear_desc(ehi);
			ata_ehi_push_desc(ehi, "dev err");
			ehi->err_mask |= AC_ERR_DEV;
			ehi->action |= ATA_EH_RESET;
			ata_link_abort(link);
		}
	}
}
static int mv_req_q_empty(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 in_ptr, out_ptr;

	in_ptr  = (readl(port_mmio + EDMA_REQ_Q_IN_PTR)
			>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
	out_ptr = (readl(port_mmio + EDMA_REQ_Q_OUT_PTR)
			>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
	return (in_ptr == out_ptr);	/* 1 == queue_is_empty */
}
static int mv_handle_fbs_ncq_dev_err(struct ata_port *ap)
{
	struct mv_port_priv *pp = ap->private_data;
	int failed_links;
	unsigned int old_map, new_map;

	/*
	 * Device error during FBS+NCQ operation:
	 *
	 * Set a port flag to prevent further I/O being enqueued.
	 * Leave the EDMA running to drain outstanding commands from this port.
	 * Perform the post-mortem/EH only when all responses are complete.
	 * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.2).
	 */
	if (!(pp->pp_flags & MV_PP_FLAG_DELAYED_EH)) {
		pp->pp_flags |= MV_PP_FLAG_DELAYED_EH;
		pp->delayed_eh_pmp_map = 0;
	}
	old_map = pp->delayed_eh_pmp_map;
	new_map = old_map | mv_get_err_pmp_map(ap);

	if (old_map != new_map) {
		pp->delayed_eh_pmp_map = new_map;
		mv_pmp_eh_prep(ap, new_map & ~old_map);
	}
	failed_links = hweight16(new_map);

	ata_port_info(ap,
		      "%s: pmp_map=%04x qc_map=%04llx failed_links=%d nr_active_links=%d\n",
		      __func__, pp->delayed_eh_pmp_map,
		      ap->qc_active, failed_links,
		      ap->nr_active_links);

	if (ap->nr_active_links <= failed_links && mv_req_q_empty(ap)) {
		mv_process_crpb_entries(ap, pp);
		mv_stop_edma(ap);
		mv_eh_freeze(ap);
		ata_port_info(ap, "%s: done\n", __func__);
		return 1;	/* handled */
	}
	ata_port_info(ap, "%s: waiting\n", __func__);
	return 1;	/* handled */
}
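/*
 * State-machine summary of the delayed-EH handling above (derived from
 * the code): on the first device error we latch MV_PP_FLAG_DELAYED_EH
 * and start accumulating failing PMPs in delayed_eh_pmp_map; each
 * newly-failed PMP gets its EH info prepared (and its link aborted)
 * exactly once via mv_pmp_eh_prep(); only when every active link has
 * failed and the request queue has drained do we stop EDMA and freeze,
 * letting mv_pmp_error_handler() do the NCQ post-mortem.
 */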
static int mv_handle_fbs_non_ncq_dev_err(struct ata_port *ap)
{
	/*
	 * Possible future enhancement:
	 *
	 * FBS+non-NCQ operation is not yet implemented.
	 * See related notes in mv_edma_cfg().
	 *
	 * Device error during FBS+non-NCQ operation:
	 *
	 * We need to snapshot the shadow registers for each failed command.
	 * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.3).
	 */
	return 0;	/* not handled */
}
static int mv_handle_dev_err(struct ata_port *ap, u32 edma_err_cause)
{
	struct mv_port_priv *pp = ap->private_data;

	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
		return 0;	/* EDMA was not active: not handled */
	if (!(pp->pp_flags & MV_PP_FLAG_FBS_EN))
		return 0;	/* FBS was not active: not handled */

	if (!(edma_err_cause & EDMA_ERR_DEV))
		return 0;	/* non DEV error: not handled */
	edma_err_cause &= ~EDMA_ERR_IRQ_TRANSIENT;
	if (edma_err_cause & ~(EDMA_ERR_DEV | EDMA_ERR_SELF_DIS))
		return 0;	/* other problems: not handled */

	if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) {
		/*
		 * EDMA should NOT have self-disabled for this case.
		 * If it did, then something is wrong elsewhere,
		 * and we cannot handle it here.
		 */
		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
			ata_port_warn(ap, "%s: err_cause=0x%x pp_flags=0x%x\n",
				      __func__, edma_err_cause, pp->pp_flags);
			return 0; /* not handled */
		}
		return mv_handle_fbs_ncq_dev_err(ap);
	} else {
		/*
		 * EDMA should have self-disabled for this case.
		 * If it did not, then something is wrong elsewhere,
		 * and we cannot handle it here.
		 */
		if (!(edma_err_cause & EDMA_ERR_SELF_DIS)) {
			ata_port_warn(ap, "%s: err_cause=0x%x pp_flags=0x%x\n",
				      __func__, edma_err_cause, pp->pp_flags);
			return 0; /* not handled */
		}
		return mv_handle_fbs_non_ncq_dev_err(ap);
	}
	return 0;	/* not handled */
}
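/*
 * Decision table for the FBS device-error cases above:
 *
 *	mode		EDMA self-disabled?	handled by
 *	----		-------------------	----------
 *	FBS+NCQ		no (expected)		mv_handle_fbs_ncq_dev_err()
 *	FBS+NCQ		yes (unexpected)	not handled (warn)
 *	FBS+non-NCQ	yes (expected)		mv_handle_fbs_non_ncq_dev_err()
 *	FBS+non-NCQ	no (unexpected)		not handled (warn)
 */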
static void mv_unexpected_intr(struct ata_port *ap, int edma_was_enabled)
{
	struct ata_eh_info *ehi = &ap->link.eh_info;
	char *when = "idle";

	ata_ehi_clear_desc(ehi);
	if (edma_was_enabled) {
		when = "EDMA enabled";
	} else {
		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
		if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
			when = "polling";
	}
	ata_ehi_push_desc(ehi, "unexpected device interrupt while %s", when);
	ehi->err_mask |= AC_ERR_OTHER;
	ehi->action |= ATA_EH_RESET;
	ata_port_freeze(ap);
}
/**
 *	mv_err_intr - Handle error interrupts on the port
 *	@ap: ATA channel to manipulate
 *
 *	Most cases require a full reset of the chip's state machine,
 *	which also performs a COMRESET.
 *	Also, if the port disabled DMA, update our cached copy to match.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_err_intr(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 edma_err_cause, eh_freeze_mask, serr = 0;
	u32 fis_cause = 0;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int action = 0, err_mask = 0;
	struct ata_eh_info *ehi = &ap->link.eh_info;
	struct ata_queued_cmd *qc;
	int abort = 0;

	/*
	 * Read and clear the SError and err_cause bits.
	 * For GenIIe, if EDMA_ERR_TRANS_IRQ_7 is set, we also must read/clear
	 * the FIS_IRQ_CAUSE register before clearing edma_err_cause.
	 */
	sata_scr_read(&ap->link, SCR_ERROR, &serr);
	sata_scr_write_flush(&ap->link, SCR_ERROR, serr);

	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE);
	if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) {
		fis_cause = readl(port_mmio + FIS_IRQ_CAUSE);
		writelfl(~fis_cause, port_mmio + FIS_IRQ_CAUSE);
	}
	writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE);

	if (edma_err_cause & EDMA_ERR_DEV) {
		/*
		 * Device errors during FIS-based switching operation
		 * require special handling.
		 */
		if (mv_handle_dev_err(ap, edma_err_cause))
			return;
	}

	qc = mv_get_active_qc(ap);
	ata_ehi_clear_desc(ehi);
	ata_ehi_push_desc(ehi, "edma_err_cause=%08x pp_flags=%08x",
			  edma_err_cause, pp->pp_flags);

	if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) {
		ata_ehi_push_desc(ehi, "fis_cause=%08x", fis_cause);
		if (fis_cause & FIS_IRQ_CAUSE_AN) {
			u32 ec = edma_err_cause &
			       ~(EDMA_ERR_TRANS_IRQ_7 | EDMA_ERR_IRQ_TRANSIENT);
			sata_async_notification(ap);
			if (!ec)
				return; /* Just an AN; no need for the nukes */
			ata_ehi_push_desc(ehi, "SDB notify");
		}
	}
	/*
	 * All generations share these EDMA error cause bits:
	 */
	if (edma_err_cause & EDMA_ERR_DEV) {
		err_mask |= AC_ERR_DEV;
		action |= ATA_EH_RESET;
		ata_ehi_push_desc(ehi, "dev error");
	}
	if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
			EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
			EDMA_ERR_INTRL_PAR)) {
		err_mask |= AC_ERR_ATA_BUS;
		action |= ATA_EH_RESET;
		ata_ehi_push_desc(ehi, "parity error");
	}
	if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
		ata_ehi_hotplugged(ehi);
		ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
			"dev disconnect" : "dev connect");
		action |= ATA_EH_RESET;
	}

	/*
	 * Gen-I has a different SELF_DIS bit,
	 * different FREEZE bits, and no SERR bit:
	 */
	if (IS_GEN_I(hpriv)) {
		eh_freeze_mask = EDMA_EH_FREEZE_5;
		if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}
	} else {
		eh_freeze_mask = EDMA_EH_FREEZE;
		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}
		if (edma_err_cause & EDMA_ERR_SERR) {
			ata_ehi_push_desc(ehi, "SError=%08x", serr);
			err_mask |= AC_ERR_ATA_BUS;
			action |= ATA_EH_RESET;
		}
	}

	if (!err_mask) {
		err_mask = AC_ERR_OTHER;
		action |= ATA_EH_RESET;
	}

	ehi->serror |= serr;
	ehi->action |= action;

	if (qc)
		qc->err_mask |= err_mask;
	else
		ehi->err_mask |= err_mask;

	if (err_mask == AC_ERR_DEV) {
		/*
		 * Cannot do ata_port_freeze() here,
		 * because it would kill PIO access,
		 * which is needed for further diagnosis.
		 */
		mv_eh_freeze(ap);
		abort = 1;
	} else if (edma_err_cause & eh_freeze_mask) {
		/*
		 * Note to self: ata_port_freeze() calls ata_port_abort()
		 */
		ata_port_freeze(ap);
	} else {
		abort = 1;
	}

	if (abort) {
		if (qc)
			ata_link_abort(qc->dev->link);
		else
			ata_port_abort(ap);
	}
}
static bool mv_process_crpb_response(struct ata_port *ap,
		struct mv_crpb *response, unsigned int tag, int ncq_enabled)
{
	u8 ata_status;
	u16 edma_status = le16_to_cpu(response->flags);

	/*
	 * edma_status from a response queue entry:
	 *   LSB is from EDMA_ERR_IRQ_CAUSE (non-NCQ only).
	 *   MSB is saved ATA status from command completion.
	 */
	if (!ncq_enabled) {
		u8 err_cause = edma_status & 0xff & ~EDMA_ERR_DEV;
		if (err_cause) {
			/*
			 * Error will be seen/handled by
			 * mv_err_intr().  So do nothing at all here.
			 */
			return false;
		}
	}
	ata_status = edma_status >> CRPB_FLAG_STATUS_SHIFT;
	if (!ac_err_mask(ata_status))
		return true;
	/* else: leave it for mv_err_intr() */
	return false;
}
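/*
 * Example of the edma_status decoding above (illustrative values, with
 * the saved ATA status in the high byte as the comment in the code
 * states): a response flags word of 0x5000 has no error-cause bits in
 * its LSB and carries ATA status 0x50 (DRDY, no ERR) in its MSB, so
 * ac_err_mask() returns 0 and the command is completed as successful.
 */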
static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_host_priv *hpriv = ap->host->private_data;
	u32 in_index;
	bool work_done = false;
	u32 done_mask = 0;
	int ncq_enabled = (pp->pp_flags & MV_PP_FLAG_NCQ_EN);

	/* Get the hardware queue position index */
	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR)
			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	/* Process new responses from since the last time we looked */
	while (in_index != pp->resp_idx) {
		unsigned int tag;
		struct mv_crpb *response = &pp->crpb[pp->resp_idx];

		pp->resp_idx = (pp->resp_idx + 1) & MV_MAX_Q_DEPTH_MASK;

		if (IS_GEN_I(hpriv)) {
			/* 50xx: no NCQ, only one command active at a time */
			tag = ap->link.active_tag;
		} else {
			/* Gen II/IIE: get command tag from CRPB entry */
			tag = le16_to_cpu(response->id) & 0x1f;
		}
		if (mv_process_crpb_response(ap, response, tag, ncq_enabled))
			done_mask |= 1 << tag;
		work_done = true;
	}

	if (work_done) {
		ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask);

		/* Update the software queue position index in hardware */
		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
			 (pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT),
			 port_mmio + EDMA_RSP_Q_OUT_PTR);
	}
}
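/*
 * Completion sketch for the code above: done_mask collects one bit per
 * tag that completed cleanly, and "ata_qc_get_active(ap) ^ done_mask"
 * computes the set of commands that must remain active, which is what
 * ata_qc_complete_multiple() expects as its new active-queue snapshot.
 */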
static void mv_port_intr(struct ata_port *ap, u32 port_cause)
{
	struct mv_port_priv *pp;
	int edma_was_enabled;

	/*
	 * Grab a snapshot of the EDMA_EN flag setting,
	 * so that we have a consistent view for this port,
	 * even if one of the routines we call changes it.
	 */
	pp = ap->private_data;
	edma_was_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
	/*
	 * Process completed CRPB response(s) before other events.
	 */
	if (edma_was_enabled && (port_cause & DONE_IRQ)) {
		mv_process_crpb_entries(ap, pp);
		if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
			mv_handle_fbs_ncq_dev_err(ap);
	}
	/*
	 * Handle chip-reported errors, or continue on to handle PIO.
	 */
	if (unlikely(port_cause & ERR_IRQ)) {
		mv_err_intr(ap);
	} else if (!edma_was_enabled) {
		struct ata_queued_cmd *qc = mv_get_active_qc(ap);
		if (qc)
			ata_bmdma_port_intr(ap, qc);
		else
			mv_unexpected_intr(ap, edma_was_enabled);
	}
}
/**
 *	mv_host_intr - Handle all interrupts on the given host controller
 *	@host: host specific structure
 *	@main_irq_cause: Main interrupt cause register for the chip.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int mv_host_intr(struct ata_host *host, u32 main_irq_cause)
{
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base, *hc_mmio;
	unsigned int handled = 0, port;

	/* If asserted, clear the "all ports" IRQ coalescing bit */
	if (main_irq_cause & ALL_PORTS_COAL_DONE)
		writel(~ALL_PORTS_COAL_IRQ, mmio + IRQ_COAL_CAUSE);

	for (port = 0; port < hpriv->n_ports; port++) {
		struct ata_port *ap = host->ports[port];
		unsigned int p, shift, hardport, port_cause;

		MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
		/*
		 * Each hc within the host has its own hc_irq_cause register,
		 * where the interrupting ports bits get ack'd.
		 */
		if (hardport == 0) {	/* first port on this hc ? */
			u32 hc_cause = (main_irq_cause >> shift) & HC0_IRQ_PEND;
			u32 port_mask, ack_irqs;
			/*
			 * Skip this entire hc if nothing pending for any ports
			 */
			if (!hc_cause) {
				port += MV_PORTS_PER_HC - 1;
				continue;
			}
			/*
			 * We don't need/want to read the hc_irq_cause register,
			 * because doing so hurts performance, and
			 * main_irq_cause already gives us everything we need.
			 *
			 * But we do have to *write* to the hc_irq_cause to ack
			 * the ports that we are handling this time through.
			 *
			 * This requires that we create a bitmap for those
			 * ports which interrupted us, and use that bitmap
			 * to ack (only) those ports via hc_irq_cause.
			 */
			ack_irqs = 0;
			if (hc_cause & PORTS_0_3_COAL_DONE)
				ack_irqs = HC_COAL_IRQ;
			for (p = 0; p < MV_PORTS_PER_HC; ++p) {
				if ((port + p) >= hpriv->n_ports)
					break;
				port_mask = (DONE_IRQ | ERR_IRQ) << (p * 2);
				if (hc_cause & port_mask)
					ack_irqs |= (DMA_IRQ | DEV_IRQ) << p;
			}
			hc_mmio = mv_hc_base_from_port(mmio, port);
			writelfl(~ack_irqs, hc_mmio + HC_IRQ_CAUSE);
			handled = 1;
		}
		/*
		 * Handle interrupts signalled for this port:
		 */
		port_cause = (main_irq_cause >> shift) & (DONE_IRQ | ERR_IRQ);
		if (port_cause)
			mv_port_intr(ap, port_cause);
	}
	return handled;
}
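/*
 * Worked example of the ack-bitmap computation above, assuming the
 * two-bits-per-port (DONE then ERR) layout that the "<< (p * 2)" shift
 * implies: if only hard-port 1 on this hc has something pending, then
 * hc_cause has (DONE_IRQ | ERR_IRQ) << 2 set, and we ack
 * (DMA_IRQ | DEV_IRQ) << 1 in hc_irq_cause, leaving the other ports'
 * latched bits untouched.
 */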
static int mv_pci_error(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	struct ata_port *ap;
	struct ata_queued_cmd *qc;
	struct ata_eh_info *ehi;
	unsigned int i, err_mask, printed = 0;
	u32 err_cause;

	err_cause = readl(mmio + hpriv->irq_cause_offset);

	dev_err(host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n", err_cause);

	dev_dbg(host->dev, "%s: All regs @ PCI error\n", __func__);
	mv_dump_all_regs(mmio, to_pci_dev(host->dev));

	writelfl(0, mmio + hpriv->irq_cause_offset);

	for (i = 0; i < host->n_ports; i++) {
		ap = host->ports[i];
		if (!ata_link_offline(&ap->link)) {
			ehi = &ap->link.eh_info;
			ata_ehi_clear_desc(ehi);
			if (!printed++)
				ata_ehi_push_desc(ehi,
					"PCI err cause 0x%08x", err_cause);
			err_mask = AC_ERR_HOST_BUS;
			ehi->action = ATA_EH_RESET;
			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc)
				qc->err_mask |= err_mask;
			else
				ehi->err_mask |= err_mask;

			ata_port_freeze(ap);
		}
	}
	return 1;	/* handled */
}
/**
 *	mv_interrupt - Main interrupt event handler
 *	@irq: unused
 *	@dev_instance: private data; in this case the host structure
 *
 *	Read the read only register to determine if any host
 *	controllers have pending interrupts.  If so, call lower level
 *	routine to handle.  Also check for PCI errors which are only
 *	reported here.
 *
 *	LOCKING:
 *	This routine holds the host lock while processing pending
 *	interrupts.
 */
static irqreturn_t mv_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	struct mv_host_priv *hpriv = host->private_data;
	unsigned int handled = 0;
	int using_msi = hpriv->hp_flags & MV_HP_FLAG_MSI;
	u32 main_irq_cause, pending_irqs;

	spin_lock(&host->lock);

	/* for MSI:  block new interrupts while in here */
	if (using_msi)
		mv_write_main_irq_mask(0, hpriv);

	main_irq_cause = readl(hpriv->main_irq_cause_addr);
	pending_irqs   = main_irq_cause & hpriv->main_irq_mask;
	/*
	 * Deal with cases where we either have nothing pending, or have read
	 * a bogus register value which can indicate HW removal or PCI fault.
	 */
	if (pending_irqs && main_irq_cause != 0xffffffffU) {
		if (unlikely((pending_irqs & PCI_ERR) && !IS_SOC(hpriv)))
			handled = mv_pci_error(host, hpriv->base);
		else
			handled = mv_host_intr(host, pending_irqs);
	}

	/* for MSI: unmask; interrupt cause bits will retrigger now */
	if (using_msi)
		mv_write_main_irq_mask(hpriv->main_irq_mask, hpriv);

	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}
static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_ERROR:
	case SCR_CONTROL:
		ofs = sc_reg_in * sizeof(u32);
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}
static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val)
{
	struct mv_host_priv *hpriv = link->ap->host->private_data;
	void __iomem *mmio = hpriv->base;
	void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		*val = readl(addr + ofs);
		return 0;
	} else
		return -EINVAL;
}
static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
{
	struct mv_host_priv *hpriv = link->ap->host->private_data;
	void __iomem *mmio = hpriv->base;
	void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		writelfl(val, addr + ofs);
		return 0;
	} else
		return -EINVAL;
}
static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	int early_5080;

	early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);

	if (!early_5080) {
		u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
		tmp |= (1 << 0);
		writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
	}

	mv_reset_pci_bus(host, mmio);
}
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x0fcfffff, mmio + FLASH_CTL);
}
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
	u32 tmp;

	tmp = readl(phy_mmio + MV5_PHY_MODE);

	hpriv->signal[idx].pre = tmp & 0x1800;	/* bits 12:11 */
	hpriv->signal[idx].amps = tmp & 0xe0;	/* bits 7:5 */
}
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	writel(0, mmio + GPIO_PORT_CTL);

	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */

	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
	tmp |= ~(1 << 0);
	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
}
static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
	u32 tmp;
	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);

	if (fix_apm_sq) {
		tmp = readl(phy_mmio + MV5_LTMODE);
		tmp |= (1 << 19);
		writel(tmp, phy_mmio + MV5_LTMODE);

		tmp = readl(phy_mmio + MV5_PHY_CTL);
		tmp &= ~0x3;
		tmp |= 0x1;
		writel(tmp, phy_mmio + MV5_PHY_CTL);
	}

	tmp = readl(phy_mmio + MV5_PHY_MODE);
	tmp &= ~mask;
	tmp |= hpriv->signal[port].pre;
	tmp |= hpriv->signal[port].amps;
	writel(tmp, phy_mmio + MV5_PHY_MODE);
}
#define ZERO(reg) writel(0, port_mmio + (reg))
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	mv_reset_channel(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x11f, port_mmio + EDMA_CFG);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
#undef ZERO
#define ZERO(reg) writel(0, hc_mmio + (reg))
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int hc)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 tmp;

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
	ZERO(0x018);

	tmp = readl(hc_mmio + 0x20);
	tmp &= 0x1c1c1c1c;
	tmp |= 0x03030303;
	writel(tmp, hc_mmio + 0x20);
}
#undef ZERO
static int mv5_reset_hc(struct ata_host *host, void __iomem *mmio,
			unsigned int n_hc)
{
	struct mv_host_priv *hpriv = host->private_data;
	unsigned int hc, port;

	for (hc = 0; hc < n_hc; hc++) {
		for (port = 0; port < MV_PORTS_PER_HC; port++)
			mv5_reset_hc_port(hpriv, mmio,
					  (hc * MV_PORTS_PER_HC) + port);

		mv5_reset_one_hc(hpriv, mmio, hc);
	}

	return 0;
}
#define ZERO(reg) writel(0, mmio + (reg))
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	u32 tmp;

	tmp = readl(mmio + MV_PCI_MODE);
	tmp &= 0xff00ffff;
	writel(tmp, mmio + MV_PCI_MODE);

	ZERO(MV_PCI_DISC_TIMER);
	ZERO(MV_PCI_MSI_TRIGGER);
	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
	ZERO(MV_PCI_SERR_MASK);
	ZERO(hpriv->irq_cause_offset);
	ZERO(hpriv->irq_mask_offset);
	ZERO(MV_PCI_ERR_LOW_ADDRESS);
	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
	ZERO(MV_PCI_ERR_ATTRIBUTE);
	ZERO(MV_PCI_ERR_COMMAND);
}
#undef ZERO
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	mv5_reset_flash(hpriv, mmio);

	tmp = readl(mmio + GPIO_PORT_CTL);
	tmp &= 0x3;
	tmp |= (1 << 5) | (1 << 6);
	writel(tmp, mmio + GPIO_PORT_CTL);
}
/**
 *	mv6_reset_hc - Perform the 6xxx global soft reset
 *	@host: host for which to perform the reset
 *	@mmio: base address of the HBA
 *	@n_hc: number of host controllers
 *
 *	This routine only applies to 6xxx parts.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int mv6_reset_hc(struct ata_host *host, void __iomem *mmio,
			unsigned int n_hc)
{
	void __iomem *reg = mmio + PCI_MAIN_CMD_STS;
	int i, rc = 0;
	u32 t;

	/* Following procedure defined in PCI "main command and status
	 * register" table.
	 */
	t = readl(reg);
	writel(t | STOP_PCI_MASTER, reg);

	for (i = 0; i < 1000; i++) {
		udelay(1);
		t = readl(reg);
		if (PCI_MASTER_EMPTY & t)
			break;
	}
	if (!(PCI_MASTER_EMPTY & t)) {
		dev_err(host->dev, "PCI master won't flush\n");
		rc = 1;
		goto done;
	}

	/* set reset */
	i = 5;
	do {
		writel(t | GLOB_SFT_RST, reg);
		t = readl(reg);
		udelay(1);
	} while (!(GLOB_SFT_RST & t) && (i-- > 0));

	if (!(GLOB_SFT_RST & t)) {
		dev_err(host->dev, "can't set global reset\n");
		rc = 1;
		goto done;
	}

	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
	i = 5;
	do {
		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
		t = readl(reg);
		udelay(1);
	} while ((GLOB_SFT_RST & t) && (i-- > 0));

	if (GLOB_SFT_RST & t) {
		dev_err(host->dev, "can't clear global reset\n");
		rc = 1;
	}
done:
	return rc;
}
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

	tmp = readl(mmio + RESET_CFG);
	if ((tmp & (1 << 0)) == 0) {
		hpriv->signal[idx].amps = 0x7 << 8;
		hpriv->signal[idx].pre = 0x1 << 5;
		return;
	}

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x00000060, mmio + GPIO_PORT_CTL);
}
static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	u32 hp_flags = hpriv->hp_flags;
	int fix_phy_mode2 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	int fix_phy_mode4 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	u32 m2, m3;

	if (fix_phy_mode2) {
		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~(1 << 16);
		m2 |= (1 << 31);
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);

		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~((1 << 16) | (1 << 31));
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);
	}

	/*
	 * Gen-II/IIe PHY_MODE3 errata RM#2:
	 * Achieves better receiver noise performance than the h/w default:
	 */
	m3 = readl(port_mmio + PHY_MODE3);
	m3 = (m3 & 0x1f) | (0x5555601 << 5);

	/* Guideline 88F5182 (GL# SATA-S11) */
	if (IS_SOC(hpriv))
		m3 &= ~0x1c;

	if (fix_phy_mode4) {
		u32 m4 = readl(port_mmio + PHY_MODE4);
		/*
		 * Enforce reserved-bit restrictions on GenIIe devices only.
		 * For earlier chipsets, force only the internal config field
		 * (workaround for errata FEr SATA#10 part 1).
		 */
		if (IS_GEN_IIE(hpriv))
			m4 = (m4 & ~PHY_MODE4_RSVD_ZEROS) | PHY_MODE4_RSVD_ONES;
		else
			m4 = (m4 & ~PHY_MODE4_CFG_MASK) | PHY_MODE4_CFG_VALUE;
		writel(m4, port_mmio + PHY_MODE4);
	}
	/*
	 * Workaround for 60x1-B2 errata SATA#13:
	 * Any write to PHY_MODE4 (above) may corrupt PHY_MODE3,
	 * so we must always rewrite PHY_MODE3 after PHY_MODE4.
	 * Or ensure we use writelfl() when writing PHY_MODE4.
	 */
	writel(m3, port_mmio + PHY_MODE3);

	/* Revert values of pre-emphasis and signal amps to the saved ones */
	m2 = readl(port_mmio + PHY_MODE2);

	m2 &= ~MV_M2_PREAMP_MASK;
	m2 |= hpriv->signal[port].amps;
	m2 |= hpriv->signal[port].pre;
	m2 &= ~(1 << 16);

	/* according to mvSata 3.6.1, some IIE values are fixed */
	if (IS_GEN_IIE(hpriv)) {
		m2 &= ~0xC30FF01F;
		m2 |= 0x0000900F;
	}

	writel(m2, port_mmio + PHY_MODE2);
}
/* TODO: use the generic LED interface to configure the SATA Presence */
/* & Activity LEDs on the board */
static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
				void __iomem *mmio)
{
}

static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}
#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
					void __iomem *mmio, unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	mv_reset_channel(hpriv, mmio, port);

	ZERO(0x028);		/* command */
	writel(0x101f, port_mmio + EDMA_CFG);
	ZERO(0x004);		/* timer */
	ZERO(0x008);		/* irq err cause */
	ZERO(0x00c);		/* irq err mask */
	ZERO(0x010);		/* rq bah */
	ZERO(0x014);		/* rq inp */
	ZERO(0x018);		/* rq outp */
	ZERO(0x01c);		/* respq bah */
	ZERO(0x024);		/* respq outp */
	ZERO(0x020);		/* respq inp */
	ZERO(0x02c);		/* test control */
	writel(0x800, port_mmio + EDMA_IORDY_TMOUT);
}
#undef ZERO
#define ZERO(reg) writel(0, hc_mmio + (reg))
static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
				       void __iomem *mmio)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, 0);

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
}
#undef ZERO
static int mv_soc_reset_hc(struct ata_host *host,
				  void __iomem *mmio, unsigned int n_hc)
{
	struct mv_host_priv *hpriv = host->private_data;
	unsigned int port;

	for (port = 0; port < hpriv->n_ports; port++)
		mv_soc_reset_hc_port(hpriv, mmio, port);

	mv_soc_reset_one_hc(hpriv, mmio);

	return 0;
}
static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
				      void __iomem *mmio)
{
	return;
}

static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
{
	return;
}
static void mv_soc_65n_phy_errata(struct mv_host_priv *hpriv,
				  void __iomem *mmio, unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);
	u32 reg;

	reg = readl(port_mmio + PHY_MODE3);
	reg &= ~(0x3 << 27);	/* SELMUPF (bits 28:27) to 1 */
	reg |= (0x1 << 27);
	reg &= ~(0x3 << 29);	/* SELMUPI (bits 30:29) to 1 */
	reg |= (0x1 << 29);
	writel(reg, port_mmio + PHY_MODE3);

	reg = readl(port_mmio + PHY_MODE4);
	reg &= ~0x1;	/* SATU_OD8 (bit 0) to 0, reserved bit 16 must be set */
	reg |= (0x1 << 16);
	writel(reg, port_mmio + PHY_MODE4);

	reg = readl(port_mmio + PHY_MODE9_GEN2);
	reg &= ~0xf;	/* TXAMP[3:0] (bits 3:0) to 8 */
	reg |= 0x8;
	reg &= ~(0x1 << 14);	/* TXAMP[4] (bit 14) to 0 */
	writel(reg, port_mmio + PHY_MODE9_GEN2);

	reg = readl(port_mmio + PHY_MODE9_GEN1);
	reg &= ~0xf;	/* TXAMP[3:0] (bits 3:0) to 8 */
	reg |= 0x8;
	reg &= ~(0x1 << 14);	/* TXAMP[4] (bit 14) to 0 */
	writel(reg, port_mmio + PHY_MODE9_GEN1);
}
/*
 *	soc_is_65n - check if the SoC is a 65 nano device
 *
 *	Detect the type of the SoC by reading the PHYCFG_OFS register, which
 *	exists only in the 65 nano devices and should contain a non-zero
 *	value there; reading it on older devices returns 0.
 */
static bool soc_is_65n(struct mv_host_priv *hpriv)
{
	void __iomem *port0_mmio = mv_port_base(hpriv->base, 0);

	if (readl(port0_mmio + PHYCFG_OFS))
		return true;
	return false;
}
static void mv_setup_ifcfg(void __iomem *port_mmio, int want_gen2i)
{
	u32 ifcfg = readl(port_mmio + SATA_IFCFG);

	ifcfg = (ifcfg & 0xf7f) | 0x9b1000;	/* from chip spec */
	if (want_gen2i)
		ifcfg |= (1 << 7);		/* enable gen2i speed */
	writelfl(ifcfg, port_mmio + SATA_IFCFG);
}
static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no)
{
	void __iomem *port_mmio = mv_port_base(mmio, port_no);

	/*
	 * The datasheet warns against setting EDMA_RESET when EDMA is active
	 * (but doesn't say what the problem might be).  So we first try
	 * to disable the EDMA engine before doing the EDMA_RESET operation.
	 */
	mv_stop_edma_engine(port_mmio);
	writelfl(EDMA_RESET, port_mmio + EDMA_CMD);

	if (!IS_GEN_I(hpriv)) {
		/* Enable 3.0gb/s link speed: this survives EDMA_RESET */
		mv_setup_ifcfg(port_mmio, 1);
	}
	/*
	 * Strobing EDMA_RESET here causes a hard reset of the SATA transport,
	 * link, and physical layers.  It resets all SATA interface registers
	 * (except for SATA_IFCFG), and issues a COMRESET to the dev.
	 */
	writelfl(EDMA_RESET, port_mmio + EDMA_CMD);
	udelay(25);	/* allow reset propagation */
	writelfl(0, port_mmio + EDMA_CMD);

	hpriv->ops->phy_errata(hpriv, mmio, port_no);

	if (IS_GEN_I(hpriv))
		usleep_range(500, 1000);
}
static void mv_pmp_select(struct ata_port *ap, int pmp)
{
	if (sata_pmp_supported(ap)) {
		void __iomem *port_mmio = mv_ap_base(ap);
		u32 reg = readl(port_mmio + SATA_IFCTL);
		int old = reg & 0xf;

		if (old != pmp) {
			reg = (reg & ~0xf) | pmp;
			writelfl(reg, port_mmio + SATA_IFCTL);
		}
	}
}
static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
			    unsigned long deadline)
{
	mv_pmp_select(link->ap, sata_srst_pmp(link));
	return sata_std_hardreset(link, class, deadline);
}

static int mv_softreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	mv_pmp_select(link->ap, sata_srst_pmp(link));
	return ata_sff_softreset(link, class, deadline);
}
static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	void __iomem *mmio = hpriv->base;
	int rc, attempts = 0, extra = 0;
	u32 sstatus;
	bool online;

	mv_reset_channel(hpriv, mmio, ap->port_no);
	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	pp->pp_flags &=
	  ~(MV_PP_FLAG_FBS_EN | MV_PP_FLAG_NCQ_EN | MV_PP_FLAG_FAKE_ATA_BUSY);

	/* Workaround for errata FEr SATA#10 (part 2) */
	do {
		const unsigned int *timing =
				sata_ehc_deb_timing(&link->eh_context);

		rc = sata_link_hardreset(link, timing, deadline + extra,
					 &online, NULL);
		rc = online ? -EAGAIN : rc;
		if (rc)
			return rc;
		sata_scr_read(link, SCR_STATUS, &sstatus);
		if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) {
			/* Force 1.5gb/s link speed and try again */
			mv_setup_ifcfg(mv_ap_base(ap), 0);
			if (time_after(jiffies + HZ, deadline))
				extra = HZ; /* only extend it once, max */
		}
	} while (sstatus != 0x0 && sstatus != 0x113 && sstatus != 0x123);
	mv_save_cached_regs(ap);
	mv_edma_cfg(ap, 0, 0);

	return rc;
}
static void mv_eh_freeze(struct ata_port *ap)
{
	mv_stop_edma(ap);
	mv_enable_port_irqs(ap, 0);
}
static void mv_eh_thaw(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int port = ap->port_no;
	unsigned int hardport = mv_hardport_from_port(port);
	void __iomem *hc_mmio = mv_hc_base_from_port(hpriv->base, port);
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 hc_irq_cause;

	/* clear EDMA errors on this port */
	writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE);

	/* clear pending irq events */
	hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport);
	writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE);

	mv_enable_port_irqs(ap, ERR_IRQ);
}
/**
 *	mv_port_init - Perform some early initialization on a single port.
 *	@port: libata data structure storing shadow register addresses
 *	@port_mmio: base address of the port
 *
 *	Initialize shadow register mmio addresses, clear outstanding
 *	interrupts on the port, and unmask interrupts for the future
 *	start of the port.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
{
	void __iomem *serr, *shd_base = port_mmio + SHD_BLK;

	/* PIO related setup
	 */
	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
	port->error_addr =
		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
	port->status_addr =
		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
	/* special case: control/altstatus doesn't have ATA_REG_ address */
	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST;

	/* Clear any currently outstanding port interrupt conditions */
	serr = port_mmio + mv_scr_offset(SCR_ERROR);
	writelfl(readl(serr), serr);
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE);

	/* unmask all non-transient EDMA error interrupts */
	writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK);
}
static unsigned int mv_in_pcix_mode(struct ata_host *host)
{
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;
	u32 reg;

	if (IS_SOC(hpriv) || !IS_PCIE(hpriv))
		return 0;	/* not PCI-X capable */
	reg = readl(mmio + MV_PCI_MODE);
	if ((reg & MV_PCI_MODE_MASK) == 0)
		return 0;	/* conventional PCI mode */
	return 1;	/* chip is in PCI-X mode */
}
static int mv_pci_cut_through_okay(struct ata_host *host)
{
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;
	u32 reg;

	if (!mv_in_pcix_mode(host)) {
		reg = readl(mmio + MV_PCI_COMMAND);
		if (reg & MV_PCI_COMMAND_MRDTRIG)
			return 0; /* not okay */
	}
	return 1; /* okay */
}
static void mv_60x1b2_errata_pci7(struct ata_host *host)
{
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;

	/* workaround for 60x1-B2 errata PCI#7 */
	if (mv_in_pcix_mode(host)) {
		u32 reg = readl(mmio + MV_PCI_COMMAND);
		writelfl(reg & ~MV_PCI_COMMAND_MWRCOM, mmio + MV_PCI_COMMAND);
	}
}
static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u32 hp_flags = hpriv->hp_flags;

	switch (board_idx) {
	case chip_5080:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x1:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_warn(&pdev->dev,
				 "Applying 50XXB2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_504x:
	case chip_508x:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_warn(&pdev->dev,
				 "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_604x:
	case chip_608x:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_II;

		switch (pdev->revision) {
		case 0x7:
			mv_60x1b2_errata_pci7(host);
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		case 0x9:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_warn(&pdev->dev,
				 "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		}
		break;

	case chip_7042:
		hp_flags |= MV_HP_PCIE | MV_HP_CUT_THROUGH;
		if (pdev->vendor == PCI_VENDOR_ID_TTI &&
		    (pdev->device == 0x2300 || pdev->device == 0x2310))
		{
			/*
			 * Highpoint RocketRAID PCIe 23xx series cards:
			 *
			 * Unconfigured drives are treated as "Legacy"
			 * by the BIOS, and it overwrites sector 8 with
			 * a "Lgcy" metadata block prior to Linux boot.
			 *
			 * Configured drives (RAID or JBOD) leave sector 8
			 * alone, but instead overwrite a high numbered
			 * sector for the RAID metadata.  This sector can
			 * be determined exactly, by truncating the physical
			 * drive capacity to a nice even GB value.
			 *
			 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
			 *
			 * Warn the user, lest they think we're just buggy.
			 */
			dev_warn(&pdev->dev, "Highpoint RocketRAID"
				" BIOS CORRUPTS DATA on all attached drives,"
				" regardless of if/how they are configured."
				" BEWARE!\n");
			dev_warn(&pdev->dev, "For data safety, do not"
				" use sectors 8-9 on \"Legacy\" drives,"
				" and avoid the final two gigabytes on"
				" all RocketRAID BIOS initialized drives.\n");
		}
		fallthrough;
	case chip_6042:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_IIE;
		if (board_idx == chip_6042 && mv_pci_cut_through_okay(host))
			hp_flags |= MV_HP_CUT_THROUGH;

		switch (pdev->revision) {
		case 0x2: /* Rev.B0: the first/only public release */
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_warn(&pdev->dev,
				 "Applying 60X1C0 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		}
		break;
	case chip_soc:
		if (soc_is_65n(hpriv))
			hpriv->ops = &mv_soc_65n_ops;
		else
			hpriv->ops = &mv_soc_ops;
		hp_flags |= MV_HP_FLAG_SOC | MV_HP_GEN_IIE |
			MV_HP_ERRATA_60X1C0;
		break;

	default:
		dev_alert(host->dev, "BUG: invalid board index %u\n", board_idx);
		return -EINVAL;
	}

	hpriv->hp_flags = hp_flags;
	if (hp_flags & MV_HP_PCIE) {
		hpriv->irq_cause_offset	= PCIE_IRQ_CAUSE;
		hpriv->irq_mask_offset	= PCIE_IRQ_MASK;
		hpriv->unmask_all_irqs	= PCIE_UNMASK_ALL_IRQS;
	} else {
		hpriv->irq_cause_offset	= PCI_IRQ_CAUSE;
		hpriv->irq_mask_offset	= PCI_IRQ_MASK;
		hpriv->unmask_all_irqs	= PCI_UNMASK_ALL_IRQS;
	}

	return 0;
}
/**
 *	mv_init_host - Perform some early initialization of the host.
 *	@host: ATA host to initialize
 *
 *	If possible, do an early global reset of the host.  Then do
 *	our port init and clear/unmask all/relevant host interrupts.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int mv_init_host(struct ata_host *host)
{
	int rc = 0, n_hc, port, hc;
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;

	rc = mv_chip_id(host, hpriv->board_idx);
	if (rc)
		goto done;

	if (IS_SOC(hpriv)) {
		hpriv->main_irq_cause_addr = mmio + SOC_HC_MAIN_IRQ_CAUSE;
		hpriv->main_irq_mask_addr  = mmio + SOC_HC_MAIN_IRQ_MASK;
	} else {
		hpriv->main_irq_cause_addr = mmio + PCI_HC_MAIN_IRQ_CAUSE;
		hpriv->main_irq_mask_addr  = mmio + PCI_HC_MAIN_IRQ_MASK;
	}

	/* initialize shadow irq mask with register's value */
	hpriv->main_irq_mask = readl(hpriv->main_irq_mask_addr);

	/* global interrupt mask: 0 == mask everything */
	mv_set_main_irq_mask(host, ~0, 0);

	n_hc = mv_get_hc_count(host->ports[0]->flags);

	for (port = 0; port < host->n_ports; port++)
		if (hpriv->ops->read_preamp)
			hpriv->ops->read_preamp(hpriv, port, mmio);

	rc = hpriv->ops->reset_hc(host, mmio, n_hc);
	if (rc)
		goto done;

	hpriv->ops->reset_flash(hpriv, mmio);
	hpriv->ops->reset_bus(host, mmio);
	hpriv->ops->enable_leds(hpriv, mmio);

	for (port = 0; port < host->n_ports; port++) {
		struct ata_port *ap = host->ports[port];
		void __iomem *port_mmio = mv_port_base(mmio, port);

		mv_port_init(&ap->ioaddr, port_mmio);
	}

	for (hc = 0; hc < n_hc; hc++) {
		void __iomem *hc_mmio = mv_hc_base(mmio, hc);

		dev_dbg(host->dev, "HC%i: HC config=0x%08x HC IRQ cause "
			"(before clear)=0x%08x\n", hc,
			readl(hc_mmio + HC_CFG),
			readl(hc_mmio + HC_IRQ_CAUSE));

		/* Clear any currently outstanding hc interrupt conditions */
		writelfl(0, hc_mmio + HC_IRQ_CAUSE);
	}

	if (!IS_SOC(hpriv)) {
		/* Clear any currently outstanding host interrupt conditions */
		writelfl(0, mmio + hpriv->irq_cause_offset);

		/* and unmask interrupt generation for host regs */
		writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_offset);
	}

	/*
	 * enable only global host interrupts for now.
	 * The per-port interrupts get done later as ports are set up.
	 */
	mv_set_main_irq_mask(host, 0, PCI_ERR);
	mv_set_irq_coalescing(host, irq_coalescing_io_count,
				    irq_coalescing_usecs);
done:
	return rc;
}
static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
{
	hpriv->crqb_pool   = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
							     MV_CRQB_Q_SZ, 0);
	if (!hpriv->crqb_pool)
		return -ENOMEM;

	hpriv->crpb_pool   = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
							     MV_CRPB_Q_SZ, 0);
	if (!hpriv->crpb_pool)
		return -ENOMEM;

	hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
							     MV_SG_TBL_SZ, 0);
	if (!hpriv->sg_tbl_pool)
		return -ENOMEM;
	return 0;
}
static void mv_conf_mbus_windows(struct mv_host_priv *hpriv,
				 const struct mbus_dram_target_info *dram)
{
	int i;

	for (i = 0; i < 4; i++) {
		writel(0, hpriv->base + WINDOW_CTRL(i));
		writel(0, hpriv->base + WINDOW_BASE(i));
	}

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		writel(((cs->size - 1) & 0xffff0000) |
			(cs->mbus_attr << 8) |
			(dram->mbus_dram_target_id << 4) | 1,
			hpriv->base + WINDOW_CTRL(i));
		writel(cs->base, hpriv->base + WINDOW_BASE(i));
	}
}
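/*
 * Encoding sketch for the WINDOW_CTRL value programmed above (derived
 * from the expression in the code, not from separate documentation):
 * bits 31:16 hold the window size minus one (64kB granularity), bits
 * 15:8 the chip-select attribute, bits 7:4 the DRAM target id, and bit 0
 * enables the window.  For example, a 256MB chip select (cs->size ==
 * 0x10000000) yields a size field of 0x0fff in the upper 16 bits.
 */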
/**
 *	mv_platform_probe - handle a positive probe of an soc Marvell
 *	host
 *	@pdev: platform device found
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int mv_platform_probe(struct platform_device *pdev)
{
	const struct mv_sata_platform_data *mv_platform_data;
	const struct mbus_dram_target_info *dram;
	const struct ata_port_info *ppi[] =
	    { &mv_port_info[chip_soc], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	struct resource *res;
	int n_ports = 0, irq = 0;
	int rc;
	int port;

	ata_print_version_once(&pdev->dev, DRV_VERSION);

	/*
	 * Simple resource validation ..
	 */
	if (unlikely(pdev->num_resources != 1)) {
		dev_err(&pdev->dev, "invalid number of resources\n");
		return -EINVAL;
	}

	/*
	 * Get the register base first
	 */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL)
		return -EINVAL;

	/* allocate host */
	if (pdev->dev.of_node) {
		rc = of_property_read_u32(pdev->dev.of_node, "nr-ports",
					  &n_ports);
		if (rc) {
			dev_err(&pdev->dev,
				"error parsing nr-ports property: %d\n", rc);
			return rc;
		}

		if (n_ports <= 0) {
			dev_err(&pdev->dev, "nr-ports must be positive: %d\n",
				n_ports);
			return -EINVAL;
		}

		irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
	} else {
		mv_platform_data = dev_get_platdata(&pdev->dev);
		n_ports = mv_platform_data->n_ports;
		irq = platform_get_irq(pdev, 0);
	}
	if (irq < 0)
		return irq;
	if (!irq)
		return -EINVAL;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);

	if (!host || !hpriv)
		return -ENOMEM;
	hpriv->port_clks = devm_kcalloc(&pdev->dev,
					n_ports, sizeof(struct clk *),
					GFP_KERNEL);
	if (!hpriv->port_clks)
		return -ENOMEM;
	hpriv->port_phys = devm_kcalloc(&pdev->dev,
					n_ports, sizeof(struct phy *),
					GFP_KERNEL);
	if (!hpriv->port_phys)
		return -ENOMEM;
	host->private_data = hpriv;
	hpriv->board_idx = chip_soc;

	host->iomap = NULL;
	hpriv->base = devm_ioremap(&pdev->dev, res->start,
				   resource_size(res));
	if (!hpriv->base)
		return -ENOMEM;

	hpriv->base -= SATAHC0_REG_BASE;

	hpriv->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(hpriv->clk)) {
		dev_notice(&pdev->dev, "cannot get optional clkdev\n");
	} else {
		rc = clk_prepare_enable(hpriv->clk);
		if (rc)
			goto err;
	}

	for (port = 0; port < n_ports; port++) {
		char port_number[16];
		sprintf(port_number, "%d", port);
		hpriv->port_clks[port] = clk_get(&pdev->dev, port_number);
		if (!IS_ERR(hpriv->port_clks[port]))
			clk_prepare_enable(hpriv->port_clks[port]);

		sprintf(port_number, "port%d", port);
		hpriv->port_phys[port] = devm_phy_optional_get(&pdev->dev,
							       port_number);
		if (IS_ERR(hpriv->port_phys[port])) {
			rc = PTR_ERR(hpriv->port_phys[port]);
			hpriv->port_phys[port] = NULL;
			if (rc != -EPROBE_DEFER)
				dev_warn(&pdev->dev, "error getting phy %d", rc);

			/* Cleanup only the initialized ports */
			hpriv->n_ports = port;
			goto err;
		} else
			phy_power_on(hpriv->port_phys[port]);
	}

	/* All the ports have been initialized */
	hpriv->n_ports = n_ports;

	/*
	 * (Re-)program MBUS remapping windows if we are asked to.
	 */
	dram = mv_mbus_dram_info();
	if (dram)
		mv_conf_mbus_windows(hpriv, dram);

	rc = mv_create_dma_pools(hpriv, &pdev->dev);
	if (rc)
		goto err;

	/*
	 * To allow disk hotplug on Armada 370/XP SoCs, the PHY speed must be
	 * updated in the LP_PHY_CTL register.
	 */
	if (pdev->dev.of_node &&
		of_device_is_compatible(pdev->dev.of_node,
					"marvell,armada-370-sata"))
		hpriv->hp_flags |= MV_HP_FIX_LP_PHY_CTL;

	/* initialize adapter */
	rc = mv_init_host(host);
	if (rc)
		goto err;

	dev_info(&pdev->dev, "slots %u ports %d\n",
		 (unsigned)MV_MAX_Q_DEPTH, host->n_ports);

	rc = ata_host_activate(host, irq, mv_interrupt, IRQF_SHARED, &mv6_sht);
	if (!rc)
		return 0;

err:
	if (!IS_ERR(hpriv->clk)) {
		clk_disable_unprepare(hpriv->clk);
		clk_put(hpriv->clk);
	}
	for (port = 0; port < hpriv->n_ports; port++) {
		if (!IS_ERR(hpriv->port_clks[port])) {
			clk_disable_unprepare(hpriv->port_clks[port]);
			clk_put(hpriv->port_clks[port]);
		}
		phy_power_off(hpriv->port_phys[port]);
	}

	return rc;
}
/*
 *	mv_platform_remove - unplug a platform interface
 *	@pdev: platform device
 *
 *	A platform bus SATA device has been unplugged. Perform the needed
 *	cleanup. Also called on module unload for any active devices.
 */
static void mv_platform_remove(struct platform_device *pdev)
{
	struct ata_host *host = platform_get_drvdata(pdev);
	struct mv_host_priv *hpriv = host->private_data;
	int port;

	ata_host_detach(host);

	if (!IS_ERR(hpriv->clk)) {
		clk_disable_unprepare(hpriv->clk);
		clk_put(hpriv->clk);
	}
	for (port = 0; port < host->n_ports; port++) {
		if (!IS_ERR(hpriv->port_clks[port])) {
			clk_disable_unprepare(hpriv->port_clks[port]);
			clk_put(hpriv->port_clks[port]);
		}
		phy_power_off(hpriv->port_phys[port]);
	}
}
#ifdef CONFIG_PM_SLEEP
static int mv_platform_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct ata_host *host = platform_get_drvdata(pdev);

	if (host)
		ata_host_suspend(host, state);
	return 0;
}

static int mv_platform_resume(struct platform_device *pdev)
{
	struct ata_host *host = platform_get_drvdata(pdev);
	const struct mbus_dram_target_info *dram;
	int ret;

	if (host) {
		struct mv_host_priv *hpriv = host->private_data;

		/*
		 * (Re-)program MBUS remapping windows if we are asked to.
		 */
		dram = mv_mbus_dram_info();
		if (dram)
			mv_conf_mbus_windows(hpriv, dram);

		/* initialize adapter */
		ret = mv_init_host(host);
		if (ret) {
			dev_err(&pdev->dev, "Error during HW init\n");
			return ret;
		}
		ata_host_resume(host);
	}

	return 0;
}
#else
#define mv_platform_suspend NULL
#define mv_platform_resume NULL
#endif
#ifdef CONFIG_OF
static const struct of_device_id mv_sata_dt_ids[] = {
	{ .compatible = "marvell,armada-370-sata", },
	{ .compatible = "marvell,orion-sata", },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mv_sata_dt_ids);
#endif
static struct platform_driver mv_platform_driver = {
	.probe		= mv_platform_probe,
	.remove_new	= mv_platform_remove,
	.suspend	= mv_platform_suspend,
	.resume		= mv_platform_resume,
	.driver		= {
		.name		= DRV_NAME,
		.of_match_table	= of_match_ptr(mv_sata_dt_ids),
	},
};
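/*
 * Note: of_match_ptr() evaluates to NULL when CONFIG_OF is disabled, which
 * is why the match table itself is guarded by #ifdef CONFIG_OF above.
 */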
#ifdef CONFIG_PCI
static int mv_pci_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent);
#ifdef CONFIG_PM_SLEEP
static int mv_pci_device_resume(struct pci_dev *pdev);
#endif
static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
	/* RocketRAID 1720/174x have different identifiers */
	{ PCI_VDEVICE(TTI, 0x1720), chip_6042 },
	{ PCI_VDEVICE(TTI, 0x1740), chip_6042 },
	{ PCI_VDEVICE(TTI, 0x1742), chip_6042 },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	/* Adaptec 1430SA */
	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	/* Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	/* Highpoint RocketRAID PCIe series */
	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	{ }			/* terminate list */
};
static struct pci_driver mv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= mv_pci_tbl,
	.probe			= mv_pci_init_one,
	.remove			= ata_pci_remove_one,
#ifdef CONFIG_PM_SLEEP
	.suspend		= ata_pci_device_suspend,
	.resume			= mv_pci_device_resume,
#endif
};

MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
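/*
 * Note: MODULE_DEVICE_TABLE() embeds the ID tables in the module alias
 * information, allowing udev/modprobe to autoload sata_mv when a matching
 * PCI device (or OF node, above) shows up.
 */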
/**
 * mv_print_info - Dump key info to kernel log for perusal.
 * @host: ATA host to print info about
 *
 * FIXME: complete this.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_print_info(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u8 scc;
	const char *scc_s, *gen;

	/* Read the PCI sub-class byte to report whether the chip
	 * presents itself in SCSI or RAID mode.
	 */
	pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
	if (scc == 0)
		scc_s = "SCSI";
	else if (scc == 0x01)
		scc_s = "RAID";
	else
		scc_s = "?";

	if (IS_GEN_I(hpriv))
		gen = "I";
	else if (IS_GEN_II(hpriv))
		gen = "II";
	else if (IS_GEN_IIE(hpriv))
		gen = "IIE";
	else
		gen = "?";

	dev_info(&pdev->dev, "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
		 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
		 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
}
/**
 * mv_pci_init_one - handle a positive probe of a PCI Marvell host
 * @pdev: PCI device found
 * @ent: PCI device ID entry for the matched host
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_pci_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent)
{
	unsigned int board_idx = (unsigned int)ent->driver_data;
	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	int n_ports, port, rc;
	ata_print_version_once(&pdev->dev, DRV_VERSION);

	/* allocate host */
	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;
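	/*
	 * Note: each host controller (HC) block on these chips drives up
	 * to MV_PORTS_PER_HC ports, so the total is derived from the
	 * per-board HC count rather than probed from the hardware.
	 */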
	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;
	hpriv->n_ports = n_ports;
	hpriv->board_idx = board_idx;

	/* acquire resources */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);
	hpriv->base = host->iomap[MV_PRIMARY_BAR];
	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (rc) {
		dev_err(&pdev->dev, "DMA enable failed\n");
		return rc;
	}

	rc = mv_create_dma_pools(hpriv, &pdev->dev);
	if (rc)
		return rc;
	for (port = 0; port < host->n_ports; port++) {
		struct ata_port *ap = host->ports[port];
		void __iomem *port_mmio = mv_port_base(hpriv->base, port);
		unsigned int offset = port_mmio - hpriv->base;

		ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
		ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
	}
	/* initialize adapter */
	rc = mv_init_host(host);
	if (rc)
		return rc;

	/* Enable message-signaled interrupts, if requested */
	if (msi && pci_enable_msi(pdev) == 0)
		hpriv->hp_flags |= MV_HP_FLAG_MSI;

	mv_dump_pci_cfg(pdev, 0x68);
	mv_print_info(host);

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);
	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
}
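/*
 * Note: Gen-I (50xx) chips are bound to mv5_sht, everything newer to
 * mv6_sht; the two templates differ in the queueing and DMA limits
 * advertised to the SCSI midlayer.
 */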
#ifdef CONFIG_PM_SLEEP
static int mv_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = pci_get_drvdata(pdev);
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host);
	if (rc)
		return rc;

	ata_host_resume(host);

	return 0;
}
#endif
#endif /* CONFIG_PCI */
static int __init mv_init(void)
{
	int rc = -ENODEV;

#ifdef CONFIG_PCI
	rc = pci_register_driver(&mv_pci_driver);
	if (rc < 0)
		return rc;
#endif
	rc = platform_driver_register(&mv_platform_driver);

#ifdef CONFIG_PCI
	if (rc < 0)
		pci_unregister_driver(&mv_pci_driver);
#endif

	return rc;
}
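/*
 * Note: registering the PCI driver first and unwinding it if the platform
 * driver fails keeps mv_init() all-or-nothing; mv_exit() unregisters both
 * unconditionally.
 */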
static void __exit mv_exit(void)
{
#ifdef CONFIG_PCI
	pci_unregister_driver(&mv_pci_driver);
#endif
	platform_driver_unregister(&mv_platform_driver);
}
MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:" DRV_NAME);

module_init(mv_init);
module_exit(mv_exit);