/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2008-2009: Marvell Corporation, all rights reserved.
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 *
 * Originally written by Brett Russ.
 * Extensive overhaul and enhancement by Mark Lord <mlord@pobox.com>.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
/*
 * sata_mv TODO list:
 *
 * --> Develop a low-power-consumption strategy, and implement it.
 *
 * --> Add sysfs attributes for per-chip / per-HC IRQ coalescing thresholds.
 *
 * --> [Experiment, Marvell value added] Is it possible to use target
 *       mode to cross-connect two Linux boxes with Marvell cards?  If so,
 *       creating LibATA target mode support would be very interesting.
 *
 *       Target mode, for those without docs, is the ability to directly
 *       connect two SATA ports.
 */
/*
 * 80x1-B2 errata PCI#11:
 *
 * Users of the 6041/6081 Rev.B2 chips (current is C0)
 * should be careful to insert those cards only onto PCI-X bus #0,
 * and only in device slots 0..7, not higher.  The chips may not
 * work correctly otherwise  (note: this is a pretty rare condition).
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/ata_platform.h>
#include <linux/mbus.h>
#include <linux/bitops.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>
#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"1.27"
static int msi;	/* Use PCI msi; either zero (off, default) or non-zero */
module_param(msi, int, S_IRUGO);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");

static int irq_coalescing_io_count;
module_param(irq_coalescing_io_count, int, S_IRUGO);
MODULE_PARM_DESC(irq_coalescing_io_count,
		 "IRQ coalescing I/O count threshold (0..255)");

static int irq_coalescing_usecs;
module_param(irq_coalescing_usecs, int, S_IRUGO);
MODULE_PARM_DESC(irq_coalescing_usecs,
		 "IRQ coalescing time threshold in usecs");
enum {
	/* BAR's are enumerated in terms of pci_resource_start() terms */
	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */

	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */

	/* For use with both IRQ coalescing methods ("all ports" or "per-HC") */
	COAL_CLOCKS_PER_USEC	= 150,		/* for calculating COAL_TIMEs */
	MAX_COAL_TIME_THRESHOLD	= ((1 << 24) - 1), /* internal clocks count */
	MAX_COAL_IO_COUNT	= 255,		/* completed I/O count */
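	/*
	 * Worked example (illustrative): irq_coalescing_usecs=100 maps to
	 * 100 * COAL_CLOCKS_PER_USEC = 15000 internal clocks, comfortably
	 * below MAX_COAL_TIME_THRESHOLD (0xffffff); the 24-bit limit caps
	 * the usable time threshold at about 111848 usecs.
	 */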
	/*
	 * Per-chip ("all ports") interrupt coalescing feature.
	 * This is only for GEN_II / GEN_IIE hardware.
	 *
	 * Coalescing defers the interrupt until either the IO_THRESHOLD
	 * (count of completed I/Os) is met, or the TIME_THRESHOLD is met.
	 */
	MV_COAL_REG_BASE	= 0x18000,
	MV_IRQ_COAL_CAUSE	= (MV_COAL_REG_BASE + 0x08),
	ALL_PORTS_COAL_IRQ	= (1 << 4),	/* all ports irq event */

	MV_IRQ_COAL_IO_THRESHOLD	= (MV_COAL_REG_BASE + 0xcc),
	MV_IRQ_COAL_TIME_THRESHOLD	= (MV_COAL_REG_BASE + 0xd0),
	/*
	 * Registers for the (unused here) transaction coalescing feature:
	 */
	MV_TRAN_COAL_CAUSE_LO	= (MV_COAL_REG_BASE + 0x88),
	MV_TRAN_COAL_CAUSE_HI	= (MV_COAL_REG_BASE + 0x8c),

	MV_SATAHC0_REG_BASE	= 0x20000,
	MV_FLASH_CTL_OFS	= 0x1046c,
	MV_GPIO_PORT_CTL_OFS	= 0x104f0,
	MV_RESET_CFG_OFS	= 0x180d8,
	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,		/* arbiter */
	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,

	MV_MAX_Q_DEPTH		= 32,
	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,
	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary. Size == 256B
	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
	 */
	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
	MV_MAX_SG_CT		= 256,
	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),
	/* Determine hc from 0-7 port: hc = port >> MV_PORT_HC_SHIFT */
	MV_PORT_HC_SHIFT	= 2,
	MV_PORTS_PER_HC		= (1 << MV_PORT_HC_SHIFT), /* 4 */
	/* Determine hc port from 0-7 port: hardport = port & MV_PORT_MASK */
	MV_PORT_MASK		= (MV_PORTS_PER_HC - 1),   /* 3 */
	MV_FLAG_DUAL_HC		= (1 << 30),  /* two SATA Host Controllers */

	MV_COMMON_FLAGS		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO | ATA_FLAG_PIO_POLLING,

	MV_GEN_I_FLAGS		= MV_COMMON_FLAGS | ATA_FLAG_NO_ATAPI,

	MV_GEN_II_FLAGS		= MV_COMMON_FLAGS | ATA_FLAG_NCQ |
				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA,

	MV_GEN_IIE_FLAGS	= MV_GEN_II_FLAGS | ATA_FLAG_AN,
	CRQB_FLAG_READ		= (1 << 0),
	CRQB_TAG_SHIFT		= 1,
	CRQB_IOID_SHIFT		= 6,	/* CRQB Gen-II/IIE IO Id shift */
	CRQB_PMP_SHIFT		= 12,	/* CRQB Gen-II/IIE PMP shift */
	CRQB_HOSTQ_SHIFT	= 17,	/* CRQB Gen-II/IIE HostQueTag shift */
	CRQB_CMD_ADDR_SHIFT	= 8,
	CRQB_CMD_CS		= (0x2 << 11),
	CRQB_CMD_LAST		= (1 << 15),

	CRPB_FLAG_STATUS_SHIFT	= 8,
	CRPB_IOID_SHIFT_6	= 5,	/* CRPB Gen-II  IO Id shift */
	CRPB_IOID_SHIFT_7	= 7,	/* CRPB Gen-IIE IO Id shift */

	EPRD_FLAG_END_OF_TBL	= (1 << 31),
	/* PCI interface registers */

	PCI_COMMAND_OFS		= 0xc00,
	PCI_COMMAND_MWRCOM	= (1 << 4),	/* PCI Master Write Combining */
	PCI_COMMAND_MRDTRIG	= (1 << 7),	/* PCI Master Read Trigger */

	PCI_MAIN_CMD_STS_OFS	= 0xd30,
	STOP_PCI_MASTER		= (1 << 2),
	PCI_MASTER_EMPTY	= (1 << 3),
	GLOB_SFT_RST		= (1 << 4),

	MV_PCI_MODE_OFS		= 0xd00,
	MV_PCI_MODE_MASK	= 0x30,

	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
	MV_PCI_DISC_TIMER	= 0xd04,
	MV_PCI_MSI_TRIGGER	= 0xc38,
	MV_PCI_SERR_MASK	= 0xc28,
	MV_PCI_XBAR_TMOUT_OFS	= 0x1d04,
	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
	MV_PCI_ERR_COMMAND	= 0x1d50,

	PCI_IRQ_CAUSE_OFS	= 0x1d58,
	PCI_IRQ_MASK_OFS	= 0x1d5c,
	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */

	PCIE_IRQ_CAUSE_OFS	= 0x1900,
	PCIE_IRQ_MASK_OFS	= 0x1910,
	PCIE_UNMASK_ALL_IRQS	= 0x40a,	/* assorted bits */
	/* Host Controller Main Interrupt Cause/Mask registers (1 per-chip) */
	PCI_HC_MAIN_IRQ_CAUSE_OFS	= 0x1d60,
	PCI_HC_MAIN_IRQ_MASK_OFS	= 0x1d64,
	SOC_HC_MAIN_IRQ_CAUSE_OFS	= 0x20020,
	SOC_HC_MAIN_IRQ_MASK_OFS	= 0x20024,
	ERR_IRQ			= (1 << 0),	/* shift by (2 * port #) */
	DONE_IRQ		= (1 << 1),	/* shift by (2 * port #) */
	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
	DONE_IRQ_0_3		= 0x000000aa,	/* DONE_IRQ ports 0,1,2,3 */
	DONE_IRQ_4_7		= (DONE_IRQ_0_3 << HC_SHIFT),  /* 4,5,6,7 */
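	/*
	 * Worked example (illustrative): DONE_IRQ_0_3 = 0xaa sets bits
	 * 1, 3, 5 and 7, i.e. DONE_IRQ << (2 * port) for ports 0..3;
	 * shifting by HC_SHIFT (9) yields the same pattern for HC1's
	 * ports 4..7 in bits 10, 12, 14 and 16.
	 */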
	TRAN_COAL_LO_DONE	= (1 << 19),	/* transaction coalescing */
	TRAN_COAL_HI_DONE	= (1 << 20),	/* transaction coalescing */
	PORTS_0_3_COAL_DONE	= (1 << 8),	/* HC0 IRQ coalescing */
	PORTS_4_7_COAL_DONE	= (1 << 17),	/* HC1 IRQ coalescing */
	ALL_PORTS_COAL_DONE	= (1 << 21),	/* GEN_II(E) IRQ coalescing */
	GPIO_INT		= (1 << 22),
	SELF_INT		= (1 << 23),
	TWSI_INT		= (1 << 24),
	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
	HC_MAIN_RSVD_5		= (0x1fff << 19), /* bits 31-19 */
	HC_MAIN_RSVD_SOC	= (0x3fffffb << 6),	/* bits 31-9, 7-6 */
	/* SATAHC registers */
	HC_IRQ_CAUSE_OFS	= 0x14,
	DMA_IRQ			= (1 << 0),	/* shift by port # */
	HC_COAL_IRQ		= (1 << 4),	/* IRQ coalescing */
	DEV_IRQ			= (1 << 8),	/* shift by port # */
	/*
	 * Per-HC (Host-Controller) interrupt coalescing feature.
	 * This is present on all chip generations.
	 *
	 * Coalescing defers the interrupt until either the IO_THRESHOLD
	 * (count of completed I/Os) is met, or the TIME_THRESHOLD is met.
	 */
	HC_IRQ_COAL_IO_THRESHOLD_OFS	= 0x000c,
	HC_IRQ_COAL_TIME_THRESHOLD_OFS	= 0x0010,

	SOC_LED_CTRL_OFS	= 0x2c,
	SOC_LED_CTRL_BLINK	= (1 << 0),	/* Active LED blink */
	SOC_LED_CTRL_ACT_PRESENCE = (1 << 2),	/* Multiplex dev presence */
						/*  with dev activity LED */
	/* Shadow block registers */
	SHD_BLK_OFS		= 0x100,
	SHD_CTL_AST_OFS		= 0x20,		/* ofs from SHD_BLK_OFS */

	/* SATA registers */
	SATA_STATUS_OFS		= 0x300,  /* ctrl, err regs follow status */
	SATA_ACTIVE_OFS		= 0x350,
	SATA_FIS_IRQ_CAUSE_OFS	= 0x364,
	SATA_FIS_IRQ_AN		= (1 << 9),	/* async notification */
	LTMODE_OFS		= 0x30c,
	LTMODE_BIT8		= (1 << 8),	/* unknown, but necessary */

	PHY_MODE4_CFG_MASK	= 0x00000003,	/* phy internal config field */
	PHY_MODE4_CFG_VALUE	= 0x00000001,	/* phy internal config field */
	PHY_MODE4_RSVD_ZEROS	= 0x5de3fffa,	/* Gen2e always write zeros */
	PHY_MODE4_RSVD_ONES	= 0x00000005,	/* Gen2e always write ones */
	SATA_IFCTL_OFS		= 0x344,
	SATA_TESTCTL_OFS	= 0x348,
	SATA_IFSTAT_OFS		= 0x34c,
	VENDOR_UNIQUE_FIS_OFS	= 0x35c,

	FISCFG_OFS		= 0x360,
	FISCFG_WAIT_DEV_ERR	= (1 << 8),	/* wait for host on DevErr */
	FISCFG_SINGLE_SYNC	= (1 << 16),	/* SYNC on DMA activation */
	MV5_LTMODE_OFS		= 0x30,
	MV5_PHY_CTL_OFS		= 0x0C,
	SATA_INTERFACE_CFG_OFS	= 0x050,

	MV_M2_PREAMP_MASK	= 0x7e0,
	/* Port registers */
	EDMA_CFG_OFS		= 0,
	EDMA_CFG_Q_DEPTH	= 0x1f,		/* max device queue depth */
	EDMA_CFG_NCQ		= (1 << 5),	/* for R/W FPDMA queued */
	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),	/* continue on error */
	EDMA_CFG_RD_BRST_EXT	= (1 << 11),	/* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),	/* write buffer 512B */
	EDMA_CFG_EDMA_FBS	= (1 << 16),	/* EDMA FIS-Based Switching */
	EDMA_CFG_FBS		= (1 << 26),	/* FIS-Based Switching */
	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
	EDMA_ERR_D_PAR		= (1 << 0),	/* UDMA data parity err */
	EDMA_ERR_PRD_PAR	= (1 << 1),	/* UDMA PRD parity err */
	EDMA_ERR_DEV		= (1 << 2),	/* device error */
	EDMA_ERR_DEV_DCON	= (1 << 3),	/* device disconnect */
	EDMA_ERR_DEV_CON	= (1 << 4),	/* device connected */
	EDMA_ERR_SERR		= (1 << 5),	/* SError bits [WBDST] raised */
	EDMA_ERR_SELF_DIS	= (1 << 7),	/* Gen II/IIE self-disable */
	EDMA_ERR_SELF_DIS_5	= (1 << 8),	/* Gen I self-disable */
	EDMA_ERR_BIST_ASYNC	= (1 << 8),	/* BIST FIS or Async Notify */
	EDMA_ERR_TRANS_IRQ_7	= (1 << 8),	/* Gen IIE transprt layer irq */
	EDMA_ERR_CRQB_PAR	= (1 << 9),	/* CRQB parity error */
	EDMA_ERR_CRPB_PAR	= (1 << 10),	/* CRPB parity error */
	EDMA_ERR_INTRL_PAR	= (1 << 11),	/* internal parity error */
	EDMA_ERR_IORDY		= (1 << 12),	/* IORdy timeout */

	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),	/* link ctrl rx error */
	EDMA_ERR_LNK_CTRL_RX_0	= (1 << 13),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_RX_1	= (1 << 14),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),	/* fatal: caught SYNC */
	EDMA_ERR_LNK_CTRL_RX_3	= (1 << 16),	/* transient: FIS rx err */

	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),	/* link data rx error */

	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),	/* link ctrl tx error */
	EDMA_ERR_LNK_CTRL_TX_0	= (1 << 21),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_TX_1	= (1 << 22),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_TX_2	= (1 << 23),	/* transient: caught SYNC */
	EDMA_ERR_LNK_CTRL_TX_3	= (1 << 24),	/* transient: caught DMAT */
	EDMA_ERR_LNK_CTRL_TX_4	= (1 << 25),	/* transient: FIS collision */

	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),	/* link data tx error */

	EDMA_ERR_TRANS_PROTO	= (1 << 31),	/* transport protocol error */
	EDMA_ERR_OVERRUN_5	= (1 << 5),
	EDMA_ERR_UNDERRUN_5	= (1 << 6),

	EDMA_ERR_IRQ_TRANSIENT  = EDMA_ERR_LNK_CTRL_RX_0 |
				  EDMA_ERR_LNK_CTRL_RX_1 |
				  EDMA_ERR_LNK_CTRL_RX_3 |
				  EDMA_ERR_LNK_CTRL_TX,
	EDMA_EH_FREEZE		= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_SERR |
				  EDMA_ERR_SELF_DIS |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY |
				  EDMA_ERR_LNK_CTRL_RX_2 |
				  EDMA_ERR_LNK_DATA_RX |
				  EDMA_ERR_LNK_DATA_TX |
				  EDMA_ERR_TRANS_PROTO,

	EDMA_EH_FREEZE_5	= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_OVERRUN_5 |
				  EDMA_ERR_UNDERRUN_5 |
				  EDMA_ERR_SELF_DIS_5 |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY,
	EDMA_REQ_Q_BASE_HI_OFS	= 0x10,
	EDMA_REQ_Q_IN_PTR_OFS	= 0x14,		/* also contains BASE_LO */

	EDMA_REQ_Q_OUT_PTR_OFS	= 0x18,
	EDMA_REQ_Q_PTR_SHIFT	= 5,

	EDMA_RSP_Q_BASE_HI_OFS	= 0x1c,
	EDMA_RSP_Q_IN_PTR_OFS	= 0x20,
	EDMA_RSP_Q_OUT_PTR_OFS	= 0x24,		/* also contains BASE_LO */
	EDMA_RSP_Q_PTR_SHIFT	= 3,
	EDMA_CMD_OFS		= 0x28,		/* EDMA command register */
	EDMA_EN			= (1 << 0),	/* enable EDMA */
	EDMA_DS			= (1 << 1),	/* disable EDMA; self-negated */
	EDMA_RESET		= (1 << 2),	/* reset eng/trans/link/phy */

	EDMA_STATUS_OFS		= 0x30,		/* EDMA engine status */
	EDMA_STATUS_CACHE_EMPTY	= (1 << 6),	/* GenIIe command cache empty */
	EDMA_STATUS_IDLE	= (1 << 7),	/* GenIIe EDMA enabled/idle */

	EDMA_IORDY_TMOUT_OFS	= 0x34,
	EDMA_ARB_CFG_OFS	= 0x38,

	EDMA_HALTCOND_OFS	= 0x60,		/* GenIIe halt conditions */
	EDMA_UNKNOWN_RSVD_OFS	= 0x6C,		/* GenIIe unknown/reserved */

	BMDMA_CMD_OFS		= 0x224,	/* bmdma command register */
	BMDMA_STATUS_OFS	= 0x228,	/* bmdma status register */
	BMDMA_PRD_LOW_OFS	= 0x22c,	/* bmdma PRD addr 31:0 */
	BMDMA_PRD_HIGH_OFS	= 0x230,	/* bmdma PRD addr 63:32 */
	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI		= (1 << 0),
	MV_HP_ERRATA_50XXB0	= (1 << 1),
	MV_HP_ERRATA_50XXB2	= (1 << 2),
	MV_HP_ERRATA_60X1B2	= (1 << 3),
	MV_HP_ERRATA_60X1C0	= (1 << 4),
	MV_HP_GEN_I		= (1 << 6),	/* Generation I: 50xx */
	MV_HP_GEN_II		= (1 << 7),	/* Generation II: 60xx */
	MV_HP_GEN_IIE		= (1 << 8),	/* Generation IIE: 6042/7042 */
	MV_HP_PCIE		= (1 << 9),	/* PCIe bus/regs: 7042 */
	MV_HP_CUT_THROUGH	= (1 << 10),	/* can use EDMA cut-through */
	MV_HP_FLAG_SOC		= (1 << 11),	/* SystemOnChip, no PCI */
	MV_HP_QUIRK_LED_BLINK_EN = (1 << 12),	/* is led blinking enabled? */

	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN	= (1 << 0),	/* is EDMA engine enabled? */
	MV_PP_FLAG_NCQ_EN	= (1 << 1),	/* is EDMA set up for NCQ? */
	MV_PP_FLAG_FBS_EN	= (1 << 2),	/* is EDMA set up for FBS? */
	MV_PP_FLAG_DELAYED_EH	= (1 << 3),	/* delayed dev err handling */
	MV_PP_FLAG_FAKE_ATA_BUSY = (1 << 4),	/* ignore initial ATA_DRDY */
};
#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
#define IS_PCIE(hpriv) ((hpriv)->hp_flags & MV_HP_PCIE)
#define IS_SOC(hpriv) ((hpriv)->hp_flags & MV_HP_FLAG_SOC)

#define WINDOW_CTRL(i)		(0x20030 + ((i) << 4))
#define WINDOW_BASE(i)		(0x20034 + ((i) << 4))
enum {
	/* DMA boundary 0xffff is required by the s/g splitting
	 * we need on /length/ in mv_fill_sg().
	 */
	MV_DMA_BOUNDARY		= 0xffffU,

	/* mask of register bits containing lower 32 bits
	 * of EDMA request queue DMA address
	 */
	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

	/* ditto, for response queue */
	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
};
enum chip_type {
	chip_504x,
	chip_508x,
	chip_5080,
	chip_604x,
	chip_608x,
	chip_6042,
	chip_7042,
	chip_soc,
};

/* Command ReQuest Block: 32B */
struct mv_crqb {
	__le32			sg_addr;
	__le32			sg_addr_hi;
	__le16			ctrl_flags;
	__le16			ata_cmd[11];
};

struct mv_crqb_iie {
	__le32			addr;
	__le32			addr_hi;
	__le32			flags;
	__le32			len;
	__le32			ata_cmd[4];
};

/* Command ResPonse Block: 8B */
struct mv_crpb {
	__le16			id;
	__le16			flags;
	__le32			tmstmp;
};

/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
struct mv_sg {
	__le32			addr;
	__le32			flags_size;
	__le32			addr_hi;
	__le32			reserved;
};
/*
 * We keep a local cache of a few frequently accessed port
 * registers here, to avoid having to read them (very slow)
 * when switching between EDMA and non-EDMA modes.
 */
struct mv_cached_regs {
	u32			fiscfg;
	u32			ltmode;
	u32			haltcond;
	u32			unknown_rsvd;
};

struct mv_port_priv {
	struct mv_crqb		*crqb;
	dma_addr_t		crqb_dma;
	struct mv_crpb		*crpb;
	dma_addr_t		crpb_dma;
	struct mv_sg		*sg_tbl[MV_MAX_Q_DEPTH];
	dma_addr_t		sg_tbl_dma[MV_MAX_Q_DEPTH];

	unsigned int		req_idx;
	unsigned int		resp_idx;

	u32			pp_flags;
	struct mv_cached_regs	cached;
	unsigned int		delayed_eh_pmp_map;
};
struct mv_port_signal {
	u32			amps;
	u32			pre;
};

struct mv_host_priv {
	u32			hp_flags;
	u32			main_irq_mask;
	struct mv_port_signal	signal[8];
	const struct mv_hw_ops	*ops;
	int			n_ports;
	void __iomem		*base;
	void __iomem		*main_irq_cause_addr;
	void __iomem		*main_irq_mask_addr;
	u32			irq_cause_ofs;
	u32			irq_mask_ofs;
	u32			unmask_all_irqs;
	/*
	 * These consistent DMA memory pools give us guaranteed
	 * alignment for hardware-accessed data structures,
	 * and less memory waste in accomplishing the alignment.
	 */
	struct dma_pool		*crqb_pool;
	struct dma_pool		*crpb_pool;
	struct dma_pool		*sg_tbl_pool;
};
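/*
 * Illustrative sketch (assumed, based on the standard dmapool API; the
 * pool setup itself falls outside this excerpt): creating each pool with
 * size == alignment guarantees the CRQB/CRPB/ePRD alignment rules stated
 * earlier, e.g.:
 *
 *	hpriv->crqb_pool = dma_pool_create("crqb_q", dev,
 *					   MV_CRQB_Q_SZ, MV_CRQB_Q_SZ, 0);
 */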
struct mv_hw_ops {
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
};
static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val);
static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val);
static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val);
static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static int mv_qc_defer(struct ata_queued_cmd *qc);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline);
static void mv_eh_freeze(struct ata_port *ap);
static void mv_eh_thaw(struct ata_port *ap);
static void mv6_dev_config(struct ata_device *dev);

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
				      void __iomem *mmio);
static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
				      void __iomem *mmio);
static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
				  void __iomem *mmio, unsigned int n_hc);
static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
				      void __iomem *mmio);
static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no);
static int mv_stop_edma(struct ata_port *ap);
static int mv_stop_edma_engine(void __iomem *port_mmio);
static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma);

static void mv_pmp_select(struct ata_port *ap, int pmp);
static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
			    unsigned long deadline);
static int  mv_softreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline);
static void mv_pmp_error_handler(struct ata_port *ap);
static void mv_process_crpb_entries(struct ata_port *ap,
					struct mv_port_priv *pp);

static void mv_sff_irq_clear(struct ata_port *ap);
static int mv_check_atapi_dma(struct ata_queued_cmd *qc);
static void mv_bmdma_setup(struct ata_queued_cmd *qc);
static void mv_bmdma_start(struct ata_queued_cmd *qc);
static void mv_bmdma_stop(struct ata_queued_cmd *qc);
static u8   mv_bmdma_status(struct ata_port *ap);
static u8 mv_sff_check_status(struct ata_port *ap);
/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
 * because we have to allow room for worst case splitting of
 * PRDs for 64K boundaries in mv_fill_sg().
 */
static struct scsi_host_template mv5_sht = {
	ATA_BASE_SHT(DRV_NAME),
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.dma_boundary		= MV_DMA_BOUNDARY,
};

static struct scsi_host_template mv6_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	.can_queue		= MV_MAX_Q_DEPTH - 1,
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.dma_boundary		= MV_DMA_BOUNDARY,
};
static struct ata_port_operations mv5_ops = {
	.inherits		= &ata_sff_port_ops,

	.lost_interrupt		= ATA_OP_NULL,

	.qc_defer		= mv_qc_defer,
	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,

	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,
	.hardreset		= mv_hardreset,
	.error_handler		= ata_std_error_handler, /* avoid SFF EH */
	.post_internal_cmd	= ATA_OP_NULL,

	.scr_read		= mv5_scr_read,
	.scr_write		= mv5_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

static struct ata_port_operations mv6_ops = {
	.inherits		= &mv5_ops,
	.dev_config		= mv6_dev_config,
	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.pmp_hardreset		= mv_pmp_hardreset,
	.pmp_softreset		= mv_softreset,
	.softreset		= mv_softreset,
	.error_handler		= mv_pmp_error_handler,

	.sff_check_status	= mv_sff_check_status,
	.sff_irq_clear		= mv_sff_irq_clear,
	.check_atapi_dma	= mv_check_atapi_dma,
	.bmdma_setup		= mv_bmdma_setup,
	.bmdma_start		= mv_bmdma_start,
	.bmdma_stop		= mv_bmdma_stop,
	.bmdma_status		= mv_bmdma_status,
};

static struct ata_port_operations mv_iie_ops = {
	.inherits		= &mv6_ops,
	.dev_config		= ATA_OP_NULL,
	.qc_prep		= mv_qc_prep_iie,
};
static const struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.flags		= MV_GEN_I_FLAGS,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_508x */
		.flags		= MV_GEN_I_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_5080 */
		.flags		= MV_GEN_I_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_604x */
		.flags		= MV_GEN_II_FLAGS,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_608x */
		.flags		= MV_GEN_II_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_6042 */
		.flags		= MV_GEN_IIE_FLAGS,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_7042 */
		.flags		= MV_GEN_IIE_FLAGS,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_soc */
		.flags		= MV_GEN_IIE_FLAGS,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
};
static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
	/* RocketRAID 1720/174x have different identifiers */
	{ PCI_VDEVICE(TTI, 0x1720), chip_6042 },
	{ PCI_VDEVICE(TTI, 0x1740), chip_6042 },
	{ PCI_VDEVICE(TTI, 0x1742), chip_6042 },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	/* Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	/* Highpoint RocketRAID PCIe series */
	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	{ }			/* terminate list */
};
static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,
};

static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
};

static const struct mv_hw_ops mv_soc_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv_soc_enable_leds,
	.read_preamp		= mv_soc_read_preamp,
	.reset_hc		= mv_soc_reset_hc,
	.reset_flash		= mv_soc_reset_flash,
	.reset_bus		= mv_soc_reset_bus,
};
/*
 * Functions
 */

static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}

static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}

static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}
/*
 * Consolidate some rather tricky bit shift calculations.
 * This is hot-path stuff, so not a function.
 * Simple code, with two return values, so macro rather than inline.
 *
 * port is the sole input, in range 0..7.
 * shift is one output, for use with main_irq_cause / main_irq_mask registers.
 * hardport is the other output, in range 0..3.
 *
 * Note that port and hardport may be the same variable in some cases.
 */
#define MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport)	\
{								\
	shift    = mv_hc_from_port(port) * HC_SHIFT;		\
	hardport = mv_hardport_from_port(port);			\
	shift   += hardport * 2;				\
}
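/*
 * Worked example (illustrative): for port == 6, mv_hc_from_port() gives
 * hc = 6 >> 2 = 1 and mv_hardport_from_port() gives hardport = 6 & 3 = 2,
 * so shift = 1 * HC_SHIFT + 2 * 2 = 13.  That port's ERR_IRQ and DONE_IRQ
 * bits then live at (1 << 13) and (1 << 14) in the main cause/mask regs.
 */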
static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}

static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
{
	return mv_hc_base(base, mv_hc_from_port(port));
}

static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
	return  mv_hc_base_from_port(base, port) +
		MV_SATAHC_ARBTR_REG_SZ +
		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}
static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
{
	void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
	unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;

	return hc_mmio + ofs;
}

static inline void __iomem *mv_host_base(struct ata_host *host)
{
	struct mv_host_priv *hpriv = host->private_data;
	return hpriv->base;
}

static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(mv_host_base(ap->host), ap->port_no);
}

static inline int mv_get_hc_count(unsigned long port_flags)
{
	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
}
/**
 *	mv_save_cached_regs - (re-)initialize cached port registers
 *	@ap: the port whose registers we are caching
 *
 *	Initialize the local cache of port registers,
 *	so that reading them over and over again can
 *	be avoided on the hotter paths of this driver.
 *	This saves a few microseconds each time we switch
 *	to/from EDMA mode to perform (eg.) a drive cache flush.
 */
static void mv_save_cached_regs(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;

	pp->cached.fiscfg = readl(port_mmio + FISCFG_OFS);
	pp->cached.ltmode = readl(port_mmio + LTMODE_OFS);
	pp->cached.haltcond = readl(port_mmio + EDMA_HALTCOND_OFS);
	pp->cached.unknown_rsvd = readl(port_mmio + EDMA_UNKNOWN_RSVD_OFS);
}
/**
 *	mv_write_cached_reg - write to a cached port register
 *	@addr: hardware address of the register
 *	@old: pointer to cached value of the register
 *	@new: new value for the register
 *
 *	Write a new value to a cached register,
 *	but only if the value is different from before.
 */
static inline void mv_write_cached_reg(void __iomem *addr, u32 *old, u32 new)
{
	if (new != *old) {
		*old = new;
		writel(new, addr);
	}
}
static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

	/*
	 * initialize request queue
	 */
	pp->req_idx &= MV_MAX_Q_DEPTH_MASK;	/* paranoia */
	index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;

	WARN_ON(pp->crqb_dma & 0x3ff);
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
	writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	/*
	 * initialize response queue
	 */
	pp->resp_idx &= MV_MAX_Q_DEPTH_MASK;	/* paranoia */
	index = pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT;

	WARN_ON(pp->crpb_dma & 0xff);
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
	writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
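/*
 * Illustrative encoding note (not from the original source): the IN/OUT
 * pointer registers multiplex the queue base with the ring index.  For
 * example, with a 1KB-aligned crqb_dma and req_idx == 3, the value written
 * is (crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | (3 << EDMA_REQ_Q_PTR_SHIFT),
 * i.e. the index occupies low bits that the alignment guarantees are zero.
 */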
static void mv_write_main_irq_mask(u32 mask, struct mv_host_priv *hpriv)
{
	/*
	 * When writing to the main_irq_mask in hardware,
	 * we must ensure exclusivity between the interrupt coalescing bits
	 * and the corresponding individual port DONE_IRQ bits.
	 *
	 * Note that this register is really an "IRQ enable" register,
	 * not an "IRQ mask" register as Marvell's naming might suggest.
	 */
	if (mask & (ALL_PORTS_COAL_DONE | PORTS_0_3_COAL_DONE))
		mask &= ~DONE_IRQ_0_3;
	if (mask & (ALL_PORTS_COAL_DONE | PORTS_4_7_COAL_DONE))
		mask &= ~DONE_IRQ_4_7;
	writelfl(mask, hpriv->main_irq_mask_addr);
}
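/*
 * Worked example (illustrative): if mask has ALL_PORTS_COAL_DONE set
 * together with the individual DONE_IRQ bits for ports 0..7, the two
 * clauses above strip DONE_IRQ_0_3 (0xaa) and DONE_IRQ_4_7 (0xaa << 9),
 * leaving only the coalesced "done" event enabled for those ports.
 */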
static void mv_set_main_irq_mask(struct ata_host *host,
				 u32 disable_bits, u32 enable_bits)
{
	struct mv_host_priv *hpriv = host->private_data;
	u32 old_mask, new_mask;

	old_mask = hpriv->main_irq_mask;
	new_mask = (old_mask & ~disable_bits) | enable_bits;
	if (new_mask != old_mask) {
		hpriv->main_irq_mask = new_mask;
		mv_write_main_irq_mask(new_mask, hpriv);
	}
}
static void mv_enable_port_irqs(struct ata_port *ap,
				     unsigned int port_bits)
{
	unsigned int shift, hardport, port = ap->port_no;
	u32 disable_bits, enable_bits;

	MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);

	disable_bits = (DONE_IRQ | ERR_IRQ) << shift;
	enable_bits  = port_bits << shift;
	mv_set_main_irq_mask(ap->host, disable_bits, enable_bits);
}
static void mv_clear_and_enable_port_irqs(struct ata_port *ap,
					  void __iomem *port_mmio,
					  unsigned int port_irqs)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	int hardport = mv_hardport_from_port(ap->port_no);
	void __iomem *hc_mmio = mv_hc_base_from_port(
				mv_host_base(ap->host), ap->port_no);
	u32 hc_irq_cause;

	/* clear EDMA event indicators, if any */
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* clear pending irq events */
	hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport);
	writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	/* clear FIS IRQ Cause */
	if (IS_GEN_IIE(hpriv))
		writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);

	mv_enable_port_irqs(ap, port_irqs);
}
static void mv_set_irq_coalescing(struct ata_host *host,
				  unsigned int count, unsigned int usecs)
{
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base, *hc_mmio;
	u32 coal_enable = 0;
	unsigned long flags;
	unsigned int clks, is_dual_hc = hpriv->n_ports > MV_PORTS_PER_HC;
	const u32 coal_disable = PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
							ALL_PORTS_COAL_DONE;

	/* Disable IRQ coalescing if either threshold is zero */
	if (!usecs || !count) {
		clks = count = 0;
	} else {
		/* Respect maximum limits of the hardware */
		clks = usecs * COAL_CLOCKS_PER_USEC;
		if (clks > MAX_COAL_TIME_THRESHOLD)
			clks = MAX_COAL_TIME_THRESHOLD;
		if (count > MAX_COAL_IO_COUNT)
			count = MAX_COAL_IO_COUNT;
	}

	spin_lock_irqsave(&host->lock, flags);
	mv_set_main_irq_mask(host, coal_disable, 0);

	if (is_dual_hc && !IS_GEN_I(hpriv)) {
		/*
		 * GEN_II/GEN_IIE with dual host controllers:
		 * one set of global thresholds for the entire chip.
		 */
		writel(clks,  mmio + MV_IRQ_COAL_TIME_THRESHOLD);
		writel(count, mmio + MV_IRQ_COAL_IO_THRESHOLD);
		/* clear leftover coal IRQ bit */
		writel(~ALL_PORTS_COAL_IRQ, mmio + MV_IRQ_COAL_CAUSE);
		if (count)
			coal_enable = ALL_PORTS_COAL_DONE;
		clks = count = 0; /* force clearing of regular regs below */
	}

	/*
	 * All chips: independent thresholds for each HC on the chip.
	 */
	hc_mmio = mv_hc_base_from_port(mmio, 0);
	writel(clks,  hc_mmio + HC_IRQ_COAL_TIME_THRESHOLD_OFS);
	writel(count, hc_mmio + HC_IRQ_COAL_IO_THRESHOLD_OFS);
	writel(~HC_COAL_IRQ, hc_mmio + HC_IRQ_CAUSE_OFS);
	if (count)
		coal_enable |= PORTS_0_3_COAL_DONE;
	if (is_dual_hc) {
		hc_mmio = mv_hc_base_from_port(mmio, MV_PORTS_PER_HC);
		writel(clks,  hc_mmio + HC_IRQ_COAL_TIME_THRESHOLD_OFS);
		writel(count, hc_mmio + HC_IRQ_COAL_IO_THRESHOLD_OFS);
		writel(~HC_COAL_IRQ, hc_mmio + HC_IRQ_CAUSE_OFS);
		if (count)
			coal_enable |= PORTS_4_7_COAL_DONE;
	}

	mv_set_main_irq_mask(host, 0, coal_enable);
	spin_unlock_irqrestore(&host->lock, flags);
}
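/*
 * Usage sketch (illustrative): the init path would apply the user's
 * module parameters with a call such as
 *
 *	mv_set_irq_coalescing(host, irq_coalescing_io_count,
 *				    irq_coalescing_usecs);
 *
 * e.g. (32, 100) programs 32 completed I/Os or 15000 internal clocks,
 * whichever is reached first, before raising the coalesced DONE irq.
 */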
/**
 *	mv_start_edma - Enable eDMA engine
 *	@base: port base address
 *	@pp: port private data
 *
 *	Verify the local cache of the eDMA state is accurate with a
 *	WARN_ON.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_start_edma(struct ata_port *ap, void __iomem *port_mmio,
			 struct mv_port_priv *pp, u8 protocol)
{
	int want_ncq = (protocol == ATA_PROT_NCQ);

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
		if (want_ncq != using_ncq)
			mv_stop_edma(ap);
	}
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		struct mv_host_priv *hpriv = ap->host->private_data;

		mv_edma_cfg(ap, want_ncq, 1);

		mv_set_edma_ptrs(port_mmio, hpriv, pp);
		mv_clear_and_enable_port_irqs(ap, port_mmio, DONE_IRQ|ERR_IRQ);

		writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
}
static void mv_wait_for_edma_empty_idle(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	const u32 empty_idle = (EDMA_STATUS_CACHE_EMPTY | EDMA_STATUS_IDLE);
	const int per_loop = 5, timeout = (15 * 1000 / per_loop);
	int i;

	/*
	 * Wait for the EDMA engine to finish transactions in progress.
	 * No idea what a good "timeout" value might be, but measurements
	 * indicate that it often requires hundreds of microseconds
	 * with two drives in-use.  So we use the 15msec value above
	 * as a rough guess at what even more drives might require.
	 */
	for (i = 0; i < timeout; ++i) {
		u32 edma_stat = readl(port_mmio + EDMA_STATUS_OFS);
		if ((edma_stat & empty_idle) == empty_idle)
			break;
		udelay(per_loop);
	}
	/* ata_port_printk(ap, KERN_INFO, "%s: %u+ usecs\n", __func__, i); */
}
/**
 *	mv_stop_edma_engine - Disable eDMA engine
 *	@port_mmio: io base address
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int mv_stop_edma_engine(void __iomem *port_mmio)
{
	int i;

	/* Disable eDMA.  The disable bit auto clears. */
	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	/* Wait for the chip to confirm eDMA is off. */
	for (i = 10000; i > 0; i--) {
		u32 reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(reg & EDMA_EN))
			return 0;
		udelay(10);
	}
	return -EIO;
}
static int mv_stop_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	int err = 0;

	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
		return 0;
	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	mv_wait_for_edma_empty_idle(ap);
	if (mv_stop_edma_engine(port_mmio)) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
		err = -EIO;
	}
	mv_edma_cfg(ap, 0, 0);
	return err;
}
#ifdef ATA_DEBUG
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int b, w;
	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
}
#endif
static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	int b, w;
	u32 dw;
	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			(void) pci_read_config_dword(pdev, b, &dw);
			printk("%08x ", dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
#endif
}
static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (0 > port) {
		start_hc = start_port = 0;
		num_ports = 8;		/* shld be benign for 4 port devs */
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
#endif
}
static unsigned int mv_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_CONTROL:
	case SCR_ERROR:
		ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
		break;
	case SCR_ACTIVE:
		ofs = SATA_ACTIVE_OFS;   /* active is not with the others */
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}
static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		*val = readl(mv_ap_base(link->ap) + ofs);
		return 0;
	} else
		return -EINVAL;
}

static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		writelfl(val, mv_ap_base(link->ap) + ofs);
		return 0;
	} else
		return -EINVAL;
}
static void mv6_dev_config(struct ata_device *adev)
{
	/*
	 * Deal with Gen-II ("mv6") hardware quirks/restrictions:
	 *
	 * Gen-II does not support NCQ over a port multiplier
	 *  (no FIS-based switching).
	 */
	if (adev->flags & ATA_DFLAG_NCQ) {
		if (sata_pmp_attached(adev->link->ap)) {
			adev->flags &= ~ATA_DFLAG_NCQ;
			ata_dev_printk(adev, KERN_INFO,
				"NCQ disabled for command-based switching\n");
		}
	}
}
static int mv_qc_defer(struct ata_queued_cmd *qc)
{
	struct ata_link *link = qc->dev->link;
	struct ata_port *ap = link->ap;
	struct mv_port_priv *pp = ap->private_data;

	/*
	 * Don't allow new commands if we're in a delayed EH state
	 * for NCQ and/or FIS-based switching.
	 */
	if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
		return ATA_DEFER_PORT;
	/*
	 * If the port is completely idle, then allow the new qc.
	 */
	if (ap->nr_active_links == 0)
		return 0;

	/*
	 * The port is operating in host queuing mode (EDMA) with NCQ
	 * enabled, allow multiple NCQ commands.  EDMA also allows
	 * queueing multiple DMA commands but libata core currently
	 * doesn't allow it.
	 */
	if ((pp->pp_flags & MV_PP_FLAG_EDMA_EN) &&
	    (pp->pp_flags & MV_PP_FLAG_NCQ_EN) && ata_is_ncq(qc->tf.protocol))
		return 0;

	return ATA_DEFER_PORT;
}
static void mv_config_fbs(struct ata_port *ap, int want_ncq, int want_fbs)
{
	struct mv_port_priv *pp = ap->private_data;
	void __iomem *port_mmio;

	u32 fiscfg,   *old_fiscfg   = &pp->cached.fiscfg;
	u32 ltmode,   *old_ltmode   = &pp->cached.ltmode;
	u32 haltcond, *old_haltcond = &pp->cached.haltcond;

	ltmode   = *old_ltmode & ~LTMODE_BIT8;
	haltcond = *old_haltcond | EDMA_ERR_DEV;

	if (want_fbs) {
		fiscfg = *old_fiscfg | FISCFG_SINGLE_SYNC;
		ltmode = *old_ltmode | LTMODE_BIT8;
		if (want_ncq)
			haltcond &= ~EDMA_ERR_DEV;
		else
			fiscfg |=  FISCFG_WAIT_DEV_ERR;
	} else {
		fiscfg = *old_fiscfg & ~(FISCFG_SINGLE_SYNC | FISCFG_WAIT_DEV_ERR);
	}

	port_mmio = mv_ap_base(ap);
	mv_write_cached_reg(port_mmio + FISCFG_OFS, old_fiscfg, fiscfg);
	mv_write_cached_reg(port_mmio + LTMODE_OFS, old_ltmode, ltmode);
	mv_write_cached_reg(port_mmio + EDMA_HALTCOND_OFS, old_haltcond, haltcond);
}
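/*
 * Summary of the cases above (illustrative):
 *
 *	want_fbs  want_ncq  fiscfg                    ltmode      haltcond
 *	   0         x      both FBS bits cleared     bit8 clear  halt on DevErr
 *	   1         0      SINGLE_SYNC|WAIT_DEV_ERR  bit8 set    halt on DevErr
 *	   1         1      SINGLE_SYNC               bit8 set    DevErr ignored
 */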
static void mv_60x1_errata_sata25(struct ata_port *ap, int want_ncq)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	u32 old, new;

	/* workaround for 88SX60x1 FEr SATA#25 (part 1) */
	old = readl(hpriv->base + MV_GPIO_PORT_CTL_OFS);
	if (want_ncq)
		new = old | (1 << 22);
	else
		new = old & ~(1 << 22);
	if (new != old)
		writel(new, hpriv->base + MV_GPIO_PORT_CTL_OFS);
}
/**
 *	mv_bmdma_enable - set a magic bit on GEN_IIE to allow bmdma
 *	@ap: Port being initialized
 *
 *	There are two DMA modes on these chips:  basic DMA, and EDMA.
 *
 *	Bit-0 of the "EDMA RESERVED" register enables/disables use
 *	of basic DMA on the GEN_IIE versions of the chips.
 *
 *	This bit survives EDMA resets, and must be set for basic DMA
 *	to function, and should be cleared when EDMA is active.
 */
static void mv_bmdma_enable_iie(struct ata_port *ap, int enable_bmdma)
{
	struct mv_port_priv *pp = ap->private_data;
	u32 new, *old = &pp->cached.unknown_rsvd;

	if (enable_bmdma)
		new = *old | 1;
	else
		new = *old & ~1;
	mv_write_cached_reg(mv_ap_base(ap) + EDMA_UNKNOWN_RSVD_OFS, old, new);
}
/*
 * SOC chips have an issue whereby the HDD LEDs don't always blink
 * during I/O when NCQ is enabled.  Enabling a special "LED blink" mode
 * of the SOC takes care of it, generating a steady blink rate when
 * any drive on the chip is active.
 *
 * Unfortunately, the blink mode is a global hardware setting for the SOC,
 * so we must use it whenever at least one port on the SOC has NCQ enabled.
 *
 * We turn "LED blink" off when NCQ is not in use anywhere, because the normal
 * LED operation works then, and provides better (more accurate) feedback.
 *
 * Note that this code assumes that an SOC never has more than one HC onboard.
 */
static void mv_soc_led_blink_enable(struct ata_port *ap)
{
	struct ata_host *host = ap->host;
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *hc_mmio;
	u32 led_ctrl;

	if (hpriv->hp_flags & MV_HP_QUIRK_LED_BLINK_EN)
		return;
	hpriv->hp_flags |= MV_HP_QUIRK_LED_BLINK_EN;
	hc_mmio = mv_hc_base_from_port(mv_host_base(host), ap->port_no);
	led_ctrl = readl(hc_mmio + SOC_LED_CTRL_OFS);
	writel(led_ctrl | SOC_LED_CTRL_BLINK, hc_mmio + SOC_LED_CTRL_OFS);
}
static void mv_soc_led_blink_disable(struct ata_port *ap)
{
	struct ata_host *host = ap->host;
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *hc_mmio;
	u32 led_ctrl;
	unsigned int port;

	if (!(hpriv->hp_flags & MV_HP_QUIRK_LED_BLINK_EN))
		return;

	/* disable led-blink only if no ports are using NCQ */
	for (port = 0; port < hpriv->n_ports; port++) {
		struct ata_port *this_ap = host->ports[port];
		struct mv_port_priv *pp = this_ap->private_data;

		if (pp->pp_flags & MV_PP_FLAG_NCQ_EN)
			return;
	}

	hpriv->hp_flags &= ~MV_HP_QUIRK_LED_BLINK_EN;
	hc_mmio = mv_hc_base_from_port(mv_host_base(host), ap->port_no);
	led_ctrl = readl(hc_mmio + SOC_LED_CTRL_OFS);
	writel(led_ctrl & ~SOC_LED_CTRL_BLINK, hc_mmio + SOC_LED_CTRL_OFS);
}
static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma)
{
	u32 cfg;
	struct mv_port_priv *pp    = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio    = mv_ap_base(ap);

	/* set up non-NCQ EDMA configuration */
	cfg = EDMA_CFG_Q_DEPTH;		/* always 0x1f for *all* chips */
	pp->pp_flags &=
	  ~(MV_PP_FLAG_FBS_EN | MV_PP_FLAG_NCQ_EN | MV_PP_FLAG_FAKE_ATA_BUSY);

	if (IS_GEN_I(hpriv))
		cfg |= (1 << 8);	/* enab config burst size mask */

	else if (IS_GEN_II(hpriv)) {
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
		mv_60x1_errata_sata25(ap, want_ncq);

	} else if (IS_GEN_IIE(hpriv)) {
		int want_fbs = sata_pmp_attached(ap);
		/*
		 * Possible future enhancement:
		 *
		 * The chip can use FBS with non-NCQ, if we allow it,
		 * But first we need to have the error handling in place
		 * for this mode (datasheet section 7.3.15.4.2.3).
		 * So disallow non-NCQ FBS for now.
		 */
		want_fbs &= want_ncq;

		mv_config_fbs(ap, want_ncq, want_fbs);

		if (want_fbs) {
			pp->pp_flags |= MV_PP_FLAG_FBS_EN;
			cfg |= EDMA_CFG_EDMA_FBS; /* FIS-based switching */
		}

		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		if (want_edma) {
			cfg |= (1 << 22); /* enab 4-entry host queue cache */
			if (!IS_SOC(hpriv))
				cfg |= (1 << 18); /* enab early completion */
		}
		if (hpriv->hp_flags & MV_HP_CUT_THROUGH)
			cfg |= (1 << 17); /* enab cut-thru (dis stor&forwrd) */
		mv_bmdma_enable_iie(ap, !want_edma);

		if (IS_SOC(hpriv)) {
			if (want_ncq)
				mv_soc_led_blink_enable(ap);
			else
				mv_soc_led_blink_disable(ap);
		}
	}

	if (want_ncq) {
		cfg |= EDMA_CFG_NCQ;
		pp->pp_flags |=  MV_PP_FLAG_NCQ_EN;
	}

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}
static void mv_port_free_dma_mem(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	int tag;

	if (pp->crqb) {
		dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
		pp->crqb = NULL;
	}
	if (pp->crpb) {
		dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
		pp->crpb = NULL;
	}
	/*
	 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
	 * For later hardware, we have one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (pp->sg_tbl[tag]) {
			if (tag == 0 || !IS_GEN_I(hpriv))
				dma_pool_free(hpriv->sg_tbl_pool,
					      pp->sg_tbl[tag],
					      pp->sg_tbl_dma[tag]);
			pp->sg_tbl[tag] = NULL;
		}
	}
}
/**
 *      mv_port_start - Port specific init/start routine.
 *      @ap: ATA channel to manipulate
 *
 *      Allocate and point to DMA memory, init port private memory,
 *      zero indices.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	unsigned long flags;
	int tag;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;
	ap->private_data = pp;

	pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
	if (!pp->crqb)
		return -ENOMEM;
	memset(pp->crqb, 0, MV_CRQB_Q_SZ);

	pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
	if (!pp->crpb)
		goto out_port_free_dma_mem;
	memset(pp->crpb, 0, MV_CRPB_Q_SZ);

	/* 6041/6081 Rev. "C0" (and newer) are okay with async notify */
	if (hpriv->hp_flags & MV_HP_ERRATA_60X1C0)
		ap->flags |= ATA_FLAG_AN;
	/*
	 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
	 * For later hardware, we need one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (tag == 0 || !IS_GEN_I(hpriv)) {
			pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
					      GFP_KERNEL, &pp->sg_tbl_dma[tag]);
			if (!pp->sg_tbl[tag])
				goto out_port_free_dma_mem;
		} else {
			pp->sg_tbl[tag]     = pp->sg_tbl[0];
			pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
		}
	}

	spin_lock_irqsave(ap->lock, flags);
	mv_save_cached_regs(ap);
	mv_edma_cfg(ap, 0, 0);
	spin_unlock_irqrestore(ap->lock, flags);

	return 0;

out_port_free_dma_mem:
	mv_port_free_dma_mem(ap);
	return -ENOMEM;
}
/**
 *      mv_port_stop - Port specific cleanup/stop routine.
 *      @ap: ATA channel to manipulate
 *
 *      Stop DMA, cleanup port memory.
 *
 *      LOCKING:
 *      This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	unsigned long flags;

	spin_lock_irqsave(ap->lock, flags);
	mv_stop_edma(ap);
	mv_enable_port_irqs(ap, 0);
	spin_unlock_irqrestore(ap->lock, flags);
	mv_port_free_dma_mem(ap);
}
/**
 *      mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 *      @qc: queued command whose SG list to source from
 *
 *      Populate the SG list and mark the last entry.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	struct scatterlist *sg;
	struct mv_sg *mv_sg, *last_sg = NULL;
	unsigned int si;

	mv_sg = pp->sg_tbl[qc->tag];
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		while (sg_len) {
			u32 offset = addr & 0xffff;
			u32 len = sg_len;

			if (offset + len > 0x10000)
				len = 0x10000 - offset;

			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
			mv_sg->flags_size = cpu_to_le32(len & 0xffff);
			mv_sg->reserved = 0;

			sg_len -= len;
			addr += len;

			last_sg = mv_sg;
			mv_sg++;
		}
	}

	if (likely(last_sg))
		last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
	mb(); /* ensure data structure is visible to the chipset */
}
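/*
 * Worked example (illustrative): a 0x3000-byte segment whose DMA address
 * ends in 0xf000 has offset = 0xf000, so offset + len = 0x12000 crosses
 * the 64K boundary; the loop above emits one ePRD of 0x1000 bytes and a
 * second of the remaining 0x2000 bytes.  This splitting is why
 * .sg_tablesize is only MV_MAX_SG_CT / 2: each sg entry may become two
 * ePRDs in the worst case.
 */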
static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
{
	u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
		(last ? CRQB_CMD_LAST : 0);
	*cmdw = cpu_to_le16(tmp);
}
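/*
 * Worked example (illustrative): packing a command byte as the final
 * register yields
 *	data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS | CRQB_CMD_LAST
 * i.e. the register value in bits 7:0, the shadow-register address in
 * bits 10:8, and the control-select/last flags above those.
 */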
/**
 *	mv_sff_irq_clear - Clear hardware interrupt after DMA.
 *	@ap: Port associated with this ATA transaction.
 *
 *	We need this only for ATAPI bmdma transactions,
 *	as otherwise we experience spurious interrupts
 *	after libata-sff handles the bmdma interrupts.
 */
static void mv_sff_irq_clear(struct ata_port *ap)
{
	mv_clear_and_enable_port_irqs(ap, mv_ap_base(ap), ERR_IRQ);
}
/**
 *	mv_check_atapi_dma - Filter ATAPI cmds which are unsuitable for DMA.
 *	@qc: queued command to check for chipset/DMA compatibility.
 *
 *	The bmdma engines cannot handle speculative data sizes
 *	(bytecount under/over flow).  So only allow DMA for
 *	data transfer commands with known data sizes.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int mv_check_atapi_dma(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *scmd = qc->scsicmd;

	if (scmd) {
		switch (scmd->cmnd[0]) {
		case READ_6:
		case READ_10:
		case READ_12:
		case WRITE_6:
		case WRITE_10:
		case WRITE_12:
		case GPCMD_READ_CD:
		case GPCMD_SEND_DVD_STRUCTURE:
		case GPCMD_SEND_CUE_SHEET:
			return 0;	/* DMA is safe */
		}
	}
	return -EOPNOTSUPP;	/* use PIO instead */
}
/**
 *	mv_bmdma_setup - Set up BMDMA transaction
 *	@qc: queued command to prepare DMA for.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_bmdma_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;

	mv_fill_sg(qc);

	/* clear all DMA cmd bits */
	writel(0, port_mmio + BMDMA_CMD_OFS);

	/* load PRD table addr. */
	writel((pp->sg_tbl_dma[qc->tag] >> 16) >> 16,
		port_mmio + BMDMA_PRD_HIGH_OFS);
	writelfl(pp->sg_tbl_dma[qc->tag],
		port_mmio + BMDMA_PRD_LOW_OFS);

	/* issue r/w command */
	ap->ops->sff_exec_command(ap, &qc->tf);
}
/**
 *	mv_bmdma_start - Start a BMDMA transaction
 *	@qc: queued command to start DMA on.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_bmdma_start(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
	u32 cmd = (rw ? 0 : ATA_DMA_WR) | ATA_DMA_START;

	/* start host DMA transaction */
	writelfl(cmd, port_mmio + BMDMA_CMD_OFS);
}
/**
 *	mv_bmdma_stop - Stop BMDMA transfer
 *	@qc: queued command to stop DMA on.
 *
 *	Clears the ATA_DMA_START flag in the bmdma control register
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_bmdma_stop(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 cmd;

	/* clear start/stop bit */
	cmd = readl(port_mmio + BMDMA_CMD_OFS);
	cmd &= ~ATA_DMA_START;
	writelfl(cmd, port_mmio + BMDMA_CMD_OFS);

	/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
	ata_sff_dma_pause(ap);
}
/**
 *	mv_bmdma_status - Read BMDMA status
 *	@ap: port for which to retrieve DMA status.
 *
 *	Read and return equivalent of the sff BMDMA status register.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static u8 mv_bmdma_status(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 reg, status;

	/*
	 * Other bits are valid only if ATA_DMA_ACTIVE==0,
	 * and the ATA_DMA_INTR bit doesn't exist.
	 */
	reg = readl(port_mmio + BMDMA_STATUS_OFS);
	if (reg & ATA_DMA_ACTIVE)
		status = ATA_DMA_ACTIVE;
	else
		status = (reg & ATA_DMA_ERR) | ATA_DMA_INTR;
	return status;
}
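/*
 * Illustrative mapping note (not from the original source): while the
 * engine runs, only "active" is reported; once it stops, the result is
 * the error bit (if any) plus a synthesized ATA_DMA_INTR, matching what
 * libata-sff expects from a real BMDMA status register.
 */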
/**
 *      mv_qc_prep - Host specific command preparation.
 *      @qc: queued command to prepare
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it handles prep of the CRQB
 *      (command request block), does some sanity checking, and calls
 *      the SG load routine.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;
	unsigned in_index;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx;

	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accommodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect, which is not needed there
	 * (nsect is used only for the tag; feat/hob_feat hold true nsect).
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
/**
 *	mv_qc_prep_iie - Host specific command preparation.
 *	@qc: queued command to prepare
 *
 *	This routine simply redirects to the general purpose routine
 *	if command is not DMA.  Else, it handles prep of the CRQB
 *	(command request block), does some sanity checking, and calls
 *	the SG load routine.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	unsigned in_index;
	u32 flags = 0;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in Gen IIE command request block */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_HOSTQ_SHIFT;
	flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx;

	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
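
/*
 * Layout note (restating the shifts above, not the datasheet): Gen-IIE
 * packs the taskfile into four little-endian 32-bit words instead of
 * per-register byte pairs, e.g. ata_cmd[1] carries lbal/lbam/lbah/device
 * in its four bytes, and ata_cmd[3] carries nsect/hob_nsect.
 */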
/**
 *	mv_sff_check_status - fetch device status, if valid
 *	@ap: ATA port to fetch status from
 *
 *	When using command issue via mv_qc_issue_fis(),
 *	the initial ATA_BUSY state does not show up in the
 *	ATA status (shadow) register.  This can confuse libata!
 *
 *	So we have a hook here to fake ATA_BUSY for that situation,
 *	until the first time a BUSY, DRQ, or ERR bit is seen.
 *
 *	The rest of the time, it simply returns the ATA status register.
 */
static u8 mv_sff_check_status(struct ata_port *ap)
{
	u8 stat = ioread8(ap->ioaddr.status_addr);
	struct mv_port_priv *pp = ap->private_data;

	if (pp->pp_flags & MV_PP_FLAG_FAKE_ATA_BUSY) {
		if (stat & (ATA_BUSY | ATA_DRQ | ATA_ERR))
			pp->pp_flags &= ~MV_PP_FLAG_FAKE_ATA_BUSY;
		else
			stat = ATA_BUSY;
	}
	return stat;
}
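
/*
 * Lifecycle sketch (as implied by the code above): mv_qc_issue_fis()
 * sets MV_PP_FLAG_FAKE_ATA_BUSY, so polled status reads report ATA_BUSY
 * until the device first shows BUSY, DRQ, or ERR; after that the flag
 * is cleared and the real shadow status flows through unchanged.
 */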
/**
 *	mv_send_fis - Send a FIS, using the "Vendor-Unique FIS" register
 *	@fis: fis to be sent
 *	@nwords: number of 32-bit words in the fis
 */
static unsigned int mv_send_fis(struct ata_port *ap, u32 *fis, int nwords)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 ifctl, old_ifctl, ifstat;
	int i, timeout = 200, final_word = nwords - 1;

	/* Initiate FIS transmission mode */
	old_ifctl = readl(port_mmio + SATA_IFCTL_OFS);
	ifctl = 0x100 | (old_ifctl & 0xf);
	writelfl(ifctl, port_mmio + SATA_IFCTL_OFS);

	/* Send all words of the FIS except for the final word */
	for (i = 0; i < final_word; ++i)
		writel(fis[i], port_mmio + VENDOR_UNIQUE_FIS_OFS);

	/* Flag end-of-transmission, and then send the final word */
	writelfl(ifctl | 0x200, port_mmio + SATA_IFCTL_OFS);
	writelfl(fis[final_word], port_mmio + VENDOR_UNIQUE_FIS_OFS);

	/*
	 * Wait for FIS transmission to complete.
	 * This typically takes just a single iteration.
	 */
	do {
		ifstat = readl(port_mmio + SATA_IFSTAT_OFS);
	} while (!(ifstat & 0x1000) && --timeout);

	/* Restore original port configuration */
	writelfl(old_ifctl, port_mmio + SATA_IFCTL_OFS);

	/* See if it worked */
	if ((ifstat & 0x3000) != 0x1000) {
		ata_port_printk(ap, KERN_WARNING,
				"%s transmission error, ifstat=%08x\n",
				__func__, ifstat);
		return AC_ERR_OTHER;
	}
	return 0;
}
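
/*
 * Size note: a host-to-device Register FIS is 20 bytes, so callers
 * pass a 5-word buffer; all words funnel through the single
 * VENDOR_UNIQUE_FIS_OFS window, with the 0x200 bit of SATA_IFCTL_OFS
 * flagging the final word as end-of-transmission.
 */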
/**
 *	mv_qc_issue_fis - Issue a command directly as a FIS
 *	@qc: queued command to start
 *
 *	Note that the ATA shadow registers are not updated
 *	after command issue, so the device will appear "READY"
 *	if polled, even while it is BUSY processing the command.
 *
 *	So we use a status hook to fake ATA_BUSY until the drive changes state.
 *
 *	Note: we don't get updated shadow regs on *completion*
 *	of non-data commands. So avoid sending them via this function,
 *	as they will appear to have completed immediately.
 *
 *	GEN_IIE has special registers that we could get the result tf from,
 *	but earlier chipsets do not.  For now, we ignore those registers.
 */
static unsigned int mv_qc_issue_fis(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct ata_link *link = qc->dev->link;
	u32 fis[5];
	int err = 0;

	ata_tf_to_fis(&qc->tf, link->pmp, 1, (void *)fis);
	err = mv_send_fis(ap, fis, sizeof(fis) / sizeof(fis[0]));
	if (err)
		return err;

	switch (qc->tf.protocol) {
	case ATAPI_PROT_PIO:
		pp->pp_flags |= MV_PP_FLAG_FAKE_ATA_BUSY;
		/* fall through */
	case ATAPI_PROT_NODATA:
		ap->hsm_task_state = HSM_ST_FIRST;
		break;
	case ATA_PROT_PIO:
		pp->pp_flags |= MV_PP_FLAG_FAKE_ATA_BUSY;
		if (qc->tf.flags & ATA_TFLAG_WRITE)
			ap->hsm_task_state = HSM_ST_FIRST;
		else
			ap->hsm_task_state = HSM_ST;
		break;
	default:
		ap->hsm_task_state = HSM_ST_LAST;
		break;
	}

	if (qc->tf.flags & ATA_TFLAG_POLLING)
		ata_pio_queue_task(ap, qc, 0);
	return 0;
}
/**
 *	mv_qc_issue - Initiate a command to the host
 *	@qc: queued command to start
 *
 *	This routine simply redirects to the general purpose routine
 *	if command is not DMA.  Else, it sanity checks our local
 *	caches of the request producer/consumer indices then enables
 *	DMA and bumps the request producer index.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	static int limit_warnings = 10;
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	u32 in_index;
	unsigned int port_irqs;

	pp->pp_flags &= ~MV_PP_FLAG_FAKE_ATA_BUSY; /* paranoia */

	switch (qc->tf.protocol) {
	case ATA_PROT_DMA:
	case ATA_PROT_NCQ:
		mv_start_edma(ap, port_mmio, pp, qc->tf.protocol);
		pp->req_idx = (pp->req_idx + 1) & MV_MAX_Q_DEPTH_MASK;
		in_index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;

		/* Write the request in pointer to kick the EDMA to life */
		writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
					port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
		return 0;

	case ATA_PROT_PIO:
		/*
		 * Errata SATA#16, SATA#24: warn if multiple DRQs expected.
		 *
		 * Someday, we might implement special polling workarounds
		 * for these, but it all seems rather unnecessary since we
		 * normally use only DMA for commands which transfer more
		 * than a single block of data.
		 *
		 * Much of the time, this could just work regardless.
		 * So for now, just log the incident, and allow the attempt.
		 */
		if (limit_warnings > 0 && (qc->nbytes / qc->sect_size) > 1) {
			--limit_warnings;
			ata_link_printk(qc->dev->link, KERN_WARNING, DRV_NAME
					": attempting PIO w/multiple DRQ: "
					"this may fail due to h/w errata\n");
		}
		/* drop through */
	case ATA_PROT_NODATA:
	case ATAPI_PROT_PIO:
	case ATAPI_PROT_NODATA:
		if (ap->flags & ATA_FLAG_PIO_POLLING)
			qc->tf.flags |= ATA_TFLAG_POLLING;
		break;
	}

	if (qc->tf.flags & ATA_TFLAG_POLLING)
		port_irqs = ERR_IRQ;	/* mask device interrupt when polling */
	else
		port_irqs = ERR_IRQ | DONE_IRQ;	/* unmask all interrupts */

	/*
	 * We're about to send a non-EDMA capable command to the
	 * port.  Turn off EDMA so there won't be problems accessing
	 * shadow block, etc registers.
	 */
	mv_stop_edma(ap);
	mv_clear_and_enable_port_irqs(ap, mv_ap_base(ap), port_irqs);
	mv_pmp_select(ap, qc->dev->link->pmp);

	if (qc->tf.command == ATA_CMD_READ_LOG_EXT) {
		struct mv_host_priv *hpriv = ap->host->private_data;
		/*
		 * Workaround for 88SX60x1 FEr SATA#25 (part 2).
		 *
		 * After any NCQ error, the READ_LOG_EXT command
		 * from libata-eh *must* use mv_qc_issue_fis().
		 * Otherwise it might fail, due to chip errata.
		 *
		 * Rather than special-case it, we'll just *always*
		 * use this method here for READ_LOG_EXT, making for
		 * easier testing.
		 */
		if (IS_GEN_II(hpriv))
			return mv_qc_issue_fis(qc);
	}
	return ata_sff_qc_issue(qc);
}
static struct ata_queued_cmd *mv_get_active_qc(struct ata_port *ap)
{
	struct mv_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;

	if (pp->pp_flags & MV_PP_FLAG_NCQ_EN)
		return NULL;
	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	if (qc) {
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			qc = NULL;
		else if (!(qc->flags & ATA_QCFLAG_ACTIVE))
			qc = NULL;
	}
	return qc;
}
static void mv_pmp_error_handler(struct ata_port *ap)
{
	unsigned int pmp, pmp_map;
	struct mv_port_priv *pp = ap->private_data;

	if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH) {
		/*
		 * Perform NCQ error analysis on failed PMPs
		 * before we freeze the port entirely.
		 *
		 * The failed PMPs are marked earlier by mv_pmp_eh_prep().
		 */
		pmp_map = pp->delayed_eh_pmp_map;
		pp->pp_flags &= ~MV_PP_FLAG_DELAYED_EH;
		for (pmp = 0; pmp_map != 0; pmp++) {
			unsigned int this_pmp = (1 << pmp);
			if (pmp_map & this_pmp) {
				struct ata_link *link = &ap->pmp_link[pmp];
				pmp_map &= ~this_pmp;
				ata_eh_analyze_ncq_error(link);
			}
		}
		ata_port_freeze(ap);
	}
	sata_pmp_error_handler(ap);
}
static unsigned int mv_get_err_pmp_map(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);

	return readl(port_mmio + SATA_TESTCTL_OFS) >> 16;
}
static void mv_pmp_eh_prep(struct ata_port *ap, unsigned int pmp_map)
{
	struct ata_eh_info *ehi;
	unsigned int pmp;

	/*
	 * Initialize EH info for PMPs which saw device errors
	 */
	ehi = &ap->link.eh_info;
	for (pmp = 0; pmp_map != 0; pmp++) {
		unsigned int this_pmp = (1 << pmp);
		if (pmp_map & this_pmp) {
			struct ata_link *link = &ap->pmp_link[pmp];

			pmp_map &= ~this_pmp;
			ehi = &link->eh_info;
			ata_ehi_clear_desc(ehi);
			ata_ehi_push_desc(ehi, "dev err");
			ehi->err_mask |= AC_ERR_DEV;
			ehi->action |= ATA_EH_RESET;
			ata_link_abort(link);
		}
	}
}
static int mv_req_q_empty(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 in_ptr, out_ptr;

	in_ptr  = (readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS)
			>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
	out_ptr = (readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
			>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
	return (in_ptr == out_ptr);	/* 1 == queue_is_empty */
}
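
/*
 * Worked example (assuming the driver's usual MV_MAX_Q_DEPTH of 32):
 * both pointers are 5-bit ring indices (mask 0x1f), so the request
 * queue is empty exactly when the hardware's OUT pointer has caught
 * up with the software's IN pointer, i.e. in_ptr == out_ptr.
 */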
static int mv_handle_fbs_ncq_dev_err(struct ata_port *ap)
{
	struct mv_port_priv *pp = ap->private_data;
	int failed_links;
	unsigned int old_map, new_map;

	/*
	 * Device error during FBS+NCQ operation:
	 *
	 * Set a port flag to prevent further I/O being enqueued.
	 * Leave the EDMA running to drain outstanding commands from this port.
	 * Perform the post-mortem/EH only when all responses are complete.
	 * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.2).
	 */
	if (!(pp->pp_flags & MV_PP_FLAG_DELAYED_EH)) {
		pp->pp_flags |= MV_PP_FLAG_DELAYED_EH;
		pp->delayed_eh_pmp_map = 0;
	}
	old_map = pp->delayed_eh_pmp_map;
	new_map = old_map | mv_get_err_pmp_map(ap);

	if (old_map != new_map) {
		pp->delayed_eh_pmp_map = new_map;
		mv_pmp_eh_prep(ap, new_map & ~old_map);
	}
	failed_links = hweight16(new_map);

	ata_port_printk(ap, KERN_INFO, "%s: pmp_map=%04x qc_map=%04x "
			"failed_links=%d nr_active_links=%d\n",
			__func__, pp->delayed_eh_pmp_map,
			ap->qc_active, failed_links,
			ap->nr_active_links);

	if (ap->nr_active_links <= failed_links && mv_req_q_empty(ap)) {
		mv_process_crpb_entries(ap, pp);
		mv_stop_edma(ap);
		mv_eh_freeze(ap);
		ata_port_printk(ap, KERN_INFO, "%s: done\n", __func__);
		return 1;	/* handled */
	}
	ata_port_printk(ap, KERN_INFO, "%s: waiting\n", __func__);
	return 1;	/* handled */
}
static int mv_handle_fbs_non_ncq_dev_err(struct ata_port *ap)
{
	/*
	 * Possible future enhancement:
	 *
	 * FBS+non-NCQ operation is not yet implemented.
	 * See related notes in mv_edma_cfg().
	 *
	 * Device error during FBS+non-NCQ operation:
	 *
	 * We need to snapshot the shadow registers for each failed command.
	 * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.3).
	 */
	return 0;	/* not handled */
}
static int mv_handle_dev_err(struct ata_port *ap, u32 edma_err_cause)
{
	struct mv_port_priv *pp = ap->private_data;

	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
		return 0;	/* EDMA was not active: not handled */
	if (!(pp->pp_flags & MV_PP_FLAG_FBS_EN))
		return 0;	/* FBS was not active: not handled */

	if (!(edma_err_cause & EDMA_ERR_DEV))
		return 0;	/* non DEV error: not handled */
	edma_err_cause &= ~EDMA_ERR_IRQ_TRANSIENT;
	if (edma_err_cause & ~(EDMA_ERR_DEV | EDMA_ERR_SELF_DIS))
		return 0;	/* other problems: not handled */

	if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) {
		/*
		 * EDMA should NOT have self-disabled for this case.
		 * If it did, then something is wrong elsewhere,
		 * and we cannot handle it here.
		 */
		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
			ata_port_printk(ap, KERN_WARNING,
				"%s: err_cause=0x%x pp_flags=0x%x\n",
				__func__, edma_err_cause, pp->pp_flags);
			return 0; /* not handled */
		}
		return mv_handle_fbs_ncq_dev_err(ap);
	} else {
		/*
		 * EDMA should have self-disabled for this case.
		 * If it did not, then something is wrong elsewhere,
		 * and we cannot handle it here.
		 */
		if (!(edma_err_cause & EDMA_ERR_SELF_DIS)) {
			ata_port_printk(ap, KERN_WARNING,
				"%s: err_cause=0x%x pp_flags=0x%x\n",
				__func__, edma_err_cause, pp->pp_flags);
			return 0; /* not handled */
		}
		return mv_handle_fbs_non_ncq_dev_err(ap);
	}
	return 0;	/* not handled */
}
static void mv_unexpected_intr(struct ata_port *ap, int edma_was_enabled)
{
	struct ata_eh_info *ehi = &ap->link.eh_info;
	char *when = "idle";

	ata_ehi_clear_desc(ehi);
	if (!ap || (ap->flags & ATA_FLAG_DISABLED)) {
		when = "disabled";
	} else if (edma_was_enabled) {
		when = "EDMA enabled";
	} else {
		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
		if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
			when = "polling";
	}
	ata_ehi_push_desc(ehi, "unexpected device interrupt while %s", when);
	ehi->err_mask |= AC_ERR_OTHER;
	ehi->action   |= ATA_EH_RESET;
	ata_port_freeze(ap);
}
/**
 *	mv_err_intr - Handle error interrupts on the port
 *	@ap: ATA channel to manipulate
 *
 *	Most cases require a full reset of the chip's state machine,
 *	which also performs a COMRESET.
 *	Also, if the port disabled DMA, update our cached copy to match.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_err_intr(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 edma_err_cause, eh_freeze_mask, serr = 0;
	u32 fis_cause = 0;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int action = 0, err_mask = 0;
	struct ata_eh_info *ehi = &ap->link.eh_info;
	struct ata_queued_cmd *qc;
	int abort = 0;

	/*
	 * Read and clear the SError and err_cause bits.
	 * For GenIIe, if EDMA_ERR_TRANS_IRQ_7 is set, we also must read/clear
	 * the FIS_IRQ_CAUSE register before clearing edma_err_cause.
	 */
	sata_scr_read(&ap->link, SCR_ERROR, &serr);
	sata_scr_write_flush(&ap->link, SCR_ERROR, serr);

	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
	if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) {
		fis_cause = readl(port_mmio + SATA_FIS_IRQ_CAUSE_OFS);
		writelfl(~fis_cause, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);
	}
	writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	if (edma_err_cause & EDMA_ERR_DEV) {
		/*
		 * Device errors during FIS-based switching operation
		 * require special handling.
		 */
		if (mv_handle_dev_err(ap, edma_err_cause))
			return;
	}

	qc = mv_get_active_qc(ap);
	ata_ehi_clear_desc(ehi);
	ata_ehi_push_desc(ehi, "edma_err_cause=%08x pp_flags=%08x",
			  edma_err_cause, pp->pp_flags);

	if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) {
		ata_ehi_push_desc(ehi, "fis_cause=%08x", fis_cause);
		if (fis_cause & SATA_FIS_IRQ_AN) {
			u32 ec = edma_err_cause &
			       ~(EDMA_ERR_TRANS_IRQ_7 | EDMA_ERR_IRQ_TRANSIENT);
			sata_async_notification(ap);
			if (!ec)
				return; /* Just an AN; no need for the nukes */
			ata_ehi_push_desc(ehi, "SDB notify");
		}
	}
	/*
	 * All generations share these EDMA error cause bits:
	 */
	if (edma_err_cause & EDMA_ERR_DEV) {
		err_mask |= AC_ERR_DEV;
		action |= ATA_EH_RESET;
		ata_ehi_push_desc(ehi, "dev error");
	}
	if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
			EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
			EDMA_ERR_INTRL_PAR)) {
		err_mask |= AC_ERR_ATA_BUS;
		action |= ATA_EH_RESET;
		ata_ehi_push_desc(ehi, "parity error");
	}
	if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
		ata_ehi_hotplugged(ehi);
		ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
			"dev disconnect" : "dev connect");
		action |= ATA_EH_RESET;
	}

	/*
	 * Gen-I has a different SELF_DIS bit,
	 * different FREEZE bits, and no SERR bit:
	 */
	if (IS_GEN_I(hpriv)) {
		eh_freeze_mask = EDMA_EH_FREEZE_5;
		if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}
	} else {
		eh_freeze_mask = EDMA_EH_FREEZE;
		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}
		if (edma_err_cause & EDMA_ERR_SERR) {
			ata_ehi_push_desc(ehi, "SError=%08x", serr);
			err_mask |= AC_ERR_ATA_BUS;
			action |= ATA_EH_RESET;
		}
	}

	if (!err_mask) {
		err_mask = AC_ERR_OTHER;
		action |= ATA_EH_RESET;
	}

	ehi->serror |= serr;
	ehi->action |= action;

	if (qc)
		qc->err_mask |= err_mask;
	else
		ehi->err_mask |= err_mask;

	if (err_mask == AC_ERR_DEV) {
		/*
		 * Cannot do ata_port_freeze() here,
		 * because it would kill PIO access,
		 * which is needed for further diagnosis.
		 */
		mv_eh_freeze(ap);
		abort = 1;
	} else if (edma_err_cause & eh_freeze_mask) {
		/*
		 * Note to self: ata_port_freeze() calls ata_port_abort()
		 */
		ata_port_freeze(ap);
	} else {
		abort = 1;
	}

	if (abort) {
		if (qc)
			ata_link_abort(qc->dev->link);
		else
			ata_port_abort(ap);
	}
}
static void mv_process_crpb_response(struct ata_port *ap,
		struct mv_crpb *response, unsigned int tag, int ncq_enabled)
{
	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);

	if (qc) {
		u8 ata_status;
		u16 edma_status = le16_to_cpu(response->flags);
		/*
		 * edma_status from a response queue entry:
		 *   LSB is from EDMA_ERR_IRQ_CAUSE_OFS (non-NCQ only).
		 *   MSB is saved ATA status from command completion.
		 */
		if (!ncq_enabled) {
			u8 err_cause = edma_status & 0xff & ~EDMA_ERR_DEV;
			if (err_cause) {
				/*
				 * Error will be seen/handled by mv_err_intr().
				 * So do nothing at all here.
				 */
				return;
			}
		}
		ata_status = edma_status >> CRPB_FLAG_STATUS_SHIFT;
		if (!ac_err_mask(ata_status))
			ata_qc_complete(qc);
		/* else: leave it for mv_err_intr() */
	} else {
		ata_port_printk(ap, KERN_ERR, "%s: no qc for tag=%d\n",
				__func__, tag);
	}
}
static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_host_priv *hpriv = ap->host->private_data;
	u32 in_index;
	bool work_done = false;
	int ncq_enabled = (pp->pp_flags & MV_PP_FLAG_NCQ_EN);

	/* Get the hardware queue position index */
	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	/* Process new responses from since the last time we looked */
	while (in_index != pp->resp_idx) {
		unsigned int tag;
		struct mv_crpb *response = &pp->crpb[pp->resp_idx];

		pp->resp_idx = (pp->resp_idx + 1) & MV_MAX_Q_DEPTH_MASK;

		if (IS_GEN_I(hpriv)) {
			/* 50xx: no NCQ, only one command active at a time */
			tag = ap->link.active_tag;
		} else {
			/* Gen II/IIE: get command tag from CRPB entry */
			tag = le16_to_cpu(response->id) & 0x1f;
		}
		mv_process_crpb_response(ap, response, tag, ncq_enabled);
		work_done = true;
	}

	/* Update the software queue position index in hardware */
	if (work_done)
		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
			 (pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT),
			 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
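
/*
 * The response queue is a producer/consumer ring: the EDMA engine
 * advances the IN pointer as it posts CRPBs, software chases it with
 * pp->resp_idx, and writing resp_idx back to the OUT pointer register
 * tells the hardware which entries may be reused.
 */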
static void mv_port_intr(struct ata_port *ap, u32 port_cause)
{
	struct mv_port_priv *pp;
	int edma_was_enabled;

	if (!ap || (ap->flags & ATA_FLAG_DISABLED)) {
		mv_unexpected_intr(ap, 0);
		return;
	}
	/*
	 * Grab a snapshot of the EDMA_EN flag setting,
	 * so that we have a consistent view for this port,
	 * even if one of the routines we call changes it.
	 */
	pp = ap->private_data;
	edma_was_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
	/*
	 * Process completed CRPB response(s) before other events.
	 */
	if (edma_was_enabled && (port_cause & DONE_IRQ)) {
		mv_process_crpb_entries(ap, pp);
		if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
			mv_handle_fbs_ncq_dev_err(ap);
	}
	/*
	 * Handle chip-reported errors, or continue on to handle PIO.
	 */
	if (unlikely(port_cause & ERR_IRQ)) {
		mv_err_intr(ap);
	} else if (!edma_was_enabled) {
		struct ata_queued_cmd *qc = mv_get_active_qc(ap);
		if (qc)
			ata_sff_host_intr(ap, qc);
		else
			mv_unexpected_intr(ap, edma_was_enabled);
	}
}
/**
 *	mv_host_intr - Handle all interrupts on the given host controller
 *	@host: host specific structure
 *	@main_irq_cause: Main interrupt cause register for the chip.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int mv_host_intr(struct ata_host *host, u32 main_irq_cause)
{
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base, *hc_mmio;
	unsigned int handled = 0, port;

	/* If asserted, clear the "all ports" IRQ coalescing bit */
	if (main_irq_cause & ALL_PORTS_COAL_DONE)
		writel(~ALL_PORTS_COAL_IRQ, mmio + MV_IRQ_COAL_CAUSE);

	for (port = 0; port < hpriv->n_ports; port++) {
		struct ata_port *ap = host->ports[port];
		unsigned int p, shift, hardport, port_cause;

		MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
		/*
		 * Each hc within the host has its own hc_irq_cause register,
		 * where the interrupting ports bits get ack'd.
		 */
		if (hardport == 0) {	/* first port on this hc ? */
			u32 hc_cause = (main_irq_cause >> shift) & HC0_IRQ_PEND;
			u32 port_mask, ack_irqs;
			/*
			 * Skip this entire hc if nothing pending for any ports
			 */
			if (!hc_cause) {
				port += MV_PORTS_PER_HC - 1;
				continue;
			}
			/*
			 * We don't need/want to read the hc_irq_cause register,
			 * because doing so hurts performance, and
			 * main_irq_cause already gives us everything we need.
			 *
			 * But we do have to *write* to the hc_irq_cause to ack
			 * the ports that we are handling this time through.
			 *
			 * This requires that we create a bitmap for those
			 * ports which interrupted us, and use that bitmap
			 * to ack (only) those ports via hc_irq_cause.
			 */
			ack_irqs = 0;
			if (hc_cause & PORTS_0_3_COAL_DONE)
				ack_irqs = HC_COAL_IRQ;
			for (p = 0; p < MV_PORTS_PER_HC; ++p) {
				if ((port + p) >= hpriv->n_ports)
					break;
				port_mask = (DONE_IRQ | ERR_IRQ) << (p * 2);
				if (hc_cause & port_mask)
					ack_irqs |= (DMA_IRQ | DEV_IRQ) << p;
			}
			hc_mmio = mv_hc_base_from_port(mmio, port);
			writelfl(~ack_irqs, hc_mmio + HC_IRQ_CAUSE_OFS);
			handled = 1;
		}
		/*
		 * Handle interrupts signalled for this port:
		 */
		port_cause = (main_irq_cause >> shift) & (DONE_IRQ | ERR_IRQ);
		if (port_cause)
			mv_port_intr(ap, port_cause);
	}
	return handled;
}
static int mv_pci_error(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	struct ata_port *ap;
	struct ata_queued_cmd *qc;
	struct ata_eh_info *ehi;
	unsigned int i, err_mask, printed = 0;
	u32 err_cause;

	err_cause = readl(mmio + hpriv->irq_cause_ofs);

	dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
		   err_cause);

	DPRINTK("All regs @ PCI error\n");
	mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));

	writelfl(0, mmio + hpriv->irq_cause_ofs);

	for (i = 0; i < host->n_ports; i++) {
		ap = host->ports[i];
		if (!ata_link_offline(&ap->link)) {
			ehi = &ap->link.eh_info;
			ata_ehi_clear_desc(ehi);
			if (!printed++)
				ata_ehi_push_desc(ehi,
					"PCI err cause 0x%08x", err_cause);
			err_mask = AC_ERR_HOST_BUS;
			ehi->action = ATA_EH_RESET;
			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc)
				qc->err_mask |= err_mask;
			else
				ehi->err_mask |= err_mask;

			ata_port_freeze(ap);
		}
	}
	return 1;	/* handled */
}
/**
 *	mv_interrupt - Main interrupt event handler
 *	@irq: unused
 *	@dev_instance: private data; in this case the host structure
 *
 *	Read the read only register to determine if any host
 *	controllers have pending interrupts.  If so, call lower level
 *	routine to handle.  Also check for PCI errors which are only
 *	reported here.
 *
 *	LOCKING:
 *	This routine holds the host lock while processing pending
 *	interrupts.
 */
static irqreturn_t mv_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	struct mv_host_priv *hpriv = host->private_data;
	unsigned int handled = 0;
	int using_msi = hpriv->hp_flags & MV_HP_FLAG_MSI;
	u32 main_irq_cause, pending_irqs;

	spin_lock(&host->lock);

	/* for MSI:  block new interrupts while in here */
	if (using_msi)
		mv_write_main_irq_mask(0, hpriv);

	main_irq_cause = readl(hpriv->main_irq_cause_addr);
	pending_irqs   = main_irq_cause & hpriv->main_irq_mask;
	/*
	 * Deal with cases where we either have nothing pending, or have read
	 * a bogus register value which can indicate HW removal or PCI fault.
	 */
	if (pending_irqs && main_irq_cause != 0xffffffffU) {
		if (unlikely((pending_irqs & PCI_ERR) && !IS_SOC(hpriv)))
			handled = mv_pci_error(host, hpriv->base);
		else
			handled = mv_host_intr(host, pending_irqs);
	}

	/* for MSI: unmask; interrupt cause bits will retrigger now */
	if (using_msi)
		mv_write_main_irq_mask(hpriv->main_irq_mask, hpriv);

	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}
static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_ERROR:
	case SCR_CONTROL:
		ofs = sc_reg_in * sizeof(u32);
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}
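
/*
 * Worked example: libata defines SCR_STATUS/SCR_ERROR/SCR_CONTROL as
 * 0/1/2, and on these chips the registers sit contiguously, so the
 * offsets come out as 0x0, 0x4, and 0x8 from the PHY base; any other
 * register yields the 0xffffffffU "invalid" marker checked by the
 * scr_read/scr_write callers below.
 */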
static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val)
{
	struct mv_host_priv *hpriv = link->ap->host->private_data;
	void __iomem *mmio = hpriv->base;
	void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		*val = readl(addr + ofs);
		return 0;
	} else
		return -EINVAL;
}

static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
{
	struct mv_host_priv *hpriv = link->ap->host->private_data;
	void __iomem *mmio = hpriv->base;
	void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		writelfl(val, addr + ofs);
		return 0;
	} else
		return -EINVAL;
}
static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	int early_5080;

	early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);

	if (!early_5080) {
		u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
		tmp |= (1 << 0);
		writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
	}

	mv_reset_pci_bus(host, mmio);
}
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x0fcfffff, mmio + MV_FLASH_CTL_OFS);
}
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
	u32 tmp;

	tmp = readl(phy_mmio + MV5_PHY_MODE);

	hpriv->signal[idx].pre = tmp & 0x1800;	/* bits 12:11 */
	hpriv->signal[idx].amps = tmp & 0xe0;	/* bits 7:5 */
}
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	writel(0, mmio + MV_GPIO_PORT_CTL_OFS);

	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */

	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
	tmp |= ~(1 << 0);
	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
}
static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
	u32 tmp;
	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);

	if (fix_apm_sq) {
		tmp = readl(phy_mmio + MV5_LTMODE_OFS);
		tmp |= (1 << 19);
		writel(tmp, phy_mmio + MV5_LTMODE_OFS);

		tmp = readl(phy_mmio + MV5_PHY_CTL_OFS);
		tmp &= ~0x3;
		tmp |= 0x1;
		writel(tmp, phy_mmio + MV5_PHY_CTL_OFS);
	}

	tmp = readl(phy_mmio + MV5_PHY_MODE);
	tmp &= ~mask;
	tmp |= hpriv->signal[port].pre;
	tmp |= hpriv->signal[port].amps;
	writel(tmp, phy_mmio + MV5_PHY_MODE);
}
#define ZERO(reg) writel(0, port_mmio + (reg))
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	mv_reset_channel(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x11f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT_OFS);
}
#undef ZERO
#define ZERO(reg) writel(0, hc_mmio + (reg))
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int hc)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 tmp;

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
	ZERO(0x018);

	tmp = readl(hc_mmio + 0x20);
	tmp &= 0x1c1c1c1c;
	tmp |= 0x03030303;
	writel(tmp, hc_mmio + 0x20);
}
#undef ZERO
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	unsigned int hc, port;

	for (hc = 0; hc < n_hc; hc++) {
		for (port = 0; port < MV_PORTS_PER_HC; port++)
			mv5_reset_hc_port(hpriv, mmio,
					  (hc * MV_PORTS_PER_HC) + port);

		mv5_reset_one_hc(hpriv, mmio, hc);
	}

	return 0;
}
#define ZERO(reg) writel(0, mmio + (reg))
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	u32 tmp;

	tmp = readl(mmio + MV_PCI_MODE_OFS);
	tmp &= 0xff00ffff;
	writel(tmp, mmio + MV_PCI_MODE_OFS);

	ZERO(MV_PCI_DISC_TIMER);
	ZERO(MV_PCI_MSI_TRIGGER);
	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT_OFS);
	ZERO(MV_PCI_SERR_MASK);
	ZERO(hpriv->irq_cause_ofs);
	ZERO(hpriv->irq_mask_ofs);
	ZERO(MV_PCI_ERR_LOW_ADDRESS);
	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
	ZERO(MV_PCI_ERR_ATTRIBUTE);
	ZERO(MV_PCI_ERR_COMMAND);
}
#undef ZERO
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	mv5_reset_flash(hpriv, mmio);

	tmp = readl(mmio + MV_GPIO_PORT_CTL_OFS);
	tmp &= 0x3;
	tmp |= (1 << 5) | (1 << 6);
	writel(tmp, mmio + MV_GPIO_PORT_CTL_OFS);
}
/**
 *	mv6_reset_hc - Perform the 6xxx global soft reset
 *	@mmio: base address of the HBA
 *
 *	This routine only applies to 6xxx parts.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
	int i, rc = 0;
	u32 t;

	/* Following procedure defined in PCI "main command and status
	 * register" table.
	 */
	t = readl(reg);
	writel(t | STOP_PCI_MASTER, reg);

	for (i = 0; i < 1000; i++) {
		udelay(1);
		t = readl(reg);
		if (PCI_MASTER_EMPTY & t)
			break;
	}
	if (!(PCI_MASTER_EMPTY & t)) {
		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
		rc = 1;
		goto done;
	}

	/* set reset */
	i = 5;
	do {
		writel(t | GLOB_SFT_RST, reg);
		t = readl(reg);
		udelay(1);
	} while (!(GLOB_SFT_RST & t) && (i-- > 0));

	if (!(GLOB_SFT_RST & t)) {
		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
		rc = 1;
		goto done;
	}

	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
	i = 5;
	do {
		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
		t = readl(reg);
		udelay(1);
	} while ((GLOB_SFT_RST & t) && (i-- > 0));

	if (GLOB_SFT_RST & t) {
		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
		rc = 1;
	}
done:
	return rc;
}
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

	tmp = readl(mmio + MV_RESET_CFG_OFS);
	if ((tmp & (1 << 0)) == 0) {
		hpriv->signal[idx].amps = 0x7 << 8;
		hpriv->signal[idx].pre = 0x1 << 5;
		return;
	}

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x00000060, mmio + MV_GPIO_PORT_CTL_OFS);
}
static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	u32 hp_flags = hpriv->hp_flags;
	int fix_phy_mode2 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	int fix_phy_mode4 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	u32 m2, m3;

	if (fix_phy_mode2) {
		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~(1 << 16);
		m2 |= (1 << 31);
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);

		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~((1 << 16) | (1 << 31));
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);
	}

	/*
	 * Gen-II/IIe PHY_MODE3 errata RM#2:
	 * Achieves better receiver noise performance than the h/w default:
	 */
	m3 = readl(port_mmio + PHY_MODE3);
	m3 = (m3 & 0x1f) | (0x5555601 << 5);

	/* Guideline 88F5182 (GL# SATA-S11) */
	if (IS_SOC(hpriv))
		m3 &= ~0x1c;

	if (fix_phy_mode4) {
		u32 m4 = readl(port_mmio + PHY_MODE4);
		/*
		 * Enforce reserved-bit restrictions on GenIIe devices only.
		 * For earlier chipsets, force only the internal config field
		 * (workaround for errata FEr SATA#10 part 1).
		 */
		if (IS_GEN_IIE(hpriv))
			m4 = (m4 & ~PHY_MODE4_RSVD_ZEROS) | PHY_MODE4_RSVD_ONES;
		else
			m4 = (m4 & ~PHY_MODE4_CFG_MASK) | PHY_MODE4_CFG_VALUE;
		writel(m4, port_mmio + PHY_MODE4);
	}
	/*
	 * Workaround for 60x1-B2 errata SATA#13:
	 * Any write to PHY_MODE4 (above) may corrupt PHY_MODE3,
	 * so we must always rewrite PHY_MODE3 after PHY_MODE4.
	 */
	writel(m3, port_mmio + PHY_MODE3);

	/* Revert values of pre-emphasis and signal amps to the saved ones */
	m2 = readl(port_mmio + PHY_MODE2);

	m2 &= ~MV_M2_PREAMP_MASK;
	m2 |= hpriv->signal[port].amps;
	m2 |= hpriv->signal[port].pre;
	m2 &= ~(1 << 16);

	/* according to mvSata 3.6.1, some IIE values are fixed */
	if (IS_GEN_IIE(hpriv)) {
		m2 &= ~0xC30FF01F;
		m2 |= 0x0000900F;
	}

	writel(m2, port_mmio + PHY_MODE2);
}
/* TODO: use the generic LED interface to configure the SATA Presence */
/* & Activity LEDs on the board */
static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
				void __iomem *mmio)
{
	return;
}

static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}
#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
				 void __iomem *mmio, unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	mv_reset_channel(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x101f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT_OFS);
}
#undef ZERO
#define ZERO(reg) writel(0, hc_mmio + (reg))
static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
				void __iomem *mmio)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, 0);

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
}
#undef ZERO

static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
			   void __iomem *mmio, unsigned int n_hc)
{
	unsigned int port;

	for (port = 0; port < hpriv->n_ports; port++)
		mv_soc_reset_hc_port(hpriv, mmio, port);

	mv_soc_reset_one_hc(hpriv, mmio);

	return 0;
}
static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
			       void __iomem *mmio)
{
	return;
}

static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
{
	return;
}
static void mv_setup_ifcfg(void __iomem *port_mmio, int want_gen2i)
{
	u32 ifcfg = readl(port_mmio + SATA_INTERFACE_CFG_OFS);

	ifcfg = (ifcfg & 0xf7f) | 0x9b1000;	/* from chip spec */
	if (want_gen2i)
		ifcfg |= (1 << 7);		/* enable gen2i speed */
	writelfl(ifcfg, port_mmio + SATA_INTERFACE_CFG_OFS);
}
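
/*
 * Usage note (from the callers, not the chip spec): bit 7 selects
 * gen2i (3.0 Gb/s) signalling, so mv_reset_channel() passes
 * want_gen2i=1 to enable it, while mv_hardreset() passes 0 to force
 * a 1.5 Gb/s fallback on links that refuse to train at gen2.
 */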
static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no)
{
	void __iomem *port_mmio = mv_port_base(mmio, port_no);

	/*
	 * The datasheet warns against setting EDMA_RESET when EDMA is active
	 * (but doesn't say what the problem might be).  So we first try
	 * to disable the EDMA engine before doing the EDMA_RESET operation.
	 */
	mv_stop_edma_engine(port_mmio);
	writelfl(EDMA_RESET, port_mmio + EDMA_CMD_OFS);

	if (!IS_GEN_I(hpriv)) {
		/* Enable 3.0gb/s link speed: this survives EDMA_RESET */
		mv_setup_ifcfg(port_mmio, 1);
	}
	/*
	 * Strobing EDMA_RESET here causes a hard reset of the SATA transport,
	 * link, and physical layers.  It resets all SATA interface registers
	 * (except for SATA_INTERFACE_CFG), and issues a COMRESET to the dev.
	 */
	writelfl(EDMA_RESET, port_mmio + EDMA_CMD_OFS);
	udelay(25);	/* allow reset propagation */
	writelfl(0, port_mmio + EDMA_CMD_OFS);

	hpriv->ops->phy_errata(hpriv, mmio, port_no);

	if (IS_GEN_I(hpriv))
		mdelay(1);
}
static void mv_pmp_select(struct ata_port *ap, int pmp)
{
	if (sata_pmp_supported(ap)) {
		void __iomem *port_mmio = mv_ap_base(ap);
		u32 reg = readl(port_mmio + SATA_IFCTL_OFS);
		int old = reg & 0xf;

		if (old != pmp) {
			reg = (reg & ~0xf) | pmp;
			writelfl(reg, port_mmio + SATA_IFCTL_OFS);
		}
	}
}
static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
			    unsigned long deadline)
{
	mv_pmp_select(link->ap, sata_srst_pmp(link));
	return sata_std_hardreset(link, class, deadline);
}

static int mv_softreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	mv_pmp_select(link->ap, sata_srst_pmp(link));
	return ata_sff_softreset(link, class, deadline);
}
static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	void __iomem *mmio = hpriv->base;
	int rc, attempts = 0, extra = 0;
	u32 sstatus;
	bool online;

	mv_reset_channel(hpriv, mmio, ap->port_no);
	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	pp->pp_flags &=
	  ~(MV_PP_FLAG_FBS_EN | MV_PP_FLAG_NCQ_EN | MV_PP_FLAG_FAKE_ATA_BUSY);

	/* Workaround for errata FEr SATA#10 (part 2) */
	do {
		const unsigned long *timing =
				sata_ehc_deb_timing(&link->eh_context);

		rc = sata_link_hardreset(link, timing, deadline + extra,
					 &online, NULL);
		rc = online ? -EAGAIN : rc;
		if (rc)
			return rc;
		sata_scr_read(link, SCR_STATUS, &sstatus);
		if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) {
			/* Force 1.5gb/s link speed and try again */
			mv_setup_ifcfg(mv_ap_base(ap), 0);
			if (time_after(jiffies + HZ, deadline))
				extra = HZ; /* only extend it once, max */
		}
	} while (sstatus != 0x0 && sstatus != 0x113 && sstatus != 0x123);
	mv_save_cached_regs(ap);
	mv_edma_cfg(ap, 0, 0);

	return rc;
}
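
/*
 * SStatus decoding for the loop above (per the SATA SCR layout, DET in
 * bits 3:0 and SPD in bits 7:4): 0x113 is a device with PHY
 * communication established at 1.5 Gb/s, 0x123 the same at 3.0 Gb/s,
 * and the retry trigger 0x121 means a device was sensed at gen2 speed
 * but PHY communication never came up, hence the forced fallback.
 */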
static void mv_eh_freeze(struct ata_port *ap)
{
	mv_stop_edma(ap);
	mv_enable_port_irqs(ap, 0);
}
static void mv_eh_thaw(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int port = ap->port_no;
	unsigned int hardport = mv_hardport_from_port(port);
	void __iomem *hc_mmio = mv_hc_base_from_port(hpriv->base, port);
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 hc_irq_cause;

	/* clear EDMA errors on this port */
	writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* clear pending irq events */
	hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport);
	writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	mv_enable_port_irqs(ap, ERR_IRQ);
}
/**
 *	mv_port_init - Perform some early initialization on a single port.
 *	@port: libata data structure storing shadow register addresses
 *	@port_mmio: base address of the port
 *
 *	Initialize shadow register mmio addresses, clear outstanding
 *	interrupts on the port, and unmask interrupts for the future
 *	start of the port.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
{
	void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
	unsigned serr_ofs;

	/* PIO related setup
	 */
	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
	port->error_addr =
		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
	port->status_addr =
		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
	/* special case: control/altstatus doesn't have ATA_REG_ address */
	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;

	/* unused: */
	port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;

	/* Clear any currently outstanding port interrupt conditions */
	serr_ofs = mv_scr_offset(SCR_ERROR);
	writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* unmask all non-transient EDMA error interrupts */
	writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);

	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
		readl(port_mmio + EDMA_CFG_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
}
static unsigned int mv_in_pcix_mode(struct ata_host *host)
{
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;
	u32 reg;

	if (IS_SOC(hpriv) || !IS_PCIE(hpriv))
		return 0;	/* not PCI-X capable */
	reg = readl(mmio + MV_PCI_MODE_OFS);
	if ((reg & MV_PCI_MODE_MASK) == 0)
		return 0;	/* conventional PCI mode */
	return 1;	/* chip is in PCI-X mode */
}
static int mv_pci_cut_through_okay(struct ata_host *host)
{
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;
	u32 reg;

	if (!mv_in_pcix_mode(host)) {
		reg = readl(mmio + PCI_COMMAND_OFS);
		if (reg & PCI_COMMAND_MRDTRIG)
			return 0; /* not okay */
	}
	return 1; /* okay */
}
static void mv_60x1b2_errata_pci7(struct ata_host *host)
{
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;

	/* workaround for 60x1-B2 errata PCI#7 */
	if (mv_in_pcix_mode(host)) {
		u32 reg = readl(mmio + PCI_COMMAND_OFS);
		writelfl(reg & ~PCI_COMMAND_MWRCOM, mmio + PCI_COMMAND_OFS);
	}
}
static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u32 hp_flags = hpriv->hp_flags;

	switch (board_idx) {
	case chip_5080:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x1:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying 50XXB2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_504x:
	case chip_508x:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_604x:
	case chip_608x:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_II;

		switch (pdev->revision) {
		case 0x7:
			mv_60x1b2_errata_pci7(host);
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		case 0x9:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		}
		break;

	case chip_7042:
		hp_flags |= MV_HP_PCIE | MV_HP_CUT_THROUGH;
		if (pdev->vendor == PCI_VENDOR_ID_TTI &&
		    (pdev->device == 0x2300 || pdev->device == 0x2310))
		{
			/*
			 * Highpoint RocketRAID PCIe 23xx series cards:
			 *
			 * Unconfigured drives are treated as "Legacy"
			 * by the BIOS, and it overwrites sector 8 with
			 * a "Lgcy" metadata block prior to Linux boot.
			 *
			 * Configured drives (RAID or JBOD) leave sector 8
			 * alone, but instead overwrite a high numbered
			 * sector for the RAID metadata.  This sector can
			 * be determined exactly, by truncating the physical
			 * drive capacity to a nice even GB value.
			 *
			 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
			 *
			 * Warn the user, lest they think we're just buggy.
			 */
			printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
				" BIOS CORRUPTS DATA on all attached drives,"
				" regardless of if/how they are configured."
				" BEWARE!\n");
			printk(KERN_WARNING DRV_NAME ": For data safety, do not"
				" use sectors 8-9 on \"Legacy\" drives,"
				" and avoid the final two gigabytes on"
				" all RocketRAID BIOS initialized drives.\n");
		}
		/* drop through */
	case chip_6042:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_IIE;
		if (board_idx == chip_6042 && mv_pci_cut_through_okay(host))
			hp_flags |= MV_HP_CUT_THROUGH;

		switch (pdev->revision) {
		case 0x2: /* Rev.B0: the first/only public release */
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying 60X1C0 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		}
		break;

	case chip_soc:
		hpriv->ops = &mv_soc_ops;
		hp_flags |= MV_HP_FLAG_SOC | MV_HP_GEN_IIE |
			MV_HP_ERRATA_60X1C0;
		break;

	default:
		dev_printk(KERN_ERR, host->dev,
			   "BUG: invalid board index %u\n", board_idx);
		return 1;
	}

	hpriv->hp_flags = hp_flags;
	if (hp_flags & MV_HP_PCIE) {
		hpriv->irq_cause_ofs	= PCIE_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs	= PCIE_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs	= PCIE_UNMASK_ALL_IRQS;
	} else {
		hpriv->irq_cause_ofs	= PCI_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs	= PCI_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs	= PCI_UNMASK_ALL_IRQS;
	}

	return 0;
}
/**
 *	mv_init_host - Perform some early initialization of the host.
 *	@host: ATA host to initialize
 *	@board_idx: controller index
 *
 *	If possible, do an early global reset of the host.  Then do
 *	our port init and clear/unmask all/relevant host interrupts.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int mv_init_host(struct ata_host *host, unsigned int board_idx)
{
	int rc = 0, n_hc, port, hc;
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;

	rc = mv_chip_id(host, board_idx);
	if (rc)
		goto done;

	if (IS_SOC(hpriv)) {
		hpriv->main_irq_cause_addr = mmio + SOC_HC_MAIN_IRQ_CAUSE_OFS;
		hpriv->main_irq_mask_addr  = mmio + SOC_HC_MAIN_IRQ_MASK_OFS;
	} else {
		hpriv->main_irq_cause_addr = mmio + PCI_HC_MAIN_IRQ_CAUSE_OFS;
		hpriv->main_irq_mask_addr  = mmio + PCI_HC_MAIN_IRQ_MASK_OFS;
	}

	/* initialize shadow irq mask with register's value */
	hpriv->main_irq_mask = readl(hpriv->main_irq_mask_addr);

	/* global interrupt mask: 0 == mask everything */
	mv_set_main_irq_mask(host, ~0, 0);

	n_hc = mv_get_hc_count(host->ports[0]->flags);

	for (port = 0; port < host->n_ports; port++)
		hpriv->ops->read_preamp(hpriv, port, mmio);

	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
	if (rc)
		goto done;

	hpriv->ops->reset_flash(hpriv, mmio);
	hpriv->ops->reset_bus(host, mmio);
	hpriv->ops->enable_leds(hpriv, mmio);

	for (port = 0; port < host->n_ports; port++) {
		struct ata_port *ap = host->ports[port];
		void __iomem *port_mmio = mv_port_base(mmio, port);

		mv_port_init(&ap->ioaddr, port_mmio);

		if (!IS_SOC(hpriv)) {
			unsigned int offset = port_mmio - mmio;
			ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
			ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
		}
	}

	for (hc = 0; hc < n_hc; hc++) {
		void __iomem *hc_mmio = mv_hc_base(mmio, hc);

		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
			"(before clear)=0x%08x\n", hc,
			readl(hc_mmio + HC_CFG_OFS),
			readl(hc_mmio + HC_IRQ_CAUSE_OFS));

		/* Clear any currently outstanding hc interrupt conditions */
		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
	}

	if (!IS_SOC(hpriv)) {
		/* Clear any currently outstanding host interrupt conditions */
		writelfl(0, mmio + hpriv->irq_cause_ofs);

		/* and unmask interrupt generation for host regs */
		writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
	}

	/*
	 * enable only global host interrupts for now.
	 * The per-port interrupts get done later as ports are set up.
	 */
	mv_set_main_irq_mask(host, 0, PCI_ERR);
	mv_set_irq_coalescing(host, irq_coalescing_io_count,
				    irq_coalescing_usecs);
done:
	return rc;
}
static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
{
	hpriv->crqb_pool   = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
					      MV_CRQB_Q_SZ, 0);
	if (!hpriv->crqb_pool)
		return -ENOMEM;

	hpriv->crpb_pool   = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
					      MV_CRPB_Q_SZ, 0);
	if (!hpriv->crpb_pool)
		return -ENOMEM;

	hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
					      MV_SG_TBL_SZ, 0);
	if (!hpriv->sg_tbl_pool)
		return -ENOMEM;

	return 0;
}
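
/*
 * Note: these are device-managed (dmam_*) pools, so the driver needs
 * no matching destroy calls -- the pools are released automatically
 * when the device is detached.
 */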
static void mv_conf_mbus_windows(struct mv_host_priv *hpriv,
				 struct mbus_dram_target_info *dram)
{
	int i;

	for (i = 0; i < 4; i++) {
		writel(0, hpriv->base + WINDOW_CTRL(i));
		writel(0, hpriv->base + WINDOW_BASE(i));
	}

	for (i = 0; i < dram->num_cs; i++) {
		struct mbus_dram_window *cs = dram->cs + i;

		writel(((cs->size - 1) & 0xffff0000) |
			(cs->mbus_attr << 8) |
			(dram->mbus_dram_target_id << 4) | 1,
			hpriv->base + WINDOW_CTRL(i));
		writel(cs->base, hpriv->base + WINDOW_BASE(i));
	}
}
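
/*
 * Worked example (hypothetical values): a 256 MB chip-select with
 * mbus_attr 0xe and target id 0 would program WINDOW_CTRL(i) with
 * ((0x10000000 - 1) & 0xffff0000) | (0xe << 8) | (0 << 4) | 1,
 * i.e. 0x0fff0e01: size field in the upper half, attribute byte,
 * target nibble, and the window-enable bit.
 */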
/**
 *	mv_platform_probe - handle a positive probe of a SoC Marvell
 *	host
 *	@pdev: platform device found
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int mv_platform_probe(struct platform_device *pdev)
{
	static int printed_version;
	const struct mv_sata_platform_data *mv_platform_data;
	const struct ata_port_info *ppi[] =
	    { &mv_port_info[chip_soc], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	struct resource *res;
	int n_ports, rc;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/*
	 * Simple resource validation ..
	 */
	if (unlikely(pdev->num_resources != 2)) {
		dev_err(&pdev->dev, "invalid number of resources\n");
		return -EINVAL;
	}

	/*
	 * Get the register base first
	 */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL)
		return -EINVAL;

	/* allocate host */
	mv_platform_data = pdev->dev.platform_data;
	n_ports = mv_platform_data->n_ports;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);

	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;
	hpriv->n_ports = n_ports;

	host->iomap = NULL;
	hpriv->base = devm_ioremap(&pdev->dev, res->start,
				   res->end - res->start + 1);
	hpriv->base -= MV_SATAHC0_REG_BASE;

	/*
	 * (Re-)program MBUS remapping windows if we are asked to.
	 */
	if (mv_platform_data->dram != NULL)
		mv_conf_mbus_windows(hpriv, mv_platform_data->dram);

	rc = mv_create_dma_pools(hpriv, &pdev->dev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host, chip_soc);
	if (rc)
		return rc;

	dev_printk(KERN_INFO, &pdev->dev,
		   "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH,
		   host->n_ports);

	return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt,
				 IRQF_SHARED, &mv6_sht);
}
/**
 *	mv_platform_remove - unplug a platform interface
 *	@pdev: platform device
 *
 *	A platform bus SATA device has been unplugged. Perform the needed
 *	cleanup. Also called on module unload for any active devices.
 */
static int __devexit mv_platform_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct ata_host *host = dev_get_drvdata(dev);

	ata_host_detach(host);
	return 0;
}

static struct platform_driver mv_platform_driver = {
	.probe			= mv_platform_probe,
	.remove			= __devexit_p(mv_platform_remove),
	.driver			= {
				   .name = DRV_NAME,
				   .owner = THIS_MODULE,
				  },
};
static int mv_pci_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent);

static struct pci_driver mv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= mv_pci_tbl,
	.probe			= mv_pci_init_one,
	.remove			= ata_pci_remove_one,
};
/* move to PCI layer or libata core? */
static int pci_go_64(struct pci_dev *pdev)
{
	int rc;

	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (rc) {
			rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
			if (rc) {
				dev_printk(KERN_ERR, &pdev->dev,
					   "64-bit DMA enable failed\n");
				return rc;
			}
		}
	} else {
		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit DMA enable failed\n");
			return rc;
		}
		rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit consistent DMA enable failed\n");
			return rc;
		}
	}

	return rc;
}
/**
 *	mv_print_info - Dump key info to kernel log for perusal.
 *	@host: ATA host to print info about
 *
 *	FIXME: complete this.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_print_info(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u8 scc;
	const char *scc_s, *gen;

	/* Use this to determine the HW stepping of the chip so we know
	 * what errata to workaround
	 */
	pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
	if (scc == 0)
		scc_s = "SCSI";
	else if (scc == 0x01)
		scc_s = "RAID";
	else
		scc_s = "?";

	if (IS_GEN_I(hpriv))
		gen = "I";
	else if (IS_GEN_II(hpriv))
		gen = "II";
	else if (IS_GEN_IIE(hpriv))
		gen = "IIE";
	else
		gen = "?";

	dev_printk(KERN_INFO, &pdev->dev,
	       "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
	       gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
	       scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
}
/**
 *	mv_pci_init_one - handle a positive probe of a PCI Marvell host
 *	@pdev: PCI device found
 *	@ent: PCI device ID entry for the matched host
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int mv_pci_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent)
{
	static int printed_version;
	unsigned int board_idx = (unsigned int)ent->driver_data;
	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	int n_ports, rc;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/* allocate host */
	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;
	hpriv->n_ports = n_ports;

	/* acquire resources */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);
	hpriv->base = host->iomap[MV_PRIMARY_BAR];

	rc = pci_go_64(pdev);
	if (rc)
		return rc;

	rc = mv_create_dma_pools(hpriv, &pdev->dev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host, board_idx);
	if (rc)
		return rc;

	/* Enable message-signalled interrupts, if requested */
	if (msi && pci_enable_msi(pdev) == 0)
		hpriv->hp_flags |= MV_HP_FLAG_MSI;

	mv_dump_pci_cfg(pdev, 0x68);
	mv_print_info(host);

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);
	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
}
static int mv_platform_probe(struct platform_device *pdev);
static int __devexit mv_platform_remove(struct platform_device *pdev);

static int __init mv_init(void)
{
	int rc;

	rc = pci_register_driver(&mv_pci_driver);
	if (rc < 0)
		return rc;

	rc = platform_driver_register(&mv_platform_driver);
	if (rc < 0)
		pci_unregister_driver(&mv_pci_driver);

	return rc;
}

static void __exit mv_exit(void)
{
	pci_unregister_driver(&mv_pci_driver);
	platform_driver_unregister(&mv_platform_driver);
}
MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:" DRV_NAME);

module_init(mv_init);
module_exit(mv_exit);