/*
 * sata_sil24.c - Driver for Silicon Image 3124/3132 SATA-2 controllers
 *
 * Copyright 2005 Tejun Heo
 *
 * Based on preview driver from Silicon Image.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <linux/libata.h>
#define DRV_NAME	"sata_sil24"
#define DRV_VERSION	"1.1"
/*
 * Port request block (PRB) 32 bytes
 */

/*
 * Scatter gather entry (SGE) 16 bytes
 */
enum {
	SIL24_HOST_BAR		= 0,
	SIL24_PORT_BAR		= 2,

	/* sil24 fetches in chunks of 64bytes.  The first block
	 * contains the PRB and two SGEs.  Each following block
	 * consists of four SGEs and is called an SGT.  Calculate the
	 * number of SGTs that fit into one page.
	 */
	SIL24_PRB_SZ		= sizeof(struct sil24_prb)
				  + 2 * sizeof(struct sil24_sge),
	SIL24_MAX_SGT		= (PAGE_SIZE - SIL24_PRB_SZ)
				  / (4 * sizeof(struct sil24_sge)),

	/* This will give us one unused SGE for ATA.  This extra SGE
	 * will be used to store the CDB for ATAPI devices.
	 */
	SIL24_MAX_SGE		= 4 * SIL24_MAX_SGT + 1,
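	/*
	 * Worked example (a sketch, assuming a 4 KiB PAGE_SIZE and the
	 * 32-byte PRB / 16-byte SGE sizes described above):
	 *
	 *	SIL24_PRB_SZ  = 32 + 2 * 16            = 64
	 *	SIL24_MAX_SGT = (4096 - 64) / (4 * 16) = 63
	 *	SIL24_MAX_SGE = 4 * 63 + 1             = 253
	 *
	 * so one command block fills a single 4 KiB page, which the
	 * PAGE_SIZE check in sil24_init_one() relies on.
	 */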
	/*
	 * Global controller registers (128 bytes @ BAR0)
	 */
	HOST_SLOT_STAT		= 0x00, /* 32 bit slot stat * 4 */
	HOST_CTRL		= 0x40,
	HOST_IRQ_STAT		= 0x44,
	HOST_BIST_CTRL		= 0x50,
	HOST_BIST_PTRN		= 0x54,
	HOST_BIST_STAT		= 0x58,
	HOST_MEM_BIST_STAT	= 0x5c,
	HOST_FLASH_CMD		= 0x70,
	HOST_FLASH_DATA		= 0x74,
	HOST_TRANSITION_DETECT	= 0x75,
	HOST_GPIO_CTRL		= 0x76,
	HOST_I2C_ADDR		= 0x78, /* 32 bit */
	HOST_I2C_XFER_CNT	= 0x7e,

	/* HOST_SLOT_STAT bits */
	HOST_SSTAT_ATTN		= (1 << 31),

	/* HOST_CTRL bits */
	HOST_CTRL_M66EN		= (1 << 16), /* M66EN PCI bus signal */
	HOST_CTRL_TRDY		= (1 << 17), /* latched PCI TRDY */
	HOST_CTRL_STOP		= (1 << 18), /* latched PCI STOP */
	HOST_CTRL_DEVSEL	= (1 << 19), /* latched PCI DEVSEL */
	HOST_CTRL_REQ64		= (1 << 20), /* latched PCI REQ64 */
	HOST_CTRL_GLOBAL_RST	= (1 << 31), /* global reset */
	/*
	 * Port registers
	 * (8192 bytes @ +0x0000, +0x2000, +0x4000 and +0x6000 @ BAR2)
	 */
	PORT_REGS_SIZE		= 0x2000,

	PORT_LRAM		= 0x0000, /* 31 LRAM slots and PMP regs */
	PORT_LRAM_SLOT_SZ	= 0x0080, /* 32 bytes PRB + 2 SGE, ACT... */

	PORT_PMP		= 0x0f80, /* 8 bytes PMP * 16 (128 bytes) */
	PORT_PMP_STATUS		= 0x0000, /* port device status offset */
	PORT_PMP_QACTIVE	= 0x0004, /* port device QActive offset */
	PORT_PMP_SIZE		= 0x0008, /* 8 bytes per PMP */
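	/*
	 * The per-device PMP registers are 16 consecutive 8-byte slots
	 * starting at PORT_PMP, so the address of a given device's slot
	 * works out as (sketch, matching the loop in sil24_clear_pmp()
	 * below):
	 *
	 *	pmp_base = port + PORT_PMP + pmp * PORT_PMP_SIZE;
	 *	status   = pmp_base + PORT_PMP_STATUS;
	 *	qactive  = pmp_base + PORT_PMP_QACTIVE;
	 */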
	PORT_CTRL_STAT		= 0x1000, /* write: ctrl-set, read: stat */
	PORT_CTRL_CLR		= 0x1004, /* write: ctrl-clear */
	PORT_IRQ_STAT		= 0x1008, /* high: status, low: interrupt */
	PORT_IRQ_ENABLE_SET	= 0x1010, /* write: enable-set */
	PORT_IRQ_ENABLE_CLR	= 0x1014, /* write: enable-clear */
	PORT_ACTIVATE_UPPER_ADDR = 0x101c,
	PORT_EXEC_FIFO		= 0x1020, /* command execution fifo */
	PORT_CMD_ERR		= 0x1024, /* command error number */
	PORT_FIS_CFG		= 0x1028,
	PORT_FIFO_THRES		= 0x102c,

	PORT_DECODE_ERR_CNT	= 0x1040,
	PORT_DECODE_ERR_THRESH	= 0x1042,
	PORT_CRC_ERR_CNT	= 0x1044,
	PORT_CRC_ERR_THRESH	= 0x1046,
	PORT_HSHK_ERR_CNT	= 0x1048,
	PORT_HSHK_ERR_THRESH	= 0x104a,

	PORT_PHY_CFG		= 0x1050,
	PORT_SLOT_STAT		= 0x1800,
	PORT_CMD_ACTIVATE	= 0x1c00, /* 64 bit cmd activate * 31 (248 bytes) */
	PORT_CONTEXT		= 0x1e04,
	PORT_EXEC_DIAG		= 0x1e00, /* 32bit exec diag * 16 (64 bytes, 0-10 used on 3124) */
	PORT_PSD_DIAG		= 0x1e40, /* 32bit psd diag * 16 (64 bytes, 0-8 used on 3124) */
	PORT_SCONTROL		= 0x1f00,
	PORT_SSTATUS		= 0x1f04,
	PORT_SERROR		= 0x1f08,
	PORT_SACTIVE		= 0x1f0c,
	/* PORT_CTRL_STAT bits */
	PORT_CS_PORT_RST	= (1 << 0), /* port reset */
	PORT_CS_DEV_RST		= (1 << 1), /* device reset */
	PORT_CS_INIT		= (1 << 2), /* port initialize */
	PORT_CS_IRQ_WOC		= (1 << 3), /* interrupt write one to clear */
	PORT_CS_CDB16		= (1 << 5), /* 0=12b cdb, 1=16b cdb */
	PORT_CS_PMP_RESUME	= (1 << 6), /* PMP resume */
	PORT_CS_32BIT_ACTV	= (1 << 10), /* 32-bit activation */
	PORT_CS_PMP_EN		= (1 << 13), /* port multiplier enable */
	PORT_CS_RDY		= (1 << 31), /* port ready to accept commands */
	/* PORT_IRQ_STAT/ENABLE_SET/CLR */
	/* bits[11:0] are masked */
	PORT_IRQ_COMPLETE	= (1 << 0), /* command(s) completed */
	PORT_IRQ_ERROR		= (1 << 1), /* command execution error */
	PORT_IRQ_PORTRDY_CHG	= (1 << 2), /* port ready change */
	PORT_IRQ_PWR_CHG	= (1 << 3), /* power management change */
	PORT_IRQ_PHYRDY_CHG	= (1 << 4), /* PHY ready change */
	PORT_IRQ_COMWAKE	= (1 << 5), /* COMWAKE received */
	PORT_IRQ_UNK_FIS	= (1 << 6), /* unknown FIS received */
	PORT_IRQ_DEV_XCHG	= (1 << 7), /* device exchanged */
	PORT_IRQ_8B10B		= (1 << 8), /* 8b/10b decode error threshold */
	PORT_IRQ_CRC		= (1 << 9), /* CRC error threshold */
	PORT_IRQ_HANDSHAKE	= (1 << 10), /* handshake error threshold */
	PORT_IRQ_SDB_NOTIFY	= (1 << 11), /* SDB notify received */

	DEF_PORT_IRQ		= PORT_IRQ_COMPLETE | PORT_IRQ_ERROR |
				  PORT_IRQ_PHYRDY_CHG | PORT_IRQ_DEV_XCHG |
				  PORT_IRQ_UNK_FIS | PORT_IRQ_SDB_NOTIFY,

	/* bits[27:16] are unmasked (raw) */
	PORT_IRQ_RAW_SHIFT	= 16,
	PORT_IRQ_MASKED_MASK	= 0x7ff,
	PORT_IRQ_RAW_MASK	= (0x7ff << PORT_IRQ_RAW_SHIFT),
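	/*
	 * The same per-port events show up twice in PORT_IRQ_STAT: the low
	 * bits are the maskable copy gated by ENABLE_SET/CLR, and the same
	 * bits shifted up by PORT_IRQ_RAW_SHIFT are the raw, unmasked
	 * status.  sil24_exec_polled_cmd() below polls the raw half so it
	 * can wait for completion with the corresponding interrupts
	 * disabled.
	 */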
	/* ENABLE_SET/CLR specific, intr steering - 2 bit field */
	PORT_IRQ_STEER_SHIFT	= 30,
	PORT_IRQ_STEER_MASK	= (3 << PORT_IRQ_STEER_SHIFT),
	/* PORT_CMD_ERR constants */
	PORT_CERR_DEV		= 1, /* Error bit in D2H Register FIS */
	PORT_CERR_SDB		= 2, /* Error bit in SDB FIS */
	PORT_CERR_DATA		= 3, /* Error in data FIS not detected by dev */
	PORT_CERR_SEND		= 4, /* Initial cmd FIS transmission failure */
	PORT_CERR_INCONSISTENT	= 5, /* Protocol mismatch */
	PORT_CERR_DIRECTION	= 6, /* Data direction mismatch */
	PORT_CERR_UNDERRUN	= 7, /* Ran out of SGEs while writing */
	PORT_CERR_OVERRUN	= 8, /* Ran out of SGEs while reading */
	PORT_CERR_PKT_PROT	= 11, /* DIR invalid in 1st PIO setup of ATAPI */
	PORT_CERR_SGT_BOUNDARY	= 16, /* PLD ecode 00 - SGT not on qword boundary */
	PORT_CERR_SGT_TGTABRT	= 17, /* PLD ecode 01 - target abort */
	PORT_CERR_SGT_MSTABRT	= 18, /* PLD ecode 10 - master abort */
	PORT_CERR_SGT_PCIPERR	= 19, /* PLD ecode 11 - PCI parity err while fetching SGT */
	PORT_CERR_CMD_BOUNDARY	= 24, /* ctrl[15:13] 001 - PRB not on qword boundary */
	PORT_CERR_CMD_TGTABRT	= 25, /* ctrl[15:13] 010 - target abort */
	PORT_CERR_CMD_MSTABRT	= 26, /* ctrl[15:13] 100 - master abort */
	PORT_CERR_CMD_PCIPERR	= 27, /* ctrl[15:13] 110 - PCI parity err while fetching PRB */
	PORT_CERR_XFR_UNDEF	= 32, /* PSD ecode 00 - undefined */
	PORT_CERR_XFR_TGTABRT	= 33, /* PSD ecode 01 - target abort */
	PORT_CERR_XFR_MSTABRT	= 34, /* PSD ecode 10 - master abort */
	PORT_CERR_XFR_PCIPERR	= 35, /* PSD ecode 11 - PCI parity err during transfer */
	PORT_CERR_SENDSERVICE	= 36, /* FIS received while sending service */
	/* bits of PRB control field */
	PRB_CTRL_PROTOCOL	= (1 << 0), /* override def. ATA protocol */
	PRB_CTRL_PACKET_READ	= (1 << 4), /* PACKET cmd read */
	PRB_CTRL_PACKET_WRITE	= (1 << 5), /* PACKET cmd write */
	PRB_CTRL_NIEN		= (1 << 6), /* Mask completion irq */
	PRB_CTRL_SRST		= (1 << 7), /* Soft reset request (ign BSY?) */

	/* PRB protocol field */
	PRB_PROT_PACKET		= (1 << 0),
	PRB_PROT_TCQ		= (1 << 1),
	PRB_PROT_NCQ		= (1 << 2),
	PRB_PROT_READ		= (1 << 3),
	PRB_PROT_WRITE		= (1 << 4),
	PRB_PROT_TRANSPARENT	= (1 << 5),

	SGE_TRM			= (1 << 31), /* Last SGE in chain */
	SGE_LNK			= (1 << 30), /* linked list
						Points to SGT, not SGE */
	SGE_DRD			= (1 << 29), /* discard data read (/dev/null)
						data address ignored */
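	/*
	 * Note on SGE usage in this driver: sil24_fill_sg() builds one flat
	 * SGE array per command and sets SGE_TRM only on the final entry;
	 * SGE_LNK and SGE_DRD are defined for completeness but are not set
	 * anywhere in this file.
	 */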
	SIL24_COMMON_FLAGS	= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
				  ATA_FLAG_NCQ | ATA_FLAG_ACPI_SATA |
				  ATA_FLAG_AN | ATA_FLAG_PMP,
	SIL24_FLAG_PCIX_IRQ_WOC	= (1 << 24), /* IRQ loss errata on PCI-X */

	IRQ_STAT_4PORTS		= 0xf,
};
struct sil24_ata_block {
	struct sil24_prb prb;
	struct sil24_sge sge[SIL24_MAX_SGE];
};

struct sil24_atapi_block {
	struct sil24_prb prb;
	u8 cdb[16];
	struct sil24_sge sge[SIL24_MAX_SGE];
};

union sil24_cmd_block {
	struct sil24_ata_block ata;
	struct sil24_atapi_block atapi;
};
static struct sil24_cerr_info {
	unsigned int err_mask, action;
	const char *desc;
} sil24_cerr_db[] = {
	[0]			= { AC_ERR_DEV, 0,
				    "device error" },
	[PORT_CERR_DEV]		= { AC_ERR_DEV, 0,
				    "device error via D2H FIS" },
	[PORT_CERR_SDB]		= { AC_ERR_DEV, 0,
				    "device error via SDB FIS" },
	[PORT_CERR_DATA]	= { AC_ERR_ATA_BUS, ATA_EH_RESET,
				    "error in data FIS" },
	[PORT_CERR_SEND]	= { AC_ERR_ATA_BUS, ATA_EH_RESET,
				    "failed to transmit command FIS" },
	[PORT_CERR_INCONSISTENT] = { AC_ERR_HSM, ATA_EH_RESET,
				     "protocol mismatch" },
	[PORT_CERR_DIRECTION]	= { AC_ERR_HSM, ATA_EH_RESET,
				    "data direction mismatch" },
	[PORT_CERR_UNDERRUN]	= { AC_ERR_HSM, ATA_EH_RESET,
				    "ran out of SGEs while writing" },
	[PORT_CERR_OVERRUN]	= { AC_ERR_HSM, ATA_EH_RESET,
				    "ran out of SGEs while reading" },
	[PORT_CERR_PKT_PROT]	= { AC_ERR_HSM, ATA_EH_RESET,
				    "invalid data direction for ATAPI CDB" },
	[PORT_CERR_SGT_BOUNDARY] = { AC_ERR_SYSTEM, ATA_EH_RESET,
				     "SGT not on qword boundary" },
	[PORT_CERR_SGT_TGTABRT]	= { AC_ERR_HOST_BUS, ATA_EH_RESET,
				    "PCI target abort while fetching SGT" },
	[PORT_CERR_SGT_MSTABRT]	= { AC_ERR_HOST_BUS, ATA_EH_RESET,
				    "PCI master abort while fetching SGT" },
	[PORT_CERR_SGT_PCIPERR]	= { AC_ERR_HOST_BUS, ATA_EH_RESET,
				    "PCI parity error while fetching SGT" },
	[PORT_CERR_CMD_BOUNDARY] = { AC_ERR_SYSTEM, ATA_EH_RESET,
				     "PRB not on qword boundary" },
	[PORT_CERR_CMD_TGTABRT]	= { AC_ERR_HOST_BUS, ATA_EH_RESET,
				    "PCI target abort while fetching PRB" },
	[PORT_CERR_CMD_MSTABRT]	= { AC_ERR_HOST_BUS, ATA_EH_RESET,
				    "PCI master abort while fetching PRB" },
	[PORT_CERR_CMD_PCIPERR]	= { AC_ERR_HOST_BUS, ATA_EH_RESET,
				    "PCI parity error while fetching PRB" },
	[PORT_CERR_XFR_UNDEF]	= { AC_ERR_HOST_BUS, ATA_EH_RESET,
				    "undefined error while transferring data" },
	[PORT_CERR_XFR_TGTABRT]	= { AC_ERR_HOST_BUS, ATA_EH_RESET,
				    "PCI target abort while transferring data" },
	[PORT_CERR_XFR_MSTABRT]	= { AC_ERR_HOST_BUS, ATA_EH_RESET,
				    "PCI master abort while transferring data" },
	[PORT_CERR_XFR_PCIPERR]	= { AC_ERR_HOST_BUS, ATA_EH_RESET,
				    "PCI parity error while transferring data" },
	[PORT_CERR_SENDSERVICE]	= { AC_ERR_HSM, ATA_EH_RESET,
				    "FIS received while sending service FIS" },
};
/* The preview driver always returned 0 for status.  We emulate it
 * here from the previous interrupt.
 */
struct sil24_port_priv {
	union sil24_cmd_block *cmd_block;	/* 32 cmd blocks */
	dma_addr_t cmd_block_dma;		/* DMA base addr for them */
	int do_port_rst;
};
static void sil24_dev_config(struct ata_device *dev);
static int sil24_scr_read(struct ata_link *link, unsigned sc_reg, u32 *val);
static int sil24_scr_write(struct ata_link *link, unsigned sc_reg, u32 val);
static int sil24_qc_defer(struct ata_queued_cmd *qc);
static void sil24_qc_prep(struct ata_queued_cmd *qc);
static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc);
static bool sil24_qc_fill_rtf(struct ata_queued_cmd *qc);
static void sil24_pmp_attach(struct ata_port *ap);
static void sil24_pmp_detach(struct ata_port *ap);
static void sil24_freeze(struct ata_port *ap);
static void sil24_thaw(struct ata_port *ap);
static int sil24_softreset(struct ata_link *link, unsigned int *class,
			   unsigned long deadline);
static int sil24_hardreset(struct ata_link *link, unsigned int *class,
			   unsigned long deadline);
static int sil24_pmp_hardreset(struct ata_link *link, unsigned int *class,
			       unsigned long deadline);
static void sil24_error_handler(struct ata_port *ap);
static void sil24_post_internal_cmd(struct ata_queued_cmd *qc);
static int sil24_port_start(struct ata_port *ap);
static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
static int sil24_pci_device_resume(struct pci_dev *pdev);
static int sil24_port_resume(struct ata_port *ap);
static const struct pci_device_id sil24_pci_tbl[] = {
	{ PCI_VDEVICE(CMD, 0x3124), BID_SIL3124 },
	{ PCI_VDEVICE(INTEL, 0x3124), BID_SIL3124 },
	{ PCI_VDEVICE(CMD, 0x3132), BID_SIL3132 },
	{ PCI_VDEVICE(CMD, 0x0242), BID_SIL3132 },
	{ PCI_VDEVICE(CMD, 0x0244), BID_SIL3132 },
	{ PCI_VDEVICE(CMD, 0x3131), BID_SIL3131 },
	{ PCI_VDEVICE(CMD, 0x3531), BID_SIL3131 },

	{ } /* terminate list */
};
static struct pci_driver sil24_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= sil24_pci_tbl,
	.probe			= sil24_init_one,
	.remove			= ata_pci_remove_one,
	.suspend		= ata_pci_device_suspend,
	.resume			= sil24_pci_device_resume,
};
static struct scsi_host_template sil24_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	.can_queue		= SIL24_MAX_CMDS,
	.sg_tablesize		= SIL24_MAX_SGE,
	.dma_boundary		= ATA_DMA_BOUNDARY,
};
static struct ata_port_operations sil24_ops = {
	.inherits		= &sata_pmp_port_ops,

	.qc_defer		= sil24_qc_defer,
	.qc_prep		= sil24_qc_prep,
	.qc_issue		= sil24_qc_issue,
	.qc_fill_rtf		= sil24_qc_fill_rtf,

	.freeze			= sil24_freeze,
	.thaw			= sil24_thaw,
	.softreset		= sil24_softreset,
	.hardreset		= sil24_hardreset,
	.pmp_softreset		= sil24_softreset,
	.pmp_hardreset		= sil24_pmp_hardreset,
	.error_handler		= sil24_error_handler,
	.post_internal_cmd	= sil24_post_internal_cmd,
	.dev_config		= sil24_dev_config,

	.scr_read		= sil24_scr_read,
	.scr_write		= sil24_scr_write,
	.pmp_attach		= sil24_pmp_attach,
	.pmp_detach		= sil24_pmp_detach,

	.port_start		= sil24_port_start,
	.port_resume		= sil24_port_resume,
};
static int sata_sil24_msi;	/* Disable MSI */
module_param_named(msi, sata_sil24_msi, bool, S_IRUGO);
MODULE_PARM_DESC(msi, "Enable MSI (Default: false)");
/*
 * Use bits 30-31 of port_flags to encode available port numbers.
 * Current maximum is 4.
 */
#define SIL24_NPORTS2FLAG(nports)	((((unsigned)(nports) - 1) & 0x3) << 30)
#define SIL24_FLAG2NPORTS(flag)		((((flag) >> 30) & 0x3) + 1)
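/*
 * Worked example: SIL24_NPORTS2FLAG(4) stores (4 - 1) & 0x3 == 3 in bits
 * 30-31 of the flags, and SIL24_FLAG2NPORTS() on that value yields
 * 3 + 1 == 4 again; sil24_init_one() uses this to recover the port count
 * from sil24_port_info[].flags.
 */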
static const struct ata_port_info sil24_port_info[] = {
	/* sil_3124 */
	{
		.flags		= SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(4) |
				  SIL24_FLAG_PCIX_IRQ_WOC,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA5,
		.port_ops	= &sil24_ops,
	},
	/* sil_3132 */
	{
		.flags		= SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(2),
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA5,
		.port_ops	= &sil24_ops,
	},
	/* sil_3131/sil_3531 */
	{
		.flags		= SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(1),
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA5,
		.port_ops	= &sil24_ops,
	},
};
/* libata's internal command uses hardware slot 0 */
static int sil24_tag(int tag)
{
	if (unlikely(ata_tag_internal(tag)))
		return 0;
	return tag;
}
static unsigned long sil24_port_offset(struct ata_port *ap)
{
	return ap->port_no * PORT_REGS_SIZE;
}
static void __iomem *sil24_port_base(struct ata_port *ap)
{
	return ap->host->iomap[SIL24_PORT_BAR] + sil24_port_offset(ap);
}
static void sil24_dev_config(struct ata_device *dev)
{
	void __iomem *port = sil24_port_base(dev->link->ap);

	if (dev->cdb_len == 16)
		writel(PORT_CS_CDB16, port + PORT_CTRL_STAT);
	else
		writel(PORT_CS_CDB16, port + PORT_CTRL_CLR);
}
static void sil24_read_tf(struct ata_port *ap, int tag, struct ata_taskfile *tf)
{
	void __iomem *port = sil24_port_base(ap);
	struct sil24_prb __iomem *prb;
	u8 fis[6 * 4];

	prb = port + PORT_LRAM + sil24_tag(tag) * PORT_LRAM_SLOT_SZ;
	memcpy_fromio(fis, prb->fis, sizeof(fis));
	ata_tf_from_fis(fis, tf);
}
static int sil24_scr_map[] = {
	[SCR_CONTROL]	= 0,
	[SCR_STATUS]	= 1,
	[SCR_ERROR]	= 2,
	[SCR_ACTIVE]	= 3,
};

static int sil24_scr_read(struct ata_link *link, unsigned sc_reg, u32 *val)
{
	void __iomem *scr_addr = sil24_port_base(link->ap) + PORT_SCONTROL;

	if (sc_reg < ARRAY_SIZE(sil24_scr_map)) {
		*val = readl(scr_addr + sil24_scr_map[sc_reg] * 4);
		return 0;
	}
	return -EINVAL;
}
static int sil24_scr_write(struct ata_link *link, unsigned sc_reg, u32 val)
{
	void __iomem *scr_addr = sil24_port_base(link->ap) + PORT_SCONTROL;

	if (sc_reg < ARRAY_SIZE(sil24_scr_map)) {
		writel(val, scr_addr + sil24_scr_map[sc_reg] * 4);
		return 0;
	}
	return -EINVAL;
}
static void sil24_config_port(struct ata_port *ap)
{
	void __iomem *port = sil24_port_base(ap);

	/* configure IRQ WoC */
	if (ap->flags & SIL24_FLAG_PCIX_IRQ_WOC)
		writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_STAT);
	else
		writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_CLR);

	/* zero error counters. */
	writew(0x8000, port + PORT_DECODE_ERR_THRESH);
	writew(0x8000, port + PORT_CRC_ERR_THRESH);
	writew(0x8000, port + PORT_HSHK_ERR_THRESH);
	writew(0x0000, port + PORT_DECODE_ERR_CNT);
	writew(0x0000, port + PORT_CRC_ERR_CNT);
	writew(0x0000, port + PORT_HSHK_ERR_CNT);

	/* always use 64bit activation */
	writel(PORT_CS_32BIT_ACTV, port + PORT_CTRL_CLR);

	/* clear port multiplier enable and resume bits */
	writel(PORT_CS_PMP_EN | PORT_CS_PMP_RESUME, port + PORT_CTRL_CLR);
}
static void sil24_config_pmp(struct ata_port *ap, int attached)
{
	void __iomem *port = sil24_port_base(ap);

	if (attached)
		writel(PORT_CS_PMP_EN, port + PORT_CTRL_STAT);
	else
		writel(PORT_CS_PMP_EN, port + PORT_CTRL_CLR);
}
static void sil24_clear_pmp(struct ata_port *ap)
{
	void __iomem *port = sil24_port_base(ap);
	int i;

	writel(PORT_CS_PMP_RESUME, port + PORT_CTRL_CLR);

	for (i = 0; i < SATA_PMP_MAX_PORTS; i++) {
		void __iomem *pmp_base = port + PORT_PMP + i * PORT_PMP_SIZE;

		writel(0, pmp_base + PORT_PMP_STATUS);
		writel(0, pmp_base + PORT_PMP_QACTIVE);
	}
}
static int sil24_init_port(struct ata_port *ap)
{
	void __iomem *port = sil24_port_base(ap);
	struct sil24_port_priv *pp = ap->private_data;
	u32 tmp;

	/* clear PMP error status */
	if (sata_pmp_attached(ap))
		sil24_clear_pmp(ap);

	writel(PORT_CS_INIT, port + PORT_CTRL_STAT);
	ata_wait_register(ap, port + PORT_CTRL_STAT,
			  PORT_CS_INIT, PORT_CS_INIT, 10, 100);
	tmp = ata_wait_register(ap, port + PORT_CTRL_STAT,
				PORT_CS_RDY, 0, 10, 100);

	if ((tmp & (PORT_CS_INIT | PORT_CS_RDY)) != PORT_CS_RDY) {
		pp->do_port_rst = 1;
		ap->link.eh_context.i.action |= ATA_EH_RESET;
		return -EIO;
	}

	return 0;
}
static int sil24_exec_polled_cmd(struct ata_port *ap, int pmp,
				 const struct ata_taskfile *tf,
				 int is_cmd, u32 ctrl,
				 unsigned long timeout_msec)
{
	void __iomem *port = sil24_port_base(ap);
	struct sil24_port_priv *pp = ap->private_data;
	struct sil24_prb *prb = &pp->cmd_block[0].ata.prb;
	dma_addr_t paddr = pp->cmd_block_dma;
	u32 irq_enabled, irq_mask, irq_stat;
	int rc;

	prb->ctrl = cpu_to_le16(ctrl);
	ata_tf_to_fis(tf, pmp, is_cmd, prb->fis);

	/* temporarily plug completion and error interrupts */
	irq_enabled = readl(port + PORT_IRQ_ENABLE_SET);
	writel(PORT_IRQ_COMPLETE | PORT_IRQ_ERROR, port + PORT_IRQ_ENABLE_CLR);

	/*
	 * The barrier is required to ensure that writes to cmd_block reach
	 * the memory before the write to PORT_CMD_ACTIVATE.
	 */
	wmb();

	writel((u32)paddr, port + PORT_CMD_ACTIVATE);
	writel((u64)paddr >> 32, port + PORT_CMD_ACTIVATE + 4);

	irq_mask = (PORT_IRQ_COMPLETE | PORT_IRQ_ERROR) << PORT_IRQ_RAW_SHIFT;
	irq_stat = ata_wait_register(ap, port + PORT_IRQ_STAT, irq_mask, 0x0,
				     10, timeout_msec);

	writel(irq_mask, port + PORT_IRQ_STAT); /* clear IRQs */
	irq_stat >>= PORT_IRQ_RAW_SHIFT;

	if (irq_stat & PORT_IRQ_COMPLETE)
		rc = 0;
	else {
		/* force port into known state */
		sil24_init_port(ap);

		if (irq_stat & PORT_IRQ_ERROR)
			rc = -EIO;
		else
			rc = -EBUSY;
	}

	/* restore IRQ enabled */
	writel(irq_enabled, port + PORT_IRQ_ENABLE_SET);

	return rc;
}
static int sil24_softreset(struct ata_link *link, unsigned int *class,
			   unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	int pmp = sata_srst_pmp(link);
	unsigned long timeout_msec = 0;
	struct ata_taskfile tf;
	const char *reason;
	int rc;

	/* put the port into known state */
	if (sil24_init_port(ap)) {
		reason = "port not ready";
		goto err;
	}

	/* do SRST */
	if (time_after(deadline, jiffies))
		timeout_msec = jiffies_to_msecs(deadline - jiffies);

	ata_tf_init(link->device, &tf);	/* doesn't really matter */
	rc = sil24_exec_polled_cmd(ap, pmp, &tf, 0, PRB_CTRL_SRST,
				   timeout_msec);
	if (rc) {
		reason = "SRST command error";
		goto err;
	}

	sil24_read_tf(ap, 0, &tf);
	*class = ata_dev_classify(&tf);

	DPRINTK("EXIT, class=%u\n", *class);
	return 0;

 err:
	ata_link_printk(link, KERN_ERR, "softreset failed (%s)\n", reason);
	return -EIO;
}
static int sil24_hardreset(struct ata_link *link, unsigned int *class,
			   unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	void __iomem *port = sil24_port_base(ap);
	struct sil24_port_priv *pp = ap->private_data;
	int did_port_rst = 0;
	const char *reason;
	int tout_msec, rc;
	u32 tmp;

 retry:
	/* Sometimes, DEV_RST is not enough to recover the controller.
	 * This happens often after PM DMA CS errata.
	 */
	if (pp->do_port_rst) {
		ata_port_printk(ap, KERN_WARNING, "controller in dubious "
				"state, performing PORT_RST\n");

		writel(PORT_CS_PORT_RST, port + PORT_CTRL_STAT);
		ata_msleep(ap, 10);
		writel(PORT_CS_PORT_RST, port + PORT_CTRL_CLR);
		ata_wait_register(ap, port + PORT_CTRL_STAT, PORT_CS_RDY, 0,
				  10, 5000);

		/* restore port configuration */
		sil24_config_port(ap);
		sil24_config_pmp(ap, ap->nr_pmp_links);

		pp->do_port_rst = 0;
		did_port_rst = 1;
	}

	/* sil24 does the right thing(tm) without any protection */
	sata_set_spd(link);

	tout_msec = 100;
	if (ata_link_online(link))
		tout_msec = 5000;

	writel(PORT_CS_DEV_RST, port + PORT_CTRL_STAT);
	tmp = ata_wait_register(ap, port + PORT_CTRL_STAT,
				PORT_CS_DEV_RST, PORT_CS_DEV_RST, 10,
				tout_msec);

	/* SStatus oscillates between zero and valid status after
	 * DEV_RST, debounce it.
	 */
	rc = sata_link_debounce(link, sata_deb_timing_long, deadline);
	if (rc) {
		reason = "PHY debouncing failed";
		goto err;
	}

	if (tmp & PORT_CS_DEV_RST) {
		if (ata_link_offline(link))
			return 0;
		reason = "link not ready";
		goto err;
	}

	/* Sil24 doesn't store signature FIS after hardreset, so we
	 * can't wait for BSY to clear.  Some devices take a long time
	 * to get ready and those devices will choke if we don't wait
	 * for BSY clearance here.  Tell libata to perform follow-up
	 * softreset.
	 */
	return -EAGAIN;

 err:
	if (!did_port_rst) {
		pp->do_port_rst = 1;
		goto retry;
	}

	ata_link_printk(link, KERN_ERR, "hardreset failed (%s)\n", reason);
	return -EIO;
}
static inline void sil24_fill_sg(struct ata_queued_cmd *qc,
				 struct sil24_sge *sge)
{
	struct scatterlist *sg;
	struct sil24_sge *last_sge = NULL;
	unsigned int si;

	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		sge->addr = cpu_to_le64(sg_dma_address(sg));
		sge->cnt = cpu_to_le32(sg_dma_len(sg));
		sge->flags = 0;

		last_sge = sge;
		sge++;
	}

	last_sge->flags = cpu_to_le32(SGE_TRM);
}
static int sil24_qc_defer(struct ata_queued_cmd *qc)
{
	struct ata_link *link = qc->dev->link;
	struct ata_port *ap = link->ap;
	u8 prot = qc->tf.protocol;

	/*
	 * There is a bug in the chip:
	 * Port LRAM Causes the PRB/SGT Data to be Corrupted
	 * If the host issues a read request for LRAM and SActive registers
	 * while active commands are available in the port, PRB/SGT data in
	 * the LRAM can become corrupted.  This issue applies only when
	 * reading from, but not writing to, the LRAM.
	 *
	 * Therefore, reading LRAM when there is no particular error [and
	 * other commands may be outstanding] is prohibited.
	 *
	 * To avoid this bug there are two situations where a command must run
	 * exclusive of any other commands on the port:
	 *
	 * - ATAPI commands which check the sense data
	 * - Passthrough ATA commands which always have ATA_QCFLAG_RESULT_TF
	 *   set.
	 */
	int is_excl = (ata_is_atapi(prot) ||
		       (qc->flags & ATA_QCFLAG_RESULT_TF));

	if (unlikely(ap->excl_link)) {
		if (link == ap->excl_link) {
			if (ap->nr_active_links)
				return ATA_DEFER_PORT;
			qc->flags |= ATA_QCFLAG_CLEAR_EXCL;
		} else
			return ATA_DEFER_PORT;
	} else if (unlikely(is_excl)) {
		ap->excl_link = link;
		if (ap->nr_active_links)
			return ATA_DEFER_PORT;
		qc->flags |= ATA_QCFLAG_CLEAR_EXCL;
	}

	return ata_std_qc_defer(qc);
}
static void sil24_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct sil24_port_priv *pp = ap->private_data;
	union sil24_cmd_block *cb;
	struct sil24_prb *prb;
	struct sil24_sge *sge;
	u16 ctrl = 0;

	cb = &pp->cmd_block[sil24_tag(qc->tag)];

	if (!ata_is_atapi(qc->tf.protocol)) {
		prb = &cb->ata.prb;
		sge = cb->ata.sge;
		if (ata_is_data(qc->tf.protocol)) {
			u16 prot = 0;
			ctrl = PRB_CTRL_PROTOCOL;
			if (ata_is_ncq(qc->tf.protocol))
				prot |= PRB_PROT_NCQ;
			if (qc->tf.flags & ATA_TFLAG_WRITE)
				prot |= PRB_PROT_WRITE;
			else
				prot |= PRB_PROT_READ;
			prb->prot = cpu_to_le16(prot);
		}
	} else {
		prb = &cb->atapi.prb;
		sge = cb->atapi.sge;
		memset(cb->atapi.cdb, 0, sizeof(cb->atapi.cdb));
		memcpy(cb->atapi.cdb, qc->cdb, qc->dev->cdb_len);

		if (ata_is_data(qc->tf.protocol)) {
			if (qc->tf.flags & ATA_TFLAG_WRITE)
				ctrl = PRB_CTRL_PACKET_WRITE;
			else
				ctrl = PRB_CTRL_PACKET_READ;
		}
	}

	prb->ctrl = cpu_to_le16(ctrl);
	ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, prb->fis);

	if (qc->flags & ATA_QCFLAG_DMAMAP)
		sil24_fill_sg(qc, sge);
}
static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct sil24_port_priv *pp = ap->private_data;
	void __iomem *port = sil24_port_base(ap);
	unsigned int tag = sil24_tag(qc->tag);
	dma_addr_t paddr;
	void __iomem *activate;

	paddr = pp->cmd_block_dma + tag * sizeof(*pp->cmd_block);
	activate = port + PORT_CMD_ACTIVATE + tag * 8;

	/*
	 * The barrier is required to ensure that writes to cmd_block reach
	 * the memory before the write to PORT_CMD_ACTIVATE.
	 */
	wmb();

	writel((u32)paddr, activate);
	writel((u64)paddr >> 32, activate + 4);

	return 0;
}
static bool sil24_qc_fill_rtf(struct ata_queued_cmd *qc)
{
	sil24_read_tf(qc->ap, qc->tag, &qc->result_tf);
	return true;
}
static void sil24_pmp_attach(struct ata_port *ap)
{
	u32 *gscr = ap->link.device->gscr;

	sil24_config_pmp(ap, 1);

	if (sata_pmp_gscr_vendor(gscr) == 0x11ab &&
	    sata_pmp_gscr_devid(gscr) == 0x4140) {
		ata_port_printk(ap, KERN_INFO,
			"disabling NCQ support due to sil24-mv4140 quirk\n");
		ap->flags &= ~ATA_FLAG_NCQ;
	}
}
static void sil24_pmp_detach(struct ata_port *ap)
{
	sil24_config_pmp(ap, 0);

	ap->flags |= ATA_FLAG_NCQ;
}
static int sil24_pmp_hardreset(struct ata_link *link, unsigned int *class,
			       unsigned long deadline)
{
	int rc;

	rc = sil24_init_port(link->ap);
	if (rc) {
		ata_link_printk(link, KERN_ERR,
				"hardreset failed (port not ready)\n");
		return rc;
	}

	return sata_std_hardreset(link, class, deadline);
}
static void sil24_freeze(struct ata_port *ap)
{
	void __iomem *port = sil24_port_base(ap);

	/* Port-wide IRQ mask in HOST_CTRL doesn't really work, clear
	 * PORT_IRQ_ENABLE instead.
	 */
	writel(0xffff, port + PORT_IRQ_ENABLE_CLR);
}
static void sil24_thaw(struct ata_port *ap)
{
	void __iomem *port = sil24_port_base(ap);
	u32 tmp;

	/* clear pending IRQs */
	tmp = readl(port + PORT_IRQ_STAT);
	writel(tmp, port + PORT_IRQ_STAT);

	/* turn IRQ back on */
	writel(DEF_PORT_IRQ, port + PORT_IRQ_ENABLE_SET);
}
static void sil24_error_intr(struct ata_port *ap)
{
	void __iomem *port = sil24_port_base(ap);
	struct sil24_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc = NULL;
	struct ata_link *link;
	struct ata_eh_info *ehi;
	int abort = 0, freeze = 0;
	u32 irq_stat;

	/* on error, we need to clear IRQ explicitly */
	irq_stat = readl(port + PORT_IRQ_STAT);
	writel(irq_stat, port + PORT_IRQ_STAT);

	/* first, analyze and record host port events */
	link = &ap->link;
	ehi = &link->eh_info;
	ata_ehi_clear_desc(ehi);

	ata_ehi_push_desc(ehi, "irq_stat 0x%08x", irq_stat);

	if (irq_stat & PORT_IRQ_SDB_NOTIFY) {
		ata_ehi_push_desc(ehi, "SDB notify");
		sata_async_notification(ap);
	}

	if (irq_stat & (PORT_IRQ_PHYRDY_CHG | PORT_IRQ_DEV_XCHG)) {
		ata_ehi_hotplugged(ehi);
		ata_ehi_push_desc(ehi, "%s",
				  irq_stat & PORT_IRQ_PHYRDY_CHG ?
				  "PHY RDY changed" : "device exchanged");
		freeze = 1;
	}

	if (irq_stat & PORT_IRQ_UNK_FIS) {
		ehi->err_mask |= AC_ERR_HSM;
		ehi->action |= ATA_EH_RESET;
		ata_ehi_push_desc(ehi, "unknown FIS");
		freeze = 1;
	}

	/* deal with command error */
	if (irq_stat & PORT_IRQ_ERROR) {
		struct sil24_cerr_info *ci = NULL;
		unsigned int err_mask = 0, action = 0;
		u32 context, cerr;
		int pmp;

		abort = 1;

		/* DMA Context Switch Failure in Port Multiplier Mode
		 * errata.  If we have active commands to 3 or more
		 * devices, any error condition on active devices can
		 * corrupt DMA context switching.
		 */
		if (ap->nr_active_links >= 3) {
			ehi->err_mask |= AC_ERR_OTHER;
			ehi->action |= ATA_EH_RESET;
			ata_ehi_push_desc(ehi, "PMP DMA CS errata");
			pp->do_port_rst = 1;
			freeze = 1;
		}

		/* find out the offending link and qc */
		if (sata_pmp_attached(ap)) {
			context = readl(port + PORT_CONTEXT);
			pmp = (context >> 5) & 0xf;

			if (pmp < ap->nr_pmp_links) {
				link = &ap->pmp_link[pmp];
				ehi = &link->eh_info;
				qc = ata_qc_from_tag(ap, link->active_tag);

				ata_ehi_clear_desc(ehi);
				ata_ehi_push_desc(ehi, "irq_stat 0x%08x",
						  irq_stat);
			} else {
				err_mask |= AC_ERR_HSM;
				action |= ATA_EH_RESET;
				freeze = 1;
			}
		} else
			qc = ata_qc_from_tag(ap, link->active_tag);

		/* analyze CMD_ERR */
		cerr = readl(port + PORT_CMD_ERR);
		if (cerr < ARRAY_SIZE(sil24_cerr_db))
			ci = &sil24_cerr_db[cerr];

		if (ci && ci->desc) {
			err_mask |= ci->err_mask;
			action |= ci->action;
			if (action & ATA_EH_RESET)
				freeze = 1;
			ata_ehi_push_desc(ehi, "%s", ci->desc);
		} else {
			err_mask |= AC_ERR_OTHER;
			action |= ATA_EH_RESET;
			freeze = 1;
			ata_ehi_push_desc(ehi, "unknown command error %d",
					  cerr);
		}

		/* record error info */
		if (qc)
			qc->err_mask |= err_mask;
		else
			ehi->err_mask |= err_mask;

		ehi->action |= action;

		/* if PMP, resume */
		if (sata_pmp_attached(ap))
			writel(PORT_CS_PMP_RESUME, port + PORT_CTRL_STAT);
	}

	/* freeze or abort */
	if (freeze)
		ata_port_freeze(ap);
	else if (abort) {
		if (qc)
			ata_link_abort(qc->dev->link);
		else
			ata_port_abort(ap);
	}
}
static inline void sil24_host_intr(struct ata_port *ap)
{
	void __iomem *port = sil24_port_base(ap);
	u32 slot_stat, qc_active;
	int rc;

	/* If PCIX_IRQ_WOC, there's an inherent race window between
	 * clearing IRQ pending status and reading PORT_SLOT_STAT
	 * which may cause spurious interrupts afterwards.  This is
	 * unavoidable and much better than losing interrupts which
	 * happens if IRQ pending is cleared after reading
	 * PORT_SLOT_STAT.
	 */
	if (ap->flags & SIL24_FLAG_PCIX_IRQ_WOC)
		writel(PORT_IRQ_COMPLETE, port + PORT_IRQ_STAT);

	slot_stat = readl(port + PORT_SLOT_STAT);

	if (unlikely(slot_stat & HOST_SSTAT_ATTN)) {
		sil24_error_intr(ap);
		return;
	}

	qc_active = slot_stat & ~HOST_SSTAT_ATTN;
	rc = ata_qc_complete_multiple(ap, qc_active);
	if (rc > 0)
		return;
	if (rc < 0) {
		struct ata_eh_info *ehi = &ap->link.eh_info;
		ehi->err_mask |= AC_ERR_HSM;
		ehi->action |= ATA_EH_RESET;
		ata_port_freeze(ap);
		return;
	}

	/* spurious interrupts are expected if PCIX_IRQ_WOC */
	if (!(ap->flags & SIL24_FLAG_PCIX_IRQ_WOC) && ata_ratelimit())
		ata_port_printk(ap, KERN_INFO, "spurious interrupt "
			"(slot_stat 0x%x active_tag %d sactive 0x%x)\n",
			slot_stat, ap->link.active_tag, ap->link.sactive);
}
static irqreturn_t sil24_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	void __iomem *host_base = host->iomap[SIL24_HOST_BAR];
	unsigned handled = 0;
	u32 status;
	int i;

	status = readl(host_base + HOST_IRQ_STAT);

	if (status == 0xffffffff) {
		printk(KERN_ERR DRV_NAME ": IRQ status == 0xffffffff, "
		       "PCI fault or device removal?\n");
		goto out;
	}

	if (!(status & IRQ_STAT_4PORTS))
		goto out;

	spin_lock(&host->lock);

	for (i = 0; i < host->n_ports; i++)
		if (status & (1 << i)) {
			sil24_host_intr(host->ports[i]);
			handled++;
		}

	spin_unlock(&host->lock);

 out:
	return IRQ_RETVAL(handled);
}
static void sil24_error_handler(struct ata_port *ap)
{
	struct sil24_port_priv *pp = ap->private_data;

	if (sil24_init_port(ap))
		ata_eh_freeze_port(ap);

	sata_pmp_error_handler(ap);

	pp->do_port_rst = 0;
}
static void sil24_post_internal_cmd(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* make DMA engine forget about the failed command */
	if ((qc->flags & ATA_QCFLAG_FAILED) && sil24_init_port(ap))
		ata_eh_freeze_port(ap);
}
static int sil24_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct sil24_port_priv *pp;
	union sil24_cmd_block *cb;
	size_t cb_size = sizeof(*cb) * SIL24_MAX_CMDS;
	dma_addr_t cb_dma;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	cb = dmam_alloc_coherent(dev, cb_size, &cb_dma, GFP_KERNEL);
	if (!cb)
		return -ENOMEM;
	memset(cb, 0, cb_size);

	pp->cmd_block = cb;
	pp->cmd_block_dma = cb_dma;

	ap->private_data = pp;

	ata_port_pbar_desc(ap, SIL24_HOST_BAR, -1, "host");
	ata_port_pbar_desc(ap, SIL24_PORT_BAR, sil24_port_offset(ap), "port");

	return 0;
}
static void sil24_init_controller(struct ata_host *host)
{
	void __iomem *host_base = host->iomap[SIL24_HOST_BAR];
	u32 tmp;
	int i;

	writel(0, host_base + HOST_FLASH_CMD);

	/* clear global reset & mask interrupts during initialization */
	writel(0, host_base + HOST_CTRL);

	/* init ports */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		void __iomem *port = sil24_port_base(ap);

		/* Initial PHY setting */
		writel(0x20c, port + PORT_PHY_CFG);

		/* Clear port RST */
		tmp = readl(port + PORT_CTRL_STAT);
		if (tmp & PORT_CS_PORT_RST) {
			writel(PORT_CS_PORT_RST, port + PORT_CTRL_CLR);
			tmp = ata_wait_register(NULL, port + PORT_CTRL_STAT,
						PORT_CS_PORT_RST,
						PORT_CS_PORT_RST, 10, 100);
			if (tmp & PORT_CS_PORT_RST)
				dev_printk(KERN_ERR, host->dev,
					   "failed to clear port RST\n");
		}

		/* configure port */
		sil24_config_port(ap);
	}

	/* Turn on interrupts */
	writel(IRQ_STAT_4PORTS, host_base + HOST_CTRL);
}
static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	extern int __MARKER__sil24_cmd_block_is_sized_wrongly;
	static int printed_version;
	struct ata_port_info pi = sil24_port_info[ent->driver_data];
	const struct ata_port_info *ppi[] = { &pi, NULL };
	void __iomem * const *iomap;
	struct ata_host *host;
	int rc;
	u32 tmp;

	/* cause link error if sil24_cmd_block is sized wrongly */
	if (sizeof(union sil24_cmd_block) != PAGE_SIZE)
		__MARKER__sil24_cmd_block_is_sized_wrongly = 1;

	if (!printed_version++)
		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

	/* acquire resources */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev,
				(1 << SIL24_HOST_BAR) | (1 << SIL24_PORT_BAR),
				DRV_NAME);
	if (rc)
		return rc;
	iomap = pcim_iomap_table(pdev);

	/* apply workaround for completion IRQ loss on PCI-X errata */
	if (pi.flags & SIL24_FLAG_PCIX_IRQ_WOC) {
		tmp = readl(iomap[SIL24_HOST_BAR] + HOST_CTRL);
		if (tmp & (HOST_CTRL_TRDY | HOST_CTRL_STOP | HOST_CTRL_DEVSEL))
			dev_printk(KERN_INFO, &pdev->dev,
				   "Applying completion IRQ loss on PCI-X "
				   "errata fix\n");
		else
			pi.flags &= ~SIL24_FLAG_PCIX_IRQ_WOC;
	}

	/* allocate and fill host */
	host = ata_host_alloc_pinfo(&pdev->dev, ppi,
				    SIL24_FLAG2NPORTS(ppi[0]->flags));
	if (!host)
		return -ENOMEM;
	host->iomap = iomap;

	/* configure and activate the device */
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (rc) {
			rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
			if (rc) {
				dev_printk(KERN_ERR, &pdev->dev,
					   "64-bit DMA enable failed\n");
				return rc;
			}
		}
	} else {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit DMA enable failed\n");
			return rc;
		}
		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit consistent DMA enable failed\n");
			return rc;
		}
	}

	/* Set max read request size to 4096.  This slightly increases
	 * write throughput for pci-e variants.
	 */
	pcie_set_readrq(pdev, 4096);

	sil24_init_controller(host);

	if (sata_sil24_msi && !pci_enable_msi(pdev))
		dev_printk(KERN_INFO, &pdev->dev, "Using MSI\n");

	pci_set_master(pdev);
	return ata_host_activate(host, pdev->irq, sil24_interrupt, IRQF_SHARED,
				 &sil24_sht);
}
static int sil24_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	void __iomem *host_base = host->iomap[SIL24_HOST_BAR];
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc)
		return rc;

	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND)
		writel(HOST_CTRL_GLOBAL_RST, host_base + HOST_CTRL);

	sil24_init_controller(host);

	ata_host_resume(host);

	return 0;
}
static int sil24_port_resume(struct ata_port *ap)
{
	sil24_config_pmp(ap, ap->nr_pmp_links);
	return 0;
}
static int __init sil24_init(void)
{
	return pci_register_driver(&sil24_pci_driver);
}

static void __exit sil24_exit(void)
{
	pci_unregister_driver(&sil24_pci_driver);
}
MODULE_AUTHOR("Tejun Heo");
MODULE_DESCRIPTION("Silicon Image 3124/3132 SATA low-level driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, sil24_pci_tbl);

module_init(sil24_init);
module_exit(sil24_exit);