// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * sata_sil24.c - Driver for Silicon Image 3124/3132 SATA-2 controllers
 *
 * Copyright 2005 Tejun Heo
 *
 * Based on preview driver from Silicon Image.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <linux/libata.h>

#define DRV_NAME	"sata_sil24"
#define DRV_VERSION	"1.1"
/*
 * Port request block (PRB) 32 bytes
 */

/*
 * Scatter gather entry (SGE) 16 bytes
 */

enum {
	/*
	 * sil24 fetches in chunks of 64 bytes.  The first chunk
	 * contains the PRB and two SGEs.  Each subsequent chunk
	 * consists of four SGEs and is called an SGT.  Calculate the
	 * number of SGTs that fit into one page.
	 */
	SIL24_PRB_SZ		= sizeof(struct sil24_prb) +
				  2 * sizeof(struct sil24_sge),
	SIL24_MAX_SGT		= (PAGE_SIZE - SIL24_PRB_SZ)
				  / (4 * sizeof(struct sil24_sge)),

	/*
	 * This will give us one unused SGE for ATA.  This extra SGE
	 * will be used to store the CDB for ATAPI devices.
	 */
	SIL24_MAX_SGE		= 4 * SIL24_MAX_SGT + 1,
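
	/*
	 * Worked example, assuming 4 KiB pages: the PRB is 32 bytes and an
	 * SGE is 16 bytes, so SIL24_PRB_SZ = 64, SIL24_MAX_SGT = (4096 - 64)
	 * / 64 = 63 and SIL24_MAX_SGE = 253.  An ATAPI command block
	 * (32-byte PRB + 16-byte CDB + 253 SGEs * 16 bytes) then fills
	 * exactly 4096 bytes, which is what the build-time size check in
	 * sil24_init_one() verifies.
	 */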

	/*
	 * Global controller registers (128 bytes @ BAR0)
	 */
	HOST_SLOT_STAT		= 0x00, /* 32 bit slot stat * 4 */
	HOST_BIST_CTRL		= 0x50,
	HOST_BIST_PTRN		= 0x54,
	HOST_BIST_STAT		= 0x58,
	HOST_MEM_BIST_STAT	= 0x5c,
	HOST_FLASH_CMD		= 0x70,
	HOST_FLASH_DATA		= 0x74,
	HOST_TRANSITION_DETECT	= 0x75,
	HOST_GPIO_CTRL		= 0x76,
	HOST_I2C_ADDR		= 0x78, /* 32 bit */
	HOST_I2C_XFER_CNT	= 0x7e,

	/* HOST_SLOT_STAT bits */
	HOST_SSTAT_ATTN		= (1 << 31),

	/* HOST_CTRL bits */
	HOST_CTRL_M66EN		= (1 << 16), /* M66EN PCI bus signal */
	HOST_CTRL_TRDY		= (1 << 17), /* latched PCI TRDY */
	HOST_CTRL_STOP		= (1 << 18), /* latched PCI STOP */
	HOST_CTRL_DEVSEL	= (1 << 19), /* latched PCI DEVSEL */
	HOST_CTRL_REQ64		= (1 << 20), /* latched PCI REQ64 */
	HOST_CTRL_GLOBAL_RST	= (1 << 31), /* global reset */

	/*
	 * Port registers
	 * (8192 bytes @ +0x0000, +0x2000, +0x4000 and +0x6000 @ BAR2)
	 */
	PORT_REGS_SIZE		= 0x2000,

	PORT_LRAM		= 0x0000, /* 31 LRAM slots and PMP regs */
	PORT_LRAM_SLOT_SZ	= 0x0080, /* 32 bytes PRB + 2 SGE, ACT... */

	PORT_PMP		= 0x0f80, /* 8 bytes PMP * 16 (128 bytes) */
	PORT_PMP_STATUS		= 0x0000, /* port device status offset */
	PORT_PMP_QACTIVE	= 0x0004, /* port device QActive offset */
	PORT_PMP_SIZE		= 0x0008, /* 8 bytes per PMP */

	PORT_CTRL_STAT		= 0x1000, /* write: ctrl-set, read: stat */
	PORT_CTRL_CLR		= 0x1004, /* write: ctrl-clear */
	PORT_IRQ_STAT		= 0x1008, /* high: status, low: interrupt */
	PORT_IRQ_ENABLE_SET	= 0x1010, /* write: enable-set */
	PORT_IRQ_ENABLE_CLR	= 0x1014, /* write: enable-clear */
	PORT_ACTIVATE_UPPER_ADDR = 0x101c,
	PORT_EXEC_FIFO		= 0x1020, /* command execution fifo */
	PORT_CMD_ERR		= 0x1024, /* command error number */
	PORT_FIS_CFG		= 0x1028,
	PORT_FIFO_THRES		= 0x102c,

	PORT_DECODE_ERR_CNT	= 0x1040,
	PORT_DECODE_ERR_THRESH	= 0x1042,
	PORT_CRC_ERR_CNT	= 0x1044,
	PORT_CRC_ERR_THRESH	= 0x1046,
	PORT_HSHK_ERR_CNT	= 0x1048,
	PORT_HSHK_ERR_THRESH	= 0x104a,

	PORT_PHY_CFG		= 0x1050,
	PORT_SLOT_STAT		= 0x1800,
	PORT_CMD_ACTIVATE	= 0x1c00, /* 64 bit cmd activate * 31 (248 bytes) */
	PORT_CONTEXT		= 0x1e04,
	PORT_EXEC_DIAG		= 0x1e00, /* 32bit exec diag * 16 (64 bytes, 0-10 used on 3124) */
	PORT_PSD_DIAG		= 0x1e40, /* 32bit psd diag * 16 (64 bytes, 0-8 used on 3124) */
	PORT_SCONTROL		= 0x1f00,
	PORT_SSTATUS		= 0x1f04,
	PORT_SERROR		= 0x1f08,
	PORT_SACTIVE		= 0x1f0c,

	/* PORT_CTRL_STAT bits */
	PORT_CS_PORT_RST	= (1 << 0), /* port reset */
	PORT_CS_DEV_RST		= (1 << 1), /* device reset */
	PORT_CS_INIT		= (1 << 2), /* port initialize */
	PORT_CS_IRQ_WOC		= (1 << 3), /* interrupt write one to clear */
	PORT_CS_CDB16		= (1 << 5), /* 0=12b cdb, 1=16b cdb */
	PORT_CS_PMP_RESUME	= (1 << 6), /* PMP resume */
	PORT_CS_32BIT_ACTV	= (1 << 10), /* 32-bit activation */
	PORT_CS_PMP_EN		= (1 << 13), /* port multiplier enable */
	PORT_CS_RDY		= (1 << 31), /* port ready to accept commands */

	/* PORT_IRQ_STAT/ENABLE_SET/CLR */
	/* bits[11:0] are masked */
	PORT_IRQ_COMPLETE	= (1 << 0), /* command(s) completed */
	PORT_IRQ_ERROR		= (1 << 1), /* command execution error */
	PORT_IRQ_PORTRDY_CHG	= (1 << 2), /* port ready change */
	PORT_IRQ_PWR_CHG	= (1 << 3), /* power management change */
	PORT_IRQ_PHYRDY_CHG	= (1 << 4), /* PHY ready change */
	PORT_IRQ_COMWAKE	= (1 << 5), /* COMWAKE received */
	PORT_IRQ_UNK_FIS	= (1 << 6), /* unknown FIS received */
	PORT_IRQ_DEV_XCHG	= (1 << 7), /* device exchanged */
	PORT_IRQ_8B10B		= (1 << 8), /* 8b/10b decode error threshold */
	PORT_IRQ_CRC		= (1 << 9), /* CRC error threshold */
	PORT_IRQ_HANDSHAKE	= (1 << 10), /* handshake error threshold */
	PORT_IRQ_SDB_NOTIFY	= (1 << 11), /* SDB notify received */

	DEF_PORT_IRQ		= PORT_IRQ_COMPLETE | PORT_IRQ_ERROR |
				  PORT_IRQ_PHYRDY_CHG | PORT_IRQ_DEV_XCHG |
				  PORT_IRQ_UNK_FIS | PORT_IRQ_SDB_NOTIFY,

	/* bits[27:16] are unmasked (raw) */
	PORT_IRQ_RAW_SHIFT	= 16,
	PORT_IRQ_MASKED_MASK	= 0x7ff,
	PORT_IRQ_RAW_MASK	= (0x7ff << PORT_IRQ_RAW_SHIFT),

	/* ENABLE_SET/CLR specific, intr steering - 2 bit field */
	PORT_IRQ_STEER_SHIFT	= 30,
	PORT_IRQ_STEER_MASK	= (3 << PORT_IRQ_STEER_SHIFT),

	/* PORT_CMD_ERR constants */
	PORT_CERR_DEV		= 1, /* Error bit in D2H Register FIS */
	PORT_CERR_SDB		= 2, /* Error bit in SDB FIS */
	PORT_CERR_DATA		= 3, /* Error in data FIS not detected by dev */
	PORT_CERR_SEND		= 4, /* Initial cmd FIS transmission failure */
	PORT_CERR_INCONSISTENT	= 5, /* Protocol mismatch */
	PORT_CERR_DIRECTION	= 6, /* Data direction mismatch */
	PORT_CERR_UNDERRUN	= 7, /* Ran out of SGEs while writing */
	PORT_CERR_OVERRUN	= 8, /* Ran out of SGEs while reading */
	PORT_CERR_PKT_PROT	= 11, /* DIR invalid in 1st PIO setup of ATAPI */
	PORT_CERR_SGT_BOUNDARY	= 16, /* PLD ecode 00 - SGT not on qword boundary */
	PORT_CERR_SGT_TGTABRT	= 17, /* PLD ecode 01 - target abort */
	PORT_CERR_SGT_MSTABRT	= 18, /* PLD ecode 10 - master abort */
	PORT_CERR_SGT_PCIPERR	= 19, /* PLD ecode 11 - PCI parity err while fetching SGT */
	PORT_CERR_CMD_BOUNDARY	= 24, /* ctrl[15:13] 001 - PRB not on qword boundary */
	PORT_CERR_CMD_TGTABRT	= 25, /* ctrl[15:13] 010 - target abort */
	PORT_CERR_CMD_MSTABRT	= 26, /* ctrl[15:13] 100 - master abort */
	PORT_CERR_CMD_PCIPERR	= 27, /* ctrl[15:13] 110 - PCI parity err while fetching PRB */
	PORT_CERR_XFR_UNDEF	= 32, /* PSD ecode 00 - undefined */
	PORT_CERR_XFR_TGTABRT	= 33, /* PSD ecode 01 - target abort */
	PORT_CERR_XFR_MSTABRT	= 34, /* PSD ecode 10 - master abort */
	PORT_CERR_XFR_PCIPERR	= 35, /* PSD ecode 11 - PCI parity err during transfer */
	PORT_CERR_SENDSERVICE	= 36, /* FIS received while sending service */

	/* bits of PRB control field */
	PRB_CTRL_PROTOCOL	= (1 << 0), /* override def. ATA protocol */
	PRB_CTRL_PACKET_READ	= (1 << 4), /* PACKET cmd read */
	PRB_CTRL_PACKET_WRITE	= (1 << 5), /* PACKET cmd write */
	PRB_CTRL_NIEN		= (1 << 6), /* Mask completion irq */
	PRB_CTRL_SRST		= (1 << 7), /* Soft reset request (ign BSY?) */

	/* PRB protocol field */
	PRB_PROT_PACKET		= (1 << 0),
	PRB_PROT_TCQ		= (1 << 1),
	PRB_PROT_NCQ		= (1 << 2),
	PRB_PROT_READ		= (1 << 3),
	PRB_PROT_WRITE		= (1 << 4),
	PRB_PROT_TRANSPARENT	= (1 << 5),

	SGE_TRM			= (1 << 31), /* Last SGE in chain */
	SGE_LNK			= (1 << 30), /* linked list -
						points to SGT, not SGE */
	SGE_DRD			= (1 << 29), /* discard data read (/dev/null) -
						data address ignored */

	SIL24_COMMON_FLAGS	= ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
				  ATA_FLAG_NCQ | ATA_FLAG_ACPI_SATA |
				  ATA_FLAG_AN | ATA_FLAG_PMP,
	SIL24_FLAG_PCIX_IRQ_WOC	= (1 << 24), /* IRQ loss errata on PCI-X */

	IRQ_STAT_4PORTS		= 0xf,
};

struct sil24_ata_block {
	struct sil24_prb prb;
	struct sil24_sge sge[SIL24_MAX_SGE];
};

struct sil24_atapi_block {
	struct sil24_prb prb;
	u8 cdb[16];
	struct sil24_sge sge[SIL24_MAX_SGE];
};

union sil24_cmd_block {
	struct sil24_ata_block ata;
	struct sil24_atapi_block atapi;
};
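
/*
 * Command error table: sil24_error_intr() reads the raw error number from
 * PORT_CMD_ERR and uses it to index this table for the libata error mask,
 * the EH action to request and a human readable description.
 */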
static const struct sil24_cerr_info {
	unsigned int err_mask, action;
	const char *desc;
} sil24_cerr_db[] = {
	[0]			= { AC_ERR_DEV, 0,
				    "device error" },
	[PORT_CERR_DEV]		= { AC_ERR_DEV, 0,
				    "device error via D2H FIS" },
	[PORT_CERR_SDB]		= { AC_ERR_DEV, 0,
				    "device error via SDB FIS" },
	[PORT_CERR_DATA]	= { AC_ERR_ATA_BUS, ATA_EH_RESET,
				    "error in data FIS" },
	[PORT_CERR_SEND]	= { AC_ERR_ATA_BUS, ATA_EH_RESET,
				    "failed to transmit command FIS" },
	[PORT_CERR_INCONSISTENT] = { AC_ERR_HSM, ATA_EH_RESET,
				     "protocol mismatch" },
	[PORT_CERR_DIRECTION]	= { AC_ERR_HSM, ATA_EH_RESET,
				    "data direction mismatch" },
	[PORT_CERR_UNDERRUN]	= { AC_ERR_HSM, ATA_EH_RESET,
				    "ran out of SGEs while writing" },
	[PORT_CERR_OVERRUN]	= { AC_ERR_HSM, ATA_EH_RESET,
				    "ran out of SGEs while reading" },
	[PORT_CERR_PKT_PROT]	= { AC_ERR_HSM, ATA_EH_RESET,
				    "invalid data direction for ATAPI CDB" },
	[PORT_CERR_SGT_BOUNDARY] = { AC_ERR_SYSTEM, ATA_EH_RESET,
				     "SGT not on qword boundary" },
	[PORT_CERR_SGT_TGTABRT]	= { AC_ERR_HOST_BUS, ATA_EH_RESET,
				     "PCI target abort while fetching SGT" },
	[PORT_CERR_SGT_MSTABRT]	= { AC_ERR_HOST_BUS, ATA_EH_RESET,
				     "PCI master abort while fetching SGT" },
	[PORT_CERR_SGT_PCIPERR]	= { AC_ERR_HOST_BUS, ATA_EH_RESET,
				     "PCI parity error while fetching SGT" },
	[PORT_CERR_CMD_BOUNDARY] = { AC_ERR_SYSTEM, ATA_EH_RESET,
				     "PRB not on qword boundary" },
	[PORT_CERR_CMD_TGTABRT]	= { AC_ERR_HOST_BUS, ATA_EH_RESET,
				     "PCI target abort while fetching PRB" },
	[PORT_CERR_CMD_MSTABRT]	= { AC_ERR_HOST_BUS, ATA_EH_RESET,
				     "PCI master abort while fetching PRB" },
	[PORT_CERR_CMD_PCIPERR]	= { AC_ERR_HOST_BUS, ATA_EH_RESET,
				     "PCI parity error while fetching PRB" },
	[PORT_CERR_XFR_UNDEF]	= { AC_ERR_HOST_BUS, ATA_EH_RESET,
				     "undefined error while transferring data" },
	[PORT_CERR_XFR_TGTABRT]	= { AC_ERR_HOST_BUS, ATA_EH_RESET,
				     "PCI target abort while transferring data" },
	[PORT_CERR_XFR_MSTABRT]	= { AC_ERR_HOST_BUS, ATA_EH_RESET,
				     "PCI master abort while transferring data" },
	[PORT_CERR_XFR_PCIPERR]	= { AC_ERR_HOST_BUS, ATA_EH_RESET,
				     "PCI parity error while transferring data" },
	[PORT_CERR_SENDSERVICE]	= { AC_ERR_HSM, ATA_EH_RESET,
				     "FIS received while sending service FIS" },
};

/*
 * The preview driver always returned 0 for status.  We emulate it
 * here from the previous interrupt.
 */
struct sil24_port_priv {
	union sil24_cmd_block *cmd_block;	/* 32 cmd blocks */
	dma_addr_t cmd_block_dma;		/* DMA base addr for them */
	int do_port_rst;
};

static void sil24_dev_config(struct ata_device *dev);
static int sil24_scr_read(struct ata_link *link, unsigned sc_reg, u32 *val);
static int sil24_scr_write(struct ata_link *link, unsigned sc_reg, u32 val);
static int sil24_qc_defer(struct ata_queued_cmd *qc);
static enum ata_completion_errors sil24_qc_prep(struct ata_queued_cmd *qc);
static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc);
static void sil24_qc_fill_rtf(struct ata_queued_cmd *qc);
static void sil24_pmp_attach(struct ata_port *ap);
static void sil24_pmp_detach(struct ata_port *ap);
static void sil24_freeze(struct ata_port *ap);
static void sil24_thaw(struct ata_port *ap);
static int sil24_softreset(struct ata_link *link, unsigned int *class,
			   unsigned long deadline);
static int sil24_hardreset(struct ata_link *link, unsigned int *class,
			   unsigned long deadline);
static int sil24_pmp_hardreset(struct ata_link *link, unsigned int *class,
			       unsigned long deadline);
static void sil24_error_handler(struct ata_port *ap);
static void sil24_post_internal_cmd(struct ata_queued_cmd *qc);
static int sil24_port_start(struct ata_port *ap);
static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
#ifdef CONFIG_PM_SLEEP
static int sil24_pci_device_resume(struct pci_dev *pdev);
#endif
static int sil24_port_resume(struct ata_port *ap);

static const struct pci_device_id sil24_pci_tbl[] = {
	{ PCI_VDEVICE(CMD, 0x3124), BID_SIL3124 },
	{ PCI_VDEVICE(INTEL, 0x3124), BID_SIL3124 },
	{ PCI_VDEVICE(CMD, 0x3132), BID_SIL3132 },
	{ PCI_VDEVICE(CMD, 0x0242), BID_SIL3132 },
	{ PCI_VDEVICE(CMD, 0x0244), BID_SIL3132 },
	{ PCI_VDEVICE(CMD, 0x3131), BID_SIL3131 },
	{ PCI_VDEVICE(CMD, 0x3531), BID_SIL3131 },

	{ } /* terminate list */
};

static struct pci_driver sil24_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= sil24_pci_tbl,
	.probe			= sil24_init_one,
	.remove			= ata_pci_remove_one,
#ifdef CONFIG_PM_SLEEP
	.suspend		= ata_pci_device_suspend,
	.resume			= sil24_pci_device_resume,
#endif
};

static const struct scsi_host_template sil24_sht = {
	__ATA_BASE_SHT(DRV_NAME),
	.can_queue		= SIL24_MAX_CMDS,
	.sg_tablesize		= SIL24_MAX_SGE,
	.dma_boundary		= ATA_DMA_BOUNDARY,
	.tag_alloc_policy	= BLK_TAG_ALLOC_FIFO,
	.sdev_groups		= ata_ncq_sdev_groups,
	.change_queue_depth	= ata_scsi_change_queue_depth,
	.device_configure	= ata_scsi_device_configure
};

static struct ata_port_operations sil24_ops = {
	.inherits		= &sata_pmp_port_ops,

	.qc_defer		= sil24_qc_defer,
	.qc_prep		= sil24_qc_prep,
	.qc_issue		= sil24_qc_issue,
	.qc_fill_rtf		= sil24_qc_fill_rtf,

	.freeze			= sil24_freeze,
	.thaw			= sil24_thaw,
	.softreset		= sil24_softreset,
	.hardreset		= sil24_hardreset,
	.pmp_softreset		= sil24_softreset,
	.pmp_hardreset		= sil24_pmp_hardreset,
	.error_handler		= sil24_error_handler,
	.post_internal_cmd	= sil24_post_internal_cmd,
	.dev_config		= sil24_dev_config,

	.scr_read		= sil24_scr_read,
	.scr_write		= sil24_scr_write,
	.pmp_attach		= sil24_pmp_attach,
	.pmp_detach		= sil24_pmp_detach,

	.port_start		= sil24_port_start,
	.port_resume		= sil24_port_resume,
};

static bool sata_sil24_msi;	/* Disable MSI */
module_param_named(msi, sata_sil24_msi, bool, S_IRUGO);
MODULE_PARM_DESC(msi, "Enable MSI (Default: false)");

/*
 * Use bits 30-31 of port_flags to encode available port numbers.
 * Current maximum is 4.
 */
#define SIL24_NPORTS2FLAG(nports)	((((unsigned)(nports) - 1) & 0x3) << 30)
#define SIL24_FLAG2NPORTS(flag)		((((flag) >> 30) & 0x3) + 1)
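
/*
 * Example: SIL24_NPORTS2FLAG(4) stores the value 3 in bits 30-31 of the
 * port flags; SIL24_FLAG2NPORTS() adds 1 back and recovers 4.
 */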

static const struct ata_port_info sil24_port_info[] = {
	/* sil_3124 */
	{
		.flags		= SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(4) |
				  SIL24_FLAG_PCIX_IRQ_WOC,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA5,
		.port_ops	= &sil24_ops,
	},
	/* sil_3132 */
	{
		.flags		= SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(2),
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA5,
		.port_ops	= &sil24_ops,
	},
	/* sil_3131/sil_3531 */
	{
		.flags		= SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(1),
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA5,
		.port_ops	= &sil24_ops,
	},
};
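
/*
 * Map a libata tag to a hardware command slot.  The controller only has
 * 31 slots, so libata's internal (error handling) tag is mapped to slot 0.
 */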
static int sil24_tag(int tag)
{
	if (unlikely(ata_tag_internal(tag)))
		return 0;
	return tag;
}

static unsigned long sil24_port_offset(struct ata_port *ap)
{
	return ap->port_no * PORT_REGS_SIZE;
}

static void __iomem *sil24_port_base(struct ata_port *ap)
{
	return ap->host->iomap[SIL24_PORT_BAR] + sil24_port_offset(ap);
}

static void sil24_dev_config(struct ata_device *dev)
{
	void __iomem *port = sil24_port_base(dev->link->ap);

	if (dev->cdb_len == 16)
		writel(PORT_CS_CDB16, port + PORT_CTRL_STAT);
	else
		writel(PORT_CS_CDB16, port + PORT_CTRL_CLR);
}

static void sil24_read_tf(struct ata_port *ap, int tag, struct ata_taskfile *tf)
{
	void __iomem *port = sil24_port_base(ap);
	struct sil24_prb __iomem *prb;
	u8 fis[6 * 4];

	prb = port + PORT_LRAM + sil24_tag(tag) * PORT_LRAM_SLOT_SZ;
	memcpy_fromio(fis, prb->fis, sizeof(fis));
	ata_tf_from_fis(fis, tf);
}
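
/*
 * Translate libata SCR indices to the layout of the SControl/SStatus/
 * SError/SActive block starting at PORT_SCONTROL; sil24_scr_read() and
 * sil24_scr_write() multiply the mapped index by 4 to get the register
 * offset.
 */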
static int sil24_scr_map[] = {
	[SCR_CONTROL]	= 0,
	[SCR_STATUS]	= 1,
	[SCR_ERROR]	= 2,
	[SCR_ACTIVE]	= 3,
};

static int sil24_scr_read(struct ata_link *link, unsigned sc_reg, u32 *val)
{
	void __iomem *scr_addr = sil24_port_base(link->ap) + PORT_SCONTROL;

	if (sc_reg < ARRAY_SIZE(sil24_scr_map)) {
		*val = readl(scr_addr + sil24_scr_map[sc_reg] * 4);
		return 0;
	}
	return -EINVAL;
}

static int sil24_scr_write(struct ata_link *link, unsigned sc_reg, u32 val)
{
	void __iomem *scr_addr = sil24_port_base(link->ap) + PORT_SCONTROL;

	if (sc_reg < ARRAY_SIZE(sil24_scr_map)) {
		writel(val, scr_addr + sil24_scr_map[sc_reg] * 4);
		return 0;
	}
	return -EINVAL;
}

static void sil24_config_port(struct ata_port *ap)
{
	void __iomem *port = sil24_port_base(ap);

	/* configure IRQ WoC */
	if (ap->flags & SIL24_FLAG_PCIX_IRQ_WOC)
		writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_STAT);
	else
		writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_CLR);

	/* zero error counters. */
	writew(0x8000, port + PORT_DECODE_ERR_THRESH);
	writew(0x8000, port + PORT_CRC_ERR_THRESH);
	writew(0x8000, port + PORT_HSHK_ERR_THRESH);
	writew(0x0000, port + PORT_DECODE_ERR_CNT);
	writew(0x0000, port + PORT_CRC_ERR_CNT);
	writew(0x0000, port + PORT_HSHK_ERR_CNT);

	/* always use 64bit activation */
	writel(PORT_CS_32BIT_ACTV, port + PORT_CTRL_CLR);

	/* clear port multiplier enable and resume bits */
	writel(PORT_CS_PMP_EN | PORT_CS_PMP_RESUME, port + PORT_CTRL_CLR);
}

static void sil24_config_pmp(struct ata_port *ap, int attached)
{
	void __iomem *port = sil24_port_base(ap);

	if (attached)
		writel(PORT_CS_PMP_EN, port + PORT_CTRL_STAT);
	else
		writel(PORT_CS_PMP_EN, port + PORT_CTRL_CLR);
}

static void sil24_clear_pmp(struct ata_port *ap)
{
	void __iomem *port = sil24_port_base(ap);
	int i;

	writel(PORT_CS_PMP_RESUME, port + PORT_CTRL_CLR);

	for (i = 0; i < SATA_PMP_MAX_PORTS; i++) {
		void __iomem *pmp_base = port + PORT_PMP + i * PORT_PMP_SIZE;

		writel(0, pmp_base + PORT_PMP_STATUS);
		writel(0, pmp_base + PORT_PMP_QACTIVE);
	}
}

static int sil24_init_port(struct ata_port *ap)
{
	void __iomem *port = sil24_port_base(ap);
	struct sil24_port_priv *pp = ap->private_data;
	u32 tmp;

	/* clear PMP error status */
	if (sata_pmp_attached(ap))
		sil24_clear_pmp(ap);

	writel(PORT_CS_INIT, port + PORT_CTRL_STAT);
	ata_wait_register(ap, port + PORT_CTRL_STAT,
			  PORT_CS_INIT, PORT_CS_INIT, 10, 100);
	tmp = ata_wait_register(ap, port + PORT_CTRL_STAT,
				PORT_CS_RDY, 0, 10, 100);

	if ((tmp & (PORT_CS_INIT | PORT_CS_RDY)) != PORT_CS_RDY) {
		pp->do_port_rst = 1;
		ap->link.eh_context.i.action |= ATA_EH_RESET;
		return -EIO;
	}

	return 0;
}
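
/*
 * Issue a FIS built from @tf through command slot 0 with completion and
 * error interrupts temporarily masked, then poll the raw bits of
 * PORT_IRQ_STAT for the outcome.  Used by sil24_softreset() to send the
 * SRST control FIS.
 */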
static int sil24_exec_polled_cmd(struct ata_port *ap, int pmp,
				 const struct ata_taskfile *tf,
				 int is_cmd, u32 ctrl,
				 unsigned int timeout_msec)
{
	void __iomem *port = sil24_port_base(ap);
	struct sil24_port_priv *pp = ap->private_data;
	struct sil24_prb *prb = &pp->cmd_block[0].ata.prb;
	dma_addr_t paddr = pp->cmd_block_dma;
	u32 irq_enabled, irq_mask, irq_stat;
	int rc;

	prb->ctrl = cpu_to_le16(ctrl);
	ata_tf_to_fis(tf, pmp, is_cmd, prb->fis);

	/* temporarily plug completion and error interrupts */
	irq_enabled = readl(port + PORT_IRQ_ENABLE_SET);
	writel(PORT_IRQ_COMPLETE | PORT_IRQ_ERROR, port + PORT_IRQ_ENABLE_CLR);

	/*
	 * The barrier is required to ensure that writes to cmd_block reach
	 * the memory before the write to PORT_CMD_ACTIVATE.
	 */
	wmb();
	writel((u32)paddr, port + PORT_CMD_ACTIVATE);
	writel((u64)paddr >> 32, port + PORT_CMD_ACTIVATE + 4);

	irq_mask = (PORT_IRQ_COMPLETE | PORT_IRQ_ERROR) << PORT_IRQ_RAW_SHIFT;
	irq_stat = ata_wait_register(ap, port + PORT_IRQ_STAT, irq_mask, 0x0,
				     10, timeout_msec);

	writel(irq_mask, port + PORT_IRQ_STAT); /* clear IRQs */
	irq_stat >>= PORT_IRQ_RAW_SHIFT;

	if (irq_stat & PORT_IRQ_COMPLETE)
		rc = 0;
	else {
		/* force port into known state */
		sil24_init_port(ap);

		if (irq_stat & PORT_IRQ_ERROR)
			rc = -EIO;
		else
			rc = -EBUSY;
	}

	/* restore IRQ enabled */
	writel(irq_enabled, port + PORT_IRQ_ENABLE_SET);

	return rc;
}

static int sil24_softreset(struct ata_link *link, unsigned int *class,
			   unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	int pmp = sata_srst_pmp(link);
	unsigned int timeout_msec = 0;
	struct ata_taskfile tf;
	const char *reason;
	int rc;

	/* put the port into known state */
	if (sil24_init_port(ap)) {
		reason = "port not ready";
		goto err;
	}

	if (time_after(deadline, jiffies))
		timeout_msec = jiffies_to_msecs(deadline - jiffies);

	ata_tf_init(link->device, &tf);	/* doesn't really matter */
	rc = sil24_exec_polled_cmd(ap, pmp, &tf, 0, PRB_CTRL_SRST,
				   timeout_msec);
	if (rc) {
		reason = "SRST command error";
		goto err;
	}

	sil24_read_tf(ap, 0, &tf);
	*class = ata_port_classify(ap, &tf);

	return 0;

 err:
	ata_link_err(link, "softreset failed (%s)\n", reason);
	return -EIO;
}

static int sil24_hardreset(struct ata_link *link, unsigned int *class,
			   unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	void __iomem *port = sil24_port_base(ap);
	struct sil24_port_priv *pp = ap->private_data;
	int did_port_rst = 0;
	const char *reason;
	int tout_msec, rc;
	u32 tmp;

 retry:
	/* Sometimes, DEV_RST is not enough to recover the controller.
	 * This happens often after PM DMA CS errata.
	 */
	if (pp->do_port_rst) {
		ata_port_warn(ap,
			      "controller in dubious state, performing PORT_RST\n");

		writel(PORT_CS_PORT_RST, port + PORT_CTRL_STAT);
		ata_msleep(ap, 10);
		writel(PORT_CS_PORT_RST, port + PORT_CTRL_CLR);
		ata_wait_register(ap, port + PORT_CTRL_STAT, PORT_CS_RDY, 0,
				  10, 5000);

		/* restore port configuration */
		sil24_config_port(ap);
		sil24_config_pmp(ap, ap->nr_pmp_links);

		pp->do_port_rst = 0;
		did_port_rst = 1;
	}

	/* sil24 does the right thing(tm) without any protection */
	sata_set_spd(link);

	tout_msec = 100;
	if (ata_link_online(link))
		tout_msec = 5000;

	writel(PORT_CS_DEV_RST, port + PORT_CTRL_STAT);
	tmp = ata_wait_register(ap, port + PORT_CTRL_STAT,
				PORT_CS_DEV_RST, PORT_CS_DEV_RST, 10,
				tout_msec);

	/* SStatus oscillates between zero and valid status after
	 * DEV_RST, debounce it.
	 */
	rc = sata_link_debounce(link, sata_deb_timing_long, deadline);
	if (rc) {
		reason = "PHY debouncing failed";
		goto err;
	}

	if (tmp & PORT_CS_DEV_RST) {
		if (ata_link_offline(link))
			return 0;
		reason = "link not ready";
		goto err;
	}

	/* Sil24 doesn't store signature FIS after hardreset, so we
	 * can't wait for BSY to clear.  Some devices take a long time
	 * to get ready and those devices will choke if we don't wait
	 * for BSY clearance here.  Tell libata to perform follow-up
	 * softreset.
	 */
	return -EAGAIN;

 err:
	if (!did_port_rst) {
		pp->do_port_rst = 1;
		goto retry;
	}

	ata_link_err(link, "hardreset failed (%s)\n", reason);
	return -EIO;
}

static inline void sil24_fill_sg(struct ata_queued_cmd *qc,
				 struct sil24_sge *sge)
{
	struct scatterlist *sg;
	struct sil24_sge *last_sge = NULL;
	unsigned int si;

	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		sge->addr = cpu_to_le64(sg_dma_address(sg));
		sge->cnt = cpu_to_le32(sg_dma_len(sg));
		sge->flags = 0;

		last_sge = sge;
		sge++;
	}

	last_sge->flags = cpu_to_le32(SGE_TRM);
}

static int sil24_qc_defer(struct ata_queued_cmd *qc)
{
	struct ata_link *link = qc->dev->link;
	struct ata_port *ap = link->ap;
	u8 prot = qc->tf.protocol;

	/*
	 * There is a bug in the chip:
	 * Port LRAM Causes the PRB/SGT Data to be Corrupted
	 * If the host issues a read request for LRAM and SActive registers
	 * while active commands are available in the port, PRB/SGT data in
	 * the LRAM can become corrupted.  This issue applies only when
	 * reading from, but not writing to, the LRAM.
	 *
	 * Therefore, reading LRAM when there is no particular error [and
	 * other commands may be outstanding] is prohibited.
	 *
	 * To avoid this bug there are two situations where a command must run
	 * exclusive of any other commands on the port:
	 *
	 * - ATAPI commands which check the sense data
	 * - Passthrough ATA commands which always have ATA_QCFLAG_RESULT_TF
	 *   set.
	 */
	int is_excl = (ata_is_atapi(prot) ||
		       (qc->flags & ATA_QCFLAG_RESULT_TF));

	if (unlikely(ap->excl_link)) {
		if (link == ap->excl_link) {
			if (ap->nr_active_links)
				return ATA_DEFER_PORT;
			qc->flags |= ATA_QCFLAG_CLEAR_EXCL;
		} else
			return ATA_DEFER_PORT;
	} else if (unlikely(is_excl)) {
		ap->excl_link = link;
		if (ap->nr_active_links)
			return ATA_DEFER_PORT;
		qc->flags |= ATA_QCFLAG_CLEAR_EXCL;
	}

	return ata_std_qc_defer(qc);
}

static enum ata_completion_errors sil24_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct sil24_port_priv *pp = ap->private_data;
	union sil24_cmd_block *cb;
	struct sil24_prb *prb;
	struct sil24_sge *sge;
	u16 ctrl = 0;

	cb = &pp->cmd_block[sil24_tag(qc->hw_tag)];

	if (!ata_is_atapi(qc->tf.protocol)) {
		prb = &cb->ata.prb;
		sge = cb->ata.sge;
		if (ata_is_data(qc->tf.protocol)) {
			u16 prot = 0;
			ctrl = PRB_CTRL_PROTOCOL;
			if (ata_is_ncq(qc->tf.protocol))
				prot |= PRB_PROT_NCQ;
			if (qc->tf.flags & ATA_TFLAG_WRITE)
				prot |= PRB_PROT_WRITE;
			else
				prot |= PRB_PROT_READ;
			prb->prot = cpu_to_le16(prot);
		}
	} else {
		prb = &cb->atapi.prb;
		sge = cb->atapi.sge;
		memset(cb->atapi.cdb, 0, sizeof(cb->atapi.cdb));
		memcpy(cb->atapi.cdb, qc->cdb, qc->dev->cdb_len);

		if (ata_is_data(qc->tf.protocol)) {
			if (qc->tf.flags & ATA_TFLAG_WRITE)
				ctrl = PRB_CTRL_PACKET_WRITE;
			else
				ctrl = PRB_CTRL_PACKET_READ;
		}
	}

	prb->ctrl = cpu_to_le16(ctrl);
	ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, prb->fis);

	if (qc->flags & ATA_QCFLAG_DMAMAP)
		sil24_fill_sg(qc, sge);

	return AC_ERR_OK;
}
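
/*
 * A prepared command is handed to the controller by writing the bus
 * address of its command block to the per-tag PORT_CMD_ACTIVATE slot;
 * sil24_config_port() clears PORT_CS_32BIT_ACTV, so both the low and high
 * halves of the 64-bit address are written.
 */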
static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct sil24_port_priv *pp = ap->private_data;
	void __iomem *port = sil24_port_base(ap);
	unsigned int tag = sil24_tag(qc->hw_tag);
	dma_addr_t paddr;
	void __iomem *activate;

	paddr = pp->cmd_block_dma + tag * sizeof(*pp->cmd_block);
	activate = port + PORT_CMD_ACTIVATE + tag * 8;

	/*
	 * The barrier is required to ensure that writes to cmd_block reach
	 * the memory before the write to PORT_CMD_ACTIVATE.
	 */
	wmb();
	writel((u32)paddr, activate);
	writel((u64)paddr >> 32, activate + 4);

	return 0;
}

static void sil24_qc_fill_rtf(struct ata_queued_cmd *qc)
{
	sil24_read_tf(qc->ap, qc->hw_tag, &qc->result_tf);
}

static void sil24_pmp_attach(struct ata_port *ap)
{
	u32 *gscr = ap->link.device->gscr;

	sil24_config_pmp(ap, 1);

	if (sata_pmp_gscr_vendor(gscr) == 0x11ab &&
	    sata_pmp_gscr_devid(gscr) == 0x4140) {
		ata_port_info(ap,
			      "disabling NCQ support due to sil24-mv4140 quirk\n");
		ap->flags &= ~ATA_FLAG_NCQ;
	}
}

static void sil24_pmp_detach(struct ata_port *ap)
{
	sil24_config_pmp(ap, 0);

	ap->flags |= ATA_FLAG_NCQ;
}

static int sil24_pmp_hardreset(struct ata_link *link, unsigned int *class,
			       unsigned long deadline)
{
	int rc;

	rc = sil24_init_port(link->ap);
	if (rc) {
		ata_link_err(link, "hardreset failed (port not ready)\n");
		return rc;
	}

	return sata_std_hardreset(link, class, deadline);
}

static void sil24_freeze(struct ata_port *ap)
{
	void __iomem *port = sil24_port_base(ap);

	/* Port-wide IRQ mask in HOST_CTRL doesn't really work, clear
	 * PORT_IRQ_ENABLE instead.
	 */
	writel(0xffff, port + PORT_IRQ_ENABLE_CLR);
}

static void sil24_thaw(struct ata_port *ap)
{
	void __iomem *port = sil24_port_base(ap);
	u32 tmp;

	/* clear IRQ */
	tmp = readl(port + PORT_IRQ_STAT);
	writel(tmp, port + PORT_IRQ_STAT);

	/* turn IRQ back on */
	writel(DEF_PORT_IRQ, port + PORT_IRQ_ENABLE_SET);
}
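
/*
 * Error interrupt handling: read and clear PORT_IRQ_STAT, record hotplug,
 * unknown-FIS and SDB-notify events against the port link, then, for
 * command errors, decode PORT_CMD_ERR through sil24_cerr_db[] and either
 * freeze the port or abort the offending link.
 */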
static void sil24_error_intr(struct ata_port *ap)
{
	void __iomem *port = sil24_port_base(ap);
	struct sil24_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc = NULL;
	struct ata_link *link;
	struct ata_eh_info *ehi;
	int abort = 0, freeze = 0;
	u32 irq_stat;

	/* on error, we need to clear IRQ explicitly */
	irq_stat = readl(port + PORT_IRQ_STAT);
	writel(irq_stat, port + PORT_IRQ_STAT);

	/* first, analyze and record host port events */
	link = &ap->link;
	ehi = &link->eh_info;
	ata_ehi_clear_desc(ehi);

	ata_ehi_push_desc(ehi, "irq_stat 0x%08x", irq_stat);

	if (irq_stat & PORT_IRQ_SDB_NOTIFY) {
		ata_ehi_push_desc(ehi, "SDB notify");
		sata_async_notification(ap);
	}

	if (irq_stat & (PORT_IRQ_PHYRDY_CHG | PORT_IRQ_DEV_XCHG)) {
		ata_ehi_hotplugged(ehi);
		ata_ehi_push_desc(ehi, "%s",
				  irq_stat & PORT_IRQ_PHYRDY_CHG ?
				  "PHY RDY changed" : "device exchanged");
		freeze = 1;
	}

	if (irq_stat & PORT_IRQ_UNK_FIS) {
		ehi->err_mask |= AC_ERR_HSM;
		ehi->action |= ATA_EH_RESET;
		ata_ehi_push_desc(ehi, "unknown FIS");
		freeze = 1;
	}

	/* deal with command error */
	if (irq_stat & PORT_IRQ_ERROR) {
		const struct sil24_cerr_info *ci = NULL;
		unsigned int err_mask = 0, action = 0;
		u32 context, cerr;
		int pmp;

		abort = 1;

		/* DMA Context Switch Failure in Port Multiplier Mode
		 * errata.  If we have active commands to 3 or more
		 * devices, any error condition on active devices can
		 * corrupt DMA context switching.
		 */
		if (ap->nr_active_links >= 3) {
			ehi->err_mask |= AC_ERR_OTHER;
			ehi->action |= ATA_EH_RESET;
			ata_ehi_push_desc(ehi, "PMP DMA CS errata");
			pp->do_port_rst = 1;
			freeze = 1;
		}

		/* find out the offending link and qc */
		if (sata_pmp_attached(ap)) {
			context = readl(port + PORT_CONTEXT);
			pmp = (context >> 5) & 0xf;

			if (pmp < ap->nr_pmp_links) {
				link = &ap->pmp_link[pmp];
				ehi = &link->eh_info;
				qc = ata_qc_from_tag(ap, link->active_tag);

				ata_ehi_clear_desc(ehi);
				ata_ehi_push_desc(ehi, "irq_stat 0x%08x",
						  irq_stat);
			} else {
				err_mask |= AC_ERR_HSM;
				action |= ATA_EH_RESET;
				freeze = 1;
			}
		} else
			qc = ata_qc_from_tag(ap, link->active_tag);

		/* analyze CMD_ERR */
		cerr = readl(port + PORT_CMD_ERR);
		if (cerr < ARRAY_SIZE(sil24_cerr_db))
			ci = &sil24_cerr_db[cerr];

		if (ci && ci->desc) {
			err_mask |= ci->err_mask;
			action |= ci->action;
			if (action & ATA_EH_RESET)
				freeze = 1;
			ata_ehi_push_desc(ehi, "%s", ci->desc);
		} else {
			err_mask |= AC_ERR_OTHER;
			action |= ATA_EH_RESET;
			freeze = 1;
			ata_ehi_push_desc(ehi, "unknown command error %d",
					  cerr);
		}

		/* record error info */
		if (qc)
			qc->err_mask |= err_mask;
		else
			ehi->err_mask |= err_mask;

		ehi->action |= action;

		/* if PMP, resume */
		if (sata_pmp_attached(ap))
			writel(PORT_CS_PMP_RESUME, port + PORT_CTRL_STAT);
	}

	/* freeze or abort */
	if (freeze)
		ata_port_freeze(ap);
	else if (abort && qc)
		ata_link_abort(qc->dev->link);
}

static inline void sil24_host_intr(struct ata_port *ap)
{
	void __iomem *port = sil24_port_base(ap);
	u32 slot_stat, qc_active;
	int rc;

	/* If PCIX_IRQ_WOC, there's an inherent race window between
	 * clearing IRQ pending status and reading PORT_SLOT_STAT
	 * which may cause spurious interrupts afterwards.  This is
	 * unavoidable and much better than losing interrupts which
	 * happens if IRQ pending is cleared after reading
	 * PORT_SLOT_STAT.
	 */
	if (ap->flags & SIL24_FLAG_PCIX_IRQ_WOC)
		writel(PORT_IRQ_COMPLETE, port + PORT_IRQ_STAT);

	slot_stat = readl(port + PORT_SLOT_STAT);

	if (unlikely(slot_stat & HOST_SSTAT_ATTN)) {
		sil24_error_intr(ap);
		return;
	}

	qc_active = slot_stat & ~HOST_SSTAT_ATTN;
	rc = ata_qc_complete_multiple(ap, qc_active);
	if (rc > 0)
		return;
	if (rc < 0) {
		struct ata_eh_info *ehi = &ap->link.eh_info;
		ehi->err_mask |= AC_ERR_HSM;
		ehi->action |= ATA_EH_RESET;
		ata_port_freeze(ap);
		return;
	}

	/* spurious interrupts are expected if PCIX_IRQ_WOC */
	if (!(ap->flags & SIL24_FLAG_PCIX_IRQ_WOC) && ata_ratelimit())
		ata_port_info(ap,
			"spurious interrupt (slot_stat 0x%x active_tag %d sactive 0x%x)\n",
			slot_stat, ap->link.active_tag, ap->link.sactive);
}
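
/*
 * Top level interrupt handler: a HOST_IRQ_STAT value of all ones is
 * treated as a PCI fault or surprise removal, otherwise each set port bit
 * is dispatched to sil24_host_intr() under the host lock.
 */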
static irqreturn_t sil24_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	void __iomem *host_base = host->iomap[SIL24_HOST_BAR];
	unsigned handled = 0;
	u32 status;
	int i;

	status = readl(host_base + HOST_IRQ_STAT);

	if (status == 0xffffffff) {
		dev_err(host->dev, "IRQ status == 0xffffffff, "
			"PCI fault or device removal?\n");
		goto out;
	}

	if (!(status & IRQ_STAT_4PORTS))
		goto out;

	spin_lock(&host->lock);

	for (i = 0; i < host->n_ports; i++)
		if (status & (1 << i)) {
			sil24_host_intr(host->ports[i]);
			handled++;
		}

	spin_unlock(&host->lock);

 out:
	return IRQ_RETVAL(handled);
}

static void sil24_error_handler(struct ata_port *ap)
{
	struct sil24_port_priv *pp = ap->private_data;

	if (sil24_init_port(ap))
		ata_eh_freeze_port(ap);

	sata_pmp_error_handler(ap);

	pp->do_port_rst = 0;
}

static void sil24_post_internal_cmd(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* make DMA engine forget about the failed command */
	if ((qc->flags & ATA_QCFLAG_EH) && sil24_init_port(ap))
		ata_eh_freeze_port(ap);
}
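
/*
 * Per-port init: allocate the SIL24_MAX_CMDS command blocks (each block is
 * one page, per the size check in sil24_init_one()) from coherent DMA
 * memory and remember their bus address for later command activation.
 */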
static int sil24_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct sil24_port_priv *pp;
	union sil24_cmd_block *cb;
	size_t cb_size = sizeof(*cb) * SIL24_MAX_CMDS;
	dma_addr_t cb_dma;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	cb = dmam_alloc_coherent(dev, cb_size, &cb_dma, GFP_KERNEL);
	if (!cb)
		return -ENOMEM;

	pp->cmd_block = cb;
	pp->cmd_block_dma = cb_dma;

	ap->private_data = pp;

	ata_port_pbar_desc(ap, SIL24_HOST_BAR, -1, "host");
	ata_port_pbar_desc(ap, SIL24_PORT_BAR, sil24_port_offset(ap), "port");

	return 0;
}

static void sil24_init_controller(struct ata_host *host)
{
	void __iomem *host_base = host->iomap[SIL24_HOST_BAR];
	u32 tmp;
	int i;

	/* GPIO off */
	writel(0, host_base + HOST_FLASH_CMD);

	/* clear global reset & mask interrupts during initialization */
	writel(0, host_base + HOST_CTRL);

	/* init ports */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		void __iomem *port = sil24_port_base(ap);

		/* Initial PHY setting */
		writel(0x20c, port + PORT_PHY_CFG);

		/* Clear port RST */
		tmp = readl(port + PORT_CTRL_STAT);
		if (tmp & PORT_CS_PORT_RST) {
			writel(PORT_CS_PORT_RST, port + PORT_CTRL_CLR);
			tmp = ata_wait_register(NULL, port + PORT_CTRL_STAT,
						PORT_CS_PORT_RST,
						PORT_CS_PORT_RST, 10, 100);
			if (tmp & PORT_CS_PORT_RST)
				dev_err(host->dev,
					"failed to clear port RST\n");
		}

		/* configure port */
		sil24_config_port(ap);
	}

	/* Turn on interrupts */
	writel(IRQ_STAT_4PORTS, host_base + HOST_CTRL);
}

static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	extern int __MARKER__sil24_cmd_block_is_sized_wrongly;
	struct ata_port_info pi = sil24_port_info[ent->driver_data];
	const struct ata_port_info *ppi[] = { &pi, NULL };
	void __iomem * const *iomap;
	struct ata_host *host;
	u32 tmp;
	int rc;

	/* cause link error if sil24_cmd_block is sized wrongly */
	if (sizeof(union sil24_cmd_block) != PAGE_SIZE)
		__MARKER__sil24_cmd_block_is_sized_wrongly = 1;

	ata_print_version_once(&pdev->dev, DRV_VERSION);

	/* acquire resources */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev,
				(1 << SIL24_HOST_BAR) | (1 << SIL24_PORT_BAR),
				DRV_NAME);
	if (rc)
		return rc;
	iomap = pcim_iomap_table(pdev);

	/* apply workaround for completion IRQ loss on PCI-X errata */
	if (pi.flags & SIL24_FLAG_PCIX_IRQ_WOC) {
		tmp = readl(iomap[SIL24_HOST_BAR] + HOST_CTRL);
		if (tmp & (HOST_CTRL_TRDY | HOST_CTRL_STOP | HOST_CTRL_DEVSEL))
			dev_info(&pdev->dev,
				 "Applying completion IRQ loss on PCI-X errata fix\n");
		else
			pi.flags &= ~SIL24_FLAG_PCIX_IRQ_WOC;
	}

	/* allocate and fill host */
	host = ata_host_alloc_pinfo(&pdev->dev, ppi,
				    SIL24_FLAG2NPORTS(ppi[0]->flags));
	if (!host)
		return -ENOMEM;
	host->iomap = iomap;

	/* configure and activate the device */
	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (rc) {
		dev_err(&pdev->dev, "DMA enable failed\n");
		return rc;
	}

	/* Set max read request size to 4096.  This slightly increases
	 * write throughput for pci-e variants.
	 */
	pcie_set_readrq(pdev, 4096);

	sil24_init_controller(host);

	if (sata_sil24_msi && !pci_enable_msi(pdev)) {
		dev_info(&pdev->dev, "Using MSI\n");
	}

	pci_set_master(pdev);
	return ata_host_activate(host, pdev->irq, sil24_interrupt, IRQF_SHARED,
				 &sil24_sht);
}

#ifdef CONFIG_PM_SLEEP
static int sil24_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = pci_get_drvdata(pdev);
	void __iomem *host_base = host->iomap[SIL24_HOST_BAR];
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc)
		return rc;

	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND)
		writel(HOST_CTRL_GLOBAL_RST, host_base + HOST_CTRL);

	sil24_init_controller(host);

	ata_host_resume(host);

	return 0;
}
#endif

static int sil24_port_resume(struct ata_port *ap)
{
	sil24_config_pmp(ap, ap->nr_pmp_links);
	return 0;
}

module_pci_driver(sil24_pci_driver);

MODULE_AUTHOR("Tejun Heo");
MODULE_DESCRIPTION("Silicon Image 3124/3132 SATA low-level driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, sil24_pci_tbl);