mt76: avoid rx reorder buffer overflow
[linux/fpc-iii.git] / drivers / ata / sata_nv.c
blobeb9dc14e5147aaebbc210c1481ce146a6d9c57a4
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * sata_nv.c - NVIDIA nForce SATA
5 * Copyright 2004 NVIDIA Corp. All rights reserved.
6 * Copyright 2004 Andrew Chew
8 * libata documentation is available via 'make {ps|pdf}docs',
9 * as Documentation/driver-api/libata.rst
11 * No hardware documentation available outside of NVIDIA.
12 * This driver programs the NVIDIA SATA controller in a similar
13 * fashion as with other PCI IDE BMDMA controllers, with a few
14 * NV-specific details such as register offsets, SATA phy location,
15 * hotplug info, etc.
17 * CK804/MCP04 controllers support an alternate programming interface
18 * similar to the ADMA specification (with some modifications).
19 * This allows the use of NCQ. Non-DMA-mapped ATA commands are still
20 * sent through the legacy interface.
23 #include <linux/kernel.h>
24 #include <linux/module.h>
25 #include <linux/gfp.h>
26 #include <linux/pci.h>
27 #include <linux/blkdev.h>
28 #include <linux/delay.h>
29 #include <linux/interrupt.h>
30 #include <linux/device.h>
31 #include <scsi/scsi_host.h>
32 #include <scsi/scsi_device.h>
33 #include <linux/libata.h>
35 #define DRV_NAME "sata_nv"
36 #define DRV_VERSION "3.5"
38 #define NV_ADMA_DMA_BOUNDARY 0xffffffffUL
40 enum {
41 NV_MMIO_BAR = 5,
43 NV_PORTS = 2,
44 NV_PIO_MASK = ATA_PIO4,
45 NV_MWDMA_MASK = ATA_MWDMA2,
46 NV_UDMA_MASK = ATA_UDMA6,
47 NV_PORT0_SCR_REG_OFFSET = 0x00,
48 NV_PORT1_SCR_REG_OFFSET = 0x40,
50 /* INT_STATUS/ENABLE */
51 NV_INT_STATUS = 0x10,
52 NV_INT_ENABLE = 0x11,
53 NV_INT_STATUS_CK804 = 0x440,
54 NV_INT_ENABLE_CK804 = 0x441,
56 /* INT_STATUS/ENABLE bits */
57 NV_INT_DEV = 0x01,
58 NV_INT_PM = 0x02,
59 NV_INT_ADDED = 0x04,
60 NV_INT_REMOVED = 0x08,
62 NV_INT_PORT_SHIFT = 4, /* each port occupies 4 bits */
64 NV_INT_ALL = 0x0f,
65 NV_INT_MASK = NV_INT_DEV |
66 NV_INT_ADDED | NV_INT_REMOVED,
68 /* INT_CONFIG */
69 NV_INT_CONFIG = 0x12,
70 NV_INT_CONFIG_METHD = 0x01, // 0 = INT, 1 = SMI
72 // For PCI config register 20
73 NV_MCP_SATA_CFG_20 = 0x50,
74 NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
75 NV_MCP_SATA_CFG_20_PORT0_EN = (1 << 17),
76 NV_MCP_SATA_CFG_20_PORT1_EN = (1 << 16),
77 NV_MCP_SATA_CFG_20_PORT0_PWB_EN = (1 << 14),
78 NV_MCP_SATA_CFG_20_PORT1_PWB_EN = (1 << 12),
80 NV_ADMA_MAX_CPBS = 32,
81 NV_ADMA_CPB_SZ = 128,
82 NV_ADMA_APRD_SZ = 16,
83 NV_ADMA_SGTBL_LEN = (1024 - NV_ADMA_CPB_SZ) /
84 NV_ADMA_APRD_SZ,
85 NV_ADMA_SGTBL_TOTAL_LEN = NV_ADMA_SGTBL_LEN + 5,
86 NV_ADMA_SGTBL_SZ = NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
87 NV_ADMA_PORT_PRIV_DMA_SZ = NV_ADMA_MAX_CPBS *
88 (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),
90 /* BAR5 offset to ADMA general registers */
91 NV_ADMA_GEN = 0x400,
92 NV_ADMA_GEN_CTL = 0x00,
93 NV_ADMA_NOTIFIER_CLEAR = 0x30,
95 /* BAR5 offset to ADMA ports */
96 NV_ADMA_PORT = 0x480,
98 /* size of ADMA port register space */
99 NV_ADMA_PORT_SIZE = 0x100,
101 /* ADMA port registers */
102 NV_ADMA_CTL = 0x40,
103 NV_ADMA_CPB_COUNT = 0x42,
104 NV_ADMA_NEXT_CPB_IDX = 0x43,
105 NV_ADMA_STAT = 0x44,
106 NV_ADMA_CPB_BASE_LOW = 0x48,
107 NV_ADMA_CPB_BASE_HIGH = 0x4C,
108 NV_ADMA_APPEND = 0x50,
109 NV_ADMA_NOTIFIER = 0x68,
110 NV_ADMA_NOTIFIER_ERROR = 0x6C,
112 /* NV_ADMA_CTL register bits */
113 NV_ADMA_CTL_HOTPLUG_IEN = (1 << 0),
114 NV_ADMA_CTL_CHANNEL_RESET = (1 << 5),
115 NV_ADMA_CTL_GO = (1 << 7),
116 NV_ADMA_CTL_AIEN = (1 << 8),
117 NV_ADMA_CTL_READ_NON_COHERENT = (1 << 11),
118 NV_ADMA_CTL_WRITE_NON_COHERENT = (1 << 12),
120 /* CPB response flag bits */
121 NV_CPB_RESP_DONE = (1 << 0),
122 NV_CPB_RESP_ATA_ERR = (1 << 3),
123 NV_CPB_RESP_CMD_ERR = (1 << 4),
124 NV_CPB_RESP_CPB_ERR = (1 << 7),
126 /* CPB control flag bits */
127 NV_CPB_CTL_CPB_VALID = (1 << 0),
128 NV_CPB_CTL_QUEUE = (1 << 1),
129 NV_CPB_CTL_APRD_VALID = (1 << 2),
130 NV_CPB_CTL_IEN = (1 << 3),
131 NV_CPB_CTL_FPDMA = (1 << 4),
133 /* APRD flags */
134 NV_APRD_WRITE = (1 << 1),
135 NV_APRD_END = (1 << 2),
136 NV_APRD_CONT = (1 << 3),
138 /* NV_ADMA_STAT flags */
139 NV_ADMA_STAT_TIMEOUT = (1 << 0),
140 NV_ADMA_STAT_HOTUNPLUG = (1 << 1),
141 NV_ADMA_STAT_HOTPLUG = (1 << 2),
142 NV_ADMA_STAT_CPBERR = (1 << 4),
143 NV_ADMA_STAT_SERROR = (1 << 5),
144 NV_ADMA_STAT_CMD_COMPLETE = (1 << 6),
145 NV_ADMA_STAT_IDLE = (1 << 8),
146 NV_ADMA_STAT_LEGACY = (1 << 9),
147 NV_ADMA_STAT_STOPPED = (1 << 10),
148 NV_ADMA_STAT_DONE = (1 << 12),
149 NV_ADMA_STAT_ERR = NV_ADMA_STAT_CPBERR |
150 NV_ADMA_STAT_TIMEOUT,
152 /* port flags */
153 NV_ADMA_PORT_REGISTER_MODE = (1 << 0),
154 NV_ADMA_ATAPI_SETUP_COMPLETE = (1 << 1),
156 /* MCP55 reg offset */
157 NV_CTL_MCP55 = 0x400,
158 NV_INT_STATUS_MCP55 = 0x440,
159 NV_INT_ENABLE_MCP55 = 0x444,
160 NV_NCQ_REG_MCP55 = 0x448,
162 /* MCP55 */
163 NV_INT_ALL_MCP55 = 0xffff,
164 NV_INT_PORT_SHIFT_MCP55 = 16, /* each port occupies 16 bits */
165 NV_INT_MASK_MCP55 = NV_INT_ALL_MCP55 & 0xfffd,
167 /* SWNCQ ENABLE BITS*/
168 NV_CTL_PRI_SWNCQ = 0x02,
169 NV_CTL_SEC_SWNCQ = 0x04,
171 /* SW NCQ status bits*/
172 NV_SWNCQ_IRQ_DEV = (1 << 0),
173 NV_SWNCQ_IRQ_PM = (1 << 1),
174 NV_SWNCQ_IRQ_ADDED = (1 << 2),
175 NV_SWNCQ_IRQ_REMOVED = (1 << 3),
177 NV_SWNCQ_IRQ_BACKOUT = (1 << 4),
178 NV_SWNCQ_IRQ_SDBFIS = (1 << 5),
179 NV_SWNCQ_IRQ_DHREGFIS = (1 << 6),
180 NV_SWNCQ_IRQ_DMASETUP = (1 << 7),
182 NV_SWNCQ_IRQ_HOTPLUG = NV_SWNCQ_IRQ_ADDED |
183 NV_SWNCQ_IRQ_REMOVED,
187 /* ADMA Physical Region Descriptor - one SG segment */
188 struct nv_adma_prd {
189 __le64 addr;
190 __le32 len;
191 u8 flags;
192 u8 packet_len;
193 __le16 reserved;
196 enum nv_adma_regbits {
197 CMDEND = (1 << 15), /* end of command list */
198 WNB = (1 << 14), /* wait-not-BSY */
199 IGN = (1 << 13), /* ignore this entry */
200 CS1n = (1 << (4 + 8)), /* std. PATA signals follow... */
201 DA2 = (1 << (2 + 8)),
202 DA1 = (1 << (1 + 8)),
203 DA0 = (1 << (0 + 8)),
206 /* ADMA Command Parameter Block
207 The first 5 SG segments are stored inside the Command Parameter Block itself.
208 If there are more than 5 segments the remainder are stored in a separate
209 memory area indicated by next_aprd. */
210 struct nv_adma_cpb {
211 u8 resp_flags; /* 0 */
212 u8 reserved1; /* 1 */
213 u8 ctl_flags; /* 2 */
214 /* len is length of taskfile in 64 bit words */
215 u8 len; /* 3 */
216 u8 tag; /* 4 */
217 u8 next_cpb_idx; /* 5 */
218 __le16 reserved2; /* 6-7 */
219 __le16 tf[12]; /* 8-31 */
220 struct nv_adma_prd aprd[5]; /* 32-111 */
221 __le64 next_aprd; /* 112-119 */
222 __le64 reserved3; /* 120-127 */
226 struct nv_adma_port_priv {
227 struct nv_adma_cpb *cpb;
228 dma_addr_t cpb_dma;
229 struct nv_adma_prd *aprd;
230 dma_addr_t aprd_dma;
231 void __iomem *ctl_block;
232 void __iomem *gen_block;
233 void __iomem *notifier_clear_block;
234 u64 adma_dma_mask;
235 u8 flags;
236 int last_issue_ncq;
239 struct nv_host_priv {
240 unsigned long type;
243 struct defer_queue {
244 u32 defer_bits;
245 unsigned int head;
246 unsigned int tail;
247 unsigned int tag[ATA_MAX_QUEUE];
250 enum ncq_saw_flag_list {
251 ncq_saw_d2h = (1U << 0),
252 ncq_saw_dmas = (1U << 1),
253 ncq_saw_sdb = (1U << 2),
254 ncq_saw_backout = (1U << 3),
257 struct nv_swncq_port_priv {
258 struct ata_bmdma_prd *prd; /* our SG list */
259 dma_addr_t prd_dma; /* and its DMA mapping */
260 void __iomem *sactive_block;
261 void __iomem *irq_block;
262 void __iomem *tag_block;
263 u32 qc_active;
265 unsigned int last_issue_tag;
267 /* fifo circular queue to store deferral command */
268 struct defer_queue defer_queue;
270 /* for NCQ interrupt analysis */
271 u32 dhfis_bits;
272 u32 dmafis_bits;
273 u32 sdbfis_bits;
275 unsigned int ncq_flags;
279 #define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & (1 << (19 + (12 * (PORT)))))
281 static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
282 #ifdef CONFIG_PM_SLEEP
283 static int nv_pci_device_resume(struct pci_dev *pdev);
284 #endif
285 static void nv_ck804_host_stop(struct ata_host *host);
286 static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
287 static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
288 static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
289 static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
290 static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
292 static int nv_hardreset(struct ata_link *link, unsigned int *class,
293 unsigned long deadline);
294 static void nv_nf2_freeze(struct ata_port *ap);
295 static void nv_nf2_thaw(struct ata_port *ap);
296 static void nv_ck804_freeze(struct ata_port *ap);
297 static void nv_ck804_thaw(struct ata_port *ap);
298 static int nv_adma_slave_config(struct scsi_device *sdev);
299 static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
300 static enum ata_completion_errors nv_adma_qc_prep(struct ata_queued_cmd *qc);
301 static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
302 static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
303 static void nv_adma_irq_clear(struct ata_port *ap);
304 static int nv_adma_port_start(struct ata_port *ap);
305 static void nv_adma_port_stop(struct ata_port *ap);
306 #ifdef CONFIG_PM
307 static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg);
308 static int nv_adma_port_resume(struct ata_port *ap);
309 #endif
310 static void nv_adma_freeze(struct ata_port *ap);
311 static void nv_adma_thaw(struct ata_port *ap);
312 static void nv_adma_error_handler(struct ata_port *ap);
313 static void nv_adma_host_stop(struct ata_host *host);
314 static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc);
315 static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
317 static void nv_mcp55_thaw(struct ata_port *ap);
318 static void nv_mcp55_freeze(struct ata_port *ap);
319 static void nv_swncq_error_handler(struct ata_port *ap);
320 static int nv_swncq_slave_config(struct scsi_device *sdev);
321 static int nv_swncq_port_start(struct ata_port *ap);
322 static enum ata_completion_errors nv_swncq_qc_prep(struct ata_queued_cmd *qc);
323 static void nv_swncq_fill_sg(struct ata_queued_cmd *qc);
324 static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc);
325 static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis);
326 static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance);
327 #ifdef CONFIG_PM
328 static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg);
329 static int nv_swncq_port_resume(struct ata_port *ap);
330 #endif
332 enum nv_host_type
334 GENERIC,
335 NFORCE2,
336 NFORCE3 = NFORCE2, /* NF2 == NF3 as far as sata_nv is concerned */
337 CK804,
338 ADMA,
339 MCP5x,
340 SWNCQ,
343 static const struct pci_device_id nv_pci_tbl[] = {
344 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
345 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
346 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
347 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
348 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
349 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
350 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
351 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), MCP5x },
352 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), MCP5x },
353 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), MCP5x },
354 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), MCP5x },
355 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
356 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
357 { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },
359 { } /* terminate list */
362 static struct pci_driver nv_pci_driver = {
363 .name = DRV_NAME,
364 .id_table = nv_pci_tbl,
365 .probe = nv_init_one,
366 #ifdef CONFIG_PM_SLEEP
367 .suspend = ata_pci_device_suspend,
368 .resume = nv_pci_device_resume,
369 #endif
370 .remove = ata_pci_remove_one,
373 static struct scsi_host_template nv_sht = {
374 ATA_BMDMA_SHT(DRV_NAME),
377 static struct scsi_host_template nv_adma_sht = {
378 ATA_NCQ_SHT(DRV_NAME),
379 .can_queue = NV_ADMA_MAX_CPBS,
380 .sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN,
381 .dma_boundary = NV_ADMA_DMA_BOUNDARY,
382 .slave_configure = nv_adma_slave_config,
385 static struct scsi_host_template nv_swncq_sht = {
386 ATA_NCQ_SHT(DRV_NAME),
387 .can_queue = ATA_MAX_QUEUE - 1,
388 .sg_tablesize = LIBATA_MAX_PRD,
389 .dma_boundary = ATA_DMA_BOUNDARY,
390 .slave_configure = nv_swncq_slave_config,
394 * NV SATA controllers have various different problems with hardreset
395 * protocol depending on the specific controller and device.
397 * GENERIC:
399 * bko11195 reports that link doesn't come online after hardreset on
400 * generic nv's and there have been several other similar reports on
401 * linux-ide.
403 * bko12351#c23 reports that warmplug on MCP61 doesn't work with
404 * softreset.
406 * NF2/3:
408 * bko3352 reports nf2/3 controllers can't determine device signature
409 * reliably after hardreset. The following thread reports detection
410 * failure on cold boot with the standard debouncing timing.
412 * http://thread.gmane.org/gmane.linux.ide/34098
414 * bko12176 reports that hardreset fails to bring up the link during
415 * boot on nf2.
417 * CK804:
419 * For initial probing after boot and hot plugging, hardreset mostly
420 * works fine on CK804 but curiously, reprobing on the initial port
421 * by rescanning or rmmod/insmod fails to acquire the initial D2H Reg
 422  * FIS in a somewhat nondeterministic way.
424 * SWNCQ:
426 * bko12351 reports that when SWNCQ is enabled, for hotplug to work,
427 * hardreset should be used and hardreset can't report proper
428 * signature, which suggests that mcp5x is closer to nf2 as long as
429 * reset quirkiness is concerned.
431 * bko12703 reports that boot probing fails for intel SSD with
432 * hardreset. Link fails to come online. Softreset works fine.
434 * The failures are varied but the following patterns seem true for
435 * all flavors.
437 * - Softreset during boot always works.
439 * - Hardreset during boot sometimes fails to bring up the link on
 440  *   certain combinations and device signature acquisition is
441 * unreliable.
443 * - Hardreset is often necessary after hotplug.
445 * So, preferring softreset for boot probing and error handling (as
446 * hardreset might bring down the link) but using hardreset for
447 * post-boot probing should work around the above issues in most
448 * cases. Define nv_hardreset() which only kicks in for post-boot
449 * probing and use it for all variants.
451 static struct ata_port_operations nv_generic_ops = {
452 .inherits = &ata_bmdma_port_ops,
453 .lost_interrupt = ATA_OP_NULL,
454 .scr_read = nv_scr_read,
455 .scr_write = nv_scr_write,
456 .hardreset = nv_hardreset,
459 static struct ata_port_operations nv_nf2_ops = {
460 .inherits = &nv_generic_ops,
461 .freeze = nv_nf2_freeze,
462 .thaw = nv_nf2_thaw,
465 static struct ata_port_operations nv_ck804_ops = {
466 .inherits = &nv_generic_ops,
467 .freeze = nv_ck804_freeze,
468 .thaw = nv_ck804_thaw,
469 .host_stop = nv_ck804_host_stop,
472 static struct ata_port_operations nv_adma_ops = {
473 .inherits = &nv_ck804_ops,
475 .check_atapi_dma = nv_adma_check_atapi_dma,
476 .sff_tf_read = nv_adma_tf_read,
477 .qc_defer = ata_std_qc_defer,
478 .qc_prep = nv_adma_qc_prep,
479 .qc_issue = nv_adma_qc_issue,
480 .sff_irq_clear = nv_adma_irq_clear,
482 .freeze = nv_adma_freeze,
483 .thaw = nv_adma_thaw,
484 .error_handler = nv_adma_error_handler,
485 .post_internal_cmd = nv_adma_post_internal_cmd,
487 .port_start = nv_adma_port_start,
488 .port_stop = nv_adma_port_stop,
489 #ifdef CONFIG_PM
490 .port_suspend = nv_adma_port_suspend,
491 .port_resume = nv_adma_port_resume,
492 #endif
493 .host_stop = nv_adma_host_stop,
496 static struct ata_port_operations nv_swncq_ops = {
497 .inherits = &nv_generic_ops,
499 .qc_defer = ata_std_qc_defer,
500 .qc_prep = nv_swncq_qc_prep,
501 .qc_issue = nv_swncq_qc_issue,
503 .freeze = nv_mcp55_freeze,
504 .thaw = nv_mcp55_thaw,
505 .error_handler = nv_swncq_error_handler,
507 #ifdef CONFIG_PM
508 .port_suspend = nv_swncq_port_suspend,
509 .port_resume = nv_swncq_port_resume,
510 #endif
511 .port_start = nv_swncq_port_start,
514 struct nv_pi_priv {
515 irq_handler_t irq_handler;
516 struct scsi_host_template *sht;
519 #define NV_PI_PRIV(_irq_handler, _sht) \
520 &(struct nv_pi_priv){ .irq_handler = _irq_handler, .sht = _sht }
522 static const struct ata_port_info nv_port_info[] = {
523 /* generic */
525 .flags = ATA_FLAG_SATA,
526 .pio_mask = NV_PIO_MASK,
527 .mwdma_mask = NV_MWDMA_MASK,
528 .udma_mask = NV_UDMA_MASK,
529 .port_ops = &nv_generic_ops,
530 .private_data = NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
532 /* nforce2/3 */
534 .flags = ATA_FLAG_SATA,
535 .pio_mask = NV_PIO_MASK,
536 .mwdma_mask = NV_MWDMA_MASK,
537 .udma_mask = NV_UDMA_MASK,
538 .port_ops = &nv_nf2_ops,
539 .private_data = NV_PI_PRIV(nv_nf2_interrupt, &nv_sht),
541 /* ck804 */
543 .flags = ATA_FLAG_SATA,
544 .pio_mask = NV_PIO_MASK,
545 .mwdma_mask = NV_MWDMA_MASK,
546 .udma_mask = NV_UDMA_MASK,
547 .port_ops = &nv_ck804_ops,
548 .private_data = NV_PI_PRIV(nv_ck804_interrupt, &nv_sht),
550 /* ADMA */
552 .flags = ATA_FLAG_SATA | ATA_FLAG_NCQ,
553 .pio_mask = NV_PIO_MASK,
554 .mwdma_mask = NV_MWDMA_MASK,
555 .udma_mask = NV_UDMA_MASK,
556 .port_ops = &nv_adma_ops,
557 .private_data = NV_PI_PRIV(nv_adma_interrupt, &nv_adma_sht),
559 /* MCP5x */
561 .flags = ATA_FLAG_SATA,
562 .pio_mask = NV_PIO_MASK,
563 .mwdma_mask = NV_MWDMA_MASK,
564 .udma_mask = NV_UDMA_MASK,
565 .port_ops = &nv_generic_ops,
566 .private_data = NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
568 /* SWNCQ */
570 .flags = ATA_FLAG_SATA | ATA_FLAG_NCQ,
571 .pio_mask = NV_PIO_MASK,
572 .mwdma_mask = NV_MWDMA_MASK,
573 .udma_mask = NV_UDMA_MASK,
574 .port_ops = &nv_swncq_ops,
575 .private_data = NV_PI_PRIV(nv_swncq_interrupt, &nv_swncq_sht),
579 MODULE_AUTHOR("NVIDIA");
580 MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
581 MODULE_LICENSE("GPL");
582 MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
583 MODULE_VERSION(DRV_VERSION);
585 static bool adma_enabled;
586 static bool swncq_enabled = true;
587 static bool msi_enabled;
589 static void nv_adma_register_mode(struct ata_port *ap)
591 struct nv_adma_port_priv *pp = ap->private_data;
592 void __iomem *mmio = pp->ctl_block;
593 u16 tmp, status;
594 int count = 0;
596 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
597 return;
599 status = readw(mmio + NV_ADMA_STAT);
600 while (!(status & NV_ADMA_STAT_IDLE) && count < 20) {
601 ndelay(50);
602 status = readw(mmio + NV_ADMA_STAT);
603 count++;
605 if (count == 20)
606 ata_port_warn(ap, "timeout waiting for ADMA IDLE, stat=0x%hx\n",
607 status);
609 tmp = readw(mmio + NV_ADMA_CTL);
610 writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
612 count = 0;
613 status = readw(mmio + NV_ADMA_STAT);
614 while (!(status & NV_ADMA_STAT_LEGACY) && count < 20) {
615 ndelay(50);
616 status = readw(mmio + NV_ADMA_STAT);
617 count++;
619 if (count == 20)
620 ata_port_warn(ap,
621 "timeout waiting for ADMA LEGACY, stat=0x%hx\n",
622 status);
624 pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
627 static void nv_adma_mode(struct ata_port *ap)
629 struct nv_adma_port_priv *pp = ap->private_data;
630 void __iomem *mmio = pp->ctl_block;
631 u16 tmp, status;
632 int count = 0;
634 if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
635 return;
637 WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
639 tmp = readw(mmio + NV_ADMA_CTL);
640 writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
642 status = readw(mmio + NV_ADMA_STAT);
643 while (((status & NV_ADMA_STAT_LEGACY) ||
644 !(status & NV_ADMA_STAT_IDLE)) && count < 20) {
645 ndelay(50);
646 status = readw(mmio + NV_ADMA_STAT);
647 count++;
649 if (count == 20)
650 ata_port_warn(ap,
651 "timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n",
652 status);
654 pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
657 static int nv_adma_slave_config(struct scsi_device *sdev)
659 struct ata_port *ap = ata_shost_to_port(sdev->host);
660 struct nv_adma_port_priv *pp = ap->private_data;
661 struct nv_adma_port_priv *port0, *port1;
662 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
663 unsigned long segment_boundary, flags;
664 unsigned short sg_tablesize;
665 int rc;
666 int adma_enable;
667 u32 current_reg, new_reg, config_mask;
669 rc = ata_scsi_slave_config(sdev);
671 if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
672 /* Not a proper libata device, ignore */
673 return rc;
675 spin_lock_irqsave(ap->lock, flags);
677 if (ap->link.device[sdev->id].class == ATA_DEV_ATAPI) {
679 * NVIDIA reports that ADMA mode does not support ATAPI commands.
680 * Therefore ATAPI commands are sent through the legacy interface.
681 * However, the legacy interface only supports 32-bit DMA.
682 * Restrict DMA parameters as required by the legacy interface
683 * when an ATAPI device is connected.
685 segment_boundary = ATA_DMA_BOUNDARY;
686 /* Subtract 1 since an extra entry may be needed for padding, see
687 libata-scsi.c */
688 sg_tablesize = LIBATA_MAX_PRD - 1;
690 /* Since the legacy DMA engine is in use, we need to disable ADMA
691 on the port. */
692 adma_enable = 0;
693 nv_adma_register_mode(ap);
694 } else {
695 segment_boundary = NV_ADMA_DMA_BOUNDARY;
696 sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
697 adma_enable = 1;
700 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);
702 if (ap->port_no == 1)
703 config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
704 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
705 else
706 config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
707 NV_MCP_SATA_CFG_20_PORT0_PWB_EN;
709 if (adma_enable) {
710 new_reg = current_reg | config_mask;
711 pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
712 } else {
713 new_reg = current_reg & ~config_mask;
714 pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
717 if (current_reg != new_reg)
718 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);
720 port0 = ap->host->ports[0]->private_data;
721 port1 = ap->host->ports[1]->private_data;
722 if ((port0->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
723 (port1->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
725 * We have to set the DMA mask to 32-bit if either port is in
726 * ATAPI mode, since they are on the same PCI device which is
727 * used for DMA mapping. If either SCSI device is not allocated
728 * yet, it's OK since that port will discover its correct
729 * setting when it does get allocated.
731 rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK);
732 } else {
733 rc = dma_set_mask(&pdev->dev, pp->adma_dma_mask);
736 blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
737 blk_queue_max_segments(sdev->request_queue, sg_tablesize);
738 ata_port_info(ap,
739 "DMA mask 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
740 (unsigned long long)*ap->host->dev->dma_mask,
741 segment_boundary, sg_tablesize);
743 spin_unlock_irqrestore(ap->lock, flags);
745 return rc;
748 static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
750 struct nv_adma_port_priv *pp = qc->ap->private_data;
751 return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
	/* Other than when internal or pass-through commands are executed,
	   the only time this function will be called in ADMA mode will be
	   if a command fails. In the failure case we don't care about going
	   into register mode with ADMA commands pending, as the commands will
	   all shortly be aborted anyway. We assume that NCQ commands are not
	   issued via passthrough, which is the only way that switching into
	   ADMA mode could abort outstanding commands. */
	nv_adma_register_mode(ap);

	ata_sff_tf_read(ap, tf);
}
768 static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
770 unsigned int idx = 0;
772 if (tf->flags & ATA_TFLAG_ISADDR) {
773 if (tf->flags & ATA_TFLAG_LBA48) {
774 cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->hob_feature | WNB);
775 cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
776 cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->hob_lbal);
777 cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->hob_lbam);
778 cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->hob_lbah);
779 cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature);
780 } else
781 cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature | WNB);
783 cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->nsect);
784 cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->lbal);
785 cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->lbam);
786 cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->lbah);
789 if (tf->flags & ATA_TFLAG_DEVICE)
790 cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device);
792 cpb[idx++] = cpu_to_le16((ATA_REG_CMD << 8) | tf->command | CMDEND);
794 while (idx < 12)
795 cpb[idx++] = cpu_to_le16(IGN);
797 return idx;
800 static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
802 struct nv_adma_port_priv *pp = ap->private_data;
803 u8 flags = pp->cpb[cpb_num].resp_flags;
805 VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);
807 if (unlikely((force_err ||
808 flags & (NV_CPB_RESP_ATA_ERR |
809 NV_CPB_RESP_CMD_ERR |
810 NV_CPB_RESP_CPB_ERR)))) {
811 struct ata_eh_info *ehi = &ap->link.eh_info;
812 int freeze = 0;
814 ata_ehi_clear_desc(ehi);
815 __ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x: ", flags);
816 if (flags & NV_CPB_RESP_ATA_ERR) {
817 ata_ehi_push_desc(ehi, "ATA error");
818 ehi->err_mask |= AC_ERR_DEV;
819 } else if (flags & NV_CPB_RESP_CMD_ERR) {
820 ata_ehi_push_desc(ehi, "CMD error");
821 ehi->err_mask |= AC_ERR_DEV;
822 } else if (flags & NV_CPB_RESP_CPB_ERR) {
823 ata_ehi_push_desc(ehi, "CPB error");
824 ehi->err_mask |= AC_ERR_SYSTEM;
825 freeze = 1;
826 } else {
827 /* notifier error, but no error in CPB flags? */
828 ata_ehi_push_desc(ehi, "unknown");
829 ehi->err_mask |= AC_ERR_OTHER;
830 freeze = 1;
832 /* Kill all commands. EH will determine what actually failed. */
833 if (freeze)
834 ata_port_freeze(ap);
835 else
836 ata_port_abort(ap);
837 return -1;
840 if (likely(flags & NV_CPB_RESP_DONE))
841 return 1;
842 return 0;
845 static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
847 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
849 /* freeze if hotplugged */
850 if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
851 ata_port_freeze(ap);
852 return 1;
855 /* bail out if not our interrupt */
856 if (!(irq_stat & NV_INT_DEV))
857 return 0;
859 /* DEV interrupt w/ no active qc? */
860 if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
861 ata_sff_check_status(ap);
862 return 1;
865 /* handle interrupt */
866 return ata_bmdma_port_intr(ap, qc);
869 static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
871 struct ata_host *host = dev_instance;
872 int i, handled = 0;
873 u32 notifier_clears[2];
875 spin_lock(&host->lock);
877 for (i = 0; i < host->n_ports; i++) {
878 struct ata_port *ap = host->ports[i];
879 struct nv_adma_port_priv *pp = ap->private_data;
880 void __iomem *mmio = pp->ctl_block;
881 u16 status;
882 u32 gen_ctl;
883 u32 notifier, notifier_error;
885 notifier_clears[i] = 0;
887 /* if ADMA is disabled, use standard ata interrupt handler */
888 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
889 u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
890 >> (NV_INT_PORT_SHIFT * i);
891 handled += nv_host_intr(ap, irq_stat);
892 continue;
895 /* if in ATA register mode, check for standard interrupts */
896 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
897 u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
898 >> (NV_INT_PORT_SHIFT * i);
899 if (ata_tag_valid(ap->link.active_tag))
900 /** NV_INT_DEV indication seems unreliable
901 at times at least in ADMA mode. Force it
902 on always when a command is active, to
903 prevent losing interrupts. */
904 irq_stat |= NV_INT_DEV;
905 handled += nv_host_intr(ap, irq_stat);
908 notifier = readl(mmio + NV_ADMA_NOTIFIER);
909 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
910 notifier_clears[i] = notifier | notifier_error;
912 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
914 if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
915 !notifier_error)
916 /* Nothing to do */
917 continue;
919 status = readw(mmio + NV_ADMA_STAT);
922 * Clear status. Ensure the controller sees the
923 * clearing before we start looking at any of the CPB
924 * statuses, so that any CPB completions after this
925 * point in the handler will raise another interrupt.
927 writew(status, mmio + NV_ADMA_STAT);
928 readw(mmio + NV_ADMA_STAT); /* flush posted write */
929 rmb();
931 handled++; /* irq handled if we got here */
933 /* freeze if hotplugged or controller error */
934 if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
935 NV_ADMA_STAT_HOTUNPLUG |
936 NV_ADMA_STAT_TIMEOUT |
937 NV_ADMA_STAT_SERROR))) {
938 struct ata_eh_info *ehi = &ap->link.eh_info;
940 ata_ehi_clear_desc(ehi);
941 __ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status);
942 if (status & NV_ADMA_STAT_TIMEOUT) {
943 ehi->err_mask |= AC_ERR_SYSTEM;
944 ata_ehi_push_desc(ehi, "timeout");
945 } else if (status & NV_ADMA_STAT_HOTPLUG) {
946 ata_ehi_hotplugged(ehi);
947 ata_ehi_push_desc(ehi, "hotplug");
948 } else if (status & NV_ADMA_STAT_HOTUNPLUG) {
949 ata_ehi_hotplugged(ehi);
950 ata_ehi_push_desc(ehi, "hot unplug");
951 } else if (status & NV_ADMA_STAT_SERROR) {
952 /* let EH analyze SError and figure out cause */
953 ata_ehi_push_desc(ehi, "SError");
954 } else
955 ata_ehi_push_desc(ehi, "unknown");
956 ata_port_freeze(ap);
957 continue;
960 if (status & (NV_ADMA_STAT_DONE |
961 NV_ADMA_STAT_CPBERR |
962 NV_ADMA_STAT_CMD_COMPLETE)) {
963 u32 check_commands = notifier_clears[i];
964 u32 done_mask = 0;
965 int pos, rc;
967 if (status & NV_ADMA_STAT_CPBERR) {
968 /* check all active commands */
969 if (ata_tag_valid(ap->link.active_tag))
970 check_commands = 1 <<
971 ap->link.active_tag;
972 else
973 check_commands = ap->link.sactive;
976 /* check CPBs for completed commands */
977 while ((pos = ffs(check_commands))) {
978 pos--;
979 rc = nv_adma_check_cpb(ap, pos,
980 notifier_error & (1 << pos));
981 if (rc > 0)
982 done_mask |= 1 << pos;
983 else if (unlikely(rc < 0))
984 check_commands = 0;
985 check_commands &= ~(1 << pos);
987 ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask);
991 if (notifier_clears[0] || notifier_clears[1]) {
992 /* Note: Both notifier clear registers must be written
993 if either is set, even if one is zero, according to NVIDIA. */
994 struct nv_adma_port_priv *pp = host->ports[0]->private_data;
995 writel(notifier_clears[0], pp->notifier_clear_block);
996 pp = host->ports[1]->private_data;
997 writel(notifier_clears[1], pp->notifier_clear_block);
1000 spin_unlock(&host->lock);
1002 return IRQ_RETVAL(handled);
1005 static void nv_adma_freeze(struct ata_port *ap)
1007 struct nv_adma_port_priv *pp = ap->private_data;
1008 void __iomem *mmio = pp->ctl_block;
1009 u16 tmp;
1011 nv_ck804_freeze(ap);
1013 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
1014 return;
1016 /* clear any outstanding CK804 notifications */
1017 writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
1018 ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
1020 /* Disable interrupt */
1021 tmp = readw(mmio + NV_ADMA_CTL);
1022 writew(tmp & ~(NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
1023 mmio + NV_ADMA_CTL);
1024 readw(mmio + NV_ADMA_CTL); /* flush posted write */
1027 static void nv_adma_thaw(struct ata_port *ap)
1029 struct nv_adma_port_priv *pp = ap->private_data;
1030 void __iomem *mmio = pp->ctl_block;
1031 u16 tmp;
1033 nv_ck804_thaw(ap);
1035 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
1036 return;
1038 /* Enable interrupt */
1039 tmp = readw(mmio + NV_ADMA_CTL);
1040 writew(tmp | (NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
1041 mmio + NV_ADMA_CTL);
1042 readw(mmio + NV_ADMA_CTL); /* flush posted write */
1045 static void nv_adma_irq_clear(struct ata_port *ap)
1047 struct nv_adma_port_priv *pp = ap->private_data;
1048 void __iomem *mmio = pp->ctl_block;
1049 u32 notifier_clears[2];
1051 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
1052 ata_bmdma_irq_clear(ap);
1053 return;
1056 /* clear any outstanding CK804 notifications */
1057 writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
1058 ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
1060 /* clear ADMA status */
1061 writew(0xffff, mmio + NV_ADMA_STAT);
1063 /* clear notifiers - note both ports need to be written with
1064 something even though we are only clearing on one */
1065 if (ap->port_no == 0) {
1066 notifier_clears[0] = 0xFFFFFFFF;
1067 notifier_clears[1] = 0;
1068 } else {
1069 notifier_clears[0] = 0;
1070 notifier_clears[1] = 0xFFFFFFFF;
1072 pp = ap->host->ports[0]->private_data;
1073 writel(notifier_clears[0], pp->notifier_clear_block);
1074 pp = ap->host->ports[1]->private_data;
1075 writel(notifier_clears[1], pp->notifier_clear_block);
1078 static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc)
1080 struct nv_adma_port_priv *pp = qc->ap->private_data;
1082 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
1083 ata_bmdma_post_internal_cmd(qc);
1086 static int nv_adma_port_start(struct ata_port *ap)
1088 struct device *dev = ap->host->dev;
1089 struct nv_adma_port_priv *pp;
1090 int rc;
1091 void *mem;
1092 dma_addr_t mem_dma;
1093 void __iomem *mmio;
1094 struct pci_dev *pdev = to_pci_dev(dev);
1095 u16 tmp;
1097 VPRINTK("ENTER\n");
1100 * Ensure DMA mask is set to 32-bit before allocating legacy PRD and
1101 * pad buffers.
1103 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
1104 if (rc)
1105 return rc;
1107 /* we might fallback to bmdma, allocate bmdma resources */
1108 rc = ata_bmdma_port_start(ap);
1109 if (rc)
1110 return rc;
1112 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1113 if (!pp)
1114 return -ENOMEM;
1116 mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT +
1117 ap->port_no * NV_ADMA_PORT_SIZE;
1118 pp->ctl_block = mmio;
1119 pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN;
1120 pp->notifier_clear_block = pp->gen_block +
1121 NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);
1124 * Now that the legacy PRD and padding buffer are allocated we can
1125 * raise the DMA mask to allocate the CPB/APRD table.
1127 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
1129 pp->adma_dma_mask = *dev->dma_mask;
1131 mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
1132 &mem_dma, GFP_KERNEL);
1133 if (!mem)
1134 return -ENOMEM;
1137 * First item in chunk of DMA memory:
1138 * 128-byte command parameter block (CPB)
1139 * one for each command tag
1141 pp->cpb = mem;
1142 pp->cpb_dma = mem_dma;
1144 writel(mem_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW);
1145 writel((mem_dma >> 16) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH);
1147 mem += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
1148 mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
1151 * Second item: block of ADMA_SGTBL_LEN s/g entries
1153 pp->aprd = mem;
1154 pp->aprd_dma = mem_dma;
1156 ap->private_data = pp;
1158 /* clear any outstanding interrupt conditions */
1159 writew(0xffff, mmio + NV_ADMA_STAT);
1161 /* initialize port variables */
1162 pp->flags = NV_ADMA_PORT_REGISTER_MODE;
1164 /* clear CPB fetch count */
1165 writew(0, mmio + NV_ADMA_CPB_COUNT);
1167 /* clear GO for register mode, enable interrupt */
1168 tmp = readw(mmio + NV_ADMA_CTL);
1169 writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
1170 NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);
1172 tmp = readw(mmio + NV_ADMA_CTL);
1173 writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1174 readw(mmio + NV_ADMA_CTL); /* flush posted write */
1175 udelay(1);
1176 writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1177 readw(mmio + NV_ADMA_CTL); /* flush posted write */
1179 return 0;
1182 static void nv_adma_port_stop(struct ata_port *ap)
1184 struct nv_adma_port_priv *pp = ap->private_data;
1185 void __iomem *mmio = pp->ctl_block;
1187 VPRINTK("ENTER\n");
1188 writew(0, mmio + NV_ADMA_CTL);
1191 #ifdef CONFIG_PM
1192 static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg)
1194 struct nv_adma_port_priv *pp = ap->private_data;
1195 void __iomem *mmio = pp->ctl_block;
1197 /* Go to register mode - clears GO */
1198 nv_adma_register_mode(ap);
1200 /* clear CPB fetch count */
1201 writew(0, mmio + NV_ADMA_CPB_COUNT);
1203 /* disable interrupt, shut down port */
1204 writew(0, mmio + NV_ADMA_CTL);
1206 return 0;
1209 static int nv_adma_port_resume(struct ata_port *ap)
1211 struct nv_adma_port_priv *pp = ap->private_data;
1212 void __iomem *mmio = pp->ctl_block;
1213 u16 tmp;
1215 /* set CPB block location */
1216 writel(pp->cpb_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW);
1217 writel((pp->cpb_dma >> 16) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH);
1219 /* clear any outstanding interrupt conditions */
1220 writew(0xffff, mmio + NV_ADMA_STAT);
1222 /* initialize port variables */
1223 pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
1225 /* clear CPB fetch count */
1226 writew(0, mmio + NV_ADMA_CPB_COUNT);
1228 /* clear GO for register mode, enable interrupt */
1229 tmp = readw(mmio + NV_ADMA_CTL);
1230 writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
1231 NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);
1233 tmp = readw(mmio + NV_ADMA_CTL);
1234 writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1235 readw(mmio + NV_ADMA_CTL); /* flush posted write */
1236 udelay(1);
1237 writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1238 readw(mmio + NV_ADMA_CTL); /* flush posted write */
1240 return 0;
1242 #endif
1244 static void nv_adma_setup_port(struct ata_port *ap)
1246 void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1247 struct ata_ioports *ioport = &ap->ioaddr;
1249 VPRINTK("ENTER\n");
1251 mmio += NV_ADMA_PORT + ap->port_no * NV_ADMA_PORT_SIZE;
1253 ioport->cmd_addr = mmio;
1254 ioport->data_addr = mmio + (ATA_REG_DATA * 4);
1255 ioport->error_addr =
1256 ioport->feature_addr = mmio + (ATA_REG_ERR * 4);
1257 ioport->nsect_addr = mmio + (ATA_REG_NSECT * 4);
1258 ioport->lbal_addr = mmio + (ATA_REG_LBAL * 4);
1259 ioport->lbam_addr = mmio + (ATA_REG_LBAM * 4);
1260 ioport->lbah_addr = mmio + (ATA_REG_LBAH * 4);
1261 ioport->device_addr = mmio + (ATA_REG_DEVICE * 4);
1262 ioport->status_addr =
1263 ioport->command_addr = mmio + (ATA_REG_STATUS * 4);
1264 ioport->altstatus_addr =
1265 ioport->ctl_addr = mmio + 0x20;
1268 static int nv_adma_host_init(struct ata_host *host)
1270 struct pci_dev *pdev = to_pci_dev(host->dev);
1271 unsigned int i;
1272 u32 tmp32;
1274 VPRINTK("ENTER\n");
1276 /* enable ADMA on the ports */
1277 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
1278 tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
1279 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
1280 NV_MCP_SATA_CFG_20_PORT1_EN |
1281 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
1283 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
1285 for (i = 0; i < host->n_ports; i++)
1286 nv_adma_setup_port(host->ports[i]);
1288 return 0;
1291 static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
1292 struct scatterlist *sg,
1293 int idx,
1294 struct nv_adma_prd *aprd)
1296 u8 flags = 0;
1297 if (qc->tf.flags & ATA_TFLAG_WRITE)
1298 flags |= NV_APRD_WRITE;
1299 if (idx == qc->n_elem - 1)
1300 flags |= NV_APRD_END;
1301 else if (idx != 4)
1302 flags |= NV_APRD_CONT;
1304 aprd->addr = cpu_to_le64(((u64)sg_dma_address(sg)));
1305 aprd->len = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
1306 aprd->flags = flags;
1307 aprd->packet_len = 0;
1310 static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
1312 struct nv_adma_port_priv *pp = qc->ap->private_data;
1313 struct nv_adma_prd *aprd;
1314 struct scatterlist *sg;
1315 unsigned int si;
1317 VPRINTK("ENTER\n");
1319 for_each_sg(qc->sg, sg, qc->n_elem, si) {
1320 aprd = (si < 5) ? &cpb->aprd[si] :
1321 &pp->aprd[NV_ADMA_SGTBL_LEN * qc->hw_tag + (si-5)];
1322 nv_adma_fill_aprd(qc, sg, si, aprd);
1324 if (si > 5)
1325 cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->hw_tag)));
1326 else
1327 cpb->next_aprd = cpu_to_le64(0);
1330 static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
1332 struct nv_adma_port_priv *pp = qc->ap->private_data;
1334 /* ADMA engine can only be used for non-ATAPI DMA commands,
1335 or interrupt-driven no-data commands. */
1336 if ((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
1337 (qc->tf.flags & ATA_TFLAG_POLLING))
1338 return 1;
1340 if ((qc->flags & ATA_QCFLAG_DMAMAP) ||
1341 (qc->tf.protocol == ATA_PROT_NODATA))
1342 return 0;
1344 return 1;
1347 static enum ata_completion_errors nv_adma_qc_prep(struct ata_queued_cmd *qc)
1349 struct nv_adma_port_priv *pp = qc->ap->private_data;
1350 struct nv_adma_cpb *cpb = &pp->cpb[qc->hw_tag];
1351 u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
1352 NV_CPB_CTL_IEN;
1354 if (nv_adma_use_reg_mode(qc)) {
1355 BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
1356 (qc->flags & ATA_QCFLAG_DMAMAP));
1357 nv_adma_register_mode(qc->ap);
1358 ata_bmdma_qc_prep(qc);
1359 return AC_ERR_OK;
1362 cpb->resp_flags = NV_CPB_RESP_DONE;
1363 wmb();
1364 cpb->ctl_flags = 0;
1365 wmb();
1367 cpb->len = 3;
1368 cpb->tag = qc->hw_tag;
1369 cpb->next_cpb_idx = 0;
1371 /* turn on NCQ flags for NCQ commands */
1372 if (qc->tf.protocol == ATA_PROT_NCQ)
1373 ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;
1375 VPRINTK("qc->flags = 0x%lx\n", qc->flags);
1377 nv_adma_tf_to_cpb(&qc->tf, cpb->tf);
1379 if (qc->flags & ATA_QCFLAG_DMAMAP) {
1380 nv_adma_fill_sg(qc, cpb);
1381 ctl_flags |= NV_CPB_CTL_APRD_VALID;
1382 } else
1383 memset(&cpb->aprd[0], 0, sizeof(struct nv_adma_prd) * 5);
1385 /* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID
1386 until we are finished filling in all of the contents */
1387 wmb();
1388 cpb->ctl_flags = ctl_flags;
1389 wmb();
1390 cpb->resp_flags = 0;
1392 return AC_ERR_OK;
1395 static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
1397 struct nv_adma_port_priv *pp = qc->ap->private_data;
1398 void __iomem *mmio = pp->ctl_block;
1399 int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ);
1401 VPRINTK("ENTER\n");
1403 /* We can't handle result taskfile with NCQ commands, since
1404 retrieving the taskfile switches us out of ADMA mode and would abort
1405 existing commands. */
1406 if (unlikely(qc->tf.protocol == ATA_PROT_NCQ &&
1407 (qc->flags & ATA_QCFLAG_RESULT_TF))) {
1408 ata_dev_err(qc->dev, "NCQ w/ RESULT_TF not allowed\n");
1409 return AC_ERR_SYSTEM;
1412 if (nv_adma_use_reg_mode(qc)) {
1413 /* use ATA register mode */
1414 VPRINTK("using ATA register mode: 0x%lx\n", qc->flags);
1415 BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
1416 (qc->flags & ATA_QCFLAG_DMAMAP));
1417 nv_adma_register_mode(qc->ap);
1418 return ata_bmdma_qc_issue(qc);
1419 } else
1420 nv_adma_mode(qc->ap);
1422 /* write append register, command tag in lower 8 bits
1423 and (number of cpbs to append -1) in top 8 bits */
1424 wmb();
1426 if (curr_ncq != pp->last_issue_ncq) {
1427 /* Seems to need some delay before switching between NCQ and
1428 non-NCQ commands, else we get command timeouts and such. */
1429 udelay(20);
1430 pp->last_issue_ncq = curr_ncq;
1433 writew(qc->hw_tag, mmio + NV_ADMA_APPEND);
1435 DPRINTK("Issued tag %u\n", qc->hw_tag);
1437 return 0;
1440 static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
1442 struct ata_host *host = dev_instance;
1443 unsigned int i;
1444 unsigned int handled = 0;
1445 unsigned long flags;
1447 spin_lock_irqsave(&host->lock, flags);
1449 for (i = 0; i < host->n_ports; i++) {
1450 struct ata_port *ap = host->ports[i];
1451 struct ata_queued_cmd *qc;
1453 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1454 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
1455 handled += ata_bmdma_port_intr(ap, qc);
1456 } else {
1458 * No request pending? Clear interrupt status
1459 * anyway, in case there's one pending.
1461 ap->ops->sff_check_status(ap);
1465 spin_unlock_irqrestore(&host->lock, flags);
1467 return IRQ_RETVAL(handled);
1470 static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
1472 int i, handled = 0;
1474 for (i = 0; i < host->n_ports; i++) {
1475 handled += nv_host_intr(host->ports[i], irq_stat);
1476 irq_stat >>= NV_INT_PORT_SHIFT;
1479 return IRQ_RETVAL(handled);
1482 static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
1484 struct ata_host *host = dev_instance;
1485 u8 irq_stat;
1486 irqreturn_t ret;
1488 spin_lock(&host->lock);
1489 irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
1490 ret = nv_do_interrupt(host, irq_stat);
1491 spin_unlock(&host->lock);
1493 return ret;
1496 static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
1498 struct ata_host *host = dev_instance;
1499 u8 irq_stat;
1500 irqreturn_t ret;
1502 spin_lock(&host->lock);
1503 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
1504 ret = nv_do_interrupt(host, irq_stat);
1505 spin_unlock(&host->lock);
1507 return ret;
1510 static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
1512 if (sc_reg > SCR_CONTROL)
1513 return -EINVAL;
1515 *val = ioread32(link->ap->ioaddr.scr_addr + (sc_reg * 4));
1516 return 0;
1519 static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
1521 if (sc_reg > SCR_CONTROL)
1522 return -EINVAL;
1524 iowrite32(val, link->ap->ioaddr.scr_addr + (sc_reg * 4));
1525 return 0;
1528 static int nv_hardreset(struct ata_link *link, unsigned int *class,
1529 unsigned long deadline)
1531 struct ata_eh_context *ehc = &link->eh_context;
1533 /* Do hardreset iff it's post-boot probing, please read the
1534 * comment above port ops for details.
1536 if (!(link->ap->pflags & ATA_PFLAG_LOADING) &&
1537 !ata_dev_enabled(link->device))
1538 sata_link_hardreset(link, sata_deb_timing_hotplug, deadline,
1539 NULL, NULL);
1540 else {
1541 const unsigned long *timing = sata_ehc_deb_timing(ehc);
1542 int rc;
1544 if (!(ehc->i.flags & ATA_EHI_QUIET))
1545 ata_link_info(link,
1546 "nv: skipping hardreset on occupied port\n");
1548 /* make sure the link is online */
1549 rc = sata_link_resume(link, timing, deadline);
1550 /* whine about phy resume failure but proceed */
1551 if (rc && rc != -EOPNOTSUPP)
1552 ata_link_warn(link, "failed to resume link (errno=%d)\n",
1553 rc);
1556 /* device signature acquisition is unreliable */
1557 return -EAGAIN;
1560 static void nv_nf2_freeze(struct ata_port *ap)
1562 void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
1563 int shift = ap->port_no * NV_INT_PORT_SHIFT;
1564 u8 mask;
1566 mask = ioread8(scr_addr + NV_INT_ENABLE);
1567 mask &= ~(NV_INT_ALL << shift);
1568 iowrite8(mask, scr_addr + NV_INT_ENABLE);
1571 static void nv_nf2_thaw(struct ata_port *ap)
1573 void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
1574 int shift = ap->port_no * NV_INT_PORT_SHIFT;
1575 u8 mask;
1577 iowrite8(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);
1579 mask = ioread8(scr_addr + NV_INT_ENABLE);
1580 mask |= (NV_INT_MASK << shift);
1581 iowrite8(mask, scr_addr + NV_INT_ENABLE);
1584 static void nv_ck804_freeze(struct ata_port *ap)
1586 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1587 int shift = ap->port_no * NV_INT_PORT_SHIFT;
1588 u8 mask;
1590 mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1591 mask &= ~(NV_INT_ALL << shift);
1592 writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1595 static void nv_ck804_thaw(struct ata_port *ap)
1597 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1598 int shift = ap->port_no * NV_INT_PORT_SHIFT;
1599 u8 mask;
1601 writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);
1603 mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1604 mask |= (NV_INT_MASK << shift);
1605 writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1608 static void nv_mcp55_freeze(struct ata_port *ap)
1610 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1611 int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
1612 u32 mask;
1614 writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
1616 mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1617 mask &= ~(NV_INT_ALL_MCP55 << shift);
1618 writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
1621 static void nv_mcp55_thaw(struct ata_port *ap)
1623 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1624 int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
1625 u32 mask;
1627 writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
1629 mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1630 mask |= (NV_INT_MASK_MCP55 << shift);
1631 writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
1634 static void nv_adma_error_handler(struct ata_port *ap)
1636 struct nv_adma_port_priv *pp = ap->private_data;
1637 if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
1638 void __iomem *mmio = pp->ctl_block;
1639 int i;
1640 u16 tmp;
1642 if (ata_tag_valid(ap->link.active_tag) || ap->link.sactive) {
1643 u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
1644 u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
1645 u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
1646 u32 status = readw(mmio + NV_ADMA_STAT);
1647 u8 cpb_count = readb(mmio + NV_ADMA_CPB_COUNT);
1648 u8 next_cpb_idx = readb(mmio + NV_ADMA_NEXT_CPB_IDX);
1650 ata_port_err(ap,
1651 "EH in ADMA mode, notifier 0x%X "
1652 "notifier_error 0x%X gen_ctl 0x%X status 0x%X "
1653 "next cpb count 0x%X next cpb idx 0x%x\n",
1654 notifier, notifier_error, gen_ctl, status,
1655 cpb_count, next_cpb_idx);
1657 for (i = 0; i < NV_ADMA_MAX_CPBS; i++) {
1658 struct nv_adma_cpb *cpb = &pp->cpb[i];
1659 if ((ata_tag_valid(ap->link.active_tag) && i == ap->link.active_tag) ||
1660 ap->link.sactive & (1 << i))
1661 ata_port_err(ap,
1662 "CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
1663 i, cpb->ctl_flags, cpb->resp_flags);
1667 /* Push us back into port register mode for error handling. */
1668 nv_adma_register_mode(ap);
1670 /* Mark all of the CPBs as invalid to prevent them from
1671 being executed */
1672 for (i = 0; i < NV_ADMA_MAX_CPBS; i++)
1673 pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;
1675 /* clear CPB fetch count */
1676 writew(0, mmio + NV_ADMA_CPB_COUNT);
1678 /* Reset channel */
1679 tmp = readw(mmio + NV_ADMA_CTL);
1680 writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1681 readw(mmio + NV_ADMA_CTL); /* flush posted write */
1682 udelay(1);
1683 writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1684 readw(mmio + NV_ADMA_CTL); /* flush posted write */
1687 ata_bmdma_error_handler(ap);
1690 static void nv_swncq_qc_to_dq(struct ata_port *ap, struct ata_queued_cmd *qc)
1692 struct nv_swncq_port_priv *pp = ap->private_data;
1693 struct defer_queue *dq = &pp->defer_queue;
1695 /* queue is full */
1696 WARN_ON(dq->tail - dq->head == ATA_MAX_QUEUE);
1697 dq->defer_bits |= (1 << qc->hw_tag);
1698 dq->tag[dq->tail++ & (ATA_MAX_QUEUE - 1)] = qc->hw_tag;
1701 static struct ata_queued_cmd *nv_swncq_qc_from_dq(struct ata_port *ap)
1703 struct nv_swncq_port_priv *pp = ap->private_data;
1704 struct defer_queue *dq = &pp->defer_queue;
1705 unsigned int tag;
1707 if (dq->head == dq->tail) /* null queue */
1708 return NULL;
1710 tag = dq->tag[dq->head & (ATA_MAX_QUEUE - 1)];
1711 dq->tag[dq->head++ & (ATA_MAX_QUEUE - 1)] = ATA_TAG_POISON;
1712 WARN_ON(!(dq->defer_bits & (1 << tag)));
1713 dq->defer_bits &= ~(1 << tag);
1715 return ata_qc_from_tag(ap, tag);
1718 static void nv_swncq_fis_reinit(struct ata_port *ap)
1720 struct nv_swncq_port_priv *pp = ap->private_data;
1722 pp->dhfis_bits = 0;
1723 pp->dmafis_bits = 0;
1724 pp->sdbfis_bits = 0;
1725 pp->ncq_flags = 0;
1728 static void nv_swncq_pp_reinit(struct ata_port *ap)
1730 struct nv_swncq_port_priv *pp = ap->private_data;
1731 struct defer_queue *dq = &pp->defer_queue;
1733 dq->head = 0;
1734 dq->tail = 0;
1735 dq->defer_bits = 0;
1736 pp->qc_active = 0;
1737 pp->last_issue_tag = ATA_TAG_POISON;
1738 nv_swncq_fis_reinit(ap);
1741 static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis)
1743 struct nv_swncq_port_priv *pp = ap->private_data;
1745 writew(fis, pp->irq_block);
1748 static void __ata_bmdma_stop(struct ata_port *ap)
1750 struct ata_queued_cmd qc;
1752 qc.ap = ap;
1753 ata_bmdma_stop(&qc);
1756 static void nv_swncq_ncq_stop(struct ata_port *ap)
1758 struct nv_swncq_port_priv *pp = ap->private_data;
1759 unsigned int i;
1760 u32 sactive;
1761 u32 done_mask;
1763 ata_port_err(ap, "EH in SWNCQ mode,QC:qc_active 0x%llX sactive 0x%X\n",
1764 ap->qc_active, ap->link.sactive);
1765 ata_port_err(ap,
1766 "SWNCQ:qc_active 0x%X defer_bits 0x%X last_issue_tag 0x%x\n "
1767 "dhfis 0x%X dmafis 0x%X sdbfis 0x%X\n",
1768 pp->qc_active, pp->defer_queue.defer_bits, pp->last_issue_tag,
1769 pp->dhfis_bits, pp->dmafis_bits, pp->sdbfis_bits);
1771 ata_port_err(ap, "ATA_REG 0x%X ERR_REG 0x%X\n",
1772 ap->ops->sff_check_status(ap),
1773 ioread8(ap->ioaddr.error_addr));
1775 sactive = readl(pp->sactive_block);
1776 done_mask = pp->qc_active ^ sactive;
1778 ata_port_err(ap, "tag : dhfis dmafis sdbfis sactive\n");
1779 for (i = 0; i < ATA_MAX_QUEUE; i++) {
1780 u8 err = 0;
1781 if (pp->qc_active & (1 << i))
1782 err = 0;
1783 else if (done_mask & (1 << i))
1784 err = 1;
1785 else
1786 continue;
1788 ata_port_err(ap,
1789 "tag 0x%x: %01x %01x %01x %01x %s\n", i,
1790 (pp->dhfis_bits >> i) & 0x1,
1791 (pp->dmafis_bits >> i) & 0x1,
1792 (pp->sdbfis_bits >> i) & 0x1,
1793 (sactive >> i) & 0x1,
1794 (err ? "error! tag doesn't exit" : " "));
1797 nv_swncq_pp_reinit(ap);
1798 ap->ops->sff_irq_clear(ap);
1799 __ata_bmdma_stop(ap);
1800 nv_swncq_irq_clear(ap, 0xffff);
1803 static void nv_swncq_error_handler(struct ata_port *ap)
1805 struct ata_eh_context *ehc = &ap->link.eh_context;
1807 if (ap->link.sactive) {
1808 nv_swncq_ncq_stop(ap);
1809 ehc->i.action |= ATA_EH_RESET;
1812 ata_bmdma_error_handler(ap);
1815 #ifdef CONFIG_PM
1816 static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg)
1818 void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1819 u32 tmp;
1821 /* clear irq */
1822 writel(~0, mmio + NV_INT_STATUS_MCP55);
1824 /* disable irq */
1825 writel(0, mmio + NV_INT_ENABLE_MCP55);
1827 /* disable swncq */
1828 tmp = readl(mmio + NV_CTL_MCP55);
1829 tmp &= ~(NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ);
1830 writel(tmp, mmio + NV_CTL_MCP55);
1832 return 0;
1835 static int nv_swncq_port_resume(struct ata_port *ap)
1837 void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1838 u32 tmp;
1840 /* clear irq */
1841 writel(~0, mmio + NV_INT_STATUS_MCP55);
1843 /* enable irq */
1844 writel(0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);
1846 /* enable swncq */
1847 tmp = readl(mmio + NV_CTL_MCP55);
1848 writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);
1850 return 0;
1852 #endif
1854 static void nv_swncq_host_init(struct ata_host *host)
1856 u32 tmp;
1857 void __iomem *mmio = host->iomap[NV_MMIO_BAR];
1858 struct pci_dev *pdev = to_pci_dev(host->dev);
1859 u8 regval;
1861 /* disable ECO 398 */
1862 pci_read_config_byte(pdev, 0x7f, &regval);
1863 regval &= ~(1 << 7);
1864 pci_write_config_byte(pdev, 0x7f, regval);
1866 /* enable swncq */
1867 tmp = readl(mmio + NV_CTL_MCP55);
1868 VPRINTK("HOST_CTL:0x%X\n", tmp);
1869 writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);
1871 /* enable irq intr */
1872 tmp = readl(mmio + NV_INT_ENABLE_MCP55);
1873 VPRINTK("HOST_ENABLE:0x%X\n", tmp);
1874 writel(tmp | 0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);
1876 /* clear port irq */
1877 writel(~0x0, mmio + NV_INT_STATUS_MCP55);
1880 static int nv_swncq_slave_config(struct scsi_device *sdev)
1882 struct ata_port *ap = ata_shost_to_port(sdev->host);
1883 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
1884 struct ata_device *dev;
1885 int rc;
1886 u8 rev;
1887 u8 check_maxtor = 0;
1888 unsigned char model_num[ATA_ID_PROD_LEN + 1];
1890 rc = ata_scsi_slave_config(sdev);
1891 if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
1892 /* Not a proper libata device, ignore */
1893 return rc;
1895 dev = &ap->link.device[sdev->id];
1896 if (!(ap->flags & ATA_FLAG_NCQ) || dev->class == ATA_DEV_ATAPI)
1897 return rc;
1899 /* if MCP51 and Maxtor, then disable ncq */
1900 if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA ||
1901 pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2)
1902 check_maxtor = 1;
1904 /* if MCP55 and rev <= a2 and Maxtor, then disable ncq */
1905 if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA ||
1906 pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2) {
1907 pci_read_config_byte(pdev, 0x8, &rev);
1908 if (rev <= 0xa2)
1909 check_maxtor = 1;
1912 if (!check_maxtor)
1913 return rc;
1915 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
1917 if (strncmp(model_num, "Maxtor", 6) == 0) {
1918 ata_scsi_change_queue_depth(sdev, 1);
1919 ata_dev_notice(dev, "Disabling SWNCQ mode (depth %x)\n",
1920 sdev->queue_depth);
1923 return rc;
1926 static int nv_swncq_port_start(struct ata_port *ap)
1928 struct device *dev = ap->host->dev;
1929 void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1930 struct nv_swncq_port_priv *pp;
1931 int rc;
1933 /* we might fallback to bmdma, allocate bmdma resources */
1934 rc = ata_bmdma_port_start(ap);
1935 if (rc)
1936 return rc;
1938 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1939 if (!pp)
1940 return -ENOMEM;
1942 pp->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE,
1943 &pp->prd_dma, GFP_KERNEL);
1944 if (!pp->prd)
1945 return -ENOMEM;
1947 ap->private_data = pp;
1948 pp->sactive_block = ap->ioaddr.scr_addr + 4 * SCR_ACTIVE;
1949 pp->irq_block = mmio + NV_INT_STATUS_MCP55 + ap->port_no * 2;
1950 pp->tag_block = mmio + NV_NCQ_REG_MCP55 + ap->port_no * 2;
1952 return 0;
1955 static enum ata_completion_errors nv_swncq_qc_prep(struct ata_queued_cmd *qc)
1957 if (qc->tf.protocol != ATA_PROT_NCQ) {
1958 ata_bmdma_qc_prep(qc);
1959 return AC_ERR_OK;
1962 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1963 return AC_ERR_OK;
1965 nv_swncq_fill_sg(qc);
1967 return AC_ERR_OK;
1970 static void nv_swncq_fill_sg(struct ata_queued_cmd *qc)
1972 struct ata_port *ap = qc->ap;
1973 struct scatterlist *sg;
1974 struct nv_swncq_port_priv *pp = ap->private_data;
1975 struct ata_bmdma_prd *prd;
1976 unsigned int si, idx;
1978 prd = pp->prd + ATA_MAX_PRD * qc->hw_tag;
1980 idx = 0;
1981 for_each_sg(qc->sg, sg, qc->n_elem, si) {
1982 u32 addr, offset;
1983 u32 sg_len, len;
1985 addr = (u32)sg_dma_address(sg);
1986 sg_len = sg_dma_len(sg);
1988 while (sg_len) {
1989 offset = addr & 0xffff;
1990 len = sg_len;
1991 if ((offset + sg_len) > 0x10000)
1992 len = 0x10000 - offset;
1994 prd[idx].addr = cpu_to_le32(addr);
1995 prd[idx].flags_len = cpu_to_le32(len & 0xffff);
1997 idx++;
1998 sg_len -= len;
1999 addr += len;
2003 prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
2006 static unsigned int nv_swncq_issue_atacmd(struct ata_port *ap,
2007 struct ata_queued_cmd *qc)
2009 struct nv_swncq_port_priv *pp = ap->private_data;
2011 if (qc == NULL)
2012 return 0;
2014 DPRINTK("Enter\n");
2016 writel((1 << qc->hw_tag), pp->sactive_block);
2017 pp->last_issue_tag = qc->hw_tag;
2018 pp->dhfis_bits &= ~(1 << qc->hw_tag);
2019 pp->dmafis_bits &= ~(1 << qc->hw_tag);
2020 pp->qc_active |= (0x1 << qc->hw_tag);
2022 ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */
2023 ap->ops->sff_exec_command(ap, &qc->tf);
2025 DPRINTK("Issued tag %u\n", qc->hw_tag);
2027 return 0;
2030 static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc)
2032 struct ata_port *ap = qc->ap;
2033 struct nv_swncq_port_priv *pp = ap->private_data;
2035 if (qc->tf.protocol != ATA_PROT_NCQ)
2036 return ata_bmdma_qc_issue(qc);
2038 DPRINTK("Enter\n");
2040 if (!pp->qc_active)
2041 nv_swncq_issue_atacmd(ap, qc);
2042 else
2043 nv_swncq_qc_to_dq(ap, qc); /* add qc to defer queue */
2045 return 0;
2048 static void nv_swncq_hotplug(struct ata_port *ap, u32 fis)
2050 u32 serror;
2051 struct ata_eh_info *ehi = &ap->link.eh_info;
2053 ata_ehi_clear_desc(ehi);
2055 /* AHCI needs SError cleared; otherwise, it might lock up */
2056 sata_scr_read(&ap->link, SCR_ERROR, &serror);
2057 sata_scr_write(&ap->link, SCR_ERROR, serror);
2059 /* analyze @irq_stat */
2060 if (fis & NV_SWNCQ_IRQ_ADDED)
2061 ata_ehi_push_desc(ehi, "hot plug");
2062 else if (fis & NV_SWNCQ_IRQ_REMOVED)
2063 ata_ehi_push_desc(ehi, "hot unplug");
2065 ata_ehi_hotplugged(ehi);
2067 /* okay, let's hand over to EH */
2068 ehi->serror |= serror;
2070 ata_port_freeze(ap);
2073 static int nv_swncq_sdbfis(struct ata_port *ap)
2075 struct ata_queued_cmd *qc;
2076 struct nv_swncq_port_priv *pp = ap->private_data;
2077 struct ata_eh_info *ehi = &ap->link.eh_info;
2078 u32 sactive;
2079 u32 done_mask;
2080 u8 host_stat;
2081 u8 lack_dhfis = 0;
2083 host_stat = ap->ops->bmdma_status(ap);
2084 if (unlikely(host_stat & ATA_DMA_ERR)) {
2085 /* error when transferring data to/from memory */
2086 ata_ehi_clear_desc(ehi);
2087 ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
2088 ehi->err_mask |= AC_ERR_HOST_BUS;
2089 ehi->action |= ATA_EH_RESET;
2090 return -EINVAL;
2093 ap->ops->sff_irq_clear(ap);
2094 __ata_bmdma_stop(ap);
2096 sactive = readl(pp->sactive_block);
2097 done_mask = pp->qc_active ^ sactive;
2099 pp->qc_active &= ~done_mask;
2100 pp->dhfis_bits &= ~done_mask;
2101 pp->dmafis_bits &= ~done_mask;
2102 pp->sdbfis_bits |= done_mask;
2103 ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask);
2105 if (!ap->qc_active) {
2106 DPRINTK("over\n");
2107 nv_swncq_pp_reinit(ap);
2108 return 0;
2111 if (pp->qc_active & pp->dhfis_bits)
2112 return 0;
2114 if ((pp->ncq_flags & ncq_saw_backout) ||
2115 (pp->qc_active ^ pp->dhfis_bits))
2116 /* if the controller can't get a device to host register FIS,
2117 * The driver needs to reissue the new command.
2119 lack_dhfis = 1;
2121 DPRINTK("id 0x%x QC: qc_active 0x%x,"
2122 "SWNCQ:qc_active 0x%X defer_bits %X "
2123 "dhfis 0x%X dmafis 0x%X last_issue_tag %x\n",
2124 ap->print_id, ap->qc_active, pp->qc_active,
2125 pp->defer_queue.defer_bits, pp->dhfis_bits,
2126 pp->dmafis_bits, pp->last_issue_tag);
2128 nv_swncq_fis_reinit(ap);
2130 if (lack_dhfis) {
2131 qc = ata_qc_from_tag(ap, pp->last_issue_tag);
2132 nv_swncq_issue_atacmd(ap, qc);
2133 return 0;
2136 if (pp->defer_queue.defer_bits) {
2137 /* send deferral queue command */
2138 qc = nv_swncq_qc_from_dq(ap);
2139 WARN_ON(qc == NULL);
2140 nv_swncq_issue_atacmd(ap, qc);
2143 return 0;
2146 static inline u32 nv_swncq_tag(struct ata_port *ap)
2148 struct nv_swncq_port_priv *pp = ap->private_data;
2149 u32 tag;
2151 tag = readb(pp->tag_block) >> 2;
2152 return (tag & 0x1f);
2155 static void nv_swncq_dmafis(struct ata_port *ap)
2157 struct ata_queued_cmd *qc;
2158 unsigned int rw;
2159 u8 dmactl;
2160 u32 tag;
2161 struct nv_swncq_port_priv *pp = ap->private_data;
2163 __ata_bmdma_stop(ap);
2164 tag = nv_swncq_tag(ap);
2166 DPRINTK("dma setup tag 0x%x\n", tag);
2167 qc = ata_qc_from_tag(ap, tag);
2169 if (unlikely(!qc))
2170 return;
2172 rw = qc->tf.flags & ATA_TFLAG_WRITE;
2174 /* load PRD table addr. */
2175 iowrite32(pp->prd_dma + ATA_PRD_TBL_SZ * qc->hw_tag,
2176 ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
2178 /* specify data direction, triple-check start bit is clear */
2179 dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2180 dmactl &= ~ATA_DMA_WR;
2181 if (!rw)
2182 dmactl |= ATA_DMA_WR;
2184 iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
/*
 * Per-port SWNCQ interrupt service.  @fis is this port's slice of the
 * MCP55 interrupt status register.  Decodes the FIS-type bits, advances
 * the software NCQ state machine (D2H ack -> DMA setup -> SDB completion)
 * and freezes the port for EH on any protocol violation or device error.
 */
static void nv_swncq_host_interrupt(struct ata_port *ap, u16 fis)
{
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	struct ata_eh_info *ehi = &ap->link.eh_info;
	u32 serror;
	u8 ata_stat;

	/* Reading status also acks the device-level interrupt. */
	ata_stat = ap->ops->sff_check_status(ap);
	nv_swncq_irq_clear(ap, fis);
	if (!fis)
		return;

	/* Port is frozen — EH owns it; don't touch hardware state. */
	if (ap->pflags & ATA_PFLAG_FROZEN)
		return;

	if (fis & NV_SWNCQ_IRQ_HOTPLUG) {
		nv_swncq_hotplug(ap, fis);
		return;
	}

	/* No commands in flight — nothing further to process. */
	if (!pp->qc_active)
		return;

	/* Read-and-clear SError; failure to read means we can't proceed. */
	if (ap->ops->scr_read(&ap->link, SCR_ERROR, &serror))
		return;
	ap->ops->scr_write(&ap->link, SCR_ERROR, serror);

	/* Device reported an error: record it and hand the port to EH. */
	if (ata_stat & ATA_ERR) {
		ata_ehi_clear_desc(ehi);
		ata_ehi_push_desc(ehi, "Ata error. fis:0x%X", fis);
		ehi->err_mask |= AC_ERR_DEV;
		ehi->serror |= serror;
		ehi->action |= ATA_EH_RESET;
		ata_port_freeze(ap);
		return;
	}

	if (fis & NV_SWNCQ_IRQ_BACKOUT) {
		/* If the IRQ is backout, driver must issue
		 * the new command again some time later.
		 */
		pp->ncq_flags |= ncq_saw_backout;
	}

	if (fis & NV_SWNCQ_IRQ_SDBFIS) {
		pp->ncq_flags |= ncq_saw_sdb;
		DPRINTK("id 0x%x SWNCQ: qc_active 0x%X "
			"dhfis 0x%X dmafis 0x%X sactive 0x%X\n",
			ap->print_id, pp->qc_active, pp->dhfis_bits,
			pp->dmafis_bits, readl(pp->sactive_block));
		/* Complete finished commands; <0 means protocol trouble. */
		if (nv_swncq_sdbfis(ap) < 0)
			goto irq_error;
	}

	if (fis & NV_SWNCQ_IRQ_DHREGFIS) {
		/* The interrupt indicates the new command
		 * was transmitted correctly to the drive.
		 */
		pp->dhfis_bits |= (0x1 << pp->last_issue_tag);
		pp->ncq_flags |= ncq_saw_d2h;
		/* D2H ack arriving together with SDB/backout is illegal. */
		if (pp->ncq_flags & (ncq_saw_sdb | ncq_saw_backout)) {
			ata_ehi_push_desc(ehi, "illegal fis transaction");
			ehi->err_mask |= AC_ERR_HSM;
			ehi->action |= ATA_EH_RESET;
			goto irq_error;
		}

		/* No DMA setup pending yet and none ever seen: if the drive
		 * is not busy, we may push the next deferred command now.
		 */
		if (!(fis & NV_SWNCQ_IRQ_DMASETUP) &&
		    !(pp->ncq_flags & ncq_saw_dmas)) {
			ata_stat = ap->ops->sff_check_status(ap);
			if (ata_stat & ATA_BUSY)
				goto irq_exit;

			if (pp->defer_queue.defer_bits) {
				DPRINTK("send next command\n");
				qc = nv_swncq_qc_from_dq(ap);
				nv_swncq_issue_atacmd(ap, qc);
			}
		}
	}

	if (fis & NV_SWNCQ_IRQ_DMASETUP) {
		/* program the dma controller with appropriate PRD buffers
		 * and start the DMA transfer for requested command.
		 */
		pp->dmafis_bits |= (0x1 << nv_swncq_tag(ap));
		pp->ncq_flags |= ncq_saw_dmas;
		nv_swncq_dmafis(ap);
	}

irq_exit:
	return;
irq_error:
	ata_ehi_push_desc(ehi, "fis:0x%x", fis);
	ata_port_freeze(ap);
	return;
}
/*
 * Top-level SWNCQ interrupt handler.  Reads the MCP55 combined status
 * register once and walks the ports, shifting the per-port bits into
 * place for each.  Ports with active NCQ commands (link.sactive set) go
 * through the SWNCQ state machine; idle ports fall back to the generic
 * legacy handler.
 */
static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int i;
	unsigned int handled = 0;
	unsigned long flags;
	u32 irq_stat;

	spin_lock_irqsave(&host->lock, flags);

	irq_stat = readl(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_MCP55);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap->link.sactive) {
			nv_swncq_host_interrupt(ap, (u16)irq_stat);
			handled = 1;
		} else {
			/* Ack everything except the hotplug bits. */
			if (irq_stat)	/* reserve Hotplug */
				nv_swncq_irq_clear(ap, 0xfff0);

			handled += nv_host_intr(ap, (u8)irq_stat);
		}
		/* Advance to the next port's slice of the status word. */
		irq_stat >>= NV_INT_PORT_SHIFT_MCP55;
	}

	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_RETVAL(handled);
}
/*
 * PCI probe.  Identifies the controller flavour (generic / CK804+ADMA /
 * MCP5x+SWNCQ, subject to module parameters), sets up the BMDMA host,
 * maps the MMIO BAR for SCR access, performs flavour-specific init and
 * activates the host.  Resources are device-managed (pcim_*/devm_*), so
 * error paths can simply return.
 */
static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const struct ata_port_info *ppi[] = { NULL, NULL };
	struct nv_pi_priv *ipriv;
	struct ata_host *host;
	struct nv_host_priv *hpriv;
	int rc;
	u32 bar;
	void __iomem *base;
	unsigned long type = ent->driver_data;

	// Make sure this is a SATA controller by counting the number of bars
	// (NVIDIA SATA controllers will always have six bars). Otherwise,
	// it's an IDE controller and we ignore it.
	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
		if (pci_resource_start(pdev, bar) == 0)
			return -ENODEV;

	ata_print_version_once(&pdev->dev, DRV_VERSION);

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	/* determine type and allocate host */
	if (type == CK804 && adma_enabled) {
		dev_notice(&pdev->dev, "Using ADMA mode\n");
		type = ADMA;
	} else if (type == MCP5x && swncq_enabled) {
		dev_notice(&pdev->dev, "Using SWNCQ mode\n");
		type = SWNCQ;
	}

	ppi[0] = &nv_port_info[type];
	ipriv = ppi[0]->private_data;
	rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
	if (rc)
		return rc;

	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!hpriv)
		return -ENOMEM;
	hpriv->type = type;
	host->private_data = hpriv;

	/* request and iomap NV_MMIO_BAR */
	rc = pcim_iomap_regions(pdev, 1 << NV_MMIO_BAR, DRV_NAME);
	if (rc)
		return rc;

	/* configure SCR access */
	base = host->iomap[NV_MMIO_BAR];
	host->ports[0]->ioaddr.scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
	host->ports[1]->ioaddr.scr_addr = base + NV_PORT1_SCR_REG_OFFSET;

	/* enable SATA space for CK804 */
	if (type >= CK804) {
		u8 regval;

		pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
		regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
		pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
	}

	/* init ADMA */
	if (type == ADMA) {
		rc = nv_adma_host_init(host);
		if (rc)
			return rc;
	} else if (type == SWNCQ)
		nv_swncq_host_init(host);

	if (msi_enabled) {
		dev_notice(&pdev->dev, "Using MSI\n");
		/* best-effort: fall back to legacy IRQ if MSI setup fails */
		pci_enable_msi(pdev);
	}

	pci_set_master(pdev);
	return ata_pci_sff_activate_host(host, ipriv->irq_handler, ipriv->sht);
}
#ifdef CONFIG_PM_SLEEP
/*
 * PCI resume.  After the generic PCI/libata resume, re-apply config-space
 * state that is lost across a real suspend (but not across hibernation's
 * restore path, hence the PM_EVENT_SUSPEND check): the CK804 SATA register
 * space enable, and the per-port ADMA enables for ADMA-mode hosts.
 */
static int nv_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = pci_get_drvdata(pdev);
	struct nv_host_priv *hpriv = host->private_data;
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc)
		return rc;

	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
		if (hpriv->type >= CK804) {
			u8 regval;

			/* re-enable the extended SATA register space */
			pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
			regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
			pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
		}
		if (hpriv->type == ADMA) {
			u32 tmp32;
			struct nv_adma_port_priv *pp;
			/* enable/disable ADMA on the ports appropriately */
			pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);

			/* ATAPI devices run without ADMA: keep it off there */
			pp = host->ports[0]->private_data;
			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
					   NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
			else
				tmp32 |= (NV_MCP_SATA_CFG_20_PORT0_EN |
					  NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
			pp = host->ports[1]->private_data;
			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN |
					   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
			else
				tmp32 |= (NV_MCP_SATA_CFG_20_PORT1_EN |
					  NV_MCP_SATA_CFG_20_PORT1_PWB_EN);

			pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
		}
	}

	ata_host_resume(host);

	return 0;
}
#endif
2449 static void nv_ck804_host_stop(struct ata_host *host)
2451 struct pci_dev *pdev = to_pci_dev(host->dev);
2452 u8 regval;
2454 /* disable SATA space for CK804 */
2455 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2456 regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2457 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2460 static void nv_adma_host_stop(struct ata_host *host)
2462 struct pci_dev *pdev = to_pci_dev(host->dev);
2463 u32 tmp32;
2465 /* disable ADMA on the ports */
2466 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
2467 tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
2468 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
2469 NV_MCP_SATA_CFG_20_PORT1_EN |
2470 NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2472 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
2474 nv_ck804_host_stop(host);
/* Register the PCI driver and generate module init/exit boilerplate. */
module_pci_driver(nv_pci_driver);

/* Module parameters — readable in sysfs (0444), settable only at load. */
module_param_named(adma, adma_enabled, bool, 0444);
MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: false)");
module_param_named(swncq, swncq_enabled, bool, 0444);
MODULE_PARM_DESC(swncq, "Enable use of SWNCQ (Default: true)");
module_param_named(msi, msi_enabled, bool, 0444);
MODULE_PARM_DESC(msi, "Enable use of MSI (Default: false)");