1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * sata_nv.c - NVIDIA nForce SATA
5 * Copyright 2004 NVIDIA Corp. All rights reserved.
6 * Copyright 2004 Andrew Chew
8 * libata documentation is available via 'make {ps|pdf}docs',
9 * as Documentation/driver-api/libata.rst
11 * No hardware documentation available outside of NVIDIA.
12 * This driver programs the NVIDIA SATA controller in a similar
13 * fashion as with other PCI IDE BMDMA controllers, with a few
14 * NV-specific details such as register offsets, SATA phy location,
17 * CK804/MCP04 controllers support an alternate programming interface
18 * similar to the ADMA specification (with some modifications).
19 * This allows the use of NCQ. Non-DMA-mapped ATA commands are still
20 * sent through the legacy interface.
23 #include <linux/kernel.h>
24 #include <linux/module.h>
25 #include <linux/gfp.h>
26 #include <linux/pci.h>
27 #include <linux/blkdev.h>
28 #include <linux/delay.h>
29 #include <linux/interrupt.h>
30 #include <linux/device.h>
31 #include <scsi/scsi_host.h>
32 #include <scsi/scsi_device.h>
33 #include <linux/libata.h>
34 #include <trace/events/libata.h>
#define DRV_NAME			"sata_nv"
#define DRV_VERSION			"3.5"

#define NV_ADMA_DMA_BOUNDARY		0xffffffffUL

	/* NOTE(review): this chunk is missing lines — the enum opener and
	 * several constants referenced below (e.g. NV_ADMA_CPB_SZ,
	 * NV_ADMA_APRD_SZ, NV_INT_DEV, NV_INT_ADDED, NV_ADMA_STAT,
	 * NV_ADMA_CTL, NV_ADMA_PORT, NV_ADMA_GEN, NV_MMIO_BAR, NV_INT_ALL)
	 * are not visible here; confirm against the full file. */

	/* transfer mode masks advertised by all variants */
	NV_PIO_MASK			= ATA_PIO4,
	NV_MWDMA_MASK			= ATA_MWDMA2,
	NV_UDMA_MASK			= ATA_UDMA6,

	/* per-port SCR register block offsets */
	NV_PORT0_SCR_REG_OFFSET		= 0x00,
	NV_PORT1_SCR_REG_OFFSET		= 0x40,

	/* INT_STATUS/ENABLE */
	NV_INT_STATUS_CK804		= 0x440,
	NV_INT_ENABLE_CK804		= 0x441,

	/* INT_STATUS/ENABLE bits */
	NV_INT_REMOVED			= 0x08,

	NV_INT_PORT_SHIFT		= 4,	/* each port occupies 4 bits */

	NV_INT_MASK			= NV_INT_DEV |
					  NV_INT_ADDED | NV_INT_REMOVED,

	NV_INT_CONFIG_METHD		= 0x01, // 0 = INT, 1 = SMI

	// For PCI config register 20
	NV_MCP_SATA_CFG_20		= 0x50,
	NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
	NV_MCP_SATA_CFG_20_PORT0_EN	= (1 << 17),
	NV_MCP_SATA_CFG_20_PORT1_EN	= (1 << 16),
	NV_MCP_SATA_CFG_20_PORT0_PWB_EN	= (1 << 14),
	NV_MCP_SATA_CFG_20_PORT1_PWB_EN	= (1 << 12),

	NV_ADMA_MAX_CPBS		= 32,
	/* NOTE(review): the divisor of this expression was lost in
	 * extraction — the line below is an incomplete fragment. */
	NV_ADMA_SGTBL_LEN		= (1024 - NV_ADMA_CPB_SZ) /
	NV_ADMA_SGTBL_TOTAL_LEN		= NV_ADMA_SGTBL_LEN + 5,
	NV_ADMA_SGTBL_SZ		= NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
	NV_ADMA_PORT_PRIV_DMA_SZ	= NV_ADMA_MAX_CPBS *
					  (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),

	/* BAR5 offset to ADMA general registers */
	NV_ADMA_GEN_CTL			= 0x00,
	NV_ADMA_NOTIFIER_CLEAR		= 0x30,

	/* BAR5 offset to ADMA ports */

	/* size of ADMA port register space  */
	NV_ADMA_PORT_SIZE		= 0x100,

	/* ADMA port registers */
	NV_ADMA_CPB_COUNT		= 0x42,
	NV_ADMA_NEXT_CPB_IDX		= 0x43,
	NV_ADMA_CPB_BASE_LOW		= 0x48,
	NV_ADMA_CPB_BASE_HIGH		= 0x4C,
	NV_ADMA_APPEND			= 0x50,
	NV_ADMA_NOTIFIER		= 0x68,
	NV_ADMA_NOTIFIER_ERROR		= 0x6C,

	/* NV_ADMA_CTL register bits */
	NV_ADMA_CTL_HOTPLUG_IEN		= (1 << 0),
	NV_ADMA_CTL_CHANNEL_RESET	= (1 << 5),
	NV_ADMA_CTL_GO			= (1 << 7),
	NV_ADMA_CTL_AIEN		= (1 << 8),
	NV_ADMA_CTL_READ_NON_COHERENT	= (1 << 11),
	NV_ADMA_CTL_WRITE_NON_COHERENT	= (1 << 12),

	/* CPB response flag bits */
	NV_CPB_RESP_DONE		= (1 << 0),
	NV_CPB_RESP_ATA_ERR		= (1 << 3),
	NV_CPB_RESP_CMD_ERR		= (1 << 4),
	NV_CPB_RESP_CPB_ERR		= (1 << 7),

	/* CPB control flag bits */
	NV_CPB_CTL_CPB_VALID		= (1 << 0),
	NV_CPB_CTL_QUEUE		= (1 << 1),
	NV_CPB_CTL_APRD_VALID		= (1 << 2),
	NV_CPB_CTL_IEN			= (1 << 3),
	NV_CPB_CTL_FPDMA		= (1 << 4),

	/* APRD flags */
	NV_APRD_WRITE			= (1 << 1),
	NV_APRD_END			= (1 << 2),
	NV_APRD_CONT			= (1 << 3),

	/* NV_ADMA_STAT flags */
	NV_ADMA_STAT_TIMEOUT		= (1 << 0),
	NV_ADMA_STAT_HOTUNPLUG		= (1 << 1),
	NV_ADMA_STAT_HOTPLUG		= (1 << 2),
	NV_ADMA_STAT_CPBERR		= (1 << 4),
	NV_ADMA_STAT_SERROR		= (1 << 5),
	NV_ADMA_STAT_CMD_COMPLETE	= (1 << 6),
	NV_ADMA_STAT_IDLE		= (1 << 8),
	NV_ADMA_STAT_LEGACY		= (1 << 9),
	NV_ADMA_STAT_STOPPED		= (1 << 10),
	NV_ADMA_STAT_DONE		= (1 << 12),
	NV_ADMA_STAT_ERR		= NV_ADMA_STAT_CPBERR |
					  NV_ADMA_STAT_TIMEOUT,

	/* bits kept in nv_adma_port_priv->flags */
	NV_ADMA_PORT_REGISTER_MODE	= (1 << 0),
	NV_ADMA_ATAPI_SETUP_COMPLETE	= (1 << 1),

	/* MCP55 reg offset */
	NV_CTL_MCP55			= 0x400,
	NV_INT_STATUS_MCP55		= 0x440,
	NV_INT_ENABLE_MCP55		= 0x444,
	NV_NCQ_REG_MCP55		= 0x448,

	NV_INT_ALL_MCP55		= 0xffff,
	NV_INT_PORT_SHIFT_MCP55		= 16,	/* each port occupies 16 bits */
	NV_INT_MASK_MCP55		= NV_INT_ALL_MCP55 & 0xfffd,

	/* SWNCQ ENABLE BITS*/
	NV_CTL_PRI_SWNCQ		= 0x02,
	NV_CTL_SEC_SWNCQ		= 0x04,

	/* SW NCQ status bits*/
	NV_SWNCQ_IRQ_DEV		= (1 << 0),
	NV_SWNCQ_IRQ_PM			= (1 << 1),
	NV_SWNCQ_IRQ_ADDED		= (1 << 2),
	NV_SWNCQ_IRQ_REMOVED		= (1 << 3),

	NV_SWNCQ_IRQ_BACKOUT		= (1 << 4),
	NV_SWNCQ_IRQ_SDBFIS		= (1 << 5),
	NV_SWNCQ_IRQ_DHREGFIS		= (1 << 6),
	NV_SWNCQ_IRQ_DMASETUP		= (1 << 7),

	NV_SWNCQ_IRQ_HOTPLUG		= NV_SWNCQ_IRQ_ADDED |
					  NV_SWNCQ_IRQ_REMOVED,
188 /* ADMA Physical Region Descriptor - one SG segment */
/*
 * Bit meanings of the per-register words written into a CPB's taskfile
 * area (see nv_adma_tf_to_cpb, which ORs these into each entry).
 * NOTE(review): the closing "};" of this enum was lost in extraction.
 */
enum nv_adma_regbits {
	CMDEND	= (1 << 15),		/* end of command list */
	WNB	= (1 << 14),		/* wait-not-BSY */
	IGN	= (1 << 13),		/* ignore this entry */
	CS1n	= (1 << (4 + 8)),	/* std. PATA signals follow... */
	DA2	= (1 << (2 + 8)),
	DA1	= (1 << (1 + 8)),
	DA0	= (1 << (0 + 8)),
/* ADMA Command Parameter Block
   The first 5 SG segments are stored inside the Command Parameter Block itself.
   If there are more than 5 segments the remainder are stored in a separate
   memory area indicated by next_aprd. */
	/* NOTE(review): the "struct nv_adma_cpb {" opener and the fields at
	 * byte offsets 3-4 (length/tag) were lost in extraction; offsets in
	 * the trailing comments are byte positions within the 128-byte CPB. */
	u8			resp_flags;    /* 0 */
	u8			reserved1;     /* 1 */
	u8			ctl_flags;     /* 2 */
	/* len is length of taskfile in 64 bit words */
	u8			next_cpb_idx;  /* 5 */
	__le16			reserved2;     /* 6-7 */
	__le16			tf[12];        /* 8-31 */
	struct nv_adma_prd	aprd[5];       /* 32-111 */
	__le64			next_aprd;     /* 112-119 */
	__le64			reserved3;     /* 120-127 */
/*
 * Per-port private data for the ADMA path.
 * NOTE(review): several fields (cpb_dma, aprd_dma, flags, adma_dma_mask —
 * all referenced later in this file) and the closing braces of the structs
 * below were lost in extraction; fragments are kept as-is.
 */
struct nv_adma_port_priv {
	struct nv_adma_cpb	*cpb;
	struct nv_adma_prd	*aprd;
	void __iomem		*ctl_block;
	void __iomem		*gen_block;
	void __iomem		*notifier_clear_block;

struct nv_host_priv {

	/* NOTE(review): this tag[] field presumably belongs to the deferral
	 * FIFO struct (see defer_queue member below) — opener not visible. */
	unsigned int	tag[ATA_MAX_QUEUE];

/* flags recording which FISes were seen while analysing an NCQ interrupt */
enum ncq_saw_flag_list {
	ncq_saw_d2h	= (1U << 0),
	ncq_saw_dmas	= (1U << 1),
	ncq_saw_sdb	= (1U << 2),
	ncq_saw_backout	= (1U << 3),

/* per-port private data for the software-NCQ (MCP5x) path */
struct nv_swncq_port_priv {
	struct ata_bmdma_prd *prd;	 /* our SG list */
	dma_addr_t	prd_dma; /* and its DMA mapping */
	void __iomem	*sactive_block;
	void __iomem	*irq_block;
	void __iomem	*tag_block;

	unsigned int	last_issue_tag;

	/* fifo circular queue to store deferral command */
	struct defer_queue defer_queue;

	/* for NCQ interrupt analysis */
	unsigned int	ncq_flags;
/* true iff port PORT's interrupt bit is set in the ADMA general control
 * word GCTL (bit 19 for port 0, bit 31 for port 1) */
#define NV_ADMA_CHECK_INTR(GCTL, PORT)	    ((GCTL) & (1 << (19 + (12 * (PORT)))))

/* forward declarations */
static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
#ifdef CONFIG_PM_SLEEP
/* NOTE(review): the matching #endif for this #ifdef was lost in extraction */
static int nv_pci_device_resume(struct pci_dev *pdev);
static void nv_ck804_host_stop(struct ata_host *host);
static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
static int nv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline);
static void nv_nf2_freeze(struct ata_port *ap);
static void nv_nf2_thaw(struct ata_port *ap);
static void nv_ck804_freeze(struct ata_port *ap);
static void nv_ck804_thaw(struct ata_port *ap);
static int nv_adma_device_configure(struct scsi_device *sdev,
		struct queue_limits *lim);
static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
static enum ata_completion_errors nv_adma_qc_prep(struct ata_queued_cmd *qc);
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
static void nv_adma_irq_clear(struct ata_port *ap);
static int nv_adma_port_start(struct ata_port *ap);
static void nv_adma_port_stop(struct ata_port *ap);
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg);
static int nv_adma_port_resume(struct ata_port *ap);
static void nv_adma_freeze(struct ata_port *ap);
static void nv_adma_thaw(struct ata_port *ap);
static void nv_adma_error_handler(struct ata_port *ap);
static void nv_adma_host_stop(struct ata_host *host);
static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc);
static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
static void nv_mcp55_thaw(struct ata_port *ap);
static void nv_mcp55_freeze(struct ata_port *ap);
static void nv_swncq_error_handler(struct ata_port *ap);
static int nv_swncq_device_configure(struct scsi_device *sdev,
		struct queue_limits *lim);
static int nv_swncq_port_start(struct ata_port *ap);
static enum ata_completion_errors nv_swncq_qc_prep(struct ata_queued_cmd *qc);
static void nv_swncq_fill_sg(struct ata_queued_cmd *qc);
static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc);
static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis);
static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance);
static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg);
static int nv_swncq_port_resume(struct ata_port *ap);
	/* NOTE(review): the board-id enum is truncated — entries for
	 * GENERIC, NFORCE2, CK804, MCP5x (used as driver_data below) are
	 * not visible in this chunk. */
	NFORCE3		= NFORCE2,	/* NF2 == NF3 as far as sata_nv is concerned */

/* PCI ids this driver binds to; driver_data selects the board variant */
static const struct pci_device_id nv_pci_tbl[] = {
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), MCP5x },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), MCP5x },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), MCP5x },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), MCP5x },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },

	{ } /* terminate list */
/* NOTE(review): the .name initializer, the #endif closing the PM block
 * and the struct's closing "};" were lost in extraction. */
static struct pci_driver nv_pci_driver = {
	.id_table		= nv_pci_tbl,
	.probe			= nv_init_one,
#ifdef CONFIG_PM_SLEEP
	.suspend		= ata_pci_device_suspend,
	.resume			= nv_pci_device_resume,
	.remove			= ata_pci_remove_one,
/* SCSI host template for the plain BMDMA variants */
static const struct scsi_host_template nv_sht = {
	ATA_BMDMA_SHT(DRV_NAME),

/* SCSI host template for the ADMA (CK804/MCP04 NCQ) variant;
 * queue depth and s/g limits come from the ADMA CPB layout above.
 * NOTE(review): the closing "};" of each template was lost in extraction. */
static const struct scsi_host_template nv_adma_sht = {
	__ATA_BASE_SHT(DRV_NAME),
	.can_queue		= NV_ADMA_MAX_CPBS,
	.sg_tablesize		= NV_ADMA_SGTBL_TOTAL_LEN,
	.dma_boundary		= NV_ADMA_DMA_BOUNDARY,
	.device_configure	= nv_adma_device_configure,
	.sdev_groups		= ata_ncq_sdev_groups,
	.change_queue_depth     = ata_scsi_change_queue_depth,
	.tag_alloc_policy	= BLK_TAG_ALLOC_RR,

/* SCSI host template for the software-NCQ (MCP5x) variant */
static const struct scsi_host_template nv_swncq_sht = {
	__ATA_BASE_SHT(DRV_NAME),
	.can_queue		= ATA_MAX_QUEUE - 1,
	.sg_tablesize		= LIBATA_MAX_PRD,
	.dma_boundary		= ATA_DMA_BOUNDARY,
	.device_configure	= nv_swncq_device_configure,
	.sdev_groups		= ata_ncq_sdev_groups,
	.change_queue_depth     = ata_scsi_change_queue_depth,
	.tag_alloc_policy	= BLK_TAG_ALLOC_RR,
403 * NV SATA controllers have various different problems with hardreset
404 * protocol depending on the specific controller and device.
408 * bko11195 reports that link doesn't come online after hardreset on
409 * generic nv's and there have been several other similar reports on
412 * bko12351#c23 reports that warmplug on MCP61 doesn't work with
417 * bko3352 reports nf2/3 controllers can't determine device signature
418 * reliably after hardreset. The following thread reports detection
419 * failure on cold boot with the standard debouncing timing.
421 * http://thread.gmane.org/gmane.linux.ide/34098
423 * bko12176 reports that hardreset fails to bring up the link during
428 * For initial probing after boot and hot plugging, hardreset mostly
429 * works fine on CK804 but curiously, reprobing on the initial port
430 * by rescanning or rmmod/insmod fails to acquire the initial D2H Reg
431 * FIS in somewhat undeterministic way.
435 * bko12351 reports that when SWNCQ is enabled, for hotplug to work,
436 * hardreset should be used and hardreset can't report proper
437 * signature, which suggests that mcp5x is closer to nf2 as long as
438 * reset quirkiness is concerned.
440 * bko12703 reports that boot probing fails for intel SSD with
441 * hardreset. Link fails to come online. Softreset works fine.
443 * The failures are varied but the following patterns seem true for
446 * - Softreset during boot always works.
448 * - Hardreset during boot sometimes fails to bring up the link on
449 * certain comibnations and device signature acquisition is
452 * - Hardreset is often necessary after hotplug.
454 * So, preferring softreset for boot probing and error handling (as
455 * hardreset might bring down the link) but using hardreset for
456 * post-boot probing should work around the above issues in most
457 * cases. Define nv_hardreset() which only kicks in for post-boot
458 * probing and use it for all variants.
/* base operations shared by all variants; nv_hardreset is used only for
 * post-boot probing (see the rationale comment above).
 * NOTE(review): the closing "};" of each ops struct and the
 * "struct nv_pi_priv {" opener near the end were lost in extraction. */
static struct ata_port_operations nv_generic_ops = {
	.inherits		= &ata_bmdma_port_ops,
	.lost_interrupt		= ATA_OP_NULL,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.hardreset		= nv_hardreset,

static struct ata_port_operations nv_nf2_ops = {
	.inherits		= &nv_generic_ops,
	.freeze			= nv_nf2_freeze,

static struct ata_port_operations nv_ck804_ops = {
	.inherits		= &nv_generic_ops,
	.freeze			= nv_ck804_freeze,
	.thaw			= nv_ck804_thaw,
	.host_stop		= nv_ck804_host_stop,

static struct ata_port_operations nv_adma_ops = {
	.inherits		= &nv_ck804_ops,

	.check_atapi_dma	= nv_adma_check_atapi_dma,
	.sff_tf_read		= nv_adma_tf_read,
	.qc_defer		= ata_std_qc_defer,
	.qc_prep		= nv_adma_qc_prep,
	.qc_issue		= nv_adma_qc_issue,
	.sff_irq_clear		= nv_adma_irq_clear,

	.freeze			= nv_adma_freeze,
	.thaw			= nv_adma_thaw,
	.error_handler		= nv_adma_error_handler,
	.post_internal_cmd	= nv_adma_post_internal_cmd,

	.port_start		= nv_adma_port_start,
	.port_stop		= nv_adma_port_stop,

	.port_suspend		= nv_adma_port_suspend,
	.port_resume		= nv_adma_port_resume,

	.host_stop		= nv_adma_host_stop,

static struct ata_port_operations nv_swncq_ops = {
	.inherits		= &nv_generic_ops,

	.qc_defer		= ata_std_qc_defer,
	.qc_prep		= nv_swncq_qc_prep,
	.qc_issue		= nv_swncq_qc_issue,

	.freeze			= nv_mcp55_freeze,
	.thaw			= nv_mcp55_thaw,
	.error_handler		= nv_swncq_error_handler,

	.port_suspend		= nv_swncq_port_suspend,
	.port_resume		= nv_swncq_port_resume,

	.port_start		= nv_swncq_port_start,

	/* per-board pairing of irq handler and SCSI host template */
	irq_handler_t	irq_handler;
	const struct scsi_host_template *sht;

#define NV_PI_PRIV(_irq_handler, _sht) \
	&(struct nv_pi_priv){ .irq_handler = _irq_handler, .sht = _sht }
/* per-board ata_port_info, indexed by the board-id enum.
 * NOTE(review): the "{" / "}," delimiters of each array element were lost
 * in extraction; element boundaries below are marked by comments. */
static const struct ata_port_info nv_port_info[] = {
	/* entry using nv_generic_ops (generic board) */
		.flags		= ATA_FLAG_SATA,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_generic_ops,
		.private_data	= NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
	/* entry using nv_nf2_ops (nforce2/3) */
		.flags		= ATA_FLAG_SATA,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_nf2_ops,
		.private_data	= NV_PI_PRIV(nv_nf2_interrupt, &nv_sht),
	/* entry using nv_ck804_ops (ck804) */
		.flags		= ATA_FLAG_SATA,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_ck804_ops,
		.private_data	= NV_PI_PRIV(nv_ck804_interrupt, &nv_sht),
	/* entry using nv_adma_ops — NCQ via ADMA */
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NCQ,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_adma_ops,
		.private_data	= NV_PI_PRIV(nv_adma_interrupt, &nv_adma_sht),
	/* entry using nv_generic_ops (MCP5x without SWNCQ, presumably) */
		.flags		= ATA_FLAG_SATA,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_generic_ops,
		.private_data	= NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
	/* entry using nv_swncq_ops — NCQ in software */
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NCQ,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_swncq_ops,
		.private_data	= NV_PI_PRIV(nv_swncq_interrupt, &nv_swncq_sht),
MODULE_AUTHOR("NVIDIA");
MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

/* feature switches — presumably exposed as module parameters; the
 * module_param() declarations are not visible in this chunk. */
static bool adma_enabled;
static bool swncq_enabled = true;
static bool msi_enabled;
/*
 * Switch a port from ADMA mode back to legacy register mode: wait (bounded
 * to 20 polls) for the engine to report IDLE, clear the GO bit, then wait
 * for LEGACY status and record the mode in pp->flags.
 * NOTE(review): braces, local declarations (status/tmp/count), the loop
 * delay/increment and parts of the warning calls were lost in extraction —
 * the body below is a fragment kept byte-identical.
 */
static void nv_adma_register_mode(struct ata_port *ap)
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;

	/* nothing to do if already in register mode */
	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)

	status = readw(mmio + NV_ADMA_STAT);
	while (!(status & NV_ADMA_STAT_IDLE) && count < 20) {
		status = readw(mmio + NV_ADMA_STAT);

		ata_port_warn(ap, "timeout waiting for ADMA IDLE, stat=0x%hx\n",

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

	status = readw(mmio + NV_ADMA_STAT);
	while (!(status & NV_ADMA_STAT_LEGACY) && count < 20) {
		status = readw(mmio + NV_ADMA_STAT);

			"timeout waiting for ADMA LEGACY, stat=0x%hx\n",

	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;

/*
 * Counterpart of nv_adma_register_mode: set the GO bit and wait (bounded)
 * for LEGACY to clear and IDLE to assert, then clear the register-mode flag.
 * Must not be called once ATAPI setup has completed (WARN_ON below).
 * NOTE(review): fragment — same extraction losses as above.
 */
static void nv_adma_mode(struct ata_port *ap)
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;

	/* already in ADMA mode? */
	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))

	WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

	status = readw(mmio + NV_ADMA_STAT);
	while (((status & NV_ADMA_STAT_LEGACY) ||
		!(status & NV_ADMA_STAT_IDLE)) && count < 20) {
		status = readw(mmio + NV_ADMA_STAT);

			"timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n",

	pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
/*
 * .device_configure for the ADMA variant. Chooses legacy (32-bit, BMDMA)
 * or ADMA DMA limits depending on whether the device is ATAPI, updates PCI
 * config register 20 to enable/disable the port's SATA space, adjusts the
 * host DMA mask (32-bit if either port carries an ATAPI device, since both
 * ports share one PCI function), and writes the limits into *lim.
 * NOTE(review): fragment — braces, "return" statements, the "} else {"
 * arms and the dev_printk-style log call header were lost in extraction.
 */
static int nv_adma_device_configure(struct scsi_device *sdev,
		struct queue_limits *lim)
	struct ata_port *ap = ata_shost_to_port(sdev->host);
	struct nv_adma_port_priv *pp = ap->private_data;
	struct nv_adma_port_priv *port0, *port1;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	unsigned long segment_boundary, flags;
	unsigned short sg_tablesize;
	u32 current_reg, new_reg, config_mask;

	rc = ata_scsi_device_configure(sdev, lim);

	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
		/* Not a proper libata device, ignore */

	spin_lock_irqsave(ap->lock, flags);

	if (ap->link.device[sdev->id].class == ATA_DEV_ATAPI) {
		/*
		 * NVIDIA reports that ADMA mode does not support ATAPI commands.
		 * Therefore ATAPI commands are sent through the legacy interface.
		 * However, the legacy interface only supports 32-bit DMA.
		 * Restrict DMA parameters as required by the legacy interface
		 * when an ATAPI device is connected.
		 */
		segment_boundary = ATA_DMA_BOUNDARY;
		/* Subtract 1 since an extra entry may be needed for padding */
		sg_tablesize = LIBATA_MAX_PRD - 1;

		/* the legacy DMA engine is in use, so disable ADMA */
		nv_adma_register_mode(ap);

		segment_boundary = NV_ADMA_DMA_BOUNDARY;
		sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;

	/* NOTE(review): "¤t_reg" below is mojibake for "&current_reg"
	 * ("&curr" was mangled into the ¤ HTML entity) — fix when
	 * restoring the file. */
	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, ¤t_reg);

	if (ap->port_no == 1)
		config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
			      NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
		config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
			      NV_MCP_SATA_CFG_20_PORT0_PWB_EN;

		new_reg = current_reg | config_mask;
		pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
		new_reg = current_reg & ~config_mask;
		pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;

	/* only touch PCI config space if something actually changed */
	if (current_reg != new_reg)
		pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);

	port0 = ap->host->ports[0]->private_data;
	port1 = ap->host->ports[1]->private_data;
	if ((port0->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
	    (port1->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
		/*
		 * We have to set the DMA mask to 32-bit if either port is in
		 * ATAPI mode, since they are on the same PCI device which is
		 * used for DMA mapping. If either SCSI device is not allocated
		 * yet, it's OK since that port will discover its correct
		 * setting when it does get allocated.
		 */
		rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK);
		rc = dma_set_mask(&pdev->dev, pp->adma_dma_mask);

	lim->seg_boundary_mask = segment_boundary;
	lim->max_segments = sg_tablesize;
		"DMA mask 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
		(unsigned long long)*ap->host->dev->dma_mask,
		segment_boundary, sg_tablesize);

	spin_unlock_irqrestore(ap->lock, flags);

/*
 * ATAPI DMA is only usable through the legacy path: report "can't do
 * ATAPI DMA" (non-zero) unless ATAPI setup has been completed.
 */
static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
/* .sff_tf_read override: drop back to register mode before reading the
 * taskfile with the standard SFF helper. */
static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
	/* Other than when internal or pass-through commands are executed,
	   the only time this function will be called in ADMA mode will be
	   if a command fails. In the failure case we don't care about going
	   into register mode with ADMA commands pending, as the commands will
	   all shortly be aborted anyway. We assume that NCQ commands are not
	   issued via passthrough, which is the only way that switching into
	   ADMA mode could abort outstanding commands. */
	nv_adma_register_mode(ap);

	ata_sff_tf_read(ap, tf);

/*
 * Encode a taskfile into the tf[] area of a CPB: each 16-bit word is
 * (shadow register index << 8) | value, optionally ORed with nv_adma_regbits
 * flags (WNB on the first word, CMDEND on the command word, IGN for pad).
 * NOTE(review): fragment — the "} else" arm, the trailing "while (idx < 12)"
 * pad-loop header, "return idx;" and closing braces were lost in extraction.
 */
static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
	unsigned int idx = 0;

	if (tf->flags & ATA_TFLAG_ISADDR) {
		if (tf->flags & ATA_TFLAG_LBA48) {
			/* LBA48: high-order bytes first, WNB on the leading word */
			cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->hob_feature | WNB);
			cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
			cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->hob_lbal);
			cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->hob_lbam);
			cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->hob_lbah);
			cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature);
			cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature | WNB);

		cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->nsect);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->lbal);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->lbam);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->lbah);

	if (tf->flags & ATA_TFLAG_DEVICE)
		cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device);

	/* command word terminates the list */
	cpb[idx++] = cpu_to_le16((ATA_REG_CMD << 8) | tf->command | CMDEND);

		/* pad remaining entries with IGN */
		cpb[idx++] = cpu_to_le16(IGN);
/*
 * Inspect a completed/errored CPB: on error (forced by the caller via
 * force_err, or flagged in resp_flags) record an EH description and error
 * mask, otherwise report completion when NV_CPB_RESP_DONE is set.
 * NOTE(review): fragment — braces, "else {" arms, the freeze/abort calls
 * after "Kill all commands" and the return statements were lost in
 * extraction; kept byte-identical.
 */
static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
	struct nv_adma_port_priv *pp = ap->private_data;
	u8 flags = pp->cpb[cpb_num].resp_flags;

	ata_port_dbg(ap, "CPB %d, flags=0x%x\n", cpb_num, flags);

	if (unlikely((force_err ||
		     flags & (NV_CPB_RESP_ATA_ERR |
			      NV_CPB_RESP_CMD_ERR |
			      NV_CPB_RESP_CPB_ERR)))) {
		struct ata_eh_info *ehi = &ap->link.eh_info;

		ata_ehi_clear_desc(ehi);
		__ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x: ", flags);
		if (flags & NV_CPB_RESP_ATA_ERR) {
			ata_ehi_push_desc(ehi, "ATA error");
			ehi->err_mask |= AC_ERR_DEV;
		} else if (flags & NV_CPB_RESP_CMD_ERR) {
			ata_ehi_push_desc(ehi, "CMD error");
			ehi->err_mask |= AC_ERR_DEV;
		} else if (flags & NV_CPB_RESP_CPB_ERR) {
			ata_ehi_push_desc(ehi, "CPB error");
			ehi->err_mask |= AC_ERR_SYSTEM;

			/* notifier error, but no error in CPB flags? */
			ata_ehi_push_desc(ehi, "unknown");
			ehi->err_mask |= AC_ERR_OTHER;

		/* Kill all commands. EH will determine what actually failed. */

	if (likely(flags & NV_CPB_RESP_DONE))

/*
 * Legacy-path interrupt service for one port: freeze on hot(un)plug bits,
 * bail if the DEV bit is clear, clear status for polled/absent qc, else
 * hand off to the standard BMDMA port interrupt handler.
 * NOTE(review): fragment — the freeze call and several returns were lost.
 */
static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);

	/* freeze if hotplugged */
	if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {

	/* bail out if not our interrupt */
	if (!(irq_stat & NV_INT_DEV))

	/* DEV interrupt w/ no active qc? */
	if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
		ata_sff_check_status(ap);

	/* handle interrupt */
	return ata_bmdma_port_intr(ap, qc);
/*
 * Shared interrupt handler for the ADMA variant. For each port: route to
 * the legacy handler when ADMA is disabled or the port is in register mode,
 * otherwise read and clear the ADMA notifier/status registers, freeze on
 * hotplug/timeout/SError conditions, scan CPBs flagged by the notifiers and
 * complete the finished commands, and finally write both ports' notifier
 * clear registers.
 * NOTE(review): fragment — locals (i, handled, gen_ctl, status, pos, rc,
 * done_mask), several "continue"/"goto" lines, else-arms and closing braces
 * were lost in extraction; kept byte-identical.
 */
static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
	struct ata_host *host = dev_instance;
	u32 notifier_clears[2];

	spin_lock(&host->lock);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		struct nv_adma_port_priv *pp = ap->private_data;
		void __iomem *mmio = pp->ctl_block;
		u32 notifier, notifier_error;

		notifier_clears[i] = 0;

		/* if ADMA is disabled, use standard ata interrupt handler */
		if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
			u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
				>> (NV_INT_PORT_SHIFT * i);
			handled += nv_host_intr(ap, irq_stat);

		/* if in ATA register mode, check for standard interrupts */
		if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
			u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
				>> (NV_INT_PORT_SHIFT * i);
			if (ata_tag_valid(ap->link.active_tag))
				/** NV_INT_DEV indication seems unreliable
				    at times at least in ADMA mode. Force it
				    on always when a command is active, to
				    prevent losing interrupts. */
				irq_stat |= NV_INT_DEV;
			handled += nv_host_intr(ap, irq_stat);

		notifier = readl(mmio + NV_ADMA_NOTIFIER);
		notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
		notifier_clears[i] = notifier | notifier_error;

		gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);

		/* nothing pending for this port? */
		if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&

		status = readw(mmio + NV_ADMA_STAT);

		/*
		 * Clear status. Ensure the controller sees the
		 * clearing before we start looking at any of the CPB
		 * statuses, so that any CPB completions after this
		 * point in the handler will raise another interrupt.
		 */
		writew(status, mmio + NV_ADMA_STAT);
		readw(mmio + NV_ADMA_STAT); /* flush posted write */

		handled++; /* irq handled if we got here */

		/* freeze if hotplugged or controller error */
		if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
				       NV_ADMA_STAT_HOTUNPLUG |
				       NV_ADMA_STAT_TIMEOUT |
				       NV_ADMA_STAT_SERROR))) {
			struct ata_eh_info *ehi = &ap->link.eh_info;

			ata_ehi_clear_desc(ehi);
			__ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status);
			if (status & NV_ADMA_STAT_TIMEOUT) {
				ehi->err_mask |= AC_ERR_SYSTEM;
				ata_ehi_push_desc(ehi, "timeout");
			} else if (status & NV_ADMA_STAT_HOTPLUG) {
				ata_ehi_hotplugged(ehi);
				ata_ehi_push_desc(ehi, "hotplug");
			} else if (status & NV_ADMA_STAT_HOTUNPLUG) {
				ata_ehi_hotplugged(ehi);
				ata_ehi_push_desc(ehi, "hot unplug");
			} else if (status & NV_ADMA_STAT_SERROR) {
				/* let EH analyze SError and figure out cause */
				ata_ehi_push_desc(ehi, "SError");
				ata_ehi_push_desc(ehi, "unknown");

		if (status & (NV_ADMA_STAT_DONE |
			      NV_ADMA_STAT_CPBERR |
			      NV_ADMA_STAT_CMD_COMPLETE)) {
			u32 check_commands = notifier_clears[i];

			if (status & NV_ADMA_STAT_CPBERR) {
				/* check all active commands */
				if (ata_tag_valid(ap->link.active_tag))
					check_commands = 1 <<
					check_commands = ap->link.sactive;

			/* check CPBs for completed commands */
			while ((pos = ffs(check_commands))) {
				rc = nv_adma_check_cpb(ap, pos,
						notifier_error & (1 << pos));
					done_mask |= 1 << pos;
				else if (unlikely(rc < 0))
				check_commands &= ~(1 << pos);
			ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask);

	if (notifier_clears[0] || notifier_clears[1]) {
		/* Note: Both notifier clear registers must be written
		   if either is set, even if one is zero, according to NVIDIA. */
		struct nv_adma_port_priv *pp = host->ports[0]->private_data;
		writel(notifier_clears[0], pp->notifier_clear_block);
		pp = host->ports[1]->private_data;
		writel(notifier_clears[1], pp->notifier_clear_block);

	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
/*
 * .freeze for the ADMA variant: do the CK804 freeze, then (unless ADMA is
 * disabled for ATAPI) clear pending CK804 notifications and mask the ADMA
 * interrupt-enable bits.
 * NOTE(review): fragment — braces, the local "u16 tmp;" declaration and an
 * early return were lost in extraction.
 */
static void nv_adma_freeze(struct ata_port *ap)
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;

	nv_ck804_freeze(ap);

	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)

	/* clear any outstanding CK804 notifications */
	writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
		ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);

	/* Disable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp & ~(NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
		mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */

/*
 * .thaw counterpart: re-enable the ADMA interrupt-enable bits (skipped
 * when ADMA is disabled for ATAPI). Same extraction losses as above.
 */
static void nv_adma_thaw(struct ata_port *ap)
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;

	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)

	/* Enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | (NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
		mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
/*
 * .sff_irq_clear for the ADMA variant: fall back to the BMDMA helper when
 * ADMA is disabled; otherwise clear CK804 notifications, the ADMA status
 * word, and both ports' notifier registers (all-ones for this port, zero
 * for the other — hardware requires both to be written).
 * NOTE(review): fragment — braces, an early return and the "} else {" arm
 * were lost in extraction.
 */
static void nv_adma_irq_clear(struct ata_port *ap)
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u32 notifier_clears[2];

	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
		ata_bmdma_irq_clear(ap);

	/* clear any outstanding CK804 notifications */
	writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
		ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);

	/* clear ADMA status */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* clear notifiers - note both ports need to be written with
	   something even though we are only clearing on one */
	if (ap->port_no == 0) {
		notifier_clears[0] = 0xFFFFFFFF;
		notifier_clears[1] = 0;
		notifier_clears[0] = 0;
		notifier_clears[1] = 0xFFFFFFFF;

	pp = ap->host->ports[0]->private_data;
	writel(notifier_clears[0], pp->notifier_clear_block);
	pp = ap->host->ports[1]->private_data;
	writel(notifier_clears[1], pp->notifier_clear_block);

/*
 * .post_internal_cmd: only run the BMDMA cleanup when the port is in
 * register mode (the ADMA engine does not use the BMDMA machinery).
 */
static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc)
	struct nv_adma_port_priv *pp = qc->ap->private_data;

	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
		ata_bmdma_post_internal_cmd(qc);
/*
 * .port_start for the ADMA variant: set a 32-bit DMA mask, allocate BMDMA
 * fallback resources, allocate per-port private data and the coherent
 * CPB + APRD DMA area (raising the mask to 64-bit first), program the CPB
 * base registers, then reset the channel into register mode with
 * interrupts enabled.
 * NOTE(review): fragment — locals (rc, mmio, mem, mem_dma, tmp), error
 * returns, pp->cpb/pp->aprd assignments and closing braces were lost in
 * extraction; kept byte-identical.
 */
static int nv_adma_port_start(struct ata_port *ap)
	struct device *dev = ap->host->dev;
	struct nv_adma_port_priv *pp;
	struct pci_dev *pdev = to_pci_dev(dev);

	/*
	 * Ensure DMA mask is set to 32-bit before allocating legacy PRD and
	 * padding buffers.
	 */
	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));

	/* we might fallback to bmdma, allocate bmdma resources */
	rc = ata_bmdma_port_start(ap);

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);

	/* per-port slice of the ADMA register space in BAR5 */
	mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT +
	       ap->port_no * NV_ADMA_PORT_SIZE;
	pp->ctl_block = mmio;
	pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN;
	pp->notifier_clear_block = pp->gen_block +
	       NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);

	/*
	 * Now that the legacy PRD and padding buffer are allocated we can
	 * raise the DMA mask to allocate the CPB/APRD table.
	 */
	dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));

	pp->adma_dma_mask = *dev->dma_mask;

	mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
				  &mem_dma, GFP_KERNEL);

	/*
	 * First item in chunk of DMA memory:
	 * 128-byte command parameter block (CPB)
	 * one for each command tag
	 */
	pp->cpb_dma = mem_dma;

	writel(mem_dma & 0xFFFFFFFF,	mmio + NV_ADMA_CPB_BASE_LOW);
	writel((mem_dma >> 16) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);

	mem     += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
	mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;

	/*
	 * Second item: block of ADMA_SGTBL_LEN s/g entries
	 */
	pp->aprd_dma = mem_dma;

	ap->private_data = pp;

	/* clear any outstanding interrupt conditions */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* initialize port variables */
	pp->flags = NV_ADMA_PORT_REGISTER_MODE;

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* clear GO for register mode, enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
		NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);

	/* pulse CHANNEL_RESET, flushing each posted write */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
1190 static void nv_adma_port_stop(struct ata_port
*ap
)
1192 struct nv_adma_port_priv
*pp
= ap
->private_data
;
1193 void __iomem
*mmio
= pp
->ctl_block
;
1195 writew(0, mmio
+ NV_ADMA_CTL
);
1199 static int nv_adma_port_suspend(struct ata_port
*ap
, pm_message_t mesg
)
1201 struct nv_adma_port_priv
*pp
= ap
->private_data
;
1202 void __iomem
*mmio
= pp
->ctl_block
;
1204 /* Go to register mode - clears GO */
1205 nv_adma_register_mode(ap
);
1207 /* clear CPB fetch count */
1208 writew(0, mmio
+ NV_ADMA_CPB_COUNT
);
1210 /* disable interrupt, shut down port */
1211 writew(0, mmio
+ NV_ADMA_CTL
);
1216 static int nv_adma_port_resume(struct ata_port
*ap
)
1218 struct nv_adma_port_priv
*pp
= ap
->private_data
;
1219 void __iomem
*mmio
= pp
->ctl_block
;
1222 /* set CPB block location */
1223 writel(pp
->cpb_dma
& 0xFFFFFFFF, mmio
+ NV_ADMA_CPB_BASE_LOW
);
1224 writel((pp
->cpb_dma
>> 16) >> 16, mmio
+ NV_ADMA_CPB_BASE_HIGH
);
1226 /* clear any outstanding interrupt conditions */
1227 writew(0xffff, mmio
+ NV_ADMA_STAT
);
1229 /* initialize port variables */
1230 pp
->flags
|= NV_ADMA_PORT_REGISTER_MODE
;
1232 /* clear CPB fetch count */
1233 writew(0, mmio
+ NV_ADMA_CPB_COUNT
);
1235 /* clear GO for register mode, enable interrupt */
1236 tmp
= readw(mmio
+ NV_ADMA_CTL
);
1237 writew((tmp
& ~NV_ADMA_CTL_GO
) | NV_ADMA_CTL_AIEN
|
1238 NV_ADMA_CTL_HOTPLUG_IEN
, mmio
+ NV_ADMA_CTL
);
1240 tmp
= readw(mmio
+ NV_ADMA_CTL
);
1241 writew(tmp
| NV_ADMA_CTL_CHANNEL_RESET
, mmio
+ NV_ADMA_CTL
);
1242 readw(mmio
+ NV_ADMA_CTL
); /* flush posted write */
1244 writew(tmp
& ~NV_ADMA_CTL_CHANNEL_RESET
, mmio
+ NV_ADMA_CTL
);
1245 readw(mmio
+ NV_ADMA_CTL
); /* flush posted write */
1251 static void nv_adma_setup_port(struct ata_port
*ap
)
1253 void __iomem
*mmio
= ap
->host
->iomap
[NV_MMIO_BAR
];
1254 struct ata_ioports
*ioport
= &ap
->ioaddr
;
1256 mmio
+= NV_ADMA_PORT
+ ap
->port_no
* NV_ADMA_PORT_SIZE
;
1258 ioport
->cmd_addr
= mmio
;
1259 ioport
->data_addr
= mmio
+ (ATA_REG_DATA
* 4);
1260 ioport
->error_addr
=
1261 ioport
->feature_addr
= mmio
+ (ATA_REG_ERR
* 4);
1262 ioport
->nsect_addr
= mmio
+ (ATA_REG_NSECT
* 4);
1263 ioport
->lbal_addr
= mmio
+ (ATA_REG_LBAL
* 4);
1264 ioport
->lbam_addr
= mmio
+ (ATA_REG_LBAM
* 4);
1265 ioport
->lbah_addr
= mmio
+ (ATA_REG_LBAH
* 4);
1266 ioport
->device_addr
= mmio
+ (ATA_REG_DEVICE
* 4);
1267 ioport
->status_addr
=
1268 ioport
->command_addr
= mmio
+ (ATA_REG_STATUS
* 4);
1269 ioport
->altstatus_addr
=
1270 ioport
->ctl_addr
= mmio
+ 0x20;
1273 static int nv_adma_host_init(struct ata_host
*host
)
1275 struct pci_dev
*pdev
= to_pci_dev(host
->dev
);
1279 /* enable ADMA on the ports */
1280 pci_read_config_dword(pdev
, NV_MCP_SATA_CFG_20
, &tmp32
);
1281 tmp32
|= NV_MCP_SATA_CFG_20_PORT0_EN
|
1282 NV_MCP_SATA_CFG_20_PORT0_PWB_EN
|
1283 NV_MCP_SATA_CFG_20_PORT1_EN
|
1284 NV_MCP_SATA_CFG_20_PORT1_PWB_EN
;
1286 pci_write_config_dword(pdev
, NV_MCP_SATA_CFG_20
, tmp32
);
1288 for (i
= 0; i
< host
->n_ports
; i
++)
1289 nv_adma_setup_port(host
->ports
[i
]);
1294 static void nv_adma_fill_aprd(struct ata_queued_cmd
*qc
,
1295 struct scatterlist
*sg
,
1297 struct nv_adma_prd
*aprd
)
1300 if (qc
->tf
.flags
& ATA_TFLAG_WRITE
)
1301 flags
|= NV_APRD_WRITE
;
1302 if (idx
== qc
->n_elem
- 1)
1303 flags
|= NV_APRD_END
;
1305 flags
|= NV_APRD_CONT
;
1307 aprd
->addr
= cpu_to_le64(((u64
)sg_dma_address(sg
)));
1308 aprd
->len
= cpu_to_le32(((u32
)sg_dma_len(sg
))); /* len in bytes */
1309 aprd
->flags
= flags
;
1310 aprd
->packet_len
= 0;
1313 static void nv_adma_fill_sg(struct ata_queued_cmd
*qc
, struct nv_adma_cpb
*cpb
)
1315 struct nv_adma_port_priv
*pp
= qc
->ap
->private_data
;
1316 struct nv_adma_prd
*aprd
;
1317 struct scatterlist
*sg
;
1320 for_each_sg(qc
->sg
, sg
, qc
->n_elem
, si
) {
1321 aprd
= (si
< 5) ? &cpb
->aprd
[si
] :
1322 &pp
->aprd
[NV_ADMA_SGTBL_LEN
* qc
->hw_tag
+ (si
-5)];
1323 nv_adma_fill_aprd(qc
, sg
, si
, aprd
);
1326 cpb
->next_aprd
= cpu_to_le64(((u64
)(pp
->aprd_dma
+ NV_ADMA_SGTBL_SZ
* qc
->hw_tag
)));
1328 cpb
->next_aprd
= cpu_to_le64(0);
1331 static int nv_adma_use_reg_mode(struct ata_queued_cmd
*qc
)
1333 struct nv_adma_port_priv
*pp
= qc
->ap
->private_data
;
1335 /* ADMA engine can only be used for non-ATAPI DMA commands,
1336 or interrupt-driven no-data commands. */
1337 if ((pp
->flags
& NV_ADMA_ATAPI_SETUP_COMPLETE
) ||
1338 (qc
->tf
.flags
& ATA_TFLAG_POLLING
))
1341 if ((qc
->flags
& ATA_QCFLAG_DMAMAP
) ||
1342 (qc
->tf
.protocol
== ATA_PROT_NODATA
))
1348 static enum ata_completion_errors
nv_adma_qc_prep(struct ata_queued_cmd
*qc
)
1350 struct nv_adma_port_priv
*pp
= qc
->ap
->private_data
;
1351 struct nv_adma_cpb
*cpb
= &pp
->cpb
[qc
->hw_tag
];
1352 u8 ctl_flags
= NV_CPB_CTL_CPB_VALID
|
1355 if (nv_adma_use_reg_mode(qc
)) {
1356 BUG_ON(!(pp
->flags
& NV_ADMA_ATAPI_SETUP_COMPLETE
) &&
1357 (qc
->flags
& ATA_QCFLAG_DMAMAP
));
1358 nv_adma_register_mode(qc
->ap
);
1359 ata_bmdma_qc_prep(qc
);
1363 cpb
->resp_flags
= NV_CPB_RESP_DONE
;
1369 cpb
->tag
= qc
->hw_tag
;
1370 cpb
->next_cpb_idx
= 0;
1372 /* turn on NCQ flags for NCQ commands */
1373 if (qc
->tf
.protocol
== ATA_PROT_NCQ
)
1374 ctl_flags
|= NV_CPB_CTL_QUEUE
| NV_CPB_CTL_FPDMA
;
1376 nv_adma_tf_to_cpb(&qc
->tf
, cpb
->tf
);
1378 if (qc
->flags
& ATA_QCFLAG_DMAMAP
) {
1379 nv_adma_fill_sg(qc
, cpb
);
1380 ctl_flags
|= NV_CPB_CTL_APRD_VALID
;
1382 memset(&cpb
->aprd
[0], 0, sizeof(struct nv_adma_prd
) * 5);
1384 /* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID
1385 until we are finished filling in all of the contents */
1387 cpb
->ctl_flags
= ctl_flags
;
1389 cpb
->resp_flags
= 0;
1394 static unsigned int nv_adma_qc_issue(struct ata_queued_cmd
*qc
)
1396 struct nv_adma_port_priv
*pp
= qc
->ap
->private_data
;
1397 void __iomem
*mmio
= pp
->ctl_block
;
1398 int curr_ncq
= (qc
->tf
.protocol
== ATA_PROT_NCQ
);
1400 /* We can't handle result taskfile with NCQ commands, since
1401 retrieving the taskfile switches us out of ADMA mode and would abort
1402 existing commands. */
1403 if (unlikely(qc
->tf
.protocol
== ATA_PROT_NCQ
&&
1404 (qc
->flags
& ATA_QCFLAG_RESULT_TF
))) {
1405 ata_dev_err(qc
->dev
, "NCQ w/ RESULT_TF not allowed\n");
1406 return AC_ERR_SYSTEM
;
1409 if (nv_adma_use_reg_mode(qc
)) {
1410 /* use ATA register mode */
1411 BUG_ON(!(pp
->flags
& NV_ADMA_ATAPI_SETUP_COMPLETE
) &&
1412 (qc
->flags
& ATA_QCFLAG_DMAMAP
));
1413 nv_adma_register_mode(qc
->ap
);
1414 return ata_bmdma_qc_issue(qc
);
1416 nv_adma_mode(qc
->ap
);
1418 /* write append register, command tag in lower 8 bits
1419 and (number of cpbs to append -1) in top 8 bits */
1422 if (curr_ncq
!= pp
->last_issue_ncq
) {
1423 /* Seems to need some delay before switching between NCQ and
1424 non-NCQ commands, else we get command timeouts and such. */
1426 pp
->last_issue_ncq
= curr_ncq
;
1429 writew(qc
->hw_tag
, mmio
+ NV_ADMA_APPEND
);
1434 static irqreturn_t
nv_generic_interrupt(int irq
, void *dev_instance
)
1436 struct ata_host
*host
= dev_instance
;
1438 unsigned int handled
= 0;
1439 unsigned long flags
;
1441 spin_lock_irqsave(&host
->lock
, flags
);
1443 for (i
= 0; i
< host
->n_ports
; i
++) {
1444 struct ata_port
*ap
= host
->ports
[i
];
1445 struct ata_queued_cmd
*qc
;
1447 qc
= ata_qc_from_tag(ap
, ap
->link
.active_tag
);
1448 if (qc
&& (!(qc
->tf
.flags
& ATA_TFLAG_POLLING
))) {
1449 handled
+= ata_bmdma_port_intr(ap
, qc
);
1452 * No request pending? Clear interrupt status
1453 * anyway, in case there's one pending.
1455 ap
->ops
->sff_check_status(ap
);
1459 spin_unlock_irqrestore(&host
->lock
, flags
);
1461 return IRQ_RETVAL(handled
);
1464 static irqreturn_t
nv_do_interrupt(struct ata_host
*host
, u8 irq_stat
)
1468 for (i
= 0; i
< host
->n_ports
; i
++) {
1469 handled
+= nv_host_intr(host
->ports
[i
], irq_stat
);
1470 irq_stat
>>= NV_INT_PORT_SHIFT
;
1473 return IRQ_RETVAL(handled
);
1476 static irqreturn_t
nv_nf2_interrupt(int irq
, void *dev_instance
)
1478 struct ata_host
*host
= dev_instance
;
1482 spin_lock(&host
->lock
);
1483 irq_stat
= ioread8(host
->ports
[0]->ioaddr
.scr_addr
+ NV_INT_STATUS
);
1484 ret
= nv_do_interrupt(host
, irq_stat
);
1485 spin_unlock(&host
->lock
);
1490 static irqreturn_t
nv_ck804_interrupt(int irq
, void *dev_instance
)
1492 struct ata_host
*host
= dev_instance
;
1496 spin_lock(&host
->lock
);
1497 irq_stat
= readb(host
->iomap
[NV_MMIO_BAR
] + NV_INT_STATUS_CK804
);
1498 ret
= nv_do_interrupt(host
, irq_stat
);
1499 spin_unlock(&host
->lock
);
1504 static int nv_scr_read(struct ata_link
*link
, unsigned int sc_reg
, u32
*val
)
1506 if (sc_reg
> SCR_CONTROL
)
1509 *val
= ioread32(link
->ap
->ioaddr
.scr_addr
+ (sc_reg
* 4));
1513 static int nv_scr_write(struct ata_link
*link
, unsigned int sc_reg
, u32 val
)
1515 if (sc_reg
> SCR_CONTROL
)
1518 iowrite32(val
, link
->ap
->ioaddr
.scr_addr
+ (sc_reg
* 4));
1522 static int nv_hardreset(struct ata_link
*link
, unsigned int *class,
1523 unsigned long deadline
)
1525 struct ata_eh_context
*ehc
= &link
->eh_context
;
1527 /* Do hardreset iff it's post-boot probing, please read the
1528 * comment above port ops for details.
1530 if (!(link
->ap
->pflags
& ATA_PFLAG_LOADING
) &&
1531 !ata_dev_enabled(link
->device
))
1532 sata_link_hardreset(link
, sata_deb_timing_hotplug
, deadline
,
1535 const unsigned int *timing
= sata_ehc_deb_timing(ehc
);
1538 if (!(ehc
->i
.flags
& ATA_EHI_QUIET
))
1540 "nv: skipping hardreset on occupied port\n");
1542 /* make sure the link is online */
1543 rc
= sata_link_resume(link
, timing
, deadline
);
1544 /* whine about phy resume failure but proceed */
1545 if (rc
&& rc
!= -EOPNOTSUPP
)
1546 ata_link_warn(link
, "failed to resume link (errno=%d)\n",
1550 /* device signature acquisition is unreliable */
1554 static void nv_nf2_freeze(struct ata_port
*ap
)
1556 void __iomem
*scr_addr
= ap
->host
->ports
[0]->ioaddr
.scr_addr
;
1557 int shift
= ap
->port_no
* NV_INT_PORT_SHIFT
;
1560 mask
= ioread8(scr_addr
+ NV_INT_ENABLE
);
1561 mask
&= ~(NV_INT_ALL
<< shift
);
1562 iowrite8(mask
, scr_addr
+ NV_INT_ENABLE
);
1565 static void nv_nf2_thaw(struct ata_port
*ap
)
1567 void __iomem
*scr_addr
= ap
->host
->ports
[0]->ioaddr
.scr_addr
;
1568 int shift
= ap
->port_no
* NV_INT_PORT_SHIFT
;
1571 iowrite8(NV_INT_ALL
<< shift
, scr_addr
+ NV_INT_STATUS
);
1573 mask
= ioread8(scr_addr
+ NV_INT_ENABLE
);
1574 mask
|= (NV_INT_MASK
<< shift
);
1575 iowrite8(mask
, scr_addr
+ NV_INT_ENABLE
);
1578 static void nv_ck804_freeze(struct ata_port
*ap
)
1580 void __iomem
*mmio_base
= ap
->host
->iomap
[NV_MMIO_BAR
];
1581 int shift
= ap
->port_no
* NV_INT_PORT_SHIFT
;
1584 mask
= readb(mmio_base
+ NV_INT_ENABLE_CK804
);
1585 mask
&= ~(NV_INT_ALL
<< shift
);
1586 writeb(mask
, mmio_base
+ NV_INT_ENABLE_CK804
);
1589 static void nv_ck804_thaw(struct ata_port
*ap
)
1591 void __iomem
*mmio_base
= ap
->host
->iomap
[NV_MMIO_BAR
];
1592 int shift
= ap
->port_no
* NV_INT_PORT_SHIFT
;
1595 writeb(NV_INT_ALL
<< shift
, mmio_base
+ NV_INT_STATUS_CK804
);
1597 mask
= readb(mmio_base
+ NV_INT_ENABLE_CK804
);
1598 mask
|= (NV_INT_MASK
<< shift
);
1599 writeb(mask
, mmio_base
+ NV_INT_ENABLE_CK804
);
1602 static void nv_mcp55_freeze(struct ata_port
*ap
)
1604 void __iomem
*mmio_base
= ap
->host
->iomap
[NV_MMIO_BAR
];
1605 int shift
= ap
->port_no
* NV_INT_PORT_SHIFT_MCP55
;
1608 writel(NV_INT_ALL_MCP55
<< shift
, mmio_base
+ NV_INT_STATUS_MCP55
);
1610 mask
= readl(mmio_base
+ NV_INT_ENABLE_MCP55
);
1611 mask
&= ~(NV_INT_ALL_MCP55
<< shift
);
1612 writel(mask
, mmio_base
+ NV_INT_ENABLE_MCP55
);
1615 static void nv_mcp55_thaw(struct ata_port
*ap
)
1617 void __iomem
*mmio_base
= ap
->host
->iomap
[NV_MMIO_BAR
];
1618 int shift
= ap
->port_no
* NV_INT_PORT_SHIFT_MCP55
;
1621 writel(NV_INT_ALL_MCP55
<< shift
, mmio_base
+ NV_INT_STATUS_MCP55
);
1623 mask
= readl(mmio_base
+ NV_INT_ENABLE_MCP55
);
1624 mask
|= (NV_INT_MASK_MCP55
<< shift
);
1625 writel(mask
, mmio_base
+ NV_INT_ENABLE_MCP55
);
1628 static void nv_adma_error_handler(struct ata_port
*ap
)
1630 struct nv_adma_port_priv
*pp
= ap
->private_data
;
1631 if (!(pp
->flags
& NV_ADMA_PORT_REGISTER_MODE
)) {
1632 void __iomem
*mmio
= pp
->ctl_block
;
1636 if (ata_tag_valid(ap
->link
.active_tag
) || ap
->link
.sactive
) {
1637 u32 notifier
= readl(mmio
+ NV_ADMA_NOTIFIER
);
1638 u32 notifier_error
= readl(mmio
+ NV_ADMA_NOTIFIER_ERROR
);
1639 u32 gen_ctl
= readl(pp
->gen_block
+ NV_ADMA_GEN_CTL
);
1640 u32 status
= readw(mmio
+ NV_ADMA_STAT
);
1641 u8 cpb_count
= readb(mmio
+ NV_ADMA_CPB_COUNT
);
1642 u8 next_cpb_idx
= readb(mmio
+ NV_ADMA_NEXT_CPB_IDX
);
1645 "EH in ADMA mode, notifier 0x%X "
1646 "notifier_error 0x%X gen_ctl 0x%X status 0x%X "
1647 "next cpb count 0x%X next cpb idx 0x%x\n",
1648 notifier
, notifier_error
, gen_ctl
, status
,
1649 cpb_count
, next_cpb_idx
);
1651 for (i
= 0; i
< NV_ADMA_MAX_CPBS
; i
++) {
1652 struct nv_adma_cpb
*cpb
= &pp
->cpb
[i
];
1653 if ((ata_tag_valid(ap
->link
.active_tag
) && i
== ap
->link
.active_tag
) ||
1654 ap
->link
.sactive
& (1 << i
))
1656 "CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
1657 i
, cpb
->ctl_flags
, cpb
->resp_flags
);
1661 /* Push us back into port register mode for error handling. */
1662 nv_adma_register_mode(ap
);
1664 /* Mark all of the CPBs as invalid to prevent them from
1666 for (i
= 0; i
< NV_ADMA_MAX_CPBS
; i
++)
1667 pp
->cpb
[i
].ctl_flags
&= ~NV_CPB_CTL_CPB_VALID
;
1669 /* clear CPB fetch count */
1670 writew(0, mmio
+ NV_ADMA_CPB_COUNT
);
1673 tmp
= readw(mmio
+ NV_ADMA_CTL
);
1674 writew(tmp
| NV_ADMA_CTL_CHANNEL_RESET
, mmio
+ NV_ADMA_CTL
);
1675 readw(mmio
+ NV_ADMA_CTL
); /* flush posted write */
1677 writew(tmp
& ~NV_ADMA_CTL_CHANNEL_RESET
, mmio
+ NV_ADMA_CTL
);
1678 readw(mmio
+ NV_ADMA_CTL
); /* flush posted write */
1681 ata_bmdma_error_handler(ap
);
1684 static void nv_swncq_qc_to_dq(struct ata_port
*ap
, struct ata_queued_cmd
*qc
)
1686 struct nv_swncq_port_priv
*pp
= ap
->private_data
;
1687 struct defer_queue
*dq
= &pp
->defer_queue
;
1690 WARN_ON(dq
->tail
- dq
->head
== ATA_MAX_QUEUE
);
1691 dq
->defer_bits
|= (1 << qc
->hw_tag
);
1692 dq
->tag
[dq
->tail
++ & (ATA_MAX_QUEUE
- 1)] = qc
->hw_tag
;
1695 static struct ata_queued_cmd
*nv_swncq_qc_from_dq(struct ata_port
*ap
)
1697 struct nv_swncq_port_priv
*pp
= ap
->private_data
;
1698 struct defer_queue
*dq
= &pp
->defer_queue
;
1701 if (dq
->head
== dq
->tail
) /* null queue */
1704 tag
= dq
->tag
[dq
->head
& (ATA_MAX_QUEUE
- 1)];
1705 dq
->tag
[dq
->head
++ & (ATA_MAX_QUEUE
- 1)] = ATA_TAG_POISON
;
1706 WARN_ON(!(dq
->defer_bits
& (1 << tag
)));
1707 dq
->defer_bits
&= ~(1 << tag
);
1709 return ata_qc_from_tag(ap
, tag
);
1712 static void nv_swncq_fis_reinit(struct ata_port
*ap
)
1714 struct nv_swncq_port_priv
*pp
= ap
->private_data
;
1717 pp
->dmafis_bits
= 0;
1718 pp
->sdbfis_bits
= 0;
1722 static void nv_swncq_pp_reinit(struct ata_port
*ap
)
1724 struct nv_swncq_port_priv
*pp
= ap
->private_data
;
1725 struct defer_queue
*dq
= &pp
->defer_queue
;
1731 pp
->last_issue_tag
= ATA_TAG_POISON
;
1732 nv_swncq_fis_reinit(ap
);
1735 static void nv_swncq_irq_clear(struct ata_port
*ap
, u16 fis
)
1737 struct nv_swncq_port_priv
*pp
= ap
->private_data
;
1739 writew(fis
, pp
->irq_block
);
1742 static void __ata_bmdma_stop(struct ata_port
*ap
)
1744 struct ata_queued_cmd qc
;
1747 ata_bmdma_stop(&qc
);
1750 static void nv_swncq_ncq_stop(struct ata_port
*ap
)
1752 struct nv_swncq_port_priv
*pp
= ap
->private_data
;
1757 ata_port_err(ap
, "EH in SWNCQ mode,QC:qc_active 0x%llX sactive 0x%X\n",
1758 ap
->qc_active
, ap
->link
.sactive
);
1760 "SWNCQ:qc_active 0x%X defer_bits 0x%X last_issue_tag 0x%x\n "
1761 "dhfis 0x%X dmafis 0x%X sdbfis 0x%X\n",
1762 pp
->qc_active
, pp
->defer_queue
.defer_bits
, pp
->last_issue_tag
,
1763 pp
->dhfis_bits
, pp
->dmafis_bits
, pp
->sdbfis_bits
);
1765 ata_port_err(ap
, "ATA_REG 0x%X ERR_REG 0x%X\n",
1766 ap
->ops
->sff_check_status(ap
),
1767 ioread8(ap
->ioaddr
.error_addr
));
1769 sactive
= readl(pp
->sactive_block
);
1770 done_mask
= pp
->qc_active
^ sactive
;
1772 ata_port_err(ap
, "tag : dhfis dmafis sdbfis sactive\n");
1773 for (i
= 0; i
< ATA_MAX_QUEUE
; i
++) {
1775 if (pp
->qc_active
& (1 << i
))
1777 else if (done_mask
& (1 << i
))
1783 "tag 0x%x: %01x %01x %01x %01x %s\n", i
,
1784 (pp
->dhfis_bits
>> i
) & 0x1,
1785 (pp
->dmafis_bits
>> i
) & 0x1,
1786 (pp
->sdbfis_bits
>> i
) & 0x1,
1787 (sactive
>> i
) & 0x1,
1788 (err
? "error! tag doesn't exit" : " "));
1791 nv_swncq_pp_reinit(ap
);
1792 ap
->ops
->sff_irq_clear(ap
);
1793 __ata_bmdma_stop(ap
);
1794 nv_swncq_irq_clear(ap
, 0xffff);
1797 static void nv_swncq_error_handler(struct ata_port
*ap
)
1799 struct ata_eh_context
*ehc
= &ap
->link
.eh_context
;
1801 if (ap
->link
.sactive
) {
1802 nv_swncq_ncq_stop(ap
);
1803 ehc
->i
.action
|= ATA_EH_RESET
;
1806 ata_bmdma_error_handler(ap
);
1810 static int nv_swncq_port_suspend(struct ata_port
*ap
, pm_message_t mesg
)
1812 void __iomem
*mmio
= ap
->host
->iomap
[NV_MMIO_BAR
];
1816 writel(~0, mmio
+ NV_INT_STATUS_MCP55
);
1819 writel(0, mmio
+ NV_INT_ENABLE_MCP55
);
1822 tmp
= readl(mmio
+ NV_CTL_MCP55
);
1823 tmp
&= ~(NV_CTL_PRI_SWNCQ
| NV_CTL_SEC_SWNCQ
);
1824 writel(tmp
, mmio
+ NV_CTL_MCP55
);
1829 static int nv_swncq_port_resume(struct ata_port
*ap
)
1831 void __iomem
*mmio
= ap
->host
->iomap
[NV_MMIO_BAR
];
1835 writel(~0, mmio
+ NV_INT_STATUS_MCP55
);
1838 writel(0x00fd00fd, mmio
+ NV_INT_ENABLE_MCP55
);
1841 tmp
= readl(mmio
+ NV_CTL_MCP55
);
1842 writel(tmp
| NV_CTL_PRI_SWNCQ
| NV_CTL_SEC_SWNCQ
, mmio
+ NV_CTL_MCP55
);
1848 static void nv_swncq_host_init(struct ata_host
*host
)
1851 void __iomem
*mmio
= host
->iomap
[NV_MMIO_BAR
];
1852 struct pci_dev
*pdev
= to_pci_dev(host
->dev
);
1855 /* disable ECO 398 */
1856 pci_read_config_byte(pdev
, 0x7f, ®val
);
1857 regval
&= ~(1 << 7);
1858 pci_write_config_byte(pdev
, 0x7f, regval
);
1861 tmp
= readl(mmio
+ NV_CTL_MCP55
);
1862 dev_dbg(&pdev
->dev
, "HOST_CTL:0x%X\n", tmp
);
1863 writel(tmp
| NV_CTL_PRI_SWNCQ
| NV_CTL_SEC_SWNCQ
, mmio
+ NV_CTL_MCP55
);
1865 /* enable irq intr */
1866 tmp
= readl(mmio
+ NV_INT_ENABLE_MCP55
);
1867 dev_dbg(&pdev
->dev
, "HOST_ENABLE:0x%X\n", tmp
);
1868 writel(tmp
| 0x00fd00fd, mmio
+ NV_INT_ENABLE_MCP55
);
1870 /* clear port irq */
1871 writel(~0x0, mmio
+ NV_INT_STATUS_MCP55
);
1874 static int nv_swncq_device_configure(struct scsi_device
*sdev
,
1875 struct queue_limits
*lim
)
1877 struct ata_port
*ap
= ata_shost_to_port(sdev
->host
);
1878 struct pci_dev
*pdev
= to_pci_dev(ap
->host
->dev
);
1879 struct ata_device
*dev
;
1882 u8 check_maxtor
= 0;
1883 unsigned char model_num
[ATA_ID_PROD_LEN
+ 1];
1885 rc
= ata_scsi_device_configure(sdev
, lim
);
1886 if (sdev
->id
>= ATA_MAX_DEVICES
|| sdev
->channel
|| sdev
->lun
)
1887 /* Not a proper libata device, ignore */
1890 dev
= &ap
->link
.device
[sdev
->id
];
1891 if (!(ap
->flags
& ATA_FLAG_NCQ
) || dev
->class == ATA_DEV_ATAPI
)
1894 /* if MCP51 and Maxtor, then disable ncq */
1895 if (pdev
->device
== PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA
||
1896 pdev
->device
== PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2
)
1899 /* if MCP55 and rev <= a2 and Maxtor, then disable ncq */
1900 if (pdev
->device
== PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA
||
1901 pdev
->device
== PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2
) {
1902 pci_read_config_byte(pdev
, 0x8, &rev
);
1910 ata_id_c_string(dev
->id
, model_num
, ATA_ID_PROD
, sizeof(model_num
));
1912 if (strncmp(model_num
, "Maxtor", 6) == 0) {
1913 ata_scsi_change_queue_depth(sdev
, 1);
1914 ata_dev_notice(dev
, "Disabling SWNCQ mode (depth %x)\n",
1921 static int nv_swncq_port_start(struct ata_port
*ap
)
1923 struct device
*dev
= ap
->host
->dev
;
1924 void __iomem
*mmio
= ap
->host
->iomap
[NV_MMIO_BAR
];
1925 struct nv_swncq_port_priv
*pp
;
1928 /* we might fallback to bmdma, allocate bmdma resources */
1929 rc
= ata_bmdma_port_start(ap
);
1933 pp
= devm_kzalloc(dev
, sizeof(*pp
), GFP_KERNEL
);
1937 pp
->prd
= dmam_alloc_coherent(dev
, ATA_PRD_TBL_SZ
* ATA_MAX_QUEUE
,
1938 &pp
->prd_dma
, GFP_KERNEL
);
1942 ap
->private_data
= pp
;
1943 pp
->sactive_block
= ap
->ioaddr
.scr_addr
+ 4 * SCR_ACTIVE
;
1944 pp
->irq_block
= mmio
+ NV_INT_STATUS_MCP55
+ ap
->port_no
* 2;
1945 pp
->tag_block
= mmio
+ NV_NCQ_REG_MCP55
+ ap
->port_no
* 2;
1950 static enum ata_completion_errors
nv_swncq_qc_prep(struct ata_queued_cmd
*qc
)
1952 if (qc
->tf
.protocol
!= ATA_PROT_NCQ
) {
1953 ata_bmdma_qc_prep(qc
);
1957 if (!(qc
->flags
& ATA_QCFLAG_DMAMAP
))
1960 nv_swncq_fill_sg(qc
);
1965 static void nv_swncq_fill_sg(struct ata_queued_cmd
*qc
)
1967 struct ata_port
*ap
= qc
->ap
;
1968 struct scatterlist
*sg
;
1969 struct nv_swncq_port_priv
*pp
= ap
->private_data
;
1970 struct ata_bmdma_prd
*prd
;
1971 unsigned int si
, idx
;
1973 prd
= pp
->prd
+ ATA_MAX_PRD
* qc
->hw_tag
;
1976 for_each_sg(qc
->sg
, sg
, qc
->n_elem
, si
) {
1980 addr
= (u32
)sg_dma_address(sg
);
1981 sg_len
= sg_dma_len(sg
);
1984 offset
= addr
& 0xffff;
1986 if ((offset
+ sg_len
) > 0x10000)
1987 len
= 0x10000 - offset
;
1989 prd
[idx
].addr
= cpu_to_le32(addr
);
1990 prd
[idx
].flags_len
= cpu_to_le32(len
& 0xffff);
1998 prd
[idx
- 1].flags_len
|= cpu_to_le32(ATA_PRD_EOT
);
2001 static unsigned int nv_swncq_issue_atacmd(struct ata_port
*ap
,
2002 struct ata_queued_cmd
*qc
)
2004 struct nv_swncq_port_priv
*pp
= ap
->private_data
;
2009 writel((1 << qc
->hw_tag
), pp
->sactive_block
);
2010 pp
->last_issue_tag
= qc
->hw_tag
;
2011 pp
->dhfis_bits
&= ~(1 << qc
->hw_tag
);
2012 pp
->dmafis_bits
&= ~(1 << qc
->hw_tag
);
2013 pp
->qc_active
|= (0x1 << qc
->hw_tag
);
2015 trace_ata_tf_load(ap
, &qc
->tf
);
2016 ap
->ops
->sff_tf_load(ap
, &qc
->tf
); /* load tf registers */
2017 trace_ata_exec_command(ap
, &qc
->tf
, qc
->hw_tag
);
2018 ap
->ops
->sff_exec_command(ap
, &qc
->tf
);
2023 static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd
*qc
)
2025 struct ata_port
*ap
= qc
->ap
;
2026 struct nv_swncq_port_priv
*pp
= ap
->private_data
;
2028 if (qc
->tf
.protocol
!= ATA_PROT_NCQ
)
2029 return ata_bmdma_qc_issue(qc
);
2032 nv_swncq_issue_atacmd(ap
, qc
);
2034 nv_swncq_qc_to_dq(ap
, qc
); /* add qc to defer queue */
2039 static void nv_swncq_hotplug(struct ata_port
*ap
, u32 fis
)
2042 struct ata_eh_info
*ehi
= &ap
->link
.eh_info
;
2044 ata_ehi_clear_desc(ehi
);
2046 /* AHCI needs SError cleared; otherwise, it might lock up */
2047 sata_scr_read(&ap
->link
, SCR_ERROR
, &serror
);
2048 sata_scr_write(&ap
->link
, SCR_ERROR
, serror
);
2050 /* analyze @irq_stat */
2051 if (fis
& NV_SWNCQ_IRQ_ADDED
)
2052 ata_ehi_push_desc(ehi
, "hot plug");
2053 else if (fis
& NV_SWNCQ_IRQ_REMOVED
)
2054 ata_ehi_push_desc(ehi
, "hot unplug");
2056 ata_ehi_hotplugged(ehi
);
2058 /* okay, let's hand over to EH */
2059 ehi
->serror
|= serror
;
2061 ata_port_freeze(ap
);
2064 static int nv_swncq_sdbfis(struct ata_port
*ap
)
2066 struct ata_queued_cmd
*qc
;
2067 struct nv_swncq_port_priv
*pp
= ap
->private_data
;
2068 struct ata_eh_info
*ehi
= &ap
->link
.eh_info
;
2074 host_stat
= ap
->ops
->bmdma_status(ap
);
2075 trace_ata_bmdma_status(ap
, host_stat
);
2076 if (unlikely(host_stat
& ATA_DMA_ERR
)) {
2077 /* error when transferring data to/from memory */
2078 ata_ehi_clear_desc(ehi
);
2079 ata_ehi_push_desc(ehi
, "BMDMA stat 0x%x", host_stat
);
2080 ehi
->err_mask
|= AC_ERR_HOST_BUS
;
2081 ehi
->action
|= ATA_EH_RESET
;
2085 ap
->ops
->sff_irq_clear(ap
);
2086 __ata_bmdma_stop(ap
);
2088 sactive
= readl(pp
->sactive_block
);
2089 done_mask
= pp
->qc_active
^ sactive
;
2091 pp
->qc_active
&= ~done_mask
;
2092 pp
->dhfis_bits
&= ~done_mask
;
2093 pp
->dmafis_bits
&= ~done_mask
;
2094 pp
->sdbfis_bits
|= done_mask
;
2095 ata_qc_complete_multiple(ap
, ata_qc_get_active(ap
) ^ done_mask
);
2097 if (!ap
->qc_active
) {
2098 ata_port_dbg(ap
, "over\n");
2099 nv_swncq_pp_reinit(ap
);
2103 if (pp
->qc_active
& pp
->dhfis_bits
)
2106 if ((pp
->ncq_flags
& ncq_saw_backout
) ||
2107 (pp
->qc_active
^ pp
->dhfis_bits
))
2108 /* if the controller can't get a device to host register FIS,
2109 * The driver needs to reissue the new command.
2113 ata_port_dbg(ap
, "QC: qc_active 0x%llx,"
2114 "SWNCQ:qc_active 0x%X defer_bits %X "
2115 "dhfis 0x%X dmafis 0x%X last_issue_tag %x\n",
2116 ap
->qc_active
, pp
->qc_active
,
2117 pp
->defer_queue
.defer_bits
, pp
->dhfis_bits
,
2118 pp
->dmafis_bits
, pp
->last_issue_tag
);
2120 nv_swncq_fis_reinit(ap
);
2123 qc
= ata_qc_from_tag(ap
, pp
->last_issue_tag
);
2124 nv_swncq_issue_atacmd(ap
, qc
);
2128 if (pp
->defer_queue
.defer_bits
) {
2129 /* send deferral queue command */
2130 qc
= nv_swncq_qc_from_dq(ap
);
2131 WARN_ON(qc
== NULL
);
2132 nv_swncq_issue_atacmd(ap
, qc
);
2138 static inline u32
nv_swncq_tag(struct ata_port
*ap
)
2140 struct nv_swncq_port_priv
*pp
= ap
->private_data
;
2143 tag
= readb(pp
->tag_block
) >> 2;
2144 return (tag
& 0x1f);
2147 static void nv_swncq_dmafis(struct ata_port
*ap
)
2149 struct ata_queued_cmd
*qc
;
2153 struct nv_swncq_port_priv
*pp
= ap
->private_data
;
2155 __ata_bmdma_stop(ap
);
2156 tag
= nv_swncq_tag(ap
);
2158 ata_port_dbg(ap
, "dma setup tag 0x%x\n", tag
);
2159 qc
= ata_qc_from_tag(ap
, tag
);
2164 rw
= qc
->tf
.flags
& ATA_TFLAG_WRITE
;
2166 /* load PRD table addr. */
2167 iowrite32(pp
->prd_dma
+ ATA_PRD_TBL_SZ
* qc
->hw_tag
,
2168 ap
->ioaddr
.bmdma_addr
+ ATA_DMA_TABLE_OFS
);
2170 /* specify data direction, triple-check start bit is clear */
2171 dmactl
= ioread8(ap
->ioaddr
.bmdma_addr
+ ATA_DMA_CMD
);
2172 dmactl
&= ~ATA_DMA_WR
;
2174 dmactl
|= ATA_DMA_WR
;
2176 iowrite8(dmactl
| ATA_DMA_START
, ap
->ioaddr
.bmdma_addr
+ ATA_DMA_CMD
);
2179 static void nv_swncq_host_interrupt(struct ata_port
*ap
, u16 fis
)
2181 struct nv_swncq_port_priv
*pp
= ap
->private_data
;
2182 struct ata_queued_cmd
*qc
;
2183 struct ata_eh_info
*ehi
= &ap
->link
.eh_info
;
2187 ata_stat
= ap
->ops
->sff_check_status(ap
);
2188 nv_swncq_irq_clear(ap
, fis
);
2192 if (ata_port_is_frozen(ap
))
2195 if (fis
& NV_SWNCQ_IRQ_HOTPLUG
) {
2196 nv_swncq_hotplug(ap
, fis
);
2203 if (ap
->ops
->scr_read(&ap
->link
, SCR_ERROR
, &serror
))
2205 ap
->ops
->scr_write(&ap
->link
, SCR_ERROR
, serror
);
2207 if (ata_stat
& ATA_ERR
) {
2208 ata_ehi_clear_desc(ehi
);
2209 ata_ehi_push_desc(ehi
, "Ata error. fis:0x%X", fis
);
2210 ehi
->err_mask
|= AC_ERR_DEV
;
2211 ehi
->serror
|= serror
;
2212 ehi
->action
|= ATA_EH_RESET
;
2213 ata_port_freeze(ap
);
2217 if (fis
& NV_SWNCQ_IRQ_BACKOUT
) {
2218 /* If the IRQ is backout, driver must issue
2219 * the new command again some time later.
2221 pp
->ncq_flags
|= ncq_saw_backout
;
2224 if (fis
& NV_SWNCQ_IRQ_SDBFIS
) {
2225 pp
->ncq_flags
|= ncq_saw_sdb
;
2226 ata_port_dbg(ap
, "SWNCQ: qc_active 0x%X "
2227 "dhfis 0x%X dmafis 0x%X sactive 0x%X\n",
2228 pp
->qc_active
, pp
->dhfis_bits
,
2229 pp
->dmafis_bits
, readl(pp
->sactive_block
));
2230 if (nv_swncq_sdbfis(ap
) < 0)
2234 if (fis
& NV_SWNCQ_IRQ_DHREGFIS
) {
2235 /* The interrupt indicates the new command
2236 * was transmitted correctly to the drive.
2238 pp
->dhfis_bits
|= (0x1 << pp
->last_issue_tag
);
2239 pp
->ncq_flags
|= ncq_saw_d2h
;
2240 if (pp
->ncq_flags
& (ncq_saw_sdb
| ncq_saw_backout
)) {
2241 ata_ehi_push_desc(ehi
, "illegal fis transaction");
2242 ehi
->err_mask
|= AC_ERR_HSM
;
2243 ehi
->action
|= ATA_EH_RESET
;
2247 if (!(fis
& NV_SWNCQ_IRQ_DMASETUP
) &&
2248 !(pp
->ncq_flags
& ncq_saw_dmas
)) {
2249 ata_stat
= ap
->ops
->sff_check_status(ap
);
2250 if (ata_stat
& ATA_BUSY
)
2253 if (pp
->defer_queue
.defer_bits
) {
2254 ata_port_dbg(ap
, "send next command\n");
2255 qc
= nv_swncq_qc_from_dq(ap
);
2256 nv_swncq_issue_atacmd(ap
, qc
);
2261 if (fis
& NV_SWNCQ_IRQ_DMASETUP
) {
2262 /* program the dma controller with appropriate PRD buffers
2263 * and start the DMA transfer for requested command.
2265 pp
->dmafis_bits
|= (0x1 << nv_swncq_tag(ap
));
2266 pp
->ncq_flags
|= ncq_saw_dmas
;
2267 nv_swncq_dmafis(ap
);
2273 ata_ehi_push_desc(ehi
, "fis:0x%x", fis
);
2274 ata_port_freeze(ap
);
2278 static irqreturn_t
nv_swncq_interrupt(int irq
, void *dev_instance
)
2280 struct ata_host
*host
= dev_instance
;
2282 unsigned int handled
= 0;
2283 unsigned long flags
;
2286 spin_lock_irqsave(&host
->lock
, flags
);
2288 irq_stat
= readl(host
->iomap
[NV_MMIO_BAR
] + NV_INT_STATUS_MCP55
);
2290 for (i
= 0; i
< host
->n_ports
; i
++) {
2291 struct ata_port
*ap
= host
->ports
[i
];
2293 if (ap
->link
.sactive
) {
2294 nv_swncq_host_interrupt(ap
, (u16
)irq_stat
);
2297 if (irq_stat
) /* reserve Hotplug */
2298 nv_swncq_irq_clear(ap
, 0xfff0);
2300 handled
+= nv_host_intr(ap
, (u8
)irq_stat
);
2302 irq_stat
>>= NV_INT_PORT_SHIFT_MCP55
;
2305 spin_unlock_irqrestore(&host
->lock
, flags
);
2307 return IRQ_RETVAL(handled
);
2310 static int nv_init_one(struct pci_dev
*pdev
, const struct pci_device_id
*ent
)
2312 const struct ata_port_info
*ppi
[] = { NULL
, NULL
};
2313 struct nv_pi_priv
*ipriv
;
2314 struct ata_host
*host
;
2315 struct nv_host_priv
*hpriv
;
2319 unsigned long type
= ent
->driver_data
;
2321 // Make sure this is a SATA controller by counting the number of bars
2322 // (NVIDIA SATA controllers will always have six bars). Otherwise,
2323 // it's an IDE controller and we ignore it.
2324 for (bar
= 0; bar
< PCI_STD_NUM_BARS
; bar
++)
2325 if (pci_resource_start(pdev
, bar
) == 0)
2328 ata_print_version_once(&pdev
->dev
, DRV_VERSION
);
2330 rc
= pcim_enable_device(pdev
);
2334 /* determine type and allocate host */
2335 if (type
== CK804
&& adma_enabled
) {
2336 dev_notice(&pdev
->dev
, "Using ADMA mode\n");
2338 } else if (type
== MCP5x
&& swncq_enabled
) {
2339 dev_notice(&pdev
->dev
, "Using SWNCQ mode\n");
2343 ppi
[0] = &nv_port_info
[type
];
2344 ipriv
= ppi
[0]->private_data
;
2345 rc
= ata_pci_bmdma_prepare_host(pdev
, ppi
, &host
);
2349 hpriv
= devm_kzalloc(&pdev
->dev
, sizeof(*hpriv
), GFP_KERNEL
);
2353 host
->private_data
= hpriv
;
2355 /* request and iomap NV_MMIO_BAR */
2356 rc
= pcim_iomap_regions(pdev
, 1 << NV_MMIO_BAR
, DRV_NAME
);
2360 /* configure SCR access */
2361 base
= host
->iomap
[NV_MMIO_BAR
];
2362 host
->ports
[0]->ioaddr
.scr_addr
= base
+ NV_PORT0_SCR_REG_OFFSET
;
2363 host
->ports
[1]->ioaddr
.scr_addr
= base
+ NV_PORT1_SCR_REG_OFFSET
;
2365 /* enable SATA space for CK804 */
2366 if (type
>= CK804
) {
2369 pci_read_config_byte(pdev
, NV_MCP_SATA_CFG_20
, ®val
);
2370 regval
|= NV_MCP_SATA_CFG_20_SATA_SPACE_EN
;
2371 pci_write_config_byte(pdev
, NV_MCP_SATA_CFG_20
, regval
);
2376 rc
= nv_adma_host_init(host
);
2379 } else if (type
== SWNCQ
)
2380 nv_swncq_host_init(host
);
2383 dev_notice(&pdev
->dev
, "Using MSI\n");
2384 pci_enable_msi(pdev
);
2387 pci_set_master(pdev
);
2388 return ata_pci_sff_activate_host(host
, ipriv
->irq_handler
, ipriv
->sht
);
#ifdef CONFIG_PM_SLEEP
/*
 * nv_pci_device_resume - restore controller state after system resume
 * @pdev: PCI device being resumed
 *
 * After the generic libata PCI resume, re-enables the extended SATA
 * register space (lost across a real suspend on CK804 and later) and,
 * for ADMA-mode hosts, re-applies the per-port ADMA enable bits that
 * nv_adma_override_mode() may have cleared for ATAPI devices.
 *
 * Returns 0 on success or a negative errno from the generic resume path.
 */
static int nv_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = pci_get_drvdata(pdev);
	struct nv_host_priv *hpriv = host->private_data;
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc)
		return rc;

	/* Only a true suspend (not e.g. FREEZE) loses the config setup */
	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
		if (hpriv->type >= CK804) {
			u8 regval;

			pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
			regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
			pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
		}
		if (hpriv->type == ADMA) {
			u32 tmp32;
			struct nv_adma_port_priv *pp;

			/* enable/disable ADMA on the ports appropriately */
			pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);

			pp = host->ports[0]->private_data;
			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
					   NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
			else
				tmp32 |= (NV_MCP_SATA_CFG_20_PORT0_EN |
					  NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
			pp = host->ports[1]->private_data;
			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN |
					   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
			else
				tmp32 |= (NV_MCP_SATA_CFG_20_PORT1_EN |
					  NV_MCP_SATA_CFG_20_PORT1_PWB_EN);

			pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
		}
	}

	ata_host_resume(host);
	return 0;
}
#endif
2441 static void nv_ck804_host_stop(struct ata_host
*host
)
2443 struct pci_dev
*pdev
= to_pci_dev(host
->dev
);
2446 /* disable SATA space for CK804 */
2447 pci_read_config_byte(pdev
, NV_MCP_SATA_CFG_20
, ®val
);
2448 regval
&= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN
;
2449 pci_write_config_byte(pdev
, NV_MCP_SATA_CFG_20
, regval
);
2452 static void nv_adma_host_stop(struct ata_host
*host
)
2454 struct pci_dev
*pdev
= to_pci_dev(host
->dev
);
2457 /* disable ADMA on the ports */
2458 pci_read_config_dword(pdev
, NV_MCP_SATA_CFG_20
, &tmp32
);
2459 tmp32
&= ~(NV_MCP_SATA_CFG_20_PORT0_EN
|
2460 NV_MCP_SATA_CFG_20_PORT0_PWB_EN
|
2461 NV_MCP_SATA_CFG_20_PORT1_EN
|
2462 NV_MCP_SATA_CFG_20_PORT1_PWB_EN
);
2464 pci_write_config_dword(pdev
, NV_MCP_SATA_CFG_20
, tmp32
);
2466 nv_ck804_host_stop(host
);
2469 module_pci_driver(nv_pci_driver
);
2471 module_param_named(adma
, adma_enabled
, bool, 0444);
2472 MODULE_PARM_DESC(adma
, "Enable use of ADMA (Default: false)");
2473 module_param_named(swncq
, swncq_enabled
, bool, 0444);
2474 MODULE_PARM_DESC(swncq
, "Enable use of SWNCQ (Default: true)");
2475 module_param_named(msi
, msi_enabled
, bool, 0444);
2476 MODULE_PARM_DESC(msi
, "Enable use of MSI (Default: false)");