sata_nv: Cleanup taskfile setup
drivers/ata/sata_nv.c
/*
 *  sata_nv.c - NVIDIA nForce SATA
 *
 *  Copyright 2004 NVIDIA Corp.  All rights reserved.
 *  Copyright 2004 Andrew Chew
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  No hardware documentation available outside of NVIDIA.
 *  This driver programs the NVIDIA SATA controller in a similar
 *  fashion as with other PCI IDE BMDMA controllers, with a few
 *  NV-specific details such as register offsets, SATA phy location,
 *  hotplug info, etc.
 *
 *  CK804/MCP04 controllers support an alternate programming interface
 *  similar to the ADMA specification (with some modifications).
 *  This allows the use of NCQ. Non-DMA-mapped ATA commands are still
 *  sent through the legacy interface.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME "sata_nv"
#define DRV_VERSION "3.3"

#define NV_ADMA_DMA_BOUNDARY 0xffffffffUL
enum {
	NV_MMIO_BAR = 5,

	NV_PORTS = 2,
	NV_PIO_MASK = 0x1f,
	NV_MWDMA_MASK = 0x07,
	NV_UDMA_MASK = 0x7f,
	NV_PORT0_SCR_REG_OFFSET = 0x00,
	NV_PORT1_SCR_REG_OFFSET = 0x40,

	/* INT_STATUS/ENABLE */
	NV_INT_STATUS = 0x10,
	NV_INT_ENABLE = 0x11,
	NV_INT_STATUS_CK804 = 0x440,
	NV_INT_ENABLE_CK804 = 0x441,

	/* INT_STATUS/ENABLE bits */
	NV_INT_DEV = 0x01,
	NV_INT_PM = 0x02,
	NV_INT_ADDED = 0x04,
	NV_INT_REMOVED = 0x08,

	NV_INT_PORT_SHIFT = 4,	/* each port occupies 4 bits */

	NV_INT_ALL = 0x0f,
	NV_INT_MASK = NV_INT_DEV |
		      NV_INT_ADDED | NV_INT_REMOVED,

	/* INT_CONFIG */
	NV_INT_CONFIG = 0x12,
	NV_INT_CONFIG_METHD = 0x01, // 0 = INT, 1 = SMI

	// For PCI config register 20
	NV_MCP_SATA_CFG_20 = 0x50,
	NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
	NV_MCP_SATA_CFG_20_PORT0_EN = (1 << 17),
	NV_MCP_SATA_CFG_20_PORT1_EN = (1 << 16),
	NV_MCP_SATA_CFG_20_PORT0_PWB_EN = (1 << 14),
	NV_MCP_SATA_CFG_20_PORT1_PWB_EN = (1 << 12),

	NV_ADMA_MAX_CPBS = 32,
	NV_ADMA_CPB_SZ = 128,
	NV_ADMA_APRD_SZ = 16,
	NV_ADMA_SGTBL_LEN = (1024 - NV_ADMA_CPB_SZ) /
			     NV_ADMA_APRD_SZ,
	NV_ADMA_SGTBL_TOTAL_LEN = NV_ADMA_SGTBL_LEN + 5,
	NV_ADMA_SGTBL_SZ = NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
	NV_ADMA_PORT_PRIV_DMA_SZ = NV_ADMA_MAX_CPBS *
				   (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),

	/* BAR5 offset to ADMA general registers */
	NV_ADMA_GEN = 0x400,
	NV_ADMA_GEN_CTL = 0x00,
	NV_ADMA_NOTIFIER_CLEAR = 0x30,

	/* BAR5 offset to ADMA ports */
	NV_ADMA_PORT = 0x480,

	/* size of ADMA port register space */
	NV_ADMA_PORT_SIZE = 0x100,

	/* ADMA port registers */
	NV_ADMA_CTL = 0x40,
	NV_ADMA_CPB_COUNT = 0x42,
	NV_ADMA_NEXT_CPB_IDX = 0x43,
	NV_ADMA_STAT = 0x44,
	NV_ADMA_CPB_BASE_LOW = 0x48,
	NV_ADMA_CPB_BASE_HIGH = 0x4C,
	NV_ADMA_APPEND = 0x50,
	NV_ADMA_NOTIFIER = 0x68,
	NV_ADMA_NOTIFIER_ERROR = 0x6C,

	/* NV_ADMA_CTL register bits */
	NV_ADMA_CTL_HOTPLUG_IEN = (1 << 0),
	NV_ADMA_CTL_CHANNEL_RESET = (1 << 5),
	NV_ADMA_CTL_GO = (1 << 7),
	NV_ADMA_CTL_AIEN = (1 << 8),
	NV_ADMA_CTL_READ_NON_COHERENT = (1 << 11),
	NV_ADMA_CTL_WRITE_NON_COHERENT = (1 << 12),

	/* CPB response flag bits */
	NV_CPB_RESP_DONE = (1 << 0),
	NV_CPB_RESP_ATA_ERR = (1 << 3),
	NV_CPB_RESP_CMD_ERR = (1 << 4),
	NV_CPB_RESP_CPB_ERR = (1 << 7),

	/* CPB control flag bits */
	NV_CPB_CTL_CPB_VALID = (1 << 0),
	NV_CPB_CTL_QUEUE = (1 << 1),
	NV_CPB_CTL_APRD_VALID = (1 << 2),
	NV_CPB_CTL_IEN = (1 << 3),
	NV_CPB_CTL_FPDMA = (1 << 4),

	/* APRD flags */
	NV_APRD_WRITE = (1 << 1),
	NV_APRD_END = (1 << 2),
	NV_APRD_CONT = (1 << 3),

	/* NV_ADMA_STAT flags */
	NV_ADMA_STAT_TIMEOUT = (1 << 0),
	NV_ADMA_STAT_HOTUNPLUG = (1 << 1),
	NV_ADMA_STAT_HOTPLUG = (1 << 2),
	NV_ADMA_STAT_CPBERR = (1 << 4),
	NV_ADMA_STAT_SERROR = (1 << 5),
	NV_ADMA_STAT_CMD_COMPLETE = (1 << 6),
	NV_ADMA_STAT_IDLE = (1 << 8),
	NV_ADMA_STAT_LEGACY = (1 << 9),
	NV_ADMA_STAT_STOPPED = (1 << 10),
	NV_ADMA_STAT_DONE = (1 << 12),
	NV_ADMA_STAT_ERR = NV_ADMA_STAT_CPBERR |
			   NV_ADMA_STAT_TIMEOUT,

	/* port flags */
	NV_ADMA_PORT_REGISTER_MODE = (1 << 0),
	NV_ADMA_ATAPI_SETUP_COMPLETE = (1 << 1),
};
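
/*
 * A quick sanity check on the ADMA sizing above (derived purely from the
 * constants, not from NVIDIA documentation): each command tag owns a 1KB
 * chunk holding one 128-byte CPB plus (1024 - 128) / 16 = 56 external
 * APRD entries, so a command can carry up to 56 + 5 inline = 61 SG
 * segments, and the whole per-port DMA area comes to
 * 32 * (128 + 56 * 16) = 32KB.
 */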
/* ADMA Physical Region Descriptor - one SG segment */
struct nv_adma_prd {
	__le64 addr;
	__le32 len;
	u8 flags;
	u8 packet_len;
	__le16 reserved;
};

enum nv_adma_regbits {
	CMDEND = (1 << 15),	/* end of command list */
	WNB    = (1 << 14),	/* wait-not-BSY */
	IGN    = (1 << 13),	/* ignore this entry */
	CS1n   = (1 << (4 + 8)), /* std. PATA signals follow... */
	DA2    = (1 << (2 + 8)),
	DA1    = (1 << (1 + 8)),
	DA0    = (1 << (0 + 8)),
};

/* ADMA Command Parameter Block
   The first 5 SG segments are stored inside the Command Parameter Block itself.
   If there are more than 5 segments the remainder are stored in a separate
   memory area indicated by next_aprd. */
struct nv_adma_cpb {
	u8 resp_flags;		/* 0 */
	u8 reserved1;		/* 1 */
	u8 ctl_flags;		/* 2 */
	/* len is length of taskfile in 64 bit words */
	u8 len;			/* 3 */
	u8 tag;			/* 4 */
	u8 next_cpb_idx;	/* 5 */
	__le16 reserved2;	/* 6-7 */
	__le16 tf[12];		/* 8-31 */
	struct nv_adma_prd aprd[5];	/* 32-111 */
	__le64 next_aprd;	/* 112-119 */
	__le64 reserved3;	/* 120-127 */
};

struct nv_adma_port_priv {
	struct nv_adma_cpb *cpb;
	dma_addr_t cpb_dma;
	struct nv_adma_prd *aprd;
	dma_addr_t aprd_dma;
	void __iomem *ctl_block;
	void __iomem *gen_block;
	void __iomem *notifier_clear_block;
	u8 flags;
	int last_issue_ncq;
};

struct nv_host_priv {
	unsigned long type;
};

#define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & (1 << (19 + (12 * (PORT)))))
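
/*
 * For reference (inferred from the macro itself, not from vendor
 * documentation): the per-port ADMA interrupt-pending flags sit 12 bits
 * apart in the general control/status word, so NV_ADMA_CHECK_INTR tests
 * bit 19 for port 0 and bit 31 for port 1.
 */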
static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
static void nv_remove_one (struct pci_dev *pdev);
static int nv_pci_device_resume(struct pci_dev *pdev);
static void nv_ck804_host_stop(struct ata_host *host);
static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg);
static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);

static void nv_nf2_freeze(struct ata_port *ap);
static void nv_nf2_thaw(struct ata_port *ap);
static void nv_ck804_freeze(struct ata_port *ap);
static void nv_ck804_thaw(struct ata_port *ap);
static void nv_error_handler(struct ata_port *ap);
static int nv_adma_slave_config(struct scsi_device *sdev);
static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
static void nv_adma_irq_clear(struct ata_port *ap);
static int nv_adma_port_start(struct ata_port *ap);
static void nv_adma_port_stop(struct ata_port *ap);
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg);
static int nv_adma_port_resume(struct ata_port *ap);
static void nv_adma_error_handler(struct ata_port *ap);
static void nv_adma_host_stop(struct ata_host *host);
static void nv_adma_bmdma_setup(struct ata_queued_cmd *qc);
static void nv_adma_bmdma_start(struct ata_queued_cmd *qc);
static void nv_adma_bmdma_stop(struct ata_queued_cmd *qc);
static u8 nv_adma_bmdma_status(struct ata_port *ap);

enum nv_host_type
{
	GENERIC,
	NFORCE2,
	NFORCE3 = NFORCE2,	/* NF2 == NF3 as far as sata_nv is concerned */
	CK804,
	ADMA
};

static const struct pci_device_id nv_pci_tbl[] = {
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },
	{ PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
		PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_IDE<<8, 0xffff00, GENERIC },
	{ PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
		PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_RAID<<8, 0xffff00, GENERIC },

	{ } /* terminate list */
};
static struct pci_driver nv_pci_driver = {
	.name = DRV_NAME,
	.id_table = nv_pci_tbl,
	.probe = nv_init_one,
	.suspend = ata_pci_device_suspend,
	.resume = nv_pci_device_resume,
	.remove = nv_remove_one,
};

static struct scsi_host_template nv_sht = {
	.module = THIS_MODULE,
	.name = DRV_NAME,
	.ioctl = ata_scsi_ioctl,
	.queuecommand = ata_scsi_queuecmd,
	.can_queue = ATA_DEF_QUEUE,
	.this_id = ATA_SHT_THIS_ID,
	.sg_tablesize = LIBATA_MAX_PRD,
	.cmd_per_lun = ATA_SHT_CMD_PER_LUN,
	.emulated = ATA_SHT_EMULATED,
	.use_clustering = ATA_SHT_USE_CLUSTERING,
	.proc_name = DRV_NAME,
	.dma_boundary = ATA_DMA_BOUNDARY,
	.slave_configure = ata_scsi_slave_config,
	.slave_destroy = ata_scsi_slave_destroy,
	.bios_param = ata_std_bios_param,
	.suspend = ata_scsi_device_suspend,
	.resume = ata_scsi_device_resume,
};

static struct scsi_host_template nv_adma_sht = {
	.module = THIS_MODULE,
	.name = DRV_NAME,
	.ioctl = ata_scsi_ioctl,
	.queuecommand = ata_scsi_queuecmd,
	.can_queue = NV_ADMA_MAX_CPBS,
	.this_id = ATA_SHT_THIS_ID,
	.sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN,
	.cmd_per_lun = ATA_SHT_CMD_PER_LUN,
	.emulated = ATA_SHT_EMULATED,
	.use_clustering = ATA_SHT_USE_CLUSTERING,
	.proc_name = DRV_NAME,
	.dma_boundary = NV_ADMA_DMA_BOUNDARY,
	.slave_configure = nv_adma_slave_config,
	.slave_destroy = ata_scsi_slave_destroy,
	.bios_param = ata_std_bios_param,
	.suspend = ata_scsi_device_suspend,
	.resume = ata_scsi_device_resume,
};
static const struct ata_port_operations nv_generic_ops = {
	.port_disable = ata_port_disable,
	.tf_load = ata_tf_load,
	.tf_read = ata_tf_read,
	.exec_command = ata_exec_command,
	.check_status = ata_check_status,
	.dev_select = ata_std_dev_select,
	.bmdma_setup = ata_bmdma_setup,
	.bmdma_start = ata_bmdma_start,
	.bmdma_stop = ata_bmdma_stop,
	.bmdma_status = ata_bmdma_status,
	.qc_prep = ata_qc_prep,
	.qc_issue = ata_qc_issue_prot,
	.freeze = ata_bmdma_freeze,
	.thaw = ata_bmdma_thaw,
	.error_handler = nv_error_handler,
	.post_internal_cmd = ata_bmdma_post_internal_cmd,
	.data_xfer = ata_data_xfer,
	.irq_handler = nv_generic_interrupt,
	.irq_clear = ata_bmdma_irq_clear,
	.irq_on = ata_irq_on,
	.irq_ack = ata_irq_ack,
	.scr_read = nv_scr_read,
	.scr_write = nv_scr_write,
	.port_start = ata_port_start,
};

static const struct ata_port_operations nv_nf2_ops = {
	.port_disable = ata_port_disable,
	.tf_load = ata_tf_load,
	.tf_read = ata_tf_read,
	.exec_command = ata_exec_command,
	.check_status = ata_check_status,
	.dev_select = ata_std_dev_select,
	.bmdma_setup = ata_bmdma_setup,
	.bmdma_start = ata_bmdma_start,
	.bmdma_stop = ata_bmdma_stop,
	.bmdma_status = ata_bmdma_status,
	.qc_prep = ata_qc_prep,
	.qc_issue = ata_qc_issue_prot,
	.freeze = nv_nf2_freeze,
	.thaw = nv_nf2_thaw,
	.error_handler = nv_error_handler,
	.post_internal_cmd = ata_bmdma_post_internal_cmd,
	.data_xfer = ata_data_xfer,
	.irq_handler = nv_nf2_interrupt,
	.irq_clear = ata_bmdma_irq_clear,
	.irq_on = ata_irq_on,
	.irq_ack = ata_irq_ack,
	.scr_read = nv_scr_read,
	.scr_write = nv_scr_write,
	.port_start = ata_port_start,
};

static const struct ata_port_operations nv_ck804_ops = {
	.port_disable = ata_port_disable,
	.tf_load = ata_tf_load,
	.tf_read = ata_tf_read,
	.exec_command = ata_exec_command,
	.check_status = ata_check_status,
	.dev_select = ata_std_dev_select,
	.bmdma_setup = ata_bmdma_setup,
	.bmdma_start = ata_bmdma_start,
	.bmdma_stop = ata_bmdma_stop,
	.bmdma_status = ata_bmdma_status,
	.qc_prep = ata_qc_prep,
	.qc_issue = ata_qc_issue_prot,
	.freeze = nv_ck804_freeze,
	.thaw = nv_ck804_thaw,
	.error_handler = nv_error_handler,
	.post_internal_cmd = ata_bmdma_post_internal_cmd,
	.data_xfer = ata_data_xfer,
	.irq_handler = nv_ck804_interrupt,
	.irq_clear = ata_bmdma_irq_clear,
	.irq_on = ata_irq_on,
	.irq_ack = ata_irq_ack,
	.scr_read = nv_scr_read,
	.scr_write = nv_scr_write,
	.port_start = ata_port_start,
	.host_stop = nv_ck804_host_stop,
};

static const struct ata_port_operations nv_adma_ops = {
	.port_disable = ata_port_disable,
	.tf_load = ata_tf_load,
	.tf_read = ata_tf_read,
	.check_atapi_dma = nv_adma_check_atapi_dma,
	.exec_command = ata_exec_command,
	.check_status = ata_check_status,
	.dev_select = ata_std_dev_select,
	.bmdma_setup = nv_adma_bmdma_setup,
	.bmdma_start = nv_adma_bmdma_start,
	.bmdma_stop = nv_adma_bmdma_stop,
	.bmdma_status = nv_adma_bmdma_status,
	.qc_prep = nv_adma_qc_prep,
	.qc_issue = nv_adma_qc_issue,
	.freeze = nv_ck804_freeze,
	.thaw = nv_ck804_thaw,
	.error_handler = nv_adma_error_handler,
	.post_internal_cmd = nv_adma_bmdma_stop,
	.data_xfer = ata_data_xfer,
	.irq_handler = nv_adma_interrupt,
	.irq_clear = nv_adma_irq_clear,
	.irq_on = ata_irq_on,
	.irq_ack = ata_irq_ack,
	.scr_read = nv_scr_read,
	.scr_write = nv_scr_write,
	.port_start = nv_adma_port_start,
	.port_stop = nv_adma_port_stop,
	.port_suspend = nv_adma_port_suspend,
	.port_resume = nv_adma_port_resume,
	.host_stop = nv_adma_host_stop,
};
static struct ata_port_info nv_port_info[] = {
	/* generic */
	{
		.sht = &nv_sht,
		.flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
			 ATA_FLAG_HRST_TO_RESUME,
		.pio_mask = NV_PIO_MASK,
		.mwdma_mask = NV_MWDMA_MASK,
		.udma_mask = NV_UDMA_MASK,
		.port_ops = &nv_generic_ops,
	},
	/* nforce2/3 */
	{
		.sht = &nv_sht,
		.flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
			 ATA_FLAG_HRST_TO_RESUME,
		.pio_mask = NV_PIO_MASK,
		.mwdma_mask = NV_MWDMA_MASK,
		.udma_mask = NV_UDMA_MASK,
		.port_ops = &nv_nf2_ops,
	},
	/* ck804 */
	{
		.sht = &nv_sht,
		.flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
			 ATA_FLAG_HRST_TO_RESUME,
		.pio_mask = NV_PIO_MASK,
		.mwdma_mask = NV_MWDMA_MASK,
		.udma_mask = NV_UDMA_MASK,
		.port_ops = &nv_ck804_ops,
	},
	/* ADMA */
	{
		.sht = &nv_adma_sht,
		.flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
			 ATA_FLAG_HRST_TO_RESUME |
			 ATA_FLAG_MMIO | ATA_FLAG_NCQ,
		.pio_mask = NV_PIO_MASK,
		.mwdma_mask = NV_MWDMA_MASK,
		.udma_mask = NV_UDMA_MASK,
		.port_ops = &nv_adma_ops,
	},
};

MODULE_AUTHOR("NVIDIA");
MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

static int adma_enabled = 1;
static void nv_adma_register_mode(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp, status;
	int count = 0;

	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
		return;

	status = readw(mmio + NV_ADMA_STAT);
	while(!(status & NV_ADMA_STAT_IDLE) && count < 20) {
		ndelay(50);
		status = readw(mmio + NV_ADMA_STAT);
		count++;
	}
	if(count == 20)
		ata_port_printk(ap, KERN_WARNING,
			"timeout waiting for ADMA IDLE, stat=0x%hx\n",
			status);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

	count = 0;
	status = readw(mmio + NV_ADMA_STAT);
	while(!(status & NV_ADMA_STAT_LEGACY) && count < 20) {
		ndelay(50);
		status = readw(mmio + NV_ADMA_STAT);
		count++;
	}
	if(count == 20)
		ata_port_printk(ap, KERN_WARNING,
			"timeout waiting for ADMA LEGACY, stat=0x%hx\n",
			status);

	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
}
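
/*
 * A note on the polling above, inferred from the constants in the code:
 * each wait loop allows at most 20 iterations of ndelay(50), i.e. roughly
 * a microsecond, for the controller to report the expected state before
 * the driver warns and proceeds anyway. nv_adma_mode() below is the
 * mirror-image transition back out of register mode.
 */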
static void nv_adma_mode(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp, status;
	int count = 0;

	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
		return;

	WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

	status = readw(mmio + NV_ADMA_STAT);
	while(((status & NV_ADMA_STAT_LEGACY) ||
	      !(status & NV_ADMA_STAT_IDLE)) && count < 20) {
		ndelay(50);
		status = readw(mmio + NV_ADMA_STAT);
		count++;
	}
	if(count == 20)
		ata_port_printk(ap, KERN_WARNING,
			"timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n",
			status);

	pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
}
static int nv_adma_slave_config(struct scsi_device *sdev)
{
	struct ata_port *ap = ata_shost_to_port(sdev->host);
	struct nv_adma_port_priv *pp = ap->private_data;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	u64 bounce_limit;
	unsigned long segment_boundary;
	unsigned short sg_tablesize;
	int rc;
	int adma_enable;
	u32 current_reg, new_reg, config_mask;

	rc = ata_scsi_slave_config(sdev);

	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
		/* Not a proper libata device, ignore */
		return rc;

	if (ap->device[sdev->id].class == ATA_DEV_ATAPI) {
		/*
		 * NVIDIA reports that ADMA mode does not support ATAPI commands.
		 * Therefore ATAPI commands are sent through the legacy interface.
		 * However, the legacy interface only supports 32-bit DMA.
		 * Restrict DMA parameters as required by the legacy interface
		 * when an ATAPI device is connected.
		 */
		bounce_limit = ATA_DMA_MASK;
		segment_boundary = ATA_DMA_BOUNDARY;
		/* Subtract 1 since an extra entry may be needed for padding, see
		   libata-scsi.c */
		sg_tablesize = LIBATA_MAX_PRD - 1;

		/* Since the legacy DMA engine is in use, we need to disable ADMA
		   on the port. */
		adma_enable = 0;
		nv_adma_register_mode(ap);
	}
	else {
		bounce_limit = *ap->dev->dma_mask;
		segment_boundary = NV_ADMA_DMA_BOUNDARY;
		sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
		adma_enable = 1;
	}

	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);

	if(ap->port_no == 1)
		config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
			      NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
	else
		config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
			      NV_MCP_SATA_CFG_20_PORT0_PWB_EN;

	if(adma_enable) {
		new_reg = current_reg | config_mask;
		pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
	}
	else {
		new_reg = current_reg & ~config_mask;
		pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
	}

	if(current_reg != new_reg)
		pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);

	blk_queue_bounce_limit(sdev->request_queue, bounce_limit);
	blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
	blk_queue_max_hw_segments(sdev->request_queue, sg_tablesize);
	ata_port_printk(ap, KERN_INFO,
		"bounce limit 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
		(unsigned long long)bounce_limit, segment_boundary, sg_tablesize);
	return rc;
}
static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
}

static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
{
	unsigned int idx = 0;

	if(tf->flags & ATA_TFLAG_ISADDR) {
		if (tf->flags & ATA_TFLAG_LBA48) {
			cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->hob_feature | WNB);
			cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
			cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->hob_lbal);
			cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->hob_lbam);
			cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->hob_lbah);
			cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature);
		} else
			cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature | WNB);

		cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->nsect);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->lbal);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->lbam);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->lbah);
	}

	if(tf->flags & ATA_TFLAG_DEVICE)
		cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device);

	cpb[idx++] = cpu_to_le16((ATA_REG_CMD << 8) | tf->command | CMDEND);

	while(idx < 12)
		cpb[idx++] = cpu_to_le16(IGN);

	return idx;
}
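
/*
 * A worked example of the encoding above (illustrative only; the exact
 * values follow from the ATA_REG_* constants): each 16-bit CPB taskfile
 * word carries the data byte in bits 0-7, the register select in the
 * DA0-DA2/CS1n range (bits 8-12), and CMDEND/WNB/IGN as control bits.
 * For a non-LBA48 command the sequence is feature|WNB, nsect, lbal,
 * lbam, lbah, device, command|CMDEND, padded with IGN words up to the
 * fixed 12 entries.
 */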
static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	u8 flags = pp->cpb[cpb_num].resp_flags;

	VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);

	if (unlikely((force_err ||
		     flags & (NV_CPB_RESP_ATA_ERR |
			      NV_CPB_RESP_CMD_ERR |
			      NV_CPB_RESP_CPB_ERR)))) {
		struct ata_eh_info *ehi = &ap->eh_info;
		int freeze = 0;

		ata_ehi_clear_desc(ehi);
		ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x", flags );
		if (flags & NV_CPB_RESP_ATA_ERR) {
			ata_ehi_push_desc(ehi, ": ATA error");
			ehi->err_mask |= AC_ERR_DEV;
		} else if (flags & NV_CPB_RESP_CMD_ERR) {
			ata_ehi_push_desc(ehi, ": CMD error");
			ehi->err_mask |= AC_ERR_DEV;
		} else if (flags & NV_CPB_RESP_CPB_ERR) {
			ata_ehi_push_desc(ehi, ": CPB error");
			ehi->err_mask |= AC_ERR_SYSTEM;
			freeze = 1;
		} else {
			/* notifier error, but no error in CPB flags? */
			ehi->err_mask |= AC_ERR_OTHER;
			freeze = 1;
		}
		/* Kill all commands. EH will determine what actually failed. */
		if (freeze)
			ata_port_freeze(ap);
		else
			ata_port_abort(ap);
		return 1;
	}

	if (flags & NV_CPB_RESP_DONE) {
		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, cpb_num);
		VPRINTK("CPB flags done, flags=0x%x\n", flags);
		if (likely(qc)) {
			/* Grab the ATA port status for non-NCQ commands.
			   For NCQ commands the current status may have nothing to do with
			   the command just completed. */
			if (qc->tf.protocol != ATA_PROT_NCQ) {
				u8 ata_status = readb(pp->ctl_block + (ATA_REG_STATUS * 4));
				qc->err_mask |= ac_err_mask(ata_status);
			}
			DPRINTK("Completing qc from tag %d with err_mask %u\n", cpb_num,
				qc->err_mask);
			ata_qc_complete(qc);
		}
	}
	return 0;
}

static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
{
	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);

	/* freeze if hotplugged */
	if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
		ata_port_freeze(ap);
		return 1;
	}

	/* bail out if not our interrupt */
	if (!(irq_stat & NV_INT_DEV))
		return 0;

	/* DEV interrupt w/ no active qc? */
	if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
		ata_check_status(ap);
		return 1;
	}

	/* handle interrupt */
	return ata_host_intr(ap, qc);
}
static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	int i, handled = 0;
	u32 notifier_clears[2];

	spin_lock(&host->lock);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		notifier_clears[i] = 0;

		if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
			struct nv_adma_port_priv *pp = ap->private_data;
			void __iomem *mmio = pp->ctl_block;
			u16 status;
			u32 gen_ctl;
			u32 notifier, notifier_error;

			/* if in ATA register mode, use standard ata interrupt handler */
			if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
				u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
					>> (NV_INT_PORT_SHIFT * i);
				if(ata_tag_valid(ap->active_tag))
					/* NV_INT_DEV indication seems unreliable at times
					   at least in ADMA mode. Force it on always when a
					   command is active, to prevent losing interrupts. */
					irq_stat |= NV_INT_DEV;
				handled += nv_host_intr(ap, irq_stat);
				continue;
			}

			notifier = readl(mmio + NV_ADMA_NOTIFIER);
			notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
			notifier_clears[i] = notifier | notifier_error;

			gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);

			if( !NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
			    !notifier_error)
				/* Nothing to do */
				continue;

			status = readw(mmio + NV_ADMA_STAT);

			/* Clear status. Ensure the controller sees the clearing before we start
			   looking at any of the CPB statuses, so that any CPB completions after
			   this point in the handler will raise another interrupt. */
			writew(status, mmio + NV_ADMA_STAT);
			readw(mmio + NV_ADMA_STAT); /* flush posted write */
			rmb();

			handled++; /* irq handled if we got here */

			/* freeze if hotplugged or controller error */
			if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
					       NV_ADMA_STAT_HOTUNPLUG |
					       NV_ADMA_STAT_TIMEOUT |
					       NV_ADMA_STAT_SERROR))) {
				struct ata_eh_info *ehi = &ap->eh_info;

				ata_ehi_clear_desc(ehi);
				ata_ehi_push_desc(ehi, "ADMA status 0x%08x", status );
				if (status & NV_ADMA_STAT_TIMEOUT) {
					ehi->err_mask |= AC_ERR_SYSTEM;
					ata_ehi_push_desc(ehi, ": timeout");
				} else if (status & NV_ADMA_STAT_HOTPLUG) {
					ata_ehi_hotplugged(ehi);
					ata_ehi_push_desc(ehi, ": hotplug");
				} else if (status & NV_ADMA_STAT_HOTUNPLUG) {
					ata_ehi_hotplugged(ehi);
					ata_ehi_push_desc(ehi, ": hot unplug");
				} else if (status & NV_ADMA_STAT_SERROR) {
					/* let libata analyze SError and figure out the cause */
					ata_ehi_push_desc(ehi, ": SError");
				}
				ata_port_freeze(ap);
				continue;
			}

			if (status & (NV_ADMA_STAT_DONE |
				      NV_ADMA_STAT_CPBERR)) {
				/* check CPBs for completed commands */

				if (ata_tag_valid(ap->active_tag)) {
					/* Non-NCQ command */
					nv_adma_check_cpb(ap, ap->active_tag,
						notifier_error & (1 << ap->active_tag));
				} else {
					int pos, error = 0;
					u32 active = ap->sactive;

					while ((pos = ffs(active)) && !error) {
						pos--;
						error = nv_adma_check_cpb(ap, pos,
							notifier_error & (1 << pos) );
						active &= ~(1 << pos );
					}
				}
			}
		}
	}

	if(notifier_clears[0] || notifier_clears[1]) {
		/* Note: Both notifier clear registers must be written
		   if either is set, even if one is zero, according to NVIDIA. */
		struct nv_adma_port_priv *pp = host->ports[0]->private_data;
		writel(notifier_clears[0], pp->notifier_clear_block);
		pp = host->ports[1]->private_data;
		writel(notifier_clears[1], pp->notifier_clear_block);
	}

	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}
static void nv_adma_irq_clear(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 status = readw(mmio + NV_ADMA_STAT);
	u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
	u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
	void __iomem *dma_stat_addr = ap->ioaddr.bmdma_addr + ATA_DMA_STATUS;

	/* clear ADMA status */
	writew(status, mmio + NV_ADMA_STAT);
	writel(notifier | notifier_error,
	       pp->notifier_clear_block);

	/* clear legacy status */
	iowrite8(ioread8(dma_stat_addr), dma_stat_addr);
}

static void nv_adma_bmdma_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct nv_adma_port_priv *pp = ap->private_data;
	u8 dmactl;

	if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
		WARN_ON(1);
		return;
	}

	/* load PRD table addr. */
	iowrite32(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);

	/* specify data direction, triple-check start bit is clear */
	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
	dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
	if (!rw)
		dmactl |= ATA_DMA_WR;

	iowrite8(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);

	/* issue r/w command */
	ata_exec_command(ap, &qc->tf);
}

static void nv_adma_bmdma_start(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct nv_adma_port_priv *pp = ap->private_data;
	u8 dmactl;

	if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
		WARN_ON(1);
		return;
	}

	/* start host DMA transaction */
	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
	iowrite8(dmactl | ATA_DMA_START,
		 ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
}

static void nv_adma_bmdma_stop(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct nv_adma_port_priv *pp = ap->private_data;

	if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
		return;

	/* clear start/stop bit */
	iowrite8(ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD) & ~ATA_DMA_START,
		 ap->ioaddr.bmdma_addr + ATA_DMA_CMD);

	/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
	ata_altstatus(ap); /* dummy read */
}

static u8 nv_adma_bmdma_status(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;

	WARN_ON(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE));

	return ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
}
static int nv_adma_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct nv_adma_port_priv *pp;
	int rc;
	void *mem;
	dma_addr_t mem_dma;
	void __iomem *mmio;
	u16 tmp;

	VPRINTK("ENTER\n");

	rc = ata_port_start(ap);
	if (rc)
		return rc;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT +
	       ap->port_no * NV_ADMA_PORT_SIZE;
	pp->ctl_block = mmio;
	pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN;
	pp->notifier_clear_block = pp->gen_block +
	       NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);

	mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
				  &mem_dma, GFP_KERNEL);
	if (!mem)
		return -ENOMEM;
	memset(mem, 0, NV_ADMA_PORT_PRIV_DMA_SZ);

	/*
	 * First item in chunk of DMA memory:
	 * 128-byte command parameter block (CPB)
	 * one for each command tag
	 */
	pp->cpb = mem;
	pp->cpb_dma = mem_dma;

	writel(mem_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW);
	/* the double 16-bit shift writes the high word of the DMA address
	   and stays well-defined when dma_addr_t is only 32 bits wide */
	writel((mem_dma >> 16) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH);

	mem += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
	mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;

	/*
	 * Second item: block of ADMA_SGTBL_LEN s/g entries
	 */
	pp->aprd = mem;
	pp->aprd_dma = mem_dma;

	ap->private_data = pp;

	/* clear any outstanding interrupt conditions */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* initialize port variables */
	pp->flags = NV_ADMA_PORT_REGISTER_MODE;

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* clear GO for register mode, enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN, mmio + NV_ADMA_CTL);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readl(mmio + NV_ADMA_CTL); /* flush posted write */
	udelay(1);
	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readl(mmio + NV_ADMA_CTL); /* flush posted write */

	return 0;
}
static void nv_adma_port_stop(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;

	VPRINTK("ENTER\n");
	writew(0, mmio + NV_ADMA_CTL);
}

static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;

	/* Go to register mode - clears GO */
	nv_adma_register_mode(ap);

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* disable interrupt, shut down port */
	writew(0, mmio + NV_ADMA_CTL);

	return 0;
}

static int nv_adma_port_resume(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp;

	/* set CPB block location */
	writel(pp->cpb_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW);
	writel((pp->cpb_dma >> 16) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH);

	/* clear any outstanding interrupt conditions */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* initialize port variables */
	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* clear GO for register mode, enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN, mmio + NV_ADMA_CTL);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readl(mmio + NV_ADMA_CTL); /* flush posted write */
	udelay(1);
	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readl(mmio + NV_ADMA_CTL); /* flush posted write */

	return 0;
}
static void nv_adma_setup_port(struct ata_probe_ent *probe_ent, unsigned int port)
{
	void __iomem *mmio = probe_ent->iomap[NV_MMIO_BAR];
	struct ata_ioports *ioport = &probe_ent->port[port];

	VPRINTK("ENTER\n");

	mmio += NV_ADMA_PORT + port * NV_ADMA_PORT_SIZE;

	ioport->cmd_addr = mmio;
	ioport->data_addr = mmio + (ATA_REG_DATA * 4);
	ioport->error_addr =
	ioport->feature_addr = mmio + (ATA_REG_ERR * 4);
	ioport->nsect_addr = mmio + (ATA_REG_NSECT * 4);
	ioport->lbal_addr = mmio + (ATA_REG_LBAL * 4);
	ioport->lbam_addr = mmio + (ATA_REG_LBAM * 4);
	ioport->lbah_addr = mmio + (ATA_REG_LBAH * 4);
	ioport->device_addr = mmio + (ATA_REG_DEVICE * 4);
	ioport->status_addr =
	ioport->command_addr = mmio + (ATA_REG_STATUS * 4);
	ioport->altstatus_addr =
	ioport->ctl_addr = mmio + 0x20;
}

static int nv_adma_host_init(struct ata_probe_ent *probe_ent)
{
	struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
	unsigned int i;
	u32 tmp32;

	VPRINTK("ENTER\n");

	/* enable ADMA on the ports */
	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
	tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
		 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
		 NV_MCP_SATA_CFG_20_PORT1_EN |
		 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;

	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);

	for (i = 0; i < probe_ent->n_ports; i++)
		nv_adma_setup_port(probe_ent, i);

	return 0;
}
static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
			      struct scatterlist *sg,
			      int idx,
			      struct nv_adma_prd *aprd)
{
	u8 flags = 0;
	if (qc->tf.flags & ATA_TFLAG_WRITE)
		flags |= NV_APRD_WRITE;
	if (idx == qc->n_elem - 1)
		flags |= NV_APRD_END;
	else if (idx != 4)
		flags |= NV_APRD_CONT;

	aprd->addr = cpu_to_le64(((u64)sg_dma_address(sg)));
	aprd->len = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
	aprd->flags = flags;
	aprd->packet_len = 0;
}

static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	unsigned int idx;
	struct nv_adma_prd *aprd;
	struct scatterlist *sg;

	VPRINTK("ENTER\n");

	idx = 0;

	ata_for_each_sg(sg, qc) {
		aprd = (idx < 5) ? &cpb->aprd[idx] : &pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (idx-5)];
		nv_adma_fill_aprd(qc, sg, idx, aprd);
		idx++;
	}
	if (idx > 5)
		cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->tag)));
	else
		cpb->next_aprd = cpu_to_le64(0);
}
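
/*
 * Layout note, following directly from the code above: the first five
 * APRDs of a command live inside the CPB itself; only when a command has
 * six or more SG entries does next_aprd point at that tag's slice of the
 * external APRD table (aprd_dma + NV_ADMA_SGTBL_SZ * tag).
 */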
static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;

	/* ADMA engine can only be used for non-ATAPI DMA commands,
	   or interrupt-driven no-data commands. */
	if((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
	   (qc->tf.flags & ATA_TFLAG_POLLING))
		return 1;

	if((qc->flags & ATA_QCFLAG_DMAMAP) ||
	   (qc->tf.protocol == ATA_PROT_NODATA))
		return 0;

	return 1;
}
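
/*
 * Summarizing the checks above: register (legacy) mode is forced for
 * ports configured for ATAPI and for polled commands; DMA-mapped
 * commands and interrupt-driven no-data commands are allowed to use the
 * ADMA engine; everything else (e.g. PIO data transfers) falls back to
 * register mode as well.
 */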
static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	struct nv_adma_cpb *cpb = &pp->cpb[qc->tag];
	u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
		       NV_CPB_CTL_IEN;

	if (nv_adma_use_reg_mode(qc)) {
		nv_adma_register_mode(qc->ap);
		ata_qc_prep(qc);
		return;
	}

	cpb->resp_flags = NV_CPB_RESP_DONE;
	wmb();
	cpb->ctl_flags = 0;
	wmb();

	cpb->len = 3;
	cpb->tag = qc->tag;
	cpb->next_cpb_idx = 0;

	/* turn on NCQ flags for NCQ commands */
	if (qc->tf.protocol == ATA_PROT_NCQ)
		ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;

	VPRINTK("qc->flags = 0x%lx\n", qc->flags);

	nv_adma_tf_to_cpb(&qc->tf, cpb->tf);

	if(qc->flags & ATA_QCFLAG_DMAMAP) {
		nv_adma_fill_sg(qc, cpb);
		ctl_flags |= NV_CPB_CTL_APRD_VALID;
	} else
		memset(&cpb->aprd[0], 0, sizeof(struct nv_adma_prd) * 5);

	/* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID until we are
	   finished filling in all of the contents */
	wmb();
	cpb->ctl_flags = ctl_flags;
	wmb();
	cpb->resp_flags = 0;
}

static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ);

	VPRINTK("ENTER\n");

	if (nv_adma_use_reg_mode(qc)) {
		/* use ATA register mode */
		VPRINTK("using ATA register mode: 0x%lx\n", qc->flags);
		nv_adma_register_mode(qc->ap);
		return ata_qc_issue_prot(qc);
	} else
		nv_adma_mode(qc->ap);

	/* write append register, command tag in lower 8 bits
	   and (number of cpbs to append -1) in top 8 bits */
	wmb();

	if(curr_ncq != pp->last_issue_ncq) {
		/* Seems to need some delay before switching between NCQ and non-NCQ
		   commands, else we get command timeouts and such. */
		udelay(20);
		pp->last_issue_ncq = curr_ncq;
	}

	writew(qc->tag, mmio + NV_ADMA_APPEND);

	DPRINTK("Issued tag %u\n", qc->tag);

	return 0;
}
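
/*
 * On the APPEND write above: per the in-code comment, the register takes
 * the starting tag in its low byte and (number of CPBs to append - 1) in
 * its high byte, so writew(qc->tag, ...) with a zero high byte hands the
 * controller exactly one new CPB per issue.
 */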
static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int i;
	unsigned int handled = 0;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap;

		ap = host->ports[i];
		if (ap &&
		    !(ap->flags & ATA_FLAG_DISABLED)) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->active_tag);
			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
				handled += ata_host_intr(ap, qc);
			else
				// No request pending? Clear interrupt status
				// anyway, in case there's one pending.
				ap->ops->check_status(ap);
		}
	}

	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_RETVAL(handled);
}

static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
{
	int i, handled = 0;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap && !(ap->flags & ATA_FLAG_DISABLED))
			handled += nv_host_intr(ap, irq_stat);

		irq_stat >>= NV_INT_PORT_SHIFT;
	}

	return IRQ_RETVAL(handled);
}
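
/*
 * The shift in the loop above walks the per-port status nibbles: with
 * NV_INT_PORT_SHIFT == 4, port 0's flags arrive in bits 0-3 of irq_stat
 * and port 1's in bits 4-7, so each iteration moves the next port's
 * nibble down into the position nv_host_intr() expects.
 */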
static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	u8 irq_stat;
	irqreturn_t ret;

	spin_lock(&host->lock);
	irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
	ret = nv_do_interrupt(host, irq_stat);
	spin_unlock(&host->lock);

	return ret;
}

static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	u8 irq_stat;
	irqreturn_t ret;

	spin_lock(&host->lock);
	irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
	ret = nv_do_interrupt(host, irq_stat);
	spin_unlock(&host->lock);

	return ret;
}

static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg)
{
	if (sc_reg > SCR_CONTROL)
		return 0xffffffffU;

	return ioread32(ap->ioaddr.scr_addr + (sc_reg * 4));
}

static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
{
	if (sc_reg > SCR_CONTROL)
		return;

	iowrite32(val, ap->ioaddr.scr_addr + (sc_reg * 4));
}
static void nv_nf2_freeze(struct ata_port *ap)
{
	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	mask = ioread8(scr_addr + NV_INT_ENABLE);
	mask &= ~(NV_INT_ALL << shift);
	iowrite8(mask, scr_addr + NV_INT_ENABLE);
}

static void nv_nf2_thaw(struct ata_port *ap)
{
	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	iowrite8(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);

	mask = ioread8(scr_addr + NV_INT_ENABLE);
	mask |= (NV_INT_MASK << shift);
	iowrite8(mask, scr_addr + NV_INT_ENABLE);
}

static void nv_ck804_freeze(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
	mask &= ~(NV_INT_ALL << shift);
	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
}

static void nv_ck804_thaw(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);

	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
	mask |= (NV_INT_MASK << shift);
	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
}
static int nv_hardreset(struct ata_port *ap, unsigned int *class)
{
	unsigned int dummy;

	/* SATA hardreset fails to retrieve proper device signature on
	 * some controllers.  Don't classify on hardreset.  For more
	 * info, see http://bugme.osdl.org/show_bug.cgi?id=3352
	 */
	return sata_std_hardreset(ap, &dummy);
}

static void nv_error_handler(struct ata_port *ap)
{
	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
			   nv_hardreset, ata_std_postreset);
}
static void nv_adma_error_handler(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
		void __iomem *mmio = pp->ctl_block;
		int i;
		u16 tmp;

		if(ata_tag_valid(ap->active_tag) || ap->sactive) {
			u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
			u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
			u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
			u32 status = readw(mmio + NV_ADMA_STAT);
			u8 cpb_count = readb(mmio + NV_ADMA_CPB_COUNT);
			u8 next_cpb_idx = readb(mmio + NV_ADMA_NEXT_CPB_IDX);

			ata_port_printk(ap, KERN_ERR, "EH in ADMA mode, notifier 0x%X "
				"notifier_error 0x%X gen_ctl 0x%X status 0x%X "
				"next cpb count 0x%X next cpb idx 0x%x\n",
				notifier, notifier_error, gen_ctl, status,
				cpb_count, next_cpb_idx);

			for(i = 0; i < NV_ADMA_MAX_CPBS; i++) {
				struct nv_adma_cpb *cpb = &pp->cpb[i];
				if( (ata_tag_valid(ap->active_tag) && i == ap->active_tag) ||
				    ap->sactive & (1 << i) )
					ata_port_printk(ap, KERN_ERR,
						"CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
						i, cpb->ctl_flags, cpb->resp_flags);
			}
		}

		/* Push us back into port register mode for error handling. */
		nv_adma_register_mode(ap);

		/* Mark all of the CPBs as invalid to prevent them from being executed */
		for(i = 0; i < NV_ADMA_MAX_CPBS; i++)
			pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;

		/* clear CPB fetch count */
		writew(0, mmio + NV_ADMA_CPB_COUNT);

		/* Reset channel */
		tmp = readw(mmio + NV_ADMA_CTL);
		writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
		readl(mmio + NV_ADMA_CTL); /* flush posted write */
		udelay(1);
		writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
		readl(mmio + NV_ADMA_CTL); /* flush posted write */
	}

	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
			   nv_hardreset, ata_std_postreset);
}
static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version = 0;
	struct ata_port_info *ppi[2];
	struct ata_probe_ent *probe_ent;
	struct nv_host_priv *hpriv;
	int rc;
	u32 bar;
	void __iomem *base;
	unsigned long type = ent->driver_data;
	int mask_set = 0;

	// Make sure this is a SATA controller by counting the number of bars
	// (NVIDIA SATA controllers will always have six bars). Otherwise,
	// it's an IDE controller and we ignore it.
	for (bar = 0; bar < 6; bar++)
		if (pci_resource_start(pdev, bar) == 0)
			return -ENODEV;

	if (!printed_version++)
		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc) {
		pcim_pin_device(pdev);
		return rc;
	}

	if(type >= CK804 && adma_enabled) {
		dev_printk(KERN_NOTICE, &pdev->dev, "Using ADMA mode\n");
		type = ADMA;
		if(!pci_set_dma_mask(pdev, DMA_64BIT_MASK) &&
		   !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))
			mask_set = 1;
	}

	if(!mask_set) {
		rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
		if (rc)
			return rc;
		rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
		if (rc)
			return rc;
	}

	rc = -ENOMEM;

	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!hpriv)
		return -ENOMEM;

	ppi[0] = ppi[1] = &nv_port_info[type];
	probe_ent = ata_pci_init_native_mode(pdev, ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
	if (!probe_ent)
		return -ENOMEM;

	if (!pcim_iomap(pdev, NV_MMIO_BAR, 0))
		return -EIO;
	probe_ent->iomap = pcim_iomap_table(pdev);

	probe_ent->private_data = hpriv;
	hpriv->type = type;

	base = probe_ent->iomap[NV_MMIO_BAR];
	probe_ent->port[0].scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
	probe_ent->port[1].scr_addr = base + NV_PORT1_SCR_REG_OFFSET;

	/* enable SATA space for CK804 */
	if (type >= CK804) {
		u8 regval;

		pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
		regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
		pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
	}

	pci_set_master(pdev);

	if (type == ADMA) {
		rc = nv_adma_host_init(probe_ent);
		if (rc)
			return rc;
	}

	rc = ata_device_add(probe_ent);
	if (rc != NV_PORTS)
		return -ENODEV;

	devm_kfree(&pdev->dev, probe_ent);
	return 0;
}
static void nv_remove_one (struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	struct nv_host_priv *hpriv = host->private_data;

	ata_pci_remove_one(pdev);
	kfree(hpriv);
}

static int nv_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	struct nv_host_priv *hpriv = host->private_data;
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if(rc)
		return rc;

	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
		if(hpriv->type >= CK804) {
			u8 regval;

			pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
			regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
			pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
		}
		if(hpriv->type == ADMA) {
			u32 tmp32;
			struct nv_adma_port_priv *pp;
			/* enable/disable ADMA on the ports appropriately */
			pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);

			pp = host->ports[0]->private_data;
			if(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
					   NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
			else
				tmp32 |= (NV_MCP_SATA_CFG_20_PORT0_EN |
					  NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
			pp = host->ports[1]->private_data;
			if(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN |
					   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
			else
				tmp32 |= (NV_MCP_SATA_CFG_20_PORT1_EN |
					  NV_MCP_SATA_CFG_20_PORT1_PWB_EN);

			pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
		}
	}

	ata_host_resume(host);

	return 0;
}
static void nv_ck804_host_stop(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	u8 regval;

	/* disable SATA space for CK804 */
	pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
	regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
	pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
}

static void nv_adma_host_stop(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	u32 tmp32;

	/* disable ADMA on the ports */
	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
	tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
		   NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
		   NV_MCP_SATA_CFG_20_PORT1_EN |
		   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);

	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);

	nv_ck804_host_stop(host);
}

static int __init nv_init(void)
{
	return pci_register_driver(&nv_pci_driver);
}

static void __exit nv_exit(void)
{
	pci_unregister_driver(&nv_pci_driver);
}

module_init(nv_init);
module_exit(nv_exit);
module_param_named(adma, adma_enabled, bool, 0444);
MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: true)");