libata: update libata LLDs to use devres
drivers/ata/sata_nv.c
/*
 *  sata_nv.c - NVIDIA nForce SATA
 *
 *  Copyright 2004 NVIDIA Corp.  All rights reserved.
 *  Copyright 2004 Andrew Chew
 *
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  No hardware documentation available outside of NVIDIA.
 *  This driver programs the NVIDIA SATA controller in a similar
 *  fashion as with other PCI IDE BMDMA controllers, with a few
 *  NV-specific details such as register offsets, SATA phy location,
 *  hotplug info, etc.
 *
 *  CK804/MCP04 controllers support an alternate programming interface
 *  similar to the ADMA specification (with some modifications).
 *  This allows the use of NCQ.  Non-DMA-mapped ATA commands are still
 *  sent through the legacy interface.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME "sata_nv"
#define DRV_VERSION "3.3"

#define NV_ADMA_DMA_BOUNDARY 0xffffffffUL

enum {
	NV_PORTS = 2,
	NV_PIO_MASK = 0x1f,
	NV_MWDMA_MASK = 0x07,
	NV_UDMA_MASK = 0x7f,
	NV_PORT0_SCR_REG_OFFSET = 0x00,
	NV_PORT1_SCR_REG_OFFSET = 0x40,

	/* INT_STATUS/ENABLE */
	NV_INT_STATUS = 0x10,
	NV_INT_ENABLE = 0x11,
	NV_INT_STATUS_CK804 = 0x440,
	NV_INT_ENABLE_CK804 = 0x441,

	/* INT_STATUS/ENABLE bits */
	NV_INT_DEV = 0x01,
	NV_INT_PM = 0x02,
	NV_INT_ADDED = 0x04,
	NV_INT_REMOVED = 0x08,

	NV_INT_PORT_SHIFT = 4, /* each port occupies 4 bits */

	NV_INT_ALL = 0x0f,
	NV_INT_MASK = NV_INT_DEV |
		      NV_INT_ADDED | NV_INT_REMOVED,

	/* INT_CONFIG */
	NV_INT_CONFIG = 0x12,
	NV_INT_CONFIG_METHD = 0x01, // 0 = INT, 1 = SMI

	// For PCI config register 20
	NV_MCP_SATA_CFG_20 = 0x50,
	NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
	NV_MCP_SATA_CFG_20_PORT0_EN = (1 << 17),
	NV_MCP_SATA_CFG_20_PORT1_EN = (1 << 16),
	NV_MCP_SATA_CFG_20_PORT0_PWB_EN = (1 << 14),
	NV_MCP_SATA_CFG_20_PORT1_PWB_EN = (1 << 12),

	NV_ADMA_MAX_CPBS = 32,
	NV_ADMA_CPB_SZ = 128,
	NV_ADMA_APRD_SZ = 16,
	NV_ADMA_SGTBL_LEN = (1024 - NV_ADMA_CPB_SZ) /
			     NV_ADMA_APRD_SZ,
	NV_ADMA_SGTBL_TOTAL_LEN = NV_ADMA_SGTBL_LEN + 5,
	NV_ADMA_SGTBL_SZ = NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
	NV_ADMA_PORT_PRIV_DMA_SZ = NV_ADMA_MAX_CPBS *
				   (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),
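
	/* Spelled out, the constants above give: NV_ADMA_SGTBL_LEN =
	   (1024 - 128) / 16 = 56 external APRD entries per tag, so
	   NV_ADMA_SGTBL_SZ = 56 * 16 = 896 bytes and each tag owns
	   128 + 896 = 1024 bytes; with 32 tags the per-port DMA area is
	   NV_ADMA_PORT_PRIV_DMA_SZ = 32 KiB.  Adding the 5 APRDs embedded
	   in the CPB itself yields NV_ADMA_SGTBL_TOTAL_LEN = 61 usable
	   s/g entries per command. */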

	/* BAR5 offset to ADMA general registers */
	NV_ADMA_GEN = 0x400,
	NV_ADMA_GEN_CTL = 0x00,
	NV_ADMA_NOTIFIER_CLEAR = 0x30,

	/* BAR5 offset to ADMA ports */
	NV_ADMA_PORT = 0x480,

	/* size of ADMA port register space */
	NV_ADMA_PORT_SIZE = 0x100,

	/* ADMA port registers */
	NV_ADMA_CTL = 0x40,
	NV_ADMA_CPB_COUNT = 0x42,
	NV_ADMA_NEXT_CPB_IDX = 0x43,
	NV_ADMA_STAT = 0x44,
	NV_ADMA_CPB_BASE_LOW = 0x48,
	NV_ADMA_CPB_BASE_HIGH = 0x4C,
	NV_ADMA_APPEND = 0x50,
	NV_ADMA_NOTIFIER = 0x68,
	NV_ADMA_NOTIFIER_ERROR = 0x6C,

	/* NV_ADMA_CTL register bits */
	NV_ADMA_CTL_HOTPLUG_IEN = (1 << 0),
	NV_ADMA_CTL_CHANNEL_RESET = (1 << 5),
	NV_ADMA_CTL_GO = (1 << 7),
	NV_ADMA_CTL_AIEN = (1 << 8),
	NV_ADMA_CTL_READ_NON_COHERENT = (1 << 11),
	NV_ADMA_CTL_WRITE_NON_COHERENT = (1 << 12),

	/* CPB response flag bits */
	NV_CPB_RESP_DONE = (1 << 0),
	NV_CPB_RESP_ATA_ERR = (1 << 3),
	NV_CPB_RESP_CMD_ERR = (1 << 4),
	NV_CPB_RESP_CPB_ERR = (1 << 7),

	/* CPB control flag bits */
	NV_CPB_CTL_CPB_VALID = (1 << 0),
	NV_CPB_CTL_QUEUE = (1 << 1),
	NV_CPB_CTL_APRD_VALID = (1 << 2),
	NV_CPB_CTL_IEN = (1 << 3),
	NV_CPB_CTL_FPDMA = (1 << 4),

	/* APRD flags */
	NV_APRD_WRITE = (1 << 1),
	NV_APRD_END = (1 << 2),
	NV_APRD_CONT = (1 << 3),

	/* NV_ADMA_STAT flags */
	NV_ADMA_STAT_TIMEOUT = (1 << 0),
	NV_ADMA_STAT_HOTUNPLUG = (1 << 1),
	NV_ADMA_STAT_HOTPLUG = (1 << 2),
	NV_ADMA_STAT_CPBERR = (1 << 4),
	NV_ADMA_STAT_SERROR = (1 << 5),
	NV_ADMA_STAT_CMD_COMPLETE = (1 << 6),
	NV_ADMA_STAT_IDLE = (1 << 8),
	NV_ADMA_STAT_LEGACY = (1 << 9),
	NV_ADMA_STAT_STOPPED = (1 << 10),
	NV_ADMA_STAT_DONE = (1 << 12),
	NV_ADMA_STAT_ERR = NV_ADMA_STAT_CPBERR |
			   NV_ADMA_STAT_TIMEOUT,

	/* port flags */
	NV_ADMA_PORT_REGISTER_MODE = (1 << 0),
	NV_ADMA_ATAPI_SETUP_COMPLETE = (1 << 1),
};

/* ADMA Physical Region Descriptor - one SG segment */
struct nv_adma_prd {
	__le64 addr;
	__le32 len;
	u8 flags;
	u8 packet_len;
	__le16 reserved;
};

enum nv_adma_regbits {
	CMDEND = (1 << 15),	/* end of command list */
	WNB = (1 << 14),	/* wait-not-BSY */
	IGN = (1 << 13),	/* ignore this entry */
	CS1n = (1 << (4 + 8)),	/* std. PATA signals follow... */
	DA2 = (1 << (2 + 8)),
	DA1 = (1 << (1 + 8)),
	DA0 = (1 << (0 + 8)),
};

/* ADMA Command Parameter Block
   The first 5 SG segments are stored inside the Command Parameter Block itself.
   If there are more than 5 segments the remainder are stored in a separate
   memory area indicated by next_aprd. */
struct nv_adma_cpb {
	u8 resp_flags;			/* 0 */
	u8 reserved1;			/* 1 */
	u8 ctl_flags;			/* 2 */
	/* len is length of taskfile in 64 bit words */
	u8 len;				/* 3 */
	u8 tag;				/* 4 */
	u8 next_cpb_idx;		/* 5 */
	__le16 reserved2;		/* 6-7 */
	__le16 tf[12];			/* 8-31 */
	struct nv_adma_prd aprd[5];	/* 32-111 */
	__le64 next_aprd;		/* 112-119 */
	__le64 reserved3;		/* 120-127 */
};

struct nv_adma_port_priv {
	struct nv_adma_cpb *cpb;
	dma_addr_t cpb_dma;
	struct nv_adma_prd *aprd;
	dma_addr_t aprd_dma;
	void __iomem *ctl_block;
	void __iomem *gen_block;
	void __iomem *notifier_clear_block;
	u8 flags;
};

struct nv_host_priv {
	unsigned long type;
};

#define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & ( 1 << (19 + (12 * (PORT)))))
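
/* e.g. NV_ADMA_CHECK_INTR(gen_ctl, 0) tests bit 19 of the general control
   word and NV_ADMA_CHECK_INTR(gen_ctl, 1) tests bit 31, the per-port ADMA
   interrupt-pending bits */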

static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
static void nv_remove_one (struct pci_dev *pdev);
static int nv_pci_device_resume(struct pci_dev *pdev);
static void nv_ck804_host_stop(struct ata_host *host);
static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg);
static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);

static void nv_nf2_freeze(struct ata_port *ap);
static void nv_nf2_thaw(struct ata_port *ap);
static void nv_ck804_freeze(struct ata_port *ap);
static void nv_ck804_thaw(struct ata_port *ap);
static void nv_error_handler(struct ata_port *ap);
static int nv_adma_slave_config(struct scsi_device *sdev);
static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
static void nv_adma_irq_clear(struct ata_port *ap);
static int nv_adma_port_start(struct ata_port *ap);
static void nv_adma_port_stop(struct ata_port *ap);
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg);
static int nv_adma_port_resume(struct ata_port *ap);
static void nv_adma_error_handler(struct ata_port *ap);
static void nv_adma_host_stop(struct ata_host *host);
static void nv_adma_bmdma_setup(struct ata_queued_cmd *qc);
static void nv_adma_bmdma_start(struct ata_queued_cmd *qc);
static void nv_adma_bmdma_stop(struct ata_queued_cmd *qc);
static u8 nv_adma_bmdma_status(struct ata_port *ap);

enum nv_host_type
{
	GENERIC,
	NFORCE2,
	NFORCE3 = NFORCE2,	/* NF2 == NF3 as far as sata_nv is concerned */
	CK804,
	ADMA
};

static const struct pci_device_id nv_pci_tbl[] = {
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },
	{ PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
	  PCI_ANY_ID, PCI_ANY_ID,
	  PCI_CLASS_STORAGE_IDE<<8, 0xffff00, GENERIC },
	{ PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
	  PCI_ANY_ID, PCI_ANY_ID,
	  PCI_CLASS_STORAGE_RAID<<8, 0xffff00, GENERIC },

	{ } /* terminate list */
};

static struct pci_driver nv_pci_driver = {
	.name = DRV_NAME,
	.id_table = nv_pci_tbl,
	.probe = nv_init_one,
	.suspend = ata_pci_device_suspend,
	.resume = nv_pci_device_resume,
	.remove = nv_remove_one,
};

static struct scsi_host_template nv_sht = {
	.module = THIS_MODULE,
	.name = DRV_NAME,
	.ioctl = ata_scsi_ioctl,
	.queuecommand = ata_scsi_queuecmd,
	.can_queue = ATA_DEF_QUEUE,
	.this_id = ATA_SHT_THIS_ID,
	.sg_tablesize = LIBATA_MAX_PRD,
	.cmd_per_lun = ATA_SHT_CMD_PER_LUN,
	.emulated = ATA_SHT_EMULATED,
	.use_clustering = ATA_SHT_USE_CLUSTERING,
	.proc_name = DRV_NAME,
	.dma_boundary = ATA_DMA_BOUNDARY,
	.slave_configure = ata_scsi_slave_config,
	.slave_destroy = ata_scsi_slave_destroy,
	.bios_param = ata_std_bios_param,
	.suspend = ata_scsi_device_suspend,
	.resume = ata_scsi_device_resume,
};

static struct scsi_host_template nv_adma_sht = {
	.module = THIS_MODULE,
	.name = DRV_NAME,
	.ioctl = ata_scsi_ioctl,
	.queuecommand = ata_scsi_queuecmd,
	.can_queue = NV_ADMA_MAX_CPBS,
	.this_id = ATA_SHT_THIS_ID,
	.sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN,
	.cmd_per_lun = ATA_SHT_CMD_PER_LUN,
	.emulated = ATA_SHT_EMULATED,
	.use_clustering = ATA_SHT_USE_CLUSTERING,
	.proc_name = DRV_NAME,
	.dma_boundary = NV_ADMA_DMA_BOUNDARY,
	.slave_configure = nv_adma_slave_config,
	.slave_destroy = ata_scsi_slave_destroy,
	.bios_param = ata_std_bios_param,
	.suspend = ata_scsi_device_suspend,
	.resume = ata_scsi_device_resume,
};

static const struct ata_port_operations nv_generic_ops = {
	.port_disable = ata_port_disable,
	.tf_load = ata_tf_load,
	.tf_read = ata_tf_read,
	.exec_command = ata_exec_command,
	.check_status = ata_check_status,
	.dev_select = ata_std_dev_select,
	.bmdma_setup = ata_bmdma_setup,
	.bmdma_start = ata_bmdma_start,
	.bmdma_stop = ata_bmdma_stop,
	.bmdma_status = ata_bmdma_status,
	.qc_prep = ata_qc_prep,
	.qc_issue = ata_qc_issue_prot,
	.freeze = ata_bmdma_freeze,
	.thaw = ata_bmdma_thaw,
	.error_handler = nv_error_handler,
	.post_internal_cmd = ata_bmdma_post_internal_cmd,
	.data_xfer = ata_pio_data_xfer,
	.irq_handler = nv_generic_interrupt,
	.irq_clear = ata_bmdma_irq_clear,
	.scr_read = nv_scr_read,
	.scr_write = nv_scr_write,
	.port_start = ata_port_start,
};

static const struct ata_port_operations nv_nf2_ops = {
	.port_disable = ata_port_disable,
	.tf_load = ata_tf_load,
	.tf_read = ata_tf_read,
	.exec_command = ata_exec_command,
	.check_status = ata_check_status,
	.dev_select = ata_std_dev_select,
	.bmdma_setup = ata_bmdma_setup,
	.bmdma_start = ata_bmdma_start,
	.bmdma_stop = ata_bmdma_stop,
	.bmdma_status = ata_bmdma_status,
	.qc_prep = ata_qc_prep,
	.qc_issue = ata_qc_issue_prot,
	.freeze = nv_nf2_freeze,
	.thaw = nv_nf2_thaw,
	.error_handler = nv_error_handler,
	.post_internal_cmd = ata_bmdma_post_internal_cmd,
	.data_xfer = ata_pio_data_xfer,
	.irq_handler = nv_nf2_interrupt,
	.irq_clear = ata_bmdma_irq_clear,
	.scr_read = nv_scr_read,
	.scr_write = nv_scr_write,
	.port_start = ata_port_start,
};

static const struct ata_port_operations nv_ck804_ops = {
	.port_disable = ata_port_disable,
	.tf_load = ata_tf_load,
	.tf_read = ata_tf_read,
	.exec_command = ata_exec_command,
	.check_status = ata_check_status,
	.dev_select = ata_std_dev_select,
	.bmdma_setup = ata_bmdma_setup,
	.bmdma_start = ata_bmdma_start,
	.bmdma_stop = ata_bmdma_stop,
	.bmdma_status = ata_bmdma_status,
	.qc_prep = ata_qc_prep,
	.qc_issue = ata_qc_issue_prot,
	.freeze = nv_ck804_freeze,
	.thaw = nv_ck804_thaw,
	.error_handler = nv_error_handler,
	.post_internal_cmd = ata_bmdma_post_internal_cmd,
	.data_xfer = ata_pio_data_xfer,
	.irq_handler = nv_ck804_interrupt,
	.irq_clear = ata_bmdma_irq_clear,
	.scr_read = nv_scr_read,
	.scr_write = nv_scr_write,
	.port_start = ata_port_start,
	.host_stop = nv_ck804_host_stop,
};

static const struct ata_port_operations nv_adma_ops = {
	.port_disable = ata_port_disable,
	.tf_load = ata_tf_load,
	.tf_read = ata_tf_read,
	.check_atapi_dma = nv_adma_check_atapi_dma,
	.exec_command = ata_exec_command,
	.check_status = ata_check_status,
	.dev_select = ata_std_dev_select,
	.bmdma_setup = nv_adma_bmdma_setup,
	.bmdma_start = nv_adma_bmdma_start,
	.bmdma_stop = nv_adma_bmdma_stop,
	.bmdma_status = nv_adma_bmdma_status,
	.qc_prep = nv_adma_qc_prep,
	.qc_issue = nv_adma_qc_issue,
	.freeze = nv_ck804_freeze,
	.thaw = nv_ck804_thaw,
	.error_handler = nv_adma_error_handler,
	.post_internal_cmd = nv_adma_bmdma_stop,
	.data_xfer = ata_mmio_data_xfer,
	.irq_handler = nv_adma_interrupt,
	.irq_clear = nv_adma_irq_clear,
	.scr_read = nv_scr_read,
	.scr_write = nv_scr_write,
	.port_start = nv_adma_port_start,
	.port_stop = nv_adma_port_stop,
	.port_suspend = nv_adma_port_suspend,
	.port_resume = nv_adma_port_resume,
	.host_stop = nv_adma_host_stop,
};

static struct ata_port_info nv_port_info[] = {
	/* generic */
	{
		.sht = &nv_sht,
		.flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
			 ATA_FLAG_HRST_TO_RESUME,
		.pio_mask = NV_PIO_MASK,
		.mwdma_mask = NV_MWDMA_MASK,
		.udma_mask = NV_UDMA_MASK,
		.port_ops = &nv_generic_ops,
	},
	/* nforce2/3 */
	{
		.sht = &nv_sht,
		.flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
			 ATA_FLAG_HRST_TO_RESUME,
		.pio_mask = NV_PIO_MASK,
		.mwdma_mask = NV_MWDMA_MASK,
		.udma_mask = NV_UDMA_MASK,
		.port_ops = &nv_nf2_ops,
	},
	/* ck804 */
	{
		.sht = &nv_sht,
		.flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
			 ATA_FLAG_HRST_TO_RESUME,
		.pio_mask = NV_PIO_MASK,
		.mwdma_mask = NV_MWDMA_MASK,
		.udma_mask = NV_UDMA_MASK,
		.port_ops = &nv_ck804_ops,
	},
	/* ADMA */
	{
		.sht = &nv_adma_sht,
		.flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
			 ATA_FLAG_HRST_TO_RESUME |
			 ATA_FLAG_MMIO | ATA_FLAG_NCQ,
		.pio_mask = NV_PIO_MASK,
		.mwdma_mask = NV_MWDMA_MASK,
		.udma_mask = NV_UDMA_MASK,
		.port_ops = &nv_adma_ops,
	},
};

MODULE_AUTHOR("NVIDIA");
MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

static int adma_enabled = 1;

static void nv_adma_register_mode(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp;

	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
		return;

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
}

static void nv_adma_mode(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp;

	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
		return;

	WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

	pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
}

static int nv_adma_slave_config(struct scsi_device *sdev)
{
	struct ata_port *ap = ata_shost_to_port(sdev->host);
	struct nv_adma_port_priv *pp = ap->private_data;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	u64 bounce_limit;
	unsigned long segment_boundary;
	unsigned short sg_tablesize;
	int rc;
	int adma_enable;
	u32 current_reg, new_reg, config_mask;

	rc = ata_scsi_slave_config(sdev);

	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
		/* Not a proper libata device, ignore */
		return rc;

	if (ap->device[sdev->id].class == ATA_DEV_ATAPI) {
		/*
		 * NVIDIA reports that ADMA mode does not support ATAPI commands.
		 * Therefore ATAPI commands are sent through the legacy interface.
		 * However, the legacy interface only supports 32-bit DMA.
		 * Restrict DMA parameters as required by the legacy interface
		 * when an ATAPI device is connected.
		 */
		bounce_limit = ATA_DMA_MASK;
		segment_boundary = ATA_DMA_BOUNDARY;
		/* Subtract 1 since an extra entry may be needed for padding, see
		   libata-scsi.c */
		sg_tablesize = LIBATA_MAX_PRD - 1;

		/* Since the legacy DMA engine is in use, we need to disable ADMA
		   on the port. */
		adma_enable = 0;
		nv_adma_register_mode(ap);
	}
	else {
		bounce_limit = *ap->dev->dma_mask;
		segment_boundary = NV_ADMA_DMA_BOUNDARY;
		sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
		adma_enable = 1;
	}

	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);

	if(ap->port_no == 1)
		config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
			      NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
	else
		config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
			      NV_MCP_SATA_CFG_20_PORT0_PWB_EN;

	if(adma_enable) {
		new_reg = current_reg | config_mask;
		pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
	}
	else {
		new_reg = current_reg & ~config_mask;
		pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
	}

	if(current_reg != new_reg)
		pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);

	blk_queue_bounce_limit(sdev->request_queue, bounce_limit);
	blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
	blk_queue_max_hw_segments(sdev->request_queue, sg_tablesize);
	ata_port_printk(ap, KERN_INFO,
		"bounce limit 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
		(unsigned long long)bounce_limit, segment_boundary, sg_tablesize);
	return rc;
}

static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
}

static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
{
	unsigned int idx = 0;

	cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device | WNB);

	if ((tf->flags & ATA_TFLAG_LBA48) == 0) {
		cpb[idx++] = cpu_to_le16(IGN);
		cpb[idx++] = cpu_to_le16(IGN);
		cpb[idx++] = cpu_to_le16(IGN);
		cpb[idx++] = cpu_to_le16(IGN);
		cpb[idx++] = cpu_to_le16(IGN);
	}
	else {
		cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->hob_feature);
		cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->hob_lbal);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->hob_lbam);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->hob_lbah);
	}

	cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature);
	cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->nsect);
	cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->lbal);
	cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->lbam);
	cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->lbah);

	cpb[idx++] = cpu_to_le16((ATA_REG_CMD << 8) | tf->command | CMDEND);

	return idx;
}
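
/* Note that every path through nv_adma_tf_to_cpb() emits exactly 12 entries:
   the WNB device-select entry, five HOB (or IGN placeholder) entries, five
   current-register entries, and the final CMDEND command entry.  Twelve
   16-bit words are 24 bytes, i.e. three 64-bit words, which is why
   nv_adma_qc_prep() below sets cpb->len = 3. */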

static void nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	int complete = 0, have_err = 0;
	u8 flags = pp->cpb[cpb_num].resp_flags;

	VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);

	if (flags & NV_CPB_RESP_DONE) {
		VPRINTK("CPB flags done, flags=0x%x\n", flags);
		complete = 1;
	}
	if (flags & NV_CPB_RESP_ATA_ERR) {
		ata_port_printk(ap, KERN_ERR, "CPB flags ATA err, flags=0x%x\n", flags);
		have_err = 1;
		complete = 1;
	}
	if (flags & NV_CPB_RESP_CMD_ERR) {
		ata_port_printk(ap, KERN_ERR, "CPB flags CMD err, flags=0x%x\n", flags);
		have_err = 1;
		complete = 1;
	}
	if (flags & NV_CPB_RESP_CPB_ERR) {
		ata_port_printk(ap, KERN_ERR, "CPB flags CPB err, flags=0x%x\n", flags);
		have_err = 1;
		complete = 1;
	}
	if(complete || force_err)
	{
		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, cpb_num);
		if(likely(qc)) {
			u8 ata_status = 0;
			/* Only use the ATA port status for non-NCQ commands.
			   For NCQ commands the current status may have nothing to do with
			   the command just completed. */
			if(qc->tf.protocol != ATA_PROT_NCQ)
				ata_status = readb(pp->ctl_block + (ATA_REG_STATUS * 4));

			if(have_err || force_err)
				ata_status |= ATA_ERR;

			qc->err_mask |= ac_err_mask(ata_status);
			DPRINTK("Completing qc from tag %d with err_mask %u\n",cpb_num,
				qc->err_mask);
			ata_qc_complete(qc);
		}
	}
}

static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
{
	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->active_tag);

	/* freeze if hotplugged */
	if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
		ata_port_freeze(ap);
		return 1;
	}

	/* bail out if not our interrupt */
	if (!(irq_stat & NV_INT_DEV))
		return 0;

	/* DEV interrupt w/ no active qc? */
	if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
		ata_check_status(ap);
		return 1;
	}

	/* handle interrupt */
	return ata_host_intr(ap, qc);
}
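
/* A nonzero return from nv_host_intr() above means "this port's interrupt was
   consumed" (hotplug freeze, spurious-status clear, or a command handled by
   ata_host_intr()); nv_do_interrupt() and the ADMA register-mode path both
   accumulate these per-port results into the shared-IRQ handled count. */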

static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	int i, handled = 0;
	u32 notifier_clears[2];

	spin_lock(&host->lock);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		notifier_clears[i] = 0;

		if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
			struct nv_adma_port_priv *pp = ap->private_data;
			void __iomem *mmio = pp->ctl_block;
			u16 status;
			u32 gen_ctl;
			int have_global_err = 0;
			u32 notifier, notifier_error;

			/* if in ATA register mode, use standard ata interrupt handler */
			if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
				u8 irq_stat = readb(host->mmio_base + NV_INT_STATUS_CK804)
					>> (NV_INT_PORT_SHIFT * i);
				if(ata_tag_valid(ap->active_tag))
					/** NV_INT_DEV indication seems unreliable at times
					    at least in ADMA mode. Force it on always when a
					    command is active, to prevent losing interrupts. */
					irq_stat |= NV_INT_DEV;
				handled += nv_host_intr(ap, irq_stat);
				continue;
			}

			notifier = readl(mmio + NV_ADMA_NOTIFIER);
			notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
			notifier_clears[i] = notifier | notifier_error;

			gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);

			if( !NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
			    !notifier_error)
				/* Nothing to do */
				continue;

			status = readw(mmio + NV_ADMA_STAT);

			/* Clear status. Ensure the controller sees the clearing before we start
			   looking at any of the CPB statuses, so that any CPB completions after
			   this point in the handler will raise another interrupt. */
			writew(status, mmio + NV_ADMA_STAT);
			readw(mmio + NV_ADMA_STAT); /* flush posted write */
			rmb();

			/* freeze if hotplugged */
			if (unlikely(status & (NV_ADMA_STAT_HOTPLUG | NV_ADMA_STAT_HOTUNPLUG))) {
				ata_port_printk(ap, KERN_NOTICE, "Hotplug event, freezing\n");
				ata_port_freeze(ap);
				handled++;
				continue;
			}

			if (status & NV_ADMA_STAT_TIMEOUT) {
				ata_port_printk(ap, KERN_ERR, "timeout, stat=0x%x\n", status);
				have_global_err = 1;
			}
			if (status & NV_ADMA_STAT_CPBERR) {
				ata_port_printk(ap, KERN_ERR, "CPB error, stat=0x%x\n", status);
				have_global_err = 1;
			}
			if ((status & NV_ADMA_STAT_DONE) || have_global_err) {
				/** Check CPBs for completed commands */

				if(ata_tag_valid(ap->active_tag))
					/* Non-NCQ command */
					nv_adma_check_cpb(ap, ap->active_tag, have_global_err ||
						(notifier_error & (1 << ap->active_tag)));
				else {
					int pos;
					u32 active = ap->sactive;
					while( (pos = ffs(active)) ) {
						pos--;
						nv_adma_check_cpb(ap, pos, have_global_err ||
							(notifier_error & (1 << pos)) );
						active &= ~(1 << pos );
					}
				}
			}

			handled++; /* irq handled if we got here */
		}
	}

	if(notifier_clears[0] || notifier_clears[1]) {
		/* Note: Both notifier clear registers must be written
		   if either is set, even if one is zero, according to NVIDIA. */
		struct nv_adma_port_priv *pp = host->ports[0]->private_data;
		writel(notifier_clears[0], pp->notifier_clear_block);
		pp = host->ports[1]->private_data;
		writel(notifier_clears[1], pp->notifier_clear_block);
	}

	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}

static void nv_adma_irq_clear(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 status = readw(mmio + NV_ADMA_STAT);
	u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
	u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
	unsigned long dma_stat_addr = ap->ioaddr.bmdma_addr + ATA_DMA_STATUS;

	/* clear ADMA status */
	writew(status, mmio + NV_ADMA_STAT);
	writel(notifier | notifier_error,
	       pp->notifier_clear_block);

	/** clear legacy status */
	outb(inb(dma_stat_addr), dma_stat_addr);
}

static void nv_adma_bmdma_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct nv_adma_port_priv *pp = ap->private_data;
	u8 dmactl;

	if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
		WARN_ON(1);
		return;
	}

	/* load PRD table addr. */
	outl(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);

	/* specify data direction, triple-check start bit is clear */
	dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
	dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
	if (!rw)
		dmactl |= ATA_DMA_WR;

	outb(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);

	/* issue r/w command */
	ata_exec_command(ap, &qc->tf);
}

static void nv_adma_bmdma_start(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct nv_adma_port_priv *pp = ap->private_data;
	u8 dmactl;

	if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
		WARN_ON(1);
		return;
	}

	/* start host DMA transaction */
	dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
	outb(dmactl | ATA_DMA_START,
	     ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
}

static void nv_adma_bmdma_stop(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct nv_adma_port_priv *pp = ap->private_data;

	if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
		return;

	/* clear start/stop bit */
	outb(inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD) & ~ATA_DMA_START,
	     ap->ioaddr.bmdma_addr + ATA_DMA_CMD);

	/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
	ata_altstatus(ap); /* dummy read */
}

static u8 nv_adma_bmdma_status(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;

	WARN_ON(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE));

	return inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
}

static int nv_adma_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct nv_adma_port_priv *pp;
	int rc;
	void *mem;
	dma_addr_t mem_dma;
	void __iomem *mmio;
	u16 tmp;

	VPRINTK("ENTER\n");

	rc = ata_port_start(ap);
	if (rc)
		return rc;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	mmio = ap->host->mmio_base + NV_ADMA_PORT +
	       ap->port_no * NV_ADMA_PORT_SIZE;
	pp->ctl_block = mmio;
	pp->gen_block = ap->host->mmio_base + NV_ADMA_GEN;
	pp->notifier_clear_block = pp->gen_block +
	       NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);

	mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
				  &mem_dma, GFP_KERNEL);
	if (!mem)
		return -ENOMEM;
	memset(mem, 0, NV_ADMA_PORT_PRIV_DMA_SZ);

	/*
	 * First item in chunk of DMA memory:
	 * 128-byte command parameter block (CPB)
	 * one for each command tag
	 */
	pp->cpb = mem;
	pp->cpb_dma = mem_dma;

	writel(mem_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW);
	writel((mem_dma >> 16 ) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH);
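
	/* splitting the shift as (mem_dma >> 16) >> 16 keeps the expression
	   well-defined when dma_addr_t is only 32 bits wide, where a direct
	   >> 32 would be undefined behaviour; the high word is then simply 0 */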

	mem += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
	mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;

	/*
	 * Second item: block of ADMA_SGTBL_LEN s/g entries
	 */
	pp->aprd = mem;
	pp->aprd_dma = mem_dma;

	ap->private_data = pp;

	/* clear any outstanding interrupt conditions */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* initialize port variables */
	pp->flags = NV_ADMA_PORT_REGISTER_MODE;

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* clear GO for register mode, enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew( (tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN, mmio + NV_ADMA_CTL);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readl( mmio + NV_ADMA_CTL ); /* flush posted write */
	udelay(1);
	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readl( mmio + NV_ADMA_CTL ); /* flush posted write */

	return 0;
}

static void nv_adma_port_stop(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;

	VPRINTK("ENTER\n");
	writew(0, mmio + NV_ADMA_CTL);
}

static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;

	/* Go to register mode - clears GO */
	nv_adma_register_mode(ap);

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* disable interrupt, shut down port */
	writew(0, mmio + NV_ADMA_CTL);

	return 0;
}

static int nv_adma_port_resume(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp;

	/* set CPB block location */
	writel(pp->cpb_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW);
	writel((pp->cpb_dma >> 16 ) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH);

	/* clear any outstanding interrupt conditions */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* initialize port variables */
	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* clear GO for register mode, enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN, mmio + NV_ADMA_CTL);

	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readl( mmio + NV_ADMA_CTL ); /* flush posted write */
	udelay(1);
	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readl( mmio + NV_ADMA_CTL ); /* flush posted write */

	return 0;
}

static void nv_adma_setup_port(struct ata_probe_ent *probe_ent, unsigned int port)
{
	void __iomem *mmio = probe_ent->mmio_base;
	struct ata_ioports *ioport = &probe_ent->port[port];

	VPRINTK("ENTER\n");

	mmio += NV_ADMA_PORT + port * NV_ADMA_PORT_SIZE;

	ioport->cmd_addr = (unsigned long) mmio;
	ioport->data_addr = (unsigned long) mmio + (ATA_REG_DATA * 4);
	ioport->error_addr =
	ioport->feature_addr = (unsigned long) mmio + (ATA_REG_ERR * 4);
	ioport->nsect_addr = (unsigned long) mmio + (ATA_REG_NSECT * 4);
	ioport->lbal_addr = (unsigned long) mmio + (ATA_REG_LBAL * 4);
	ioport->lbam_addr = (unsigned long) mmio + (ATA_REG_LBAM * 4);
	ioport->lbah_addr = (unsigned long) mmio + (ATA_REG_LBAH * 4);
	ioport->device_addr = (unsigned long) mmio + (ATA_REG_DEVICE * 4);
	ioport->status_addr =
	ioport->command_addr = (unsigned long) mmio + (ATA_REG_STATUS * 4);
	ioport->altstatus_addr =
	ioport->ctl_addr = (unsigned long) mmio + 0x20;
}

static int nv_adma_host_init(struct ata_probe_ent *probe_ent)
{
	struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
	unsigned int i;
	u32 tmp32;

	VPRINTK("ENTER\n");

	/* enable ADMA on the ports */
	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
	tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
		 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
		 NV_MCP_SATA_CFG_20_PORT1_EN |
		 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;

	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);

	for (i = 0; i < probe_ent->n_ports; i++)
		nv_adma_setup_port(probe_ent, i);

	return 0;
}

static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
			      struct scatterlist *sg,
			      int idx,
			      struct nv_adma_prd *aprd)
{
	u8 flags;

	memset(aprd, 0, sizeof(struct nv_adma_prd));

	flags = 0;
	if (qc->tf.flags & ATA_TFLAG_WRITE)
		flags |= NV_APRD_WRITE;
	if (idx == qc->n_elem - 1)
		flags |= NV_APRD_END;
	else if (idx != 4)
		flags |= NV_APRD_CONT;

	aprd->addr = cpu_to_le64(((u64)sg_dma_address(sg)));
	aprd->len = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
	aprd->flags = flags;
}

static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	unsigned int idx;
	struct nv_adma_prd *aprd;
	struct scatterlist *sg;

	VPRINTK("ENTER\n");

	idx = 0;

	ata_for_each_sg(sg, qc) {
		aprd = (idx < 5) ? &cpb->aprd[idx] : &pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (idx-5)];
		nv_adma_fill_aprd(qc, sg, idx, aprd);
		idx++;
	}
	if (idx > 5)
		cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->tag)));
}
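
/* In nv_adma_fill_sg() each tag owns a fixed NV_ADMA_SGTBL_LEN-entry slice of
   the shared APRD array, so the CPU-side index (NV_ADMA_SGTBL_LEN * tag
   entries) and the device-side pointer (NV_ADMA_SGTBL_SZ * tag bytes) refer
   to the same slice; note that nv_adma_fill_aprd() leaves APRD 4 in the CPB
   without NV_APRD_CONT, since the chain continues via next_aprd rather than
   in-line. */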

static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	struct nv_adma_cpb *cpb = &pp->cpb[qc->tag];
	u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
		       NV_CPB_CTL_APRD_VALID |
		       NV_CPB_CTL_IEN;

	if (!(qc->flags & ATA_QCFLAG_DMAMAP) ||
	    (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
		nv_adma_register_mode(qc->ap);
		ata_qc_prep(qc);
		return;
	}

	memset(cpb, 0, sizeof(struct nv_adma_cpb));

	cpb->len = 3;
	cpb->tag = qc->tag;
	cpb->next_cpb_idx = 0;

	/* turn on NCQ flags for NCQ commands */
	if (qc->tf.protocol == ATA_PROT_NCQ)
		ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;

	VPRINTK("qc->flags = 0x%lx\n", qc->flags);

	nv_adma_tf_to_cpb(&qc->tf, cpb->tf);

	nv_adma_fill_sg(qc, cpb);

	/* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID until we are
	   finished filling in all of the contents */
	wmb();
	cpb->ctl_flags = ctl_flags;
}

static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	void __iomem *mmio = pp->ctl_block;

	VPRINTK("ENTER\n");

	if (!(qc->flags & ATA_QCFLAG_DMAMAP) ||
	    (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
		/* use ATA register mode */
		VPRINTK("no dmamap or ATAPI, using ATA register mode: 0x%lx\n", qc->flags);
		nv_adma_register_mode(qc->ap);
		return ata_qc_issue_prot(qc);
	} else
		nv_adma_mode(qc->ap);

	/* write append register, command tag in lower 8 bits
	   and (number of cpbs to append -1) in top 8 bits */
	wmb();
	writew(qc->tag, mmio + NV_ADMA_APPEND);

	DPRINTK("Issued tag %u\n",qc->tag);

	return 0;
}

static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int i;
	unsigned int handled = 0;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap;

		ap = host->ports[i];
		if (ap &&
		    !(ap->flags & ATA_FLAG_DISABLED)) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->active_tag);
			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
				handled += ata_host_intr(ap, qc);
			else
				// No request pending?  Clear interrupt status
				// anyway, in case there's one pending.
				ap->ops->check_status(ap);
		}
	}

	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_RETVAL(handled);
}

static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
{
	int i, handled = 0;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap && !(ap->flags & ATA_FLAG_DISABLED))
			handled += nv_host_intr(ap, irq_stat);

		irq_stat >>= NV_INT_PORT_SHIFT;
	}

	return IRQ_RETVAL(handled);
}

static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	u8 irq_stat;
	irqreturn_t ret;

	spin_lock(&host->lock);
	irq_stat = inb(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
	ret = nv_do_interrupt(host, irq_stat);
	spin_unlock(&host->lock);

	return ret;
}

static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	u8 irq_stat;
	irqreturn_t ret;

	spin_lock(&host->lock);
	irq_stat = readb(host->mmio_base + NV_INT_STATUS_CK804);
	ret = nv_do_interrupt(host, irq_stat);
	spin_unlock(&host->lock);

	return ret;
}

static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg)
{
	if (sc_reg > SCR_CONTROL)
		return 0xffffffffU;

	return ioread32((void __iomem *)ap->ioaddr.scr_addr + (sc_reg * 4));
}

static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
{
	if (sc_reg > SCR_CONTROL)
		return;

	iowrite32(val, (void __iomem *)ap->ioaddr.scr_addr + (sc_reg * 4));
}

static void nv_nf2_freeze(struct ata_port *ap)
{
	unsigned long scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	mask = inb(scr_addr + NV_INT_ENABLE);
	mask &= ~(NV_INT_ALL << shift);
	outb(mask, scr_addr + NV_INT_ENABLE);
}

static void nv_nf2_thaw(struct ata_port *ap)
{
	unsigned long scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	outb(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);

	mask = inb(scr_addr + NV_INT_ENABLE);
	mask |= (NV_INT_MASK << shift);
	outb(mask, scr_addr + NV_INT_ENABLE);
}

static void nv_ck804_freeze(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->mmio_base;
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
	mask &= ~(NV_INT_ALL << shift);
	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
}

static void nv_ck804_thaw(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->mmio_base;
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);

	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
	mask |= (NV_INT_MASK << shift);
	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
}

static int nv_hardreset(struct ata_port *ap, unsigned int *class)
{
	unsigned int dummy;

	/* SATA hardreset fails to retrieve proper device signature on
	 * some controllers.  Don't classify on hardreset.  For more
	 * info, see http://bugme.osdl.org/show_bug.cgi?id=3352
	 */
	return sata_std_hardreset(ap, &dummy);
}

static void nv_error_handler(struct ata_port *ap)
{
	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
			   nv_hardreset, ata_std_postreset);
}

static void nv_adma_error_handler(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	if(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
		void __iomem *mmio = pp->ctl_block;
		int i;
		u16 tmp;

		u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
		u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
		u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
		u32 status = readw(mmio + NV_ADMA_STAT);

		ata_port_printk(ap, KERN_ERR, "EH in ADMA mode, notifier 0x%X "
			"notifier_error 0x%X gen_ctl 0x%X status 0x%X\n",
			notifier, notifier_error, gen_ctl, status);

		for( i=0;i<NV_ADMA_MAX_CPBS;i++) {
			struct nv_adma_cpb *cpb = &pp->cpb[i];
			if( cpb->ctl_flags || cpb->resp_flags )
				ata_port_printk(ap, KERN_ERR,
					"CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
					i, cpb->ctl_flags, cpb->resp_flags);
		}

		/* Push us back into port register mode for error handling. */
		nv_adma_register_mode(ap);

		ata_port_printk(ap, KERN_ERR, "Resetting port\n");

		/* Mark all of the CPBs as invalid to prevent them from being executed */
		for( i=0;i<NV_ADMA_MAX_CPBS;i++)
			pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;

		/* clear CPB fetch count */
		writew(0, mmio + NV_ADMA_CPB_COUNT);

		/* Reset channel */
		tmp = readw(mmio + NV_ADMA_CTL);
		writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
		readl( mmio + NV_ADMA_CTL ); /* flush posted write */
		udelay(1);
		writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
		readl( mmio + NV_ADMA_CTL ); /* flush posted write */
	}

	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
			   nv_hardreset, ata_std_postreset);
}

static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version = 0;
	struct ata_port_info *ppi[2];
	struct ata_probe_ent *probe_ent;
	struct nv_host_priv *hpriv;
	int rc;
	u32 bar;
	unsigned long base;
	unsigned long type = ent->driver_data;
	int mask_set = 0;

	// Make sure this is a SATA controller by counting the number of bars
	// (NVIDIA SATA controllers will always have six bars).  Otherwise,
	// it's an IDE controller and we ignore it.
	for (bar=0; bar<6; bar++)
		if (pci_resource_start(pdev, bar) == 0)
			return -ENODEV;

	if (!printed_version++)
		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc) {
		pcim_pin_device(pdev);
		return rc;
	}

	if(type >= CK804 && adma_enabled) {
		dev_printk(KERN_NOTICE, &pdev->dev, "Using ADMA mode\n");
		type = ADMA;
		if(!pci_set_dma_mask(pdev, DMA_64BIT_MASK) &&
		   !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))
			mask_set = 1;
	}

	if(!mask_set) {
		rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
		if (rc)
			return rc;
		rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
		if (rc)
			return rc;
	}

	rc = -ENOMEM;

	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!hpriv)
		return -ENOMEM;

	ppi[0] = ppi[1] = &nv_port_info[type];
	probe_ent = ata_pci_init_native_mode(pdev, ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
	if (!probe_ent)
		return -ENOMEM;

	probe_ent->mmio_base = pcim_iomap(pdev, 5, 0);
	if (!probe_ent->mmio_base)
		return -EIO;

	probe_ent->private_data = hpriv;
	hpriv->type = type;

	base = (unsigned long)probe_ent->mmio_base;

	probe_ent->port[0].scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
	probe_ent->port[1].scr_addr = base + NV_PORT1_SCR_REG_OFFSET;

	/* enable SATA space for CK804 */
	if (type >= CK804) {
		u8 regval;

		pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
		regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
		pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
	}

	pci_set_master(pdev);

	if (type == ADMA) {
		rc = nv_adma_host_init(probe_ent);
		if (rc)
			return rc;
	}

	rc = ata_device_add(probe_ent);
	if (rc != NV_PORTS)
		return -ENODEV;

	devm_kfree(&pdev->dev, probe_ent);
	return 0;
}
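
/* With the devres conversion, pcim_enable_device(), devm_kzalloc(),
   pcim_iomap() and dmam_alloc_coherent() are device-managed: their resources
   are released automatically if probing fails or when the device is detached,
   which is why nv_init_one() has no unwind path and only needs the final
   devm_kfree() of probe_ent, which serves no purpose once ata_device_add()
   has returned. */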

static void nv_remove_one (struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	struct nv_host_priv *hpriv = host->private_data;

	ata_pci_remove_one(pdev);
	kfree(hpriv);
}

static int nv_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	struct nv_host_priv *hpriv = host->private_data;

	ata_pci_device_do_resume(pdev);

	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
		if(hpriv->type >= CK804) {
			u8 regval;

			pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
			regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
			pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
		}
		if(hpriv->type == ADMA) {
			u32 tmp32;
			struct nv_adma_port_priv *pp;
			/* enable/disable ADMA on the ports appropriately */
			pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);

			pp = host->ports[0]->private_data;
			if(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
					   NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
			else
				tmp32 |= (NV_MCP_SATA_CFG_20_PORT0_EN |
					  NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
			pp = host->ports[1]->private_data;
			if(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN |
					   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
			else
				tmp32 |= (NV_MCP_SATA_CFG_20_PORT1_EN |
					  NV_MCP_SATA_CFG_20_PORT1_PWB_EN);

			pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
		}
	}

	ata_host_resume(host);

	return 0;
}

static void nv_ck804_host_stop(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	u8 regval;

	/* disable SATA space for CK804 */
	pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
	regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
	pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
}

static void nv_adma_host_stop(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	u32 tmp32;

	/* disable ADMA on the ports */
	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
	tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
		   NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
		   NV_MCP_SATA_CFG_20_PORT1_EN |
		   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);

	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);

	nv_ck804_host_stop(host);
}

static int __init nv_init(void)
{
	return pci_register_driver(&nv_pci_driver);
}

static void __exit nv_exit(void)
{
	pci_unregister_driver(&nv_pci_driver);
}

module_init(nv_init);
module_exit(nv_exit);
module_param_named(adma, adma_enabled, bool, 0444);
MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: true)");