/*
 *  sata_via.c - VIA Serial ATA controllers
 *
 *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *                  Please ALWAYS copy linux-ide@vger.kernel.org
 *                  on emails.
 *
 *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
 *  Copyright 2003-2004 Jeff Garzik
 *
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  Hardware documentation available under NDA.
 *
 *
 *  To-do list:
 *  - VT6421 PATA support
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>

#define DRV_NAME	"sata_via"
#define DRV_VERSION	"2.3"

enum board_ids_enum {
        vt6420,
        vt6421,
};

enum {
        SATA_CHAN_ENAB = 0x40,   /* SATA channel enable */
        SATA_INT_GATE = 0x41,    /* SATA interrupt gating */
        SATA_NATIVE_MODE = 0x42, /* Native mode enable */
        PATA_UDMA_TIMING = 0xB3, /* PATA timing for DMA/cable detect */
        PATA_PIO_TIMING = 0xAB,  /* PATA timing register */

        PORT0 = (1 << 1),
        PORT1 = (1 << 0),
        ALL_PORTS = PORT0 | PORT1,

        NATIVE_MODE_ALL = (1 << 7) | (1 << 6) | (1 << 5) | (1 << 4),

        SATA_EXT_PHY = (1 << 6), /* 0==use PATA, 1==ext phy */
};

static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
static int svia_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val);
static int svia_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val);
static void svia_noop_freeze(struct ata_port *ap);
static void vt6420_error_handler(struct ata_port *ap);
static int vt6421_pata_cable_detect(struct ata_port *ap);
static void vt6421_set_pio_mode(struct ata_port *ap, struct ata_device *adev);
static void vt6421_set_dma_mode(struct ata_port *ap, struct ata_device *adev);

static const struct pci_device_id svia_pci_tbl[] = {
        { PCI_VDEVICE(VIA, 0x5337), vt6420 },
        { PCI_VDEVICE(VIA, 0x0591), vt6420 },
        { PCI_VDEVICE(VIA, 0x3149), vt6420 },
        { PCI_VDEVICE(VIA, 0x3249), vt6421 },
        { PCI_VDEVICE(VIA, 0x5287), vt6420 },
        { PCI_VDEVICE(VIA, 0x5372), vt6420 },
        { PCI_VDEVICE(VIA, 0x7372), vt6420 },

        { }	/* terminate list */
};

static struct pci_driver svia_pci_driver = {
        .name = DRV_NAME,
        .id_table = svia_pci_tbl,
        .probe = svia_init_one,
#ifdef CONFIG_PM
        .suspend = ata_pci_device_suspend,
        .resume = ata_pci_device_resume,
#endif
        .remove = ata_pci_remove_one,
};

static struct scsi_host_template svia_sht = {
        .module = THIS_MODULE,
        .name = DRV_NAME,
        .ioctl = ata_scsi_ioctl,
        .queuecommand = ata_scsi_queuecmd,
        .can_queue = ATA_DEF_QUEUE,
        .this_id = ATA_SHT_THIS_ID,
        .sg_tablesize = LIBATA_MAX_PRD,
        .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
        .emulated = ATA_SHT_EMULATED,
        .use_clustering = ATA_SHT_USE_CLUSTERING,
        .proc_name = DRV_NAME,
        .dma_boundary = ATA_DMA_BOUNDARY,
        .slave_configure = ata_scsi_slave_config,
        .slave_destroy = ata_scsi_slave_destroy,
        .bios_param = ata_std_bios_param,
};

static const struct ata_port_operations vt6420_sata_ops = {
        .tf_load = ata_tf_load,
        .tf_read = ata_tf_read,
        .check_status = ata_check_status,
        .exec_command = ata_exec_command,
        .dev_select = ata_std_dev_select,

        .bmdma_setup = ata_bmdma_setup,
        .bmdma_start = ata_bmdma_start,
        .bmdma_stop = ata_bmdma_stop,
        .bmdma_status = ata_bmdma_status,

        .qc_prep = ata_qc_prep,
        .qc_issue = ata_qc_issue_prot,
        .data_xfer = ata_data_xfer,

        .freeze = svia_noop_freeze,
        .thaw = ata_bmdma_thaw,
        .error_handler = vt6420_error_handler,
        .post_internal_cmd = ata_bmdma_post_internal_cmd,

        .irq_clear = ata_bmdma_irq_clear,
        .irq_on = ata_irq_on,

        .port_start = ata_port_start,
};

static const struct ata_port_operations vt6421_pata_ops = {
        .set_piomode = vt6421_set_pio_mode,
        .set_dmamode = vt6421_set_dma_mode,

        .tf_load = ata_tf_load,
        .tf_read = ata_tf_read,
        .check_status = ata_check_status,
        .exec_command = ata_exec_command,
        .dev_select = ata_std_dev_select,

        .bmdma_setup = ata_bmdma_setup,
        .bmdma_start = ata_bmdma_start,
        .bmdma_stop = ata_bmdma_stop,
        .bmdma_status = ata_bmdma_status,

        .qc_prep = ata_qc_prep,
        .qc_issue = ata_qc_issue_prot,
        .data_xfer = ata_data_xfer,

        .freeze = ata_bmdma_freeze,
        .thaw = ata_bmdma_thaw,
        .error_handler = ata_bmdma_error_handler,
        .post_internal_cmd = ata_bmdma_post_internal_cmd,
        .cable_detect = vt6421_pata_cable_detect,

        .irq_clear = ata_bmdma_irq_clear,
        .irq_on = ata_irq_on,

        .port_start = ata_port_start,
};

static const struct ata_port_operations vt6421_sata_ops = {
        .tf_load = ata_tf_load,
        .tf_read = ata_tf_read,
        .check_status = ata_check_status,
        .exec_command = ata_exec_command,
        .dev_select = ata_std_dev_select,

        .bmdma_setup = ata_bmdma_setup,
        .bmdma_start = ata_bmdma_start,
        .bmdma_stop = ata_bmdma_stop,
        .bmdma_status = ata_bmdma_status,

        .qc_prep = ata_qc_prep,
        .qc_issue = ata_qc_issue_prot,
        .data_xfer = ata_data_xfer,

        .freeze = ata_bmdma_freeze,
        .thaw = ata_bmdma_thaw,
        .error_handler = ata_bmdma_error_handler,
        .post_internal_cmd = ata_bmdma_post_internal_cmd,
        .cable_detect = ata_cable_sata,

        .irq_clear = ata_bmdma_irq_clear,
        .irq_on = ata_irq_on,

        .scr_read = svia_scr_read,
        .scr_write = svia_scr_write,

        .port_start = ata_port_start,
};

static const struct ata_port_info vt6420_port_info = {
        .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
        .pio_mask = 0x1f,
        .mwdma_mask = 0x07,
        .udma_mask = ATA_UDMA6,
        .port_ops = &vt6420_sata_ops,
};

static struct ata_port_info vt6421_sport_info = {
        .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
        .pio_mask = 0x1f,
        .mwdma_mask = 0x07,
        .udma_mask = ATA_UDMA6,
        .port_ops = &vt6421_sata_ops,
};

static struct ata_port_info vt6421_pport_info = {
        .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_NO_LEGACY,
        .pio_mask = 0x1f,
        .mwdma_mask = 0,
        .udma_mask = ATA_UDMA6,
        .port_ops = &vt6421_pata_ops,
};

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("SCSI low-level driver for VIA SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, svia_pci_tbl);
MODULE_VERSION(DRV_VERSION);

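/*
 * SCR (SStatus/SError/SControl) accessors.  The registers are memory
 * mapped at ap->ioaddr.scr_addr with a 4-byte stride per register index.
 * Only the vt6421 wires these into its port operations; on the vt6420
 * they are called directly, and only, from vt6420_prereset(); see the
 * comment there for why.
 */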
static int svia_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val)
{
        if (sc_reg > SCR_CONTROL)
                return -EINVAL;
        *val = ioread32(ap->ioaddr.scr_addr + (4 * sc_reg));
        return 0;
}

static int svia_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val)
{
        if (sc_reg > SCR_CONTROL)
                return -EINVAL;
        iowrite32(val, ap->ioaddr.scr_addr + (4 * sc_reg));
        return 0;
}

static void svia_noop_freeze(struct ata_port *ap)
{
        /* Some VIA controllers choke if ATA_NIEN is manipulated in a
         * certain way.  Leave it alone and just clear pending IRQ.
         */
        ata_chk_status(ap);
        ata_bmdma_irq_clear(ap);
}

/**
 *	vt6420_prereset - prereset for vt6420
 *	@link: target ATA link
 *	@deadline: deadline jiffies for the operation
 *
 *	SCR registers on vt6420 are pieces of shit and may hang the
 *	whole machine completely if accessed with the wrong timing.
 *	To avoid such catastrophe, vt6420 doesn't provide generic SCR
 *	access operations, but uses SStatus and SControl only during
 *	boot probing in controlled way.
 *
 *	As the old (pre EH update) probing code is proven to work, we
 *	strictly follow the access pattern.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
static int vt6420_prereset(struct ata_link *link, unsigned long deadline)
{
        struct ata_port *ap = link->ap;
        struct ata_eh_context *ehc = &ap->link.eh_context;
        unsigned long timeout = jiffies + (HZ * 5);
        u32 sstatus, scontrol;
        int online;

        /* don't do any SCR stuff if we're not loading */
        if (!(ap->pflags & ATA_PFLAG_LOADING))
                goto skip_scr;

        /* Resume phy.  This is the old SATA resume sequence */
        svia_scr_write(ap, SCR_CONTROL, 0x300);
        svia_scr_read(ap, SCR_CONTROL, &scontrol); /* flush */

        /* wait for phy to become ready, if necessary */
        do {
                msleep(200);
                svia_scr_read(ap, SCR_STATUS, &sstatus);
                if ((sstatus & 0xf) != 1)
                        break;
        } while (time_before(jiffies, timeout));

        /* open code sata_print_link_status() */
        svia_scr_read(ap, SCR_STATUS, &sstatus);
        svia_scr_read(ap, SCR_CONTROL, &scontrol);

        online = (sstatus & 0xf) == 0x3;

        ata_port_printk(ap, KERN_INFO,
                        "SATA link %s 1.5 Gbps (SStatus %X SControl %X)\n",
                        online ? "up" : "down", sstatus, scontrol);

        /* SStatus is read one more time */
        svia_scr_read(ap, SCR_STATUS, &sstatus);

        if (!online) {
                /* tell EH to bail */
                ehc->i.action &= ~ATA_EH_RESET_MASK;
                return 0;
        }

 skip_scr:
        /* wait for !BSY */
        ata_wait_ready(ap, deadline);

        return 0;
}

static void vt6420_error_handler(struct ata_port *ap)
{
        return ata_bmdma_drive_eh(ap, vt6420_prereset, ata_std_softreset,
                                  NULL, ata_std_postreset);
}

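/*
 * Cable detection for the vt6421 PATA channel: the PATA_UDMA_TIMING
 * config register (also used for UDMA timing, per the register table
 * above) is read and bit 4 is taken as the 40-wire indication, so
 * ATA_CBL_PATA40 is reported when it is set and ATA_CBL_PATA80
 * otherwise.
 */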
static int vt6421_pata_cable_detect(struct ata_port *ap)
{
        struct pci_dev *pdev = to_pci_dev(ap->host->dev);
        u8 tmp;

        pci_read_config_byte(pdev, PATA_UDMA_TIMING, &tmp);
        if (tmp & 0x10)
                return ATA_CBL_PATA40;
        return ATA_CBL_PATA80;
}

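/*
 * PATA timing setup for the vt6421.  Each table below holds one raw
 * timing byte per transfer mode; the entry is selected by the mode
 * number relative to XFER_PIO_0 / XFER_UDMA_0 and written straight into
 * the corresponding PCI config register.
 */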
static void vt6421_set_pio_mode(struct ata_port *ap, struct ata_device *adev)
{
        struct pci_dev *pdev = to_pci_dev(ap->host->dev);
        static const u8 pio_bits[] = { 0xA8, 0x65, 0x65, 0x31, 0x20 };

        pci_write_config_byte(pdev, PATA_PIO_TIMING, pio_bits[adev->pio_mode - XFER_PIO_0]);
}

static void vt6421_set_dma_mode(struct ata_port *ap, struct ata_device *adev)
{
        struct pci_dev *pdev = to_pci_dev(ap->host->dev);
        static const u8 udma_bits[] = { 0xEE, 0xE8, 0xE6, 0xE4, 0xE2, 0xE1, 0xE0, 0xE0 };

        pci_write_config_byte(pdev, PATA_UDMA_TIMING, udma_bits[adev->dma_mode - XFER_UDMA_0]);
}

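/*
 * Minimum expected length for each of the six PCI BARs, indexed by BAR
 * number.  svia_init_one() rejects the device if any BAR is unassigned
 * or smaller than the value listed here for the detected board type.
 */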
static const unsigned int svia_bar_sizes[] = {
        8, 4, 8, 4, 16, 256
};

static const unsigned int vt6421_bar_sizes[] = {
        16, 16, 16, 16, 32, 128
};

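/*
 * The SCR register windows live in BAR 5.  The per-port stride differs
 * between the two chips: 128 bytes on the vt6420, 64 bytes on the
 * vt6421.
 */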
static void __iomem *svia_scr_addr(void __iomem *addr, unsigned int port)
{
        return addr + (port * 128);
}

static void __iomem *vt6421_scr_addr(void __iomem *addr, unsigned int port)
{
        return addr + (port * 64);
}

static void vt6421_init_addrs(struct ata_port *ap)
{
        void __iomem * const * iomap = ap->host->iomap;
        void __iomem *reg_addr = iomap[ap->port_no];
        void __iomem *bmdma_addr = iomap[4] + (ap->port_no * 8);
        struct ata_ioports *ioaddr = &ap->ioaddr;

        ioaddr->cmd_addr = reg_addr;
        ioaddr->altstatus_addr =
        ioaddr->ctl_addr = (void __iomem *)
                ((unsigned long)(reg_addr + 8) | ATA_PCI_CTL_OFS);
        ioaddr->bmdma_addr = bmdma_addr;
        ioaddr->scr_addr = vt6421_scr_addr(iomap[5], ap->port_no);

        ata_std_ports(ioaddr);

        ata_port_pbar_desc(ap, ap->port_no, -1, "port");
        ata_port_pbar_desc(ap, 4, ap->port_no * 8, "bmdma");
}

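/*
 * vt6420 host setup: ata_pci_prepare_sff_host() handles the standard
 * taskfile/bmdma resources; BAR 5 is then iomapped separately so both
 * ports can be given their SCR windows.
 */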
static int vt6420_prepare_host(struct pci_dev *pdev, struct ata_host **r_host)
{
        const struct ata_port_info *ppi[] = { &vt6420_port_info, NULL };
        struct ata_host *host;
        int rc;

        rc = ata_pci_prepare_sff_host(pdev, ppi, &host);
        if (rc)
                return rc;
        *r_host = host;

        rc = pcim_iomap_regions(pdev, 1 << 5, DRV_NAME);
        if (rc) {
                dev_printk(KERN_ERR, &pdev->dev, "failed to iomap PCI BAR 5\n");
                return rc;
        }

        host->ports[0]->ioaddr.scr_addr = svia_scr_addr(host->iomap[5], 0);
        host->ports[1]->ioaddr.scr_addr = svia_scr_addr(host->iomap[5], 1);

        return 0;
}

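/*
 * vt6421 host setup: three ports are allocated (two SATA and one PATA,
 * matching the ppi[] table), all six BARs are iomapped, and each port's
 * taskfile, bmdma and SCR addresses are filled in by vt6421_init_addrs().
 */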
static int vt6421_prepare_host(struct pci_dev *pdev, struct ata_host **r_host)
{
        const struct ata_port_info *ppi[] =
                { &vt6421_sport_info, &vt6421_sport_info, &vt6421_pport_info };
        struct ata_host *host;
        int i, rc;

        *r_host = host = ata_host_alloc_pinfo(&pdev->dev, ppi, ARRAY_SIZE(ppi));
        if (!host) {
                dev_printk(KERN_ERR, &pdev->dev, "failed to allocate host\n");
                return -ENOMEM;
        }

        rc = pcim_iomap_regions(pdev, 0x3f, DRV_NAME);
        if (rc) {
                dev_printk(KERN_ERR, &pdev->dev,
                           "failed to request/iomap PCI BARs (errno=%d)\n", rc);
                return rc;
        }
        host->iomap = pcim_iomap_table(pdev);

        for (i = 0; i < host->n_ports; i++)
                vt6421_init_addrs(host->ports[i]);

        rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
        if (rc)
                return rc;
        rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
        if (rc)
                return rc;

        return 0;
}

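/*
 * Common controller configuration done at probe time: report the routed
 * interrupt line, then make sure the SATA channels, their interrupt
 * gating and native mode are all enabled in PCI config space, flipping
 * only the bits that are not already set.
 */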
static void svia_configure(struct pci_dev *pdev)
{
        u8 tmp8;

        pci_read_config_byte(pdev, PCI_INTERRUPT_LINE, &tmp8);
        dev_printk(KERN_INFO, &pdev->dev, "routed to hard irq line %d\n",
                   (int) (tmp8 & 0xf0) == 0xf0 ? 0 : tmp8 & 0x0f);

        /* make sure SATA channels are enabled */
        pci_read_config_byte(pdev, SATA_CHAN_ENAB, &tmp8);
        if ((tmp8 & ALL_PORTS) != ALL_PORTS) {
                dev_printk(KERN_DEBUG, &pdev->dev,
                           "enabling SATA channels (0x%x)\n",
                           (int) tmp8);
                tmp8 |= ALL_PORTS;
                pci_write_config_byte(pdev, SATA_CHAN_ENAB, tmp8);
        }

        /* make sure interrupts for each channel are sent to us */
        pci_read_config_byte(pdev, SATA_INT_GATE, &tmp8);
        if ((tmp8 & ALL_PORTS) != ALL_PORTS) {
                dev_printk(KERN_DEBUG, &pdev->dev,
                           "enabling SATA channel interrupts (0x%x)\n",
                           (int) tmp8);
                tmp8 |= ALL_PORTS;
                pci_write_config_byte(pdev, SATA_INT_GATE, tmp8);
        }

        /* make sure native mode is enabled */
        pci_read_config_byte(pdev, SATA_NATIVE_MODE, &tmp8);
        if ((tmp8 & NATIVE_MODE_ALL) != NATIVE_MODE_ALL) {
                dev_printk(KERN_DEBUG, &pdev->dev,
                           "enabling SATA channel native mode (0x%x)\n",
                           (int) tmp8);
                tmp8 |= NATIVE_MODE_ALL;
                pci_write_config_byte(pdev, SATA_NATIVE_MODE, tmp8);
        }
}

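/*
 * Probe sequence: enable the PCI device, sanity-check the BAR layout
 * against the expected sizes for the board type, prepare the host via
 * the chip-specific helper, apply the common configuration above and
 * finally activate the host with a shared interrupt handler.
 */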
static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        static int printed_version;
        unsigned int i;
        int rc;
        struct ata_host *host;
        int board_id = (int) ent->driver_data;
        const unsigned *bar_sizes;

        if (!printed_version++)
                dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

        rc = pcim_enable_device(pdev);
        if (rc)
                return rc;

        if (board_id == vt6420)
                bar_sizes = &svia_bar_sizes[0];
        else
                bar_sizes = &vt6421_bar_sizes[0];

        for (i = 0; i < ARRAY_SIZE(svia_bar_sizes); i++)
                if ((pci_resource_start(pdev, i) == 0) ||
                    (pci_resource_len(pdev, i) < bar_sizes[i])) {
                        dev_printk(KERN_ERR, &pdev->dev,
                                   "invalid PCI BAR %u (sz 0x%llx, val 0x%llx)\n",
                                   i,
                                   (unsigned long long)pci_resource_start(pdev, i),
                                   (unsigned long long)pci_resource_len(pdev, i));
                        return -ENODEV;
                }

        if (board_id == vt6420)
                rc = vt6420_prepare_host(pdev, &host);
        else
                rc = vt6421_prepare_host(pdev, &host);
        if (rc)
                return rc;

        svia_configure(pdev);

        pci_set_master(pdev);
        return ata_host_activate(host, pdev->irq, ata_interrupt, IRQF_SHARED,
                                 &svia_sht);
}

static int __init svia_init(void)
{
        return pci_register_driver(&svia_pci_driver);
}

static void __exit svia_exit(void)
{
        pci_unregister_driver(&svia_pci_driver);
}

module_init(svia_init);
module_exit(svia_exit);