[PATCH] libata-dev: Move out the HSM code from ata_host_intr()
[linux-2.6/verdex.git] / drivers / scsi / libata-core.c
blob 7214530ac161ff0eec95fb7631a9307b738ce359
1 /*
2 * libata-core.c - helper library for ATA
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
35 #include <linux/config.h>
36 #include <linux/kernel.h>
37 #include <linux/module.h>
38 #include <linux/pci.h>
39 #include <linux/init.h>
40 #include <linux/list.h>
41 #include <linux/mm.h>
42 #include <linux/highmem.h>
43 #include <linux/spinlock.h>
44 #include <linux/blkdev.h>
45 #include <linux/delay.h>
46 #include <linux/timer.h>
47 #include <linux/interrupt.h>
48 #include <linux/completion.h>
49 #include <linux/suspend.h>
50 #include <linux/workqueue.h>
51 #include <linux/jiffies.h>
52 #include <linux/scatterlist.h>
53 #include <scsi/scsi.h>
54 #include "scsi_priv.h"
55 #include <scsi/scsi_cmnd.h>
56 #include <scsi/scsi_host.h>
57 #include <linux/libata.h>
58 #include <asm/io.h>
59 #include <asm/semaphore.h>
60 #include <asm/byteorder.h>
62 #include "libata.h"
64 static unsigned int ata_dev_init_params(struct ata_port *ap,
65 struct ata_device *dev);
66 static void ata_set_mode(struct ata_port *ap);
67 static unsigned int ata_dev_set_xfermode(struct ata_port *ap,
68 struct ata_device *dev);
69 static void ata_dev_xfermask(struct ata_port *ap, struct ata_device *dev);
70 static void ata_pio_error(struct ata_port *ap);
72 static unsigned int ata_unique_id = 1;
73 static struct workqueue_struct *ata_wq;
75 int atapi_enabled = 1;
76 module_param(atapi_enabled, int, 0444);
77 MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
79 int libata_fua = 0;
80 module_param_named(fua, libata_fua, int, 0444);
81 MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
83 MODULE_AUTHOR("Jeff Garzik");
84 MODULE_DESCRIPTION("Library module for ATA devices");
85 MODULE_LICENSE("GPL");
86 MODULE_VERSION(DRV_VERSION);
89 /**
90 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
91 * @tf: Taskfile to convert
92 * @fis: Buffer into which data will be output
93 * @pmp: Port multiplier port
95 * Converts a standard ATA taskfile to a Serial ATA
96 * FIS structure (Register - Host to Device).
98 * LOCKING:
99 * Inherited from caller.
102 void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp)
104 fis[0] = 0x27; /* Register - Host to Device FIS */
105 fis[1] = (pmp & 0xf) | (1 << 7); /* Port multiplier number,
106 bit 7 indicates Command FIS */
107 fis[2] = tf->command;
108 fis[3] = tf->feature;
110 fis[4] = tf->lbal;
111 fis[5] = tf->lbam;
112 fis[6] = tf->lbah;
113 fis[7] = tf->device;
115 fis[8] = tf->hob_lbal;
116 fis[9] = tf->hob_lbam;
117 fis[10] = tf->hob_lbah;
118 fis[11] = tf->hob_feature;
120 fis[12] = tf->nsect;
121 fis[13] = tf->hob_nsect;
122 fis[14] = 0;
123 fis[15] = tf->ctl;
125 fis[16] = 0;
126 fis[17] = 0;
127 fis[18] = 0;
128 fis[19] = 0;
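/*
 * Illustrative sketch of the result, assuming a taskfile set up for
 * READ DMA EXT and no port multiplier (pmp == 0):
 *
 *	u8 fis[20];
 *	ata_tf_to_fis(&qc->tf, fis, 0);
 *	// fis[0] == 0x27             H2D Register FIS type
 *	// fis[1] == 0x80             Command bit set, PMP port 0
 *	// fis[2] == ATA_CMD_READ_EXT the taskfile command
 *	// fis[4..6], fis[8..10]      LBA low/high halves
 *	// fis[12], fis[13]           sector count low/high
 */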
132 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
133 * @fis: Buffer from which data will be input
134 * @tf: Taskfile to output
136 * Converts a serial ATA FIS structure to a standard ATA taskfile.
138 * LOCKING:
139 * Inherited from caller.
142 void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
144 tf->command = fis[2]; /* status */
145 tf->feature = fis[3]; /* error */
147 tf->lbal = fis[4];
148 tf->lbam = fis[5];
149 tf->lbah = fis[6];
150 tf->device = fis[7];
152 tf->hob_lbal = fis[8];
153 tf->hob_lbam = fis[9];
154 tf->hob_lbah = fis[10];
156 tf->nsect = fis[12];
157 tf->hob_nsect = fis[13];
160 static const u8 ata_rw_cmds[] = {
161 /* pio multi */
162 ATA_CMD_READ_MULTI,
163 ATA_CMD_WRITE_MULTI,
164 ATA_CMD_READ_MULTI_EXT,
165 ATA_CMD_WRITE_MULTI_EXT,
169 ATA_CMD_WRITE_MULTI_FUA_EXT,
170 /* pio */
171 ATA_CMD_PIO_READ,
172 ATA_CMD_PIO_WRITE,
173 ATA_CMD_PIO_READ_EXT,
174 ATA_CMD_PIO_WRITE_EXT,
179 /* dma */
180 ATA_CMD_READ,
181 ATA_CMD_WRITE,
182 ATA_CMD_READ_EXT,
183 ATA_CMD_WRITE_EXT,
187 ATA_CMD_WRITE_FUA_EXT
191 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
192 * @qc: command to examine and configure
194 * Examine the device configuration and tf->flags to calculate
195 * the proper read/write commands and protocol to use.
197 * LOCKING:
198 * caller.
200 int ata_rwcmd_protocol(struct ata_queued_cmd *qc)
202 struct ata_taskfile *tf = &qc->tf;
203 struct ata_device *dev = qc->dev;
204 u8 cmd;
206 int index, fua, lba48, write;
208 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
209 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
210 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
212 if (dev->flags & ATA_DFLAG_PIO) {
213 tf->protocol = ATA_PROT_PIO;
214 index = dev->multi_count ? 0 : 8;
215 } else if (lba48 && (qc->ap->flags & ATA_FLAG_PIO_LBA48)) {
216 /* Unable to use DMA due to host limitation */
217 tf->protocol = ATA_PROT_PIO;
218 index = dev->multi_count ? 0 : 8;
219 } else {
220 tf->protocol = ATA_PROT_DMA;
221 index = 16;
224 cmd = ata_rw_cmds[index + fua + lba48 + write];
225 if (cmd) {
226 tf->command = cmd;
227 return 0;
229 return -1;
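/*
 * Illustrative example: the command is looked up as
 * ata_rw_cmds[index + fua + lba48 + write].  For a DMA LBA48 FUA
 * write, index == 16, fua == 4, lba48 == 2, write == 1, so the entry
 * is ata_rw_cmds[23] == ATA_CMD_WRITE_FUA_EXT.  Invalid combinations
 * (e.g. FUA without LBA48) hit a zero entry in the table and the
 * function returns -1.
 */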
233 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
234 * @pio_mask: pio_mask
235 * @mwdma_mask: mwdma_mask
236 * @udma_mask: udma_mask
238 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
239 * unsigned int xfer_mask.
241 * LOCKING:
242 * None.
244 * RETURNS:
245 * Packed xfer_mask.
247 static unsigned int ata_pack_xfermask(unsigned int pio_mask,
248 unsigned int mwdma_mask,
249 unsigned int udma_mask)
251 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
252 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
253 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
257 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
258 * @xfer_mask: xfer_mask to unpack
259 * @pio_mask: resulting pio_mask
260 * @mwdma_mask: resulting mwdma_mask
261 * @udma_mask: resulting udma_mask
263 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
264 * Any NULL destination masks will be ignored.
266 static void ata_unpack_xfermask(unsigned int xfer_mask,
267 unsigned int *pio_mask,
268 unsigned int *mwdma_mask,
269 unsigned int *udma_mask)
271 if (pio_mask)
272 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
273 if (mwdma_mask)
274 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
275 if (udma_mask)
276 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
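/*
 * Illustrative example: the two helpers are inverses.  For a device
 * supporting PIO0-4, MWDMA0-2 and UDMA0-5:
 *
 *	unsigned int xfer_mask = ata_pack_xfermask(0x1f, 0x07, 0x3f);
 *	unsigned int pio, mwdma, udma;
 *
 *	ata_unpack_xfermask(xfer_mask, &pio, &mwdma, &udma);
 *	// pio == 0x1f, mwdma == 0x07, udma == 0x3f again
 */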
279 static const struct ata_xfer_ent {
280 unsigned int shift, bits;
281 u8 base;
282 } ata_xfer_tbl[] = {
283 { ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
284 { ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
285 { ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
286 { -1, },
290 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
291 * @xfer_mask: xfer_mask of interest
293 * Return matching XFER_* value for @xfer_mask. Only the highest
294 * bit of @xfer_mask is considered.
296 * LOCKING:
297 * None.
299 * RETURNS:
300 * Matching XFER_* value, 0 if no match found.
302 static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
304 int highbit = fls(xfer_mask) - 1;
305 const struct ata_xfer_ent *ent;
307 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
308 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
309 return ent->base + highbit - ent->shift;
310 return 0;
314 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
315 * @xfer_mode: XFER_* of interest
317 * Return matching xfer_mask for @xfer_mode.
319 * LOCKING:
320 * None.
322 * RETURNS:
323 * Matching xfer_mask, 0 if no match found.
325 static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
327 const struct ata_xfer_ent *ent;
329 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
330 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
331 return 1 << (ent->shift + xfer_mode - ent->base);
332 return 0;
336 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
337 * @xfer_mode: XFER_* of interest
339 * Return matching xfer_shift for @xfer_mode.
341 * LOCKING:
342 * None.
344 * RETURNS:
345 * Matching xfer_shift, -1 if no match found.
347 static int ata_xfer_mode2shift(unsigned int xfer_mode)
349 const struct ata_xfer_ent *ent;
351 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
352 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
353 return ent->shift;
354 return -1;
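/*
 * Illustrative example, assuming the shifts in this tree
 * (ATA_SHIFT_PIO == 0, ATA_SHIFT_MWDMA == 5, ATA_SHIFT_UDMA == 8):
 * for an xfer_mask whose highest set bit is the UDMA5 bit, fls()
 * yields highbit == 13, which lands in the UDMA row of ata_xfer_tbl,
 * so ata_xfer_mask2mode() returns XFER_UDMA_0 + (13 - 8) ==
 * XFER_UDMA_5.  ata_xfer_mode2mask() and ata_xfer_mode2shift() walk
 * the same table in the opposite direction, and ata_mode_string()
 * below would print this mask as "UDMA/100".
 */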
358 * ata_mode_string - convert xfer_mask to string
359 * @xfer_mask: mask of bits supported; only highest bit counts.
361 * Determine string which represents the highest speed
362 * (highest bit in @xfer_mask).
364 * LOCKING:
365 * None.
367 * RETURNS:
368 * Constant C string representing highest speed listed in
369 * @xfer_mask, or the constant C string "<n/a>".
371 static const char *ata_mode_string(unsigned int xfer_mask)
373 static const char * const xfer_mode_str[] = {
374 "PIO0",
375 "PIO1",
376 "PIO2",
377 "PIO3",
378 "PIO4",
379 "MWDMA0",
380 "MWDMA1",
381 "MWDMA2",
382 "UDMA/16",
383 "UDMA/25",
384 "UDMA/33",
385 "UDMA/44",
386 "UDMA/66",
387 "UDMA/100",
388 "UDMA/133",
389 "UDMA7",
391 int highbit;
393 highbit = fls(xfer_mask) - 1;
394 if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
395 return xfer_mode_str[highbit];
396 return "<n/a>";
399 static void ata_dev_disable(struct ata_port *ap, struct ata_device *dev)
401 if (ata_dev_present(dev)) {
402 printk(KERN_WARNING "ata%u: dev %u disabled\n",
403 ap->id, dev->devno);
404 dev->class++;
409 * ata_pio_devchk - PATA device presence detection
410 * @ap: ATA channel to examine
411 * @device: Device to examine (starting at zero)
413 * This technique was originally described in
414 * Hale Landis's ATADRVR (www.ata-atapi.com), and
415 * later found its way into the ATA/ATAPI spec.
417 * Write a pattern to the ATA shadow registers,
418 * and if a device is present, it will respond by
419 * correctly storing and echoing back the
420 * ATA shadow register contents.
422 * LOCKING:
423 * caller.
426 static unsigned int ata_pio_devchk(struct ata_port *ap,
427 unsigned int device)
429 struct ata_ioports *ioaddr = &ap->ioaddr;
430 u8 nsect, lbal;
432 ap->ops->dev_select(ap, device);
434 outb(0x55, ioaddr->nsect_addr);
435 outb(0xaa, ioaddr->lbal_addr);
437 outb(0xaa, ioaddr->nsect_addr);
438 outb(0x55, ioaddr->lbal_addr);
440 outb(0x55, ioaddr->nsect_addr);
441 outb(0xaa, ioaddr->lbal_addr);
443 nsect = inb(ioaddr->nsect_addr);
444 lbal = inb(ioaddr->lbal_addr);
446 if ((nsect == 0x55) && (lbal == 0xaa))
447 return 1; /* we found a device */
449 return 0; /* nothing found */
453 * ata_mmio_devchk - PATA device presence detection
454 * @ap: ATA channel to examine
455 * @device: Device to examine (starting at zero)
457 * This technique was originally described in
458 * Hale Landis's ATADRVR (www.ata-atapi.com), and
459 * later found its way into the ATA/ATAPI spec.
461 * Write a pattern to the ATA shadow registers,
462 * and if a device is present, it will respond by
463 * correctly storing and echoing back the
464 * ATA shadow register contents.
466 * LOCKING:
467 * caller.
470 static unsigned int ata_mmio_devchk(struct ata_port *ap,
471 unsigned int device)
473 struct ata_ioports *ioaddr = &ap->ioaddr;
474 u8 nsect, lbal;
476 ap->ops->dev_select(ap, device);
478 writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
479 writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);
481 writeb(0xaa, (void __iomem *) ioaddr->nsect_addr);
482 writeb(0x55, (void __iomem *) ioaddr->lbal_addr);
484 writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
485 writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);
487 nsect = readb((void __iomem *) ioaddr->nsect_addr);
488 lbal = readb((void __iomem *) ioaddr->lbal_addr);
490 if ((nsect == 0x55) && (lbal == 0xaa))
491 return 1; /* we found a device */
493 return 0; /* nothing found */
497 * ata_devchk - PATA device presence detection
498 * @ap: ATA channel to examine
499 * @device: Device to examine (starting at zero)
501 * Dispatch ATA device presence detection, depending
502 * on whether we are using PIO or MMIO to talk to the
503 * ATA shadow registers.
505 * LOCKING:
506 * caller.
509 static unsigned int ata_devchk(struct ata_port *ap,
510 unsigned int device)
512 if (ap->flags & ATA_FLAG_MMIO)
513 return ata_mmio_devchk(ap, device);
514 return ata_pio_devchk(ap, device);
518 * ata_dev_classify - determine device type based on ATA-spec signature
519 * @tf: ATA taskfile register set for device to be identified
521 * Determine from taskfile register contents whether a device is
522 * ATA or ATAPI, as per "Signature and persistence" section
523 * of ATA/PI spec (volume 1, sect 5.14).
525 * LOCKING:
526 * None.
528 * RETURNS:
529 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
530 * in the event of failure.
533 unsigned int ata_dev_classify(const struct ata_taskfile *tf)
535 /* Apple's open source Darwin code hints that some devices only
536 * put a proper signature into the LBA mid/high registers, so we
537 * only check those. It's sufficient for uniqueness.
540 if (((tf->lbam == 0) && (tf->lbah == 0)) ||
541 ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
542 DPRINTK("found ATA device by sig\n");
543 return ATA_DEV_ATA;
546 if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
547 ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
548 DPRINTK("found ATAPI device by sig\n");
549 return ATA_DEV_ATAPI;
552 DPRINTK("unknown device\n");
553 return ATA_DEV_UNKNOWN;
557 * ata_dev_try_classify - Parse returned ATA device signature
558 * @ap: ATA channel to examine
559 * @device: Device to examine (starting at zero)
560 * @r_err: Value of error register on completion
562 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
563 * an ATA/ATAPI-defined set of values is placed in the ATA
564 * shadow registers, indicating the results of device detection
565 * and diagnostics.
567 * Select the ATA device, and read the values from the ATA shadow
568 * registers. Then parse according to the Error register value,
569 * and the spec-defined values examined by ata_dev_classify().
571 * LOCKING:
572 * caller.
574 * RETURNS:
575 * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
578 static unsigned int
579 ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
581 struct ata_taskfile tf;
582 unsigned int class;
583 u8 err;
585 ap->ops->dev_select(ap, device);
587 memset(&tf, 0, sizeof(tf));
589 ap->ops->tf_read(ap, &tf);
590 err = tf.feature;
591 if (r_err)
592 *r_err = err;
594 /* see if device passed diags */
595 if (err == 1)
596 /* do nothing */ ;
597 else if ((device == 0) && (err == 0x81))
598 /* do nothing */ ;
599 else
600 return ATA_DEV_NONE;
602 /* determine if device is ATA or ATAPI */
603 class = ata_dev_classify(&tf);
605 if (class == ATA_DEV_UNKNOWN)
606 return ATA_DEV_NONE;
607 if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
608 return ATA_DEV_NONE;
609 return class;
613 * ata_id_string - Convert IDENTIFY DEVICE page into string
614 * @id: IDENTIFY DEVICE results we will examine
615 * @s: string into which data is output
616 * @ofs: offset into identify device page
617 * @len: length of string to return. Must be an even number.
619 * The strings in the IDENTIFY DEVICE page are broken up into
620 * 16-bit chunks. Run through the string, and output each
621 * 8-bit chunk linearly, regardless of platform.
623 * LOCKING:
624 * caller.
627 void ata_id_string(const u16 *id, unsigned char *s,
628 unsigned int ofs, unsigned int len)
630 unsigned int c;
632 while (len > 0) {
633 c = id[ofs] >> 8;
634 *s = c;
635 s++;
637 c = id[ofs] & 0xff;
638 *s = c;
639 s++;
641 ofs++;
642 len -= 2;
647 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
648 * @id: IDENTIFY DEVICE results we will examine
649 * @s: string into which data is output
650 * @ofs: offset into identify device page
651 * @len: length of string to return. Must be an odd number.
653 * This function is identical to ata_id_string except that it
654 * trims trailing spaces and terminates the resulting string with
655 * null. @len must be actual maximum length (even number) + 1.
657 * LOCKING:
658 * caller.
660 void ata_id_c_string(const u16 *id, unsigned char *s,
661 unsigned int ofs, unsigned int len)
663 unsigned char *p;
665 WARN_ON(!(len & 1));
667 ata_id_string(id, s, ofs, len - 1);
669 p = s + strnlen(s, len - 1);
670 while (p > s && p[-1] == ' ')
671 p--;
672 *p = '\0';
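/*
 * Illustrative usage sketch, assuming ATA_ID_PROD_OFS (word 27) and a
 * 40-byte model field as defined in this tree's <linux/ata.h>:
 *
 *	unsigned char model[41];
 *
 *	ata_id_c_string(id, model, ATA_ID_PROD_OFS, sizeof(model));
 *	// model is NUL-terminated with trailing spaces trimmed;
 *	// sizeof(model) == 41 satisfies the odd-length contract above.
 */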
675 static u64 ata_id_n_sectors(const u16 *id)
677 if (ata_id_has_lba(id)) {
678 if (ata_id_has_lba48(id))
679 return ata_id_u64(id, 100);
680 else
681 return ata_id_u32(id, 60);
682 } else {
683 if (ata_id_current_chs_valid(id))
684 return ata_id_u32(id, 57);
685 else
686 return id[1] * id[3] * id[6];
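/*
 * Illustrative example: for an LBA48 drive, words 100-103 hold a
 * 64-bit sector count which ata_id_u64() assembles; a 200 GB drive
 * reports roughly 390721968 sectors (390721968 * 512 bytes).  A
 * pre-LBA drive instead yields the CHS product id[1] * id[3] * id[6]
 * (cylinders * heads * sectors).
 */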
691 * ata_noop_dev_select - Select device 0/1 on ATA bus
692 * @ap: ATA channel to manipulate
693 * @device: ATA device (numbered from zero) to select
695 * This function is a no-op; it intentionally does nothing.
697 * May be used as the dev_select() entry in ata_port_operations.
699 * LOCKING:
700 * caller.
702 void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
708 * ata_std_dev_select - Select device 0/1 on ATA bus
709 * @ap: ATA channel to manipulate
710 * @device: ATA device (numbered from zero) to select
712 * Use the method defined in the ATA specification to
713 * make either device 0, or device 1, active on the
714 * ATA channel. Works with both PIO and MMIO.
716 * May be used as the dev_select() entry in ata_port_operations.
718 * LOCKING:
719 * caller.
722 void ata_std_dev_select (struct ata_port *ap, unsigned int device)
724 u8 tmp;
726 if (device == 0)
727 tmp = ATA_DEVICE_OBS;
728 else
729 tmp = ATA_DEVICE_OBS | ATA_DEV1;
731 if (ap->flags & ATA_FLAG_MMIO) {
732 writeb(tmp, (void __iomem *) ap->ioaddr.device_addr);
733 } else {
734 outb(tmp, ap->ioaddr.device_addr);
736 ata_pause(ap); /* needed; also flushes, for mmio */
740 * ata_dev_select - Select device 0/1 on ATA bus
741 * @ap: ATA channel to manipulate
742 * @device: ATA device (numbered from zero) to select
743 * @wait: non-zero to wait for Status register BSY bit to clear
744 * @can_sleep: non-zero if context allows sleeping
746 * Use the method defined in the ATA specification to
747 * make either device 0, or device 1, active on the
748 * ATA channel.
750 * This is a high-level version of ata_std_dev_select(),
751 * which additionally provides the services of inserting
752 * the proper pauses and status polling, where needed.
754 * LOCKING:
755 * caller.
758 void ata_dev_select(struct ata_port *ap, unsigned int device,
759 unsigned int wait, unsigned int can_sleep)
761 VPRINTK("ENTER, ata%u: device %u, wait %u\n",
762 ap->id, device, wait);
764 if (wait)
765 ata_wait_idle(ap);
767 ap->ops->dev_select(ap, device);
769 if (wait) {
770 if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
771 msleep(150);
772 ata_wait_idle(ap);
777 * ata_dump_id - IDENTIFY DEVICE info debugging output
778 * @id: IDENTIFY DEVICE page to dump
780 * Dump selected 16-bit words from the given IDENTIFY DEVICE
781 * page.
783 * LOCKING:
784 * caller.
787 static inline void ata_dump_id(const u16 *id)
789 DPRINTK("49==0x%04x "
790 "53==0x%04x "
791 "63==0x%04x "
792 "64==0x%04x "
793 "75==0x%04x \n",
794 id[49],
795 id[53],
796 id[63],
797 id[64],
798 id[75]);
799 DPRINTK("80==0x%04x "
800 "81==0x%04x "
801 "82==0x%04x "
802 "83==0x%04x "
803 "84==0x%04x \n",
804 id[80],
805 id[81],
806 id[82],
807 id[83],
808 id[84]);
809 DPRINTK("88==0x%04x "
810 "93==0x%04x\n",
811 id[88],
812 id[93]);
816 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
817 * @id: IDENTIFY data to compute xfer mask from
819 * Compute the xfermask for this device. This is not as trivial
820 * as it seems if we must consider early devices correctly.
822 * FIXME: pre-IDE drive timing (do we care?).
824 * LOCKING:
825 * None.
827 * RETURNS:
828 * Computed xfermask
830 static unsigned int ata_id_xfermask(const u16 *id)
832 unsigned int pio_mask, mwdma_mask, udma_mask;
834 /* Usual case. Word 53 indicates word 64 is valid */
835 if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
836 pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
837 pio_mask <<= 3;
838 pio_mask |= 0x7;
839 } else {
840 /* If word 64 isn't valid then Word 51 high byte holds
841 * the PIO timing number for the maximum. Turn it into
842 * a mask.
844 pio_mask = (2 << (id[ATA_ID_OLD_PIO_MODES] & 0xFF)) - 1;
846 /* But wait... there's more. Design your standards by
847 * committee and you too can get a free iordy field to
848 * process. However, it's the speeds, not the modes, that
849 * are supported... Note that drivers using the timing API
850 * will get this right anyway.
854 mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
856 udma_mask = 0;
857 if (id[ATA_ID_FIELD_VALID] & (1 << 2))
858 udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
860 return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
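/*
 * Illustrative worked example: a typical UDMA/100 drive sets bit 1 of
 * word 53 and reports id[ATA_ID_PIO_MODES] & 0x03 == 0x03 (PIO3/4 on
 * top of the base modes), id[ATA_ID_MWDMA_MODES] & 0x07 == 0x07 and
 * id[ATA_ID_UDMA_MODES] & 0xff == 0x3f.  That gives
 * pio_mask == (0x03 << 3) | 0x7 == 0x1f, mwdma_mask == 0x07 and
 * udma_mask == 0x3f, which ata_pack_xfermask() folds into one word.
 */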
864 * ata_port_queue_task - Queue port_task
865 * @ap: The ata_port to queue port_task for
867 * Schedule @fn(@data) for execution after @delay jiffies using
868 * port_task. There is one port_task per port and it's the
869 * user's (low-level driver's) responsibility to make sure that only
870 * one task is active at any given time.
872 * libata core layer takes care of synchronization between
873 * port_task and EH. ata_port_queue_task() may be ignored for EH
874 * synchronization.
876 * LOCKING:
877 * Inherited from caller.
879 void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *), void *data,
880 unsigned long delay)
882 int rc;
884 if (ap->flags & ATA_FLAG_FLUSH_PORT_TASK)
885 return;
887 PREPARE_WORK(&ap->port_task, fn, data);
889 if (!delay)
890 rc = queue_work(ata_wq, &ap->port_task);
891 else
892 rc = queue_delayed_work(ata_wq, &ap->port_task, delay);
894 /* rc == 0 means that another user is using port task */
895 WARN_ON(rc == 0);
899 * ata_port_flush_task - Flush port_task
900 * @ap: The ata_port to flush port_task for
902 * After this function completes, port_task is guaranteed not to
903 * be running or scheduled.
905 * LOCKING:
906 * Kernel thread context (may sleep)
908 void ata_port_flush_task(struct ata_port *ap)
910 unsigned long flags;
912 DPRINTK("ENTER\n");
914 spin_lock_irqsave(&ap->host_set->lock, flags);
915 ap->flags |= ATA_FLAG_FLUSH_PORT_TASK;
916 spin_unlock_irqrestore(&ap->host_set->lock, flags);
918 DPRINTK("flush #1\n");
919 flush_workqueue(ata_wq);
922 * At this point, if a task is running, it's guaranteed to see
923 * the FLUSH flag; thus, it will never queue pio tasks again.
924 * Cancel and flush.
926 if (!cancel_delayed_work(&ap->port_task)) {
927 DPRINTK("flush #2\n");
928 flush_workqueue(ata_wq);
931 spin_lock_irqsave(&ap->host_set->lock, flags);
932 ap->flags &= ~ATA_FLAG_FLUSH_PORT_TASK;
933 spin_unlock_irqrestore(&ap->host_set->lock, flags);
935 DPRINTK("EXIT\n");
938 void ata_qc_complete_internal(struct ata_queued_cmd *qc)
940 struct completion *waiting = qc->private_data;
942 qc->ap->ops->tf_read(qc->ap, &qc->tf);
943 complete(waiting);
947 * ata_exec_internal - execute libata internal command
948 * @ap: Port to which the command is sent
949 * @dev: Device to which the command is sent
950 * @tf: Taskfile registers for the command and the result
951 * @dma_dir: Data transfer direction of the command
952 * @buf: Data buffer of the command
953 * @buflen: Length of data buffer
955 * Executes libata internal command with timeout. @tf contains
956 * command on entry and result on return. Timeout and error
957 * conditions are reported via return value. No recovery action
958 * is taken after a command times out. It's caller's duty to
959 * clean up after timeout.
961 * LOCKING:
962 * None. Should be called with kernel context, might sleep.
965 static unsigned
966 ata_exec_internal(struct ata_port *ap, struct ata_device *dev,
967 struct ata_taskfile *tf,
968 int dma_dir, void *buf, unsigned int buflen)
970 u8 command = tf->command;
971 struct ata_queued_cmd *qc;
972 DECLARE_COMPLETION(wait);
973 unsigned long flags;
974 unsigned int err_mask;
976 spin_lock_irqsave(&ap->host_set->lock, flags);
978 qc = ata_qc_new_init(ap, dev);
979 BUG_ON(qc == NULL);
981 qc->tf = *tf;
982 qc->dma_dir = dma_dir;
983 if (dma_dir != DMA_NONE) {
984 ata_sg_init_one(qc, buf, buflen);
985 qc->nsect = buflen / ATA_SECT_SIZE;
988 qc->private_data = &wait;
989 qc->complete_fn = ata_qc_complete_internal;
991 qc->err_mask = ata_qc_issue(qc);
992 if (qc->err_mask)
993 ata_qc_complete(qc);
995 spin_unlock_irqrestore(&ap->host_set->lock, flags);
997 if (!wait_for_completion_timeout(&wait, ATA_TMOUT_INTERNAL)) {
998 ata_port_flush_task(ap);
1000 spin_lock_irqsave(&ap->host_set->lock, flags);
1002 /* We're racing with irq here. If we lose, the
1003 * following test prevents us from completing the qc
1004 * again. If completion irq occurs after here but
1005 * before the caller cleans up, it will result in a
1006 * spurious interrupt. We can live with that.
1008 if (qc->flags & ATA_QCFLAG_ACTIVE) {
1009 qc->err_mask = AC_ERR_TIMEOUT;
1010 ata_qc_complete(qc);
1011 printk(KERN_WARNING "ata%u: qc timeout (cmd 0x%x)\n",
1012 ap->id, command);
1015 spin_unlock_irqrestore(&ap->host_set->lock, flags);
1018 *tf = qc->tf;
1019 err_mask = qc->err_mask;
1021 ata_qc_free(qc);
1023 /* XXX - Some LLDDs (sata_mv) disable port on command failure.
1024 * Until those drivers are fixed, we detect the condition
1025 * here, fail the command with AC_ERR_SYSTEM and reenable the
1026 * port.
1028 * Note that this doesn't change any behavior as internal
1029 * command failure results in disabling the device in the
1030 * higher layer for LLDDs without new reset/EH callbacks.
1032 * Kill the following code as soon as those drivers are fixed.
1034 if (ap->flags & ATA_FLAG_PORT_DISABLED) {
1035 err_mask |= AC_ERR_SYSTEM;
1036 ata_port_probe(ap);
1039 return err_mask;
1043 * ata_pio_need_iordy - check if iordy needed
1044 * @adev: ATA device
1046 * Check if the current speed of the device requires IORDY. Used
1047 * by various controllers for chip configuration.
1050 unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1052 int pio;
1053 int speed = adev->pio_mode - XFER_PIO_0;
1055 if (speed < 2)
1056 return 0;
1057 if (speed > 2)
1058 return 1;
1060 /* If we have no drive-specific rule, then PIO2 is non-IORDY */
1062 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
1063 pio = adev->id[ATA_ID_EIDE_PIO];
1064 /* Is the speed faster than the drive allows without IORDY? */
1065 if (pio) {
1066 /* These are cycle times, not frequencies - watch the logic! */
1067 if (pio > 240) /* PIO2 is 240ns per cycle */
1068 return 1;
1069 return 0;
1072 return 0;
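/*
 * Illustrative example: a drive running PIO2 (240ns cycle) that
 * reports a minimum non-IORDY cycle time of 300ns in
 * id[ATA_ID_EIDE_PIO] needs IORDY (300 > 240); one reporting 180ns
 * does not.  PIO3 and above always need IORDY, PIO0/1 never do.
 */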
1076 * ata_dev_read_id - Read ID data from the specified device
1077 * @ap: port on which target device resides
1078 * @dev: target device
1079 * @p_class: pointer to class of the target device (may be changed)
1080 * @post_reset: is this read ID post-reset?
1081 * @p_id: read IDENTIFY page (newly allocated)
1083 * Read ID data from the specified device. ATA_CMD_ID_ATA is
1084 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
1085 * devices. This function also issues ATA_CMD_INIT_DEV_PARAMS
1086 * for pre-ATA4 drives.
1088 * LOCKING:
1089 * Kernel thread context (may sleep)
1091 * RETURNS:
1092 * 0 on success, -errno otherwise.
1094 static int ata_dev_read_id(struct ata_port *ap, struct ata_device *dev,
1095 unsigned int *p_class, int post_reset, u16 **p_id)
1097 unsigned int class = *p_class;
1098 struct ata_taskfile tf;
1099 unsigned int err_mask = 0;
1100 u16 *id;
1101 const char *reason;
1102 int rc;
1104 DPRINTK("ENTER, host %u, dev %u\n", ap->id, dev->devno);
1106 ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
1108 id = kmalloc(sizeof(id[0]) * ATA_ID_WORDS, GFP_KERNEL);
1109 if (id == NULL) {
1110 rc = -ENOMEM;
1111 reason = "out of memory";
1112 goto err_out;
1115 retry:
1116 ata_tf_init(ap, &tf, dev->devno);
1118 switch (class) {
1119 case ATA_DEV_ATA:
1120 tf.command = ATA_CMD_ID_ATA;
1121 break;
1122 case ATA_DEV_ATAPI:
1123 tf.command = ATA_CMD_ID_ATAPI;
1124 break;
1125 default:
1126 rc = -ENODEV;
1127 reason = "unsupported class";
1128 goto err_out;
1131 tf.protocol = ATA_PROT_PIO;
1133 err_mask = ata_exec_internal(ap, dev, &tf, DMA_FROM_DEVICE,
1134 id, sizeof(id[0]) * ATA_ID_WORDS);
1135 if (err_mask) {
1136 rc = -EIO;
1137 reason = "I/O error";
1138 goto err_out;
1141 swap_buf_le16(id, ATA_ID_WORDS);
1143 /* sanity check */
1144 if ((class == ATA_DEV_ATA) != ata_id_is_ata(id)) {
1145 rc = -EINVAL;
1146 reason = "device reports illegal type";
1147 goto err_out;
1150 if (post_reset && class == ATA_DEV_ATA) {
1152 * The exact sequence expected by certain pre-ATA4 drives is:
1153 * SRST RESET
1154 * IDENTIFY
1155 * INITIALIZE DEVICE PARAMETERS
1156 * anything else..
1157 * Some drives were very specific about that exact sequence.
1159 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
1160 err_mask = ata_dev_init_params(ap, dev);
1161 if (err_mask) {
1162 rc = -EIO;
1163 reason = "INIT_DEV_PARAMS failed";
1164 goto err_out;
1167 /* current CHS translation info (id[53-58]) might be
1168 * changed. reread the identify device info.
1170 post_reset = 0;
1171 goto retry;
1175 *p_class = class;
1176 *p_id = id;
1177 return 0;
1179 err_out:
1180 printk(KERN_WARNING "ata%u: dev %u failed to IDENTIFY (%s)\n",
1181 ap->id, dev->devno, reason);
1182 kfree(id);
1183 return rc;
1186 static inline u8 ata_dev_knobble(const struct ata_port *ap,
1187 struct ata_device *dev)
1189 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
1193 * ata_dev_configure - Configure the specified ATA/ATAPI device
1194 * @ap: Port on which target device resides
1195 * @dev: Target device to configure
1196 * @print_info: Enable device info printout
1198 * Configure @dev according to @dev->id. Generic and low-level
1199 * driver specific fixups are also applied.
1201 * LOCKING:
1202 * Kernel thread context (may sleep)
1204 * RETURNS:
1205 * 0 on success, -errno otherwise
1207 static int ata_dev_configure(struct ata_port *ap, struct ata_device *dev,
1208 int print_info)
1210 const u16 *id = dev->id;
1211 unsigned int xfer_mask;
1212 int i, rc;
1214 if (!ata_dev_present(dev)) {
1215 DPRINTK("ENTER/EXIT (host %u, dev %u) -- nodev\n",
1216 ap->id, dev->devno);
1217 return 0;
1220 DPRINTK("ENTER, host %u, dev %u\n", ap->id, dev->devno);
1222 /* print device capabilities */
1223 if (print_info)
1224 printk(KERN_DEBUG "ata%u: dev %u cfg 49:%04x 82:%04x 83:%04x "
1225 "84:%04x 85:%04x 86:%04x 87:%04x 88:%04x\n",
1226 ap->id, dev->devno, id[49], id[82], id[83],
1227 id[84], id[85], id[86], id[87], id[88]);
1229 /* initialize to-be-configured parameters */
1230 dev->flags = 0;
1231 dev->max_sectors = 0;
1232 dev->cdb_len = 0;
1233 dev->n_sectors = 0;
1234 dev->cylinders = 0;
1235 dev->heads = 0;
1236 dev->sectors = 0;
1239 * common ATA, ATAPI feature tests
1242 /* find max transfer mode; for printk only */
1243 xfer_mask = ata_id_xfermask(id);
1245 ata_dump_id(id);
1247 /* ATA-specific feature tests */
1248 if (dev->class == ATA_DEV_ATA) {
1249 dev->n_sectors = ata_id_n_sectors(id);
1251 if (ata_id_has_lba(id)) {
1252 const char *lba_desc;
1254 lba_desc = "LBA";
1255 dev->flags |= ATA_DFLAG_LBA;
1256 if (ata_id_has_lba48(id)) {
1257 dev->flags |= ATA_DFLAG_LBA48;
1258 lba_desc = "LBA48";
1261 /* print device info to dmesg */
1262 if (print_info)
1263 printk(KERN_INFO "ata%u: dev %u ATA-%d, "
1264 "max %s, %Lu sectors: %s\n",
1265 ap->id, dev->devno,
1266 ata_id_major_version(id),
1267 ata_mode_string(xfer_mask),
1268 (unsigned long long)dev->n_sectors,
1269 lba_desc);
1270 } else {
1271 /* CHS */
1273 /* Default translation */
1274 dev->cylinders = id[1];
1275 dev->heads = id[3];
1276 dev->sectors = id[6];
1278 if (ata_id_current_chs_valid(id)) {
1279 /* Current CHS translation is valid. */
1280 dev->cylinders = id[54];
1281 dev->heads = id[55];
1282 dev->sectors = id[56];
1285 /* print device info to dmesg */
1286 if (print_info)
1287 printk(KERN_INFO "ata%u: dev %u ATA-%d, "
1288 "max %s, %Lu sectors: CHS %u/%u/%u\n",
1289 ap->id, dev->devno,
1290 ata_id_major_version(id),
1291 ata_mode_string(xfer_mask),
1292 (unsigned long long)dev->n_sectors,
1293 dev->cylinders, dev->heads, dev->sectors);
1296 if (dev->id[59] & 0x100) {
1297 dev->multi_count = dev->id[59] & 0xff;
1298 DPRINTK("ata%u: dev %u multi count %u\n",
1299 ap->id, dev->devno, dev->multi_count);
1302 dev->cdb_len = 16;
1305 /* ATAPI-specific feature tests */
1306 else if (dev->class == ATA_DEV_ATAPI) {
1307 rc = atapi_cdb_len(id);
1308 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
1309 printk(KERN_WARNING "ata%u: unsupported CDB len\n", ap->id);
1310 rc = -EINVAL;
1311 goto err_out_nosup;
1313 dev->cdb_len = (unsigned int) rc;
1315 if (ata_id_cdb_intr(dev->id))
1316 dev->flags |= ATA_DFLAG_CDB_INTR;
1318 /* print device info to dmesg */
1319 if (print_info)
1320 printk(KERN_INFO "ata%u: dev %u ATAPI, max %s\n",
1321 ap->id, dev->devno, ata_mode_string(xfer_mask));
1324 ap->host->max_cmd_len = 0;
1325 for (i = 0; i < ATA_MAX_DEVICES; i++)
1326 ap->host->max_cmd_len = max_t(unsigned int,
1327 ap->host->max_cmd_len,
1328 ap->device[i].cdb_len);
1330 /* limit bridge transfers to udma5, 200 sectors */
1331 if (ata_dev_knobble(ap, dev)) {
1332 if (print_info)
1333 printk(KERN_INFO "ata%u(%u): applying bridge limits\n",
1334 ap->id, dev->devno);
1335 dev->udma_mask &= ATA_UDMA5;
1336 dev->max_sectors = ATA_MAX_SECTORS;
1339 if (ap->ops->dev_config)
1340 ap->ops->dev_config(ap, dev);
1342 DPRINTK("EXIT, drv_stat = 0x%x\n", ata_chk_status(ap));
1343 return 0;
1345 err_out_nosup:
1346 DPRINTK("EXIT, err\n");
1347 return rc;
1351 * ata_bus_probe - Reset and probe ATA bus
1352 * @ap: Bus to probe
1354 * Master ATA bus probing function. Initiates a hardware-dependent
1355 * bus reset, then attempts to identify any devices found on
1356 * the bus.
1358 * LOCKING:
1359 * PCI/etc. bus probe sem.
1361 * RETURNS:
1362 * Zero on success, non-zero on error.
1365 static int ata_bus_probe(struct ata_port *ap)
1367 unsigned int classes[ATA_MAX_DEVICES];
1368 unsigned int i, rc, found = 0;
1370 ata_port_probe(ap);
1372 /* reset and determine device classes */
1373 for (i = 0; i < ATA_MAX_DEVICES; i++)
1374 classes[i] = ATA_DEV_UNKNOWN;
1376 if (ap->ops->probe_reset) {
1377 rc = ap->ops->probe_reset(ap, classes);
1378 if (rc) {
1379 printk(KERN_ERR "ata%u: reset failed (errno=%d)\n", ap->id, rc);
1380 return rc;
1382 } else {
1383 ap->ops->phy_reset(ap);
1385 if (!(ap->flags & ATA_FLAG_PORT_DISABLED))
1386 for (i = 0; i < ATA_MAX_DEVICES; i++)
1387 classes[i] = ap->device[i].class;
1389 ata_port_probe(ap);
1392 for (i = 0; i < ATA_MAX_DEVICES; i++)
1393 if (classes[i] == ATA_DEV_UNKNOWN)
1394 classes[i] = ATA_DEV_NONE;
1396 /* read IDENTIFY page and configure devices */
1397 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1398 struct ata_device *dev = &ap->device[i];
1400 dev->class = classes[i];
1402 if (!ata_dev_present(dev))
1403 continue;
1405 WARN_ON(dev->id != NULL);
1406 if (ata_dev_read_id(ap, dev, &dev->class, 1, &dev->id)) {
1407 dev->class = ATA_DEV_NONE;
1408 continue;
1411 if (ata_dev_configure(ap, dev, 1)) {
1412 ata_dev_disable(ap, dev);
1413 continue;
1416 found = 1;
1419 if (!found)
1420 goto err_out_disable;
1422 ata_set_mode(ap);
1423 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1424 goto err_out_disable;
1426 return 0;
1428 err_out_disable:
1429 ap->ops->port_disable(ap);
1430 return -1;
1434 * ata_port_probe - Mark port as enabled
1435 * @ap: Port for which we indicate enablement
1437 * Modify @ap data structure such that the system
1438 * thinks that the entire port is enabled.
1440 * LOCKING: host_set lock, or some other form of
1441 * serialization.
1444 void ata_port_probe(struct ata_port *ap)
1446 ap->flags &= ~ATA_FLAG_PORT_DISABLED;
1450 * sata_print_link_status - Print SATA link status
1451 * @ap: SATA port to printk link status about
1453 * This function prints link speed and status of a SATA link.
1455 * LOCKING:
1456 * None.
1458 static void sata_print_link_status(struct ata_port *ap)
1460 u32 sstatus, tmp;
1461 const char *speed;
1463 if (!ap->ops->scr_read)
1464 return;
1466 sstatus = scr_read(ap, SCR_STATUS);
1468 if (sata_dev_present(ap)) {
1469 tmp = (sstatus >> 4) & 0xf;
1470 if (tmp & (1 << 0))
1471 speed = "1.5";
1472 else if (tmp & (1 << 1))
1473 speed = "3.0";
1474 else
1475 speed = "<unknown>";
1476 printk(KERN_INFO "ata%u: SATA link up %s Gbps (SStatus %X)\n",
1477 ap->id, speed, sstatus);
1478 } else {
1479 printk(KERN_INFO "ata%u: SATA link down (SStatus %X)\n",
1480 ap->id, sstatus);
1485 * __sata_phy_reset - Wake/reset a low-level SATA PHY
1486 * @ap: SATA port associated with target SATA PHY.
1488 * This function issues commands to standard SATA Sxxx
1489 * PHY registers, to wake up the phy (and device), and
1490 * clear any reset condition.
1492 * LOCKING:
1493 * PCI/etc. bus probe sem.
1496 void __sata_phy_reset(struct ata_port *ap)
1498 u32 sstatus;
1499 unsigned long timeout = jiffies + (HZ * 5);
1501 if (ap->flags & ATA_FLAG_SATA_RESET) {
1502 /* issue phy wake/reset */
1503 scr_write_flush(ap, SCR_CONTROL, 0x301);
1504 /* Couldn't find anything in SATA I/II specs, but
1505 * AHCI-1.1 10.4.2 says at least 1 ms. */
1506 mdelay(1);
1508 scr_write_flush(ap, SCR_CONTROL, 0x300); /* phy wake/clear reset */
1510 /* wait for phy to become ready, if necessary */
1511 do {
1512 msleep(200);
1513 sstatus = scr_read(ap, SCR_STATUS);
1514 if ((sstatus & 0xf) != 1)
1515 break;
1516 } while (time_before(jiffies, timeout));
1518 /* print link status */
1519 sata_print_link_status(ap);
1521 /* TODO: phy layer with polling, timeouts, etc. */
1522 if (sata_dev_present(ap))
1523 ata_port_probe(ap);
1524 else
1525 ata_port_disable(ap);
1527 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1528 return;
1530 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
1531 ata_port_disable(ap);
1532 return;
1535 ap->cbl = ATA_CBL_SATA;
1539 * sata_phy_reset - Reset SATA bus.
1540 * @ap: SATA port associated with target SATA PHY.
1542 * This function resets the SATA bus, and then probes
1543 * the bus for devices.
1545 * LOCKING:
1546 * PCI/etc. bus probe sem.
1549 void sata_phy_reset(struct ata_port *ap)
1551 __sata_phy_reset(ap);
1552 if (ap->flags & ATA_FLAG_PORT_DISABLED)
1553 return;
1554 ata_bus_reset(ap);
1558 * ata_dev_pair - return other device on cable
1559 * @ap: port
1560 * @adev: device
1562 * Obtain the other device on the same cable, or, if none is
1563 * present, return NULL.
1566 struct ata_device *ata_dev_pair(struct ata_port *ap, struct ata_device *adev)
1568 struct ata_device *pair = &ap->device[1 - adev->devno];
1569 if (!ata_dev_present(pair))
1570 return NULL;
1571 return pair;
1575 * ata_port_disable - Disable port.
1576 * @ap: Port to be disabled.
1578 * Modify @ap data structure such that the system
1579 * thinks that the entire port is disabled, and should
1580 * never attempt to probe or communicate with devices
1581 * on this port.
1583 * LOCKING: host_set lock, or some other form of
1584 * serialization.
1587 void ata_port_disable(struct ata_port *ap)
1589 ap->device[0].class = ATA_DEV_NONE;
1590 ap->device[1].class = ATA_DEV_NONE;
1591 ap->flags |= ATA_FLAG_PORT_DISABLED;
1595 * This mode timing computation functionality is ported over from
1596 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
1599 * PIO 0-5, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
1600 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
1601 * for PIO 5, which is a nonstandard extension and UDMA6, which
1602 * is currently supported only by Maxtor drives.
1605 static const struct ata_timing ata_timing[] = {
1607 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 },
1608 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 },
1609 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 },
1610 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 },
1612 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 },
1613 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 },
1614 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 },
1616 /* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, */
1618 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 },
1619 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 },
1620 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 },
1622 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 },
1623 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 },
1624 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 },
1626 /* { XFER_PIO_5, 20, 50, 30, 100, 50, 30, 100, 0 }, */
1627 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 },
1628 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 },
1630 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 },
1631 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 },
1632 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 },
1634 /* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, */
1636 { 0xFF }
1639 #define ENOUGH(v,unit) (((v)-1)/(unit)+1)
1640 #define EZ(v,unit) ((v)?ENOUGH(v,unit):0)
1642 static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
1644 q->setup = EZ(t->setup * 1000, T);
1645 q->act8b = EZ(t->act8b * 1000, T);
1646 q->rec8b = EZ(t->rec8b * 1000, T);
1647 q->cyc8b = EZ(t->cyc8b * 1000, T);
1648 q->active = EZ(t->active * 1000, T);
1649 q->recover = EZ(t->recover * 1000, T);
1650 q->cycle = EZ(t->cycle * 1000, T);
1651 q->udma = EZ(t->udma * 1000, UT);
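/*
 * Illustrative arithmetic, assuming T is the bus clock period in
 * picoseconds (hence the * 1000 scaling of the nanosecond timings):
 * quantizing a 70ns active time on a 33 MHz bus (T == 30000) gives
 * EZ(70000, 30000) == ENOUGH(70000, 30000) ==
 * (70000 - 1) / 30000 + 1 == 3 clocks.  ENOUGH() rounds up, so the
 * quantized timing is never faster than requested; EZ() keeps a zero
 * ("not applicable") entry at zero.
 */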
1654 void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
1655 struct ata_timing *m, unsigned int what)
1657 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
1658 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
1659 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
1660 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
1661 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
1662 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
1663 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
1664 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
1667 static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
1669 const struct ata_timing *t;
1671 for (t = ata_timing; t->mode != speed; t++)
1672 if (t->mode == 0xFF)
1673 return NULL;
1674 return t;
1677 int ata_timing_compute(struct ata_device *adev, unsigned short speed,
1678 struct ata_timing *t, int T, int UT)
1680 const struct ata_timing *s;
1681 struct ata_timing p;
1684 * Find the mode.
1687 if (!(s = ata_timing_find_mode(speed)))
1688 return -EINVAL;
1690 memcpy(t, s, sizeof(*s));
1693 * If the drive is an EIDE drive, it can tell us it needs extended
1694 * PIO/MW_DMA cycle timing.
1697 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
1698 memset(&p, 0, sizeof(p));
1699 if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
1700 if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
1701 else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
1702 } else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
1703 p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
1705 ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
1709 * Convert the timing to bus clock counts.
1712 ata_timing_quantize(t, t, T, UT);
1715 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
1716 * S.M.A.R.T. and some other commands. We have to ensure that the
1717 * DMA cycle timing is no faster than the fastest PIO timing.
1720 if (speed > XFER_PIO_4) {
1721 ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
1722 ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
1726 * Lengthen active & recovery time so that cycle time is correct.
1729 if (t->act8b + t->rec8b < t->cyc8b) {
1730 t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
1731 t->rec8b = t->cyc8b - t->act8b;
1734 if (t->active + t->recover < t->cycle) {
1735 t->active += (t->cycle - (t->active + t->recover)) / 2;
1736 t->recover = t->cycle - t->active;
1739 return 0;
1742 static int ata_dev_set_mode(struct ata_port *ap, struct ata_device *dev)
1744 unsigned int err_mask;
1745 int rc;
1747 if (dev->xfer_shift == ATA_SHIFT_PIO)
1748 dev->flags |= ATA_DFLAG_PIO;
1750 err_mask = ata_dev_set_xfermode(ap, dev);
1751 if (err_mask) {
1752 printk(KERN_ERR
1753 "ata%u: failed to set xfermode (err_mask=0x%x)\n",
1754 ap->id, err_mask);
1755 return -EIO;
1758 rc = ata_dev_revalidate(ap, dev, 0);
1759 if (rc) {
1760 printk(KERN_ERR
1761 "ata%u: failed to revalidate after set xfermode\n",
1762 ap->id);
1763 return rc;
1766 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
1767 dev->xfer_shift, (int)dev->xfer_mode);
1769 printk(KERN_INFO "ata%u: dev %u configured for %s\n",
1770 ap->id, dev->devno,
1771 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
1772 return 0;
1775 static int ata_host_set_pio(struct ata_port *ap)
1777 int i;
1779 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1780 struct ata_device *dev = &ap->device[i];
1782 if (!ata_dev_present(dev))
1783 continue;
1785 if (!dev->pio_mode) {
1786 printk(KERN_WARNING "ata%u: no PIO support for device %d.\n", ap->id, i);
1787 return -1;
1790 dev->xfer_mode = dev->pio_mode;
1791 dev->xfer_shift = ATA_SHIFT_PIO;
1792 if (ap->ops->set_piomode)
1793 ap->ops->set_piomode(ap, dev);
1796 return 0;
1799 static void ata_host_set_dma(struct ata_port *ap)
1801 int i;
1803 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1804 struct ata_device *dev = &ap->device[i];
1806 if (!ata_dev_present(dev) || !dev->dma_mode)
1807 continue;
1809 dev->xfer_mode = dev->dma_mode;
1810 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
1811 if (ap->ops->set_dmamode)
1812 ap->ops->set_dmamode(ap, dev);
1817 * ata_set_mode - Program timings and issue SET FEATURES - XFER
1818 * @ap: port on which timings will be programmed
1820 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.).
1822 * LOCKING:
1823 * PCI/etc. bus probe sem.
1825 static void ata_set_mode(struct ata_port *ap)
1827 int i, rc;
1829 /* step 1: calculate xfer_mask */
1830 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1831 struct ata_device *dev = &ap->device[i];
1832 unsigned int pio_mask, dma_mask;
1834 if (!ata_dev_present(dev))
1835 continue;
1837 ata_dev_xfermask(ap, dev);
1839 /* TODO: let LLDD filter dev->*_mask here */
1841 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
1842 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
1843 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
1844 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
1847 /* step 2: always set host PIO timings */
1848 rc = ata_host_set_pio(ap);
1849 if (rc)
1850 goto err_out;
1852 /* step 3: set host DMA timings */
1853 ata_host_set_dma(ap);
1855 /* step 4: update devices' xfer mode */
1856 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1857 struct ata_device *dev = &ap->device[i];
1859 if (!ata_dev_present(dev))
1860 continue;
1862 if (ata_dev_set_mode(ap, dev))
1863 goto err_out;
1866 if (ap->ops->post_set_mode)
1867 ap->ops->post_set_mode(ap);
1869 return;
1871 err_out:
1872 ata_port_disable(ap);
1876 * ata_tf_to_host - issue ATA taskfile to host controller
1877 * @ap: port to which command is being issued
1878 * @tf: ATA taskfile register set
1880 * Issues ATA taskfile register set to ATA host controller,
1881 * with proper synchronization with interrupt handler and
1882 * other threads.
1884 * LOCKING:
1885 * spin_lock_irqsave(host_set lock)
1888 static inline void ata_tf_to_host(struct ata_port *ap,
1889 const struct ata_taskfile *tf)
1891 ap->ops->tf_load(ap, tf);
1892 ap->ops->exec_command(ap, tf);
1896 * ata_busy_sleep - sleep until BSY clears, or timeout
1897 * @ap: port containing status register to be polled
1898 * @tmout_pat: impatience timeout
1899 * @tmout: overall timeout
1901 * Sleep until ATA Status register bit BSY clears,
1902 * or a timeout occurs.
1904 * LOCKING: None.
1907 unsigned int ata_busy_sleep (struct ata_port *ap,
1908 unsigned long tmout_pat, unsigned long tmout)
1910 unsigned long timer_start, timeout;
1911 u8 status;
1913 status = ata_busy_wait(ap, ATA_BUSY, 300);
1914 timer_start = jiffies;
1915 timeout = timer_start + tmout_pat;
1916 while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
1917 msleep(50);
1918 status = ata_busy_wait(ap, ATA_BUSY, 3);
1921 if (status & ATA_BUSY)
1922 printk(KERN_WARNING "ata%u is slow to respond, "
1923 "please be patient\n", ap->id);
1925 timeout = timer_start + tmout;
1926 while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
1927 msleep(50);
1928 status = ata_chk_status(ap);
1931 if (status & ATA_BUSY) {
1932 printk(KERN_ERR "ata%u failed to respond (%lu secs)\n",
1933 ap->id, tmout / HZ);
1934 return 1;
1937 return 0;
1940 static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask)
1942 struct ata_ioports *ioaddr = &ap->ioaddr;
1943 unsigned int dev0 = devmask & (1 << 0);
1944 unsigned int dev1 = devmask & (1 << 1);
1945 unsigned long timeout;
1947 /* if device 0 was found in ata_devchk, wait for its
1948 * BSY bit to clear
1950 if (dev0)
1951 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
1953 /* if device 1 was found in ata_devchk, wait for
1954 * register access, then wait for BSY to clear
1956 timeout = jiffies + ATA_TMOUT_BOOT;
1957 while (dev1) {
1958 u8 nsect, lbal;
1960 ap->ops->dev_select(ap, 1);
1961 if (ap->flags & ATA_FLAG_MMIO) {
1962 nsect = readb((void __iomem *) ioaddr->nsect_addr);
1963 lbal = readb((void __iomem *) ioaddr->lbal_addr);
1964 } else {
1965 nsect = inb(ioaddr->nsect_addr);
1966 lbal = inb(ioaddr->lbal_addr);
1968 if ((nsect == 1) && (lbal == 1))
1969 break;
1970 if (time_after(jiffies, timeout)) {
1971 dev1 = 0;
1972 break;
1974 msleep(50); /* give drive a breather */
1976 if (dev1)
1977 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
1979 /* is all this really necessary? */
1980 ap->ops->dev_select(ap, 0);
1981 if (dev1)
1982 ap->ops->dev_select(ap, 1);
1983 if (dev0)
1984 ap->ops->dev_select(ap, 0);
1987 static unsigned int ata_bus_softreset(struct ata_port *ap,
1988 unsigned int devmask)
1990 struct ata_ioports *ioaddr = &ap->ioaddr;
1992 DPRINTK("ata%u: bus reset via SRST\n", ap->id);
1994 /* software reset. causes dev0 to be selected */
1995 if (ap->flags & ATA_FLAG_MMIO) {
1996 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
1997 udelay(20); /* FIXME: flush */
1998 writeb(ap->ctl | ATA_SRST, (void __iomem *) ioaddr->ctl_addr);
1999 udelay(20); /* FIXME: flush */
2000 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
2001 } else {
2002 outb(ap->ctl, ioaddr->ctl_addr);
2003 udelay(10);
2004 outb(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
2005 udelay(10);
2006 outb(ap->ctl, ioaddr->ctl_addr);
2009 /* spec mandates ">= 2ms" before checking status.
2010 * We wait 150ms, because that was the magic delay used for
2011 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
2012 * between when the ATA command register is written, and then
2013 * status is checked. Because waiting for "a while" before
2014 * checking status is fine, post SRST, we perform this magic
2015 * delay here as well.
2017 * Old drivers/ide used the 2 ms rule and then waited for ready.
2019 msleep(150);
2022 /* Before we perform post-reset processing we want to see if
2023 the bus shows 0xFF, because the odd clown forgets the D7 pulldown
2024 resistor */
2026 if (ata_check_status(ap) == 0xFF)
2027 return 1; /* Positive is failure for some reason */
2029 ata_bus_post_reset(ap, devmask);
2031 return 0;
2035 * ata_bus_reset - reset host port and associated ATA channel
2036 * @ap: port to reset
2038 * This is typically the first time we actually start issuing
2039 * commands to the ATA channel. We wait for BSY to clear, then
2040 * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
2041 * result. Determine what devices, if any, are on the channel
2042 * by looking at the device 0/1 error register. Look at the signature
2043 * stored in each device's taskfile registers, to determine if
2044 * the device is ATA or ATAPI.
2046 * LOCKING:
2047 * PCI/etc. bus probe sem.
2048 * Obtains host_set lock.
2050 * SIDE EFFECTS:
2051 * Sets ATA_FLAG_PORT_DISABLED if bus reset fails.
2054 void ata_bus_reset(struct ata_port *ap)
2056 struct ata_ioports *ioaddr = &ap->ioaddr;
2057 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2058 u8 err;
2059 unsigned int dev0, dev1 = 0, devmask = 0;
2061 DPRINTK("ENTER, host %u, port %u\n", ap->id, ap->port_no);
2063 /* determine if device 0/1 are present */
2064 if (ap->flags & ATA_FLAG_SATA_RESET)
2065 dev0 = 1;
2066 else {
2067 dev0 = ata_devchk(ap, 0);
2068 if (slave_possible)
2069 dev1 = ata_devchk(ap, 1);
2072 if (dev0)
2073 devmask |= (1 << 0);
2074 if (dev1)
2075 devmask |= (1 << 1);
2077 /* select device 0 again */
2078 ap->ops->dev_select(ap, 0);
2080 /* issue bus reset */
2081 if (ap->flags & ATA_FLAG_SRST)
2082 if (ata_bus_softreset(ap, devmask))
2083 goto err_out;
2086 * determine by signature whether we have ATA or ATAPI devices
2088 ap->device[0].class = ata_dev_try_classify(ap, 0, &err);
2089 if ((slave_possible) && (err != 0x81))
2090 ap->device[1].class = ata_dev_try_classify(ap, 1, &err);
2092 /* re-enable interrupts */
2093 if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */
2094 ata_irq_on(ap);
2096 /* is double-select really necessary? */
2097 if (ap->device[1].class != ATA_DEV_NONE)
2098 ap->ops->dev_select(ap, 1);
2099 if (ap->device[0].class != ATA_DEV_NONE)
2100 ap->ops->dev_select(ap, 0);
2102 /* if no devices were detected, disable this port */
2103 if ((ap->device[0].class == ATA_DEV_NONE) &&
2104 (ap->device[1].class == ATA_DEV_NONE))
2105 goto err_out;
2107 if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
2108 /* set up device control for ATA_FLAG_SATA_RESET */
2109 if (ap->flags & ATA_FLAG_MMIO)
2110 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
2111 else
2112 outb(ap->ctl, ioaddr->ctl_addr);
2115 DPRINTK("EXIT\n");
2116 return;
2118 err_out:
2119 printk(KERN_ERR "ata%u: disabling port\n", ap->id);
2120 ap->ops->port_disable(ap);
2122 DPRINTK("EXIT\n");
2125 static int sata_phy_resume(struct ata_port *ap)
2127 unsigned long timeout = jiffies + (HZ * 5);
2128 u32 sstatus;
2130 scr_write_flush(ap, SCR_CONTROL, 0x300);
2132 /* Wait for phy to become ready, if necessary. */
2133 do {
2134 msleep(200);
2135 sstatus = scr_read(ap, SCR_STATUS);
2136 if ((sstatus & 0xf) != 1)
2137 return 0;
2138 } while (time_before(jiffies, timeout));
2140 return -1;
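/*
 * Illustrative note: the low nibble of SStatus is the DET field.  The
 * loop above keeps waiting only while DET == 1 (device detected, phy
 * not yet established); DET == 3 means the phy came up and the
 * function returns 0, while DET == 0 (nothing attached) also exits
 * immediately.
 */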
2144 * ata_std_probeinit - initialize probing
2145 * @ap: port to be probed
2147 * @ap is about to be probed. Initialize it. This function is
2148 * to be used as standard callback for ata_drive_probe_reset().
2150 * NOTE!!! Do not use this function as probeinit if a low level
2151 * driver implements only hardreset. Just pass NULL as probeinit
2152 * in that case. Using this function is probably okay but doing
2153 * so makes reset sequence different from the original
2154 * ->phy_reset implementation and Jeff nervous. :-P
2156 void ata_std_probeinit(struct ata_port *ap)
2158 if (ap->flags & ATA_FLAG_SATA && ap->ops->scr_read) {
2159 sata_phy_resume(ap);
2160 if (sata_dev_present(ap))
2161 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2166 * ata_std_softreset - reset host port via ATA SRST
2167 * @ap: port to reset
2168 * @verbose: fail verbosely
2169 * @classes: resulting classes of attached devices
2171 * Reset host port using ATA SRST. This function is to be used
2172 * as standard callback for ata_drive_*_reset() functions.
2174 * LOCKING:
2175 * Kernel thread context (may sleep)
2177 * RETURNS:
2178 * 0 on success, -errno otherwise.
2180 int ata_std_softreset(struct ata_port *ap, int verbose, unsigned int *classes)
2182 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2183 unsigned int devmask = 0, err_mask;
2184 u8 err;
2186 DPRINTK("ENTER\n");
2188 if (ap->ops->scr_read && !sata_dev_present(ap)) {
2189 classes[0] = ATA_DEV_NONE;
2190 goto out;
2193 /* determine if device 0/1 are present */
2194 if (ata_devchk(ap, 0))
2195 devmask |= (1 << 0);
2196 if (slave_possible && ata_devchk(ap, 1))
2197 devmask |= (1 << 1);
2199 /* select device 0 again */
2200 ap->ops->dev_select(ap, 0);
2202 /* issue bus reset */
2203 DPRINTK("about to softreset, devmask=%x\n", devmask);
2204 err_mask = ata_bus_softreset(ap, devmask);
2205 if (err_mask) {
2206 if (verbose)
2207 printk(KERN_ERR "ata%u: SRST failed (err_mask=0x%x)\n",
2208 ap->id, err_mask);
2209 else
2210 DPRINTK("EXIT, softreset failed (err_mask=0x%x)\n",
2211 err_mask);
2212 return -EIO;
2215 /* determine by signature whether we have ATA or ATAPI devices */
2216 classes[0] = ata_dev_try_classify(ap, 0, &err);
2217 if (slave_possible && err != 0x81)
2218 classes[1] = ata_dev_try_classify(ap, 1, &err);
2220 out:
2221 DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
2222 return 0;
2226 * sata_std_hardreset - reset host port via SATA phy reset
2227 * @ap: port to reset
2228 * @verbose: fail verbosely
2229 * @class: resulting class of attached device
2231 * SATA phy-reset host port using DET bits of SControl register.
2232 * This function is to be used as standard callback for
2233 * ata_drive_*_reset().
2235 * LOCKING:
2236 * Kernel thread context (may sleep)
2238 * RETURNS:
2239 * 0 on success, -errno otherwise.
2241 int sata_std_hardreset(struct ata_port *ap, int verbose, unsigned int *class)
2243 DPRINTK("ENTER\n");
2245 /* Issue phy wake/reset */
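/* SControl 0x301: IPM field 0x3 disables the Partial and Slumber power
 * states, DET field 0x1 starts COMRESET.  sata_phy_resume() then writes
 * 0x300 to clear DET and let the link renegotiate.
 */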
2246 scr_write_flush(ap, SCR_CONTROL, 0x301);
2249 * Couldn't find anything in SATA I/II specs, but AHCI-1.1
2250 * 10.4.2 says at least 1 ms.
2252 msleep(1);
2254 /* Bring phy back */
2255 sata_phy_resume(ap);
2257 /* TODO: phy layer with polling, timeouts, etc. */
2258 if (!sata_dev_present(ap)) {
2259 *class = ATA_DEV_NONE;
2260 DPRINTK("EXIT, link offline\n");
2261 return 0;
2264 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
2265 if (verbose)
2266 printk(KERN_ERR "ata%u: COMRESET failed "
2267 "(device not ready)\n", ap->id);
2268 else
2269 DPRINTK("EXIT, device not ready\n");
2270 return -EIO;
2273 ap->ops->dev_select(ap, 0); /* probably unnecessary */
2275 *class = ata_dev_try_classify(ap, 0, NULL);
2277 DPRINTK("EXIT, class=%u\n", *class);
2278 return 0;
2282 * ata_std_postreset - standard postreset callback
2283 * @ap: the target ata_port
2284 * @classes: classes of attached devices
2286 * This function is invoked after a successful reset. Note that
2287 * the device might have been reset more than once using
2288 * different reset methods before postreset is invoked.
2290 * This function is to be used as standard callback for
2291 * ata_drive_*_reset().
2293 * LOCKING:
2294 * Kernel thread context (may sleep)
2296 void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
2298 DPRINTK("ENTER\n");
2300 /* set cable type if it isn't already set */
2301 if (ap->cbl == ATA_CBL_NONE && ap->flags & ATA_FLAG_SATA)
2302 ap->cbl = ATA_CBL_SATA;
2304 /* print link status */
2305 if (ap->cbl == ATA_CBL_SATA)
2306 sata_print_link_status(ap);
2308 /* re-enable interrupts */
2309 if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */
2310 ata_irq_on(ap);
2312 /* is double-select really necessary? */
2313 if (classes[0] != ATA_DEV_NONE)
2314 ap->ops->dev_select(ap, 1);
2315 if (classes[1] != ATA_DEV_NONE)
2316 ap->ops->dev_select(ap, 0);
2318 /* bail out if no device is present */
2319 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2320 DPRINTK("EXIT, no device\n");
2321 return;
2324 /* set up device control */
2325 if (ap->ioaddr.ctl_addr) {
2326 if (ap->flags & ATA_FLAG_MMIO)
2327 writeb(ap->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
2328 else
2329 outb(ap->ctl, ap->ioaddr.ctl_addr);
2332 DPRINTK("EXIT\n");
2336 * ata_std_probe_reset - standard probe reset method
2337 * @ap: port on which to perform probe-reset
2338 * @classes: resulting classes of attached devices
2340 * The stock off-the-shelf ->probe_reset method.
2342 * LOCKING:
2343 * Kernel thread context (may sleep)
2345 * RETURNS:
2346 * 0 on success, -errno otherwise.
2348 int ata_std_probe_reset(struct ata_port *ap, unsigned int *classes)
2350 ata_reset_fn_t hardreset;
2352 hardreset = NULL;
2353 if (ap->flags & ATA_FLAG_SATA && ap->ops->scr_read)
2354 hardreset = sata_std_hardreset;
2356 return ata_drive_probe_reset(ap, ata_std_probeinit,
2357 ata_std_softreset, hardreset,
2358 ata_std_postreset, classes);
2361 static int do_probe_reset(struct ata_port *ap, ata_reset_fn_t reset,
2362 ata_postreset_fn_t postreset,
2363 unsigned int *classes)
2365 int i, rc;
2367 for (i = 0; i < ATA_MAX_DEVICES; i++)
2368 classes[i] = ATA_DEV_UNKNOWN;
2370 rc = reset(ap, 0, classes);
2371 if (rc)
2372 return rc;
2374 /* If any class isn't ATA_DEV_UNKNOWN, consider classification
2375 * complete and convert all remaining ATA_DEV_UNKNOWN entries
2376 * to ATA_DEV_NONE.
2378 for (i = 0; i < ATA_MAX_DEVICES; i++)
2379 if (classes[i] != ATA_DEV_UNKNOWN)
2380 break;
2382 if (i < ATA_MAX_DEVICES)
2383 for (i = 0; i < ATA_MAX_DEVICES; i++)
2384 if (classes[i] == ATA_DEV_UNKNOWN)
2385 classes[i] = ATA_DEV_NONE;
2387 if (postreset)
2388 postreset(ap, classes);
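/* classes[0] still ATA_DEV_UNKNOWN means this reset method could not
 * classify anything; return -ENODEV so the caller may fall back to
 * another reset method.
 */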
2390 return classes[0] != ATA_DEV_UNKNOWN ? 0 : -ENODEV;
2394 * ata_drive_probe_reset - Perform probe reset with given methods
2395 * @ap: port to reset
2396 * @probeinit: probeinit method (can be NULL)
2397 * @softreset: softreset method (can be NULL)
2398 * @hardreset: hardreset method (can be NULL)
2399 * @postreset: postreset method (can be NULL)
2400 * @classes: resulting classes of attached devices
2402 * Reset the specified port and classify attached devices using
2403 * given methods. This function prefers softreset but tries all
2404 * possible reset sequences to reset and classify devices. This
2405 * function is intended to be used for constructing ->probe_reset
2406 * callback by low level drivers.
2408 * Reset methods should follow these rules:
2410 * - Return 0 on success, -errno on failure.
2411 * - If classification is supported, fill classes[] with
2412 * recognized class codes.
2413 * - If classification is not supported, leave classes[] alone.
2414 * - If verbose is non-zero, print error message on failure;
2415 * otherwise, shut up.
2417 * LOCKING:
2418 * Kernel thread context (may sleep)
2420 * RETURNS:
2421 * 0 on success, -EINVAL if no reset method is available, -ENODEV
2422 * if classification fails, and any error code from reset
2423 * methods.
2425 int ata_drive_probe_reset(struct ata_port *ap, ata_probeinit_fn_t probeinit,
2426 ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
2427 ata_postreset_fn_t postreset, unsigned int *classes)
2429 int rc = -EINVAL;
2431 if (probeinit)
2432 probeinit(ap);
2434 if (softreset) {
2435 rc = do_probe_reset(ap, softreset, postreset, classes);
2436 if (rc == 0)
2437 return 0;
2440 if (!hardreset)
2441 return rc;
2443 rc = do_probe_reset(ap, hardreset, postreset, classes);
2444 if (rc == 0 || rc != -ENODEV)
2445 return rc;
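/* hardreset answered -ENODEV, i.e. it could not classify the devices;
 * give the classification-capable softreset one more try.
 */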
2447 if (softreset)
2448 rc = do_probe_reset(ap, softreset, postreset, classes);
2450 return rc;
2454 * ata_dev_same_device - Determine whether new ID matches configured device
2455 * @ap: port on which the device to compare against resides
2456 * @dev: device to compare against
2457 * @new_class: class of the new device
2458 * @new_id: IDENTIFY page of the new device
2460 * Compare @new_class and @new_id against @dev and determine
2461 * whether @dev is the device indicated by @new_class and
2462 * @new_id.
2464 * LOCKING:
2465 * None.
2467 * RETURNS:
2468 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
2470 static int ata_dev_same_device(struct ata_port *ap, struct ata_device *dev,
2471 unsigned int new_class, const u16 *new_id)
2473 const u16 *old_id = dev->id;
2474 unsigned char model[2][41], serial[2][21];
2475 u64 new_n_sectors;
2477 if (dev->class != new_class) {
2478 printk(KERN_INFO
2479 "ata%u: dev %u class mismatch %d != %d\n",
2480 ap->id, dev->devno, dev->class, new_class);
2481 return 0;
2484 ata_id_c_string(old_id, model[0], ATA_ID_PROD_OFS, sizeof(model[0]));
2485 ata_id_c_string(new_id, model[1], ATA_ID_PROD_OFS, sizeof(model[1]));
2486 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO_OFS, sizeof(serial[0]));
2487 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO_OFS, sizeof(serial[1]));
2488 new_n_sectors = ata_id_n_sectors(new_id);
2490 if (strcmp(model[0], model[1])) {
2491 printk(KERN_INFO
2492 "ata%u: dev %u model number mismatch '%s' != '%s'\n",
2493 ap->id, dev->devno, model[0], model[1]);
2494 return 0;
2497 if (strcmp(serial[0], serial[1])) {
2498 printk(KERN_INFO
2499 "ata%u: dev %u serial number mismatch '%s' != '%s'\n",
2500 ap->id, dev->devno, serial[0], serial[1]);
2501 return 0;
2504 if (dev->class == ATA_DEV_ATA && dev->n_sectors != new_n_sectors) {
2505 printk(KERN_INFO
2506 "ata%u: dev %u n_sectors mismatch %llu != %llu\n",
2507 ap->id, dev->devno, (unsigned long long)dev->n_sectors,
2508 (unsigned long long)new_n_sectors);
2509 return 0;
2512 return 1;
2516 * ata_dev_revalidate - Revalidate ATA device
2517 * @ap: port on which the device to revalidate resides
2518 * @dev: device to revalidate
2519 * @post_reset: is this revalidation after reset?
2521 * Re-read IDENTIFY page and make sure @dev is still attached to
2522 * the port.
2524 * LOCKING:
2525 * Kernel thread context (may sleep)
2527 * RETURNS:
2528 * 0 on success, negative errno otherwise
2530 int ata_dev_revalidate(struct ata_port *ap, struct ata_device *dev,
2531 int post_reset)
2533 unsigned int class;
2534 u16 *id;
2535 int rc;
2537 if (!ata_dev_present(dev))
2538 return -ENODEV;
2540 class = dev->class;
2541 id = NULL;
2543 /* allocate & read ID data */
2544 rc = ata_dev_read_id(ap, dev, &class, post_reset, &id);
2545 if (rc)
2546 goto fail;
2548 /* is the device still there? */
2549 if (!ata_dev_same_device(ap, dev, class, id)) {
2550 rc = -ENODEV;
2551 goto fail;
2554 kfree(dev->id);
2555 dev->id = id;
2557 /* configure device according to the new ID */
2558 return ata_dev_configure(ap, dev, 0);
2560 fail:
2561 printk(KERN_ERR "ata%u: dev %u revalidation failed (errno=%d)\n",
2562 ap->id, dev->devno, rc);
2563 kfree(id);
2564 return rc;
2567 static const char * const ata_dma_blacklist [] = {
2568 "WDC AC11000H", NULL,
2569 "WDC AC22100H", NULL,
2570 "WDC AC32500H", NULL,
2571 "WDC AC33100H", NULL,
2572 "WDC AC31600H", NULL,
2573 "WDC AC32100H", "24.09P07",
2574 "WDC AC23200L", "21.10N21",
2575 "Compaq CRD-8241B", NULL,
2576 "CRD-8400B", NULL,
2577 "CRD-8480B", NULL,
2578 "CRD-8482B", NULL,
2579 "CRD-84", NULL,
2580 "SanDisk SDP3B", NULL,
2581 "SanDisk SDP3B-64", NULL,
2582 "SANYO CD-ROM CRD", NULL,
2583 "HITACHI CDR-8", NULL,
2584 "HITACHI CDR-8335", NULL,
2585 "HITACHI CDR-8435", NULL,
2586 "Toshiba CD-ROM XM-6202B", NULL,
2587 "TOSHIBA CD-ROM XM-1702BC", NULL,
2588 "CD-532E-A", NULL,
2589 "E-IDE CD-ROM CR-840", NULL,
2590 "CD-ROM Drive/F5A", NULL,
2591 "WPI CDD-820", NULL,
2592 "SAMSUNG CD-ROM SC-148C", NULL,
2593 "SAMSUNG CD-ROM SC", NULL,
2594 "SanDisk SDP3B-64", NULL,
2595 "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,
2596 "_NEC DV5800A", NULL,
2597 "SAMSUNG CD-ROM SN-124", "N001"
2600 static int ata_strim(char *s, size_t len)
2602 len = strnlen(s, len);
2604 /* ATAPI specifies that empty space is blank-filled; remove blanks */
2605 while ((len > 0) && (s[len - 1] == ' ')) {
2606 len--;
2607 s[len] = 0;
2609 return len;
2612 static int ata_dma_blacklisted(const struct ata_device *dev)
2614 unsigned char model_num[40];
2615 unsigned char model_rev[16];
2616 unsigned int nlen, rlen;
2617 int i;
2619 ata_id_string(dev->id, model_num, ATA_ID_PROD_OFS,
2620 sizeof(model_num));
2621 ata_id_string(dev->id, model_rev, ATA_ID_FW_REV_OFS,
2622 sizeof(model_rev));
2623 nlen = ata_strim(model_num, sizeof(model_num));
2624 rlen = ata_strim(model_rev, sizeof(model_rev));
2626 for (i = 0; i < ARRAY_SIZE(ata_dma_blacklist); i += 2) {
2627 if (!strncmp(ata_dma_blacklist[i], model_num, nlen)) {
2628 if (ata_dma_blacklist[i+1] == NULL)
2629 return 1;
2630 if (!strncmp(ata_dma_blacklist[i+1], model_rev, rlen))
2631 return 1;
2634 return 0;
2638 * ata_dev_xfermask - Compute supported xfermask of the given device
2639 * @ap: Port on which the device to compute xfermask for resides
2640 * @dev: Device to compute xfermask for
2642 * Compute supported xfermask of @dev and store it in
2643 * dev->*_mask. This function is responsible for applying all
2644 * known limits including host controller limits, device
2645 * blacklist, etc...
2647 * LOCKING:
2648 * None.
2650 static void ata_dev_xfermask(struct ata_port *ap, struct ata_device *dev)
2652 unsigned long xfer_mask;
2653 int i;
2655 xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
2656 ap->udma_mask);
2658 /* use port-wide xfermask for now */
2659 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2660 struct ata_device *d = &ap->device[i];
2661 if (!ata_dev_present(d))
2662 continue;
2663 xfer_mask &= ata_pack_xfermask(d->pio_mask, d->mwdma_mask,
2664 d->udma_mask);
2665 xfer_mask &= ata_id_xfermask(d->id);
2666 if (ata_dma_blacklisted(d))
2667 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
2670 if (ata_dma_blacklisted(dev))
2671 printk(KERN_WARNING "ata%u: dev %u is on DMA blacklist, "
2672 "disabling DMA\n", ap->id, dev->devno);
2674 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
2675 &dev->udma_mask);
2679 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
2680 * @ap: Port associated with device @dev
2681 * @dev: Device to which command will be sent
2683 * Issue SET FEATURES - XFER MODE command to device @dev
2684 * on port @ap.
2686 * LOCKING:
2687 * PCI/etc. bus probe sem.
2689 * RETURNS:
2690 * 0 on success, AC_ERR_* mask otherwise.
2693 static unsigned int ata_dev_set_xfermode(struct ata_port *ap,
2694 struct ata_device *dev)
2696 struct ata_taskfile tf;
2697 unsigned int err_mask;
2699 /* set up set-features taskfile */
2700 DPRINTK("set features - xfer mode\n");
2702 ata_tf_init(ap, &tf, dev->devno);
2703 tf.command = ATA_CMD_SET_FEATURES;
2704 tf.feature = SETFEATURES_XFER;
2705 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2706 tf.protocol = ATA_PROT_NODATA;
2707 tf.nsect = dev->xfer_mode;
2709 err_mask = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0);
2711 DPRINTK("EXIT, err_mask=%x\n", err_mask);
2712 return err_mask;
2716 * ata_dev_init_params - Issue INIT DEV PARAMS command
2717 * @ap: Port associated with device @dev
2718 * @dev: Device to which command will be sent
2720 * LOCKING:
2721 * Kernel thread context (may sleep)
2723 * RETURNS:
2724 * 0 on success, AC_ERR_* mask otherwise.
2727 static unsigned int ata_dev_init_params(struct ata_port *ap,
2728 struct ata_device *dev)
2730 struct ata_taskfile tf;
2731 unsigned int err_mask;
2732 u16 sectors = dev->id[6];
2733 u16 heads = dev->id[3];
2735 /* Number of sectors per track 1-255. Number of heads 1-16 */
2736 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
2737 return 0;
2739 /* set up init dev params taskfile */
2740 DPRINTK("init dev params \n");
2742 ata_tf_init(ap, &tf, dev->devno);
2743 tf.command = ATA_CMD_INIT_DEV_PARAMS;
2744 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2745 tf.protocol = ATA_PROT_NODATA;
2746 tf.nsect = sectors;
2747 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
2749 err_mask = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0);
2751 DPRINTK("EXIT, err_mask=%x\n", err_mask);
2752 return err_mask;
2756 * ata_sg_clean - Unmap DMA memory associated with command
2757 * @qc: Command containing DMA memory to be released
2759 * Unmap all mapped DMA memory associated with this command.
2761 * LOCKING:
2762 * spin_lock_irqsave(host_set lock)
2765 static void ata_sg_clean(struct ata_queued_cmd *qc)
2767 struct ata_port *ap = qc->ap;
2768 struct scatterlist *sg = qc->__sg;
2769 int dir = qc->dma_dir;
2770 void *pad_buf = NULL;
2772 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
2773 WARN_ON(sg == NULL);
2775 if (qc->flags & ATA_QCFLAG_SINGLE)
2776 WARN_ON(qc->n_elem > 1);
2778 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
2780 /* if we padded the buffer out to 32-bit bound, and data
2781 * xfer direction is from-device, we must copy from the
2782 * pad buffer back into the supplied buffer
2784 if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
2785 pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
2787 if (qc->flags & ATA_QCFLAG_SG) {
2788 if (qc->n_elem)
2789 dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
2790 /* restore last sg */
2791 sg[qc->orig_n_elem - 1].length += qc->pad_len;
2792 if (pad_buf) {
2793 struct scatterlist *psg = &qc->pad_sgent;
2794 void *addr = kmap_atomic(psg->page, KM_IRQ0);
2795 memcpy(addr + psg->offset, pad_buf, qc->pad_len);
2796 kunmap_atomic(addr, KM_IRQ0);
2798 } else {
2799 if (qc->n_elem)
2800 dma_unmap_single(ap->dev,
2801 sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
2802 dir);
2803 /* restore sg */
2804 sg->length += qc->pad_len;
2805 if (pad_buf)
2806 memcpy(qc->buf_virt + sg->length - qc->pad_len,
2807 pad_buf, qc->pad_len);
2810 qc->flags &= ~ATA_QCFLAG_DMAMAP;
2811 qc->__sg = NULL;
2815 * ata_fill_sg - Fill PCI IDE PRD table
2816 * @qc: Metadata associated with taskfile to be transferred
2818 * Fill PCI IDE PRD (scatter-gather) table with segments
2819 * associated with the current disk command.
2821 * LOCKING:
2822 * spin_lock_irqsave(host_set lock)
2825 static void ata_fill_sg(struct ata_queued_cmd *qc)
2827 struct ata_port *ap = qc->ap;
2828 struct scatterlist *sg;
2829 unsigned int idx;
2831 WARN_ON(qc->__sg == NULL);
2832 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
2834 idx = 0;
2835 ata_for_each_sg(sg, qc) {
2836 u32 addr, offset;
2837 u32 sg_len, len;
2839 /* determine if physical DMA addr spans 64K boundary.
2840 * Note h/w doesn't support 64-bit, so we unconditionally
2841 * truncate dma_addr_t to u32.
2843 addr = (u32) sg_dma_address(sg);
2844 sg_len = sg_dma_len(sg);
2846 while (sg_len) {
2847 offset = addr & 0xffff;
2848 len = sg_len;
2849 if ((offset + sg_len) > 0x10000)
2850 len = 0x10000 - offset;
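/* example: addr 0x1fff0 with sg_len 0x100 emits PRD entry
 * (0x1fff0, 0x10) here and (0x20000, 0xf0) on the next pass,
 * so no PRD entry ever crosses a 64K boundary.
 */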
2852 ap->prd[idx].addr = cpu_to_le32(addr);
2853 ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
2854 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
2856 idx++;
2857 sg_len -= len;
2858 addr += len;
2862 if (idx)
2863 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
2866 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
2867 * @qc: Metadata associated with taskfile to check
2869 * Allow low-level driver to filter ATA PACKET commands, returning
2870 * a status indicating whether or not it is OK to use DMA for the
2871 * supplied PACKET command.
2873 * LOCKING:
2874 * spin_lock_irqsave(host_set lock)
2876 * RETURNS: 0 when ATAPI DMA can be used
2877 * nonzero otherwise
2879 int ata_check_atapi_dma(struct ata_queued_cmd *qc)
2881 struct ata_port *ap = qc->ap;
2882 int rc = 0; /* Assume ATAPI DMA is OK by default */
2884 if (ap->ops->check_atapi_dma)
2885 rc = ap->ops->check_atapi_dma(qc);
2887 return rc;
2890 * ata_qc_prep - Prepare taskfile for submission
2891 * @qc: Metadata associated with taskfile to be prepared
2893 * Prepare ATA taskfile for submission.
2895 * LOCKING:
2896 * spin_lock_irqsave(host_set lock)
2898 void ata_qc_prep(struct ata_queued_cmd *qc)
2900 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
2901 return;
2903 ata_fill_sg(qc);
2906 void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
2909 * ata_sg_init_one - Associate command with memory buffer
2910 * @qc: Command to be associated
2911 * @buf: Memory buffer
2912 * @buflen: Length of memory buffer, in bytes.
2914 * Initialize the data-related elements of queued_cmd @qc
2915 * to point to a single memory buffer, @buf of byte length @buflen.
2917 * LOCKING:
2918 * spin_lock_irqsave(host_set lock)
2921 void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
2923 struct scatterlist *sg;
2925 qc->flags |= ATA_QCFLAG_SINGLE;
2927 memset(&qc->sgent, 0, sizeof(qc->sgent));
2928 qc->__sg = &qc->sgent;
2929 qc->n_elem = 1;
2930 qc->orig_n_elem = 1;
2931 qc->buf_virt = buf;
2933 sg = qc->__sg;
2934 sg_init_one(sg, buf, buflen);
2938 * ata_sg_init - Associate command with scatter-gather table.
2939 * @qc: Command to be associated
2940 * @sg: Scatter-gather table.
2941 * @n_elem: Number of elements in s/g table.
2943 * Initialize the data-related elements of queued_cmd @qc
2944 * to point to a scatter-gather table @sg, containing @n_elem
2945 * elements.
2947 * LOCKING:
2948 * spin_lock_irqsave(host_set lock)
2951 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
2952 unsigned int n_elem)
2954 qc->flags |= ATA_QCFLAG_SG;
2955 qc->__sg = sg;
2956 qc->n_elem = n_elem;
2957 qc->orig_n_elem = n_elem;
2961 * ata_sg_setup_one - DMA-map the memory buffer associated with a command.
2962 * @qc: Command with memory buffer to be mapped.
2964 * DMA-map the memory buffer associated with queued_cmd @qc.
2966 * LOCKING:
2967 * spin_lock_irqsave(host_set lock)
2969 * RETURNS:
2970 * Zero on success, negative on error.
2973 static int ata_sg_setup_one(struct ata_queued_cmd *qc)
2975 struct ata_port *ap = qc->ap;
2976 int dir = qc->dma_dir;
2977 struct scatterlist *sg = qc->__sg;
2978 dma_addr_t dma_address;
2979 int trim_sg = 0;
2981 /* we must lengthen transfers to end on a 32-bit boundary */
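/* example: a 13-byte ATAPI transfer yields pad_len = 1; the sg entry
 * is trimmed to 12 bytes below and the final byte travels through the
 * per-tag 4-byte pad buffer (ap->pad + tag * ATA_DMA_PAD_SZ) instead.
 */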
2982 qc->pad_len = sg->length & 3;
2983 if (qc->pad_len) {
2984 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
2985 struct scatterlist *psg = &qc->pad_sgent;
2987 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
2989 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
2991 if (qc->tf.flags & ATA_TFLAG_WRITE)
2992 memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
2993 qc->pad_len);
2995 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
2996 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
2997 /* trim sg */
2998 sg->length -= qc->pad_len;
2999 if (sg->length == 0)
3000 trim_sg = 1;
3002 DPRINTK("padding done, sg->length=%u pad_len=%u\n",
3003 sg->length, qc->pad_len);
3006 if (trim_sg) {
3007 qc->n_elem--;
3008 goto skip_map;
3011 dma_address = dma_map_single(ap->dev, qc->buf_virt,
3012 sg->length, dir);
3013 if (dma_mapping_error(dma_address)) {
3014 /* restore sg */
3015 sg->length += qc->pad_len;
3016 return -1;
3019 sg_dma_address(sg) = dma_address;
3020 sg_dma_len(sg) = sg->length;
3022 skip_map:
3023 DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
3024 qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3026 return 0;
3030 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
3031 * @qc: Command with scatter-gather table to be mapped.
3033 * DMA-map the scatter-gather table associated with queued_cmd @qc.
3035 * LOCKING:
3036 * spin_lock_irqsave(host_set lock)
3038 * RETURNS:
3039 * Zero on success, negative on error.
3043 static int ata_sg_setup(struct ata_queued_cmd *qc)
3045 struct ata_port *ap = qc->ap;
3046 struct scatterlist *sg = qc->__sg;
3047 struct scatterlist *lsg = &sg[qc->n_elem - 1];
3048 int n_elem, pre_n_elem, dir, trim_sg = 0;
3050 VPRINTK("ENTER, ata%u\n", ap->id);
3051 WARN_ON(!(qc->flags & ATA_QCFLAG_SG));
3053 /* we must lengthen transfers to end on a 32-bit boundary */
3054 qc->pad_len = lsg->length & 3;
3055 if (qc->pad_len) {
3056 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3057 struct scatterlist *psg = &qc->pad_sgent;
3058 unsigned int offset;
3060 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
3062 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
3065 * psg->page/offset are used to copy to-be-written
3066 * data in this function or read data in ata_sg_clean.
3068 offset = lsg->offset + lsg->length - qc->pad_len;
3069 psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
3070 psg->offset = offset_in_page(offset);
3072 if (qc->tf.flags & ATA_TFLAG_WRITE) {
3073 void *addr = kmap_atomic(psg->page, KM_IRQ0);
3074 memcpy(pad_buf, addr + psg->offset, qc->pad_len);
3075 kunmap_atomic(addr, KM_IRQ0);
3078 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
3079 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
3080 /* trim last sg */
3081 lsg->length -= qc->pad_len;
3082 if (lsg->length == 0)
3083 trim_sg = 1;
3085 DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
3086 qc->n_elem - 1, lsg->length, qc->pad_len);
3089 pre_n_elem = qc->n_elem;
3090 if (trim_sg && pre_n_elem)
3091 pre_n_elem--;
3093 if (!pre_n_elem) {
3094 n_elem = 0;
3095 goto skip_map;
3098 dir = qc->dma_dir;
3099 n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
3100 if (n_elem < 1) {
3101 /* restore last sg */
3102 lsg->length += qc->pad_len;
3103 return -1;
3106 DPRINTK("%d sg elements mapped\n", n_elem);
3108 skip_map:
3109 qc->n_elem = n_elem;
3111 return 0;
3115 * ata_poll_qc_complete - turn irq back on and finish qc
3116 * @qc: Command to complete
3119 * LOCKING:
3120 * None. (grabs host lock)
3123 void ata_poll_qc_complete(struct ata_queued_cmd *qc)
3125 struct ata_port *ap = qc->ap;
3126 unsigned long flags;
3128 spin_lock_irqsave(&ap->host_set->lock, flags);
3129 ata_irq_on(ap);
3130 ata_qc_complete(qc);
3131 spin_unlock_irqrestore(&ap->host_set->lock, flags);
3135 * ata_pio_poll - poll using PIO, depending on current state
3136 * @ap: the target ata_port
3138 * LOCKING:
3139 * None. (executing in kernel thread context)
3141 * RETURNS:
3142 * timeout value to use
3145 static unsigned long ata_pio_poll(struct ata_port *ap)
3147 struct ata_queued_cmd *qc;
3148 u8 status;
3149 unsigned int poll_state = HSM_ST_UNKNOWN;
3150 unsigned int reg_state = HSM_ST_UNKNOWN;
3152 qc = ata_qc_from_tag(ap, ap->active_tag);
3153 WARN_ON(qc == NULL);
3155 switch (ap->hsm_task_state) {
3156 case HSM_ST:
3157 case HSM_ST_POLL:
3158 poll_state = HSM_ST_POLL;
3159 reg_state = HSM_ST;
3160 break;
3161 case HSM_ST_LAST:
3162 case HSM_ST_LAST_POLL:
3163 poll_state = HSM_ST_LAST_POLL;
3164 reg_state = HSM_ST_LAST;
3165 break;
3166 default:
3167 BUG();
3168 break;
3171 status = ata_chk_status(ap);
3172 if (status & ATA_BUSY) {
3173 if (time_after(jiffies, ap->pio_task_timeout)) {
3174 qc->err_mask |= AC_ERR_TIMEOUT;
3175 ap->hsm_task_state = HSM_ST_TMOUT;
3176 return 0;
3178 ap->hsm_task_state = poll_state;
3179 return ATA_SHORT_PAUSE;
3182 ap->hsm_task_state = reg_state;
3183 return 0;
3187 * ata_pio_complete - check if drive is busy or idle
3188 * @ap: the target ata_port
3190 * LOCKING:
3191 * None. (executing in kernel thread context)
3193 * RETURNS:
3194 * Zero if qc completed.
3195 * Non-zero if there is more work to do.
3198 static int ata_pio_complete (struct ata_port *ap)
3200 struct ata_queued_cmd *qc;
3201 u8 drv_stat;
3204 * This is purely heuristic. This is a fast path. Sometimes when
3205 * we enter, BSY will be cleared in a chk-status or two. If not,
3206 * the drive is probably seeking or something. Snooze for a couple
3207 * msecs, then chk-status again. If still busy, fall back to
3208 * HSM_ST_LAST_POLL state.
3210 drv_stat = ata_busy_wait(ap, ATA_BUSY, 10);
3211 if (drv_stat & ATA_BUSY) {
3212 msleep(2);
3213 drv_stat = ata_busy_wait(ap, ATA_BUSY, 10);
3214 if (drv_stat & ATA_BUSY) {
3215 ap->hsm_task_state = HSM_ST_LAST_POLL;
3216 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
3217 return 1;
3221 qc = ata_qc_from_tag(ap, ap->active_tag);
3222 WARN_ON(qc == NULL);
3224 drv_stat = ata_wait_idle(ap);
3225 if (!ata_ok(drv_stat)) {
3226 qc->err_mask |= __ac_err_mask(drv_stat);
3227 ap->hsm_task_state = HSM_ST_ERR;
3228 return 1;
3231 ap->hsm_task_state = HSM_ST_IDLE;
3233 WARN_ON(qc->err_mask);
3234 ata_poll_qc_complete(qc);
3236 /* another command may start at this point */
3238 return 0;
3243 * swap_buf_le16 - swap halves of 16-bit words in place
3244 * @buf: Buffer to swap
3245 * @buf_words: Number of 16-bit words in buffer.
3247 * Swap halves of 16-bit words if needed to convert from
3248 * little-endian byte order to native cpu byte order, or
3249 * vice-versa.
3251 * LOCKING:
3252 * Inherited from caller.
3254 void swap_buf_le16(u16 *buf, unsigned int buf_words)
3256 #ifdef __BIG_ENDIAN
3257 unsigned int i;
3259 for (i = 0; i < buf_words; i++)
3260 buf[i] = le16_to_cpu(buf[i]);
3261 #endif /* __BIG_ENDIAN */
3265 * ata_mmio_data_xfer - Transfer data by MMIO
3266 * @ap: port to read/write
3267 * @buf: data buffer
3268 * @buflen: buffer length
3269 * @write_data: read/write
3271 * Transfer data from/to the device data register by MMIO.
3273 * LOCKING:
3274 * Inherited from caller.
3277 static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf,
3278 unsigned int buflen, int write_data)
3280 unsigned int i;
3281 unsigned int words = buflen >> 1;
3282 u16 *buf16 = (u16 *) buf;
3283 void __iomem *mmio = (void __iomem *)ap->ioaddr.data_addr;
3285 /* Transfer multiple of 2 bytes */
3286 if (write_data) {
3287 for (i = 0; i < words; i++)
3288 writew(le16_to_cpu(buf16[i]), mmio);
3289 } else {
3290 for (i = 0; i < words; i++)
3291 buf16[i] = cpu_to_le16(readw(mmio));
3294 /* Transfer trailing 1 byte, if any. */
3295 if (unlikely(buflen & 0x01)) {
3296 u16 align_buf[1] = { 0 };
3297 unsigned char *trailing_buf = buf + buflen - 1;
3299 if (write_data) {
3300 memcpy(align_buf, trailing_buf, 1);
3301 writew(le16_to_cpu(align_buf[0]), mmio);
3302 } else {
3303 align_buf[0] = cpu_to_le16(readw(mmio));
3304 memcpy(trailing_buf, align_buf, 1);
3310 * ata_pio_data_xfer - Transfer data by PIO
3311 * @ap: port to read/write
3312 * @buf: data buffer
3313 * @buflen: buffer length
3314 * @write_data: read/write
3316 * Transfer data from/to the device data register by PIO.
3318 * LOCKING:
3319 * Inherited from caller.
3322 static void ata_pio_data_xfer(struct ata_port *ap, unsigned char *buf,
3323 unsigned int buflen, int write_data)
3325 unsigned int words = buflen >> 1;
3327 /* Transfer multiple of 2 bytes */
3328 if (write_data)
3329 outsw(ap->ioaddr.data_addr, buf, words);
3330 else
3331 insw(ap->ioaddr.data_addr, buf, words);
3333 /* Transfer trailing 1 byte, if any. */
3334 if (unlikely(buflen & 0x01)) {
3335 u16 align_buf[1] = { 0 };
3336 unsigned char *trailing_buf = buf + buflen - 1;
3338 if (write_data) {
3339 memcpy(align_buf, trailing_buf, 1);
3340 outw(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
3341 } else {
3342 align_buf[0] = cpu_to_le16(inw(ap->ioaddr.data_addr));
3343 memcpy(trailing_buf, align_buf, 1);
3349 * ata_data_xfer - Transfer data from/to the data register.
3350 * @ap: port to read/write
3351 * @buf: data buffer
3352 * @buflen: buffer length
3353 * @do_write: read/write
3355 * Transfer data from/to the device data register.
3357 * LOCKING:
3358 * Inherited from caller.
3361 static void ata_data_xfer(struct ata_port *ap, unsigned char *buf,
3362 unsigned int buflen, int do_write)
3364 /* Make the crap hardware pay the costs not the good stuff */
3365 if (unlikely(ap->flags & ATA_FLAG_IRQ_MASK)) {
3366 unsigned long flags;
3367 local_irq_save(flags);
3368 if (ap->flags & ATA_FLAG_MMIO)
3369 ata_mmio_data_xfer(ap, buf, buflen, do_write);
3370 else
3371 ata_pio_data_xfer(ap, buf, buflen, do_write);
3372 local_irq_restore(flags);
3373 } else {
3374 if (ap->flags & ATA_FLAG_MMIO)
3375 ata_mmio_data_xfer(ap, buf, buflen, do_write);
3376 else
3377 ata_pio_data_xfer(ap, buf, buflen, do_write);
3382 * ata_pio_sector - Transfer ATA_SECT_SIZE (512 bytes) of data.
3383 * @qc: Command in progress
3385 * Transfer ATA_SECT_SIZE of data from/to the ATA device.
3387 * LOCKING:
3388 * Inherited from caller.
3391 static void ata_pio_sector(struct ata_queued_cmd *qc)
3393 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
3394 struct scatterlist *sg = qc->__sg;
3395 struct ata_port *ap = qc->ap;
3396 struct page *page;
3397 unsigned int offset;
3398 unsigned char *buf;
3400 if (qc->cursect == (qc->nsect - 1))
3401 ap->hsm_task_state = HSM_ST_LAST;
3403 page = sg[qc->cursg].page;
3404 offset = sg[qc->cursg].offset + qc->cursg_ofs * ATA_SECT_SIZE;
3406 /* get the current page and offset */
3407 page = nth_page(page, (offset >> PAGE_SHIFT));
3408 offset %= PAGE_SIZE;
3410 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3412 if (PageHighMem(page)) {
3413 unsigned long flags;
3415 local_irq_save(flags);
3416 buf = kmap_atomic(page, KM_IRQ0);
3418 /* do the actual data transfer */
3419 ata_data_xfer(ap, buf + offset, ATA_SECT_SIZE, do_write);
3421 kunmap_atomic(buf, KM_IRQ0);
3422 local_irq_restore(flags);
3423 } else {
3424 buf = page_address(page);
3425 ata_data_xfer(ap, buf + offset, ATA_SECT_SIZE, do_write);
3428 qc->cursect++;
3429 qc->cursg_ofs++;
3431 if ((qc->cursg_ofs * ATA_SECT_SIZE) == (&sg[qc->cursg])->length) {
3432 qc->cursg++;
3433 qc->cursg_ofs = 0;
3438 * ata_pio_sectors - Transfer one or many 512-byte sectors.
3439 * @qc: Command in progress
3441 * Transfer one or many ATA_SECT_SIZE of data from/to the
3442 * ATA device for the DRQ request.
3444 * LOCKING:
3445 * Inherited from caller.
3448 static void ata_pio_sectors(struct ata_queued_cmd *qc)
3450 if (is_multi_taskfile(&qc->tf)) {
3451 /* READ/WRITE MULTIPLE */
3452 unsigned int nsect;
3454 WARN_ON(qc->dev->multi_count == 0);
3456 nsect = min(qc->nsect - qc->cursect, qc->dev->multi_count);
3457 while (nsect--)
3458 ata_pio_sector(qc);
3459 } else
3460 ata_pio_sector(qc);
3464 * atapi_send_cdb - Write CDB bytes to hardware
3465 * @ap: Port to which ATAPI device is attached.
3466 * @qc: Taskfile currently active
3468 * When device has indicated its readiness to accept
3469 * a CDB, this function is called. Send the CDB.
3471 * LOCKING:
3472 * Inherited from caller.
3475 static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
3477 /* send SCSI cdb */
3478 DPRINTK("send cdb\n");
3479 WARN_ON(qc->dev->cdb_len < 12);
3481 ata_data_xfer(ap, qc->cdb, qc->dev->cdb_len, 1);
3482 ata_altstatus(ap); /* flush */
3484 switch (qc->tf.protocol) {
3485 case ATA_PROT_ATAPI:
3486 ap->hsm_task_state = HSM_ST;
3487 break;
3488 case ATA_PROT_ATAPI_NODATA:
3489 ap->hsm_task_state = HSM_ST_LAST;
3490 break;
3491 case ATA_PROT_ATAPI_DMA:
3492 ap->hsm_task_state = HSM_ST_LAST;
3493 /* initiate bmdma */
3494 ap->ops->bmdma_start(qc);
3495 break;
3500 * ata_pio_first_block - Write first data block to hardware
3501 * @ap: Port to which ATA/ATAPI device is attached.
3503 * When device has indicated its readiness to accept
3504 * the data, this function sends out the CDB or
3505 * the first data block by PIO.
3506 * After this,
3507 * - If polling, ata_pio_task() handles the rest.
3508 * - Otherwise, interrupt handler takes over.
3510 * LOCKING:
3511 * Kernel thread context (may sleep)
3513 * RETURNS:
3514 * Zero if irq handler takes over
3515 * Non-zero if there is more work to do (polling).
3518 static int ata_pio_first_block(struct ata_port *ap)
3520 struct ata_queued_cmd *qc;
3521 u8 status;
3522 unsigned long flags;
3523 int has_next;
3525 qc = ata_qc_from_tag(ap, ap->active_tag);
3526 WARN_ON(qc == NULL);
3527 WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
3529 /* if polling, we will stay in the work queue after sending the data.
3530 * otherwise, interrupt handler takes over after sending the data.
3532 has_next = (qc->tf.flags & ATA_TFLAG_POLLING);
3534 /* sleep-wait for BSY to clear */
3535 DPRINTK("busy wait\n");
3536 if (ata_busy_sleep(ap, ATA_TMOUT_DATAOUT_QUICK, ATA_TMOUT_DATAOUT)) {
3537 qc->err_mask |= AC_ERR_TIMEOUT;
3538 ap->hsm_task_state = HSM_ST_TMOUT;
3539 goto err_out;
3542 /* make sure DRQ is set */
3543 status = ata_chk_status(ap);
3544 if ((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ) {
3545 /* device status error */
3546 qc->err_mask |= AC_ERR_HSM;
3547 ap->hsm_task_state = HSM_ST_ERR;
3548 goto err_out;
3551 /* Send the CDB (atapi) or the first data block (ata pio out).
3552 * During the state transition, interrupt handler shouldn't
3553 * be invoked before the data transfer is complete and
3554 * hsm_task_state is changed. Hence, the following locking.
3556 spin_lock_irqsave(&ap->host_set->lock, flags);
3558 if (qc->tf.protocol == ATA_PROT_PIO) {
3559 /* PIO data out protocol.
3560 * send first data block.
3563 /* ata_pio_sectors() might change the state to HSM_ST_LAST.
3564 * so, the state is changed here before ata_pio_sectors().
3566 ap->hsm_task_state = HSM_ST;
3567 ata_pio_sectors(qc);
3568 ata_altstatus(ap); /* flush */
3569 } else
3570 /* send CDB */
3571 atapi_send_cdb(ap, qc);
3573 spin_unlock_irqrestore(&ap->host_set->lock, flags);
3575 /* if polling, ata_pio_task() handles the rest.
3576 * otherwise, interrupt handler takes over from here.
3578 return has_next;
3580 err_out:
3581 return 1; /* has next */
3585 * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
3586 * @qc: Command in progress
3587 * @bytes: number of bytes
3589 * Transfer data from/to the ATAPI device.
3591 * LOCKING:
3592 * Inherited from caller.
3596 static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
3598 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
3599 struct scatterlist *sg = qc->__sg;
3600 struct ata_port *ap = qc->ap;
3601 struct page *page;
3602 unsigned char *buf;
3603 unsigned int offset, count;
3605 if (qc->curbytes + bytes >= qc->nbytes)
3606 ap->hsm_task_state = HSM_ST_LAST;
3608 next_sg:
3609 if (unlikely(qc->cursg >= qc->n_elem)) {
3611 * The end of qc->sg is reached and the device expects
3612 * more data to transfer. In order not to overrun qc->sg
3613 * and fulfill length specified in the byte count register,
3614 * - for read case, discard trailing data from the device
3615 * - for write case, padding zero data to the device
3617 u16 pad_buf[1] = { 0 };
3618 unsigned int words = bytes >> 1;
3619 unsigned int i;
3621 if (words) /* warning if bytes > 1 */
3622 printk(KERN_WARNING "ata%u: %u bytes trailing data\n",
3623 ap->id, bytes);
3625 for (i = 0; i < words; i++)
3626 ata_data_xfer(ap, (unsigned char*)pad_buf, 2, do_write);
3628 ap->hsm_task_state = HSM_ST_LAST;
3629 return;
3632 sg = &qc->__sg[qc->cursg];
3634 page = sg->page;
3635 offset = sg->offset + qc->cursg_ofs;
3637 /* get the current page and offset */
3638 page = nth_page(page, (offset >> PAGE_SHIFT));
3639 offset %= PAGE_SIZE;
3641 /* don't overrun current sg */
3642 count = min(sg->length - qc->cursg_ofs, bytes);
3644 /* don't cross page boundaries */
3645 count = min(count, (unsigned int)PAGE_SIZE - offset);
3647 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3649 if (PageHighMem(page)) {
3650 unsigned long flags;
3652 local_irq_save(flags);
3653 buf = kmap_atomic(page, KM_IRQ0);
3655 /* do the actual data transfer */
3656 ata_data_xfer(ap, buf + offset, count, do_write);
3658 kunmap_atomic(buf, KM_IRQ0);
3659 local_irq_restore(flags);
3660 } else {
3661 buf = page_address(page);
3662 ata_data_xfer(ap, buf + offset, count, do_write);
3665 bytes -= count;
3666 qc->curbytes += count;
3667 qc->cursg_ofs += count;
3669 if (qc->cursg_ofs == sg->length) {
3670 qc->cursg++;
3671 qc->cursg_ofs = 0;
3674 if (bytes)
3675 goto next_sg;
3679 * atapi_pio_bytes - Transfer data from/to the ATAPI device.
3680 * @qc: Command in progress
3682 * Transfer data from/to the ATAPI device.
3684 * LOCKING:
3685 * Inherited from caller.
3688 static void atapi_pio_bytes(struct ata_queued_cmd *qc)
3690 struct ata_port *ap = qc->ap;
3691 struct ata_device *dev = qc->dev;
3692 unsigned int ireason, bc_lo, bc_hi, bytes;
3693 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
3695 ap->ops->tf_read(ap, &qc->tf);
3696 ireason = qc->tf.nsect;
3697 bc_lo = qc->tf.lbam;
3698 bc_hi = qc->tf.lbah;
3699 bytes = (bc_hi << 8) | bc_lo;
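/* ireason bits (ATAPI): bit 0 is CoD (0 = data, 1 = command packet),
 * bit 1 is I/O (0 = to the device, i.e. write; 1 = to the host, i.e.
 * read); lbam/lbah carry the byte count for this DRQ data block.
 */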
3701 /* shall be cleared to zero, indicating xfer of data */
3702 if (ireason & (1 << 0))
3703 goto err_out;
3705 /* make sure transfer direction matches expected */
3706 i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
3707 if (do_write != i_write)
3708 goto err_out;
3710 VPRINTK("ata%u: xfering %d bytes\n", ap->id, bytes);
3712 __atapi_pio_bytes(qc, bytes);
3714 return;
3716 err_out:
3717 printk(KERN_INFO "ata%u: dev %u: ATAPI check failed\n",
3718 ap->id, dev->devno);
3719 qc->err_mask |= AC_ERR_HSM;
3720 ap->hsm_task_state = HSM_ST_ERR;
3724 * ata_pio_block - start PIO on a block
3725 * @ap: the target ata_port
3727 * LOCKING:
3728 * None. (executing in kernel thread context)
3731 static void ata_pio_block(struct ata_port *ap)
3733 struct ata_queued_cmd *qc;
3734 u8 status;
3737 * This is purely heuristic. This is a fast path.
3738 * Sometimes when we enter, BSY will be cleared in
3739 * a chk-status or two. If not, the drive is probably seeking
3740 * or something. Snooze for a couple msecs, then
3741 * chk-status again. If still busy, fall back to
3742 * HSM_ST_POLL state.
3744 status = ata_busy_wait(ap, ATA_BUSY, 5);
3745 if (status & ATA_BUSY) {
3746 msleep(2);
3747 status = ata_busy_wait(ap, ATA_BUSY, 10);
3748 if (status & ATA_BUSY) {
3749 ap->hsm_task_state = HSM_ST_POLL;
3750 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
3751 return;
3755 qc = ata_qc_from_tag(ap, ap->active_tag);
3756 WARN_ON(qc == NULL);
3758 /* check error */
3759 if (status & (ATA_ERR | ATA_DF)) {
3760 qc->err_mask |= AC_ERR_DEV;
3761 ap->hsm_task_state = HSM_ST_ERR;
3762 return;
3765 /* transfer data if any */
3766 if (is_atapi_taskfile(&qc->tf)) {
3767 /* DRQ=0 means no more data to transfer */
3768 if ((status & ATA_DRQ) == 0) {
3769 ap->hsm_task_state = HSM_ST_LAST;
3770 return;
3773 atapi_pio_bytes(qc);
3774 } else {
3775 /* handle BSY=0, DRQ=0 as error */
3776 if ((status & ATA_DRQ) == 0) {
3777 qc->err_mask |= AC_ERR_HSM;
3778 ap->hsm_task_state = HSM_ST_ERR;
3779 return;
3782 ata_pio_sectors(qc);
3785 ata_altstatus(ap); /* flush */
3788 static void ata_pio_error(struct ata_port *ap)
3790 struct ata_queued_cmd *qc;
3792 qc = ata_qc_from_tag(ap, ap->active_tag);
3793 WARN_ON(qc == NULL);
3795 if (qc->tf.command != ATA_CMD_PACKET)
3796 printk(KERN_WARNING "ata%u: PIO error\n", ap->id);
3798 /* make sure qc->err_mask is available to
3799 * know what's wrong and recover
3801 WARN_ON(qc->err_mask == 0);
3803 ap->hsm_task_state = HSM_ST_IDLE;
3805 ata_poll_qc_complete(qc);
3808 static void ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
3809 u8 status)
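/* Summary of the HSM states handled below:
 * HSM_ST_FIRST - device is ready for the CDB or the first PIO
 *                data-out block
 * HSM_ST       - data transfer in progress
 * HSM_ST_LAST  - data done, waiting for the command to complete
 * HSM_ST_ERR   - error, complete the qc with err_mask set
 */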
3811 /* check error */
3812 if (unlikely(status & (ATA_ERR | ATA_DF))) {
3813 qc->err_mask |= AC_ERR_DEV;
3814 ap->hsm_task_state = HSM_ST_ERR;
3817 fsm_start:
3818 switch (ap->hsm_task_state) {
3819 case HSM_ST_FIRST:
3820 /* Some pre-ATAPI-4 devices assert INTRQ
3821 * in this state when ready to receive the CDB.
3824 /* check device status */
3825 if (unlikely((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ)) {
3826 /* Wrong status. Let EH handle this */
3827 qc->err_mask |= AC_ERR_HSM;
3828 ap->hsm_task_state = HSM_ST_ERR;
3829 goto fsm_start;
3832 atapi_send_cdb(ap, qc);
3834 break;
3836 case HSM_ST:
3837 /* complete command or read/write the data register */
3838 if (qc->tf.protocol == ATA_PROT_ATAPI) {
3839 /* ATAPI PIO protocol */
3840 if ((status & ATA_DRQ) == 0) {
3841 /* no more data to transfer */
3842 ap->hsm_task_state = HSM_ST_LAST;
3843 goto fsm_start;
3846 atapi_pio_bytes(qc);
3848 if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
3849 /* bad ireason reported by device */
3850 goto fsm_start;
3852 } else {
3853 /* ATA PIO protocol */
3854 if (unlikely((status & ATA_DRQ) == 0)) {
3855 /* handle BSY=0, DRQ=0 as error */
3856 qc->err_mask |= AC_ERR_HSM;
3857 ap->hsm_task_state = HSM_ST_ERR;
3858 goto fsm_start;
3861 ata_pio_sectors(qc);
3863 if (ap->hsm_task_state == HSM_ST_LAST &&
3864 (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
3865 /* all data read */
3866 ata_altstatus(ap);
3867 status = ata_chk_status(ap);
3868 goto fsm_start;
3872 ata_altstatus(ap); /* flush */
3873 break;
3875 case HSM_ST_LAST:
3876 if (unlikely(status & ATA_DRQ)) {
3877 /* handle DRQ=1 as error */
3878 qc->err_mask |= AC_ERR_HSM;
3879 ap->hsm_task_state = HSM_ST_ERR;
3880 goto fsm_start;
3883 /* no more data to transfer */
3884 DPRINTK("ata%u: command complete, drv_stat 0x%x\n",
3885 ap->id, status);
3887 ap->hsm_task_state = HSM_ST_IDLE;
3889 /* complete taskfile transaction */
3890 qc->err_mask |= ac_err_mask(status);
3891 ata_qc_complete(qc);
3892 break;
3894 case HSM_ST_ERR:
3895 if (qc->tf.command != ATA_CMD_PACKET)
3896 printk(KERN_ERR "ata%u: command error, drv_stat 0x%x host_stat 0x%x\n",
3897 ap->id, status, host_stat);
3899 /* make sure qc->err_mask is available to
3900 * know what's wrong and recover
3902 WARN_ON(qc->err_mask == 0);
3904 ap->hsm_task_state = HSM_ST_IDLE;
3905 ata_qc_complete(qc);
3906 break;
3907 default:
3908 BUG(); /* callers only enter with a state handled above */
3913 static void ata_pio_task(void *_data)
3915 struct ata_port *ap = _data;
3916 unsigned long timeout;
3917 int has_next;
3919 fsm_start:
3920 timeout = 0;
3921 has_next = 1;
3923 switch (ap->hsm_task_state) {
3924 case HSM_ST_FIRST:
3925 has_next = ata_pio_first_block(ap);
3926 break;
3928 case HSM_ST:
3929 ata_pio_block(ap);
3930 break;
3932 case HSM_ST_LAST:
3933 has_next = ata_pio_complete(ap);
3934 break;
3936 case HSM_ST_POLL:
3937 case HSM_ST_LAST_POLL:
3938 timeout = ata_pio_poll(ap);
3939 break;
3941 case HSM_ST_TMOUT:
3942 case HSM_ST_ERR:
3943 ata_pio_error(ap);
3944 return;
3946 default:
3947 BUG();
3948 return;
3951 if (timeout)
3952 ata_port_queue_task(ap, ata_pio_task, ap, timeout);
3953 else if (has_next)
3954 goto fsm_start;
3958 * ata_qc_timeout - Handle timeout of queued command
3959 * @qc: Command that timed out
3961 * Some part of the kernel (currently, only the SCSI layer)
3962 * has noticed that the active command on port @ap has not
3963 * completed after a specified length of time. Handle this
3964 * condition by disabling DMA (if necessary) and completing
3965 * transactions, with error if necessary.
3967 * This also handles the case of the "lost interrupt", where
3968 * for some reason (possibly hardware bug, possibly driver bug)
3969 * an interrupt was not delivered to the driver, even though the
3970 * transaction completed successfully.
3972 * LOCKING:
3973 * Inherited from SCSI layer (none, can sleep)
3976 static void ata_qc_timeout(struct ata_queued_cmd *qc)
3978 struct ata_port *ap = qc->ap;
3979 struct ata_host_set *host_set = ap->host_set;
3980 u8 host_stat = 0, drv_stat;
3981 unsigned long flags;
3983 DPRINTK("ENTER\n");
3985 ap->hsm_task_state = HSM_ST_IDLE;
3987 spin_lock_irqsave(&host_set->lock, flags);
3989 switch (qc->tf.protocol) {
3991 case ATA_PROT_DMA:
3992 case ATA_PROT_ATAPI_DMA:
3993 host_stat = ap->ops->bmdma_status(ap);
3995 /* before we do anything else, clear DMA-Start bit */
3996 ap->ops->bmdma_stop(qc);
3998 /* fall through */
4000 default:
4001 ata_altstatus(ap);
4002 drv_stat = ata_chk_status(ap);
4004 /* ack bmdma irq events */
4005 ap->ops->irq_clear(ap);
4007 printk(KERN_ERR "ata%u: command 0x%x timeout, stat 0x%x host_stat 0x%x\n",
4008 ap->id, qc->tf.command, drv_stat, host_stat);
4010 ap->hsm_task_state = HSM_ST_IDLE;
4012 /* complete taskfile transaction */
4013 qc->err_mask |= AC_ERR_TIMEOUT;
4014 break;
4017 spin_unlock_irqrestore(&host_set->lock, flags);
4019 ata_eh_qc_complete(qc);
4021 DPRINTK("EXIT\n");
4025 * ata_eng_timeout - Handle timeout of queued command
4026 * @ap: Port on which timed-out command is active
4028 * Some part of the kernel (currently, only the SCSI layer)
4029 * has noticed that the active command on port @ap has not
4030 * completed after a specified length of time. Handle this
4031 * condition by disabling DMA (if necessary) and completing
4032 * transactions, with error if necessary.
4034 * This also handles the case of the "lost interrupt", where
4035 * for some reason (possibly hardware bug, possibly driver bug)
4036 * an interrupt was not delivered to the driver, even though the
4037 * transaction completed successfully.
4039 * LOCKING:
4040 * Inherited from SCSI layer (none, can sleep)
4043 void ata_eng_timeout(struct ata_port *ap)
4045 DPRINTK("ENTER\n");
4047 ata_qc_timeout(ata_qc_from_tag(ap, ap->active_tag));
4049 DPRINTK("EXIT\n");
4053 * ata_qc_new - Request an available ATA command, for queueing
4054 * @ap: Port associated with device @dev
4055 * @dev: Device from whom we request an available command structure
4057 * LOCKING:
4058 * None.
4061 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
4063 struct ata_queued_cmd *qc = NULL;
4064 unsigned int i;
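/* ap->qactive doubles as the tag allocator: the first bit we can
 * atomically test-and-set becomes the new command's tag.
 */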
4066 for (i = 0; i < ATA_MAX_QUEUE; i++)
4067 if (!test_and_set_bit(i, &ap->qactive)) {
4068 qc = ata_qc_from_tag(ap, i);
4069 break;
4072 if (qc)
4073 qc->tag = i;
4075 return qc;
4079 * ata_qc_new_init - Request an available ATA command, and initialize it
4080 * @ap: Port associated with device @dev
4081 * @dev: Device from whom we request an available command structure
4083 * LOCKING:
4084 * None.
4087 struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
4088 struct ata_device *dev)
4090 struct ata_queued_cmd *qc;
4092 qc = ata_qc_new(ap);
4093 if (qc) {
4094 qc->scsicmd = NULL;
4095 qc->ap = ap;
4096 qc->dev = dev;
4098 ata_qc_reinit(qc);
4101 return qc;
4105 * ata_qc_free - free unused ata_queued_cmd
4106 * @qc: Command to complete
4108 * Designed to free unused ata_queued_cmd object
4109 * in case something prevents using it.
4111 * LOCKING:
4112 * spin_lock_irqsave(host_set lock)
4114 void ata_qc_free(struct ata_queued_cmd *qc)
4116 struct ata_port *ap = qc->ap;
4117 unsigned int tag;
4119 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4121 qc->flags = 0;
4122 tag = qc->tag;
4123 if (likely(ata_tag_valid(tag))) {
4124 if (tag == ap->active_tag)
4125 ap->active_tag = ATA_TAG_POISON;
4126 qc->tag = ATA_TAG_POISON;
4127 clear_bit(tag, &ap->qactive);
4131 void __ata_qc_complete(struct ata_queued_cmd *qc)
4133 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4134 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
4136 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
4137 ata_sg_clean(qc);
4139 /* atapi: mark qc as inactive to prevent the interrupt handler
4140 * from completing the command twice later, before the error handler
4141 * is called. (when rc != 0 and atapi request sense is needed)
4143 qc->flags &= ~ATA_QCFLAG_ACTIVE;
4145 /* call completion callback */
4146 qc->complete_fn(qc);
4149 static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
4151 struct ata_port *ap = qc->ap;
4153 switch (qc->tf.protocol) {
4154 case ATA_PROT_DMA:
4155 case ATA_PROT_ATAPI_DMA:
4156 return 1;
4158 case ATA_PROT_ATAPI:
4159 case ATA_PROT_PIO:
4160 if (ap->flags & ATA_FLAG_PIO_DMA)
4161 return 1;
4163 /* fall through */
4165 default:
4166 return 0;
4169 /* never reached */
4173 * ata_qc_issue - issue taskfile to device
4174 * @qc: command to issue to device
4176 * Prepare an ATA command for submission to a device.
4177 * This includes mapping the data into a DMA-able
4178 * area, filling in the S/G table, and finally
4179 * writing the taskfile to hardware, starting the command.
4181 * LOCKING:
4182 * spin_lock_irqsave(host_set lock)
4184 * RETURNS:
4185 * Zero on success, AC_ERR_* mask on failure
4188 unsigned int ata_qc_issue(struct ata_queued_cmd *qc)
4190 struct ata_port *ap = qc->ap;
4192 if (ata_should_dma_map(qc)) {
4193 if (qc->flags & ATA_QCFLAG_SG) {
4194 if (ata_sg_setup(qc))
4195 goto sg_err;
4196 } else if (qc->flags & ATA_QCFLAG_SINGLE) {
4197 if (ata_sg_setup_one(qc))
4198 goto sg_err;
4200 } else {
4201 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4204 ap->ops->qc_prep(qc);
4206 qc->ap->active_tag = qc->tag;
4207 qc->flags |= ATA_QCFLAG_ACTIVE;
4209 return ap->ops->qc_issue(qc);
4211 sg_err:
4212 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4213 return AC_ERR_SYSTEM;
4218 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
4219 * @qc: command to issue to device
4221 * Using various libata functions and hooks, this function
4222 * starts an ATA command. ATA commands are grouped into
4223 * classes called "protocols", and issuing each type of protocol
4224 * is slightly different.
4226 * May be used as the qc_issue() entry in ata_port_operations.
4228 * LOCKING:
4229 * spin_lock_irqsave(host_set lock)
4231 * RETURNS:
4232 * Zero on success, AC_ERR_* mask on failure
4235 unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
4237 struct ata_port *ap = qc->ap;
4239 /* Use polling pio if the LLD doesn't handle
4240 * interrupt driven pio and atapi CDB interrupt.
4242 if (ap->flags & ATA_FLAG_PIO_POLLING) {
4243 switch (qc->tf.protocol) {
4244 case ATA_PROT_PIO:
4245 case ATA_PROT_ATAPI:
4246 case ATA_PROT_ATAPI_NODATA:
4247 qc->tf.flags |= ATA_TFLAG_POLLING;
4248 break;
4249 case ATA_PROT_ATAPI_DMA:
4250 if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
4251 BUG();
4252 break;
4253 default:
4254 break;
4258 /* select the device */
4259 ata_dev_select(ap, qc->dev->devno, 1, 0);
4261 /* start the command */
4262 switch (qc->tf.protocol) {
4263 case ATA_PROT_NODATA:
4264 if (qc->tf.flags & ATA_TFLAG_POLLING)
4265 ata_qc_set_polling(qc);
4267 ata_tf_to_host(ap, &qc->tf);
4268 ap->hsm_task_state = HSM_ST_LAST;
4270 if (qc->tf.flags & ATA_TFLAG_POLLING)
4271 ata_port_queue_task(ap, ata_pio_task, ap, 0);
4273 break;
4275 case ATA_PROT_DMA:
4276 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
4278 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4279 ap->ops->bmdma_setup(qc); /* set up bmdma */
4280 ap->ops->bmdma_start(qc); /* initiate bmdma */
4281 ap->hsm_task_state = HSM_ST_LAST;
4282 break;
4284 case ATA_PROT_PIO:
4285 if (qc->tf.flags & ATA_TFLAG_POLLING)
4286 ata_qc_set_polling(qc);
4288 ata_tf_to_host(ap, &qc->tf);
4290 if (qc->tf.flags & ATA_TFLAG_WRITE) {
4291 /* PIO data out protocol */
4292 ap->hsm_task_state = HSM_ST_FIRST;
4293 ata_port_queue_task(ap, ata_pio_task, ap, 0);
4295 /* always send first data block using
4296 * the ata_pio_task() codepath.
4298 } else {
4299 /* PIO data in protocol */
4300 ap->hsm_task_state = HSM_ST;
4302 if (qc->tf.flags & ATA_TFLAG_POLLING)
4303 ata_port_queue_task(ap, ata_pio_task, ap, 0);
4305 /* if polling, ata_pio_task() handles the rest.
4306 * otherwise, interrupt handler takes over from here.
4310 break;
4312 case ATA_PROT_ATAPI:
4313 case ATA_PROT_ATAPI_NODATA:
4314 if (qc->tf.flags & ATA_TFLAG_POLLING)
4315 ata_qc_set_polling(qc);
4317 ata_tf_to_host(ap, &qc->tf);
4319 ap->hsm_task_state = HSM_ST_FIRST;
4321 /* send cdb by polling if no cdb interrupt */
4322 if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
4323 (qc->tf.flags & ATA_TFLAG_POLLING))
4324 ata_port_queue_task(ap, ata_pio_task, ap, 0);
4325 break;
4327 case ATA_PROT_ATAPI_DMA:
4328 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
4330 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4331 ap->ops->bmdma_setup(qc); /* set up bmdma */
4332 ap->hsm_task_state = HSM_ST_FIRST;
4334 /* send cdb by polling if no cdb interrupt */
4335 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4336 ata_port_queue_task(ap, ata_pio_task, ap, 0);
4337 break;
4339 default:
4340 WARN_ON(1);
4341 return AC_ERR_SYSTEM;
4344 return 0;
4348 * ata_host_intr - Handle host interrupt for given (port, task)
4349 * @ap: Port on which interrupt arrived (possibly...)
4350 * @qc: Taskfile currently active in engine
4352 * Handle host interrupt for given queued command. Currently,
4353 * only DMA interrupts are handled. All other commands are
4354 * handled via polling with interrupts disabled (nIEN bit).
4356 * LOCKING:
4357 * spin_lock_irqsave(host_set lock)
4359 * RETURNS:
4360 * One if interrupt was handled, zero if not (shared irq).
4363 inline unsigned int ata_host_intr (struct ata_port *ap,
4364 struct ata_queued_cmd *qc)
4366 u8 status, host_stat = 0;
4368 VPRINTK("ata%u: protocol %d task_state %d\n",
4369 ap->id, qc->tf.protocol, ap->hsm_task_state);
4371 /* Check whether we are expecting interrupt in this state */
4372 switch (ap->hsm_task_state) {
4373 case HSM_ST_FIRST:
4374 /* Checking the ATA_DFLAG_CDB_INTR flag is enough here.
4375 * The flag is set only for ATAPI devices, so there is
4376 * no need to check is_atapi_taskfile(&qc->tf) again.
4378 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4379 goto idle_irq;
4380 break;
4381 case HSM_ST_LAST:
4382 if (qc->tf.protocol == ATA_PROT_DMA ||
4383 qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
4384 /* check status of DMA engine */
4385 host_stat = ap->ops->bmdma_status(ap);
4386 VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);
4388 /* if it's not our irq... */
4389 if (!(host_stat & ATA_DMA_INTR))
4390 goto idle_irq;
4392 /* before we do anything else, clear DMA-Start bit */
4393 ap->ops->bmdma_stop(qc);
4395 if (unlikely(host_stat & ATA_DMA_ERR)) {
4396 /* error when transferring data to/from memory */
4397 qc->err_mask |= AC_ERR_HOST_BUS;
4398 ap->hsm_task_state = HSM_ST_ERR;
4401 break;
4402 case HSM_ST:
4403 break;
4404 default:
4405 goto idle_irq;
4408 /* check altstatus */
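/* reading the alternate status register does not clear INTRQ; peek at
 * it first, since a BSY device means this is probably not our
 * interrupt and INTRQ must not be acked.  Only the ata_chk_status()
 * read further below clears INTRQ.
 */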
4409 status = ata_altstatus(ap);
4410 if (status & ATA_BUSY)
4411 goto idle_irq;
4413 /* check main status, clearing INTRQ */
4414 status = ata_chk_status(ap);
4415 if (unlikely(status & ATA_BUSY))
4416 goto idle_irq;
4418 DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
4419 ap->id, qc->tf.protocol, ap->hsm_task_state, status);
4421 /* ack bmdma irq events */
4422 ap->ops->irq_clear(ap);
4424 ata_hsm_move(ap, qc, status);
4425 return 1; /* irq handled */
4427 idle_irq:
4428 ap->stats.idle_irq++;
4430 #ifdef ATA_IRQ_TRAP
4431 if ((ap->stats.idle_irq % 1000) == 0) {
4432 ata_irq_ack(ap, 0); /* debug trap */
4433 printk(KERN_WARNING "ata%d: irq trap\n", ap->id);
4434 return 1;
4435 }
4436 #endif
4437 return 0; /* irq not handled */
4438 }
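/* Illustrative sketch (a hypothetical foo_interrupt): LLDDs with their own
 * interrupt status registers typically decode those first, then hand each
 * port's active command to ata_host_intr():
 *
 *	qc = ata_qc_from_tag(ap, ap->active_tag);
 *	if (qc && !(qc->tf.flags & ATA_TFLAG_POLLING))
 *		handled |= ata_host_intr(ap, qc);
 *
 * ata_interrupt() below is the stock version of exactly this loop.
 */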
4440 /**
4441 * ata_interrupt - Default ATA host interrupt handler
4442 * @irq: irq line (unused)
4443 * @dev_instance: pointer to our ata_host_set information structure
4444 * @regs: unused
4446 * Default interrupt handler for PCI IDE devices. Calls
4447 * ata_host_intr() for each port that is not disabled.
4449 * LOCKING:
4450 * Obtains host_set lock during operation.
4452 * RETURNS:
4453 * IRQ_NONE or IRQ_HANDLED.
4454 */
4456 irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
4457 {
4458 struct ata_host_set *host_set = dev_instance;
4459 unsigned int i;
4460 unsigned int handled = 0;
4461 unsigned long flags;
4463 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
4464 spin_lock_irqsave(&host_set->lock, flags);
4466 for (i = 0; i < host_set->n_ports; i++) {
4467 struct ata_port *ap;
4469 ap = host_set->ports[i];
4470 if (ap &&
4471 !(ap->flags & ATA_FLAG_PORT_DISABLED)) {
4472 struct ata_queued_cmd *qc;
4474 qc = ata_qc_from_tag(ap, ap->active_tag);
4475 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
4476 (qc->flags & ATA_QCFLAG_ACTIVE))
4477 handled |= ata_host_intr(ap, qc);
4478 }
4479 }
4481 spin_unlock_irqrestore(&host_set->lock, flags);
4483 return IRQ_RETVAL(handled);
4484 }
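/* Typical wiring (sketch; foo_ops is hypothetical): PCI IDE-style drivers
 * point their port operations at this handler and at the bmdma irq-clear
 * helper, and ata_device_add() below passes it to request_irq():
 *
 *	static const struct ata_port_operations foo_ops = {
 *		...
 *		.irq_handler	= ata_interrupt,
 *		.irq_clear	= ata_bmdma_irq_clear,
 *	};
 */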
4487 /*
4488 * Execute a 'simple' command that consists only of the opcode 'cmd' itself,
4489 * without filling any other registers.
4490 */
4491 static int ata_do_simple_cmd(struct ata_port *ap, struct ata_device *dev,
4492 u8 cmd)
4493 {
4494 struct ata_taskfile tf;
4495 int err;
4497 ata_tf_init(ap, &tf, dev->devno);
4499 tf.command = cmd;
4500 tf.flags |= ATA_TFLAG_DEVICE;
4501 tf.protocol = ATA_PROT_NODATA;
4503 err = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0);
4504 if (err)
4505 printk(KERN_ERR "%s: ata command failed: %d\n",
4506 __FUNCTION__, err);
4508 return err;
4509 }
4511 static int ata_flush_cache(struct ata_port *ap, struct ata_device *dev)
4512 {
4513 u8 cmd;
4515 if (!ata_try_flush_cache(dev))
4516 return 0;
4518 if (ata_id_has_flush_ext(dev->id))
4519 cmd = ATA_CMD_FLUSH_EXT;
4520 else
4521 cmd = ATA_CMD_FLUSH;
4523 return ata_do_simple_cmd(ap, dev, cmd);
4524 }
4526 static int ata_standby_drive(struct ata_port *ap, struct ata_device *dev)
4527 {
4528 return ata_do_simple_cmd(ap, dev, ATA_CMD_STANDBYNOW1);
4529 }
4531 static int ata_start_drive(struct ata_port *ap, struct ata_device *dev)
4532 {
4533 return ata_do_simple_cmd(ap, dev, ATA_CMD_IDLEIMMEDIATE);
4534 }
4536 /**
4537 * ata_device_resume - wake up a previously suspended device
4538 * @ap: port the device is connected to
4539 * @dev: the device to resume
4541 * Kick the drive back into action, by sending it an idle immediate
4542 * command and making sure its transfer mode matches between drive
4543 * and host.
4544 */
4546 int ata_device_resume(struct ata_port *ap, struct ata_device *dev)
4547 {
4548 if (ap->flags & ATA_FLAG_SUSPENDED) {
4549 ap->flags &= ~ATA_FLAG_SUSPENDED;
4550 ata_set_mode(ap);
4551 }
4552 if (!ata_dev_present(dev))
4553 return 0;
4554 if (dev->class == ATA_DEV_ATA)
4555 ata_start_drive(ap, dev);
4557 return 0;
4558 }
4560 /**
4561 * ata_device_suspend - prepare a device for suspend
4562 * @ap: port the device is connected to
4563 * @dev: the device to suspend
4565 * Flush the cache on the drive, if appropriate, then issue a
4566 * standbynow command.
4567 */
4568 int ata_device_suspend(struct ata_port *ap, struct ata_device *dev, pm_message_t state)
4569 {
4570 if (!ata_dev_present(dev))
4571 return 0;
4572 if (dev->class == ATA_DEV_ATA)
4573 ata_flush_cache(ap, dev);
4575 if (state.event != PM_EVENT_FREEZE)
4576 ata_standby_drive(ap, dev);
4577 ap->flags |= ATA_FLAG_SUSPENDED;
4578 return 0;
4579 }
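/* Usage sketch (hedged; foo_sht is hypothetical): drivers rarely call the
 * two helpers above directly. The SCSI-layer wrappers exported at the bottom
 * of this file are wired into the host template instead:
 *
 *	static struct scsi_host_template foo_sht = {
 *		...
 *		.suspend	= ata_scsi_device_suspend,
 *		.resume		= ata_scsi_device_resume,
 *	};
 *
 * The wrappers resolve the ata_port/ata_device behind the scsi_device and
 * call ata_device_suspend()/ata_device_resume().
 */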
4581 /**
4582 * ata_port_start - Set port up for dma.
4583 * @ap: Port to initialize
4585 * Called just after data structures for each port are
4586 * initialized. Allocates space for PRD table.
4588 * May be used as the port_start() entry in ata_port_operations.
4590 * LOCKING:
4591 * Inherited from caller.
4592 */
4594 int ata_port_start (struct ata_port *ap)
4595 {
4596 struct device *dev = ap->dev;
4597 int rc;
4599 ap->prd = dma_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma, GFP_KERNEL);
4600 if (!ap->prd)
4601 return -ENOMEM;
4603 rc = ata_pad_alloc(ap, dev);
4604 if (rc) {
4605 dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
4606 return rc;
4607 }
4609 DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd, (unsigned long long) ap->prd_dma);
4611 return 0;
4612 }
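/* Illustrative sketch (foo_port_start and foo_port_priv are hypothetical):
 * a LLDD that needs per-port private state usually layers it on top of this
 * helper so the PRD table and pad buffer still get allocated; unwinding the
 * PRD allocation on the error path is omitted for brevity:
 *
 *	static int foo_port_start(struct ata_port *ap)
 *	{
 *		int rc = ata_port_start(ap);
 *		if (rc)
 *			return rc;
 *		ap->private_data = kzalloc(sizeof(struct foo_port_priv),
 *					   GFP_KERNEL);
 *		return ap->private_data ? 0 : -ENOMEM;
 *	}
 */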
4615 /**
4616 * ata_port_stop - Undo ata_port_start()
4617 * @ap: Port to shut down
4619 * Frees the PRD table.
4621 * May be used as the port_stop() entry in ata_port_operations.
4623 * LOCKING:
4624 * Inherited from caller.
4625 */
4627 void ata_port_stop (struct ata_port *ap)
4628 {
4629 struct device *dev = ap->dev;
4631 dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
4632 ata_pad_free(ap, dev);
4633 }
4635 void ata_host_stop (struct ata_host_set *host_set)
4636 {
4637 if (host_set->mmio_base)
4638 iounmap(host_set->mmio_base);
4639 }
4642 /**
4643 * ata_host_remove - Unregister SCSI host structure with upper layers
4644 * @ap: Port to unregister
4645 * @do_unregister: 1 if we fully unregister, 0 to just stop the port
4647 * LOCKING:
4648 * Inherited from caller.
4649 */
4651 static void ata_host_remove(struct ata_port *ap, unsigned int do_unregister)
4652 {
4653 struct Scsi_Host *sh = ap->host;
4655 DPRINTK("ENTER\n");
4657 if (do_unregister)
4658 scsi_remove_host(sh);
4660 ap->ops->port_stop(ap);
4661 }
4663 /**
4664 * ata_host_init - Initialize an ata_port structure
4665 * @ap: Structure to initialize
4666 * @host: associated SCSI mid-layer structure
4667 * @host_set: Collection of hosts to which @ap belongs
4668 * @ent: Probe information provided by low-level driver
4669 * @port_no: Port number associated with this ata_port
4671 * Initialize a new ata_port structure, and its associated
4672 * scsi_host.
4674 * LOCKING:
4675 * Inherited from caller.
4676 */
4678 static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
4679 struct ata_host_set *host_set,
4680 const struct ata_probe_ent *ent, unsigned int port_no)
4681 {
4682 unsigned int i;
4684 host->max_id = 16;
4685 host->max_lun = 1;
4686 host->max_channel = 1;
4687 host->unique_id = ata_unique_id++;
4688 host->max_cmd_len = 12;
4690 ap->flags = ATA_FLAG_PORT_DISABLED;
4691 ap->id = host->unique_id;
4692 ap->host = host;
4693 ap->ctl = ATA_DEVCTL_OBS;
4694 ap->host_set = host_set;
4695 ap->dev = ent->dev;
4696 ap->port_no = port_no;
4697 ap->hard_port_no =
4698 ent->legacy_mode ? ent->hard_port_no : port_no;
4699 ap->pio_mask = ent->pio_mask;
4700 ap->mwdma_mask = ent->mwdma_mask;
4701 ap->udma_mask = ent->udma_mask;
4702 ap->flags |= ent->host_flags;
4703 ap->ops = ent->port_ops;
4704 ap->cbl = ATA_CBL_NONE;
4705 ap->active_tag = ATA_TAG_POISON;
4706 ap->last_ctl = 0xFF;
4708 INIT_WORK(&ap->port_task, NULL, NULL);
4709 INIT_LIST_HEAD(&ap->eh_done_q);
4711 for (i = 0; i < ATA_MAX_DEVICES; i++) {
4712 struct ata_device *dev = &ap->device[i];
4713 dev->devno = i;
4714 dev->pio_mask = UINT_MAX;
4715 dev->mwdma_mask = UINT_MAX;
4716 dev->udma_mask = UINT_MAX;
4717 }
4719 #ifdef ATA_IRQ_TRAP
4720 ap->stats.unhandled_irq = 1;
4721 ap->stats.idle_irq = 1;
4722 #endif
4724 memcpy(&ap->ioaddr, &ent->port[port_no], sizeof(struct ata_ioports));
4725 }
4727 /**
4728 * ata_host_add - Attach low-level ATA driver to system
4729 * @ent: Information provided by low-level driver
4730 * @host_set: Collections of ports to which we add
4731 * @port_no: Port number associated with this host
4733 * Attach low-level ATA driver to system.
4735 * LOCKING:
4736 * PCI/etc. bus probe sem.
4738 * RETURNS:
4739 * New ata_port on success, or NULL on error.
4740 */
4742 static struct ata_port * ata_host_add(const struct ata_probe_ent *ent,
4743 struct ata_host_set *host_set,
4744 unsigned int port_no)
4745 {
4746 struct Scsi_Host *host;
4747 struct ata_port *ap;
4748 int rc;
4750 DPRINTK("ENTER\n");
4752 if (!ent->port_ops->probe_reset &&
4753 !(ent->host_flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST))) {
4754 printk(KERN_ERR "ata%u: no reset mechanism available\n",
4755 port_no);
4756 return NULL;
4757 }
4759 host = scsi_host_alloc(ent->sht, sizeof(struct ata_port));
4760 if (!host)
4761 return NULL;
4763 host->transportt = &ata_scsi_transport_template;
4765 ap = (struct ata_port *) &host->hostdata[0];
4767 ata_host_init(ap, host, host_set, ent, port_no);
4769 rc = ap->ops->port_start(ap);
4770 if (rc)
4771 goto err_out;
4773 return ap;
4775 err_out:
4776 scsi_host_put(host);
4777 return NULL;
4778 }
4780 /**
4781 * ata_device_add - Register hardware device with ATA and SCSI layers
4782 * @ent: Probe information describing hardware device to be registered
4784 * This function processes the information provided in the probe
4785 * information struct @ent, allocates the necessary ATA and SCSI
4786 * host information structures, initializes them, and registers
4787 * everything with requisite kernel subsystems.
4789 * This function requests irqs, probes the ATA bus, and probes
4790 * the SCSI bus.
4792 * LOCKING:
4793 * PCI/etc. bus probe sem.
4795 * RETURNS:
4796 * Number of ports registered. Zero on error (no ports registered).
4797 */
4799 int ata_device_add(const struct ata_probe_ent *ent)
4800 {
4801 unsigned int count = 0, i;
4802 struct device *dev = ent->dev;
4803 struct ata_host_set *host_set;
4805 DPRINTK("ENTER\n");
4806 /* alloc a container for our list of ATA ports (buses) */
4807 host_set = kzalloc(sizeof(struct ata_host_set) +
4808 (ent->n_ports * sizeof(void *)), GFP_KERNEL);
4809 if (!host_set)
4810 return 0;
4811 spin_lock_init(&host_set->lock);
4813 host_set->dev = dev;
4814 host_set->n_ports = ent->n_ports;
4815 host_set->irq = ent->irq;
4816 host_set->mmio_base = ent->mmio_base;
4817 host_set->private_data = ent->private_data;
4818 host_set->ops = ent->port_ops;
4820 /* register each port bound to this device */
4821 for (i = 0; i < ent->n_ports; i++) {
4822 struct ata_port *ap;
4823 unsigned long xfer_mode_mask;
4825 ap = ata_host_add(ent, host_set, i);
4826 if (!ap)
4827 goto err_out;
4829 host_set->ports[i] = ap;
4830 xfer_mode_mask = (ap->udma_mask << ATA_SHIFT_UDMA) |
4831 (ap->mwdma_mask << ATA_SHIFT_MWDMA) |
4832 (ap->pio_mask << ATA_SHIFT_PIO);
4834 /* print per-port info to dmesg */
4835 printk(KERN_INFO "ata%u: %cATA max %s cmd 0x%lX ctl 0x%lX "
4836 "bmdma 0x%lX irq %lu\n",
4837 ap->id,
4838 ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
4839 ata_mode_string(xfer_mode_mask),
4840 ap->ioaddr.cmd_addr,
4841 ap->ioaddr.ctl_addr,
4842 ap->ioaddr.bmdma_addr,
4843 ent->irq);
4845 ata_chk_status(ap);
4846 host_set->ops->irq_clear(ap);
4847 count++;
4848 }
4850 if (!count)
4851 goto err_free_ret;
4853 /* obtain irq, that is shared between channels */
4854 if (request_irq(ent->irq, ent->port_ops->irq_handler, ent->irq_flags,
4855 DRV_NAME, host_set))
4856 goto err_out;
4858 /* perform each probe synchronously */
4859 DPRINTK("probe begin\n");
4860 for (i = 0; i < count; i++) {
4861 struct ata_port *ap;
4862 int rc;
4864 ap = host_set->ports[i];
4866 DPRINTK("ata%u: bus probe begin\n", ap->id);
4867 rc = ata_bus_probe(ap);
4868 DPRINTK("ata%u: bus probe end\n", ap->id);
4870 if (rc) {
4871 /* FIXME: do something useful here?
4872 * Current libata behavior will
4873 * tear down everything when
4874 * the module is removed
4875 * or the h/w is unplugged.
4876 */
4877 }
4879 rc = scsi_add_host(ap->host, dev);
4880 if (rc) {
4881 printk(KERN_ERR "ata%u: scsi_add_host failed\n",
4882 ap->id);
4883 /* FIXME: do something useful here */
4884 /* FIXME: handle unconditional calls to
4885 * scsi_scan_host and ata_host_remove, below,
4886 * at the very least
4887 */
4888 }
4889 }
4891 /* probes are done, now scan each port's disk(s) */
4892 DPRINTK("host probe begin\n");
4893 for (i = 0; i < count; i++) {
4894 struct ata_port *ap = host_set->ports[i];
4896 ata_scsi_scan_host(ap);
4897 }
4899 dev_set_drvdata(dev, host_set);
4901 VPRINTK("EXIT, returning %u\n", ent->n_ports);
4902 return ent->n_ports; /* success */
4904 err_out:
4905 for (i = 0; i < count; i++) {
4906 ata_host_remove(host_set->ports[i], 1);
4907 scsi_host_put(host_set->ports[i]->host);
4908 }
4909 err_free_ret:
4910 kfree(host_set);
4911 VPRINTK("EXIT, returning 0\n");
4912 return 0;
4913 }
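/* Illustrative sketch (foo_sht, foo_ops and the mode masks are hypothetical):
 * a minimal PCI LLDD probe fills an ata_probe_ent and hands it here, then
 * frees the ent, as ata_pci_init_one() does:
 *
 *	probe_ent->dev = pci_dev_to_dev(pdev);
 *	probe_ent->sht = &foo_sht;
 *	probe_ent->port_ops = &foo_ops;
 *	probe_ent->n_ports = 2;
 *	probe_ent->irq = pdev->irq;
 *	probe_ent->irq_flags = SA_SHIRQ;
 *	probe_ent->pio_mask = 0x1f;		(PIO modes 0-4)
 *
 *	if (!ata_device_add(probe_ent))
 *		return -ENODEV;
 *	kfree(probe_ent);
 */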
4915 /**
4916 * ata_host_set_remove - PCI layer callback for device removal
4917 * @host_set: ATA host set that was removed
4919 * Unregister all objects associated with this host set. Free those
4920 * objects.
4922 * LOCKING:
4923 * Inherited from calling layer (may sleep).
4924 */
4926 void ata_host_set_remove(struct ata_host_set *host_set)
4927 {
4928 struct ata_port *ap;
4929 unsigned int i;
4931 for (i = 0; i < host_set->n_ports; i++) {
4932 ap = host_set->ports[i];
4933 scsi_remove_host(ap->host);
4934 }
4936 free_irq(host_set->irq, host_set);
4938 for (i = 0; i < host_set->n_ports; i++) {
4939 ap = host_set->ports[i];
4941 ata_scsi_release(ap->host);
4943 if ((ap->flags & ATA_FLAG_NO_LEGACY) == 0) {
4944 struct ata_ioports *ioaddr = &ap->ioaddr;
4946 if (ioaddr->cmd_addr == 0x1f0)
4947 release_region(0x1f0, 8);
4948 else if (ioaddr->cmd_addr == 0x170)
4949 release_region(0x170, 8);
4950 }
4952 scsi_host_put(ap->host);
4953 }
4955 if (host_set->ops->host_stop)
4956 host_set->ops->host_stop(host_set);
4958 kfree(host_set);
4959 }
4961 /**
4962 * ata_scsi_release - SCSI layer callback hook for host unload
4963 * @host: libata host to be unloaded
4965 * Performs all duties necessary to shut down a libata port...
4966 * Kill port kthread, disable port, and release resources.
4968 * LOCKING:
4969 * Inherited from SCSI layer.
4971 * RETURNS:
4972 * One.
4973 */
4975 int ata_scsi_release(struct Scsi_Host *host)
4976 {
4977 struct ata_port *ap = (struct ata_port *) &host->hostdata[0];
4978 int i;
4980 DPRINTK("ENTER\n");
4982 ap->ops->port_disable(ap);
4983 ata_host_remove(ap, 0);
4984 for (i = 0; i < ATA_MAX_DEVICES; i++)
4985 kfree(ap->device[i].id);
4987 DPRINTK("EXIT\n");
4988 return 1;
4989 }
4991 /**
4992 * ata_std_ports - initialize ioaddr with standard port offsets.
4993 * @ioaddr: IO address structure to be initialized
4995 * Utility function which initializes data_addr, error_addr,
4996 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
4997 * device_addr, status_addr, and command_addr to standard offsets
4998 * relative to cmd_addr.
5000 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
5001 */
5003 void ata_std_ports(struct ata_ioports *ioaddr)
5004 {
5005 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
5006 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
5007 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
5008 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
5009 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
5010 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
5011 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
5012 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
5013 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
5014 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
5015 }
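/* Usage sketch, mirroring the native-mode setup in ata_pci_init_native_mode():
 * the caller supplies cmd_addr/ctl_addr from BAR0/BAR1 and lets this helper
 * derive the remaining taskfile register offsets:
 *
 *	ioaddr->cmd_addr = pci_resource_start(pdev, 0);
 *	ioaddr->altstatus_addr =
 *	ioaddr->ctl_addr = pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS;
 *	ata_std_ports(ioaddr);
 */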
5018 #ifdef CONFIG_PCI
5020 void ata_pci_host_stop (struct ata_host_set *host_set)
5021 {
5022 struct pci_dev *pdev = to_pci_dev(host_set->dev);
5024 pci_iounmap(pdev, host_set->mmio_base);
5025 }
5027 /**
5028 * ata_pci_remove_one - PCI layer callback for device removal
5029 * @pdev: PCI device that was removed
5031 * PCI layer indicates to libata via this hook that
5032 * hot-unplug or module unload event has occurred.
5033 * Handle this by unregistering all objects associated
5034 * with this PCI device. Free those objects. Then finally
5035 * release PCI resources and disable device.
5037 * LOCKING:
5038 * Inherited from PCI layer (may sleep).
5039 */
5041 void ata_pci_remove_one (struct pci_dev *pdev)
5042 {
5043 struct device *dev = pci_dev_to_dev(pdev);
5044 struct ata_host_set *host_set = dev_get_drvdata(dev);
5046 ata_host_set_remove(host_set);
5047 pci_release_regions(pdev);
5048 pci_disable_device(pdev);
5049 dev_set_drvdata(dev, NULL);
5050 }
5052 /* move to PCI subsystem */
5053 int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
5054 {
5055 unsigned long tmp = 0;
5057 switch (bits->width) {
5058 case 1: {
5059 u8 tmp8 = 0;
5060 pci_read_config_byte(pdev, bits->reg, &tmp8);
5061 tmp = tmp8;
5062 break;
5063 }
5064 case 2: {
5065 u16 tmp16 = 0;
5066 pci_read_config_word(pdev, bits->reg, &tmp16);
5067 tmp = tmp16;
5068 break;
5069 }
5070 case 4: {
5071 u32 tmp32 = 0;
5072 pci_read_config_dword(pdev, bits->reg, &tmp32);
5073 tmp = tmp32;
5074 break;
5075 }
5077 default:
5078 return -EINVAL;
5079 }
5081 tmp &= bits->mask;
5083 return (tmp == bits->val) ? 1 : 0;
5084 }
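/* Usage sketch: callers describe the config-space field to test in a
 * struct pci_bits (reg, width in bytes, mask, expected value). A PIIX-style
 * port-enable probe, for example, tests bit 7 of the byte at 0x41; that
 * offset is chipset convention, not a universal layout:
 *
 *	static const struct pci_bits foo_enable_bits = { 0x41, 1, 0x80, 0x80 };
 *
 *	if (!pci_test_config_bits(pdev, &foo_enable_bits))
 *		return -ENODEV;	(port disabled by firmware)
 */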
5086 int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t state)
5087 {
5088 pci_save_state(pdev);
5089 pci_disable_device(pdev);
5090 pci_set_power_state(pdev, PCI_D3hot);
5091 return 0;
5092 }
5094 int ata_pci_device_resume(struct pci_dev *pdev)
5095 {
5096 pci_set_power_state(pdev, PCI_D0);
5097 pci_restore_state(pdev);
5098 pci_enable_device(pdev);
5099 pci_set_master(pdev);
5100 return 0;
5101 }
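/* Typical wiring (sketch; the foo_* names are hypothetical): PCI LLDDs reuse
 * the three callbacks above verbatim in their pci_driver:
 *
 *	static struct pci_driver foo_pci_driver = {
 *		.name		= DRV_NAME,
 *		.id_table	= foo_pci_tbl,
 *		.probe		= foo_init_one,
 *		.remove		= ata_pci_remove_one,
 *		.suspend	= ata_pci_device_suspend,
 *		.resume		= ata_pci_device_resume,
 *	};
 */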
5102 #endif /* CONFIG_PCI */
5105 static int __init ata_init(void)
5106 {
5107 ata_wq = create_workqueue("ata");
5108 if (!ata_wq)
5109 return -ENOMEM;
5111 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
5112 return 0;
5113 }
5115 static void __exit ata_exit(void)
5116 {
5117 destroy_workqueue(ata_wq);
5118 }
5120 module_init(ata_init);
5121 module_exit(ata_exit);
5123 static unsigned long ratelimit_time;
5124 static spinlock_t ata_ratelimit_lock = SPIN_LOCK_UNLOCKED;
5126 int ata_ratelimit(void)
5127 {
5128 int rc;
5129 unsigned long flags;
5131 spin_lock_irqsave(&ata_ratelimit_lock, flags);
5133 if (time_after(jiffies, ratelimit_time)) {
5134 rc = 1;
5135 ratelimit_time = jiffies + (HZ/5);
5136 } else
5137 rc = 0;
5139 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
5141 return rc;
5142 }
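/* Usage sketch: wrap noisy error paths so at most one message per HZ/5
 * jiffies gets through, e.g.
 *
 *	if (ata_ratelimit())
 *		printk(KERN_WARNING "ata%u: spurious interrupt\n", ap->id);
 */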
5144 /*
5145 * libata is essentially a library of internal helper functions for
5146 * low-level ATA host controller drivers. As such, the API/ABI is
5147 * likely to change as new drivers are added and updated.
5148 * Do not depend on ABI/API stability.
5149 */
5151 EXPORT_SYMBOL_GPL(ata_std_bios_param);
5152 EXPORT_SYMBOL_GPL(ata_std_ports);
5153 EXPORT_SYMBOL_GPL(ata_device_add);
5154 EXPORT_SYMBOL_GPL(ata_host_set_remove);
5155 EXPORT_SYMBOL_GPL(ata_sg_init);
5156 EXPORT_SYMBOL_GPL(ata_sg_init_one);
5157 EXPORT_SYMBOL_GPL(__ata_qc_complete);
5158 EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
5159 EXPORT_SYMBOL_GPL(ata_eng_timeout);
5160 EXPORT_SYMBOL_GPL(ata_tf_load);
5161 EXPORT_SYMBOL_GPL(ata_tf_read);
5162 EXPORT_SYMBOL_GPL(ata_noop_dev_select);
5163 EXPORT_SYMBOL_GPL(ata_std_dev_select);
5164 EXPORT_SYMBOL_GPL(ata_tf_to_fis);
5165 EXPORT_SYMBOL_GPL(ata_tf_from_fis);
5166 EXPORT_SYMBOL_GPL(ata_check_status);
5167 EXPORT_SYMBOL_GPL(ata_altstatus);
5168 EXPORT_SYMBOL_GPL(ata_exec_command);
5169 EXPORT_SYMBOL_GPL(ata_port_start);
5170 EXPORT_SYMBOL_GPL(ata_port_stop);
5171 EXPORT_SYMBOL_GPL(ata_host_stop);
5172 EXPORT_SYMBOL_GPL(ata_interrupt);
5173 EXPORT_SYMBOL_GPL(ata_qc_prep);
5174 EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
5175 EXPORT_SYMBOL_GPL(ata_bmdma_setup);
5176 EXPORT_SYMBOL_GPL(ata_bmdma_start);
5177 EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
5178 EXPORT_SYMBOL_GPL(ata_bmdma_status);
5179 EXPORT_SYMBOL_GPL(ata_bmdma_stop);
5180 EXPORT_SYMBOL_GPL(ata_port_probe);
5181 EXPORT_SYMBOL_GPL(sata_phy_reset);
5182 EXPORT_SYMBOL_GPL(__sata_phy_reset);
5183 EXPORT_SYMBOL_GPL(ata_bus_reset);
5184 EXPORT_SYMBOL_GPL(ata_std_probeinit);
5185 EXPORT_SYMBOL_GPL(ata_std_softreset);
5186 EXPORT_SYMBOL_GPL(sata_std_hardreset);
5187 EXPORT_SYMBOL_GPL(ata_std_postreset);
5188 EXPORT_SYMBOL_GPL(ata_std_probe_reset);
5189 EXPORT_SYMBOL_GPL(ata_drive_probe_reset);
5190 EXPORT_SYMBOL_GPL(ata_dev_revalidate);
5191 EXPORT_SYMBOL_GPL(ata_dev_classify);
5192 EXPORT_SYMBOL_GPL(ata_dev_pair);
5193 EXPORT_SYMBOL_GPL(ata_port_disable);
5194 EXPORT_SYMBOL_GPL(ata_ratelimit);
5195 EXPORT_SYMBOL_GPL(ata_busy_sleep);
5196 EXPORT_SYMBOL_GPL(ata_port_queue_task);
5197 EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
5198 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
5199 EXPORT_SYMBOL_GPL(ata_scsi_error);
5200 EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
5201 EXPORT_SYMBOL_GPL(ata_scsi_release);
5202 EXPORT_SYMBOL_GPL(ata_host_intr);
5203 EXPORT_SYMBOL_GPL(ata_id_string);
5204 EXPORT_SYMBOL_GPL(ata_id_c_string);
5205 EXPORT_SYMBOL_GPL(ata_scsi_simulate);
5206 EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
5207 EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
5209 EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
5210 EXPORT_SYMBOL_GPL(ata_timing_compute);
5211 EXPORT_SYMBOL_GPL(ata_timing_merge);
5213 #ifdef CONFIG_PCI
5214 EXPORT_SYMBOL_GPL(pci_test_config_bits);
5215 EXPORT_SYMBOL_GPL(ata_pci_host_stop);
5216 EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
5217 EXPORT_SYMBOL_GPL(ata_pci_init_one);
5218 EXPORT_SYMBOL_GPL(ata_pci_remove_one);
5219 EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
5220 EXPORT_SYMBOL_GPL(ata_pci_device_resume);
5221 EXPORT_SYMBOL_GPL(ata_pci_default_filter);
5222 EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
5223 #endif /* CONFIG_PCI */
5225 EXPORT_SYMBOL_GPL(ata_device_suspend);
5226 EXPORT_SYMBOL_GPL(ata_device_resume);
5227 EXPORT_SYMBOL_GPL(ata_scsi_device_suspend);
5228 EXPORT_SYMBOL_GPL(ata_scsi_device_resume);