1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * libata-core.c - helper library for ATA
5 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
6 * Copyright 2003-2004 Jeff Garzik
8 * libata documentation is available via 'make {ps|pdf}docs',
9 * as Documentation/driver-api/libata.rst
11 * Hardware documentation available from http://www.t13.org/ and
12 * http://www.sata-io.org/
14 * Standards documents from:
15 * http://www.t13.org (ATA standards, PCI DMA IDE spec)
16 * http://www.t10.org (SCSI MMC - for ATAPI MMC)
17 * http://www.sata-io.org (SATA)
18 * http://www.compactflash.org (CF)
19 * http://www.qic.org (QIC157 - Tape and DSC)
20 * http://www.ce-ata.org (CE-ATA: not supported)
22 * libata is essentially a library of internal helper functions for
23 * low-level ATA host controller drivers. As such, the API/ABI is
24 * likely to change as new drivers are added and updated.
25 * Do not depend on ABI/API stability.
28 #include <linux/kernel.h>
29 #include <linux/module.h>
30 #include <linux/pci.h>
31 #include <linux/init.h>
32 #include <linux/list.h>
33 #include <linux/mm.h>
34 #include <linux/spinlock.h>
35 #include <linux/blkdev.h>
36 #include <linux/delay.h>
37 #include <linux/timer.h>
38 #include <linux/time.h>
39 #include <linux/interrupt.h>
40 #include <linux/completion.h>
41 #include <linux/suspend.h>
42 #include <linux/workqueue.h>
43 #include <linux/scatterlist.h>
44 #include <linux/io.h>
45 #include <linux/log2.h>
46 #include <linux/slab.h>
47 #include <linux/glob.h>
48 #include <scsi/scsi.h>
49 #include <scsi/scsi_cmnd.h>
50 #include <scsi/scsi_host.h>
51 #include <linux/libata.h>
52 #include <asm/byteorder.h>
53 #include <asm/unaligned.h>
54 #include <linux/cdrom.h>
55 #include <linux/ratelimit.h>
56 #include <linux/leds.h>
57 #include <linux/pm_runtime.h>
58 #include <linux/platform_device.h>
59 #include <asm/setup.h>
61 #define CREATE_TRACE_POINTS
62 #include <trace/events/libata.h>
64 #include "libata.h"
65 #include "libata-transport.h"
67 const struct ata_port_operations ata_base_port_ops = {
68 .prereset = ata_std_prereset,
69 .postreset = ata_std_postreset,
70 .error_handler = ata_std_error_handler,
71 .sched_eh = ata_std_sched_eh,
72 .end_eh = ata_std_end_eh,
75 const struct ata_port_operations sata_port_ops = {
76 .inherits = &ata_base_port_ops,
78 .qc_defer = ata_std_qc_defer,
79 .hardreset = sata_std_hardreset,
81 EXPORT_SYMBOL_GPL(sata_port_ops);
83 static unsigned int ata_dev_init_params(struct ata_device *dev,
84 u16 heads, u16 sectors);
85 static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
86 static void ata_dev_xfermask(struct ata_device *dev);
87 static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
89 atomic_t ata_print_id = ATOMIC_INIT(0);
91 #ifdef CONFIG_ATA_FORCE
92 struct ata_force_param {
93 const char *name;
94 u8 cbl;
95 u8 spd_limit;
96 unsigned long xfer_mask;
97 unsigned int horkage_on;
98 unsigned int horkage_off;
99 u16 lflags;
102 struct ata_force_ent {
103 int port;
104 int device;
105 struct ata_force_param param;
108 static struct ata_force_ent *ata_force_tbl;
109 static int ata_force_tbl_size;
111 static char ata_force_param_buf[COMMAND_LINE_SIZE] __initdata;
112 /* param_buf is thrown away after initialization, disallow read */
113 module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
114 MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/admin-guide/kernel-parameters.rst for details)");
115 #endif
117 static int atapi_enabled = 1;
118 module_param(atapi_enabled, int, 0444);
119 MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on [default])");
121 static int atapi_dmadir = 0;
122 module_param(atapi_dmadir, int, 0444);
123 MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off [default], 1=on)");
125 int atapi_passthru16 = 1;
126 module_param(atapi_passthru16, int, 0444);
127 MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices (0=off, 1=on [default])");
129 int libata_fua = 0;
130 module_param_named(fua, libata_fua, int, 0444);
131 MODULE_PARM_DESC(fua, "FUA support (0=off [default], 1=on)");
133 static int ata_ignore_hpa;
134 module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
135 MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
137 static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
138 module_param_named(dma, libata_dma_mask, int, 0444);
139 MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");
141 static int ata_probe_timeout;
142 module_param(ata_probe_timeout, int, 0444);
143 MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
145 int libata_noacpi = 0;
146 module_param_named(noacpi, libata_noacpi, int, 0444);
147 MODULE_PARM_DESC(noacpi, "Disable the use of ACPI in probe/suspend/resume (0=off [default], 1=on)");
149 int libata_allow_tpm = 0;
150 module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
151 MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off [default], 1=on)");
153 static int atapi_an;
154 module_param(atapi_an, int, 0444);
 155 MODULE_PARM_DESC(atapi_an, "Enable ATAPI AN media presence notification (0=off [default], 1=on)");
157 MODULE_AUTHOR("Jeff Garzik");
158 MODULE_DESCRIPTION("Library module for ATA devices");
159 MODULE_LICENSE("GPL");
160 MODULE_VERSION(DRV_VERSION);
163 static bool ata_sstatus_online(u32 sstatus)
165 return (sstatus & 0xf) == 0x3;
169 * ata_link_next - link iteration helper
170 * @link: the previous link, NULL to start
171 * @ap: ATA port containing links to iterate
172 * @mode: iteration mode, one of ATA_LITER_*
174 * LOCKING:
175 * Host lock or EH context.
177 * RETURNS:
178 * Pointer to the next link.
180 struct ata_link *ata_link_next(struct ata_link *link, struct ata_port *ap,
181 enum ata_link_iter_mode mode)
183 BUG_ON(mode != ATA_LITER_EDGE &&
184 mode != ATA_LITER_PMP_FIRST && mode != ATA_LITER_HOST_FIRST);
186 /* NULL link indicates start of iteration */
187 if (!link)
188 switch (mode) {
189 case ATA_LITER_EDGE:
190 case ATA_LITER_PMP_FIRST:
191 if (sata_pmp_attached(ap))
192 return ap->pmp_link;
193 /* fall through */
194 case ATA_LITER_HOST_FIRST:
195 return &ap->link;
198 /* we just iterated over the host link, what's next? */
199 if (link == &ap->link)
200 switch (mode) {
201 case ATA_LITER_HOST_FIRST:
202 if (sata_pmp_attached(ap))
203 return ap->pmp_link;
204 /* fall through */
205 case ATA_LITER_PMP_FIRST:
206 if (unlikely(ap->slave_link))
207 return ap->slave_link;
208 /* fall through */
209 case ATA_LITER_EDGE:
210 return NULL;
213 /* slave_link excludes PMP */
214 if (unlikely(link == ap->slave_link))
215 return NULL;
217 /* we were over a PMP link */
218 if (++link < ap->pmp_link + ap->nr_pmp_links)
219 return link;
221 if (mode == ATA_LITER_PMP_FIRST)
222 return &ap->link;
224 return NULL;
226 EXPORT_SYMBOL_GPL(ata_link_next);
229 * ata_dev_next - device iteration helper
230 * @dev: the previous device, NULL to start
231 * @link: ATA link containing devices to iterate
232 * @mode: iteration mode, one of ATA_DITER_*
234 * LOCKING:
235 * Host lock or EH context.
237 * RETURNS:
238 * Pointer to the next device.
240 struct ata_device *ata_dev_next(struct ata_device *dev, struct ata_link *link,
241 enum ata_dev_iter_mode mode)
243 BUG_ON(mode != ATA_DITER_ENABLED && mode != ATA_DITER_ENABLED_REVERSE &&
244 mode != ATA_DITER_ALL && mode != ATA_DITER_ALL_REVERSE);
246 /* NULL dev indicates start of iteration */
247 if (!dev)
248 switch (mode) {
249 case ATA_DITER_ENABLED:
250 case ATA_DITER_ALL:
251 dev = link->device;
252 goto check;
253 case ATA_DITER_ENABLED_REVERSE:
254 case ATA_DITER_ALL_REVERSE:
255 dev = link->device + ata_link_max_devices(link) - 1;
256 goto check;
259 next:
260 /* move to the next one */
261 switch (mode) {
262 case ATA_DITER_ENABLED:
263 case ATA_DITER_ALL:
264 if (++dev < link->device + ata_link_max_devices(link))
265 goto check;
266 return NULL;
267 case ATA_DITER_ENABLED_REVERSE:
268 case ATA_DITER_ALL_REVERSE:
269 if (--dev >= link->device)
270 goto check;
271 return NULL;
274 check:
275 if ((mode == ATA_DITER_ENABLED || mode == ATA_DITER_ENABLED_REVERSE) &&
276 !ata_dev_enabled(dev))
277 goto next;
278 return dev;
280 EXPORT_SYMBOL_GPL(ata_dev_next);
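/*
 * Usage sketch (editorial addition, not part of the original file): most
 * callers use the ata_for_each_link()/ata_for_each_dev() wrappers from
 * <linux/libata.h>, which are built on ata_link_next()/ata_dev_next()
 * above.  The function name example_count_enabled_devices() is
 * hypothetical.
 */
static inline int example_count_enabled_devices(struct ata_port *ap)
{
	struct ata_link *link;
	struct ata_device *dev;
	int count = 0;

	/* EDGE mode skips the host link when a PMP is attached */
	ata_for_each_link(link, ap, EDGE)
		ata_for_each_dev(dev, link, ENABLED)
			count++;

	return count;
}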
283 * ata_dev_phys_link - find physical link for a device
284 * @dev: ATA device to look up physical link for
286 * Look up physical link which @dev is attached to. Note that
287 * this is different from @dev->link only when @dev is on slave
288 * link. For all other cases, it's the same as @dev->link.
290 * LOCKING:
291 * Don't care.
293 * RETURNS:
294 * Pointer to the found physical link.
296 struct ata_link *ata_dev_phys_link(struct ata_device *dev)
298 struct ata_port *ap = dev->link->ap;
300 if (!ap->slave_link)
301 return dev->link;
302 if (!dev->devno)
303 return &ap->link;
304 return ap->slave_link;
307 #ifdef CONFIG_ATA_FORCE
309 * ata_force_cbl - force cable type according to libata.force
310 * @ap: ATA port of interest
312 * Force cable type according to libata.force and whine about it.
313 * The last entry which has matching port number is used, so it
314 * can be specified as part of device force parameters. For
315 * example, both "a:40c,1.00:udma4" and "1.00:40c,udma4" have the
316 * same effect.
318 * LOCKING:
319 * EH context.
321 void ata_force_cbl(struct ata_port *ap)
323 int i;
325 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
326 const struct ata_force_ent *fe = &ata_force_tbl[i];
328 if (fe->port != -1 && fe->port != ap->print_id)
329 continue;
331 if (fe->param.cbl == ATA_CBL_NONE)
332 continue;
334 ap->cbl = fe->param.cbl;
335 ata_port_notice(ap, "FORCE: cable set to %s\n", fe->param.name);
336 return;
341 * ata_force_link_limits - force link limits according to libata.force
342 * @link: ATA link of interest
344 * Force link flags and SATA spd limit according to libata.force
345 * and whine about it. When only the port part is specified
346 * (e.g. 1:), the limit applies to all links connected to both
347 * the host link and all fan-out ports connected via PMP. If the
348 * device part is specified as 0 (e.g. 1.00:), it specifies the
349 * first fan-out link not the host link. Device number 15 always
350 * points to the host link whether PMP is attached or not. If the
 351 * controller has a slave link, device number 16 points to it.
353 * LOCKING:
354 * EH context.
356 static void ata_force_link_limits(struct ata_link *link)
358 bool did_spd = false;
359 int linkno = link->pmp;
360 int i;
362 if (ata_is_host_link(link))
363 linkno += 15;
365 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
366 const struct ata_force_ent *fe = &ata_force_tbl[i];
368 if (fe->port != -1 && fe->port != link->ap->print_id)
369 continue;
371 if (fe->device != -1 && fe->device != linkno)
372 continue;
374 /* only honor the first spd limit */
375 if (!did_spd && fe->param.spd_limit) {
376 link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
377 ata_link_notice(link, "FORCE: PHY spd limit set to %s\n",
378 fe->param.name);
379 did_spd = true;
382 /* let lflags stack */
383 if (fe->param.lflags) {
384 link->flags |= fe->param.lflags;
385 ata_link_notice(link,
386 "FORCE: link flag 0x%x forced -> 0x%x\n",
387 fe->param.lflags, link->flags);
393 * ata_force_xfermask - force xfermask according to libata.force
394 * @dev: ATA device of interest
396 * Force xfer_mask according to libata.force and whine about it.
397 * For consistency with link selection, device number 15 selects
398 * the first device connected to the host link.
400 * LOCKING:
401 * EH context.
403 static void ata_force_xfermask(struct ata_device *dev)
405 int devno = dev->link->pmp + dev->devno;
406 int alt_devno = devno;
407 int i;
409 /* allow n.15/16 for devices attached to host port */
410 if (ata_is_host_link(dev->link))
411 alt_devno += 15;
413 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
414 const struct ata_force_ent *fe = &ata_force_tbl[i];
415 unsigned long pio_mask, mwdma_mask, udma_mask;
417 if (fe->port != -1 && fe->port != dev->link->ap->print_id)
418 continue;
420 if (fe->device != -1 && fe->device != devno &&
421 fe->device != alt_devno)
422 continue;
424 if (!fe->param.xfer_mask)
425 continue;
427 ata_unpack_xfermask(fe->param.xfer_mask,
428 &pio_mask, &mwdma_mask, &udma_mask);
429 if (udma_mask)
430 dev->udma_mask = udma_mask;
431 else if (mwdma_mask) {
432 dev->udma_mask = 0;
433 dev->mwdma_mask = mwdma_mask;
434 } else {
435 dev->udma_mask = 0;
436 dev->mwdma_mask = 0;
437 dev->pio_mask = pio_mask;
440 ata_dev_notice(dev, "FORCE: xfer_mask set to %s\n",
441 fe->param.name);
442 return;
447 * ata_force_horkage - force horkage according to libata.force
448 * @dev: ATA device of interest
450 * Force horkage according to libata.force and whine about it.
451 * For consistency with link selection, device number 15 selects
452 * the first device connected to the host link.
454 * LOCKING:
455 * EH context.
457 static void ata_force_horkage(struct ata_device *dev)
459 int devno = dev->link->pmp + dev->devno;
460 int alt_devno = devno;
461 int i;
463 /* allow n.15/16 for devices attached to host port */
464 if (ata_is_host_link(dev->link))
465 alt_devno += 15;
467 for (i = 0; i < ata_force_tbl_size; i++) {
468 const struct ata_force_ent *fe = &ata_force_tbl[i];
470 if (fe->port != -1 && fe->port != dev->link->ap->print_id)
471 continue;
473 if (fe->device != -1 && fe->device != devno &&
474 fe->device != alt_devno)
475 continue;
477 if (!(~dev->horkage & fe->param.horkage_on) &&
478 !(dev->horkage & fe->param.horkage_off))
479 continue;
481 dev->horkage |= fe->param.horkage_on;
482 dev->horkage &= ~fe->param.horkage_off;
484 ata_dev_notice(dev, "FORCE: horkage modified (%s)\n",
485 fe->param.name);
488 #else
489 static inline void ata_force_link_limits(struct ata_link *link) { }
490 static inline void ata_force_xfermask(struct ata_device *dev) { }
491 static inline void ata_force_horkage(struct ata_device *dev) { }
492 #endif
495 * atapi_cmd_type - Determine ATAPI command type from SCSI opcode
496 * @opcode: SCSI opcode
498 * Determine ATAPI command type from @opcode.
500 * LOCKING:
501 * None.
503 * RETURNS:
504 * ATAPI_{READ|WRITE|READ_CD|PASS_THRU|MISC}
506 int atapi_cmd_type(u8 opcode)
508 switch (opcode) {
509 case GPCMD_READ_10:
510 case GPCMD_READ_12:
511 return ATAPI_READ;
513 case GPCMD_WRITE_10:
514 case GPCMD_WRITE_12:
515 case GPCMD_WRITE_AND_VERIFY_10:
516 return ATAPI_WRITE;
518 case GPCMD_READ_CD:
519 case GPCMD_READ_CD_MSF:
520 return ATAPI_READ_CD;
522 case ATA_16:
523 case ATA_12:
524 if (atapi_passthru16)
525 return ATAPI_PASS_THRU;
 526 /* fall through */
527 default:
528 return ATAPI_MISC;
531 EXPORT_SYMBOL_GPL(atapi_cmd_type);
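/*
 * Minimal sketch (editorial addition): atapi_cmd_type() is typically
 * consulted while translating a SCSI CDB for an ATAPI device, e.g. to
 * tell data-transfer commands from everything else.  The helper name
 * example_is_atapi_rw() is hypothetical.
 */
static inline bool example_is_atapi_rw(const u8 *cdb)
{
	int type = atapi_cmd_type(cdb[0]);

	/* GPCMD_READ_10 -> ATAPI_READ, GPCMD_READ_CD -> ATAPI_READ_CD,
	 * ATA_16 -> ATAPI_PASS_THRU (if atapi_passthru16), else ATAPI_MISC */
	return type == ATAPI_READ || type == ATAPI_WRITE ||
	       type == ATAPI_READ_CD;
}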
 533 static const u8 ata_rw_cmds[] = {
 534 /* pio multi */
 535 ATA_CMD_READ_MULTI,
 536 ATA_CMD_WRITE_MULTI,
 537 ATA_CMD_READ_MULTI_EXT,
 538 ATA_CMD_WRITE_MULTI_EXT,
 539 0,
 540 0,
 541 0,
 542 ATA_CMD_WRITE_MULTI_FUA_EXT,
 543 /* pio */
 544 ATA_CMD_PIO_READ,
 545 ATA_CMD_PIO_WRITE,
 546 ATA_CMD_PIO_READ_EXT,
 547 ATA_CMD_PIO_WRITE_EXT,
 548 0,
 549 0,
 550 0,
 551 0,
 552 /* dma */
 553 ATA_CMD_READ,
 554 ATA_CMD_WRITE,
 555 ATA_CMD_READ_EXT,
 556 ATA_CMD_WRITE_EXT,
 557 0,
 558 0,
 559 0,
 560 ATA_CMD_WRITE_FUA_EXT
 561 };
564 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
565 * @tf: command to examine and configure
566 * @dev: device tf belongs to
568 * Examine the device configuration and tf->flags to calculate
569 * the proper read/write commands and protocol to use.
571 * LOCKING:
572 * caller.
574 static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
576 u8 cmd;
578 int index, fua, lba48, write;
580 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
581 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
582 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
584 if (dev->flags & ATA_DFLAG_PIO) {
585 tf->protocol = ATA_PROT_PIO;
586 index = dev->multi_count ? 0 : 8;
587 } else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
588 /* Unable to use DMA due to host limitation */
589 tf->protocol = ATA_PROT_PIO;
590 index = dev->multi_count ? 0 : 8;
591 } else {
592 tf->protocol = ATA_PROT_DMA;
593 index = 16;
596 cmd = ata_rw_cmds[index + fua + lba48 + write];
597 if (cmd) {
598 tf->command = cmd;
599 return 0;
601 return -1;
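/*
 * Worked example (editorial addition) of the ata_rw_cmds[] indexing used
 * above: the base index selects the group (0 = PIO multi, 8 = PIO,
 * 16 = DMA), then lba48 adds 2, write adds 1 and fua adds 4.  A DMA,
 * LBA48, FUA write is therefore 16 + 2 + 1 + 4 = 23, i.e.
 * ATA_CMD_WRITE_FUA_EXT, while a FUA write without LBA48 lands on a zero
 * slot (16 + 1 + 4 = 21) and ata_rwcmd_protocol() returns -1.
 */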
605 * ata_tf_read_block - Read block address from ATA taskfile
606 * @tf: ATA taskfile of interest
607 * @dev: ATA device @tf belongs to
609 * LOCKING:
610 * None.
612 * Read block address from @tf. This function can handle all
613 * three address formats - LBA, LBA48 and CHS. tf->protocol and
614 * flags select the address format to use.
616 * RETURNS:
617 * Block address read from @tf.
619 u64 ata_tf_read_block(const struct ata_taskfile *tf, struct ata_device *dev)
621 u64 block = 0;
623 if (tf->flags & ATA_TFLAG_LBA) {
624 if (tf->flags & ATA_TFLAG_LBA48) {
625 block |= (u64)tf->hob_lbah << 40;
626 block |= (u64)tf->hob_lbam << 32;
627 block |= (u64)tf->hob_lbal << 24;
628 } else
629 block |= (tf->device & 0xf) << 24;
631 block |= tf->lbah << 16;
632 block |= tf->lbam << 8;
633 block |= tf->lbal;
634 } else {
635 u32 cyl, head, sect;
637 cyl = tf->lbam | (tf->lbah << 8);
638 head = tf->device & 0xf;
639 sect = tf->lbal;
641 if (!sect) {
642 ata_dev_warn(dev,
643 "device reported invalid CHS sector 0\n");
644 return U64_MAX;
647 block = (cyl * dev->heads + head) * dev->sectors + sect - 1;
650 return block;
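/*
 * Worked example (editorial addition) for the CHS branch above: with
 * dev->heads == 16 and dev->sectors == 63, a taskfile carrying cyl 2,
 * head 3, sect 4 yields block = (2 * 16 + 3) * 63 + 4 - 1 = 2208.
 */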
654 * ata_build_rw_tf - Build ATA taskfile for given read/write request
655 * @tf: Target ATA taskfile
656 * @dev: ATA device @tf belongs to
657 * @block: Block address
658 * @n_block: Number of blocks
659 * @tf_flags: RW/FUA etc...
660 * @tag: tag
661 * @class: IO priority class
663 * LOCKING:
664 * None.
666 * Build ATA taskfile @tf for read/write request described by
667 * @block, @n_block, @tf_flags and @tag on @dev.
669 * RETURNS:
671 * 0 on success, -ERANGE if the request is too large for @dev,
672 * -EINVAL if the request is invalid.
674 int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
675 u64 block, u32 n_block, unsigned int tf_flags,
676 unsigned int tag, int class)
678 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
679 tf->flags |= tf_flags;
681 if (ata_ncq_enabled(dev) && !ata_tag_internal(tag)) {
682 /* yay, NCQ */
683 if (!lba_48_ok(block, n_block))
684 return -ERANGE;
686 tf->protocol = ATA_PROT_NCQ;
687 tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
689 if (tf->flags & ATA_TFLAG_WRITE)
690 tf->command = ATA_CMD_FPDMA_WRITE;
691 else
692 tf->command = ATA_CMD_FPDMA_READ;
694 tf->nsect = tag << 3;
695 tf->hob_feature = (n_block >> 8) & 0xff;
696 tf->feature = n_block & 0xff;
698 tf->hob_lbah = (block >> 40) & 0xff;
699 tf->hob_lbam = (block >> 32) & 0xff;
700 tf->hob_lbal = (block >> 24) & 0xff;
701 tf->lbah = (block >> 16) & 0xff;
702 tf->lbam = (block >> 8) & 0xff;
703 tf->lbal = block & 0xff;
705 tf->device = ATA_LBA;
706 if (tf->flags & ATA_TFLAG_FUA)
707 tf->device |= 1 << 7;
709 if (dev->flags & ATA_DFLAG_NCQ_PRIO) {
710 if (class == IOPRIO_CLASS_RT)
711 tf->hob_nsect |= ATA_PRIO_HIGH <<
712 ATA_SHIFT_PRIO;
714 } else if (dev->flags & ATA_DFLAG_LBA) {
715 tf->flags |= ATA_TFLAG_LBA;
717 if (lba_28_ok(block, n_block)) {
718 /* use LBA28 */
719 tf->device |= (block >> 24) & 0xf;
720 } else if (lba_48_ok(block, n_block)) {
721 if (!(dev->flags & ATA_DFLAG_LBA48))
722 return -ERANGE;
724 /* use LBA48 */
725 tf->flags |= ATA_TFLAG_LBA48;
727 tf->hob_nsect = (n_block >> 8) & 0xff;
729 tf->hob_lbah = (block >> 40) & 0xff;
730 tf->hob_lbam = (block >> 32) & 0xff;
731 tf->hob_lbal = (block >> 24) & 0xff;
732 } else
733 /* request too large even for LBA48 */
734 return -ERANGE;
736 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
737 return -EINVAL;
739 tf->nsect = n_block & 0xff;
741 tf->lbah = (block >> 16) & 0xff;
742 tf->lbam = (block >> 8) & 0xff;
743 tf->lbal = block & 0xff;
745 tf->device |= ATA_LBA;
746 } else {
747 /* CHS */
748 u32 sect, head, cyl, track;
750 /* The request -may- be too large for CHS addressing. */
751 if (!lba_28_ok(block, n_block))
752 return -ERANGE;
754 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
755 return -EINVAL;
757 /* Convert LBA to CHS */
758 track = (u32)block / dev->sectors;
759 cyl = track / dev->heads;
760 head = track % dev->heads;
761 sect = (u32)block % dev->sectors + 1;
763 DPRINTK("block %u track %u cyl %u head %u sect %u\n",
764 (u32)block, track, cyl, head, sect);
766 /* Check whether the converted CHS can fit.
767 Cylinder: 0-65535
768 Head: 0-15
769 Sector: 1-255*/
770 if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
771 return -ERANGE;
773 tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
774 tf->lbal = sect;
775 tf->lbam = cyl;
776 tf->lbah = cyl >> 8;
777 tf->device |= head;
780 return 0;
784 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
785 * @pio_mask: pio_mask
786 * @mwdma_mask: mwdma_mask
787 * @udma_mask: udma_mask
789 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
790 * unsigned int xfer_mask.
792 * LOCKING:
793 * None.
795 * RETURNS:
796 * Packed xfer_mask.
798 unsigned long ata_pack_xfermask(unsigned long pio_mask,
799 unsigned long mwdma_mask,
800 unsigned long udma_mask)
802 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
803 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
804 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
806 EXPORT_SYMBOL_GPL(ata_pack_xfermask);
809 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
810 * @xfer_mask: xfer_mask to unpack
811 * @pio_mask: resulting pio_mask
812 * @mwdma_mask: resulting mwdma_mask
813 * @udma_mask: resulting udma_mask
815 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
816 * Any NULL destination masks will be ignored.
818 void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
819 unsigned long *mwdma_mask, unsigned long *udma_mask)
821 if (pio_mask)
822 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
823 if (mwdma_mask)
824 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
825 if (udma_mask)
826 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
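/*
 * Minimal sketch (editorial addition) of the pack/unpack round trip used
 * throughout mode configuration.  The helper name example_limit_to_udma2()
 * is hypothetical.
 */
static inline unsigned long example_limit_to_udma2(unsigned long xfer_mask)
{
	unsigned long pio, mwdma, udma;

	ata_unpack_xfermask(xfer_mask, &pio, &mwdma, &udma);
	udma &= 0x07;		/* keep UDMA/16, UDMA/25 and UDMA/33 only */
	return ata_pack_xfermask(pio, mwdma, udma);
}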
829 static const struct ata_xfer_ent {
830 int shift, bits;
831 u8 base;
832 } ata_xfer_tbl[] = {
833 { ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
834 { ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
835 { ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
836 { -1, },
840 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
841 * @xfer_mask: xfer_mask of interest
843 * Return matching XFER_* value for @xfer_mask. Only the highest
844 * bit of @xfer_mask is considered.
846 * LOCKING:
847 * None.
849 * RETURNS:
850 * Matching XFER_* value, 0xff if no match found.
852 u8 ata_xfer_mask2mode(unsigned long xfer_mask)
854 int highbit = fls(xfer_mask) - 1;
855 const struct ata_xfer_ent *ent;
857 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
858 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
859 return ent->base + highbit - ent->shift;
860 return 0xff;
862 EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
865 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
866 * @xfer_mode: XFER_* of interest
868 * Return matching xfer_mask for @xfer_mode.
870 * LOCKING:
871 * None.
873 * RETURNS:
874 * Matching xfer_mask, 0 if no match found.
876 unsigned long ata_xfer_mode2mask(u8 xfer_mode)
878 const struct ata_xfer_ent *ent;
880 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
881 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
882 return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
883 & ~((1 << ent->shift) - 1);
884 return 0;
886 EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
889 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
890 * @xfer_mode: XFER_* of interest
892 * Return matching xfer_shift for @xfer_mode.
894 * LOCKING:
895 * None.
897 * RETURNS:
898 * Matching xfer_shift, -1 if no match found.
900 int ata_xfer_mode2shift(unsigned long xfer_mode)
902 const struct ata_xfer_ent *ent;
904 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
905 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
906 return ent->shift;
907 return -1;
909 EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
912 * ata_mode_string - convert xfer_mask to string
913 * @xfer_mask: mask of bits supported; only highest bit counts.
915 * Determine string which represents the highest speed
 916 * (highest bit in @xfer_mask).
918 * LOCKING:
919 * None.
921 * RETURNS:
922 * Constant C string representing highest speed listed in
 923 * @xfer_mask, or the constant C string "<n/a>".
925 const char *ata_mode_string(unsigned long xfer_mask)
927 static const char * const xfer_mode_str[] = {
928 "PIO0",
929 "PIO1",
930 "PIO2",
931 "PIO3",
932 "PIO4",
933 "PIO5",
934 "PIO6",
935 "MWDMA0",
936 "MWDMA1",
937 "MWDMA2",
938 "MWDMA3",
939 "MWDMA4",
940 "UDMA/16",
941 "UDMA/25",
942 "UDMA/33",
943 "UDMA/44",
944 "UDMA/66",
945 "UDMA/100",
946 "UDMA/133",
947 "UDMA7",
949 int highbit;
951 highbit = fls(xfer_mask) - 1;
952 if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
953 return xfer_mode_str[highbit];
954 return "<n/a>";
956 EXPORT_SYMBOL_GPL(ata_mode_string);
958 const char *sata_spd_string(unsigned int spd)
960 static const char * const spd_str[] = {
961 "1.5 Gbps",
962 "3.0 Gbps",
963 "6.0 Gbps",
966 if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
967 return "<unknown>";
968 return spd_str[spd - 1];
972 * ata_dev_classify - determine device type based on ATA-spec signature
973 * @tf: ATA taskfile register set for device to be identified
975 * Determine from taskfile register contents whether a device is
976 * ATA or ATAPI, as per "Signature and persistence" section
977 * of ATA/PI spec (volume 1, sect 5.14).
979 * LOCKING:
980 * None.
982 * RETURNS:
983 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP,
 984 * %ATA_DEV_ZAC, or %ATA_DEV_UNKNOWN in the event of failure.
986 unsigned int ata_dev_classify(const struct ata_taskfile *tf)
988 /* Apple's open source Darwin code hints that some devices only
 989 * put a proper signature into the LBA mid/high registers.
 990 * So, we only check those. It's sufficient for uniqueness.
992 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
993 * signatures for ATA and ATAPI devices attached on SerialATA,
994 * 0x3c/0xc3 and 0x69/0x96 respectively. However, SerialATA
 995 * spec has never mentioned using different signatures
996 * for ATA/ATAPI devices. Then, Serial ATA II: Port
997 * Multiplier specification began to use 0x69/0x96 to identify
 998 * port multipliers and 0x3c/0xc3 to identify SEMB devices.
 999 * ATA/ATAPI-7 soon dropped the descriptions of 0x3c/0xc3 and
 1000 * 0x69/0x96 and described them as reserved for
 1001 * SerialATA.
1003 * We follow the current spec and consider that 0x69/0x96
1004 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
1005 * Unfortunately, WDC WD1600JS-62MHB5 (a hard drive) reports
1006 * SEMB signature. This is worked around in
1007 * ata_dev_read_id().
1009 if ((tf->lbam == 0) && (tf->lbah == 0)) {
1010 DPRINTK("found ATA device by sig\n");
1011 return ATA_DEV_ATA;
1014 if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
1015 DPRINTK("found ATAPI device by sig\n");
1016 return ATA_DEV_ATAPI;
1019 if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
1020 DPRINTK("found PMP device by sig\n");
1021 return ATA_DEV_PMP;
1024 if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
1025 DPRINTK("found SEMB device by sig (could be ATA device)\n");
1026 return ATA_DEV_SEMB;
1029 if ((tf->lbam == 0xcd) && (tf->lbah == 0xab)) {
1030 DPRINTK("found ZAC device by sig\n");
1031 return ATA_DEV_ZAC;
1034 DPRINTK("unknown device\n");
1035 return ATA_DEV_UNKNOWN;
1037 EXPORT_SYMBOL_GPL(ata_dev_classify);
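/*
 * Illustrative sketch (editorial addition): reset paths capture the
 * signature taskfile and hand it to ata_dev_classify().  The values below
 * are the port multiplier signature discussed above; the function name is
 * hypothetical.
 */
static inline unsigned int example_classify_pmp_signature(void)
{
	struct ata_taskfile tf = { .lbam = 0x69, .lbah = 0x96 };

	return ata_dev_classify(&tf);	/* yields ATA_DEV_PMP */
}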
1040 * ata_id_string - Convert IDENTIFY DEVICE page into string
1041 * @id: IDENTIFY DEVICE results we will examine
1042 * @s: string into which data is output
1043 * @ofs: offset into identify device page
1044 * @len: length of string to return. must be an even number.
1046 * The strings in the IDENTIFY DEVICE page are broken up into
1047 * 16-bit chunks. Run through the string, and output each
1048 * 8-bit chunk linearly, regardless of platform.
1050 * LOCKING:
1051 * caller.
1054 void ata_id_string(const u16 *id, unsigned char *s,
1055 unsigned int ofs, unsigned int len)
1057 unsigned int c;
1059 BUG_ON(len & 1);
1061 while (len > 0) {
1062 c = id[ofs] >> 8;
1063 *s = c;
1064 s++;
1066 c = id[ofs] & 0xff;
1067 *s = c;
1068 s++;
1070 ofs++;
1071 len -= 2;
1074 EXPORT_SYMBOL_GPL(ata_id_string);
1077 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
1078 * @id: IDENTIFY DEVICE results we will examine
1079 * @s: string into which data is output
1080 * @ofs: offset into identify device page
1081 * @len: length of string to return. must be an odd number.
1083 * This function is identical to ata_id_string except that it
1084 * trims trailing spaces and terminates the resulting string with
1085 * null. @len must be actual maximum length (even number) + 1.
1087 * LOCKING:
1088 * caller.
1090 void ata_id_c_string(const u16 *id, unsigned char *s,
1091 unsigned int ofs, unsigned int len)
1093 unsigned char *p;
1095 ata_id_string(id, s, ofs, len - 1);
1097 p = s + strnlen(s, len - 1);
1098 while (p > s && p[-1] == ' ')
1099 p--;
1100 *p = '\0';
1102 EXPORT_SYMBOL_GPL(ata_id_c_string);
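/*
 * Usage sketch (editorial addition): extracting the model string from raw
 * IDENTIFY data, as EH and several drivers do.  ATA_ID_PROD and
 * ATA_ID_PROD_LEN come from <linux/ata.h>; example_print_model() is a
 * hypothetical name.
 */
static inline void example_print_model(const u16 *id)
{
	unsigned char model[ATA_ID_PROD_LEN + 1];

	ata_id_c_string(id, model, ATA_ID_PROD, sizeof(model));
	pr_info("model: %s\n", model);
}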
1104 static u64 ata_id_n_sectors(const u16 *id)
1106 if (ata_id_has_lba(id)) {
1107 if (ata_id_has_lba48(id))
1108 return ata_id_u64(id, ATA_ID_LBA_CAPACITY_2);
1109 else
1110 return ata_id_u32(id, ATA_ID_LBA_CAPACITY);
1111 } else {
1112 if (ata_id_current_chs_valid(id))
1113 return id[ATA_ID_CUR_CYLS] * id[ATA_ID_CUR_HEADS] *
1114 id[ATA_ID_CUR_SECTORS];
1115 else
1116 return id[ATA_ID_CYLS] * id[ATA_ID_HEADS] *
1117 id[ATA_ID_SECTORS];
1121 u64 ata_tf_to_lba48(const struct ata_taskfile *tf)
1123 u64 sectors = 0;
1125 sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
1126 sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
1127 sectors |= ((u64)(tf->hob_lbal & 0xff)) << 24;
1128 sectors |= (tf->lbah & 0xff) << 16;
1129 sectors |= (tf->lbam & 0xff) << 8;
1130 sectors |= (tf->lbal & 0xff);
1132 return sectors;
1135 u64 ata_tf_to_lba(const struct ata_taskfile *tf)
1137 u64 sectors = 0;
1139 sectors |= (tf->device & 0x0f) << 24;
1140 sectors |= (tf->lbah & 0xff) << 16;
1141 sectors |= (tf->lbam & 0xff) << 8;
1142 sectors |= (tf->lbal & 0xff);
1144 return sectors;
1148 * ata_read_native_max_address - Read native max address
1149 * @dev: target device
1150 * @max_sectors: out parameter for the result native max address
1152 * Perform an LBA48 or LBA28 native size query upon the device in
1153 * question.
1155 * RETURNS:
1156 * 0 on success, -EACCES if command is aborted by the drive.
1157 * -EIO on other errors.
1159 static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
1161 unsigned int err_mask;
1162 struct ata_taskfile tf;
1163 int lba48 = ata_id_has_lba48(dev->id);
1165 ata_tf_init(dev, &tf);
1167 /* always clear all address registers */
1168 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1170 if (lba48) {
1171 tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
1172 tf.flags |= ATA_TFLAG_LBA48;
1173 } else
1174 tf.command = ATA_CMD_READ_NATIVE_MAX;
1176 tf.protocol = ATA_PROT_NODATA;
1177 tf.device |= ATA_LBA;
1179 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1180 if (err_mask) {
1181 ata_dev_warn(dev,
1182 "failed to read native max address (err_mask=0x%x)\n",
1183 err_mask);
1184 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
1185 return -EACCES;
1186 return -EIO;
1189 if (lba48)
1190 *max_sectors = ata_tf_to_lba48(&tf) + 1;
1191 else
1192 *max_sectors = ata_tf_to_lba(&tf) + 1;
1193 if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
1194 (*max_sectors)--;
1195 return 0;
1199 * ata_set_max_sectors - Set max sectors
1200 * @dev: target device
1201 * @new_sectors: new max sectors value to set for the device
1203 * Set max sectors of @dev to @new_sectors.
1205 * RETURNS:
1206 * 0 on success, -EACCES if command is aborted or denied (due to
1207 * previous non-volatile SET_MAX) by the drive. -EIO on other
1208 * errors.
1210 static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
1212 unsigned int err_mask;
1213 struct ata_taskfile tf;
1214 int lba48 = ata_id_has_lba48(dev->id);
1216 new_sectors--;
1218 ata_tf_init(dev, &tf);
1220 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1222 if (lba48) {
1223 tf.command = ATA_CMD_SET_MAX_EXT;
1224 tf.flags |= ATA_TFLAG_LBA48;
1226 tf.hob_lbal = (new_sectors >> 24) & 0xff;
1227 tf.hob_lbam = (new_sectors >> 32) & 0xff;
1228 tf.hob_lbah = (new_sectors >> 40) & 0xff;
1229 } else {
1230 tf.command = ATA_CMD_SET_MAX;
1232 tf.device |= (new_sectors >> 24) & 0xf;
1235 tf.protocol = ATA_PROT_NODATA;
1236 tf.device |= ATA_LBA;
1238 tf.lbal = (new_sectors >> 0) & 0xff;
1239 tf.lbam = (new_sectors >> 8) & 0xff;
1240 tf.lbah = (new_sectors >> 16) & 0xff;
1242 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1243 if (err_mask) {
1244 ata_dev_warn(dev,
1245 "failed to set max address (err_mask=0x%x)\n",
1246 err_mask);
1247 if (err_mask == AC_ERR_DEV &&
1248 (tf.feature & (ATA_ABORTED | ATA_IDNF)))
1249 return -EACCES;
1250 return -EIO;
1253 return 0;
1257 * ata_hpa_resize - Resize a device with an HPA set
1258 * @dev: Device to resize
1260 * Read the size of an LBA28 or LBA48 disk with HPA features and resize
1261 * it if required to the full size of the media. The caller must check
1262 * the drive has the HPA feature set enabled.
1264 * RETURNS:
1265 * 0 on success, -errno on failure.
1267 static int ata_hpa_resize(struct ata_device *dev)
1269 struct ata_eh_context *ehc = &dev->link->eh_context;
1270 int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
1271 bool unlock_hpa = ata_ignore_hpa || dev->flags & ATA_DFLAG_UNLOCK_HPA;
1272 u64 sectors = ata_id_n_sectors(dev->id);
1273 u64 native_sectors;
1274 int rc;
1276 /* do we need to do it? */
1277 if ((dev->class != ATA_DEV_ATA && dev->class != ATA_DEV_ZAC) ||
1278 !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
1279 (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
1280 return 0;
1282 /* read native max address */
1283 rc = ata_read_native_max_address(dev, &native_sectors);
1284 if (rc) {
1285 /* If device aborted the command or HPA isn't going to
1286 * be unlocked, skip HPA resizing.
1288 if (rc == -EACCES || !unlock_hpa) {
1289 ata_dev_warn(dev,
1290 "HPA support seems broken, skipping HPA handling\n");
1291 dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1293 /* we can continue if device aborted the command */
1294 if (rc == -EACCES)
1295 rc = 0;
1298 return rc;
1300 dev->n_native_sectors = native_sectors;
1302 /* nothing to do? */
1303 if (native_sectors <= sectors || !unlock_hpa) {
1304 if (!print_info || native_sectors == sectors)
1305 return 0;
1307 if (native_sectors > sectors)
1308 ata_dev_info(dev,
1309 "HPA detected: current %llu, native %llu\n",
1310 (unsigned long long)sectors,
1311 (unsigned long long)native_sectors);
1312 else if (native_sectors < sectors)
1313 ata_dev_warn(dev,
1314 "native sectors (%llu) is smaller than sectors (%llu)\n",
1315 (unsigned long long)native_sectors,
1316 (unsigned long long)sectors);
1317 return 0;
1320 /* let's unlock HPA */
1321 rc = ata_set_max_sectors(dev, native_sectors);
1322 if (rc == -EACCES) {
1323 /* if device aborted the command, skip HPA resizing */
1324 ata_dev_warn(dev,
1325 "device aborted resize (%llu -> %llu), skipping HPA handling\n",
1326 (unsigned long long)sectors,
1327 (unsigned long long)native_sectors);
1328 dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1329 return 0;
1330 } else if (rc)
1331 return rc;
1333 /* re-read IDENTIFY data */
1334 rc = ata_dev_reread_id(dev, 0);
1335 if (rc) {
1336 ata_dev_err(dev,
1337 "failed to re-read IDENTIFY data after HPA resizing\n");
1338 return rc;
1341 if (print_info) {
1342 u64 new_sectors = ata_id_n_sectors(dev->id);
1343 ata_dev_info(dev,
1344 "HPA unlocked: %llu -> %llu, native %llu\n",
1345 (unsigned long long)sectors,
1346 (unsigned long long)new_sectors,
1347 (unsigned long long)native_sectors);
1350 return 0;
1354 * ata_dump_id - IDENTIFY DEVICE info debugging output
1355 * @id: IDENTIFY DEVICE page to dump
1357 * Dump selected 16-bit words from the given IDENTIFY DEVICE
1358 * page.
1360 * LOCKING:
1361 * caller.
1364 static inline void ata_dump_id(const u16 *id)
1366 DPRINTK("49==0x%04x "
1367 "53==0x%04x "
1368 "63==0x%04x "
1369 "64==0x%04x "
1370 "75==0x%04x \n",
1371 id[49],
1372 id[53],
1373 id[63],
1374 id[64],
1375 id[75]);
1376 DPRINTK("80==0x%04x "
1377 "81==0x%04x "
1378 "82==0x%04x "
1379 "83==0x%04x "
1380 "84==0x%04x \n",
1381 id[80],
1382 id[81],
1383 id[82],
1384 id[83],
1385 id[84]);
1386 DPRINTK("88==0x%04x "
1387 "93==0x%04x\n",
1388 id[88],
1389 id[93]);
1393 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
1394 * @id: IDENTIFY data to compute xfer mask from
1396 * Compute the xfermask for this device. This is not as trivial
1397 * as it seems if we must consider early devices correctly.
1399 * FIXME: pre IDE drive timing (do we care ?).
1401 * LOCKING:
1402 * None.
1404 * RETURNS:
1405 * Computed xfermask
1407 unsigned long ata_id_xfermask(const u16 *id)
1409 unsigned long pio_mask, mwdma_mask, udma_mask;
1411 /* Usual case. Word 53 indicates word 64 is valid */
1412 if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
1413 pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
1414 pio_mask <<= 3;
1415 pio_mask |= 0x7;
1416 } else {
1417 /* If word 64 isn't valid then Word 51 high byte holds
1418 * the PIO timing number for the maximum. Turn it into
1419 * a mask.
1421 u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
1422 if (mode < 5) /* Valid PIO range */
1423 pio_mask = (2 << mode) - 1;
1424 else
1425 pio_mask = 1;
1427 /* But wait.. there's more. Design your standards by
1428 * committee and you too can get a free iordy field to
 1429 * process. However it's the speeds, not the modes, that
1430 * are supported... Note drivers using the timing API
1431 * will get this right anyway
1435 mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
1437 if (ata_id_is_cfa(id)) {
1439 * Process compact flash extended modes
1441 int pio = (id[ATA_ID_CFA_MODES] >> 0) & 0x7;
1442 int dma = (id[ATA_ID_CFA_MODES] >> 3) & 0x7;
1444 if (pio)
1445 pio_mask |= (1 << 5);
1446 if (pio > 1)
1447 pio_mask |= (1 << 6);
1448 if (dma)
1449 mwdma_mask |= (1 << 3);
1450 if (dma > 1)
1451 mwdma_mask |= (1 << 4);
1454 udma_mask = 0;
1455 if (id[ATA_ID_FIELD_VALID] & (1 << 2))
1456 udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
1458 return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
1460 EXPORT_SYMBOL_GPL(ata_id_xfermask);
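/*
 * Small sketch (editorial addition) combining ata_id_xfermask() with
 * ata_mode_string() to report the fastest transfer mode advertised by a
 * device's IDENTIFY data; example_max_mode() is a hypothetical name.
 */
static inline const char *example_max_mode(const u16 *id)
{
	return ata_mode_string(ata_id_xfermask(id));
}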
1462 static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
1464 struct completion *waiting = qc->private_data;
1466 complete(waiting);
1470 * ata_exec_internal_sg - execute libata internal command
1471 * @dev: Device to which the command is sent
1472 * @tf: Taskfile registers for the command and the result
1473 * @cdb: CDB for packet command
1474 * @dma_dir: Data transfer direction of the command
1475 * @sgl: sg list for the data buffer of the command
1476 * @n_elem: Number of sg entries
1477 * @timeout: Timeout in msecs (0 for default)
1479 * Executes libata internal command with timeout. @tf contains
1480 * command on entry and result on return. Timeout and error
1481 * conditions are reported via return value. No recovery action
 1482 * is taken after a command times out. It's the caller's duty to
1483 * clean up after timeout.
1485 * LOCKING:
1486 * None. Should be called with kernel context, might sleep.
1488 * RETURNS:
1489 * Zero on success, AC_ERR_* mask on failure
1491 unsigned ata_exec_internal_sg(struct ata_device *dev,
1492 struct ata_taskfile *tf, const u8 *cdb,
1493 int dma_dir, struct scatterlist *sgl,
1494 unsigned int n_elem, unsigned long timeout)
1496 struct ata_link *link = dev->link;
1497 struct ata_port *ap = link->ap;
1498 u8 command = tf->command;
1499 int auto_timeout = 0;
1500 struct ata_queued_cmd *qc;
1501 unsigned int preempted_tag;
1502 u32 preempted_sactive;
1503 u64 preempted_qc_active;
1504 int preempted_nr_active_links;
1505 DECLARE_COMPLETION_ONSTACK(wait);
1506 unsigned long flags;
1507 unsigned int err_mask;
1508 int rc;
1510 spin_lock_irqsave(ap->lock, flags);
1512 /* no internal command while frozen */
1513 if (ap->pflags & ATA_PFLAG_FROZEN) {
1514 spin_unlock_irqrestore(ap->lock, flags);
1515 return AC_ERR_SYSTEM;
1518 /* initialize internal qc */
1519 qc = __ata_qc_from_tag(ap, ATA_TAG_INTERNAL);
1521 qc->tag = ATA_TAG_INTERNAL;
1522 qc->hw_tag = 0;
1523 qc->scsicmd = NULL;
1524 qc->ap = ap;
1525 qc->dev = dev;
1526 ata_qc_reinit(qc);
1528 preempted_tag = link->active_tag;
1529 preempted_sactive = link->sactive;
1530 preempted_qc_active = ap->qc_active;
1531 preempted_nr_active_links = ap->nr_active_links;
1532 link->active_tag = ATA_TAG_POISON;
1533 link->sactive = 0;
1534 ap->qc_active = 0;
1535 ap->nr_active_links = 0;
1537 /* prepare & issue qc */
1538 qc->tf = *tf;
1539 if (cdb)
1540 memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
1542 /* some SATA bridges need us to indicate data xfer direction */
1543 if (tf->protocol == ATAPI_PROT_DMA && (dev->flags & ATA_DFLAG_DMADIR) &&
1544 dma_dir == DMA_FROM_DEVICE)
1545 qc->tf.feature |= ATAPI_DMADIR;
1547 qc->flags |= ATA_QCFLAG_RESULT_TF;
1548 qc->dma_dir = dma_dir;
1549 if (dma_dir != DMA_NONE) {
1550 unsigned int i, buflen = 0;
1551 struct scatterlist *sg;
1553 for_each_sg(sgl, sg, n_elem, i)
1554 buflen += sg->length;
1556 ata_sg_init(qc, sgl, n_elem);
1557 qc->nbytes = buflen;
1560 qc->private_data = &wait;
1561 qc->complete_fn = ata_qc_complete_internal;
1563 ata_qc_issue(qc);
1565 spin_unlock_irqrestore(ap->lock, flags);
1567 if (!timeout) {
1568 if (ata_probe_timeout)
1569 timeout = ata_probe_timeout * 1000;
1570 else {
1571 timeout = ata_internal_cmd_timeout(dev, command);
1572 auto_timeout = 1;
1576 if (ap->ops->error_handler)
1577 ata_eh_release(ap);
1579 rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));
1581 if (ap->ops->error_handler)
1582 ata_eh_acquire(ap);
1584 ata_sff_flush_pio_task(ap);
1586 if (!rc) {
1587 spin_lock_irqsave(ap->lock, flags);
1589 /* We're racing with irq here. If we lose, the
1590 * following test prevents us from completing the qc
1591 * twice. If we win, the port is frozen and will be
1592 * cleaned up by ->post_internal_cmd().
1594 if (qc->flags & ATA_QCFLAG_ACTIVE) {
1595 qc->err_mask |= AC_ERR_TIMEOUT;
1597 if (ap->ops->error_handler)
1598 ata_port_freeze(ap);
1599 else
1600 ata_qc_complete(qc);
1602 if (ata_msg_warn(ap))
1603 ata_dev_warn(dev, "qc timeout (cmd 0x%x)\n",
1604 command);
1607 spin_unlock_irqrestore(ap->lock, flags);
1610 /* do post_internal_cmd */
1611 if (ap->ops->post_internal_cmd)
1612 ap->ops->post_internal_cmd(qc);
1614 /* perform minimal error analysis */
1615 if (qc->flags & ATA_QCFLAG_FAILED) {
1616 if (qc->result_tf.command & (ATA_ERR | ATA_DF))
1617 qc->err_mask |= AC_ERR_DEV;
1619 if (!qc->err_mask)
1620 qc->err_mask |= AC_ERR_OTHER;
1622 if (qc->err_mask & ~AC_ERR_OTHER)
1623 qc->err_mask &= ~AC_ERR_OTHER;
1624 } else if (qc->tf.command == ATA_CMD_REQ_SENSE_DATA) {
1625 qc->result_tf.command |= ATA_SENSE;
1628 /* finish up */
1629 spin_lock_irqsave(ap->lock, flags);
1631 *tf = qc->result_tf;
1632 err_mask = qc->err_mask;
1634 ata_qc_free(qc);
1635 link->active_tag = preempted_tag;
1636 link->sactive = preempted_sactive;
1637 ap->qc_active = preempted_qc_active;
1638 ap->nr_active_links = preempted_nr_active_links;
1640 spin_unlock_irqrestore(ap->lock, flags);
1642 if ((err_mask & AC_ERR_TIMEOUT) && auto_timeout)
1643 ata_internal_cmd_timed_out(dev, command);
1645 return err_mask;
1649 * ata_exec_internal - execute libata internal command
1650 * @dev: Device to which the command is sent
1651 * @tf: Taskfile registers for the command and the result
1652 * @cdb: CDB for packet command
1653 * @dma_dir: Data transfer direction of the command
1654 * @buf: Data buffer of the command
1655 * @buflen: Length of data buffer
1656 * @timeout: Timeout in msecs (0 for default)
1658 * Wrapper around ata_exec_internal_sg() which takes simple
1659 * buffer instead of sg list.
1661 * LOCKING:
1662 * None. Should be called with kernel context, might sleep.
1664 * RETURNS:
1665 * Zero on success, AC_ERR_* mask on failure
1667 unsigned ata_exec_internal(struct ata_device *dev,
1668 struct ata_taskfile *tf, const u8 *cdb,
1669 int dma_dir, void *buf, unsigned int buflen,
1670 unsigned long timeout)
1672 struct scatterlist *psg = NULL, sg;
1673 unsigned int n_elem = 0;
1675 if (dma_dir != DMA_NONE) {
1676 WARN_ON(!buf);
1677 sg_init_one(&sg, buf, buflen);
1678 psg = &sg;
1679 n_elem++;
1682 return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
1683 timeout);
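/*
 * Minimal sketch (editorial addition): issuing a simple non-data internal
 * command with ata_exec_internal().  ata_do_dev_read_id() below shows the
 * in-tree data-in case; example_flush_cache() is a hypothetical name.
 */
static inline unsigned int example_flush_cache(struct ata_device *dev)
{
	struct ata_taskfile tf;

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_FLUSH;
	tf.flags |= ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;

	return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
}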
1687 * ata_pio_need_iordy - check if iordy needed
1688 * @adev: ATA device
1690 * Check if the current speed of the device requires IORDY. Used
1691 * by various controllers for chip configuration.
1693 unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1695 /* Don't set IORDY if we're preparing for reset. IORDY may
1696 * lead to controller lock up on certain controllers if the
1697 * port is not occupied. See bko#11703 for details.
1699 if (adev->link->ap->pflags & ATA_PFLAG_RESETTING)
1700 return 0;
1701 /* Controller doesn't support IORDY. Probably a pointless
1702 * check as the caller should know this.
1704 if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
1705 return 0;
1706 /* CF spec. r4.1 Table 22 says no iordy on PIO5 and PIO6. */
1707 if (ata_id_is_cfa(adev->id)
1708 && (adev->pio_mode == XFER_PIO_5 || adev->pio_mode == XFER_PIO_6))
1709 return 0;
1710 /* PIO3 and higher it is mandatory */
1711 if (adev->pio_mode > XFER_PIO_2)
1712 return 1;
1713 /* We turn it on when possible */
1714 if (ata_id_has_iordy(adev->id))
1715 return 1;
1716 return 0;
1718 EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
1721 * ata_pio_mask_no_iordy - Return the non IORDY mask
1722 * @adev: ATA device
1724 * Compute the highest mode possible if we are not using iordy. Return
1725 * -1 if no iordy mode is available.
1727 static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
1729 /* If we have no drive specific rule, then PIO 2 is non IORDY */
1730 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
1731 u16 pio = adev->id[ATA_ID_EIDE_PIO];
1732 /* Is the speed faster than the drive allows non IORDY ? */
1733 if (pio) {
1734 /* This is cycle times not frequency - watch the logic! */
1735 if (pio > 240) /* PIO2 is 240nS per cycle */
1736 return 3 << ATA_SHIFT_PIO;
1737 return 7 << ATA_SHIFT_PIO;
1740 return 3 << ATA_SHIFT_PIO;
1744 * ata_do_dev_read_id - default ID read method
1745 * @dev: device
1746 * @tf: proposed taskfile
1747 * @id: data buffer
1749 * Issue the identify taskfile and hand back the buffer containing
1750 * identify data. For some RAID controllers and for pre ATA devices
1751 * this function is wrapped or replaced by the driver
1753 unsigned int ata_do_dev_read_id(struct ata_device *dev,
1754 struct ata_taskfile *tf, u16 *id)
1756 return ata_exec_internal(dev, tf, NULL, DMA_FROM_DEVICE,
1757 id, sizeof(id[0]) * ATA_ID_WORDS, 0);
1759 EXPORT_SYMBOL_GPL(ata_do_dev_read_id);
1762 * ata_dev_read_id - Read ID data from the specified device
1763 * @dev: target device
1764 * @p_class: pointer to class of the target device (may be changed)
1765 * @flags: ATA_READID_* flags
1766 * @id: buffer to read IDENTIFY data into
1768 * Read ID data from the specified device. ATA_CMD_ID_ATA is
1769 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
1770 * devices. This function also issues ATA_CMD_INIT_DEV_PARAMS
1771 * for pre-ATA4 drives.
1773 * FIXME: ATA_CMD_ID_ATA is optional for early drives and right
1774 * now we abort if we hit that case.
1776 * LOCKING:
1777 * Kernel thread context (may sleep)
1779 * RETURNS:
1780 * 0 on success, -errno otherwise.
1782 int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
1783 unsigned int flags, u16 *id)
1785 struct ata_port *ap = dev->link->ap;
1786 unsigned int class = *p_class;
1787 struct ata_taskfile tf;
1788 unsigned int err_mask = 0;
1789 const char *reason;
1790 bool is_semb = class == ATA_DEV_SEMB;
1791 int may_fallback = 1, tried_spinup = 0;
1792 int rc;
1794 if (ata_msg_ctl(ap))
1795 ata_dev_dbg(dev, "%s: ENTER\n", __func__);
1797 retry:
1798 ata_tf_init(dev, &tf);
1800 switch (class) {
1801 case ATA_DEV_SEMB:
1802 class = ATA_DEV_ATA; /* some hard drives report SEMB sig */
1803 /* fall through */
1804 case ATA_DEV_ATA:
1805 case ATA_DEV_ZAC:
1806 tf.command = ATA_CMD_ID_ATA;
1807 break;
1808 case ATA_DEV_ATAPI:
1809 tf.command = ATA_CMD_ID_ATAPI;
1810 break;
1811 default:
1812 rc = -ENODEV;
1813 reason = "unsupported class";
1814 goto err_out;
1817 tf.protocol = ATA_PROT_PIO;
1819 /* Some devices choke if TF registers contain garbage. Make
1820 * sure those are properly initialized.
1822 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1824 /* Device presence detection is unreliable on some
1825 * controllers. Always poll IDENTIFY if available.
1827 tf.flags |= ATA_TFLAG_POLLING;
1829 if (ap->ops->read_id)
1830 err_mask = ap->ops->read_id(dev, &tf, id);
1831 else
1832 err_mask = ata_do_dev_read_id(dev, &tf, id);
1834 if (err_mask) {
1835 if (err_mask & AC_ERR_NODEV_HINT) {
1836 ata_dev_dbg(dev, "NODEV after polling detection\n");
1837 return -ENOENT;
1840 if (is_semb) {
1841 ata_dev_info(dev,
1842 "IDENTIFY failed on device w/ SEMB sig, disabled\n");
1843 /* SEMB is not supported yet */
1844 *p_class = ATA_DEV_SEMB_UNSUP;
1845 return 0;
1848 if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
1849 /* Device or controller might have reported
1850 * the wrong device class. Give a shot at the
1851 * other IDENTIFY if the current one is
1852 * aborted by the device.
1854 if (may_fallback) {
1855 may_fallback = 0;
1857 if (class == ATA_DEV_ATA)
1858 class = ATA_DEV_ATAPI;
1859 else
1860 class = ATA_DEV_ATA;
1861 goto retry;
1864 /* Control reaches here iff the device aborted
1865 * both flavors of IDENTIFYs which happens
1866 * sometimes with phantom devices.
1868 ata_dev_dbg(dev,
1869 "both IDENTIFYs aborted, assuming NODEV\n");
1870 return -ENOENT;
1873 rc = -EIO;
1874 reason = "I/O error";
1875 goto err_out;
1878 if (dev->horkage & ATA_HORKAGE_DUMP_ID) {
1879 ata_dev_dbg(dev, "dumping IDENTIFY data, "
1880 "class=%d may_fallback=%d tried_spinup=%d\n",
1881 class, may_fallback, tried_spinup);
1882 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET,
1883 16, 2, id, ATA_ID_WORDS * sizeof(*id), true);
1886 /* Falling back doesn't make sense if ID data was read
1887 * successfully at least once.
1889 may_fallback = 0;
1891 swap_buf_le16(id, ATA_ID_WORDS);
1893 /* sanity check */
1894 rc = -EINVAL;
1895 reason = "device reports invalid type";
1897 if (class == ATA_DEV_ATA || class == ATA_DEV_ZAC) {
1898 if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
1899 goto err_out;
1900 if (ap->host->flags & ATA_HOST_IGNORE_ATA &&
1901 ata_id_is_ata(id)) {
1902 ata_dev_dbg(dev,
1903 "host indicates ignore ATA devices, ignored\n");
1904 return -ENOENT;
1906 } else {
1907 if (ata_id_is_ata(id))
1908 goto err_out;
1911 if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
1912 tried_spinup = 1;
1914 * Drive powered-up in standby mode, and requires a specific
1915 * SET_FEATURES spin-up subcommand before it will accept
1916 * anything other than the original IDENTIFY command.
1918 err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
1919 if (err_mask && id[2] != 0x738c) {
1920 rc = -EIO;
1921 reason = "SPINUP failed";
1922 goto err_out;
1925 * If the drive initially returned incomplete IDENTIFY info,
1926 * we now must reissue the IDENTIFY command.
1928 if (id[2] == 0x37c8)
1929 goto retry;
1932 if ((flags & ATA_READID_POSTRESET) &&
1933 (class == ATA_DEV_ATA || class == ATA_DEV_ZAC)) {
1935 * The exact sequence expected by certain pre-ATA4 drives is:
1936 * SRST RESET
1937 * IDENTIFY (optional in early ATA)
1938 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
1939 * anything else..
1940 * Some drives were very specific about that exact sequence.
1942 * Note that ATA4 says lba is mandatory so the second check
1943 * should never trigger.
1945 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
1946 err_mask = ata_dev_init_params(dev, id[3], id[6]);
1947 if (err_mask) {
1948 rc = -EIO;
1949 reason = "INIT_DEV_PARAMS failed";
1950 goto err_out;
1953 /* current CHS translation info (id[53-58]) might be
1954 * changed. reread the identify device info.
1956 flags &= ~ATA_READID_POSTRESET;
1957 goto retry;
1961 *p_class = class;
1963 return 0;
1965 err_out:
1966 if (ata_msg_warn(ap))
1967 ata_dev_warn(dev, "failed to IDENTIFY (%s, err_mask=0x%x)\n",
1968 reason, err_mask);
1969 return rc;
1973 * ata_read_log_page - read a specific log page
1974 * @dev: target device
1975 * @log: log to read
1976 * @page: page to read
1977 * @buf: buffer to store read page
1978 * @sectors: number of sectors to read
1980 * Read log page using READ_LOG_EXT command.
1982 * LOCKING:
1983 * Kernel thread context (may sleep).
1985 * RETURNS:
1986 * 0 on success, AC_ERR_* mask otherwise.
1988 unsigned int ata_read_log_page(struct ata_device *dev, u8 log,
1989 u8 page, void *buf, unsigned int sectors)
1991 unsigned long ap_flags = dev->link->ap->flags;
1992 struct ata_taskfile tf;
1993 unsigned int err_mask;
1994 bool dma = false;
1996 DPRINTK("read log page - log 0x%x, page 0x%x\n", log, page);
1999 * Return error without actually issuing the command on controllers
2000 * which e.g. lockup on a read log page.
2002 if (ap_flags & ATA_FLAG_NO_LOG_PAGE)
2003 return AC_ERR_DEV;
2005 retry:
2006 ata_tf_init(dev, &tf);
2007 if (dev->dma_mode && ata_id_has_read_log_dma_ext(dev->id) &&
2008 !(dev->horkage & ATA_HORKAGE_NO_DMA_LOG)) {
2009 tf.command = ATA_CMD_READ_LOG_DMA_EXT;
2010 tf.protocol = ATA_PROT_DMA;
2011 dma = true;
2012 } else {
2013 tf.command = ATA_CMD_READ_LOG_EXT;
2014 tf.protocol = ATA_PROT_PIO;
2015 dma = false;
2017 tf.lbal = log;
2018 tf.lbam = page;
2019 tf.nsect = sectors;
2020 tf.hob_nsect = sectors >> 8;
2021 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE;
2023 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
2024 buf, sectors * ATA_SECT_SIZE, 0);
2026 if (err_mask && dma) {
2027 dev->horkage |= ATA_HORKAGE_NO_DMA_LOG;
2028 ata_dev_warn(dev, "READ LOG DMA EXT failed, trying PIO\n");
2029 goto retry;
2032 DPRINTK("EXIT, err_mask=%x\n", err_mask);
2033 return err_mask;
2036 static bool ata_log_supported(struct ata_device *dev, u8 log)
2038 struct ata_port *ap = dev->link->ap;
2040 if (ata_read_log_page(dev, ATA_LOG_DIRECTORY, 0, ap->sector_buf, 1))
2041 return false;
2042 return get_unaligned_le16(&ap->sector_buf[log * 2]) ? true : false;
2045 static bool ata_identify_page_supported(struct ata_device *dev, u8 page)
2047 struct ata_port *ap = dev->link->ap;
2048 unsigned int err, i;
2050 if (!ata_log_supported(dev, ATA_LOG_IDENTIFY_DEVICE)) {
2051 ata_dev_warn(dev, "ATA Identify Device Log not supported\n");
2052 return false;
2056 * Read IDENTIFY DEVICE data log, page 0, to figure out if the page is
2057 * supported.
2059 err = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE, 0, ap->sector_buf,
2061 if (err) {
2062 ata_dev_info(dev,
2063 "failed to get Device Identify Log Emask 0x%x\n",
2064 err);
2065 return false;
2068 for (i = 0; i < ap->sector_buf[8]; i++) {
2069 if (ap->sector_buf[9 + i] == page)
2070 return true;
2073 return false;
2076 static int ata_do_link_spd_horkage(struct ata_device *dev)
2078 struct ata_link *plink = ata_dev_phys_link(dev);
2079 u32 target, target_limit;
2081 if (!sata_scr_valid(plink))
2082 return 0;
2084 if (dev->horkage & ATA_HORKAGE_1_5_GBPS)
2085 target = 1;
2086 else
2087 return 0;
2089 target_limit = (1 << target) - 1;
2091 /* if already on stricter limit, no need to push further */
2092 if (plink->sata_spd_limit <= target_limit)
2093 return 0;
2095 plink->sata_spd_limit = target_limit;
2097 /* Request another EH round by returning -EAGAIN if link is
2098 * going faster than the target speed. Forward progress is
2099 * guaranteed by setting sata_spd_limit to target_limit above.
2101 if (plink->sata_spd > target) {
2102 ata_dev_info(dev, "applying link speed limit horkage to %s\n",
2103 sata_spd_string(target));
2104 return -EAGAIN;
2106 return 0;
2109 static inline u8 ata_dev_knobble(struct ata_device *dev)
2111 struct ata_port *ap = dev->link->ap;
2113 if (ata_dev_blacklisted(dev) & ATA_HORKAGE_BRIDGE_OK)
2114 return 0;
2116 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
2119 static void ata_dev_config_ncq_send_recv(struct ata_device *dev)
2121 struct ata_port *ap = dev->link->ap;
2122 unsigned int err_mask;
2124 if (!ata_log_supported(dev, ATA_LOG_NCQ_SEND_RECV)) {
2125 ata_dev_warn(dev, "NCQ Send/Recv Log not supported\n");
2126 return;
2128 err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_SEND_RECV,
2129 0, ap->sector_buf, 1);
2130 if (err_mask) {
2131 ata_dev_dbg(dev,
2132 "failed to get NCQ Send/Recv Log Emask 0x%x\n",
2133 err_mask);
2134 } else {
2135 u8 *cmds = dev->ncq_send_recv_cmds;
2137 dev->flags |= ATA_DFLAG_NCQ_SEND_RECV;
2138 memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_SEND_RECV_SIZE);
2140 if (dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM) {
2141 ata_dev_dbg(dev, "disabling queued TRIM support\n");
2142 cmds[ATA_LOG_NCQ_SEND_RECV_DSM_OFFSET] &=
2143 ~ATA_LOG_NCQ_SEND_RECV_DSM_TRIM;
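/*
 * The bitmap cached in dev->ncq_send_recv_cmds above is what later
 * capability checks are expected to consult, e.g. a helper along the
 * lines of ata_fpdma_dsm_supported() testing ATA_DFLAG_NCQ_SEND_RECV
 * together with the DSM/TRIM bit before queued TRIM is attempted.
 * Clearing the TRIM bit here is therefore enough to keep queued TRIM
 * away from devices with the ATA_HORKAGE_NO_NCQ_TRIM quirk.
 */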
2148 static void ata_dev_config_ncq_non_data(struct ata_device *dev)
2150 struct ata_port *ap = dev->link->ap;
2151 unsigned int err_mask;
2153 if (!ata_log_supported(dev, ATA_LOG_NCQ_NON_DATA)) {
2154 ata_dev_warn(dev,
2155 "NCQ Send/Recv Log not supported\n");
2156 return;
2158 err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_NON_DATA,
2159 0, ap->sector_buf, 1);
2160 if (err_mask) {
2161 ata_dev_dbg(dev,
2162 "failed to get NCQ Non-Data Log Emask 0x%x\n",
2163 err_mask);
2164 } else {
2165 u8 *cmds = dev->ncq_non_data_cmds;
2167 memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_NON_DATA_SIZE);
2171 static void ata_dev_config_ncq_prio(struct ata_device *dev)
2173 struct ata_port *ap = dev->link->ap;
2174 unsigned int err_mask;
2176 if (!(dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLE)) {
2177 dev->flags &= ~ATA_DFLAG_NCQ_PRIO;
2178 return;
2181 err_mask = ata_read_log_page(dev,
2182 ATA_LOG_IDENTIFY_DEVICE,
2183 ATA_LOG_SATA_SETTINGS,
2184 ap->sector_buf,
2186 if (err_mask) {
2187 ata_dev_dbg(dev,
2188 "failed to get Identify Device data, Emask 0x%x\n",
2189 err_mask);
2190 return;
2193 if (ap->sector_buf[ATA_LOG_NCQ_PRIO_OFFSET] & BIT(3)) {
2194 dev->flags |= ATA_DFLAG_NCQ_PRIO;
2195 } else {
2196 dev->flags &= ~ATA_DFLAG_NCQ_PRIO;
2197 ata_dev_dbg(dev, "SATA page does not support priority\n");
2202 static int ata_dev_config_ncq(struct ata_device *dev,
2203 char *desc, size_t desc_sz)
2205 struct ata_port *ap = dev->link->ap;
2206 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
2207 unsigned int err_mask;
2208 char *aa_desc = "";
2210 if (!ata_id_has_ncq(dev->id)) {
2211 desc[0] = '\0';
2212 return 0;
2214 if (!IS_ENABLED(CONFIG_SATA_HOST))
2215 return 0;
2216 if (dev->horkage & ATA_HORKAGE_NONCQ) {
2217 snprintf(desc, desc_sz, "NCQ (not used)");
2218 return 0;
2220 if (ap->flags & ATA_FLAG_NCQ) {
2221 hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE);
2222 dev->flags |= ATA_DFLAG_NCQ;
2225 if (!(dev->horkage & ATA_HORKAGE_BROKEN_FPDMA_AA) &&
2226 (ap->flags & ATA_FLAG_FPDMA_AA) &&
2227 ata_id_has_fpdma_aa(dev->id)) {
2228 err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE,
2229 SATA_FPDMA_AA);
2230 if (err_mask) {
2231 ata_dev_err(dev,
2232 "failed to enable AA (error_mask=0x%x)\n",
2233 err_mask);
2234 if (err_mask != AC_ERR_DEV) {
2235 dev->horkage |= ATA_HORKAGE_BROKEN_FPDMA_AA;
2236 return -EIO;
2238 } else
2239 aa_desc = ", AA";
2242 if (hdepth >= ddepth)
2243 snprintf(desc, desc_sz, "NCQ (depth %d)%s", ddepth, aa_desc);
2244 else
2245 snprintf(desc, desc_sz, "NCQ (depth %d/%d)%s", hdepth,
2246 ddepth, aa_desc);
2248 if ((ap->flags & ATA_FLAG_FPDMA_AUX)) {
2249 if (ata_id_has_ncq_send_and_recv(dev->id))
2250 ata_dev_config_ncq_send_recv(dev);
2251 if (ata_id_has_ncq_non_data(dev->id))
2252 ata_dev_config_ncq_non_data(dev);
2253 if (ata_id_has_ncq_prio(dev->id))
2254 ata_dev_config_ncq_prio(dev);
2257 return 0;
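/*
 * Worked example: a drive whose IDENTIFY data advertises a queue depth
 * of 32 (ddepth) behind a SCSI host that allows 31 outstanding
 * commands (hdepth) is logged as "NCQ (depth 31/32), AA", assuming
 * FPDMA Auto-Activate was enabled above.  When the host queue is at
 * least as deep as the drive's, only the drive depth is printed, e.g.
 * "NCQ (depth 32), AA".
 */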
2260 static void ata_dev_config_sense_reporting(struct ata_device *dev)
2262 unsigned int err_mask;
2264 if (!ata_id_has_sense_reporting(dev->id))
2265 return;
2267 if (ata_id_sense_reporting_enabled(dev->id))
2268 return;
2270 err_mask = ata_dev_set_feature(dev, SETFEATURE_SENSE_DATA, 0x1);
2271 if (err_mask) {
2272 ata_dev_dbg(dev,
2273 "failed to enable Sense Data Reporting, Emask 0x%x\n",
2274 err_mask);
2278 static void ata_dev_config_zac(struct ata_device *dev)
2280 struct ata_port *ap = dev->link->ap;
2281 unsigned int err_mask;
2282 u8 *identify_buf = ap->sector_buf;
2284 dev->zac_zones_optimal_open = U32_MAX;
2285 dev->zac_zones_optimal_nonseq = U32_MAX;
2286 dev->zac_zones_max_open = U32_MAX;
2289 * Always set the 'ZAC' flag for Host-managed devices.
2291 if (dev->class == ATA_DEV_ZAC)
2292 dev->flags |= ATA_DFLAG_ZAC;
2293 else if (ata_id_zoned_cap(dev->id) == 0x01)
2295 * Check for host-aware devices.
2297 dev->flags |= ATA_DFLAG_ZAC;
2299 if (!(dev->flags & ATA_DFLAG_ZAC))
2300 return;
2302 if (!ata_identify_page_supported(dev, ATA_LOG_ZONED_INFORMATION)) {
2303 ata_dev_warn(dev,
2304 "ATA Zoned Information Log not supported\n");
2305 return;
2309 * Read IDENTIFY DEVICE data log, page 9 (Zoned-device information)
2311 err_mask = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE,
2312 ATA_LOG_ZONED_INFORMATION,
2313 identify_buf, 1);
2314 if (!err_mask) {
2315 u64 zoned_cap, opt_open, opt_nonseq, max_open;
2317 zoned_cap = get_unaligned_le64(&identify_buf[8]);
2318 if ((zoned_cap >> 63))
2319 dev->zac_zoned_cap = (zoned_cap & 1);
2320 opt_open = get_unaligned_le64(&identify_buf[24]);
2321 if ((opt_open >> 63))
2322 dev->zac_zones_optimal_open = (u32)opt_open;
2323 opt_nonseq = get_unaligned_le64(&identify_buf[32]);
2324 if ((opt_nonseq >> 63))
2325 dev->zac_zones_optimal_nonseq = (u32)opt_nonseq;
2326 max_open = get_unaligned_le64(&identify_buf[40]);
2327 if ((max_open >> 63))
2328 dev->zac_zones_max_open = (u32)max_open;
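/*
 * Each field of the Zoned Device Information page is a qword whose
 * bit 63 marks the remaining bits as valid, hence the "(value >> 63)"
 * guards above.  For example, a maximum-open-zones qword of
 * 0x8000000000000080 yields zac_zones_max_open == 128, while a qword
 * of 0 leaves the field at its U32_MAX default.
 */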
2332 static void ata_dev_config_trusted(struct ata_device *dev)
2334 struct ata_port *ap = dev->link->ap;
2335 u64 trusted_cap;
2336 unsigned int err;
2338 if (!ata_id_has_trusted(dev->id))
2339 return;
2341 if (!ata_identify_page_supported(dev, ATA_LOG_SECURITY)) {
2342 ata_dev_warn(dev,
2343 "Security Log not supported\n");
2344 return;
2347 err = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE, ATA_LOG_SECURITY,
2348 ap->sector_buf, 1);
2349 if (err) {
2350 ata_dev_dbg(dev,
2351 "failed to read Security Log, Emask 0x%x\n", err);
2352 return;
2355 trusted_cap = get_unaligned_le64(&ap->sector_buf[40]);
2356 if (!(trusted_cap & (1ULL << 63))) {
2357 ata_dev_dbg(dev,
2358 "Trusted Computing capability qword not valid!\n");
2359 return;
2362 if (trusted_cap & (1 << 0))
2363 dev->flags |= ATA_DFLAG_TRUSTED;
2367 * ata_dev_configure - Configure the specified ATA/ATAPI device
2368 * @dev: Target device to configure
2370 * Configure @dev according to @dev->id. Generic and low-level
2371 * driver specific fixups are also applied.
2373 * LOCKING:
2374 * Kernel thread context (may sleep)
2376 * RETURNS:
2377 * 0 on success, -errno otherwise
2379 int ata_dev_configure(struct ata_device *dev)
2381 struct ata_port *ap = dev->link->ap;
2382 struct ata_eh_context *ehc = &dev->link->eh_context;
2383 int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
2384 const u16 *id = dev->id;
2385 unsigned long xfer_mask;
2386 unsigned int err_mask;
2387 char revbuf[7]; /* XYZ-99\0 */
2388 char fwrevbuf[ATA_ID_FW_REV_LEN+1];
2389 char modelbuf[ATA_ID_PROD_LEN+1];
2390 int rc;
2392 if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
2393 ata_dev_info(dev, "%s: ENTER/EXIT -- nodev\n", __func__);
2394 return 0;
2397 if (ata_msg_probe(ap))
2398 ata_dev_dbg(dev, "%s: ENTER\n", __func__);
2400 /* set horkage */
2401 dev->horkage |= ata_dev_blacklisted(dev);
2402 ata_force_horkage(dev);
2404 if (dev->horkage & ATA_HORKAGE_DISABLE) {
2405 ata_dev_info(dev, "unsupported device, disabling\n");
2406 ata_dev_disable(dev);
2407 return 0;
2410 if ((!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) &&
2411 dev->class == ATA_DEV_ATAPI) {
2412 ata_dev_warn(dev, "WARNING: ATAPI is %s, device ignored\n",
2413 atapi_enabled ? "not supported with this driver"
2414 : "disabled");
2415 ata_dev_disable(dev);
2416 return 0;
2419 rc = ata_do_link_spd_horkage(dev);
2420 if (rc)
2421 return rc;
2423 /* some WD SATA-1 drives have issues with LPM, turn on NOLPM for them */
2424 if ((dev->horkage & ATA_HORKAGE_WD_BROKEN_LPM) &&
2425 (id[ATA_ID_SATA_CAPABILITY] & 0xe) == 0x2)
2426 dev->horkage |= ATA_HORKAGE_NOLPM;
2428 if (ap->flags & ATA_FLAG_NO_LPM)
2429 dev->horkage |= ATA_HORKAGE_NOLPM;
2431 if (dev->horkage & ATA_HORKAGE_NOLPM) {
2432 ata_dev_warn(dev, "LPM support broken, forcing max_power\n");
2433 dev->link->ap->target_lpm_policy = ATA_LPM_MAX_POWER;
2436 /* let ACPI work its magic */
2437 rc = ata_acpi_on_devcfg(dev);
2438 if (rc)
2439 return rc;
2441 /* massage HPA, do it early as it might change IDENTIFY data */
2442 rc = ata_hpa_resize(dev);
2443 if (rc)
2444 return rc;
2446 /* print device capabilities */
2447 if (ata_msg_probe(ap))
2448 ata_dev_dbg(dev,
2449 "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
2450 "85:%04x 86:%04x 87:%04x 88:%04x\n",
2451 __func__,
2452 id[49], id[82], id[83], id[84],
2453 id[85], id[86], id[87], id[88]);
2455 /* initialize to-be-configured parameters */
2456 dev->flags &= ~ATA_DFLAG_CFG_MASK;
2457 dev->max_sectors = 0;
2458 dev->cdb_len = 0;
2459 dev->n_sectors = 0;
2460 dev->cylinders = 0;
2461 dev->heads = 0;
2462 dev->sectors = 0;
2463 dev->multi_count = 0;
2466 * common ATA, ATAPI feature tests
2469 /* find max transfer mode; for printk only */
2470 xfer_mask = ata_id_xfermask(id);
2472 if (ata_msg_probe(ap))
2473 ata_dump_id(id);
2475 /* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
2476 ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
2477 sizeof(fwrevbuf));
2479 ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
2480 sizeof(modelbuf));
2482 /* ATA-specific feature tests */
2483 if (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ZAC) {
2484 if (ata_id_is_cfa(id)) {
2485 /* CPRM may make this media unusable */
2486 if (id[ATA_ID_CFA_KEY_MGMT] & 1)
2487 ata_dev_warn(dev,
2488 "supports DRM functions and may not be fully accessible\n");
2489 snprintf(revbuf, 7, "CFA");
2490 } else {
2491 snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
2492 /* Warn the user if the device has TPM extensions */
2493 if (ata_id_has_tpm(id))
2494 ata_dev_warn(dev,
2495 "supports DRM functions and may not be fully accessible\n");
2498 dev->n_sectors = ata_id_n_sectors(id);
2500 /* get current R/W Multiple count setting */
2501 if ((dev->id[47] >> 8) == 0x80 && (dev->id[59] & 0x100)) {
2502 unsigned int max = dev->id[47] & 0xff;
2503 unsigned int cnt = dev->id[59] & 0xff;
2504 /* only recognize/allow powers of two here */
2505 if (is_power_of_2(max) && is_power_of_2(cnt))
2506 if (cnt <= max)
2507 dev->multi_count = cnt;
2510 if (ata_id_has_lba(id)) {
2511 const char *lba_desc;
2512 char ncq_desc[24];
2514 lba_desc = "LBA";
2515 dev->flags |= ATA_DFLAG_LBA;
2516 if (ata_id_has_lba48(id)) {
2517 dev->flags |= ATA_DFLAG_LBA48;
2518 lba_desc = "LBA48";
2520 if (dev->n_sectors >= (1UL << 28) &&
2521 ata_id_has_flush_ext(id))
2522 dev->flags |= ATA_DFLAG_FLUSH_EXT;
2525 /* config NCQ */
2526 rc = ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
2527 if (rc)
2528 return rc;
2530 /* print device info to dmesg */
2531 if (ata_msg_drv(ap) && print_info) {
2532 ata_dev_info(dev, "%s: %s, %s, max %s\n",
2533 revbuf, modelbuf, fwrevbuf,
2534 ata_mode_string(xfer_mask));
2535 ata_dev_info(dev,
2536 "%llu sectors, multi %u: %s %s\n",
2537 (unsigned long long)dev->n_sectors,
2538 dev->multi_count, lba_desc, ncq_desc);
2540 } else {
2541 /* CHS */
2543 /* Default translation */
2544 dev->cylinders = id[1];
2545 dev->heads = id[3];
2546 dev->sectors = id[6];
2548 if (ata_id_current_chs_valid(id)) {
2549 /* Current CHS translation is valid. */
2550 dev->cylinders = id[54];
2551 dev->heads = id[55];
2552 dev->sectors = id[56];
2555 /* print device info to dmesg */
2556 if (ata_msg_drv(ap) && print_info) {
2557 ata_dev_info(dev, "%s: %s, %s, max %s\n",
2558 revbuf, modelbuf, fwrevbuf,
2559 ata_mode_string(xfer_mask));
2560 ata_dev_info(dev,
2561 "%llu sectors, multi %u, CHS %u/%u/%u\n",
2562 (unsigned long long)dev->n_sectors,
2563 dev->multi_count, dev->cylinders,
2564 dev->heads, dev->sectors);
2568 /* Check and mark DevSlp capability. Get DevSlp timing variables
2569 * from SATA Settings page of Identify Device Data Log.
2571 if (ata_id_has_devslp(dev->id)) {
2572 u8 *sata_setting = ap->sector_buf;
2573 int i, j;
2575 dev->flags |= ATA_DFLAG_DEVSLP;
2576 err_mask = ata_read_log_page(dev,
2577 ATA_LOG_IDENTIFY_DEVICE,
2578 ATA_LOG_SATA_SETTINGS,
2579 sata_setting,
2581 if (err_mask)
2582 ata_dev_dbg(dev,
2583 "failed to get Identify Device Data, Emask 0x%x\n",
2584 err_mask);
2585 else
2586 for (i = 0; i < ATA_LOG_DEVSLP_SIZE; i++) {
2587 j = ATA_LOG_DEVSLP_OFFSET + i;
2588 dev->devslp_timing[i] = sata_setting[j];
2591 ata_dev_config_sense_reporting(dev);
2592 ata_dev_config_zac(dev);
2593 ata_dev_config_trusted(dev);
2594 dev->cdb_len = 32;
2597 /* ATAPI-specific feature tests */
2598 else if (dev->class == ATA_DEV_ATAPI) {
2599 const char *cdb_intr_string = "";
2600 const char *atapi_an_string = "";
2601 const char *dma_dir_string = "";
2602 u32 sntf;
2604 rc = atapi_cdb_len(id);
2605 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
2606 if (ata_msg_warn(ap))
2607 ata_dev_warn(dev, "unsupported CDB len\n");
2608 rc = -EINVAL;
2609 goto err_out_nosup;
2611 dev->cdb_len = (unsigned int) rc;
2613 /* Enable ATAPI AN if both the host and device have
2614 * the support. If PMP is attached, SNTF is required
2615 * to enable ATAPI AN to discern between PHY status
2616 * changed notifications and ATAPI ANs.
2618 if (atapi_an &&
2619 (ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
2620 (!sata_pmp_attached(ap) ||
2621 sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
2622 /* issue SET feature command to turn this on */
2623 err_mask = ata_dev_set_feature(dev,
2624 SETFEATURES_SATA_ENABLE, SATA_AN);
2625 if (err_mask)
2626 ata_dev_err(dev,
2627 "failed to enable ATAPI AN (err_mask=0x%x)\n",
2628 err_mask);
2629 else {
2630 dev->flags |= ATA_DFLAG_AN;
2631 atapi_an_string = ", ATAPI AN";
2635 if (ata_id_cdb_intr(dev->id)) {
2636 dev->flags |= ATA_DFLAG_CDB_INTR;
2637 cdb_intr_string = ", CDB intr";
2640 if (atapi_dmadir || (dev->horkage & ATA_HORKAGE_ATAPI_DMADIR) || atapi_id_dmadir(dev->id)) {
2641 dev->flags |= ATA_DFLAG_DMADIR;
2642 dma_dir_string = ", DMADIR";
2645 if (ata_id_has_da(dev->id)) {
2646 dev->flags |= ATA_DFLAG_DA;
2647 zpodd_init(dev);
2650 /* print device info to dmesg */
2651 if (ata_msg_drv(ap) && print_info)
2652 ata_dev_info(dev,
2653 "ATAPI: %s, %s, max %s%s%s%s\n",
2654 modelbuf, fwrevbuf,
2655 ata_mode_string(xfer_mask),
2656 cdb_intr_string, atapi_an_string,
2657 dma_dir_string);
2660 /* determine max_sectors */
2661 dev->max_sectors = ATA_MAX_SECTORS;
2662 if (dev->flags & ATA_DFLAG_LBA48)
2663 dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2665 /* Limit PATA drive on SATA cable bridge transfers to udma5,
2666 200 sectors */
2667 if (ata_dev_knobble(dev)) {
2668 if (ata_msg_drv(ap) && print_info)
2669 ata_dev_info(dev, "applying bridge limits\n");
2670 dev->udma_mask &= ATA_UDMA5;
2671 dev->max_sectors = ATA_MAX_SECTORS;
2674 if ((dev->class == ATA_DEV_ATAPI) &&
2675 (atapi_command_packet_set(id) == TYPE_TAPE)) {
2676 dev->max_sectors = ATA_MAX_SECTORS_TAPE;
2677 dev->horkage |= ATA_HORKAGE_STUCK_ERR;
2680 if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
2681 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
2682 dev->max_sectors);
2684 if (dev->horkage & ATA_HORKAGE_MAX_SEC_1024)
2685 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_1024,
2686 dev->max_sectors);
2688 if (dev->horkage & ATA_HORKAGE_MAX_SEC_LBA48)
2689 dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2691 if (ap->ops->dev_config)
2692 ap->ops->dev_config(dev);
2694 if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
2695 /* Let the user know. We don't want to disallow opens for
2696 rescue purposes, or in case the vendor is just a blithering
2697 idiot. Do this after the dev_config call as some controllers
2698 with buggy firmware may want to avoid reporting false device
2699 bugs */
2701 if (print_info) {
2702 ata_dev_warn(dev,
2703 "Drive reports diagnostics failure. This may indicate a drive\n");
2704 ata_dev_warn(dev,
2705 "fault or invalid emulation. Contact drive vendor for information.\n");
2709 if ((dev->horkage & ATA_HORKAGE_FIRMWARE_WARN) && print_info) {
2710 ata_dev_warn(dev, "WARNING: device requires firmware update to be fully functional\n");
2711 ata_dev_warn(dev, " contact the vendor or visit http://ata.wiki.kernel.org\n");
2714 return 0;
2716 err_out_nosup:
2717 if (ata_msg_probe(ap))
2718 ata_dev_dbg(dev, "%s: EXIT, err\n", __func__);
2719 return rc;
2723 * ata_cable_40wire - return 40 wire cable type
2724 * @ap: port
2726 * Helper method for drivers which want to hardwire 40 wire cable
2727 * detection.
2730 int ata_cable_40wire(struct ata_port *ap)
2732 return ATA_CBL_PATA40;
2734 EXPORT_SYMBOL_GPL(ata_cable_40wire);
2737 * ata_cable_80wire - return 80 wire cable type
2738 * @ap: port
2740 * Helper method for drivers which want to hardwire 80 wire cable
2741 * detection.
2744 int ata_cable_80wire(struct ata_port *ap)
2746 return ATA_CBL_PATA80;
2748 EXPORT_SYMBOL_GPL(ata_cable_80wire);
2751 * ata_cable_unknown - return unknown PATA cable.
2752 * @ap: port
2754 * Helper method for drivers which have no PATA cable detection.
2757 int ata_cable_unknown(struct ata_port *ap)
2759 return ATA_CBL_PATA_UNK;
2761 EXPORT_SYMBOL_GPL(ata_cable_unknown);
2764 * ata_cable_ignore - return ignored PATA cable.
2765 * @ap: port
2767 * Helper method for drivers which don't use cable type to limit
2768 * transfer mode.
2770 int ata_cable_ignore(struct ata_port *ap)
2772 return ATA_CBL_PATA_IGN;
2774 EXPORT_SYMBOL_GPL(ata_cable_ignore);
2777 * ata_cable_sata - return SATA cable type
2778 * @ap: port
2780 * Helper method for drivers which have SATA cables
2783 int ata_cable_sata(struct ata_port *ap)
2785 return ATA_CBL_SATA;
2787 EXPORT_SYMBOL_GPL(ata_cable_sata);
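/*
 * Minimal usage sketch (hypothetical driver, names made up): a
 * low-level driver with no cable detection hardware typically wires
 * one of the helpers above into its port operations, e.g.
 *
 *	static struct ata_port_operations my_pata_ops = {
 *		.inherits	= &ata_sff_port_ops,
 *		.cable_detect	= ata_cable_40wire,
 *	};
 *
 * where ata_sff_port_ops is the usual base for taskfile-style PATA
 * controllers.
 */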
2790 * ata_bus_probe - Reset and probe ATA bus
2791 * @ap: Bus to probe
2793 * Master ATA bus probing function. Initiates a hardware-dependent
2794 * bus reset, then attempts to identify any devices found on
2795 * the bus.
2797 * LOCKING:
2798 * PCI/etc. bus probe sem.
2800 * RETURNS:
2801 * Zero on success, negative errno otherwise.
2804 int ata_bus_probe(struct ata_port *ap)
2806 unsigned int classes[ATA_MAX_DEVICES];
2807 int tries[ATA_MAX_DEVICES];
2808 int rc;
2809 struct ata_device *dev;
2811 ata_for_each_dev(dev, &ap->link, ALL)
2812 tries[dev->devno] = ATA_PROBE_MAX_TRIES;
2814 retry:
2815 ata_for_each_dev(dev, &ap->link, ALL) {
2816 /* If we issue an SRST then an ATA drive (not ATAPI)
2817 * may change configuration and be in PIO0 timing. If
2818 * we do a hard reset (or are coming from power on)
2819 * this is true for ATA or ATAPI. Until we've set a
2820 * suitable controller mode we should not touch the
2821 * bus as we may be talking too fast.
2823 dev->pio_mode = XFER_PIO_0;
2824 dev->dma_mode = 0xff;
2826 /* If the controller has a pio mode setup function
2827 * then use it to set the chipset to rights. Don't
2828 * touch the DMA setup as that will be dealt with when
2829 * configuring devices.
2831 if (ap->ops->set_piomode)
2832 ap->ops->set_piomode(ap, dev);
2835 /* reset and determine device classes */
2836 ap->ops->phy_reset(ap);
2838 ata_for_each_dev(dev, &ap->link, ALL) {
2839 if (dev->class != ATA_DEV_UNKNOWN)
2840 classes[dev->devno] = dev->class;
2841 else
2842 classes[dev->devno] = ATA_DEV_NONE;
2844 dev->class = ATA_DEV_UNKNOWN;
2847 /* read IDENTIFY page and configure devices. We have to do the identify
2848 specific sequence bass-ackwards so that PDIAG- is released by
2849 the slave device */
2851 ata_for_each_dev(dev, &ap->link, ALL_REVERSE) {
2852 if (tries[dev->devno])
2853 dev->class = classes[dev->devno];
2855 if (!ata_dev_enabled(dev))
2856 continue;
2858 rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
2859 dev->id);
2860 if (rc)
2861 goto fail;
2864 /* Now ask for the cable type as PDIAG- should have been released */
2865 if (ap->ops->cable_detect)
2866 ap->cbl = ap->ops->cable_detect(ap);
2868 /* We may have SATA bridge glue hiding here irrespective of
2869 * the reported cable types and sensed types. When SATA
2870 * drives indicate we have a bridge, we don't know which end
2871 * of the link the bridge is on, which is a problem.
2873 ata_for_each_dev(dev, &ap->link, ENABLED)
2874 if (ata_id_is_sata(dev->id))
2875 ap->cbl = ATA_CBL_SATA;
2877 /* After the identify sequence we can now set up the devices. We do
2878 this in the normal order so that the user doesn't get confused */
2880 ata_for_each_dev(dev, &ap->link, ENABLED) {
2881 ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
2882 rc = ata_dev_configure(dev);
2883 ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
2884 if (rc)
2885 goto fail;
2888 /* configure transfer mode */
2889 rc = ata_set_mode(&ap->link, &dev);
2890 if (rc)
2891 goto fail;
2893 ata_for_each_dev(dev, &ap->link, ENABLED)
2894 return 0;
2896 return -ENODEV;
2898 fail:
2899 tries[dev->devno]--;
2901 switch (rc) {
2902 case -EINVAL:
2903 /* eeek, something went very wrong, give up */
2904 tries[dev->devno] = 0;
2905 break;
2907 case -ENODEV:
2908 /* give it just one more chance */
2909 tries[dev->devno] = min(tries[dev->devno], 1);
2910 /* fall through */
2911 case -EIO:
2912 if (tries[dev->devno] == 1) {
2913 /* This is the last chance, better to slow
2914 * down than lose it.
2916 sata_down_spd_limit(&ap->link, 0);
2917 ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
2921 if (!tries[dev->devno])
2922 ata_dev_disable(dev);
2924 goto retry;
2928 * sata_print_link_status - Print SATA link status
2929 * @link: SATA link to printk link status about
2931 * This function prints link speed and status of a SATA link.
2933 * LOCKING:
2934 * None.
2936 static void sata_print_link_status(struct ata_link *link)
2938 u32 sstatus, scontrol, tmp;
2940 if (sata_scr_read(link, SCR_STATUS, &sstatus))
2941 return;
2942 sata_scr_read(link, SCR_CONTROL, &scontrol);
2944 if (ata_phys_link_online(link)) {
2945 tmp = (sstatus >> 4) & 0xf;
2946 ata_link_info(link, "SATA link up %s (SStatus %X SControl %X)\n",
2947 sata_spd_string(tmp), sstatus, scontrol);
2948 } else {
2949 ata_link_info(link, "SATA link down (SStatus %X SControl %X)\n",
2950 sstatus, scontrol);
2955 * ata_dev_pair - return other device on cable
2956 * @adev: device
2958 * Obtain the other device on the same cable, or NULL if no
2959 * other device is present.
2962 struct ata_device *ata_dev_pair(struct ata_device *adev)
2964 struct ata_link *link = adev->link;
2965 struct ata_device *pair = &link->device[1 - adev->devno];
2966 if (!ata_dev_enabled(pair))
2967 return NULL;
2968 return pair;
2970 EXPORT_SYMBOL_GPL(ata_dev_pair);
2973 * sata_down_spd_limit - adjust SATA spd limit downward
2974 * @link: Link to adjust SATA spd limit for
2975 * @spd_limit: Additional limit
2977 * Adjust SATA spd limit of @link downward. Note that this
2978 * function only adjusts the limit. The change must be applied
2979 * using sata_set_spd().
2981 * If @spd_limit is non-zero, the speed is limited to a speed equal
2982 * to or lower than @spd_limit if such a speed is supported. If
2983 * @spd_limit is slower than any supported speed, only the lowest
2984 * supported speed is allowed.
2986 * LOCKING:
2987 * Inherited from caller.
2989 * RETURNS:
2990 * 0 on success, negative errno on failure
2992 int sata_down_spd_limit(struct ata_link *link, u32 spd_limit)
2994 u32 sstatus, spd, mask;
2995 int rc, bit;
2997 if (!sata_scr_valid(link))
2998 return -EOPNOTSUPP;
3000 /* If SCR can be read, use it to determine the current SPD.
3001 * If not, use cached value in link->sata_spd.
3003 rc = sata_scr_read(link, SCR_STATUS, &sstatus);
3004 if (rc == 0 && ata_sstatus_online(sstatus))
3005 spd = (sstatus >> 4) & 0xf;
3006 else
3007 spd = link->sata_spd;
3009 mask = link->sata_spd_limit;
3010 if (mask <= 1)
3011 return -EINVAL;
3013 /* unconditionally mask off the highest bit */
3014 bit = fls(mask) - 1;
3015 mask &= ~(1 << bit);
3018 * Mask off all speeds higher than or equal to the current one. At
3019 * this point, if current SPD is not available and we previously
3020 * recorded the link speed from SStatus, the driver has already
3021 * masked off the highest bit so mask should already be 1 or 0.
3022 * Otherwise, we should not force 1.5Gbps on a link where we have
3023 * not previously recorded speed from SStatus. Just return in this
3024 * case.
3026 if (spd > 1)
3027 mask &= (1 << (spd - 1)) - 1;
3028 else
3029 return -EINVAL;
3031 /* were we already at the bottom? */
3032 if (!mask)
3033 return -EINVAL;
3035 if (spd_limit) {
3036 if (mask & ((1 << spd_limit) - 1))
3037 mask &= (1 << spd_limit) - 1;
3038 else {
3039 bit = ffs(mask) - 1;
3040 mask = 1 << bit;
3044 link->sata_spd_limit = mask;
3046 ata_link_warn(link, "limiting SATA link speed to %s\n",
3047 sata_spd_string(fls(mask)));
3049 return 0;
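/*
 * Worked example: with sata_spd_limit == 0x7 (1.5, 3.0 and 6.0 Gbps
 * allowed) and the link currently running at 6.0 Gbps (spd == 3), the
 * highest bit is cleared first (mask becomes 0x3) and the "below the
 * current speed" filter leaves it at 0x3, so the new limit allows up
 * to 3.0 Gbps and the warning reads
 * "limiting SATA link speed to 3.0 Gbps".
 */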
3052 #ifdef CONFIG_ATA_ACPI
3054 * ata_timing_cycle2mode - find xfer mode for the specified cycle duration
3055 * @xfer_shift: ATA_SHIFT_* value for transfer type to examine.
3056 * @cycle: cycle duration in ns
3058 * Return matching xfer mode for @cycle. The returned mode is of
3059 * the transfer type specified by @xfer_shift. If @cycle is too
3060 * slow for @xfer_shift, 0xff is returned. If @cycle is faster
3061 * than the fastest known mode, the fastest mode is returned.
3063 * LOCKING:
3064 * None.
3066 * RETURNS:
3067 * Matching xfer_mode, 0xff if no match found.
3069 u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle)
3071 u8 base_mode = 0xff, last_mode = 0xff;
3072 const struct ata_xfer_ent *ent;
3073 const struct ata_timing *t;
3075 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
3076 if (ent->shift == xfer_shift)
3077 base_mode = ent->base;
3079 for (t = ata_timing_find_mode(base_mode);
3080 t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) {
3081 unsigned short this_cycle;
3083 switch (xfer_shift) {
3084 case ATA_SHIFT_PIO:
3085 case ATA_SHIFT_MWDMA:
3086 this_cycle = t->cycle;
3087 break;
3088 case ATA_SHIFT_UDMA:
3089 this_cycle = t->udma;
3090 break;
3091 default:
3092 return 0xff;
3095 if (cycle > this_cycle)
3096 break;
3098 last_mode = t->mode;
3101 return last_mode;
3103 #endif
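/*
 * Worked example, assuming the usual UDMA entries in ata_timing[]
 * (modes 0, 1 and 2 with 120, 80 and 60 ns cycle times): a request for
 * xfer_shift == ATA_SHIFT_UDMA and cycle == 100 accepts mode 0
 * (120 ns, not faster than requested), breaks out at mode 1 (80 ns,
 * too fast) and therefore returns XFER_UDMA_0, the fastest mode whose
 * cycle time is not shorter than the one asked for.
 */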
3106 * ata_down_xfermask_limit - adjust dev xfer masks downward
3107 * @dev: Device to adjust xfer masks
3108 * @sel: ATA_DNXFER_* selector
3110 * Adjust xfer masks of @dev downward. Note that this function
3111 * does not apply the change. Invoking ata_set_mode() afterwards
3112 * will apply the limit.
3114 * LOCKING:
3115 * Inherited from caller.
3117 * RETURNS:
3118 * 0 on success, negative errno on failure
3120 int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
3122 char buf[32];
3123 unsigned long orig_mask, xfer_mask;
3124 unsigned long pio_mask, mwdma_mask, udma_mask;
3125 int quiet, highbit;
3127 quiet = !!(sel & ATA_DNXFER_QUIET);
3128 sel &= ~ATA_DNXFER_QUIET;
3130 xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
3131 dev->mwdma_mask,
3132 dev->udma_mask);
3133 ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
3135 switch (sel) {
3136 case ATA_DNXFER_PIO:
3137 highbit = fls(pio_mask) - 1;
3138 pio_mask &= ~(1 << highbit);
3139 break;
3141 case ATA_DNXFER_DMA:
3142 if (udma_mask) {
3143 highbit = fls(udma_mask) - 1;
3144 udma_mask &= ~(1 << highbit);
3145 if (!udma_mask)
3146 return -ENOENT;
3147 } else if (mwdma_mask) {
3148 highbit = fls(mwdma_mask) - 1;
3149 mwdma_mask &= ~(1 << highbit);
3150 if (!mwdma_mask)
3151 return -ENOENT;
3153 break;
3155 case ATA_DNXFER_40C:
3156 udma_mask &= ATA_UDMA_MASK_40C;
3157 break;
3159 case ATA_DNXFER_FORCE_PIO0:
3160 pio_mask &= 1;
3161 /* fall through */
3162 case ATA_DNXFER_FORCE_PIO:
3163 mwdma_mask = 0;
3164 udma_mask = 0;
3165 break;
3167 default:
3168 BUG();
3171 xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
3173 if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
3174 return -ENOENT;
3176 if (!quiet) {
3177 if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
3178 snprintf(buf, sizeof(buf), "%s:%s",
3179 ata_mode_string(xfer_mask),
3180 ata_mode_string(xfer_mask & ATA_MASK_PIO));
3181 else
3182 snprintf(buf, sizeof(buf), "%s",
3183 ata_mode_string(xfer_mask));
3185 ata_dev_warn(dev, "limiting speed to %s\n", buf);
3188 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
3189 &dev->udma_mask);
3191 return 0;
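/*
 * Worked example: for a device currently advertising PIO0-4
 * (pio_mask == 0x1f), ATA_DNXFER_PIO clears the top bit so the mask
 * becomes 0x0f and the next ata_set_mode() run reprograms the device
 * for PIO3 at best.  ATA_DNXFER_FORCE_PIO0 instead keeps only bit 0
 * and zeroes the DMA masks, leaving PIO0 as the sole usable mode.
 */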
3194 static int ata_dev_set_mode(struct ata_device *dev)
3196 struct ata_port *ap = dev->link->ap;
3197 struct ata_eh_context *ehc = &dev->link->eh_context;
3198 const bool nosetxfer = dev->horkage & ATA_HORKAGE_NOSETXFER;
3199 const char *dev_err_whine = "";
3200 int ign_dev_err = 0;
3201 unsigned int err_mask = 0;
3202 int rc;
3204 dev->flags &= ~ATA_DFLAG_PIO;
3205 if (dev->xfer_shift == ATA_SHIFT_PIO)
3206 dev->flags |= ATA_DFLAG_PIO;
3208 if (nosetxfer && ap->flags & ATA_FLAG_SATA && ata_id_is_sata(dev->id))
3209 dev_err_whine = " (SET_XFERMODE skipped)";
3210 else {
3211 if (nosetxfer)
3212 ata_dev_warn(dev,
3213 "NOSETXFER but PATA detected - can't "
3214 "skip SETXFER, might malfunction\n");
3215 err_mask = ata_dev_set_xfermode(dev);
3218 if (err_mask & ~AC_ERR_DEV)
3219 goto fail;
3221 /* revalidate */
3222 ehc->i.flags |= ATA_EHI_POST_SETMODE;
3223 rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
3224 ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
3225 if (rc)
3226 return rc;
3228 if (dev->xfer_shift == ATA_SHIFT_PIO) {
3229 /* Old CFA may refuse this command, which is just fine */
3230 if (ata_id_is_cfa(dev->id))
3231 ign_dev_err = 1;
3232 /* Catch several broken garbage emulations plus some pre
3233 ATA devices */
3234 if (ata_id_major_version(dev->id) == 0 &&
3235 dev->pio_mode <= XFER_PIO_2)
3236 ign_dev_err = 1;
3237 /* Some very old devices and some bad newer ones fail
3238 any kind of SET_XFERMODE request but support PIO0-2
3239 timings and no IORDY */
3240 if (!ata_id_has_iordy(dev->id) && dev->pio_mode <= XFER_PIO_2)
3241 ign_dev_err = 1;
3243 /* Early MWDMA devices do DMA but don't allow DMA mode setting.
3244 Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */
3245 if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
3246 dev->dma_mode == XFER_MW_DMA_0 &&
3247 (dev->id[63] >> 8) & 1)
3248 ign_dev_err = 1;
3250 /* if the device is actually configured correctly, ignore dev err */
3251 if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id)))
3252 ign_dev_err = 1;
3254 if (err_mask & AC_ERR_DEV) {
3255 if (!ign_dev_err)
3256 goto fail;
3257 else
3258 dev_err_whine = " (device error ignored)";
3261 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
3262 dev->xfer_shift, (int)dev->xfer_mode);
3264 if (!(ehc->i.flags & ATA_EHI_QUIET) ||
3265 ehc->i.flags & ATA_EHI_DID_HARDRESET)
3266 ata_dev_info(dev, "configured for %s%s\n",
3267 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)),
3268 dev_err_whine);
3270 return 0;
3272 fail:
3273 ata_dev_err(dev, "failed to set xfermode (err_mask=0x%x)\n", err_mask);
3274 return -EIO;
3278 * ata_do_set_mode - Program timings and issue SET FEATURES - XFER
3279 * @link: link on which timings will be programmed
3280 * @r_failed_dev: out parameter for failed device
3282 * Standard implementation of the function used to tune and set
3283 * ATA device disk transfer mode (PIO3, UDMA6, etc.). If
3284 * ata_dev_set_mode() fails, pointer to the failing device is
3285 * returned in @r_failed_dev.
3287 * LOCKING:
3288 * PCI/etc. bus probe sem.
3290 * RETURNS:
3291 * 0 on success, negative errno otherwise
3294 int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
3296 struct ata_port *ap = link->ap;
3297 struct ata_device *dev;
3298 int rc = 0, used_dma = 0, found = 0;
3300 /* step 1: calculate xfer_mask */
3301 ata_for_each_dev(dev, link, ENABLED) {
3302 unsigned long pio_mask, dma_mask;
3303 unsigned int mode_mask;
3305 mode_mask = ATA_DMA_MASK_ATA;
3306 if (dev->class == ATA_DEV_ATAPI)
3307 mode_mask = ATA_DMA_MASK_ATAPI;
3308 else if (ata_id_is_cfa(dev->id))
3309 mode_mask = ATA_DMA_MASK_CFA;
3311 ata_dev_xfermask(dev);
3312 ata_force_xfermask(dev);
3314 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
3316 if (libata_dma_mask & mode_mask)
3317 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask,
3318 dev->udma_mask);
3319 else
3320 dma_mask = 0;
3322 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
3323 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
3325 found = 1;
3326 if (ata_dma_enabled(dev))
3327 used_dma = 1;
3329 if (!found)
3330 goto out;
3332 /* step 2: always set host PIO timings */
3333 ata_for_each_dev(dev, link, ENABLED) {
3334 if (dev->pio_mode == 0xff) {
3335 ata_dev_warn(dev, "no PIO support\n");
3336 rc = -EINVAL;
3337 goto out;
3340 dev->xfer_mode = dev->pio_mode;
3341 dev->xfer_shift = ATA_SHIFT_PIO;
3342 if (ap->ops->set_piomode)
3343 ap->ops->set_piomode(ap, dev);
3346 /* step 3: set host DMA timings */
3347 ata_for_each_dev(dev, link, ENABLED) {
3348 if (!ata_dma_enabled(dev))
3349 continue;
3351 dev->xfer_mode = dev->dma_mode;
3352 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
3353 if (ap->ops->set_dmamode)
3354 ap->ops->set_dmamode(ap, dev);
3357 /* step 4: update devices' xfer mode */
3358 ata_for_each_dev(dev, link, ENABLED) {
3359 rc = ata_dev_set_mode(dev);
3360 if (rc)
3361 goto out;
3364 /* Record simplex status. If we selected DMA then the other
3365 * host channels are not permitted to do so.
3367 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
3368 ap->host->simplex_claimed = ap;
3370 out:
3371 if (rc)
3372 *r_failed_dev = dev;
3373 return rc;
3375 EXPORT_SYMBOL_GPL(ata_do_set_mode);
3378 * ata_wait_ready - wait for link to become ready
3379 * @link: link to be waited on
3380 * @deadline: deadline jiffies for the operation
3381 * @check_ready: callback to check link readiness
3383 * Wait for @link to become ready. @check_ready should return
3384 * positive number if @link is ready, 0 if it isn't, -ENODEV if
3385 * link doesn't seem to be occupied, other errno for other error
3386 * conditions.
3388 * Transient -ENODEV conditions are allowed for
3389 * ATA_TMOUT_FF_WAIT.
3391 * LOCKING:
3392 * EH context.
3394 * RETURNS:
3395 * 0 if @link is ready before @deadline; otherwise, -errno.
3397 int ata_wait_ready(struct ata_link *link, unsigned long deadline,
3398 int (*check_ready)(struct ata_link *link))
3400 unsigned long start = jiffies;
3401 unsigned long nodev_deadline;
3402 int warned = 0;
3404 /* choose which 0xff timeout to use, read comment in libata.h */
3405 if (link->ap->host->flags & ATA_HOST_PARALLEL_SCAN)
3406 nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT_LONG);
3407 else
3408 nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT);
3410 /* Slave readiness can't be tested separately from master. On
3411 * M/S emulation configuration, this function should be called
3412 * only on the master and it will handle both master and slave.
3414 WARN_ON(link == link->ap->slave_link);
3416 if (time_after(nodev_deadline, deadline))
3417 nodev_deadline = deadline;
3419 while (1) {
3420 unsigned long now = jiffies;
3421 int ready, tmp;
3423 ready = tmp = check_ready(link);
3424 if (ready > 0)
3425 return 0;
3428 * -ENODEV could be transient. Ignore -ENODEV if link
3429 * is online. Also, some SATA devices take a long
3430 * time to clear 0xff after reset. Wait for
3431 * ATA_TMOUT_FF_WAIT[_LONG] on -ENODEV if link isn't
3432 * offline.
3434 * Note that some PATA controllers (pata_ali) explode
3435 * if status register is read more than once when
3436 * there's no device attached.
3438 if (ready == -ENODEV) {
3439 if (ata_link_online(link))
3440 ready = 0;
3441 else if ((link->ap->flags & ATA_FLAG_SATA) &&
3442 !ata_link_offline(link) &&
3443 time_before(now, nodev_deadline))
3444 ready = 0;
3447 if (ready)
3448 return ready;
3449 if (time_after(now, deadline))
3450 return -EBUSY;
3452 if (!warned && time_after(now, start + 5 * HZ) &&
3453 (deadline - now > 3 * HZ)) {
3454 ata_link_warn(link,
3455 "link is slow to respond, please be patient "
3456 "(ready=%d)\n", tmp);
3457 warned = 1;
3460 ata_msleep(link->ap, 50);
3465 * ata_wait_after_reset - wait for link to become ready after reset
3466 * @link: link to be waited on
3467 * @deadline: deadline jiffies for the operation
3468 * @check_ready: callback to check link readiness
3470 * Wait for @link to become ready after reset.
3472 * LOCKING:
3473 * EH context.
3475 * RETURNS:
3476 * 0 if @link is ready before @deadline; otherwise, -errno.
3478 int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
3479 int (*check_ready)(struct ata_link *link))
3481 ata_msleep(link->ap, ATA_WAIT_AFTER_RESET);
3483 return ata_wait_ready(link, deadline, check_ready);
3485 EXPORT_SYMBOL_GPL(ata_wait_after_reset);
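/*
 * Minimal sketch of a @check_ready callback (my_check_ready and
 * my_read_status are hypothetical): most implementations just sample
 * the controller's status register and let ata_check_ready() translate
 * it:
 *
 *	static int my_check_ready(struct ata_link *link)
 *	{
 *		u8 status = my_read_status(link->ap);
 *		return ata_check_ready(status);
 *	}
 *
 * ata_check_ready() returns 1 once BSY is clear, -ENODEV for an
 * all-ones (0xff) status and 0 while the device is still busy.
 */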
3488 * ata_std_prereset - prepare for reset
3489 * @link: ATA link to be reset
3490 * @deadline: deadline jiffies for the operation
3492 * @link is about to be reset. Initialize it. Failure from
3493 * prereset makes libata abort whole reset sequence and give up
3494 * that port, so prereset should be best-effort. It does its
3495 * best to prepare for reset sequence but if things go wrong, it
3496 * should just whine, not fail.
3498 * LOCKING:
3499 * Kernel thread context (may sleep)
3501 * RETURNS:
3502 * 0 on success, -errno otherwise.
3504 int ata_std_prereset(struct ata_link *link, unsigned long deadline)
3506 struct ata_port *ap = link->ap;
3507 struct ata_eh_context *ehc = &link->eh_context;
3508 const unsigned long *timing = sata_ehc_deb_timing(ehc);
3509 int rc;
3511 /* if we're about to do hardreset, nothing more to do */
3512 if (ehc->i.action & ATA_EH_HARDRESET)
3513 return 0;
3515 /* if SATA, resume link */
3516 if (ap->flags & ATA_FLAG_SATA) {
3517 rc = sata_link_resume(link, timing, deadline);
3518 /* whine about phy resume failure but proceed */
3519 if (rc && rc != -EOPNOTSUPP)
3520 ata_link_warn(link,
3521 "failed to resume link for reset (errno=%d)\n",
3522 rc);
3525 /* no point in trying softreset on offline link */
3526 if (ata_phys_link_offline(link))
3527 ehc->i.action &= ~ATA_EH_SOFTRESET;
3529 return 0;
3531 EXPORT_SYMBOL_GPL(ata_std_prereset);
3534 * sata_std_hardreset - COMRESET w/o waiting or classification
3535 * @link: link to reset
3536 * @class: resulting class of attached device
3537 * @deadline: deadline jiffies for the operation
3539 * Standard SATA COMRESET w/o waiting or classification.
3541 * LOCKING:
3542 * Kernel thread context (may sleep)
3544 * RETURNS:
3545 * 0 if link offline, -EAGAIN if link online, -errno on errors.
3547 int sata_std_hardreset(struct ata_link *link, unsigned int *class,
3548 unsigned long deadline)
3550 const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
3551 bool online;
3552 int rc;
3554 /* do hardreset */
3555 rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
3556 return online ? -EAGAIN : rc;
3558 EXPORT_SYMBOL_GPL(sata_std_hardreset);
3561 * ata_std_postreset - standard postreset callback
3562 * @link: the target ata_link
3563 * @classes: classes of attached devices
3565 * This function is invoked after a successful reset. Note that
3566 * the device might have been reset more than once using
3567 * different reset methods before postreset is invoked.
3569 * LOCKING:
3570 * Kernel thread context (may sleep)
3572 void ata_std_postreset(struct ata_link *link, unsigned int *classes)
3574 u32 serror;
3576 DPRINTK("ENTER\n");
3578 /* reset complete, clear SError */
3579 if (!sata_scr_read(link, SCR_ERROR, &serror))
3580 sata_scr_write(link, SCR_ERROR, serror);
3582 /* print link status */
3583 sata_print_link_status(link);
3585 DPRINTK("EXIT\n");
3587 EXPORT_SYMBOL_GPL(ata_std_postreset);
3590 * ata_dev_same_device - Determine whether new ID matches configured device
3591 * @dev: device to compare against
3592 * @new_class: class of the new device
3593 * @new_id: IDENTIFY page of the new device
3595 * Compare @new_class and @new_id against @dev and determine
3596 * whether @dev is the device indicated by @new_class and
3597 * @new_id.
3599 * LOCKING:
3600 * None.
3602 * RETURNS:
3603 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
3605 static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3606 const u16 *new_id)
3608 const u16 *old_id = dev->id;
3609 unsigned char model[2][ATA_ID_PROD_LEN + 1];
3610 unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
3612 if (dev->class != new_class) {
3613 ata_dev_info(dev, "class mismatch %d != %d\n",
3614 dev->class, new_class);
3615 return 0;
3618 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3619 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3620 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3621 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
3623 if (strcmp(model[0], model[1])) {
3624 ata_dev_info(dev, "model number mismatch '%s' != '%s'\n",
3625 model[0], model[1]);
3626 return 0;
3629 if (strcmp(serial[0], serial[1])) {
3630 ata_dev_info(dev, "serial number mismatch '%s' != '%s'\n",
3631 serial[0], serial[1]);
3632 return 0;
3635 return 1;
3639 * ata_dev_reread_id - Re-read IDENTIFY data
3640 * @dev: target ATA device
3641 * @readid_flags: read ID flags
3643 * Re-read IDENTIFY page and make sure @dev is still attached to
3644 * the port.
3646 * LOCKING:
3647 * Kernel thread context (may sleep)
3649 * RETURNS:
3650 * 0 on success, negative errno otherwise
3652 int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
3654 unsigned int class = dev->class;
3655 u16 *id = (void *)dev->link->ap->sector_buf;
3656 int rc;
3658 /* read ID data */
3659 rc = ata_dev_read_id(dev, &class, readid_flags, id);
3660 if (rc)
3661 return rc;
3663 /* is the device still there? */
3664 if (!ata_dev_same_device(dev, class, id))
3665 return -ENODEV;
3667 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
3668 return 0;
3672 * ata_dev_revalidate - Revalidate ATA device
3673 * @dev: device to revalidate
3674 * @new_class: new class code
3675 * @readid_flags: read ID flags
3677 * Re-read IDENTIFY page, make sure @dev is still attached to the
3678 * port and reconfigure it according to the new IDENTIFY page.
3680 * LOCKING:
3681 * Kernel thread context (may sleep)
3683 * RETURNS:
3684 * 0 on success, negative errno otherwise
3686 int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
3687 unsigned int readid_flags)
3689 u64 n_sectors = dev->n_sectors;
3690 u64 n_native_sectors = dev->n_native_sectors;
3691 int rc;
3693 if (!ata_dev_enabled(dev))
3694 return -ENODEV;
3696 /* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
3697 if (ata_class_enabled(new_class) &&
3698 new_class != ATA_DEV_ATA &&
3699 new_class != ATA_DEV_ATAPI &&
3700 new_class != ATA_DEV_ZAC &&
3701 new_class != ATA_DEV_SEMB) {
3702 ata_dev_info(dev, "class mismatch %u != %u\n",
3703 dev->class, new_class);
3704 rc = -ENODEV;
3705 goto fail;
3708 /* re-read ID */
3709 rc = ata_dev_reread_id(dev, readid_flags);
3710 if (rc)
3711 goto fail;
3713 /* configure device according to the new ID */
3714 rc = ata_dev_configure(dev);
3715 if (rc)
3716 goto fail;
3718 /* verify n_sectors hasn't changed */
3719 if (dev->class != ATA_DEV_ATA || !n_sectors ||
3720 dev->n_sectors == n_sectors)
3721 return 0;
3723 /* n_sectors has changed */
3724 ata_dev_warn(dev, "n_sectors mismatch %llu != %llu\n",
3725 (unsigned long long)n_sectors,
3726 (unsigned long long)dev->n_sectors);
3729 * Something could have caused HPA to be unlocked
3730 * involuntarily. If n_native_sectors hasn't changed and the
3731 * new size matches it, keep the device.
3733 if (dev->n_native_sectors == n_native_sectors &&
3734 dev->n_sectors > n_sectors && dev->n_sectors == n_native_sectors) {
3735 ata_dev_warn(dev,
3736 "new n_sectors matches native, probably "
3737 "late HPA unlock, n_sectors updated\n");
3738 /* use the larger n_sectors */
3739 return 0;
3743 * Some BIOSes boot w/o HPA but resume w/ HPA locked. Try
3744 * unlocking HPA in those cases.
3746 * https://bugzilla.kernel.org/show_bug.cgi?id=15396
3748 if (dev->n_native_sectors == n_native_sectors &&
3749 dev->n_sectors < n_sectors && n_sectors == n_native_sectors &&
3750 !(dev->horkage & ATA_HORKAGE_BROKEN_HPA)) {
3751 ata_dev_warn(dev,
3752 "old n_sectors matches native, probably "
3753 "late HPA lock, will try to unlock HPA\n");
3754 /* try unlocking HPA */
3755 dev->flags |= ATA_DFLAG_UNLOCK_HPA;
3756 rc = -EIO;
3757 } else
3758 rc = -ENODEV;
3760 /* restore original n_[native_]sectors and fail */
3761 dev->n_native_sectors = n_native_sectors;
3762 dev->n_sectors = n_sectors;
3763 fail:
3764 ata_dev_err(dev, "revalidation failed (errno=%d)\n", rc);
3765 return rc;
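/*
 * Worked example (sizes are illustrative): suppose a drive was probed
 * with an HPA hiding part of the media, so n_sectors == 312500000
 * while n_native_sectors == 312581808.  If a firmware resume silently
 * unlocks the HPA, re-reading IDENTIFY returns the native size and the
 * "late HPA unlock" branch above keeps the larger value.  The opposite
 * transition (native size unchanged, n_sectors shrunk back to the
 * HPA-limited value) sets ATA_DFLAG_UNLOCK_HPA and returns -EIO so
 * that the follow-up probe can retry with HPA unlocking requested.
 */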
3768 struct ata_blacklist_entry {
3769 const char *model_num;
3770 const char *model_rev;
3771 unsigned long horkage;
3774 static const struct ata_blacklist_entry ata_device_blacklist [] = {
3775 /* Devices with DMA related problems under Linux */
3776 { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA },
3777 { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA },
3778 { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA },
3779 { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA },
3780 { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA },
3781 { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA },
3782 { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA },
3783 { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA },
3784 { "CRD-8400B", NULL, ATA_HORKAGE_NODMA },
3785 { "CRD-848[02]B", NULL, ATA_HORKAGE_NODMA },
3786 { "CRD-84", NULL, ATA_HORKAGE_NODMA },
3787 { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA },
3788 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
3789 { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA },
3790 { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA },
3791 { "HITACHI CDR-8[34]35",NULL, ATA_HORKAGE_NODMA },
3792 { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA },
3793 { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA },
3794 { "CD-532E-A", NULL, ATA_HORKAGE_NODMA },
3795 { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA },
3796 { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA },
3797 { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA },
3798 { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA },
3799 { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA },
3800 { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
3801 { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA },
3802 { "SAMSUNG CD-ROM SN-124", "N001", ATA_HORKAGE_NODMA },
3803 { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA },
3804 { " 2GB ATA Flash Disk", "ADMA428M", ATA_HORKAGE_NODMA },
3805 { "VRFDFC22048UCHC-TE*", NULL, ATA_HORKAGE_NODMA },
3806 /* Odd clown on sil3726/4726 PMPs */
3807 { "Config Disk", NULL, ATA_HORKAGE_DISABLE },
3809 /* Weird ATAPI devices */
3810 { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 },
3811 { "QUANTUM DAT DAT72-000", NULL, ATA_HORKAGE_ATAPI_MOD16_DMA },
3812 { "Slimtype DVD A DS8A8SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 },
3813 { "Slimtype DVD A DS8A9SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 },
3816 * Causes silent data corruption with higher max sects.
3817 * http://lkml.kernel.org/g/x49wpy40ysk.fsf@segfault.boston.devel.redhat.com
3819 { "ST380013AS", "3.20", ATA_HORKAGE_MAX_SEC_1024 },
3822 * These devices time out with higher max sects.
3823 * https://bugzilla.kernel.org/show_bug.cgi?id=121671
3825 { "LITEON CX1-JB*-HP", NULL, ATA_HORKAGE_MAX_SEC_1024 },
3826 { "LITEON EP1-*", NULL, ATA_HORKAGE_MAX_SEC_1024 },
3828 /* Devices we expect to fail diagnostics */
3830 /* Devices where NCQ should be avoided */
3831 /* NCQ is slow */
3832 { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ },
3833 { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ, },
3834 /* http://thread.gmane.org/gmane.linux.ide/14907 */
3835 { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ },
3836 /* NCQ is broken */
3837 { "Maxtor *", "BANC*", ATA_HORKAGE_NONCQ },
3838 { "Maxtor 7V300F0", "VA111630", ATA_HORKAGE_NONCQ },
3839 { "ST380817AS", "3.42", ATA_HORKAGE_NONCQ },
3840 { "ST3160023AS", "3.42", ATA_HORKAGE_NONCQ },
3841 { "OCZ CORE_SSD", "02.10104", ATA_HORKAGE_NONCQ },
3843 /* Seagate NCQ + FLUSH CACHE firmware bug */
3844 { "ST31500341AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
3845 ATA_HORKAGE_FIRMWARE_WARN },
3847 { "ST31000333AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
3848 ATA_HORKAGE_FIRMWARE_WARN },
3850 { "ST3640[36]23AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
3851 ATA_HORKAGE_FIRMWARE_WARN },
3853 { "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
3854 ATA_HORKAGE_FIRMWARE_WARN },
3856 /* drives which fail FPDMA_AA activation (some may freeze afterwards)
3857 the ST disks also have LPM issues */
3858 { "ST1000LM024 HN-M101MBB", NULL, ATA_HORKAGE_BROKEN_FPDMA_AA |
3859 ATA_HORKAGE_NOLPM, },
3860 { "VB0250EAVER", "HPG7", ATA_HORKAGE_BROKEN_FPDMA_AA },
3862 /* Blacklist entries taken from Silicon Image 3124/3132
3863 Windows driver .inf file - also several Linux problem reports */
3864 { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, },
3865 { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ, },
3866 { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, },
3868 /* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */
3869 { "C300-CTFDDAC128MAG", "0001", ATA_HORKAGE_NONCQ, },
3871 /* Some Sandisk SSDs lock up hard with NCQ enabled. Reported on
3872 SD7SN6S256G and SD8SN8U256G */
3873 { "SanDisk SD[78]SN*G", NULL, ATA_HORKAGE_NONCQ, },
3875 /* devices which puke on READ_NATIVE_MAX */
3876 { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, },
3877 { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
3878 { "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
3879 { "MAXTOR 6L080L4", "A93.0500", ATA_HORKAGE_BROKEN_HPA },
3881 /* this one allows HPA unlocking but fails IOs on the area */
3882 { "OCZ-VERTEX", "1.30", ATA_HORKAGE_BROKEN_HPA },
3884 /* Devices which report 1 sector over size HPA */
3885 { "ST340823A", NULL, ATA_HORKAGE_HPA_SIZE, },
3886 { "ST320413A", NULL, ATA_HORKAGE_HPA_SIZE, },
3887 { "ST310211A", NULL, ATA_HORKAGE_HPA_SIZE, },
3889 /* Devices which get the IVB wrong */
3890 { "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, },
3891 /* Maybe we should just blacklist TSSTcorp... */
3892 { "TSSTcorp CDDVDW SH-S202[HJN]", "SB0[01]", ATA_HORKAGE_IVB, },
3894 /* Devices that do not need bridging limits applied */
3895 { "MTRON MSP-SATA*", NULL, ATA_HORKAGE_BRIDGE_OK, },
3896 { "BUFFALO HD-QSU2/R5", NULL, ATA_HORKAGE_BRIDGE_OK, },
3898 /* Devices which aren't very happy with higher link speeds */
3899 { "WD My Book", NULL, ATA_HORKAGE_1_5_GBPS, },
3900 { "Seagate FreeAgent GoFlex", NULL, ATA_HORKAGE_1_5_GBPS, },
3903 * Devices which choke on SETXFER. Applies only if both the
3904 * device and controller are SATA.
3906 { "PIONEER DVD-RW DVRTD08", NULL, ATA_HORKAGE_NOSETXFER },
3907 { "PIONEER DVD-RW DVRTD08A", NULL, ATA_HORKAGE_NOSETXFER },
3908 { "PIONEER DVD-RW DVR-215", NULL, ATA_HORKAGE_NOSETXFER },
3909 { "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER },
3910 { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER },
3912 /* Crucial BX100 SSD 500GB has broken LPM support */
3913 { "CT500BX100SSD1", NULL, ATA_HORKAGE_NOLPM },
3915 /* 512GB MX100 with MU01 firmware has both queued TRIM and LPM issues */
3916 { "Crucial_CT512MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
3917 ATA_HORKAGE_ZERO_AFTER_TRIM |
3918 ATA_HORKAGE_NOLPM, },
3919 /* 512GB MX100 with newer firmware has only LPM issues */
3920 { "Crucial_CT512MX100*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM |
3921 ATA_HORKAGE_NOLPM, },
3923 /* 480GB+ M500 SSDs have both queued TRIM and LPM issues */
3924 { "Crucial_CT480M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
3925 ATA_HORKAGE_ZERO_AFTER_TRIM |
3926 ATA_HORKAGE_NOLPM, },
3927 { "Crucial_CT960M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
3928 ATA_HORKAGE_ZERO_AFTER_TRIM |
3929 ATA_HORKAGE_NOLPM, },
3931 /* These specific Samsung models/firmware-revs do not handle LPM well */
3932 { "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_HORKAGE_NOLPM, },
3933 { "SAMSUNG SSD PM830 mSATA *", "CXM13D1Q", ATA_HORKAGE_NOLPM, },
3934 { "SAMSUNG MZ7TD256HAFV-000L9", NULL, ATA_HORKAGE_NOLPM, },
3935 { "SAMSUNG MZ7TE512HMHP-000L1", "EXT06L0Q", ATA_HORKAGE_NOLPM, },
3937 /* devices that don't properly handle queued TRIM commands */
3938 { "Micron_M500IT_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
3939 ATA_HORKAGE_ZERO_AFTER_TRIM, },
3940 { "Micron_M500_*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
3941 ATA_HORKAGE_ZERO_AFTER_TRIM, },
3942 { "Crucial_CT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
3943 ATA_HORKAGE_ZERO_AFTER_TRIM, },
3944 { "Micron_M5[15]0_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
3945 ATA_HORKAGE_ZERO_AFTER_TRIM, },
3946 { "Crucial_CT*M550*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
3947 ATA_HORKAGE_ZERO_AFTER_TRIM, },
3948 { "Crucial_CT*MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
3949 ATA_HORKAGE_ZERO_AFTER_TRIM, },
3950 { "Samsung SSD 840*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
3951 ATA_HORKAGE_ZERO_AFTER_TRIM, },
3952 { "Samsung SSD 850*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
3953 ATA_HORKAGE_ZERO_AFTER_TRIM, },
3954 { "FCCT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
3955 ATA_HORKAGE_ZERO_AFTER_TRIM, },
3957 /* devices that don't properly handle TRIM commands */
3958 { "SuperSSpeed S238*", NULL, ATA_HORKAGE_NOTRIM, },
3961 * As defined, the DRAT (Deterministic Read After Trim) and RZAT
3962 * (Return Zero After Trim) flags in the ATA Command Set are
3963 * unreliable in the sense that they only define what happens if
3964 * the device successfully executed the DSM TRIM command. TRIM
3965 * is only advisory, however, and the device is free to silently
3966 * ignore all or parts of the request.
3968 * Whitelist drives that are known to reliably return zeroes
3969 * after TRIM.
3973 * The intel 510 drive has buggy DRAT/RZAT. Explicitly exclude
3974 * that model before whitelisting all other intel SSDs.
3976 { "INTEL*SSDSC2MH*", NULL, 0, },
3978 { "Micron*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
3979 { "Crucial*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
3980 { "INTEL*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
3981 { "SSD*INTEL*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
3982 { "Samsung*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
3983 { "SAMSUNG*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
3984 { "SAMSUNG*MZ7KM*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
3985 { "ST[1248][0248]0[FH]*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
3988 * Some WD SATA-I drives spin up and down erratically when the link
3990 * is put into the slumber mode. We don't have a full list of the
3990 * affected devices. Disable LPM if the device matches one of the
3991 * known prefixes and is SATA-1. As a side effect LPM partial is
3992 * lost too.
3994 * https://bugzilla.kernel.org/show_bug.cgi?id=57211
3996 { "WDC WD800JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
3997 { "WDC WD1200JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
3998 { "WDC WD1600JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
3999 { "WDC WD2000JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
4000 { "WDC WD2500JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
4001 { "WDC WD3000JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
4002 { "WDC WD3200JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
4004 /* End Marker */
4008 static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
4010 unsigned char model_num[ATA_ID_PROD_LEN + 1];
4011 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
4012 const struct ata_blacklist_entry *ad = ata_device_blacklist;
4014 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
4015 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
4017 while (ad->model_num) {
4018 if (glob_match(ad->model_num, model_num)) {
4019 if (ad->model_rev == NULL)
4020 return ad->horkage;
4021 if (glob_match(ad->model_rev, model_rev))
4022 return ad->horkage;
4024 ad++;
4026 return 0;
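/*
 * The model and firmware strings above are matched with glob_match(),
 * so an entry such as { "ST3320[68]13AS", "SD1[5-9]", ... } catches
 * both ST3320613AS and ST3320813AS drives on any of the SD15..SD19
 * firmware revisions, while a NULL model_rev applies the quirk to
 * every revision of a matching model.
 */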
4029 static int ata_dma_blacklisted(const struct ata_device *dev)
4031 /* We don't support polling DMA.
4032 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO)
4033 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
4035 if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
4036 (dev->flags & ATA_DFLAG_CDB_INTR))
4037 return 1;
4038 return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
4042 * ata_is_40wire - check drive side detection
4043 * @dev: device
4045 * Perform drive side detection decoding, allowing for device vendors
4046 * who can't follow the documentation.
4049 static int ata_is_40wire(struct ata_device *dev)
4051 if (dev->horkage & ATA_HORKAGE_IVB)
4052 return ata_drive_40wire_relaxed(dev->id);
4053 return ata_drive_40wire(dev->id);
4057 * cable_is_40wire - 40/80/SATA decider
4058 * @ap: port to consider
4060 * This function encapsulates the policy for speed management
4061 * in one place. At the moment we don't cache the result but
4062 * there is a good case for setting ap->cbl to the result when
4063 * we are called with unknown cables (and figuring out if it
4064 * impacts hotplug at all).
4066 * Return 1 if the cable appears to be 40 wire.
4069 static int cable_is_40wire(struct ata_port *ap)
4071 struct ata_link *link;
4072 struct ata_device *dev;
4074 /* If the controller thinks we are 40 wire, we are. */
4075 if (ap->cbl == ATA_CBL_PATA40)
4076 return 1;
4078 /* If the controller thinks we are 80 wire, we are. */
4079 if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA)
4080 return 0;
4082 /* If the system is known to be 40 wire short cable (eg
4083 * laptop), then we allow 80 wire modes even if the drive
4084 * isn't sure.
4086 if (ap->cbl == ATA_CBL_PATA40_SHORT)
4087 return 0;
4089 /* If the controller doesn't know, we scan.
4091 * Note: We look for all 40 wire detects at this point. Any
4092 * 80 wire detect is taken to be 80 wire cable because
4093 * - in many setups only the one drive (slave if present) will
4094 * give a valid detect
4095 * - if you have a non detect capable drive you don't want it
4096 * to colour the choice
4098 ata_for_each_link(link, ap, EDGE) {
4099 ata_for_each_dev(dev, link, ENABLED) {
4100 if (!ata_is_40wire(dev))
4101 return 0;
4104 return 1;
4108 * ata_dev_xfermask - Compute supported xfermask of the given device
4109 * @dev: Device to compute xfermask for
4111 * Compute supported xfermask of @dev and store it in
4112 * dev->*_mask. This function is responsible for applying all
4113 * known limits including host controller limits, device
4114 * blacklist, etc...
4116 * LOCKING:
4117 * None.
4119 static void ata_dev_xfermask(struct ata_device *dev)
4121 struct ata_link *link = dev->link;
4122 struct ata_port *ap = link->ap;
4123 struct ata_host *host = ap->host;
4124 unsigned long xfer_mask;
4126 /* controller modes available */
4127 xfer_mask = ata_pack_xfermask(ap->pio_mask,
4128 ap->mwdma_mask, ap->udma_mask);
4130 /* drive modes available */
4131 xfer_mask &= ata_pack_xfermask(dev->pio_mask,
4132 dev->mwdma_mask, dev->udma_mask);
4133 xfer_mask &= ata_id_xfermask(dev->id);
4136 * CFA Advanced TrueIDE timings are not allowed on a shared
4137 * cable
4139 if (ata_dev_pair(dev)) {
4140 /* No PIO5 or PIO6 */
4141 xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
4142 /* No MWDMA3 or MWDMA 4 */
4143 xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
4146 if (ata_dma_blacklisted(dev)) {
4147 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4148 ata_dev_warn(dev,
4149 "device is on DMA blacklist, disabling DMA\n");
4152 if ((host->flags & ATA_HOST_SIMPLEX) &&
4153 host->simplex_claimed && host->simplex_claimed != ap) {
4154 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4155 ata_dev_warn(dev,
4156 "simplex DMA is claimed by other device, disabling DMA\n");
4159 if (ap->flags & ATA_FLAG_NO_IORDY)
4160 xfer_mask &= ata_pio_mask_no_iordy(dev);
4162 if (ap->ops->mode_filter)
4163 xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
4165 /* Apply cable rule here. Don't apply it early because when
4166 * we handle hot plug the cable type can itself change.
4167 * Check this last so that we know if the transfer rate was
4168 * solely limited by the cable.
4169 * Unknown or 80 wire cables reported host side are checked
4170 * drive side as well. Cases where we know a 40wire cable
4171 * is used safely for 80 are not checked here.
4173 if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
4174 /* UDMA/44 or higher would be available */
4175 if (cable_is_40wire(ap)) {
4176 ata_dev_warn(dev,
4177 "limited to UDMA/33 due to 40-wire cable\n");
4178 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
4181 ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
4182 &dev->mwdma_mask, &dev->udma_mask);
4186 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
4187 * @dev: Device to which command will be sent
4189 * Issue SET FEATURES - XFER MODE command to device @dev
4190 * on port @ap.
4192 * LOCKING:
4193 * PCI/etc. bus probe sem.
4195 * RETURNS:
4196 * 0 on success, AC_ERR_* mask otherwise.
4199 static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
4201 struct ata_taskfile tf;
4202 unsigned int err_mask;
4204 /* set up set-features taskfile */
4205 DPRINTK("set features - xfer mode\n");
4207 /* Some controllers and ATAPI devices show flaky interrupt
4208 * behavior after setting xfer mode. Use polling instead.
4210 ata_tf_init(dev, &tf);
4211 tf.command = ATA_CMD_SET_FEATURES;
4212 tf.feature = SETFEATURES_XFER;
4213 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
4214 tf.protocol = ATA_PROT_NODATA;
4215 /* If we are using IORDY we must send the mode setting command */
4216 if (ata_pio_need_iordy(dev))
4217 tf.nsect = dev->xfer_mode;
4218 /* If the device has IORDY and the controller does not - turn it off */
4219 else if (ata_id_has_iordy(dev->id))
4220 tf.nsect = 0x01;
4221 else /* In the ancient relic department - skip all of this */
4222 return 0;
4224 /* On some disks, this command causes spin-up, so we need a longer timeout */
4225 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 15000);
4227 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4228 return err_mask;
4232 * ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES
4233 * @dev: Device to which command will be sent
4234 * @enable: Whether to enable or disable the feature
4235 * @feature: The feature value to set, passed in the sector count field
4237 * Issue SET FEATURES - SATA FEATURES command to device @dev
4238 * on port @ap, with the sector count set to @feature.
4240 * LOCKING:
4241 * PCI/etc. bus probe sem.
4243 * RETURNS:
4244 * 0 on success, AC_ERR_* mask otherwise.
4246 unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable, u8 feature)
4248 struct ata_taskfile tf;
4249 unsigned int err_mask;
4250 unsigned long timeout = 0;
4252 /* set up set-features taskfile */
4253 DPRINTK("set features - SATA features\n");
4255 ata_tf_init(dev, &tf);
4256 tf.command = ATA_CMD_SET_FEATURES;
4257 tf.feature = enable;
4258 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4259 tf.protocol = ATA_PROT_NODATA;
4260 tf.nsect = feature;
4262 if (enable == SETFEATURES_SPINUP)
4263 timeout = ata_probe_timeout ?
4264 ata_probe_timeout * 1000 : SETFEATURES_SPINUP_TIMEOUT;
4265 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, timeout);
4267 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4268 return err_mask;
4270 EXPORT_SYMBOL_GPL(ata_dev_set_feature);
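/* Illustrative sketch (not part of this file): a caller such as the EH path
 * can use ata_dev_set_feature() to enable a SATA feature, for example
 * device-initiated power management. This assumes the SETFEATURES_SATA_ENABLE
 * and SATA_DIPM constants from <linux/ata.h>.
 *
 *	unsigned int err_mask;
 *
 *	err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE, SATA_DIPM);
 *	if (err_mask)
 *		ata_dev_warn(dev, "failed to enable DIPM (err_mask=0x%x)\n",
 *			     err_mask);
 */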
4273 * ata_dev_init_params - Issue INIT DEV PARAMS command
4274 * @dev: Device to which command will be sent
4275 * @heads: Number of heads (taskfile parameter)
4276 * @sectors: Number of sectors (taskfile parameter)
4278 * LOCKING:
4279 * Kernel thread context (may sleep)
4281 * RETURNS:
4282 * 0 on success, AC_ERR_* mask otherwise.
4284 static unsigned int ata_dev_init_params(struct ata_device *dev,
4285 u16 heads, u16 sectors)
4287 struct ata_taskfile tf;
4288 unsigned int err_mask;
4290 /* Number of sectors per track 1-255. Number of heads 1-16 */
4291 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
4292 return AC_ERR_INVALID;
4294 /* set up init dev params taskfile */
4295 DPRINTK("init dev params \n");
4297 ata_tf_init(dev, &tf);
4298 tf.command = ATA_CMD_INIT_DEV_PARAMS;
4299 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4300 tf.protocol = ATA_PROT_NODATA;
4301 tf.nsect = sectors;
4302 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
4304 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4305 /* A clean abort indicates an original or just-out-of-spec drive
4306 and we should continue as we issue the setup based on the
4307 drive's reported working geometry */
4308 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
4309 err_mask = 0;
4311 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4312 return err_mask;
4316 * atapi_check_dma - Check whether ATAPI DMA can be supported
4317 * @qc: Metadata associated with taskfile to check
4319 * Allow low-level driver to filter ATA PACKET commands, returning
4320 * a status indicating whether or not it is OK to use DMA for the
4321 * supplied PACKET command.
4323 * LOCKING:
4324 * spin_lock_irqsave(host lock)
4326 * RETURNS: 0 when ATAPI DMA can be used
4327 * nonzero otherwise
4329 int atapi_check_dma(struct ata_queued_cmd *qc)
4331 struct ata_port *ap = qc->ap;
4333 /* Don't allow DMA if it isn't a multiple of 16 bytes. Quite a
4334 * few ATAPI devices choke on such DMA requests.
4336 if (!(qc->dev->horkage & ATA_HORKAGE_ATAPI_MOD16_DMA) &&
4337 unlikely(qc->nbytes & 15))
4338 return 1;
4340 if (ap->ops->check_atapi_dma)
4341 return ap->ops->check_atapi_dma(qc);
4343 return 0;
4347 * ata_std_qc_defer - Check whether a qc needs to be deferred
4348 * @qc: ATA command in question
4350 * Non-NCQ commands cannot run with any other command, NCQ or
4351 * not. As the upper layer only knows the queue depth, we are
4352 * responsible for maintaining exclusion. This function checks
4353 * whether a new command @qc can be issued.
4355 * LOCKING:
4356 * spin_lock_irqsave(host lock)
4358 * RETURNS:
4359 * ATA_DEFER_* if deferring is needed, 0 otherwise.
4361 int ata_std_qc_defer(struct ata_queued_cmd *qc)
4363 struct ata_link *link = qc->dev->link;
4365 if (ata_is_ncq(qc->tf.protocol)) {
4366 if (!ata_tag_valid(link->active_tag))
4367 return 0;
4368 } else {
4369 if (!ata_tag_valid(link->active_tag) && !link->sactive)
4370 return 0;
4373 return ATA_DEFER_LINK;
4375 EXPORT_SYMBOL_GPL(ata_std_qc_defer);
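/* Illustrative sketch (not part of this file): an LLD with no queueing
 * constraints beyond the standard NCQ/non-NCQ exclusion simply wires
 * ata_std_qc_defer() into its port operations. The ops structure below is
 * hypothetical.
 *
 *	static struct ata_port_operations my_sata_ops = {
 *		.inherits	= &sata_port_ops,
 *		.qc_defer	= ata_std_qc_defer,
 *	};
 */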
4377 enum ata_completion_errors ata_noop_qc_prep(struct ata_queued_cmd *qc)
4379 return AC_ERR_OK;
4381 EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
4384 * ata_sg_init - Associate command with scatter-gather table.
4385 * @qc: Command to be associated
4386 * @sg: Scatter-gather table.
4387 * @n_elem: Number of elements in s/g table.
4389 * Initialize the data-related elements of queued_cmd @qc
4390 * to point to a scatter-gather table @sg, containing @n_elem
4391 * elements.
4393 * LOCKING:
4394 * spin_lock_irqsave(host lock)
4396 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4397 unsigned int n_elem)
4399 qc->sg = sg;
4400 qc->n_elem = n_elem;
4401 qc->cursg = qc->sg;
4404 #ifdef CONFIG_HAS_DMA
4407 * ata_sg_clean - Unmap DMA memory associated with command
4408 * @qc: Command containing DMA memory to be released
4410 * Unmap all mapped DMA memory associated with this command.
4412 * LOCKING:
4413 * spin_lock_irqsave(host lock)
4415 static void ata_sg_clean(struct ata_queued_cmd *qc)
4417 struct ata_port *ap = qc->ap;
4418 struct scatterlist *sg = qc->sg;
4419 int dir = qc->dma_dir;
4421 WARN_ON_ONCE(sg == NULL);
4423 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
4425 if (qc->n_elem)
4426 dma_unmap_sg(ap->dev, sg, qc->orig_n_elem, dir);
4428 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4429 qc->sg = NULL;
4433 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
4434 * @qc: Command with scatter-gather table to be mapped.
4436 * DMA-map the scatter-gather table associated with queued_cmd @qc.
4438 * LOCKING:
4439 * spin_lock_irqsave(host lock)
4441 * RETURNS:
4442 * Zero on success, negative on error.
4445 static int ata_sg_setup(struct ata_queued_cmd *qc)
4447 struct ata_port *ap = qc->ap;
4448 unsigned int n_elem;
4450 VPRINTK("ENTER, ata%u\n", ap->print_id);
4452 n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir);
4453 if (n_elem < 1)
4454 return -1;
4456 VPRINTK("%d sg elements mapped\n", n_elem);
4457 qc->orig_n_elem = qc->n_elem;
4458 qc->n_elem = n_elem;
4459 qc->flags |= ATA_QCFLAG_DMAMAP;
4461 return 0;
4464 #else /* !CONFIG_HAS_DMA */
4466 static inline void ata_sg_clean(struct ata_queued_cmd *qc) {}
4467 static inline int ata_sg_setup(struct ata_queued_cmd *qc) { return -1; }
4469 #endif /* !CONFIG_HAS_DMA */
4472 * swap_buf_le16 - swap halves of 16-bit words in place
4473 * @buf: Buffer to swap
4474 * @buf_words: Number of 16-bit words in buffer.
4476 * Swap halves of 16-bit words if needed to convert from
4477 * little-endian byte order to native cpu byte order, or
4478 * vice-versa.
4480 * LOCKING:
4481 * Inherited from caller.
4483 void swap_buf_le16(u16 *buf, unsigned int buf_words)
4485 #ifdef __BIG_ENDIAN
4486 unsigned int i;
4488 for (i = 0; i < buf_words; i++)
4489 buf[i] = le16_to_cpu(buf[i]);
4490 #endif /* __BIG_ENDIAN */
4494 * ata_qc_new_init - Request an available ATA command, and initialize it
4495 * @dev: Device from whom we request an available command structure
4496 * @tag: tag
4498 * LOCKING:
4499 * None.
4502 struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev, int tag)
4504 struct ata_port *ap = dev->link->ap;
4505 struct ata_queued_cmd *qc;
4507 /* no command while frozen */
4508 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
4509 return NULL;
4511 /* libsas case */
4512 if (ap->flags & ATA_FLAG_SAS_HOST) {
4513 tag = ata_sas_allocate_tag(ap);
4514 if (tag < 0)
4515 return NULL;
4518 qc = __ata_qc_from_tag(ap, tag);
4519 qc->tag = qc->hw_tag = tag;
4520 qc->scsicmd = NULL;
4521 qc->ap = ap;
4522 qc->dev = dev;
4524 ata_qc_reinit(qc);
4526 return qc;
4530 * ata_qc_free - free unused ata_queued_cmd
4531 * @qc: Command to complete
4533 * Designed to free an unused ata_queued_cmd object
4534 * in case something prevents using it.
4536 * LOCKING:
4537 * spin_lock_irqsave(host lock)
4539 void ata_qc_free(struct ata_queued_cmd *qc)
4541 struct ata_port *ap;
4542 unsigned int tag;
4544 WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4545 ap = qc->ap;
4547 qc->flags = 0;
4548 tag = qc->tag;
4549 if (ata_tag_valid(tag)) {
4550 qc->tag = ATA_TAG_POISON;
4551 if (ap->flags & ATA_FLAG_SAS_HOST)
4552 ata_sas_free_tag(tag, ap);
4556 void __ata_qc_complete(struct ata_queued_cmd *qc)
4558 struct ata_port *ap;
4559 struct ata_link *link;
4561 WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4562 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
4563 ap = qc->ap;
4564 link = qc->dev->link;
4566 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
4567 ata_sg_clean(qc);
4569 /* command should be marked inactive atomically with qc completion */
4570 if (ata_is_ncq(qc->tf.protocol)) {
4571 link->sactive &= ~(1 << qc->hw_tag);
4572 if (!link->sactive)
4573 ap->nr_active_links--;
4574 } else {
4575 link->active_tag = ATA_TAG_POISON;
4576 ap->nr_active_links--;
4579 /* clear exclusive status */
4580 if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
4581 ap->excl_link == link))
4582 ap->excl_link = NULL;
4584 /* atapi: mark qc as inactive to prevent the interrupt handler
4585 * from completing the command twice later, before the error handler
4586 * is called. (when rc != 0 and atapi request sense is needed)
4588 qc->flags &= ~ATA_QCFLAG_ACTIVE;
4589 ap->qc_active &= ~(1ULL << qc->tag);
4591 /* call completion callback */
4592 qc->complete_fn(qc);
4595 static void fill_result_tf(struct ata_queued_cmd *qc)
4597 struct ata_port *ap = qc->ap;
4599 qc->result_tf.flags = qc->tf.flags;
4600 ap->ops->qc_fill_rtf(qc);
4603 static void ata_verify_xfer(struct ata_queued_cmd *qc)
4605 struct ata_device *dev = qc->dev;
4607 if (!ata_is_data(qc->tf.protocol))
4608 return;
4610 if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
4611 return;
4613 dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
4617 * ata_qc_complete - Complete an active ATA command
4618 * @qc: Command to complete
4620 * Indicate to the mid and upper layers that an ATA command has
4621 * completed, with either an ok or not-ok status.
4623 * Refrain from calling this function multiple times when
4624 * successfully completing multiple NCQ commands.
4625 * ata_qc_complete_multiple() should be used instead, which will
4626 * properly update IRQ expect state.
4628 * LOCKING:
4629 * spin_lock_irqsave(host lock)
4631 void ata_qc_complete(struct ata_queued_cmd *qc)
4633 struct ata_port *ap = qc->ap;
4635 /* Trigger the LED (if available) */
4636 ledtrig_disk_activity(!!(qc->tf.flags & ATA_TFLAG_WRITE));
4638 /* XXX: New EH and old EH use different mechanisms to
4639 * synchronize EH with regular execution path.
4641 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
4642 * Normal execution path is responsible for not accessing a
4643 * failed qc. libata core enforces the rule by returning NULL
4644 * from ata_qc_from_tag() for failed qcs.
4646 * Old EH depends on ata_qc_complete() nullifying completion
4647 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
4648 * not synchronize with interrupt handler. Only PIO task is
4649 * taken care of.
4651 if (ap->ops->error_handler) {
4652 struct ata_device *dev = qc->dev;
4653 struct ata_eh_info *ehi = &dev->link->eh_info;
4655 if (unlikely(qc->err_mask))
4656 qc->flags |= ATA_QCFLAG_FAILED;
4659 * Finish internal commands without any further processing
4660 * and always with the result TF filled.
4662 if (unlikely(ata_tag_internal(qc->tag))) {
4663 fill_result_tf(qc);
4664 trace_ata_qc_complete_internal(qc);
4665 __ata_qc_complete(qc);
4666 return;
4670 * Non-internal qc has failed. Fill the result TF and
4671 * summon EH.
4673 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
4674 fill_result_tf(qc);
4675 trace_ata_qc_complete_failed(qc);
4676 ata_qc_schedule_eh(qc);
4677 return;
4680 WARN_ON_ONCE(ap->pflags & ATA_PFLAG_FROZEN);
4682 /* read result TF if requested */
4683 if (qc->flags & ATA_QCFLAG_RESULT_TF)
4684 fill_result_tf(qc);
4686 trace_ata_qc_complete_done(qc);
4687 /* Some commands need post-processing after successful
4688 * completion.
4690 switch (qc->tf.command) {
4691 case ATA_CMD_SET_FEATURES:
4692 if (qc->tf.feature != SETFEATURES_WC_ON &&
4693 qc->tf.feature != SETFEATURES_WC_OFF &&
4694 qc->tf.feature != SETFEATURES_RA_ON &&
4695 qc->tf.feature != SETFEATURES_RA_OFF)
4696 break;
4697 /* fall through */
4698 case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
4699 case ATA_CMD_SET_MULTI: /* multi_count changed */
4700 /* revalidate device */
4701 ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
4702 ata_port_schedule_eh(ap);
4703 break;
4705 case ATA_CMD_SLEEP:
4706 dev->flags |= ATA_DFLAG_SLEEPING;
4707 break;
4710 if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
4711 ata_verify_xfer(qc);
4713 __ata_qc_complete(qc);
4714 } else {
4715 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
4716 return;
4718 /* read result TF if failed or requested */
4719 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
4720 fill_result_tf(qc);
4722 __ata_qc_complete(qc);
4725 EXPORT_SYMBOL_GPL(ata_qc_complete);
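/* Illustrative sketch (not part of this file): a simple non-NCQ LLD
 * interrupt handler looks up the active command, folds the ATA status into
 * the error mask and hands the command back with ata_qc_complete(). Here
 * @status is assumed to be the status register value already read by the
 * driver.
 *
 *	struct ata_queued_cmd *qc;
 *
 *	qc = ata_qc_from_tag(ap, ap->link.active_tag);
 *	if (qc) {
 *		qc->err_mask |= ac_err_mask(status);
 *		ata_qc_complete(qc);
 *	}
 */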
4728 * ata_qc_get_active - get bitmask of active qcs
4729 * @ap: port in question
4731 * LOCKING:
4732 * spin_lock_irqsave(host lock)
4734 * RETURNS:
4735 * Bitmask of active qcs
4737 u64 ata_qc_get_active(struct ata_port *ap)
4739 u64 qc_active = ap->qc_active;
4741 /* ATA_TAG_INTERNAL is sent to hw as tag 0 */
4742 if (qc_active & (1ULL << ATA_TAG_INTERNAL)) {
4743 qc_active |= (1 << 0);
4744 qc_active &= ~(1ULL << ATA_TAG_INTERNAL);
4747 return qc_active;
4749 EXPORT_SYMBOL_GPL(ata_qc_get_active);
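/* Illustrative sketch (not part of this file): NCQ-capable drivers usually
 * pair ata_qc_get_active() with ata_qc_complete_multiple() in their interrupt
 * handler. my_read_done_tags() is a hypothetical helper returning the bitmask
 * of tags the controller has finished; the XOR leaves the tags that are still
 * outstanding, which is what ata_qc_complete_multiple() expects.
 *
 *	u64 done_mask = my_read_done_tags(ap);
 *
 *	ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask);
 */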
4752 * ata_qc_issue - issue taskfile to device
4753 * @qc: command to issue to device
4755 * Prepare an ATA command for submission to the device.
4756 * This includes mapping the data into a DMA-able
4757 * area, filling in the S/G table, and finally
4758 * writing the taskfile to hardware, starting the command.
4760 * LOCKING:
4761 * spin_lock_irqsave(host lock)
4763 void ata_qc_issue(struct ata_queued_cmd *qc)
4765 struct ata_port *ap = qc->ap;
4766 struct ata_link *link = qc->dev->link;
4767 u8 prot = qc->tf.protocol;
4769 /* Make sure only one non-NCQ command is outstanding. The
4770 * check is skipped for old EH because it reuses active qc to
4771 * request ATAPI sense.
4773 WARN_ON_ONCE(ap->ops->error_handler && ata_tag_valid(link->active_tag));
4775 if (ata_is_ncq(prot)) {
4776 WARN_ON_ONCE(link->sactive & (1 << qc->hw_tag));
4778 if (!link->sactive)
4779 ap->nr_active_links++;
4780 link->sactive |= 1 << qc->hw_tag;
4781 } else {
4782 WARN_ON_ONCE(link->sactive);
4784 ap->nr_active_links++;
4785 link->active_tag = qc->tag;
4788 qc->flags |= ATA_QCFLAG_ACTIVE;
4789 ap->qc_active |= 1ULL << qc->tag;
4792 * We guarantee to LLDs that they will have at least one
4793 * non-zero sg if the command is a data command.
4795 if (ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes))
4796 goto sys_err;
4798 if (ata_is_dma(prot) || (ata_is_pio(prot) &&
4799 (ap->flags & ATA_FLAG_PIO_DMA)))
4800 if (ata_sg_setup(qc))
4801 goto sys_err;
4803 /* if device is sleeping, schedule reset and abort the link */
4804 if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
4805 link->eh_info.action |= ATA_EH_RESET;
4806 ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
4807 ata_link_abort(link);
4808 return;
4811 qc->err_mask |= ap->ops->qc_prep(qc);
4812 if (unlikely(qc->err_mask))
4813 goto err;
4814 trace_ata_qc_issue(qc);
4815 qc->err_mask |= ap->ops->qc_issue(qc);
4816 if (unlikely(qc->err_mask))
4817 goto err;
4818 return;
4820 sys_err:
4821 qc->err_mask |= AC_ERR_SYSTEM;
4822 err:
4823 ata_qc_complete(qc);
4827 * ata_phys_link_online - test whether the given link is online
4828 * @link: ATA link to test
4830 * Test whether @link is online. Note that this function returns
4831 * 0 if online status of @link cannot be obtained, so
4832 * ata_link_online(link) != !ata_link_offline(link).
4834 * LOCKING:
4835 * None.
4837 * RETURNS:
4838 * True if the port online status is available and online.
4840 bool ata_phys_link_online(struct ata_link *link)
4842 u32 sstatus;
4844 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
4845 ata_sstatus_online(sstatus))
4846 return true;
4847 return false;
4851 * ata_phys_link_offline - test whether the given link is offline
4852 * @link: ATA link to test
4854 * Test whether @link is offline. Note that this function
4855 * returns 0 if offline status of @link cannot be obtained, so
4856 * ata_link_online(link) != !ata_link_offline(link).
4858 * LOCKING:
4859 * None.
4861 * RETURNS:
4862 * True if the port offline status is available and offline.
4864 bool ata_phys_link_offline(struct ata_link *link)
4866 u32 sstatus;
4868 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
4869 !ata_sstatus_online(sstatus))
4870 return true;
4871 return false;
4875 * ata_link_online - test whether the given link is online
4876 * @link: ATA link to test
4878 * Test whether @link is online. This is identical to
4879 * ata_phys_link_online() when there's no slave link. When
4880 * there's a slave link, this function should only be called on
4881 * the master link and will return true if any of M/S links is
4882 * online.
4884 * LOCKING:
4885 * None.
4887 * RETURNS:
4888 * True if the port online status is available and online.
4890 bool ata_link_online(struct ata_link *link)
4892 struct ata_link *slave = link->ap->slave_link;
4894 WARN_ON(link == slave); /* shouldn't be called on slave link */
4896 return ata_phys_link_online(link) ||
4897 (slave && ata_phys_link_online(slave));
4899 EXPORT_SYMBOL_GPL(ata_link_online);
4902 * ata_link_offline - test whether the given link is offline
4903 * @link: ATA link to test
4905 * Test whether @link is offline. This is identical to
4906 * ata_phys_link_offline() when there's no slave link. When
4907 * there's a slave link, this function should only be called on
4908 * the master link and will return true if both M/S links are
4909 * offline.
4911 * LOCKING:
4912 * None.
4914 * RETURNS:
4915 * True if the port offline status is available and offline.
4917 bool ata_link_offline(struct ata_link *link)
4919 struct ata_link *slave = link->ap->slave_link;
4921 WARN_ON(link == slave); /* shouldn't be called on slave link */
4923 return ata_phys_link_offline(link) &&
4924 (!slave || ata_phys_link_offline(slave));
4926 EXPORT_SYMBOL_GPL(ata_link_offline);
4928 #ifdef CONFIG_PM
4929 static void ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
4930 unsigned int action, unsigned int ehi_flags,
4931 bool async)
4933 struct ata_link *link;
4934 unsigned long flags;
4936 /* Previous resume operation might still be in
4937 * progress. Wait for PM_PENDING to clear.
4939 if (ap->pflags & ATA_PFLAG_PM_PENDING) {
4940 ata_port_wait_eh(ap);
4941 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
4944 /* request PM ops to EH */
4945 spin_lock_irqsave(ap->lock, flags);
4947 ap->pm_mesg = mesg;
4948 ap->pflags |= ATA_PFLAG_PM_PENDING;
4949 ata_for_each_link(link, ap, HOST_FIRST) {
4950 link->eh_info.action |= action;
4951 link->eh_info.flags |= ehi_flags;
4954 ata_port_schedule_eh(ap);
4956 spin_unlock_irqrestore(ap->lock, flags);
4958 if (!async) {
4959 ata_port_wait_eh(ap);
4960 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
4965 * On some hardware, a device fails to respond after being spun down for suspend. As
4966 * the device won't be used before being resumed, we don't need to touch the
4967 * device. Ask EH to skip the usual stuff and proceed directly to suspend.
4969 * http://thread.gmane.org/gmane.linux.ide/46764
4971 static const unsigned int ata_port_suspend_ehi = ATA_EHI_QUIET
4972 | ATA_EHI_NO_AUTOPSY
4973 | ATA_EHI_NO_RECOVERY;
4975 static void ata_port_suspend(struct ata_port *ap, pm_message_t mesg)
4977 ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, false);
4980 static void ata_port_suspend_async(struct ata_port *ap, pm_message_t mesg)
4982 ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, true);
4985 static int ata_port_pm_suspend(struct device *dev)
4987 struct ata_port *ap = to_ata_port(dev);
4989 if (pm_runtime_suspended(dev))
4990 return 0;
4992 ata_port_suspend(ap, PMSG_SUSPEND);
4993 return 0;
4996 static int ata_port_pm_freeze(struct device *dev)
4998 struct ata_port *ap = to_ata_port(dev);
5000 if (pm_runtime_suspended(dev))
5001 return 0;
5003 ata_port_suspend(ap, PMSG_FREEZE);
5004 return 0;
5007 static int ata_port_pm_poweroff(struct device *dev)
5009 ata_port_suspend(to_ata_port(dev), PMSG_HIBERNATE);
5010 return 0;
5013 static const unsigned int ata_port_resume_ehi = ATA_EHI_NO_AUTOPSY
5014 | ATA_EHI_QUIET;
5016 static void ata_port_resume(struct ata_port *ap, pm_message_t mesg)
5018 ata_port_request_pm(ap, mesg, ATA_EH_RESET, ata_port_resume_ehi, false);
5021 static void ata_port_resume_async(struct ata_port *ap, pm_message_t mesg)
5023 ata_port_request_pm(ap, mesg, ATA_EH_RESET, ata_port_resume_ehi, true);
5026 static int ata_port_pm_resume(struct device *dev)
5028 ata_port_resume_async(to_ata_port(dev), PMSG_RESUME);
5029 pm_runtime_disable(dev);
5030 pm_runtime_set_active(dev);
5031 pm_runtime_enable(dev);
5032 return 0;
5036 * For ODDs, the upper layer will poll for media change every few seconds,
5037 * which will make it enter and leave suspend state every few seconds. As
5038 * each suspend causes a hard/soft reset, the gain from runtime suspend is
5039 * very small and the ODD may malfunction after being constantly reset.
5040 * So the idle callback here will not proceed to suspend if a non-ZPODD capable
5041 * ODD is attached to the port.
5043 static int ata_port_runtime_idle(struct device *dev)
5045 struct ata_port *ap = to_ata_port(dev);
5046 struct ata_link *link;
5047 struct ata_device *adev;
5049 ata_for_each_link(link, ap, HOST_FIRST) {
5050 ata_for_each_dev(adev, link, ENABLED)
5051 if (adev->class == ATA_DEV_ATAPI &&
5052 !zpodd_dev_enabled(adev))
5053 return -EBUSY;
5056 return 0;
5059 static int ata_port_runtime_suspend(struct device *dev)
5061 ata_port_suspend(to_ata_port(dev), PMSG_AUTO_SUSPEND);
5062 return 0;
5065 static int ata_port_runtime_resume(struct device *dev)
5067 ata_port_resume(to_ata_port(dev), PMSG_AUTO_RESUME);
5068 return 0;
5071 static const struct dev_pm_ops ata_port_pm_ops = {
5072 .suspend = ata_port_pm_suspend,
5073 .resume = ata_port_pm_resume,
5074 .freeze = ata_port_pm_freeze,
5075 .thaw = ata_port_pm_resume,
5076 .poweroff = ata_port_pm_poweroff,
5077 .restore = ata_port_pm_resume,
5079 .runtime_suspend = ata_port_runtime_suspend,
5080 .runtime_resume = ata_port_runtime_resume,
5081 .runtime_idle = ata_port_runtime_idle,
5084 /* sas ports don't participate in pm runtime management of ata_ports,
5085 * and need to resume ata devices at the domain level, not the per-port
5086 * level. sas suspend/resume is async to allow parallel port recovery
5087 * since sas has multiple ata_port instances per Scsi_Host.
5089 void ata_sas_port_suspend(struct ata_port *ap)
5091 ata_port_suspend_async(ap, PMSG_SUSPEND);
5093 EXPORT_SYMBOL_GPL(ata_sas_port_suspend);
5095 void ata_sas_port_resume(struct ata_port *ap)
5097 ata_port_resume_async(ap, PMSG_RESUME);
5099 EXPORT_SYMBOL_GPL(ata_sas_port_resume);
5102 * ata_host_suspend - suspend host
5103 * @host: host to suspend
5104 * @mesg: PM message
5106 * Suspend @host. Actual operation is performed by port suspend.
5108 int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
5110 host->dev->power.power_state = mesg;
5111 return 0;
5113 EXPORT_SYMBOL_GPL(ata_host_suspend);
5116 * ata_host_resume - resume host
5117 * @host: host to resume
5119 * Resume @host. Actual operation is performed by port resume.
5121 void ata_host_resume(struct ata_host *host)
5123 host->dev->power.power_state = PMSG_ON;
5125 EXPORT_SYMBOL_GPL(ata_host_resume);
5126 #endif
5128 const struct device_type ata_port_type = {
5129 .name = "ata_port",
5130 #ifdef CONFIG_PM
5131 .pm = &ata_port_pm_ops,
5132 #endif
5136 * ata_dev_init - Initialize an ata_device structure
5137 * @dev: Device structure to initialize
5139 * Initialize @dev in preparation for probing.
5141 * LOCKING:
5142 * Inherited from caller.
5144 void ata_dev_init(struct ata_device *dev)
5146 struct ata_link *link = ata_dev_phys_link(dev);
5147 struct ata_port *ap = link->ap;
5148 unsigned long flags;
5150 /* SATA spd limit is bound to the attached device, reset together */
5151 link->sata_spd_limit = link->hw_sata_spd_limit;
5152 link->sata_spd = 0;
5154 /* High bits of dev->flags are used to record warm plug
5155 * requests which occur asynchronously. Synchronize using
5156 * host lock.
5158 spin_lock_irqsave(ap->lock, flags);
5159 dev->flags &= ~ATA_DFLAG_INIT_MASK;
5160 dev->horkage = 0;
5161 spin_unlock_irqrestore(ap->lock, flags);
5163 memset((void *)dev + ATA_DEVICE_CLEAR_BEGIN, 0,
5164 ATA_DEVICE_CLEAR_END - ATA_DEVICE_CLEAR_BEGIN);
5165 dev->pio_mask = UINT_MAX;
5166 dev->mwdma_mask = UINT_MAX;
5167 dev->udma_mask = UINT_MAX;
5171 * ata_link_init - Initialize an ata_link structure
5172 * @ap: ATA port link is attached to
5173 * @link: Link structure to initialize
5174 * @pmp: Port multiplier port number
5176 * Initialize @link.
5178 * LOCKING:
5179 * Kernel thread context (may sleep)
5181 void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
5183 int i;
5185 /* clear everything except for devices */
5186 memset((void *)link + ATA_LINK_CLEAR_BEGIN, 0,
5187 ATA_LINK_CLEAR_END - ATA_LINK_CLEAR_BEGIN);
5189 link->ap = ap;
5190 link->pmp = pmp;
5191 link->active_tag = ATA_TAG_POISON;
5192 link->hw_sata_spd_limit = UINT_MAX;
5194 /* can't use iterator, ap isn't initialized yet */
5195 for (i = 0; i < ATA_MAX_DEVICES; i++) {
5196 struct ata_device *dev = &link->device[i];
5198 dev->link = link;
5199 dev->devno = dev - link->device;
5200 #ifdef CONFIG_ATA_ACPI
5201 dev->gtf_filter = ata_acpi_gtf_filter;
5202 #endif
5203 ata_dev_init(dev);
5208 * sata_link_init_spd - Initialize link->sata_spd_limit
5209 * @link: Link to configure sata_spd_limit for
5211 * Initialize @link->[hw_]sata_spd_limit to the currently
5212 * configured value.
5214 * LOCKING:
5215 * Kernel thread context (may sleep).
5217 * RETURNS:
5218 * 0 on success, -errno on failure.
5220 int sata_link_init_spd(struct ata_link *link)
5222 u8 spd;
5223 int rc;
5225 rc = sata_scr_read(link, SCR_CONTROL, &link->saved_scontrol);
5226 if (rc)
5227 return rc;
5229 spd = (link->saved_scontrol >> 4) & 0xf;
5230 if (spd)
5231 link->hw_sata_spd_limit &= (1 << spd) - 1;
5233 ata_force_link_limits(link);
5235 link->sata_spd_limit = link->hw_sata_spd_limit;
5237 return 0;
5241 * ata_port_alloc - allocate and initialize basic ATA port resources
5242 * @host: ATA host this allocated port belongs to
5244 * Allocate and initialize basic ATA port resources.
5246 * RETURNS:
5247 * Allocate ATA port on success, NULL on failure.
5249 * LOCKING:
5250 * Inherited from calling layer (may sleep).
5252 struct ata_port *ata_port_alloc(struct ata_host *host)
5254 struct ata_port *ap;
5256 DPRINTK("ENTER\n");
5258 ap = kzalloc(sizeof(*ap), GFP_KERNEL);
5259 if (!ap)
5260 return NULL;
5262 ap->pflags |= ATA_PFLAG_INITIALIZING | ATA_PFLAG_FROZEN;
5263 ap->lock = &host->lock;
5264 ap->print_id = -1;
5265 ap->local_port_no = -1;
5266 ap->host = host;
5267 ap->dev = host->dev;
5269 #if defined(ATA_VERBOSE_DEBUG)
5270 /* turn on all debugging levels */
5271 ap->msg_enable = 0x00FF;
5272 #elif defined(ATA_DEBUG)
5273 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
5274 #else
5275 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
5276 #endif
5278 mutex_init(&ap->scsi_scan_mutex);
5279 INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
5280 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
5281 INIT_LIST_HEAD(&ap->eh_done_q);
5282 init_waitqueue_head(&ap->eh_wait_q);
5283 init_completion(&ap->park_req_pending);
5284 timer_setup(&ap->fastdrain_timer, ata_eh_fastdrain_timerfn,
5285 TIMER_DEFERRABLE);
5287 ap->cbl = ATA_CBL_NONE;
5289 ata_link_init(ap, &ap->link, 0);
5291 #ifdef ATA_IRQ_TRAP
5292 ap->stats.unhandled_irq = 1;
5293 ap->stats.idle_irq = 1;
5294 #endif
5295 ata_sff_port_init(ap);
5297 return ap;
5300 static void ata_devres_release(struct device *gendev, void *res)
5302 struct ata_host *host = dev_get_drvdata(gendev);
5303 int i;
5305 for (i = 0; i < host->n_ports; i++) {
5306 struct ata_port *ap = host->ports[i];
5308 if (!ap)
5309 continue;
5311 if (ap->scsi_host)
5312 scsi_host_put(ap->scsi_host);
5316 dev_set_drvdata(gendev, NULL);
5317 ata_host_put(host);
5320 static void ata_host_release(struct kref *kref)
5322 struct ata_host *host = container_of(kref, struct ata_host, kref);
5323 int i;
5325 for (i = 0; i < host->n_ports; i++) {
5326 struct ata_port *ap = host->ports[i];
5328 kfree(ap->pmp_link);
5329 kfree(ap->slave_link);
5330 kfree(ap);
5331 host->ports[i] = NULL;
5333 kfree(host);
5336 void ata_host_get(struct ata_host *host)
5338 kref_get(&host->kref);
5341 void ata_host_put(struct ata_host *host)
5343 kref_put(&host->kref, ata_host_release);
5345 EXPORT_SYMBOL_GPL(ata_host_put);
5348 * ata_host_alloc - allocate and init basic ATA host resources
5349 * @dev: generic device this host is associated with
5350 * @max_ports: maximum number of ATA ports associated with this host
5352 * Allocate and initialize basic ATA host resources. LLD calls
5353 * this function to allocate a host, initializes it fully and
5354 * attaches it using ata_host_register().
5356 * @max_ports ports are allocated and host->n_ports is
5357 * initialized to @max_ports. The caller is allowed to decrease
5358 * host->n_ports before calling ata_host_register(). The unused
5359 * ports will be automatically freed on registration.
5361 * RETURNS:
5362 * Allocate ATA host on success, NULL on failure.
5364 * LOCKING:
5365 * Inherited from calling layer (may sleep).
5367 struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
5369 struct ata_host *host;
5370 size_t sz;
5371 int i;
5372 void *dr;
5374 DPRINTK("ENTER\n");
5376 /* alloc a container for our list of ATA ports (buses) */
5377 sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
5378 host = kzalloc(sz, GFP_KERNEL);
5379 if (!host)
5380 return NULL;
5382 if (!devres_open_group(dev, NULL, GFP_KERNEL))
5383 goto err_free;
5385 dr = devres_alloc(ata_devres_release, 0, GFP_KERNEL);
5386 if (!dr)
5387 goto err_out;
5389 devres_add(dev, dr);
5390 dev_set_drvdata(dev, host);
5392 spin_lock_init(&host->lock);
5393 mutex_init(&host->eh_mutex);
5394 host->dev = dev;
5395 host->n_ports = max_ports;
5396 kref_init(&host->kref);
5398 /* allocate ports bound to this host */
5399 for (i = 0; i < max_ports; i++) {
5400 struct ata_port *ap;
5402 ap = ata_port_alloc(host);
5403 if (!ap)
5404 goto err_out;
5406 ap->port_no = i;
5407 host->ports[i] = ap;
5410 devres_remove_group(dev, NULL);
5411 return host;
5413 err_out:
5414 devres_release_group(dev, NULL);
5415 err_free:
5416 kfree(host);
5417 return NULL;
5419 EXPORT_SYMBOL_GPL(ata_host_alloc);
5422 * ata_host_alloc_pinfo - alloc host and init with port_info array
5423 * @dev: generic device this host is associated with
5424 * @ppi: array of ATA port_info to initialize host with
5425 * @n_ports: number of ATA ports attached to this host
5427 * Allocate ATA host and initialize with info from @ppi. If NULL
5428 * terminated, @ppi may contain fewer entries than @n_ports. The
5429 * last entry will be used for the remaining ports.
5431 * RETURNS:
5432 * Allocate ATA host on success, NULL on failure.
5434 * LOCKING:
5435 * Inherited from calling layer (may sleep).
5437 struct ata_host *ata_host_alloc_pinfo(struct device *dev,
5438 const struct ata_port_info * const * ppi,
5439 int n_ports)
5441 const struct ata_port_info *pi;
5442 struct ata_host *host;
5443 int i, j;
5445 host = ata_host_alloc(dev, n_ports);
5446 if (!host)
5447 return NULL;
5449 for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
5450 struct ata_port *ap = host->ports[i];
5452 if (ppi[j])
5453 pi = ppi[j++];
5455 ap->pio_mask = pi->pio_mask;
5456 ap->mwdma_mask = pi->mwdma_mask;
5457 ap->udma_mask = pi->udma_mask;
5458 ap->flags |= pi->flags;
5459 ap->link.flags |= pi->link_flags;
5460 ap->ops = pi->port_ops;
5462 if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
5463 host->ops = pi->port_ops;
5466 return host;
5468 EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
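/* Illustrative sketch (not part of this file): a single-port LLD probe
 * routine can describe its port once and let ata_host_alloc_pinfo()
 * replicate it. The port_info contents and my_port_ops below are
 * hypothetical.
 *
 *	static const struct ata_port_info my_pi = {
 *		.flags		= ATA_FLAG_SATA,
 *		.pio_mask	= ATA_PIO4,
 *		.udma_mask	= ATA_UDMA6,
 *		.port_ops	= &my_port_ops,
 *	};
 *	const struct ata_port_info *ppi[] = { &my_pi, NULL };
 *	struct ata_host *host;
 *
 *	host = ata_host_alloc_pinfo(dev, ppi, 1);
 *	if (!host)
 *		return -ENOMEM;
 */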
5470 static void ata_host_stop(struct device *gendev, void *res)
5472 struct ata_host *host = dev_get_drvdata(gendev);
5473 int i;
5475 WARN_ON(!(host->flags & ATA_HOST_STARTED));
5477 for (i = 0; i < host->n_ports; i++) {
5478 struct ata_port *ap = host->ports[i];
5480 if (ap->ops->port_stop)
5481 ap->ops->port_stop(ap);
5484 if (host->ops->host_stop)
5485 host->ops->host_stop(host);
5489 * ata_finalize_port_ops - finalize ata_port_operations
5490 * @ops: ata_port_operations to finalize
5492 * An ata_port_operations can inherit from another ops and that
5493 * ops can again inherit from another. This can go on as many
5494 * times as necessary as long as there is no loop in the
5495 * inheritance chain.
5497 * Ops tables are finalized when the host is started. NULL or
5498 * unspecified entries are inherited from the closest ancestor
5499 * which has the method and the entry is populated with it.
5500 * After finalization, the ops table directly points to all the
5501 * methods and ->inherits is no longer necessary and cleared.
5503 * Using ATA_OP_NULL, inheriting ops can force a method to NULL.
5505 * LOCKING:
5506 * None.
5508 static void ata_finalize_port_ops(struct ata_port_operations *ops)
5510 static DEFINE_SPINLOCK(lock);
5511 const struct ata_port_operations *cur;
5512 void **begin = (void **)ops;
5513 void **end = (void **)&ops->inherits;
5514 void **pp;
5516 if (!ops || !ops->inherits)
5517 return;
5519 spin_lock(&lock);
5521 for (cur = ops->inherits; cur; cur = cur->inherits) {
5522 void **inherit = (void **)cur;
5524 for (pp = begin; pp < end; pp++, inherit++)
5525 if (!*pp)
5526 *pp = *inherit;
5529 for (pp = begin; pp < end; pp++)
5530 if (IS_ERR(*pp))
5531 *pp = NULL;
5533 ops->inherits = NULL;
5535 spin_unlock(&lock);
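/* Illustrative sketch (not part of this file): what inheritance looks like
 * from an LLD's side. The derived ops below are hypothetical; any method left
 * unspecified is filled in from the ancestor chain when the host is started,
 * and ATA_OP_NULL forces an inherited method back to NULL.
 *
 *	static struct ata_port_operations my_port_ops = {
 *		.inherits	= &sata_port_ops,
 *		.hardreset	= my_hardreset,
 *		.softreset	= ATA_OP_NULL,
 *	};
 */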
5539 * ata_host_start - start and freeze ports of an ATA host
5540 * @host: ATA host to start ports for
5542 * Start and then freeze ports of @host. Started status is
5543 * recorded in host->flags, so this function can be called
5544 * multiple times. Ports are guaranteed to get started only
5545 * once. If host->ops isn't initialized yet, it's set to the
5546 * first non-dummy port ops.
5548 * LOCKING:
5549 * Inherited from calling layer (may sleep).
5551 * RETURNS:
5552 * 0 if all ports are started successfully, -errno otherwise.
5554 int ata_host_start(struct ata_host *host)
5556 int have_stop = 0;
5557 void *start_dr = NULL;
5558 int i, rc;
5560 if (host->flags & ATA_HOST_STARTED)
5561 return 0;
5563 ata_finalize_port_ops(host->ops);
5565 for (i = 0; i < host->n_ports; i++) {
5566 struct ata_port *ap = host->ports[i];
5568 ata_finalize_port_ops(ap->ops);
5570 if (!host->ops && !ata_port_is_dummy(ap))
5571 host->ops = ap->ops;
5573 if (ap->ops->port_stop)
5574 have_stop = 1;
5577 if (host->ops->host_stop)
5578 have_stop = 1;
5580 if (have_stop) {
5581 start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
5582 if (!start_dr)
5583 return -ENOMEM;
5586 for (i = 0; i < host->n_ports; i++) {
5587 struct ata_port *ap = host->ports[i];
5589 if (ap->ops->port_start) {
5590 rc = ap->ops->port_start(ap);
5591 if (rc) {
5592 if (rc != -ENODEV)
5593 dev_err(host->dev,
5594 "failed to start port %d (errno=%d)\n",
5595 i, rc);
5596 goto err_out;
5599 ata_eh_freeze_port(ap);
5602 if (start_dr)
5603 devres_add(host->dev, start_dr);
5604 host->flags |= ATA_HOST_STARTED;
5605 return 0;
5607 err_out:
5608 while (--i >= 0) {
5609 struct ata_port *ap = host->ports[i];
5611 if (ap->ops->port_stop)
5612 ap->ops->port_stop(ap);
5614 devres_free(start_dr);
5615 return rc;
5617 EXPORT_SYMBOL_GPL(ata_host_start);
5620 * ata_host_init - Initialize a host struct for sas (ipr, libsas)
5621 * @host: host to initialize
5622 * @dev: device host is attached to
5623 * @ops: port_ops
5626 void ata_host_init(struct ata_host *host, struct device *dev,
5627 struct ata_port_operations *ops)
5629 spin_lock_init(&host->lock);
5630 mutex_init(&host->eh_mutex);
5631 host->n_tags = ATA_MAX_QUEUE;
5632 host->dev = dev;
5633 host->ops = ops;
5634 kref_init(&host->kref);
5636 EXPORT_SYMBOL_GPL(ata_host_init);
5638 void __ata_port_probe(struct ata_port *ap)
5640 struct ata_eh_info *ehi = &ap->link.eh_info;
5641 unsigned long flags;
5643 /* kick EH for boot probing */
5644 spin_lock_irqsave(ap->lock, flags);
5646 ehi->probe_mask |= ATA_ALL_DEVICES;
5647 ehi->action |= ATA_EH_RESET;
5648 ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
5650 ap->pflags &= ~ATA_PFLAG_INITIALIZING;
5651 ap->pflags |= ATA_PFLAG_LOADING;
5652 ata_port_schedule_eh(ap);
5654 spin_unlock_irqrestore(ap->lock, flags);
5657 int ata_port_probe(struct ata_port *ap)
5659 int rc = 0;
5661 if (ap->ops->error_handler) {
5662 __ata_port_probe(ap);
5663 ata_port_wait_eh(ap);
5664 } else {
5665 DPRINTK("ata%u: bus probe begin\n", ap->print_id);
5666 rc = ata_bus_probe(ap);
5667 DPRINTK("ata%u: bus probe end\n", ap->print_id);
5669 return rc;
5673 static void async_port_probe(void *data, async_cookie_t cookie)
5675 struct ata_port *ap = data;
5678 * If we're not allowed to scan this host in parallel,
5679 * we need to wait until all previous scans have completed
5680 * before going further.
5681 * Jeff Garzik says this is only within a controller, so we
5682 * don't need to wait for port 0, only for later ports.
5684 if (!(ap->host->flags & ATA_HOST_PARALLEL_SCAN) && ap->port_no != 0)
5685 async_synchronize_cookie(cookie);
5687 (void)ata_port_probe(ap);
5689 /* in order to keep device order, we need to synchronize at this point */
5690 async_synchronize_cookie(cookie);
5692 ata_scsi_scan_host(ap, 1);
5696 * ata_host_register - register initialized ATA host
5697 * @host: ATA host to register
5698 * @sht: template for SCSI host
5700 * Register initialized ATA host. @host is allocated using
5701 * ata_host_alloc() and fully initialized by LLD. This function
5702 * starts ports, registers @host with ATA and SCSI layers and
5703 * probes registered devices.
5705 * LOCKING:
5706 * Inherited from calling layer (may sleep).
5708 * RETURNS:
5709 * 0 on success, -errno otherwise.
5711 int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
5713 int i, rc;
5715 host->n_tags = clamp(sht->can_queue, 1, ATA_MAX_QUEUE);
5717 /* host must have been started */
5718 if (!(host->flags & ATA_HOST_STARTED)) {
5719 dev_err(host->dev, "BUG: trying to register unstarted host\n");
5720 WARN_ON(1);
5721 return -EINVAL;
5724 /* Blow away unused ports. This happens when LLD can't
5725 * determine the exact number of ports to allocate at
5726 * allocation time.
5728 for (i = host->n_ports; host->ports[i]; i++)
5729 kfree(host->ports[i]);
5731 /* give ports names and add SCSI hosts */
5732 for (i = 0; i < host->n_ports; i++) {
5733 host->ports[i]->print_id = atomic_inc_return(&ata_print_id);
5734 host->ports[i]->local_port_no = i + 1;
5737 /* Create associated sysfs transport objects */
5738 for (i = 0; i < host->n_ports; i++) {
5739 rc = ata_tport_add(host->dev,host->ports[i]);
5740 if (rc) {
5741 goto err_tadd;
5745 rc = ata_scsi_add_hosts(host, sht);
5746 if (rc)
5747 goto err_tadd;
5749 /* set cable, sata_spd_limit and report */
5750 for (i = 0; i < host->n_ports; i++) {
5751 struct ata_port *ap = host->ports[i];
5752 unsigned long xfer_mask;
5754 /* set SATA cable type if still unset */
5755 if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
5756 ap->cbl = ATA_CBL_SATA;
5758 /* init sata_spd_limit to the current value */
5759 sata_link_init_spd(&ap->link);
5760 if (ap->slave_link)
5761 sata_link_init_spd(ap->slave_link);
5763 /* print per-port info to dmesg */
5764 xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
5765 ap->udma_mask);
5767 if (!ata_port_is_dummy(ap)) {
5768 ata_port_info(ap, "%cATA max %s %s\n",
5769 (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
5770 ata_mode_string(xfer_mask),
5771 ap->link.eh_info.desc);
5772 ata_ehi_clear_desc(&ap->link.eh_info);
5773 } else
5774 ata_port_info(ap, "DUMMY\n");
5777 /* perform each probe asynchronously */
5778 for (i = 0; i < host->n_ports; i++) {
5779 struct ata_port *ap = host->ports[i];
5780 ap->cookie = async_schedule(async_port_probe, ap);
5783 return 0;
5785 err_tadd:
5786 while (--i >= 0) {
5787 ata_tport_delete(host->ports[i]);
5789 return rc;
5792 EXPORT_SYMBOL_GPL(ata_host_register);
5795 * ata_host_activate - start host, request IRQ and register it
5796 * @host: target ATA host
5797 * @irq: IRQ to request
5798 * @irq_handler: irq_handler used when requesting IRQ
5799 * @irq_flags: irq_flags used when requesting IRQ
5800 * @sht: scsi_host_template to use when registering the host
5802 * After allocating an ATA host and initializing it, most libata
5803 * LLDs perform three steps to activate the host - start host,
5804 * request IRQ and register it. This helper takes necessary
5805 * arguments and performs the three steps in one go.
5807 * An invalid IRQ skips the IRQ registration and expects the host to
5808 * have set polling mode on the port. In this case, @irq_handler
5809 * should be NULL.
5811 * LOCKING:
5812 * Inherited from calling layer (may sleep).
5814 * RETURNS:
5815 * 0 on success, -errno otherwise.
5817 int ata_host_activate(struct ata_host *host, int irq,
5818 irq_handler_t irq_handler, unsigned long irq_flags,
5819 struct scsi_host_template *sht)
5821 int i, rc;
5822 char *irq_desc;
5824 rc = ata_host_start(host);
5825 if (rc)
5826 return rc;
5828 /* Special case for polling mode */
5829 if (!irq) {
5830 WARN_ON(irq_handler);
5831 return ata_host_register(host, sht);
5834 irq_desc = devm_kasprintf(host->dev, GFP_KERNEL, "%s[%s]",
5835 dev_driver_string(host->dev),
5836 dev_name(host->dev));
5837 if (!irq_desc)
5838 return -ENOMEM;
5840 rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
5841 irq_desc, host);
5842 if (rc)
5843 return rc;
5845 for (i = 0; i < host->n_ports; i++)
5846 ata_port_desc(host->ports[i], "irq %d", irq);
5848 rc = ata_host_register(host, sht);
5849 /* if failed, just free the IRQ and leave ports alone */
5850 if (rc)
5851 devm_free_irq(host->dev, irq, host);
5853 return rc;
5855 EXPORT_SYMBOL_GPL(ata_host_activate);
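/* Illustrative sketch (not part of this file): the tail of a typical probe
 * routine once the host has been allocated and initialized. my_interrupt and
 * my_sht are hypothetical; the IRQ number comes from the bus layer.
 *
 *	rc = ata_host_activate(host, irq, my_interrupt, IRQF_SHARED, &my_sht);
 *	if (rc)
 *		return rc;
 *
 *	return 0;
 */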
5858 * ata_port_detach - Detach ATA port in preparation of device removal
5859 * @ap: ATA port to be detached
5861 * Detach all ATA devices and the associated SCSI devices of @ap;
5862 * then, remove the associated SCSI host. @ap is guaranteed to
5863 * be quiescent on return from this function.
5865 * LOCKING:
5866 * Kernel thread context (may sleep).
5868 static void ata_port_detach(struct ata_port *ap)
5870 unsigned long flags;
5871 struct ata_link *link;
5872 struct ata_device *dev;
5874 if (!ap->ops->error_handler)
5875 goto skip_eh;
5877 /* tell EH we're leaving & flush EH */
5878 spin_lock_irqsave(ap->lock, flags);
5879 ap->pflags |= ATA_PFLAG_UNLOADING;
5880 ata_port_schedule_eh(ap);
5881 spin_unlock_irqrestore(ap->lock, flags);
5883 /* wait till EH commits suicide */
5884 ata_port_wait_eh(ap);
5886 /* it better be dead now */
5887 WARN_ON(!(ap->pflags & ATA_PFLAG_UNLOADED));
5889 cancel_delayed_work_sync(&ap->hotplug_task);
5891 skip_eh:
5892 /* clean up zpodd on port removal */
5893 ata_for_each_link(link, ap, HOST_FIRST) {
5894 ata_for_each_dev(dev, link, ALL) {
5895 if (zpodd_dev_enabled(dev))
5896 zpodd_exit(dev);
5899 if (ap->pmp_link) {
5900 int i;
5901 for (i = 0; i < SATA_PMP_MAX_PORTS; i++)
5902 ata_tlink_delete(&ap->pmp_link[i]);
5904 /* remove the associated SCSI host */
5905 scsi_remove_host(ap->scsi_host);
5906 ata_tport_delete(ap);
5910 * ata_host_detach - Detach all ports of an ATA host
5911 * @host: Host to detach
5913 * Detach all ports of @host.
5915 * LOCKING:
5916 * Kernel thread context (may sleep).
5918 void ata_host_detach(struct ata_host *host)
5920 int i;
5922 for (i = 0; i < host->n_ports; i++) {
5923 /* Ensure ata_port probe has completed */
5924 async_synchronize_cookie(host->ports[i]->cookie + 1);
5925 ata_port_detach(host->ports[i]);
5928 /* the host is dead now, dissociate ACPI */
5929 ata_acpi_dissociate(host);
5931 EXPORT_SYMBOL_GPL(ata_host_detach);
5933 #ifdef CONFIG_PCI
5936 * ata_pci_remove_one - PCI layer callback for device removal
5937 * @pdev: PCI device that was removed
5939 * PCI layer indicates to libata via this hook that a hot-unplug or
5940 * module unload event has occurred. Detach all ports. Resource
5941 * release is handled via devres.
5943 * LOCKING:
5944 * Inherited from PCI layer (may sleep).
5946 void ata_pci_remove_one(struct pci_dev *pdev)
5948 struct ata_host *host = pci_get_drvdata(pdev);
5950 ata_host_detach(host);
5952 EXPORT_SYMBOL_GPL(ata_pci_remove_one);
5954 void ata_pci_shutdown_one(struct pci_dev *pdev)
5956 struct ata_host *host = pci_get_drvdata(pdev);
5957 int i;
5959 for (i = 0; i < host->n_ports; i++) {
5960 struct ata_port *ap = host->ports[i];
5962 ap->pflags |= ATA_PFLAG_FROZEN;
5964 /* Disable port interrupts */
5965 if (ap->ops->freeze)
5966 ap->ops->freeze(ap);
5968 /* Stop the port DMA engines */
5969 if (ap->ops->port_stop)
5970 ap->ops->port_stop(ap);
5973 EXPORT_SYMBOL_GPL(ata_pci_shutdown_one);
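/* Illustrative sketch (not part of this file): a PCI LLD normally wires the
 * helpers above straight into its pci_driver; only the probe routine (and,
 * under CONFIG_PM, the suspend/resume hooks) is driver specific. The my_*
 * names below are hypothetical.
 *
 *	static struct pci_driver my_pci_driver = {
 *		.name		= "my_ata",
 *		.id_table	= my_pci_ids,
 *		.probe		= my_init_one,
 *		.remove		= ata_pci_remove_one,
 *		.shutdown	= ata_pci_shutdown_one,
 *	};
 */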
5975 /* move to PCI subsystem */
5976 int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
5978 unsigned long tmp = 0;
5980 switch (bits->width) {
5981 case 1: {
5982 u8 tmp8 = 0;
5983 pci_read_config_byte(pdev, bits->reg, &tmp8);
5984 tmp = tmp8;
5985 break;
5987 case 2: {
5988 u16 tmp16 = 0;
5989 pci_read_config_word(pdev, bits->reg, &tmp16);
5990 tmp = tmp16;
5991 break;
5993 case 4: {
5994 u32 tmp32 = 0;
5995 pci_read_config_dword(pdev, bits->reg, &tmp32);
5996 tmp = tmp32;
5997 break;
6000 default:
6001 return -EINVAL;
6004 tmp &= bits->mask;
6006 return (tmp == bits->val) ? 1 : 0;
6008 EXPORT_SYMBOL_GPL(pci_test_config_bits);
6010 #ifdef CONFIG_PM
6011 void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
6013 pci_save_state(pdev);
6014 pci_disable_device(pdev);
6016 if (mesg.event & PM_EVENT_SLEEP)
6017 pci_set_power_state(pdev, PCI_D3hot);
6019 EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
6021 int ata_pci_device_do_resume(struct pci_dev *pdev)
6023 int rc;
6025 pci_set_power_state(pdev, PCI_D0);
6026 pci_restore_state(pdev);
6028 rc = pcim_enable_device(pdev);
6029 if (rc) {
6030 dev_err(&pdev->dev,
6031 "failed to enable device after resume (%d)\n", rc);
6032 return rc;
6035 pci_set_master(pdev);
6036 return 0;
6038 EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
6040 int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
6042 struct ata_host *host = pci_get_drvdata(pdev);
6043 int rc = 0;
6045 rc = ata_host_suspend(host, mesg);
6046 if (rc)
6047 return rc;
6049 ata_pci_device_do_suspend(pdev, mesg);
6051 return 0;
6053 EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
6055 int ata_pci_device_resume(struct pci_dev *pdev)
6057 struct ata_host *host = pci_get_drvdata(pdev);
6058 int rc;
6060 rc = ata_pci_device_do_resume(pdev);
6061 if (rc == 0)
6062 ata_host_resume(host);
6063 return rc;
6065 EXPORT_SYMBOL_GPL(ata_pci_device_resume);
6066 #endif /* CONFIG_PM */
6067 #endif /* CONFIG_PCI */
6070 * ata_platform_remove_one - Platform layer callback for device removal
6071 * @pdev: Platform device that was removed
6073 * Platform layer indicates to libata via this hook that a hot-unplug or
6074 * module unload event has occurred. Detach all ports. Resource
6075 * release is handled via devres.
6077 * LOCKING:
6078 * Inherited from platform layer (may sleep).
6080 int ata_platform_remove_one(struct platform_device *pdev)
6082 struct ata_host *host = platform_get_drvdata(pdev);
6084 ata_host_detach(host);
6086 return 0;
6088 EXPORT_SYMBOL_GPL(ata_platform_remove_one);
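/* Illustrative sketch (not part of this file): platform-bus LLDs use this
 * helper as their .remove callback; the probe routine and match table (my_*
 * below) are hypothetical and driver specific.
 *
 *	static struct platform_driver my_platform_driver = {
 *		.probe	= my_probe,
 *		.remove	= ata_platform_remove_one,
 *		.driver	= {
 *			.name		= "my_ata",
 *			.of_match_table	= my_of_match,
 *		},
 *	};
 */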
6090 #ifdef CONFIG_ATA_FORCE
6091 static int __init ata_parse_force_one(char **cur,
6092 struct ata_force_ent *force_ent,
6093 const char **reason)
6095 static const struct ata_force_param force_tbl[] __initconst = {
6096 { "40c", .cbl = ATA_CBL_PATA40 },
6097 { "80c", .cbl = ATA_CBL_PATA80 },
6098 { "short40c", .cbl = ATA_CBL_PATA40_SHORT },
6099 { "unk", .cbl = ATA_CBL_PATA_UNK },
6100 { "ign", .cbl = ATA_CBL_PATA_IGN },
6101 { "sata", .cbl = ATA_CBL_SATA },
6102 { "1.5Gbps", .spd_limit = 1 },
6103 { "3.0Gbps", .spd_limit = 2 },
6104 { "noncq", .horkage_on = ATA_HORKAGE_NONCQ },
6105 { "ncq", .horkage_off = ATA_HORKAGE_NONCQ },
6106 { "noncqtrim", .horkage_on = ATA_HORKAGE_NO_NCQ_TRIM },
6107 { "ncqtrim", .horkage_off = ATA_HORKAGE_NO_NCQ_TRIM },
6108 { "dump_id", .horkage_on = ATA_HORKAGE_DUMP_ID },
6109 { "pio0", .xfer_mask = 1 << (ATA_SHIFT_PIO + 0) },
6110 { "pio1", .xfer_mask = 1 << (ATA_SHIFT_PIO + 1) },
6111 { "pio2", .xfer_mask = 1 << (ATA_SHIFT_PIO + 2) },
6112 { "pio3", .xfer_mask = 1 << (ATA_SHIFT_PIO + 3) },
6113 { "pio4", .xfer_mask = 1 << (ATA_SHIFT_PIO + 4) },
6114 { "pio5", .xfer_mask = 1 << (ATA_SHIFT_PIO + 5) },
6115 { "pio6", .xfer_mask = 1 << (ATA_SHIFT_PIO + 6) },
6116 { "mwdma0", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 0) },
6117 { "mwdma1", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 1) },
6118 { "mwdma2", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 2) },
6119 { "mwdma3", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 3) },
6120 { "mwdma4", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 4) },
6121 { "udma0", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
6122 { "udma16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
6123 { "udma/16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
6124 { "udma1", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
6125 { "udma25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
6126 { "udma/25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
6127 { "udma2", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
6128 { "udma33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
6129 { "udma/33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
6130 { "udma3", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
6131 { "udma44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
6132 { "udma/44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
6133 { "udma4", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
6134 { "udma66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
6135 { "udma/66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
6136 { "udma5", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
6137 { "udma100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
6138 { "udma/100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
6139 { "udma6", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
6140 { "udma133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
6141 { "udma/133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
6142 { "udma7", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 7) },
6143 { "nohrst", .lflags = ATA_LFLAG_NO_HRST },
6144 { "nosrst", .lflags = ATA_LFLAG_NO_SRST },
6145 { "norst", .lflags = ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST },
6146 { "rstonce", .lflags = ATA_LFLAG_RST_ONCE },
6147 { "atapi_dmadir", .horkage_on = ATA_HORKAGE_ATAPI_DMADIR },
6148 { "disable", .horkage_on = ATA_HORKAGE_DISABLE },
        char *start = *cur, *p = *cur;
        char *id, *val, *endp;
        const struct ata_force_param *match_fp = NULL;
        int nr_matches = 0, i;

        /* find where this param ends and update *cur */
        while (*p != '\0' && *p != ',')
                p++;

        if (*p == '\0')
                *cur = p;
        else
                *cur = p + 1;

        *p = '\0';

        /* parse */
        p = strchr(start, ':');
        if (!p) {
                val = strstrip(start);
                goto parse_val;
        }
        *p = '\0';

        id = strstrip(start);
        val = strstrip(p + 1);

        /* parse id */
        p = strchr(id, '.');
        if (p) {
                *p++ = '\0';
                force_ent->device = simple_strtoul(p, &endp, 10);
                if (p == endp || *endp != '\0') {
                        *reason = "invalid device";
                        return -EINVAL;
                }
        }

        force_ent->port = simple_strtoul(id, &endp, 10);
        if (id == endp || *endp != '\0') {
                *reason = "invalid port/link";
                return -EINVAL;
        }

parse_val:
        /* parse val, allow shortcuts so that both 1.5 and 1.5Gbps work */
        for (i = 0; i < ARRAY_SIZE(force_tbl); i++) {
                const struct ata_force_param *fp = &force_tbl[i];

                if (strncasecmp(val, fp->name, strlen(val)))
                        continue;

                nr_matches++;
                match_fp = fp;

                if (strcasecmp(val, fp->name) == 0) {
                        nr_matches = 1;
                        break;
                }
        }

        if (!nr_matches) {
                *reason = "unknown value";
                return -EINVAL;
        }
        if (nr_matches > 1) {
                *reason = "ambiguous value";
                return -EINVAL;
        }

        force_ent->param = *match_fp;

        return 0;
}
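/*
 * Illustrative note (not from the original source): because the value is
 * matched with strncasecmp() over strlen(val), a shortened value such as
 * "1.5" selects the "1.5Gbps" entry in force_tbl, while an exact match
 * (e.g. "ncq", which is also a prefix of "ncqtrim") wins outright and is
 * never reported as ambiguous.  A minimal sketch of the same matching
 * rule, assuming ordinary C string helpers and a made-up name list:
 */
#if 0
static const char *example_match(const char *val,
                                 const char * const *names, int n)
{
        const char *match = NULL;
        int nr = 0, i;

        for (i = 0; i < n; i++) {
                if (strncasecmp(val, names[i], strlen(val)))
                        continue;               /* not even a prefix */
                nr++;
                match = names[i];
                if (!strcasecmp(val, names[i]))
                        return names[i];        /* exact match always wins */
        }
        return nr == 1 ? match : NULL;          /* NULL: unknown or ambiguous */
}
#endif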
static void __init ata_parse_force_param(void)
{
        int idx = 0, size = 1;
        int last_port = -1, last_device = -1;
        char *p, *cur, *next;

        /* calculate maximum number of params and allocate force_tbl */
        for (p = ata_force_param_buf; *p; p++)
                if (*p == ',')
                        size++;

        ata_force_tbl = kcalloc(size, sizeof(ata_force_tbl[0]), GFP_KERNEL);
        if (!ata_force_tbl) {
                printk(KERN_WARNING "ata: failed to extend force table, "
                       "libata.force ignored\n");
                return;
        }

        /* parse and populate the table */
        for (cur = ata_force_param_buf; *cur != '\0'; cur = next) {
                const char *reason = "";
                struct ata_force_ent te = { .port = -1, .device = -1 };

                next = cur;
                if (ata_parse_force_one(&next, &te, &reason)) {
                        printk(KERN_WARNING "ata: failed to parse force "
                               "parameter \"%s\" (%s)\n",
                               cur, reason);
                        continue;
                }

                if (te.port == -1) {
                        te.port = last_port;
                        te.device = last_device;
                }

                ata_force_tbl[idx++] = te;

                last_port = te.port;
                last_device = te.device;
        }

        ata_force_tbl_size = idx;
}
static void ata_free_force_param(void)
{
        kfree(ata_force_tbl);
}
#else
static inline void ata_parse_force_param(void) { }
static inline void ata_free_force_param(void) { }
#endif
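/*
 * Usage illustration (not from the original source): libata.force= takes a
 * comma separated list of "[ID:]VAL" entries, where ID is either a port
 * number or "port.device".  An entry without an ID inherits the ID of the
 * previous entry, which is what the last_port/last_device tracking in
 * ata_parse_force_param() implements.  For example, a (made up) command
 * line of
 *
 *      libata.force=1:noncq,udma4,2.00:disable
 *
 * turns NCQ off and limits transfers to UDMA/66 on port 1 (the second
 * entry inherits the "1"), and disables device 0 on port 2.
 */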
static int __init ata_init(void)
{
        int rc;

        ata_parse_force_param();

        rc = ata_sff_init();
        if (rc) {
                ata_free_force_param();
                return rc;
        }

        libata_transport_init();
        ata_scsi_transport_template = ata_attach_transport();
        if (!ata_scsi_transport_template) {
                ata_sff_exit();
                rc = -ENOMEM;
                goto err_out;
        }

        printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
        return 0;

err_out:
        return rc;
}
static void __exit ata_exit(void)
{
        ata_release_transport(ata_scsi_transport_template);
        libata_transport_exit();
        ata_sff_exit();
        ata_free_force_param();
}

subsys_initcall(ata_init);
module_exit(ata_exit);
static DEFINE_RATELIMIT_STATE(ratelimit, HZ / 5, 1);

int ata_ratelimit(void)
{
        return __ratelimit(&ratelimit);
}
EXPORT_SYMBOL_GPL(ata_ratelimit);
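/*
 * Illustrative sketch (not part of libata): ata_ratelimit() is typically
 * used to gate noisy warnings from interrupt or error paths so they are
 * emitted at most a few times per second.  The helper and message below
 * are made up for the example.
 */
#if 0
static void example_warn_spurious(struct ata_port *ap)
{
        if (ata_ratelimit())
                ata_port_printk(ap, KERN_WARNING,
                                "spurious interrupt (example message)\n");
}
#endif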
/**
 * ata_msleep - ATA EH owner aware msleep
 * @ap: ATA port to attribute the sleep to
 * @msecs: duration to sleep in milliseconds
 *
 * Sleeps @msecs.  If the current task is owner of @ap's EH, the
 * ownership is released before going to sleep and reacquired
 * after the sleep is complete.  IOW, other ports sharing the
 * @ap->host will be allowed to own the EH while this task is
 * sleeping.
 *
 * LOCKING:
 * Might sleep.
 */
void ata_msleep(struct ata_port *ap, unsigned int msecs)
{
        bool owns_eh = ap && ap->host->eh_owner == current;

        if (owns_eh)
                ata_eh_release(ap);

        if (msecs < 20) {
                unsigned long usecs = msecs * USEC_PER_MSEC;
                usleep_range(usecs, usecs + 50);
        } else {
                msleep(msecs);
        }

        if (owns_eh)
                ata_eh_acquire(ap);
}
EXPORT_SYMBOL_GPL(ata_msleep);
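/*
 * Illustrative sketch (not part of libata): a hypothetical reset/poll loop
 * running in EH context.  Using ata_msleep() instead of msleep() lets
 * other ports on the same host enter EH while this one waits.  The
 * readiness callback and 10ms interval are stand-ins for the example.
 */
#if 0
static int example_poll_ready(struct ata_port *ap, unsigned long deadline,
                              bool (*ready)(struct ata_port *ap))
{
        while (!ready(ap)) {
                if (time_after(jiffies, deadline))
                        return -EBUSY;
                ata_msleep(ap, 10);     /* releases/reacquires EH ownership */
        }
        return 0;
}
#endif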
/**
 * ata_wait_register - wait until register value changes
 * @ap: ATA port to wait register for, can be NULL
 * @reg: IO-mapped register
 * @mask: Mask to apply to read register value
 * @val: Wait condition
 * @interval: polling interval in milliseconds
 * @timeout: timeout in milliseconds
 *
 * Waiting for some bits of register to change is a common
 * operation for ATA controllers.  This function reads 32bit LE
 * IO-mapped register @reg and tests for the following condition.
 *
 *      (*@reg & mask) != val
 *
 * If the condition is met, it returns; otherwise, the read is
 * repeated every @interval milliseconds until @timeout expires.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 *
 * RETURNS:
 * The final register value.
 */
u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask, u32 val,
                      unsigned long interval, unsigned long timeout)
{
        unsigned long deadline;
        u32 tmp;

        tmp = ioread32(reg);

        /* Calculate timeout _after_ the first read to make sure
         * preceding writes reach the controller before starting to
         * eat away the timeout.
         */
        deadline = ata_deadline(jiffies, timeout);

        while ((tmp & mask) == val && time_before(jiffies, deadline)) {
                ata_msleep(ap, interval);
                tmp = ioread32(reg);
        }

        return tmp;
}
EXPORT_SYMBOL_GPL(ata_wait_register);
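/*
 * Illustrative sketch (not part of libata): waiting for a made-up BUSY bit
 * to clear in a 32-bit MMIO status register.  ata_wait_register() keeps
 * polling while (value & mask) == val and hands back the last value read,
 * so the caller still has to check whether the wait actually succeeded.
 */
#if 0
#define EXAMPLE_STATUS_BUSY     0x00000001      /* hypothetical bit */

static int example_wait_not_busy(struct ata_port *ap, void __iomem *status_reg)
{
        u32 status;

        /* poll every 10ms, time out after 1000ms */
        status = ata_wait_register(ap, status_reg, EXAMPLE_STATUS_BUSY,
                                   EXAMPLE_STATUS_BUSY, 10, 1000);

        return (status & EXAMPLE_STATUS_BUSY) ? -EBUSY : 0;
}
#endif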
/*
 * Dummy port_ops
 */
static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
{
        return AC_ERR_SYSTEM;
}

static void ata_dummy_error_handler(struct ata_port *ap)
{
        /* truly dummy */
}

struct ata_port_operations ata_dummy_port_ops = {
        .qc_prep        = ata_noop_qc_prep,
        .qc_issue       = ata_dummy_qc_issue,
        .error_handler  = ata_dummy_error_handler,
        .sched_eh       = ata_std_sched_eh,
        .end_eh         = ata_std_end_eh,
};
EXPORT_SYMBOL_GPL(ata_dummy_port_ops);

const struct ata_port_info ata_dummy_port_info = {
        .port_ops       = &ata_dummy_port_ops,
};
EXPORT_SYMBOL_GPL(ata_dummy_port_info);
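/*
 * Illustrative sketch (not part of libata): controller drivers commonly
 * point unused or disabled ports at ata_dummy_port_info so the core still
 * sets the port up but every queued command fails with AC_ERR_SYSTEM.
 * The port_info array below is made up for the example.
 */
#if 0
static const struct ata_port_info example_pi = {
        /* a real driver would fill in flags, pio_mask, udma_mask, port_ops */
};

static const struct ata_port_info *example_ppi[] = {
        &example_pi,            /* port 0: functional */
        &ata_dummy_port_info,   /* port 1: not wired up on this board */
};
#endif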
/*
 * Utility print functions
 */
void ata_port_printk(const struct ata_port *ap, const char *level,
                     const char *fmt, ...)
{
        struct va_format vaf;
        va_list args;

        va_start(args, fmt);

        vaf.fmt = fmt;
        vaf.va = &args;

        printk("%sata%u: %pV", level, ap->print_id, &vaf);

        va_end(args);
}
EXPORT_SYMBOL(ata_port_printk);

void ata_link_printk(const struct ata_link *link, const char *level,
                     const char *fmt, ...)
{
        struct va_format vaf;
        va_list args;

        va_start(args, fmt);

        vaf.fmt = fmt;
        vaf.va = &args;

        if (sata_pmp_attached(link->ap) || link->ap->slave_link)
                printk("%sata%u.%02u: %pV",
                       level, link->ap->print_id, link->pmp, &vaf);
        else
                printk("%sata%u: %pV",
                       level, link->ap->print_id, &vaf);

        va_end(args);
}
EXPORT_SYMBOL(ata_link_printk);

void ata_dev_printk(const struct ata_device *dev, const char *level,
                    const char *fmt, ...)
{
        struct va_format vaf;
        va_list args;

        va_start(args, fmt);

        vaf.fmt = fmt;
        vaf.va = &args;

        printk("%sata%u.%02u: %pV",
               level, dev->link->ap->print_id, dev->link->pmp + dev->devno,
               &vaf);

        va_end(args);
}
EXPORT_SYMBOL(ata_dev_printk);
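/*
 * Illustrative sketch (not part of libata): the helpers above prefix
 * messages with the usual libata topology naming, e.g. "ata3: ..." for a
 * port, "ata3.15: ..." for a PMP link and "ata3.00: ..." for a device.
 * Callers normally reach them through ata_port_err()/ata_dev_warn()-style
 * wrapper macros rather than calling the *_printk() functions directly;
 * the helper below is made up for the example.
 */
#if 0
static void example_report(struct ata_device *dev)
{
        /* prints e.g. "ataP.DD: example: NCQ disabled" at KERN_WARNING */
        ata_dev_printk(dev, KERN_WARNING, "example: NCQ %s\n",
                       (dev->horkage & ATA_HORKAGE_NONCQ) ?
                       "disabled" : "enabled");
}
#endif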
void ata_print_version(const struct device *dev, const char *version)
{
        dev_printk(KERN_DEBUG, dev, "version %s\n", version);
}
EXPORT_SYMBOL(ata_print_version);