// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  libata-eh.c - libata error handling
 *
 *  Copyright 2006 Tejun Heo <htejun@gmail.com>
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/driver-api/libata.rst
 *
 *  Hardware documentation available from http://www.t13.org/ and
 *  http://www.sata-io.org/
 */

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/export.h>
#include <linux/pci.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include "../scsi/scsi_transport_api.h"

#include <linux/libata.h>

#include <trace/events/libata.h>

#include "libata.h"
enum {
	/* speed down verdicts */
	ATA_EH_SPDN_NCQ_OFF		= (1 << 0),
	ATA_EH_SPDN_SPEED_DOWN		= (1 << 1),
	ATA_EH_SPDN_FALLBACK_TO_PIO	= (1 << 2),
	ATA_EH_SPDN_KEEP_ERRORS		= (1 << 3),

	/* error flags */
	ATA_EFLAG_IS_IO			= (1 << 0),
	ATA_EFLAG_DUBIOUS_XFER		= (1 << 1),
	ATA_EFLAG_OLD_ER		= (1 << 31),

	/* error categories */
	ATA_ECAT_NONE			= 0,
	ATA_ECAT_ATA_BUS		= 1,
	ATA_ECAT_TOUT_HSM		= 2,
	ATA_ECAT_UNK_DEV		= 3,
	ATA_ECAT_DUBIOUS_NONE		= 4,
	ATA_ECAT_DUBIOUS_ATA_BUS	= 5,
	ATA_ECAT_DUBIOUS_TOUT_HSM	= 6,
	ATA_ECAT_DUBIOUS_UNK_DEV	= 7,
	ATA_ECAT_NR			= 8,

	ATA_EH_CMD_DFL_TIMEOUT		=  5000,

	/* always put at least this amount of time between resets */
	ATA_EH_RESET_COOL_DOWN		=  5000,

	/* Waiting in ->prereset can never be reliable.  It's
	 * sometimes nice to wait there but it can't be depended upon;
	 * otherwise, we wouldn't be resetting.  Just give it enough
	 * time for most drives to spin up.
	 */
	ATA_EH_PRERESET_TIMEOUT		= 10000,
	ATA_EH_FASTDRAIN_INTERVAL	=  3000,

	/* probe speed down parameters, see ata_eh_schedule_probe() */
	ATA_EH_PROBE_TRIAL_INTERVAL	= 60000,	/* 1 min */
	ATA_EH_PROBE_TRIALS		= 2,
};
/* The following table determines how we sequence resets.  Each entry
 * represents timeout for that try.  The first try can be soft or
 * hardreset.  All others are hardreset if available.  In most cases
 * the first reset w/ 10sec timeout should succeed.  Following entries
 * are mostly for error handling, hotplug and those outlier devices that
 * take an exceptionally long time to recover from reset.
 */
static const unsigned int ata_eh_reset_timeouts[] = {
	10000,	/* most drives spin up by 10sec */
	10000,	/* > 99% working drives spin up before 20sec */
	35000,	/* give > 30 secs of idleness for outlier devices */
	 5000,	/* and sweet one last chance */
	UINT_MAX, /* > 1 min has elapsed, give up */
};

static const unsigned int ata_eh_identify_timeouts[] = {
	 5000,	/* covers > 99% of successes and not too boring on failures */
	10000,	/* combined time till here is enough even for media access */
	30000,	/* for true idiots */
	UINT_MAX,
};

static const unsigned int ata_eh_revalidate_timeouts[] = {
	15000,	/* Some drives are slow to read log pages when waking-up */
	15000,	/* combined time till here is enough even for media access */
	UINT_MAX,
};

static const unsigned int ata_eh_flush_timeouts[] = {
	15000,	/* be generous with flush */
	15000,	/* ditto */
	30000,	/* and even more generous */
	UINT_MAX,
};

static const unsigned int ata_eh_other_timeouts[] = {
	 5000,	/* same rationale as identify timeout */
	10000,	/* ditto */
	/* but no merciful 30sec for other commands, it just isn't worth it */
	UINT_MAX,
};

struct ata_eh_cmd_timeout_ent {
	const u8		*commands;
	const unsigned int	*timeouts;
};
/* The following table determines timeouts to use for EH internal
 * commands.  Each table entry is a command class and matches the
 * commands the entry applies to and the timeout table to use.
 *
 * On the retry after a command timed out, the next timeout value from
 * the table is used.  If the table doesn't contain further entries,
 * the last value is used.
 *
 * ehc->cmd_timeout_idx keeps track of which timeout to use per
 * command class, so if SET_FEATURES times out on the first try, the
 * next try will use the second timeout value only for that class.
 */
#define CMDS(cmds...)	(const u8 []){ cmds, 0 }
static const struct ata_eh_cmd_timeout_ent
ata_eh_cmd_timeout_table[ATA_EH_CMD_TIMEOUT_TABLE_SIZE] = {
	{ .commands = CMDS(ATA_CMD_ID_ATA, ATA_CMD_ID_ATAPI),
	  .timeouts = ata_eh_identify_timeouts, },
	{ .commands = CMDS(ATA_CMD_READ_LOG_EXT, ATA_CMD_READ_LOG_DMA_EXT),
	  .timeouts = ata_eh_revalidate_timeouts, },
	{ .commands = CMDS(ATA_CMD_READ_NATIVE_MAX, ATA_CMD_READ_NATIVE_MAX_EXT),
	  .timeouts = ata_eh_other_timeouts, },
	{ .commands = CMDS(ATA_CMD_SET_MAX, ATA_CMD_SET_MAX_EXT),
	  .timeouts = ata_eh_other_timeouts, },
	{ .commands = CMDS(ATA_CMD_SET_FEATURES),
	  .timeouts = ata_eh_other_timeouts, },
	{ .commands = CMDS(ATA_CMD_INIT_DEV_PARAMS),
	  .timeouts = ata_eh_other_timeouts, },
	{ .commands = CMDS(ATA_CMD_FLUSH, ATA_CMD_FLUSH_EXT),
	  .timeouts = ata_eh_flush_timeouts },
	{ .commands = CMDS(ATA_CMD_VERIFY),
	  .timeouts = ata_eh_reset_timeouts },
};
#undef CMDS
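
/*
 * Illustrative note (not part of the original file): with the tables above,
 * an EH-internal IDENTIFY DEVICE first runs with a 5s timeout; if it times
 * out, ata_internal_cmd_timed_out() advances cmd_timeout_idx so the retry
 * waits 10s, then 30s, with the last entry reused for any further retries.
 */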
static void __ata_port_freeze(struct ata_port *ap);
static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
			  struct ata_device **r_failed_dev);
#ifdef CONFIG_PM
static void ata_eh_handle_port_suspend(struct ata_port *ap);
static void ata_eh_handle_port_resume(struct ata_port *ap);
#else /* CONFIG_PM */
static void ata_eh_handle_port_suspend(struct ata_port *ap)
{ }

static void ata_eh_handle_port_resume(struct ata_port *ap)
{ }
#endif /* CONFIG_PM */
static __printf(2, 0) void __ata_ehi_pushv_desc(struct ata_eh_info *ehi,
						const char *fmt, va_list args)
{
	ehi->desc_len += vscnprintf(ehi->desc + ehi->desc_len,
				    ATA_EH_DESC_LEN - ehi->desc_len,
				    fmt, args);
}

/**
 *	__ata_ehi_push_desc - push error description without adding separator
 *	@ehi: target EHI
 *	@fmt: printf format string
 *
 *	Format string according to @fmt and append it to @ehi->desc.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void __ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	__ata_ehi_pushv_desc(ehi, fmt, args);
	va_end(args);
}
EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);

/**
 *	ata_ehi_push_desc - push error description with separator
 *	@ehi: target EHI
 *	@fmt: printf format string
 *
 *	Format string according to @fmt and append it to @ehi->desc.
 *	If @ehi->desc is not empty, ", " is added in-between.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
{
	va_list args;

	if (ehi->desc_len)
		__ata_ehi_push_desc(ehi, ", ");

	va_start(args, fmt);
	__ata_ehi_pushv_desc(ehi, fmt, args);
	va_end(args);
}
EXPORT_SYMBOL_GPL(ata_ehi_push_desc);

/**
 *	ata_ehi_clear_desc - clean error description
 *	@ehi: target EHI
 *
 *	Clear @ehi->desc.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_ehi_clear_desc(struct ata_eh_info *ehi)
{
	ehi->desc[0] = '\0';
	ehi->desc_len = 0;
}
EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
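
/*
 * Usage sketch (illustrative, not from the original file):
 *
 *	ata_ehi_push_desc(ehi, "irq_stat 0x%08x", irq_stat);
 *
 * appends to ehi->desc, inserting a ", " separator when the description
 * is already non-empty; __ata_ehi_push_desc() skips the separator.
 */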
/**
 *	ata_port_desc - append port description
 *	@ap: target ATA port
 *	@fmt: printf format string
 *
 *	Format string according to @fmt and append it to port
 *	description.  If port description is not empty, " " is added
 *	in-between.  This function is to be used while initializing
 *	ata_host.  The description is printed on host registration.
 *
 *	LOCKING:
 *	None.
 */
void ata_port_desc(struct ata_port *ap, const char *fmt, ...)
{
	va_list args;

	WARN_ON(!(ap->pflags & ATA_PFLAG_INITIALIZING));

	if (ap->link.eh_info.desc_len)
		__ata_ehi_push_desc(&ap->link.eh_info, " ");

	va_start(args, fmt);
	__ata_ehi_pushv_desc(&ap->link.eh_info, fmt, args);
	va_end(args);
}
EXPORT_SYMBOL_GPL(ata_port_desc);
#ifdef CONFIG_PCI
/**
 *	ata_port_pbar_desc - append PCI BAR description
 *	@ap: target ATA port
 *	@bar: target PCI BAR
 *	@offset: offset into PCI BAR
 *	@name: name of the area
 *
 *	If @offset is negative, this function formats a string which
 *	contains the name, address, size and type of the BAR and
 *	appends it to the port description.  If @offset is zero or
 *	positive, only name and offsetted address is appended.
 *
 *	LOCKING:
 *	None.
 */
void ata_port_pbar_desc(struct ata_port *ap, int bar, ssize_t offset,
			const char *name)
{
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	char *type = "";
	unsigned long long start, len;

	if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
		type = "m";
	else if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
		type = "i";

	start = (unsigned long long)pci_resource_start(pdev, bar);
	len = (unsigned long long)pci_resource_len(pdev, bar);

	if (offset < 0)
		ata_port_desc(ap, "%s %s%llu@0x%llx", name, type, len, start);
	else
		ata_port_desc(ap, "%s 0x%llx", name,
				start + (unsigned long long)offset);
}
EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
#endif /* CONFIG_PCI */
static int ata_lookup_timeout_table(u8 cmd)
{
	int i;

	for (i = 0; i < ATA_EH_CMD_TIMEOUT_TABLE_SIZE; i++) {
		const u8 *cur;

		for (cur = ata_eh_cmd_timeout_table[i].commands; *cur; cur++)
			if (*cur == cmd)
				return i;
	}

	return -1;
}
/**
 *	ata_internal_cmd_timeout - determine timeout for an internal command
 *	@dev: target device
 *	@cmd: internal command to be issued
 *
 *	Determine timeout for internal command @cmd for @dev.
 *
 *	LOCKING:
 *	EH context.
 *
 *	RETURNS:
 *	Determined timeout.
 */
unsigned int ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int ent = ata_lookup_timeout_table(cmd);
	int idx;

	if (ent < 0)
		return ATA_EH_CMD_DFL_TIMEOUT;

	idx = ehc->cmd_timeout_idx[dev->devno][ent];
	return ata_eh_cmd_timeout_table[ent].timeouts[idx];
}

/**
 *	ata_internal_cmd_timed_out - notification for internal command timeout
 *	@dev: target device
 *	@cmd: internal command which timed out
 *
 *	Notify EH that internal command @cmd for @dev timed out.  This
 *	function should be called only for commands whose timeouts are
 *	determined using ata_internal_cmd_timeout().
 *
 *	LOCKING:
 *	EH context.
 */
void ata_internal_cmd_timed_out(struct ata_device *dev, u8 cmd)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int ent = ata_lookup_timeout_table(cmd);
	int idx;

	if (ent < 0)
		return;

	idx = ehc->cmd_timeout_idx[dev->devno][ent];
	if (ata_eh_cmd_timeout_table[ent].timeouts[idx + 1] != UINT_MAX)
		ehc->cmd_timeout_idx[dev->devno][ent]++;
}
static void ata_ering_record(struct ata_ering *ering, unsigned int eflags,
			     unsigned int err_mask)
{
	struct ata_ering_entry *ent;

	WARN_ON(!err_mask);

	ering->cursor++;
	ering->cursor %= ATA_ERING_SIZE;

	ent = &ering->ring[ering->cursor];
	ent->eflags = eflags;
	ent->err_mask = err_mask;
	ent->timestamp = get_jiffies_64();
}

static struct ata_ering_entry *ata_ering_top(struct ata_ering *ering)
{
	struct ata_ering_entry *ent = &ering->ring[ering->cursor];

	if (ent->err_mask)
		return ent;
	return NULL;
}

int ata_ering_map(struct ata_ering *ering,
		  int (*map_fn)(struct ata_ering_entry *, void *),
		  void *arg)
{
	int idx, rc = 0;
	struct ata_ering_entry *ent;

	idx = ering->cursor;
	do {
		ent = &ering->ring[idx];
		if (!ent->err_mask)
			break;
		rc = map_fn(ent, arg);
		if (rc)
			break;
		idx = (idx - 1 + ATA_ERING_SIZE) % ATA_ERING_SIZE;
	} while (idx != ering->cursor);

	return rc;
}

static int ata_ering_clear_cb(struct ata_ering_entry *ent, void *void_arg)
{
	ent->eflags |= ATA_EFLAG_OLD_ER;
	return 0;
}

static void ata_ering_clear(struct ata_ering *ering)
{
	ata_ering_map(ering, ata_ering_clear_cb, NULL);
}
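
/*
 * Note (illustrative, not from the original file): ata_ering_map() walks the
 * ring newest-first starting at the cursor and stops at the first unused
 * entry or when @map_fn returns non-zero; speed_down_verdict_cb() below
 * uses this to stop at entries older than its cut-off.
 */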
static unsigned int ata_eh_dev_action(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;

	return ehc->i.action | ehc->i.dev_action[dev->devno];
}

static void ata_eh_clear_action(struct ata_link *link, struct ata_device *dev,
				struct ata_eh_info *ehi, unsigned int action)
{
	struct ata_device *tdev;

	if (!dev) {
		ehi->action &= ~action;
		ata_for_each_dev(tdev, link, ALL)
			ehi->dev_action[tdev->devno] &= ~action;
	} else {
		/* doesn't make sense for port-wide EH actions */
		WARN_ON(!(action & ATA_EH_PERDEV_MASK));

		/* break ehi->action into ehi->dev_action */
		if (ehi->action & action) {
			ata_for_each_dev(tdev, link, ALL)
				ehi->dev_action[tdev->devno] |=
					ehi->action & action;
			ehi->action &= ~action;
		}

		/* turn off the specified per-dev action */
		ehi->dev_action[dev->devno] &= ~action;
	}
}
/**
 *	ata_eh_acquire - acquire EH ownership
 *	@ap: ATA port to acquire EH ownership for
 *
 *	Acquire EH ownership for @ap.  This is the basic exclusion
 *	mechanism for ports sharing a host.  Only one port hanging off
 *	the same host can claim the ownership of EH.
 *
 *	LOCKING:
 *	EH context.
 */
void ata_eh_acquire(struct ata_port *ap)
{
	mutex_lock(&ap->host->eh_mutex);
	WARN_ON_ONCE(ap->host->eh_owner);
	ap->host->eh_owner = current;
}

/**
 *	ata_eh_release - release EH ownership
 *	@ap: ATA port to release EH ownership for
 *
 *	Release EH ownership for @ap.  The caller must have acquired
 *	EH ownership using ata_eh_acquire() previously.
 *
 *	LOCKING:
 *	EH context.
 */
void ata_eh_release(struct ata_port *ap)
{
	WARN_ON_ONCE(ap->host->eh_owner != current);
	ap->host->eh_owner = NULL;
	mutex_unlock(&ap->host->eh_mutex);
}
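
/*
 * Usage sketch (illustrative): EH brackets port recovery with
 *
 *	ata_eh_acquire(ap);
 *	...reset/revalidate the port...
 *	ata_eh_release(ap);
 *
 * so only one port per host runs EH at a time; see
 * ata_scsi_port_error_handler() below.
 */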
static void ata_eh_dev_disable(struct ata_device *dev)
{
	ata_acpi_on_disable(dev);
	ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 | ATA_DNXFER_QUIET);
	dev->class++;

	/*
	 * From now till the next successful probe, ering is used to
	 * track probe failures.  Clear accumulated device error info.
	 */
	ata_ering_clear(&dev->ering);

	ata_dev_free_resources(dev);
}

static void ata_eh_unload(struct ata_port *ap)
{
	struct ata_link *link;
	struct ata_device *dev;
	unsigned long flags;

	/*
	 * Unless we are restarting, transition all enabled devices to
	 * standby power mode.
	 */
	if (system_state != SYSTEM_RESTART) {
		ata_for_each_link(link, ap, PMP_FIRST) {
			ata_for_each_dev(dev, link, ENABLED)
				ata_dev_power_set_standby(dev);
		}
	}

	/*
	 * Restore SControl IPM and SPD for the next driver and
	 * disable attached devices.
	 */
	ata_for_each_link(link, ap, PMP_FIRST) {
		sata_scr_write(link, SCR_CONTROL, link->saved_scontrol & 0xff0);
		ata_for_each_dev(dev, link, ENABLED)
			ata_eh_dev_disable(dev);
	}

	/* freeze and set UNLOADED */
	spin_lock_irqsave(ap->lock, flags);

	ata_port_freeze(ap);			/* won't be thawed */
	ap->pflags &= ~ATA_PFLAG_EH_PENDING;	/* clear pending from freeze */
	ap->pflags |= ATA_PFLAG_UNLOADED;

	spin_unlock_irqrestore(ap->lock, flags);
}
/**
 *	ata_scsi_error - SCSI layer error handler callback
 *	@host: SCSI host on which error occurred
 *
 *	Handles SCSI-layer-thrown error events.
 *
 *	LOCKING:
 *	Inherited from SCSI layer (none, can sleep)
 */
void ata_scsi_error(struct Scsi_Host *host)
{
	struct ata_port *ap = ata_shost_to_port(host);
	unsigned long flags;
	LIST_HEAD(eh_work_q);

	spin_lock_irqsave(host->host_lock, flags);
	list_splice_init(&host->eh_cmd_q, &eh_work_q);
	spin_unlock_irqrestore(host->host_lock, flags);

	ata_scsi_cmd_error_handler(host, ap, &eh_work_q);

	/*
	 * If we timed out but raced with normal completion and there is
	 * nothing to recover (nr_timedout == 0), why exactly are we doing
	 * error recovery?
	 */
	ata_scsi_port_error_handler(host, ap);

	/* finish or retry handled scmd's and clean up */
	WARN_ON(!list_empty(&eh_work_q));
}
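
/*
 * Note (illustrative): SCSI EH for libata is thus split in two stages:
 * ata_scsi_cmd_error_handler() sorts out which scmds actually timed out
 * and hands them to EH, and ata_scsi_port_error_handler() performs the
 * actual port recovery.
 */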
/**
 * ata_scsi_cmd_error_handler - error callback for a list of commands
 * @host:	scsi host containing the port
 * @ap:		ATA port within the host
 * @eh_work_q:	list of commands to process
 *
 * process the given list of commands and return those finished to the
 * ap->eh_done_q.  This function is the first part of the libata error
 * handler which processes a given list of failed commands.
 */
void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap,
				struct list_head *eh_work_q)
{
	int i;
	unsigned long flags;
	struct scsi_cmnd *scmd, *tmp;
	int nr_timedout = 0;

	/* make sure sff pio task is not running */
	ata_sff_flush_pio_task(ap);

	/* synchronize with host lock and sort out timeouts */

	/*
	 * For EH, all qcs are finished in one of three ways -
	 * normal completion, error completion, and SCSI timeout.
	 * Both completions can race against SCSI timeout.  When normal
	 * completion wins, the qc never reaches EH.  When error
	 * completion wins, the qc has ATA_QCFLAG_EH set.
	 *
	 * When SCSI timeout wins, things are a bit more complex.
	 * Normal or error completion can occur after the timeout but
	 * before this point.  In such cases, both types of
	 * completions are honored.  A scmd is determined to have
	 * timed out iff its associated qc is active and not failed.
	 */
	spin_lock_irqsave(ap->lock, flags);

	/*
	 * This must occur under the ap->lock as we don't want
	 * a polled recovery to race the real interrupt handler
	 *
	 * The lost_interrupt handler checks for any completed but
	 * non-notified command and completes much like an IRQ handler.
	 *
	 * We then fall into the error recovery code which will treat
	 * this as if normal completion won the race
	 */
	if (ap->ops->lost_interrupt)
		ap->ops->lost_interrupt(ap);

	list_for_each_entry_safe(scmd, tmp, eh_work_q, eh_entry) {
		struct ata_queued_cmd *qc;

		/*
		 * If the scmd was added to EH, via ata_qc_schedule_eh() ->
		 * scsi_timeout() -> scsi_eh_scmd_add(), scsi_timeout() will
		 * have set DID_TIME_OUT (since libata does not have an abort
		 * handler). Thus, to clear DID_TIME_OUT, clear the host byte.
		 */
		set_host_byte(scmd, DID_OK);

		ata_qc_for_each_raw(ap, qc, i) {
			if (qc->flags & ATA_QCFLAG_ACTIVE &&
			    qc->scsicmd == scmd)
				break;
		}

		if (i < ATA_MAX_QUEUE) {
			/* the scmd has an associated qc */
			if (!(qc->flags & ATA_QCFLAG_EH)) {
				/* which hasn't failed yet, timeout */
				set_host_byte(scmd, DID_TIME_OUT);
				qc->err_mask |= AC_ERR_TIMEOUT;
				qc->flags |= ATA_QCFLAG_EH;
				nr_timedout++;
			}
		} else {
			/* Normal completion occurred after
			 * SCSI timeout but before this point.
			 * Successfully complete it.
			 */
			scmd->retries = scmd->allowed;
			scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
		}
	}

	/*
	 * If we have timed out qcs, they belong to EH from this point
	 * but the state of the controller is unknown.  Freeze the port
	 * to make sure the IRQ handler doesn't diddle with those qcs.
	 * This must be done atomically w.r.t. setting ATA_QCFLAG_EH.
	 */
	if (nr_timedout)
		__ata_port_freeze(ap);

	/* initialize eh_tries */
	ap->eh_tries = ATA_EH_MAX_TRIES;

	spin_unlock_irqrestore(ap->lock, flags);
}
EXPORT_SYMBOL(ata_scsi_cmd_error_handler);
/**
 * ata_scsi_port_error_handler - recover the port after the commands
 * @host:	SCSI host containing the port
 * @ap:		the ATA port
 *
 * Handle the recovery of the port @ap after all the commands
 * have been recovered.
 */
void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap)
{
	unsigned long flags;
	struct ata_link *link;

	/* acquire EH ownership */
	ata_eh_acquire(ap);
 repeat:
	/* kill fast drain timer */
	del_timer_sync(&ap->fastdrain_timer);

	/* process port resume request */
	ata_eh_handle_port_resume(ap);

	/* fetch & clear EH info */
	spin_lock_irqsave(ap->lock, flags);

	ata_for_each_link(link, ap, HOST_FIRST) {
		struct ata_eh_context *ehc = &link->eh_context;
		struct ata_device *dev;

		memset(&link->eh_context, 0, sizeof(link->eh_context));
		link->eh_context.i = link->eh_info;
		memset(&link->eh_info, 0, sizeof(link->eh_info));

		ata_for_each_dev(dev, link, ENABLED) {
			int devno = dev->devno;

			ehc->saved_xfer_mode[devno] = dev->xfer_mode;
			if (ata_ncq_enabled(dev))
				ehc->saved_ncq_enabled |= 1 << devno;

			/* If we are resuming, wake up the device */
			if (ap->pflags & ATA_PFLAG_RESUMING) {
				dev->flags |= ATA_DFLAG_RESUMING;
				ehc->i.dev_action[devno] |= ATA_EH_SET_ACTIVE;
			}
		}
	}

	ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS;
	ap->pflags &= ~ATA_PFLAG_EH_PENDING;
	ap->excl_link = NULL;	/* don't maintain exclusion over EH */

	spin_unlock_irqrestore(ap->lock, flags);

	/* invoke EH, skip if unloading or suspended */
	if (!(ap->pflags & (ATA_PFLAG_UNLOADING | ATA_PFLAG_SUSPENDED)))
		ap->ops->error_handler(ap);
	else {
		/* if unloading, commence suicide */
		if ((ap->pflags & ATA_PFLAG_UNLOADING) &&
		    !(ap->pflags & ATA_PFLAG_UNLOADED))
			ata_eh_unload(ap);
		ata_eh_finish(ap);
	}

	/* process port suspend request */
	ata_eh_handle_port_suspend(ap);

	/*
	 * Exception might have happened after ->error_handler recovered the
	 * port but before this point.  Repeat EH in such case.
	 */
	spin_lock_irqsave(ap->lock, flags);

	if (ap->pflags & ATA_PFLAG_EH_PENDING) {
		if (--ap->eh_tries) {
			spin_unlock_irqrestore(ap->lock, flags);
			goto repeat;
		}
		ata_port_err(ap,
			     "EH pending after %d tries, giving up\n",
			     ATA_EH_MAX_TRIES);
		ap->pflags &= ~ATA_PFLAG_EH_PENDING;
	}

	/* this run is complete, make sure EH info is clear */
	ata_for_each_link(link, ap, HOST_FIRST)
		memset(&link->eh_info, 0, sizeof(link->eh_info));

	/*
	 * end eh (clear host_eh_scheduled) while holding ap->lock such that if
	 * exception occurs after this point but before EH completion, SCSI
	 * midlayer will re-initiate EH.
	 */
	ap->ops->end_eh(ap);

	spin_unlock_irqrestore(ap->lock, flags);
	ata_eh_release(ap);

	scsi_eh_flush_done_q(&ap->eh_done_q);

	/* clean up */
	spin_lock_irqsave(ap->lock, flags);

	ap->pflags &= ~ATA_PFLAG_RESUMING;

	if (ap->pflags & ATA_PFLAG_LOADING)
		ap->pflags &= ~ATA_PFLAG_LOADING;
	else if ((ap->pflags & ATA_PFLAG_SCSI_HOTPLUG) &&
		 !(ap->flags & ATA_FLAG_SAS_HOST))
		schedule_delayed_work(&ap->hotplug_task, 0);

	if (ap->pflags & ATA_PFLAG_RECOVERED)
		ata_port_info(ap, "EH complete\n");

	ap->pflags &= ~(ATA_PFLAG_SCSI_HOTPLUG | ATA_PFLAG_RECOVERED);

	/* tell wait_eh that we're done */
	ap->pflags &= ~ATA_PFLAG_EH_IN_PROGRESS;
	wake_up_all(&ap->eh_wait_q);

	spin_unlock_irqrestore(ap->lock, flags);
}
EXPORT_SYMBOL_GPL(ata_scsi_port_error_handler);
/**
 *	ata_port_wait_eh - Wait for the currently pending EH to complete
 *	@ap: Port to wait EH for
 *
 *	Wait until the currently pending EH is complete.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_port_wait_eh(struct ata_port *ap)
{
	unsigned long flags;
	DEFINE_WAIT(wait);

 retry:
	spin_lock_irqsave(ap->lock, flags);

	while (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS)) {
		prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_irqrestore(ap->lock, flags);
		schedule();
		spin_lock_irqsave(ap->lock, flags);
	}
	finish_wait(&ap->eh_wait_q, &wait);

	spin_unlock_irqrestore(ap->lock, flags);

	/* make sure SCSI EH is complete */
	if (scsi_host_in_recovery(ap->scsi_host)) {
		ata_msleep(ap, 10);
		goto retry;
	}
}
EXPORT_SYMBOL_GPL(ata_port_wait_eh);

static unsigned int ata_eh_nr_in_flight(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	unsigned int tag;
	unsigned int nr = 0;

	/* count only non-internal commands */
	ata_qc_for_each(ap, qc, tag) {
		if (qc)
			nr++;
	}

	return nr;
}
void ata_eh_fastdrain_timerfn(struct timer_list *t)
{
	struct ata_port *ap = from_timer(ap, t, fastdrain_timer);
	unsigned long flags;
	unsigned int cnt;

	spin_lock_irqsave(ap->lock, flags);

	cnt = ata_eh_nr_in_flight(ap);

	/* are we done? */
	if (!cnt)
		goto out_unlock;

	if (cnt == ap->fastdrain_cnt) {
		struct ata_queued_cmd *qc;
		unsigned int tag;

		/* No progress during the last interval, tag all
		 * in-flight qcs as timed out and freeze the port.
		 */
		ata_qc_for_each(ap, qc, tag) {
			if (qc)
				qc->err_mask |= AC_ERR_TIMEOUT;
		}

		ata_port_freeze(ap);
	} else {
		/* some qcs have finished, give it another chance */
		ap->fastdrain_cnt = cnt;
		ap->fastdrain_timer.expires =
			ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
		add_timer(&ap->fastdrain_timer);
	}

out_unlock:
	spin_unlock_irqrestore(ap->lock, flags);
}
/**
 *	ata_eh_set_pending - set ATA_PFLAG_EH_PENDING and activate fast drain
 *	@ap: target ATA port
 *	@fastdrain: activate fast drain
 *
 *	Set ATA_PFLAG_EH_PENDING and activate fast drain if @fastdrain
 *	is non-zero and EH wasn't pending before.  Fast drain ensures
 *	that EH kicks in in a timely manner.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
static void ata_eh_set_pending(struct ata_port *ap, int fastdrain)
{
	unsigned int cnt;

	/* already scheduled? */
	if (ap->pflags & ATA_PFLAG_EH_PENDING)
		return;

	ap->pflags |= ATA_PFLAG_EH_PENDING;

	if (!fastdrain)
		return;

	/* do we have in-flight qcs? */
	cnt = ata_eh_nr_in_flight(ap);
	if (!cnt)
		return;

	/* activate fast drain */
	ap->fastdrain_cnt = cnt;
	ap->fastdrain_timer.expires =
		ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
	add_timer(&ap->fastdrain_timer);
}
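
/*
 * Illustrative note: with fast drain armed, if the number of in-flight qcs
 * is unchanged after ATA_EH_FASTDRAIN_INTERVAL (3s),
 * ata_eh_fastdrain_timerfn() marks them all AC_ERR_TIMEOUT and freezes the
 * port rather than waiting for each command to time out on its own.
 */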
/**
 *	ata_qc_schedule_eh - schedule qc for error handling
 *	@qc: command to schedule error handling for
 *
 *	Schedule error handling for @qc.  EH will kick in as soon as
 *	other commands are drained.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	qc->flags |= ATA_QCFLAG_EH;
	ata_eh_set_pending(ap, 1);

	/* The following will fail if timeout has already expired.
	 * ata_scsi_error() takes care of such scmds on EH entry.
	 * Note that ATA_QCFLAG_EH is unconditionally set after
	 * this function completes.
	 */
	blk_abort_request(scsi_cmd_to_rq(qc->scsicmd));
}

/**
 * ata_std_sched_eh - non-libsas ata_ports issue eh with this common routine
 * @ap: ATA port to schedule EH for
 *
 *	LOCKING: inherited from ata_port_schedule_eh
 *	spin_lock_irqsave(host lock)
 */
void ata_std_sched_eh(struct ata_port *ap)
{
	if (ap->pflags & ATA_PFLAG_INITIALIZING)
		return;

	ata_eh_set_pending(ap, 1);
	scsi_schedule_eh(ap->scsi_host);

	trace_ata_std_sched_eh(ap);
}
EXPORT_SYMBOL_GPL(ata_std_sched_eh);
/**
 * ata_std_end_eh - non-libsas ata_ports complete eh with this common routine
 * @ap: ATA port to end EH for
 *
 * In the libata object model there is a 1:1 mapping of ata_port to
 * shost, so host fields can be directly manipulated under ap->lock, in
 * the libsas case we need to hold a lock at the ha->level to coordinate
 * these events.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_std_end_eh(struct ata_port *ap)
{
	struct Scsi_Host *host = ap->scsi_host;

	host->host_eh_scheduled = 0;
}
EXPORT_SYMBOL(ata_std_end_eh);

/**
 *	ata_port_schedule_eh - schedule error handling without a qc
 *	@ap: ATA port to schedule EH for
 *
 *	Schedule error handling for @ap.  EH will kick in as soon as
 *	all commands are drained.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_port_schedule_eh(struct ata_port *ap)
{
	/* see: ata_std_sched_eh, unless you know better */
	ap->ops->sched_eh(ap);
}
EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link)
{
	struct ata_queued_cmd *qc;
	int tag, nr_aborted = 0;

	/* we're gonna abort all commands, no need for fast drain */
	ata_eh_set_pending(ap, 0);

	/* include internal tag in iteration */
	ata_qc_for_each_with_internal(ap, qc, tag) {
		if (qc && (!link || qc->dev->link == link)) {
			qc->flags |= ATA_QCFLAG_EH;
			ata_qc_complete(qc);
			nr_aborted++;
		}
	}

	if (!nr_aborted)
		ata_port_schedule_eh(ap);

	return nr_aborted;
}

/**
 *	ata_link_abort - abort all qc's on the link
 *	@link: ATA link to abort qc's for
 *
 *	Abort all active qc's on @link and schedule EH.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Number of aborted qc's.
 */
int ata_link_abort(struct ata_link *link)
{
	return ata_do_link_abort(link->ap, link);
}
EXPORT_SYMBOL_GPL(ata_link_abort);

/**
 *	ata_port_abort - abort all qc's on the port
 *	@ap: ATA port to abort qc's for
 *
 *	Abort all active qc's of @ap and schedule EH.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 *
 *	RETURNS:
 *	Number of aborted qc's.
 */
int ata_port_abort(struct ata_port *ap)
{
	return ata_do_link_abort(ap, NULL);
}
EXPORT_SYMBOL_GPL(ata_port_abort);
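
/*
 * Note (illustrative): if nothing was in flight, ata_do_link_abort() still
 * schedules EH explicitly, since completing zero qcs would otherwise never
 * trigger it.
 */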
/**
 *	__ata_port_freeze - freeze port
 *	@ap: ATA port to freeze
 *
 *	This function is called when HSM violation or some other
 *	condition disrupts normal operation of the port.  Frozen port
 *	is not allowed to perform any operation until the port is
 *	thawed, which usually follows a successful reset.
 *
 *	ap->ops->freeze() callback can be used for freezing the port
 *	hardware-wise (e.g. mask interrupt and stop DMA engine).  If a
 *	port cannot be frozen hardware-wise, the interrupt handler
 *	must ack and clear interrupts unconditionally while the port
 *	is frozen.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
static void __ata_port_freeze(struct ata_port *ap)
{
	if (ap->ops->freeze)
		ap->ops->freeze(ap);

	ap->pflags |= ATA_PFLAG_FROZEN;

	trace_ata_port_freeze(ap);
}

/**
 *	ata_port_freeze - abort & freeze port
 *	@ap: ATA port to freeze
 *
 *	Abort and freeze @ap.  The freeze operation must be called
 *	first, because some hardware requires special operations
 *	before the taskfile registers are accessible.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Number of aborted commands.
 */
int ata_port_freeze(struct ata_port *ap)
{
	__ata_port_freeze(ap);

	return ata_port_abort(ap);
}
EXPORT_SYMBOL_GPL(ata_port_freeze);

/**
 *	ata_eh_freeze_port - EH helper to freeze port
 *	@ap: ATA port to freeze
 *
 *	Freeze @ap.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_freeze_port(struct ata_port *ap)
{
	unsigned long flags;

	spin_lock_irqsave(ap->lock, flags);
	__ata_port_freeze(ap);
	spin_unlock_irqrestore(ap->lock, flags);
}
EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
/**
 *	ata_eh_thaw_port - EH helper to thaw port
 *	@ap: ATA port to thaw
 *
 *	Thaw frozen port @ap.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_thaw_port(struct ata_port *ap)
{
	unsigned long flags;

	spin_lock_irqsave(ap->lock, flags);

	ap->pflags &= ~ATA_PFLAG_FROZEN;

	if (ap->ops->thaw)
		ap->ops->thaw(ap);

	spin_unlock_irqrestore(ap->lock, flags);

	trace_ata_port_thaw(ap);
}
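
/*
 * Note (illustrative): the usual frozen-port lifecycle is
 * ata_port_freeze() (or __ata_port_freeze() from the timeout path) ->
 * EH reset -> ata_eh_thaw_port(); the port stays frozen, with interrupts
 * masked or ignored, until a reset succeeds.
 */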
static void ata_eh_scsidone(struct scsi_cmnd *scmd)
{
	/* nada */
}

static void __ata_eh_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scsi_cmnd *scmd = qc->scsicmd;
	unsigned long flags;

	spin_lock_irqsave(ap->lock, flags);
	qc->scsidone = ata_eh_scsidone;
	__ata_qc_complete(qc);
	WARN_ON(ata_tag_valid(qc->tag));
	spin_unlock_irqrestore(ap->lock, flags);

	scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
}

/**
 *	ata_eh_qc_complete - Complete an active ATA command from EH
 *	@qc: Command to complete
 *
 *	Indicate to the mid and upper layers that an ATA command has
 *	completed.  To be used from EH.
 */
void ata_eh_qc_complete(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *scmd = qc->scsicmd;

	scmd->retries = scmd->allowed;
	__ata_eh_qc_complete(qc);
}
/**
 *	ata_eh_qc_retry - Tell midlayer to retry an ATA command after EH
 *	@qc: Command to retry
 *
 *	Indicate to the mid and upper layers that an ATA command
 *	should be retried.  To be used from EH.
 *
 *	SCSI midlayer limits the number of retries to scmd->allowed.
 *	scmd->allowed is incremented for commands which get retried
 *	due to unrelated failures (qc->err_mask is zero).
 */
void ata_eh_qc_retry(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *scmd = qc->scsicmd;

	if (!qc->err_mask)
		scmd->allowed++;
	__ata_eh_qc_complete(qc);
}

/**
 *	ata_dev_disable - disable ATA device
 *	@dev: ATA device to disable
 *
 *	Disable @dev.
 *
 *	Locking:
 *	EH context.
 */
void ata_dev_disable(struct ata_device *dev)
{
	if (!ata_dev_enabled(dev))
		return;

	ata_dev_warn(dev, "disable device\n");

	ata_eh_dev_disable(dev);
}
EXPORT_SYMBOL_GPL(ata_dev_disable);
/**
 *	ata_eh_detach_dev - detach ATA device
 *	@dev: ATA device to detach
 *
 *	Detach @dev.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_detach_dev(struct ata_device *dev)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	unsigned long flags;

	/*
	 * If the device is still enabled, transition it to standby power mode
	 * (i.e. spin down HDDs) and disable it.
	 */
	if (ata_dev_enabled(dev)) {
		ata_dev_power_set_standby(dev);
		ata_eh_dev_disable(dev);
	}

	spin_lock_irqsave(ap->lock, flags);

	dev->flags &= ~ATA_DFLAG_DETACH;

	if (ata_scsi_offline_dev(dev)) {
		dev->flags |= ATA_DFLAG_DETACHED;
		ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
	}

	/* clear per-dev EH info */
	ata_eh_clear_action(link, dev, &link->eh_info, ATA_EH_PERDEV_MASK);
	ata_eh_clear_action(link, dev, &link->eh_context.i, ATA_EH_PERDEV_MASK);
	ehc->saved_xfer_mode[dev->devno] = 0;
	ehc->saved_ncq_enabled &= ~(1 << dev->devno);

	spin_unlock_irqrestore(ap->lock, flags);
}
/**
 *	ata_eh_about_to_do - about to perform eh_action
 *	@link: target ATA link
 *	@dev: target ATA dev for per-dev action (can be NULL)
 *	@action: action about to be performed
 *
 *	Called just before performing EH actions to clear related bits
 *	in @link->eh_info such that eh actions are not unnecessarily
 *	repeated.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev,
			unsigned int action)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_info *ehi = &link->eh_info;
	struct ata_eh_context *ehc = &link->eh_context;
	unsigned long flags;

	trace_ata_eh_about_to_do(link, dev ? dev->devno : 0, action);

	spin_lock_irqsave(ap->lock, flags);

	ata_eh_clear_action(link, dev, ehi, action);

	/* About to take EH action, set RECOVERED.  Ignore actions on
	 * slave links as master will do them again.
	 */
	if (!(ehc->i.flags & ATA_EHI_QUIET) && link != ap->slave_link)
		ap->pflags |= ATA_PFLAG_RECOVERED;

	spin_unlock_irqrestore(ap->lock, flags);
}

/**
 *	ata_eh_done - EH action complete
 *	@link: ATA link for which EH actions are complete
 *	@dev: target ATA dev for per-dev action (can be NULL)
 *	@action: action just completed
 *
 *	Called right after performing EH actions to clear related bits
 *	in @link->eh_context.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_done(struct ata_link *link, struct ata_device *dev,
		 unsigned int action)
{
	struct ata_eh_context *ehc = &link->eh_context;

	trace_ata_eh_done(link, dev ? dev->devno : 0, action);

	ata_eh_clear_action(link, dev, &ehc->i, action);
}
/**
 *	ata_err_string - convert err_mask to descriptive string
 *	@err_mask: error mask to convert to string
 *
 *	Convert @err_mask to descriptive string.  Errors are
 *	prioritized according to severity and only the most severe
 *	error is reported.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Descriptive string for @err_mask
 */
static const char *ata_err_string(unsigned int err_mask)
{
	if (err_mask & AC_ERR_HOST_BUS)
		return "host bus error";
	if (err_mask & AC_ERR_ATA_BUS)
		return "ATA bus error";
	if (err_mask & AC_ERR_TIMEOUT)
		return "timeout";
	if (err_mask & AC_ERR_HSM)
		return "HSM violation";
	if (err_mask & AC_ERR_SYSTEM)
		return "internal error";
	if (err_mask & AC_ERR_MEDIA)
		return "media error";
	if (err_mask & AC_ERR_INVALID)
		return "invalid argument";
	if (err_mask & AC_ERR_DEV)
		return "device error";
	if (err_mask & AC_ERR_NCQ)
		return "NCQ error";
	if (err_mask & AC_ERR_NODEV_HINT)
		return "Polling detection error";
	return "unknown error";
}
/**
 *	atapi_eh_tur - perform ATAPI TEST_UNIT_READY
 *	@dev: target ATAPI device
 *	@r_sense_key: out parameter for sense_key
 *
 *	Perform ATAPI TEST_UNIT_READY.
 *
 *	LOCKING:
 *	EH context (may sleep).
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask on failure.
 */
unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key)
{
	u8 cdb[ATAPI_CDB_LEN] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
	struct ata_taskfile tf;
	unsigned int err_mask;

	ata_tf_init(dev, &tf);

	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.command = ATA_CMD_PACKET;
	tf.protocol = ATAPI_PROT_NODATA;

	err_mask = ata_exec_internal(dev, &tf, cdb, DMA_NONE, NULL, 0, 0);
	if (err_mask == AC_ERR_DEV)
		*r_sense_key = tf.error >> 4;
	return err_mask;
}
1418 * @qc: qc to examine
1420 * For a regular SCSI command, the SCSI completion callback (scsi_done())
1421 * will call scsi_complete(), which will call scsi_decide_disposition(),
1422 * which will call scsi_check_sense(). scsi_complete() finally calls
1423 * scsi_finish_command(). This is fine for SCSI, since any eventual sense
1424 * data is usually returned in the completion itself (without invoking SCSI
1425 * EH). However, for a QC, we always need to fetch the sense data
1426 * explicitly using SCSI EH.
1428 * A command that is completed via SCSI EH will instead be completed using
1429 * scsi_eh_flush_done_q(), which will call scsi_finish_command() directly
1430 * (without ever calling scsi_check_sense()).
1432 * For a command that went through SCSI EH, it is the responsibility of the
1433 * SCSI EH strategy handler to call scsi_decide_disposition(), see e.g. how
1434 * scsi_eh_get_sense() calls scsi_decide_disposition() for SCSI LLDDs that
1435 * do not get the sense data as part of the completion.
1437 * Thus, for QC commands that went via SCSI EH, we need to call
1438 * scsi_check_sense() ourselves, similar to how scsi_eh_get_sense() calls
1439 * scsi_decide_disposition(), which calls scsi_check_sense(), in order to
1440 * set the correct SCSI ML byte (if any).
1446 * SUCCESS or FAILED or NEEDS_RETRY or ADD_TO_MLQUEUE
1448 enum scsi_disposition
ata_eh_decide_disposition(struct ata_queued_cmd
*qc
)
1450 return scsi_check_sense(qc
->scsicmd
);
/**
 *	ata_eh_request_sense - perform REQUEST_SENSE_DATA_EXT
 *	@qc: qc to perform REQUEST_SENSE_SENSE_DATA_EXT to
 *
 *	Perform REQUEST_SENSE_DATA_EXT after the device reported CHECK
 *	SENSE.  This function is an EH helper.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	true if sense data could be fetched, false otherwise.
 */
static bool ata_eh_request_sense(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *cmd = qc->scsicmd;
	struct ata_device *dev = qc->dev;
	struct ata_taskfile tf;
	unsigned int err_mask;

	if (ata_port_is_frozen(qc->ap)) {
		ata_dev_warn(dev, "sense data available but port frozen\n");
		return false;
	}

	if (!ata_id_sense_reporting_enabled(dev->id)) {
		ata_dev_warn(qc->dev, "sense data reporting disabled\n");
		return false;
	}

	ata_tf_init(dev, &tf);
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
	tf.command = ATA_CMD_REQ_SENSE_DATA;
	tf.protocol = ATA_PROT_NODATA;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	/* Ignore err_mask; ATA_ERR might be set */
	if (tf.status & ATA_SENSE) {
		if (ata_scsi_sense_is_valid(tf.lbah, tf.lbam, tf.lbal)) {
			/* Set sense without also setting scsicmd->result */
			scsi_build_sense_buffer(dev->flags & ATA_DFLAG_D_SENSE,
						cmd->sense_buffer, tf.lbah,
						tf.lbam, tf.lbal);
			qc->flags |= ATA_QCFLAG_SENSE_VALID;
			return true;
		}
	} else {
		ata_dev_warn(dev, "request sense failed stat %02x emask %x\n",
			     tf.status, err_mask);
	}

	return false;
}
/**
 *	atapi_eh_request_sense - perform ATAPI REQUEST_SENSE
 *	@dev: device to perform REQUEST_SENSE to
 *	@sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long)
 *	@dfl_sense_key: default sense key to use
 *
 *	Perform ATAPI REQUEST_SENSE after the device reported CHECK
 *	SENSE.  This function is an EH helper.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask on failure
 */
unsigned int atapi_eh_request_sense(struct ata_device *dev,
				    u8 *sense_buf, u8 dfl_sense_key)
{
	u8 cdb[ATAPI_CDB_LEN] =
		{ REQUEST_SENSE, 0, 0, 0, SCSI_SENSE_BUFFERSIZE, 0 };
	struct ata_port *ap = dev->link->ap;
	struct ata_taskfile tf;

	memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);

	/* initialize sense_buf with the error register,
	 * for the case where they are -not- overwritten
	 */
	sense_buf[0] = 0x70;
	sense_buf[2] = dfl_sense_key;

	/* some devices time out if garbage left in tf */
	ata_tf_init(dev, &tf);

	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.command = ATA_CMD_PACKET;

	/* is it pointless to prefer PIO for "safety reasons"? */
	if (ap->flags & ATA_FLAG_PIO_DMA) {
		tf.protocol = ATAPI_PROT_DMA;
		tf.feature |= ATAPI_PKT_DMA;
	} else {
		tf.protocol = ATAPI_PROT_PIO;
		tf.lbam = SCSI_SENSE_BUFFERSIZE;
		tf.lbah = 0;
	}

	return ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE,
				 sense_buf, SCSI_SENSE_BUFFERSIZE, 0);
}
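
/*
 * Note (illustrative): the 0x70 pre-fill above is the fixed-format sense
 * response code, so even if the REQUEST SENSE transfer itself fails the
 * caller is left with a minimally valid buffer carrying @dfl_sense_key.
 */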
/**
 *	ata_eh_analyze_serror - analyze SError for a failed port
 *	@link: ATA link to analyze SError for
 *
 *	Analyze SError if available and further determine cause of
 *	failure.
 *
 *	LOCKING:
 *	None.
 */
static void ata_eh_analyze_serror(struct ata_link *link)
{
	struct ata_eh_context *ehc = &link->eh_context;
	u32 serror = ehc->i.serror;
	unsigned int err_mask = 0, action = 0;
	u32 hotplug_mask;

	if (serror & (SERR_PERSISTENT | SERR_DATA)) {
		err_mask |= AC_ERR_ATA_BUS;
		action |= ATA_EH_RESET;
	}
	if (serror & SERR_PROTOCOL) {
		err_mask |= AC_ERR_HSM;
		action |= ATA_EH_RESET;
	}
	if (serror & SERR_INTERNAL) {
		err_mask |= AC_ERR_SYSTEM;
		action |= ATA_EH_RESET;
	}

	/* Determine whether a hotplug event has occurred.  Both
	 * SError.N/X are considered hotplug events for enabled or
	 * host links.  For disabled PMP links, only N bit is
	 * considered as X bit is left at 1 for link plugging.
	 */
	if (link->lpm_policy > ATA_LPM_MAX_POWER)
		hotplug_mask = 0;	/* hotplug doesn't work w/ LPM */
	else if (!(link->flags & ATA_LFLAG_DISABLED) || ata_is_host_link(link))
		hotplug_mask = SERR_PHYRDY_CHG | SERR_DEV_XCHG;
	else
		hotplug_mask = SERR_PHYRDY_CHG;

	if (serror & hotplug_mask)
		ata_ehi_hotplugged(&ehc->i);

	ehc->i.err_mask |= err_mask;
	ehc->i.action |= action;
}
/**
 *	ata_eh_analyze_tf - analyze taskfile of a failed qc
 *	@qc: qc to analyze
 *
 *	Analyze taskfile of @qc and further determine cause of
 *	failure.  This function also requests ATAPI sense data if
 *	available.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	Determined recovery action
 */
static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc)
{
	const struct ata_taskfile *tf = &qc->result_tf;
	unsigned int tmp, action = 0;
	u8 stat = tf->status, err = tf->error;

	if ((stat & (ATA_BUSY | ATA_DRQ | ATA_DRDY)) != ATA_DRDY) {
		qc->err_mask |= AC_ERR_HSM;
		return ATA_EH_RESET;
	}

	if (stat & (ATA_ERR | ATA_DF)) {
		qc->err_mask |= AC_ERR_DEV;
		/*
		 * Sense data reporting does not work if the
		 * device fault bit is set.
		 */
		if (stat & ATA_DF)
			stat &= ~ATA_SENSE;
	} else {
		return 0;
	}

	switch (qc->dev->class) {
	case ATA_DEV_ATA:
	case ATA_DEV_ZAC:
		/*
		 * Fetch the sense data explicitly if:
		 * -It was a non-NCQ command that failed, or
		 * -It was a NCQ command that failed, but the sense data
		 *  was not included in the NCQ command error log
		 *  (i.e. NCQ autosense is not supported by the device).
		 */
		if (!(qc->flags & ATA_QCFLAG_SENSE_VALID) &&
		    (stat & ATA_SENSE) && ata_eh_request_sense(qc))
			set_status_byte(qc->scsicmd, SAM_STAT_CHECK_CONDITION);
		if (err & ATA_ICRC)
			qc->err_mask |= AC_ERR_ATA_BUS;
		if (err & (ATA_UNC | ATA_AMNF))
			qc->err_mask |= AC_ERR_MEDIA;
		if (err & ATA_IDNF)
			qc->err_mask |= AC_ERR_INVALID;
		break;

	case ATA_DEV_ATAPI:
		if (!ata_port_is_frozen(qc->ap)) {
			tmp = atapi_eh_request_sense(qc->dev,
						qc->scsicmd->sense_buffer,
						qc->result_tf.error >> 4);
			if (!tmp)
				qc->flags |= ATA_QCFLAG_SENSE_VALID;
			else
				qc->err_mask |= tmp;
		}
	}

	if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
		enum scsi_disposition ret = ata_eh_decide_disposition(qc);

		/*
		 * SUCCESS here means that the sense code could be
		 * evaluated and should be passed to the upper layers
		 * for correct evaluation.
		 * FAILED means the sense code could not be interpreted
		 * and the device would need to be reset.
		 * NEEDS_RETRY and ADD_TO_MLQUEUE means that the
		 * command would need to be retried.
		 */
		if (ret == NEEDS_RETRY || ret == ADD_TO_MLQUEUE) {
			qc->flags |= ATA_QCFLAG_RETRY;
			qc->err_mask |= AC_ERR_OTHER;
		} else if (ret != SUCCESS) {
			qc->err_mask |= AC_ERR_HSM;
		}
	}
	if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS))
		action |= ATA_EH_RESET;

	return action;
}
static int ata_eh_categorize_error(unsigned int eflags, unsigned int err_mask,
				   int *xfer_ok)
{
	int base = 0;

	if (!(eflags & ATA_EFLAG_DUBIOUS_XFER))
		*xfer_ok = 1;

	if (!*xfer_ok)
		base = ATA_ECAT_DUBIOUS_NONE;

	if (err_mask & AC_ERR_ATA_BUS)
		return base + ATA_ECAT_ATA_BUS;

	if (err_mask & AC_ERR_TIMEOUT)
		return base + ATA_ECAT_TOUT_HSM;

	if (eflags & ATA_EFLAG_IS_IO) {
		if (err_mask & AC_ERR_HSM)
			return base + ATA_ECAT_TOUT_HSM;
		if ((err_mask &
		     (AC_ERR_DEV|AC_ERR_MEDIA|AC_ERR_INVALID)) == AC_ERR_DEV)
			return base + ATA_ECAT_UNK_DEV;
	}

	return 0;
}

struct speed_down_verdict_arg {
	u64 since;
	int xfer_ok;
	int nr_errors[ATA_ECAT_NR];
};

static int speed_down_verdict_cb(struct ata_ering_entry *ent, void *void_arg)
{
	struct speed_down_verdict_arg *arg = void_arg;
	int cat;

	if ((ent->eflags & ATA_EFLAG_OLD_ER) || (ent->timestamp < arg->since))
		return -1;

	cat = ata_eh_categorize_error(ent->eflags, ent->err_mask,
				      &arg->xfer_ok);
	arg->nr_errors[cat]++;

	return 0;
}
/**
 *	ata_eh_speed_down_verdict - Determine speed down verdict
 *	@dev: Device of interest
 *
 *	This function examines error ring of @dev and determines
 *	whether NCQ needs to be turned off, transfer speed should be
 *	stepped down, or falling back to PIO is necessary.
 *
 *	ECAT_ATA_BUS	: ATA_BUS error for any command
 *
 *	ECAT_TOUT_HSM	: TIMEOUT for any command or HSM violation for
 *			  IO commands
 *
 *	ECAT_UNK_DEV	: Unknown DEV error for IO commands
 *
 *	ECAT_DUBIOUS_*	: Identical to above three but occurred while
 *			  data transfer hasn't been verified.
 *
 *	Verdicts are
 *
 *	NCQ_OFF		: Turn off NCQ.
 *
 *	SPEED_DOWN	: Speed down transfer speed but don't fall back
 *			  to PIO.
 *
 *	FALLBACK_TO_PIO	: Fall back to PIO.
 *
 *	Even if multiple verdicts are returned, only one action is
 *	taken per error.  An action triggered by non-DUBIOUS errors
 *	clears ering, while one triggered by DUBIOUS_* errors doesn't.
 *	This is to expedite speed down decisions right after device is
 *	initially configured.
 *
 *	The following are speed down rules.  #1 and #2 deal with
 *	DUBIOUS errors.
 *
 *	1. If more than one DUBIOUS_ATA_BUS or DUBIOUS_TOUT_HSM errors
 *	   occurred during last 5 mins, SPEED_DOWN and FALLBACK_TO_PIO.
 *
 *	2. If more than one DUBIOUS_TOUT_HSM or DUBIOUS_UNK_DEV errors
 *	   occurred during last 5 mins, NCQ_OFF.
 *
 *	3. If more than 8 ATA_BUS, TOUT_HSM or UNK_DEV errors
 *	   occurred during last 5 mins, FALLBACK_TO_PIO
 *
 *	4. If more than 3 TOUT_HSM or UNK_DEV errors occurred
 *	   during last 10 mins, NCQ_OFF.
 *
 *	5. If more than 3 ATA_BUS or TOUT_HSM errors, or more than 6
 *	   UNK_DEV errors occurred during last 10 mins, SPEED_DOWN.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	OR of ATA_EH_SPDN_* flags.
 */
static unsigned int ata_eh_speed_down_verdict(struct ata_device *dev)
{
	const u64 j5mins = 5LLU * 60 * HZ, j10mins = 10LLU * 60 * HZ;
	u64 j64 = get_jiffies_64();
	struct speed_down_verdict_arg arg;
	unsigned int verdict = 0;

	/* scan past 5 mins of error history */
	memset(&arg, 0, sizeof(arg));
	arg.since = j64 - min(j64, j5mins);
	ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);

	if (arg.nr_errors[ATA_ECAT_DUBIOUS_ATA_BUS] +
	    arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] > 1)
		verdict |= ATA_EH_SPDN_SPEED_DOWN |
			ATA_EH_SPDN_FALLBACK_TO_PIO | ATA_EH_SPDN_KEEP_ERRORS;

	if (arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] +
	    arg.nr_errors[ATA_ECAT_DUBIOUS_UNK_DEV] > 1)
		verdict |= ATA_EH_SPDN_NCQ_OFF | ATA_EH_SPDN_KEEP_ERRORS;

	if (arg.nr_errors[ATA_ECAT_ATA_BUS] +
	    arg.nr_errors[ATA_ECAT_TOUT_HSM] +
	    arg.nr_errors[ATA_ECAT_UNK_DEV] > 6)
		verdict |= ATA_EH_SPDN_FALLBACK_TO_PIO;

	/* scan past 10 mins of error history */
	memset(&arg, 0, sizeof(arg));
	arg.since = j64 - min(j64, j10mins);
	ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);

	if (arg.nr_errors[ATA_ECAT_TOUT_HSM] +
	    arg.nr_errors[ATA_ECAT_UNK_DEV] > 3)
		verdict |= ATA_EH_SPDN_NCQ_OFF;

	if (arg.nr_errors[ATA_ECAT_ATA_BUS] +
	    arg.nr_errors[ATA_ECAT_TOUT_HSM] > 3 ||
	    arg.nr_errors[ATA_ECAT_UNK_DEV] > 6)
		verdict |= ATA_EH_SPDN_SPEED_DOWN;

	return verdict;
}
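
/*
 * Worked example (illustrative): two DUBIOUS_TOUT_HSM errors within 5
 * minutes trigger both rule #1 (SPEED_DOWN | FALLBACK_TO_PIO | KEEP_ERRORS)
 * and rule #2 (NCQ_OFF | KEEP_ERRORS); ata_eh_speed_down() below then
 * applies only the first matching action, turning NCQ off.
 */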
/**
 *	ata_eh_speed_down - record error and speed down if necessary
 *	@dev: Failed device
 *	@eflags: mask of ATA_EFLAG_* flags
 *	@err_mask: err_mask of the error
 *
 *	Record error and examine error history to determine whether
 *	adjusting transmission speed is necessary.  It also sets
 *	transmission limits appropriately if such adjustment is
 *	necessary.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	Determined recovery action.
 */
static unsigned int ata_eh_speed_down(struct ata_device *dev,
				unsigned int eflags, unsigned int err_mask)
{
	struct ata_link *link = ata_dev_phys_link(dev);
	int xfer_ok = 0;
	unsigned int verdict;
	unsigned int action = 0;

	/* don't bother if Cat-0 error */
	if (ata_eh_categorize_error(eflags, err_mask, &xfer_ok) == 0)
		return 0;

	/* record error and determine whether speed down is necessary */
	ata_ering_record(&dev->ering, eflags, err_mask);
	verdict = ata_eh_speed_down_verdict(dev);

	/* turn off NCQ? */
	if ((verdict & ATA_EH_SPDN_NCQ_OFF) && ata_ncq_enabled(dev)) {
		dev->flags |= ATA_DFLAG_NCQ_OFF;
		ata_dev_warn(dev, "NCQ disabled due to excessive errors\n");
		goto done;
	}

	/* speed down? */
	if (verdict & ATA_EH_SPDN_SPEED_DOWN) {
		/* speed down SATA link speed if possible */
		if (sata_down_spd_limit(link, 0) == 0) {
			action |= ATA_EH_RESET;
			goto done;
		}

		/* lower transfer mode */
		if (dev->spdn_cnt < 2) {
			static const int dma_dnxfer_sel[] =
				{ ATA_DNXFER_DMA, ATA_DNXFER_40C };
			static const int pio_dnxfer_sel[] =
				{ ATA_DNXFER_PIO, ATA_DNXFER_FORCE_PIO0 };
			int sel;

			if (dev->xfer_shift != ATA_SHIFT_PIO)
				sel = dma_dnxfer_sel[dev->spdn_cnt];
			else
				sel = pio_dnxfer_sel[dev->spdn_cnt];

			dev->spdn_cnt++;

			if (ata_down_xfermask_limit(dev, sel) == 0) {
				action |= ATA_EH_RESET;
				goto done;
			}
		}
	}

	/* Fall back to PIO?  Slowing down to PIO is meaningless for
	 * SATA ATA devices.  Consider it only for PATA and SATAPI.
	 */
	if ((verdict & ATA_EH_SPDN_FALLBACK_TO_PIO) && (dev->spdn_cnt >= 2) &&
	    (link->ap->cbl != ATA_CBL_SATA || dev->class == ATA_DEV_ATAPI) &&
	    (dev->xfer_shift != ATA_SHIFT_PIO)) {
		if (ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO) == 0) {
			dev->spdn_cnt = 0;
			action |= ATA_EH_RESET;
			goto done;
		}
	}

	return 0;
 done:
	/* device has been slowed down, blow error history */
	if (!(verdict & ATA_EH_SPDN_KEEP_ERRORS))
		ata_ering_clear(&dev->ering);
	return action;
}
/**
 *	ata_eh_worth_retry - analyze error and decide whether to retry
 *	@qc: qc to possibly retry
 *
 *	Look at the cause of the error and decide if a retry
 *	might be useful or not.  We don't want to retry media errors
 *	because the drive itself has probably already taken 10-30 seconds
 *	doing its own internal retries before reporting the failure.
 */
static inline int ata_eh_worth_retry(struct ata_queued_cmd *qc)
{
	if (qc->err_mask & AC_ERR_MEDIA)
		return 0;	/* don't retry media errors */
	if (qc->flags & ATA_QCFLAG_IO)
		return 1;	/* otherwise retry anything from fs stack */
	if (qc->err_mask & AC_ERR_INVALID)
		return 0;	/* don't retry these */
	return qc->err_mask != AC_ERR_DEV;  /* retry if not dev error */
}

/**
 *	ata_eh_quiet - check if we need to be quiet about a command error
 *	@qc: qc to check
 *
 *	Look at the qc flags and its scsi command request flags to determine
 *	if we need to be quiet about the command failure.
 */
static inline bool ata_eh_quiet(struct ata_queued_cmd *qc)
{
	if (qc->scsicmd && scsi_cmd_to_rq(qc->scsicmd)->rq_flags & RQF_QUIET)
		qc->flags |= ATA_QCFLAG_QUIET;
	return qc->flags & ATA_QCFLAG_QUIET;
}
static int ata_eh_get_non_ncq_success_sense(struct ata_link *link)
{
	struct ata_port *ap = link->ap;
	struct ata_queued_cmd *qc;

	qc = __ata_qc_from_tag(ap, link->active_tag);
	if (!qc)
		return -EIO;

	if (!(qc->flags & ATA_QCFLAG_EH) ||
	    !(qc->flags & ATA_QCFLAG_EH_SUCCESS_CMD) ||
	    qc->err_mask)
		return -EIO;

	if (!ata_eh_request_sense(qc))
		return -EIO;

	/*
	 * No point in checking the return value, since the command has already
	 * completed successfully.
	 */
	ata_eh_decide_disposition(qc);

	return 0;
}

static void ata_eh_get_success_sense(struct ata_link *link)
{
	struct ata_eh_context *ehc = &link->eh_context;
	struct ata_device *dev = link->device;
	struct ata_port *ap = link->ap;
	struct ata_queued_cmd *qc;
	int tag, ret = 0;

	if (!(ehc->i.dev_action[dev->devno] & ATA_EH_GET_SUCCESS_SENSE))
		return;

	/* if frozen, we can't do much */
	if (ata_port_is_frozen(ap)) {
		ata_dev_warn(dev,
			"successful sense data available but port frozen\n");
		goto out;
	}

	/*
	 * If the link has sactive set, then we have outstanding NCQ commands
	 * and have to read the Successful NCQ Commands log to get the sense
	 * data. Otherwise, we are dealing with a non-NCQ command and use
	 * request sense ext command to retrieve the sense data.
	 */
	if (link->sactive)
		ret = ata_eh_get_ncq_success_sense(link);
	else
		ret = ata_eh_get_non_ncq_success_sense(link);
	if (ret)
		goto out;

	ata_eh_done(link, dev, ATA_EH_GET_SUCCESS_SENSE);
	return;

out:
	/*
	 * If we failed to get sense data for a successful command that ought to
	 * have sense data, we cannot simply return BLK_STS_OK to user space.
	 * This is because we can't know if the sense data that we couldn't get
	 * was actually "DATA CURRENTLY UNAVAILABLE". Reporting such a command
	 * as success to user space would result in a silent data corruption.
	 * Thus, add a bogus ABORTED_COMMAND sense data to such commands, such
	 * that SCSI will report these commands as BLK_STS_IOERR to user space.
	 */
	ata_qc_for_each_raw(ap, qc, tag) {
		if (!(qc->flags & ATA_QCFLAG_EH) ||
		    !(qc->flags & ATA_QCFLAG_EH_SUCCESS_CMD) ||
		    qc->err_mask ||
		    ata_dev_phys_link(qc->dev) != link)
			continue;

		/* We managed to get sense for this success command, skip. */
		if (qc->flags & ATA_QCFLAG_SENSE_VALID)
			continue;

		/* This success command did not have any sense data, skip. */
		if (!(qc->result_tf.status & ATA_SENSE))
			continue;

		/* This success command had sense data, but we failed to get. */
		ata_scsi_set_sense(dev, qc->scsicmd, ABORTED_COMMAND, 0, 0);
		qc->flags |= ATA_QCFLAG_SENSE_VALID;
	}
	ata_eh_done(link, dev, ATA_EH_GET_SUCCESS_SENSE);
}
/**
 *	ata_eh_link_autopsy - analyze error and determine recovery action
 *	@link: host link to perform autopsy on
 *
 *	Analyze why @link failed and determine which recovery actions
 *	are needed.  This function also sets more detailed AC_ERR_*
 *	values and fills sense data for ATAPI CHECK SENSE.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
static void ata_eh_link_autopsy(struct ata_link *link)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	struct ata_queued_cmd *qc;
	struct ata_device *dev;
	unsigned int all_err_mask = 0, eflags = 0;
	int tag, nr_failed = 0, nr_quiet = 0;
	u32 serror;
	int rc;

	if (ehc->i.flags & ATA_EHI_NO_AUTOPSY)
		return;

	/* obtain and analyze SError */
	rc = sata_scr_read(link, SCR_ERROR, &serror);
	if (rc == 0) {
		ehc->i.serror |= serror;
		ata_eh_analyze_serror(link);
	} else if (rc != -EOPNOTSUPP) {
		/* SError read failed, force reset and probing */
		ehc->i.probe_mask |= ATA_ALL_DEVICES;
		ehc->i.action |= ATA_EH_RESET;
		ehc->i.err_mask |= AC_ERR_OTHER;
	}

	/* analyze NCQ failure */
	ata_eh_analyze_ncq_error(link);

	/*
	 * Check if this was a successful command that simply needs sense data.
	 * Since the sense data is not part of the completion, we need to fetch
	 * it using an additional command. Since this can't be done from irq
	 * context, the sense data for successful commands are fetched by EH.
	 */
	ata_eh_get_success_sense(link);

	/* any real error trumps AC_ERR_OTHER */
	if (ehc->i.err_mask & ~AC_ERR_OTHER)
		ehc->i.err_mask &= ~AC_ERR_OTHER;

	all_err_mask |= ehc->i.err_mask;

	ata_qc_for_each_raw(ap, qc, tag) {
		if (!(qc->flags & ATA_QCFLAG_EH) ||
		    qc->flags & ATA_QCFLAG_RETRY ||
		    qc->flags & ATA_QCFLAG_EH_SUCCESS_CMD ||
		    ata_dev_phys_link(qc->dev) != link)
			continue;

		/* inherit upper level err_mask */
		qc->err_mask |= ehc->i.err_mask;

		/* analyze TF */
		ehc->i.action |= ata_eh_analyze_tf(qc);

		/* DEV errors are probably spurious in case of ATA_BUS error */
		if (qc->err_mask & AC_ERR_ATA_BUS)
			qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_MEDIA |
					  AC_ERR_INVALID);

		/* any real error trumps unknown error */
		if (qc->err_mask & ~AC_ERR_OTHER)
			qc->err_mask &= ~AC_ERR_OTHER;

		/*
		 * SENSE_VALID trumps dev/unknown error and revalidation. Upper
		 * layers will determine whether the command is worth retrying
		 * based on the sense data and device class/type. Otherwise,
		 * determine directly if the command is worth retrying using its
		 * error mask and flags.
		 */
		if (qc->flags & ATA_QCFLAG_SENSE_VALID)
			qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER);
		else if (ata_eh_worth_retry(qc))
			qc->flags |= ATA_QCFLAG_RETRY;

		/* accumulate error info */
		ehc->i.dev = qc->dev;
		all_err_mask |= qc->err_mask;
		if (qc->flags & ATA_QCFLAG_IO)
			eflags |= ATA_EFLAG_IS_IO;
		trace_ata_eh_link_autopsy_qc(qc);

		/* Count quiet errors */
		if (ata_eh_quiet(qc))
			nr_quiet++;
		nr_failed++;
	}

	/* If all failed commands requested silence, then be quiet */
	if (nr_quiet == nr_failed)
		ehc->i.flags |= ATA_EHI_QUIET;

	/* enforce default EH actions */
	if (ata_port_is_frozen(ap) ||
	    all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT))
		ehc->i.action |= ATA_EH_RESET;
	else if (((eflags & ATA_EFLAG_IS_IO) && all_err_mask) ||
		 (!(eflags & ATA_EFLAG_IS_IO) && (all_err_mask & ~AC_ERR_DEV)))
		ehc->i.action |= ATA_EH_REVALIDATE;

	/* If we have offending qcs and the associated failed device,
	 * perform per-dev EH action only on the offending device.
	 */
	if (ehc->i.dev) {
		ehc->i.dev_action[ehc->i.dev->devno] |=
			ehc->i.action & ATA_EH_PERDEV_MASK;
		ehc->i.action &= ~ATA_EH_PERDEV_MASK;
	}

	/* propagate timeout to host link */
	if ((all_err_mask & AC_ERR_TIMEOUT) && !ata_is_host_link(link))
		ap->link.eh_context.i.err_mask |= AC_ERR_TIMEOUT;

	/* record error and consider speeding down */
	dev = ehc->i.dev;
	if (!dev && ((ata_link_max_devices(link) == 1 &&
		      ata_dev_enabled(link->device))))
		dev = link->device;

	if (dev) {
		if (dev->flags & ATA_DFLAG_DUBIOUS_XFER)
			eflags |= ATA_EFLAG_DUBIOUS_XFER;
		ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask);
		trace_ata_eh_link_autopsy(dev, ehc->i.action, all_err_mask);
	}
}
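/*
 * Worked example (illustrative, not extra kernel logic): with the policy
 * above, a single NCQ read failing with AC_ERR_DEV on an unfrozen port
 * yields eflags = ATA_EFLAG_IS_IO and all_err_mask = AC_ERR_DEV, so the
 * autopsy schedules only ATA_EH_REVALIDATE; a frozen port, an HSM
 * violation or a timeout is what escalates the action to ATA_EH_RESET.
 */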
/**
 *	ata_eh_autopsy - analyze error and determine recovery action
 *	@ap: host port to perform autopsy on
 *
 *	Analyze all links of @ap and determine why they failed and
 *	which recovery actions are needed.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_eh_autopsy(struct ata_port *ap)
{
	struct ata_link *link;

	ata_for_each_link(link, ap, EDGE)
		ata_eh_link_autopsy(link);

	/* Handle the frigging slave link.  Autopsy is done similarly
	 * but actions and flags are transferred over to the master
	 * link and handled from there.
	 */
	if (ap->slave_link) {
		struct ata_eh_context *mehc = &ap->link.eh_context;
		struct ata_eh_context *sehc = &ap->slave_link->eh_context;

		/* transfer control flags from master to slave */
		sehc->i.flags |= mehc->i.flags & ATA_EHI_TO_SLAVE_MASK;

		/* perform autopsy on the slave link */
		ata_eh_link_autopsy(ap->slave_link);

		/* transfer actions from slave to master and clear slave */
		ata_eh_about_to_do(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS);
		mehc->i.action		|= sehc->i.action;
		mehc->i.dev_action[1]	|= sehc->i.dev_action[1];
		mehc->i.flags		|= sehc->i.flags;
		ata_eh_done(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS);
	}

	/* Autopsy of fanout ports can affect host link autopsy.
	 * Perform host link autopsy last.
	 */
	if (sata_pmp_attached(ap))
		ata_eh_link_autopsy(&ap->link);
}
/**
 *	ata_get_cmd_name - get name for ATA command
 *	@command: ATA command code to get name for
 *
 *	Return a textual name of the given command or "unknown"
 *
 *	LOCKING:
 *	None
 */
const char *ata_get_cmd_name(u8 command)
{
#ifdef CONFIG_ATA_VERBOSE_ERROR
	static const struct
	{
		u8 command;
		const char *text;
	} cmd_descr[] = {
		{ ATA_CMD_DEV_RESET,		"DEVICE RESET" },
		{ ATA_CMD_CHK_POWER,		"CHECK POWER MODE" },
		{ ATA_CMD_STANDBY,		"STANDBY" },
		{ ATA_CMD_IDLE,			"IDLE" },
		{ ATA_CMD_EDD,			"EXECUTE DEVICE DIAGNOSTIC" },
		{ ATA_CMD_DOWNLOAD_MICRO,	"DOWNLOAD MICROCODE" },
		{ ATA_CMD_DOWNLOAD_MICRO_DMA,	"DOWNLOAD MICROCODE DMA" },
		{ ATA_CMD_NOP,			"NOP" },
		{ ATA_CMD_FLUSH,		"FLUSH CACHE" },
		{ ATA_CMD_FLUSH_EXT,		"FLUSH CACHE EXT" },
		{ ATA_CMD_ID_ATA,		"IDENTIFY DEVICE" },
		{ ATA_CMD_ID_ATAPI,		"IDENTIFY PACKET DEVICE" },
		{ ATA_CMD_SERVICE,		"SERVICE" },
		{ ATA_CMD_READ,			"READ DMA" },
		{ ATA_CMD_READ_EXT,		"READ DMA EXT" },
		{ ATA_CMD_READ_QUEUED,		"READ DMA QUEUED" },
		{ ATA_CMD_READ_STREAM_EXT,	"READ STREAM EXT" },
		{ ATA_CMD_READ_STREAM_DMA_EXT,	"READ STREAM DMA EXT" },
		{ ATA_CMD_WRITE,		"WRITE DMA" },
		{ ATA_CMD_WRITE_EXT,		"WRITE DMA EXT" },
		{ ATA_CMD_WRITE_QUEUED,		"WRITE DMA QUEUED EXT" },
		{ ATA_CMD_WRITE_STREAM_EXT,	"WRITE STREAM EXT" },
		{ ATA_CMD_WRITE_STREAM_DMA_EXT,	"WRITE STREAM DMA EXT" },
		{ ATA_CMD_WRITE_FUA_EXT,	"WRITE DMA FUA EXT" },
		{ ATA_CMD_WRITE_QUEUED_FUA_EXT,	"WRITE DMA QUEUED FUA EXT" },
		{ ATA_CMD_FPDMA_READ,		"READ FPDMA QUEUED" },
		{ ATA_CMD_FPDMA_WRITE,		"WRITE FPDMA QUEUED" },
		{ ATA_CMD_NCQ_NON_DATA,		"NCQ NON-DATA" },
		{ ATA_CMD_FPDMA_SEND,		"SEND FPDMA QUEUED" },
		{ ATA_CMD_FPDMA_RECV,		"RECEIVE FPDMA QUEUED" },
		{ ATA_CMD_PIO_READ,		"READ SECTOR(S)" },
		{ ATA_CMD_PIO_READ_EXT,		"READ SECTOR(S) EXT" },
		{ ATA_CMD_PIO_WRITE,		"WRITE SECTOR(S)" },
		{ ATA_CMD_PIO_WRITE_EXT,	"WRITE SECTOR(S) EXT" },
		{ ATA_CMD_READ_MULTI,		"READ MULTIPLE" },
		{ ATA_CMD_READ_MULTI_EXT,	"READ MULTIPLE EXT" },
		{ ATA_CMD_WRITE_MULTI,		"WRITE MULTIPLE" },
		{ ATA_CMD_WRITE_MULTI_EXT,	"WRITE MULTIPLE EXT" },
		{ ATA_CMD_WRITE_MULTI_FUA_EXT,	"WRITE MULTIPLE FUA EXT" },
		{ ATA_CMD_SET_FEATURES,		"SET FEATURES" },
		{ ATA_CMD_SET_MULTI,		"SET MULTIPLE MODE" },
		{ ATA_CMD_VERIFY,		"READ VERIFY SECTOR(S)" },
		{ ATA_CMD_VERIFY_EXT,		"READ VERIFY SECTOR(S) EXT" },
		{ ATA_CMD_WRITE_UNCORR_EXT,	"WRITE UNCORRECTABLE EXT" },
		{ ATA_CMD_STANDBYNOW1,		"STANDBY IMMEDIATE" },
		{ ATA_CMD_IDLEIMMEDIATE,	"IDLE IMMEDIATE" },
		{ ATA_CMD_SLEEP,		"SLEEP" },
		{ ATA_CMD_INIT_DEV_PARAMS,	"INITIALIZE DEVICE PARAMETERS" },
		{ ATA_CMD_READ_NATIVE_MAX,	"READ NATIVE MAX ADDRESS" },
		{ ATA_CMD_READ_NATIVE_MAX_EXT,	"READ NATIVE MAX ADDRESS EXT" },
		{ ATA_CMD_SET_MAX,		"SET MAX ADDRESS" },
		{ ATA_CMD_SET_MAX_EXT,		"SET MAX ADDRESS EXT" },
		{ ATA_CMD_READ_LOG_EXT,		"READ LOG EXT" },
		{ ATA_CMD_WRITE_LOG_EXT,	"WRITE LOG EXT" },
		{ ATA_CMD_READ_LOG_DMA_EXT,	"READ LOG DMA EXT" },
		{ ATA_CMD_WRITE_LOG_DMA_EXT,	"WRITE LOG DMA EXT" },
		{ ATA_CMD_TRUSTED_NONDATA,	"TRUSTED NON-DATA" },
		{ ATA_CMD_TRUSTED_RCV,		"TRUSTED RECEIVE" },
		{ ATA_CMD_TRUSTED_RCV_DMA,	"TRUSTED RECEIVE DMA" },
		{ ATA_CMD_TRUSTED_SND,		"TRUSTED SEND" },
		{ ATA_CMD_TRUSTED_SND_DMA,	"TRUSTED SEND DMA" },
		{ ATA_CMD_PMP_READ,		"READ BUFFER" },
		{ ATA_CMD_PMP_READ_DMA,		"READ BUFFER DMA" },
		{ ATA_CMD_PMP_WRITE,		"WRITE BUFFER" },
		{ ATA_CMD_PMP_WRITE_DMA,	"WRITE BUFFER DMA" },
		{ ATA_CMD_CONF_OVERLAY,		"DEVICE CONFIGURATION OVERLAY" },
		{ ATA_CMD_SEC_SET_PASS,		"SECURITY SET PASSWORD" },
		{ ATA_CMD_SEC_UNLOCK,		"SECURITY UNLOCK" },
		{ ATA_CMD_SEC_ERASE_PREP,	"SECURITY ERASE PREPARE" },
		{ ATA_CMD_SEC_ERASE_UNIT,	"SECURITY ERASE UNIT" },
		{ ATA_CMD_SEC_FREEZE_LOCK,	"SECURITY FREEZE LOCK" },
		{ ATA_CMD_SEC_DISABLE_PASS,	"SECURITY DISABLE PASSWORD" },
		{ ATA_CMD_CONFIG_STREAM,	"CONFIGURE STREAM" },
		{ ATA_CMD_SMART,		"SMART" },
		{ ATA_CMD_MEDIA_LOCK,		"DOOR LOCK" },
		{ ATA_CMD_MEDIA_UNLOCK,		"DOOR UNLOCK" },
		{ ATA_CMD_DSM,			"DATA SET MANAGEMENT" },
		{ ATA_CMD_CHK_MED_CRD_TYP,	"CHECK MEDIA CARD TYPE" },
		{ ATA_CMD_CFA_REQ_EXT_ERR,	"CFA REQUEST EXTENDED ERROR" },
		{ ATA_CMD_CFA_WRITE_NE,		"CFA WRITE SECTORS WITHOUT ERASE" },
		{ ATA_CMD_CFA_TRANS_SECT,	"CFA TRANSLATE SECTOR" },
		{ ATA_CMD_CFA_ERASE,		"CFA ERASE SECTORS" },
		{ ATA_CMD_CFA_WRITE_MULT_NE,	"CFA WRITE MULTIPLE WITHOUT ERASE" },
		{ ATA_CMD_REQ_SENSE_DATA,	"REQUEST SENSE DATA EXT" },
		{ ATA_CMD_SANITIZE_DEVICE,	"SANITIZE DEVICE" },
		{ ATA_CMD_ZAC_MGMT_IN,		"ZAC MANAGEMENT IN" },
		{ ATA_CMD_ZAC_MGMT_OUT,		"ZAC MANAGEMENT OUT" },
		{ ATA_CMD_READ_LONG,		"READ LONG (with retries)" },
		{ ATA_CMD_READ_LONG_ONCE,	"READ LONG (without retries)" },
		{ ATA_CMD_WRITE_LONG,		"WRITE LONG (with retries)" },
		{ ATA_CMD_WRITE_LONG_ONCE,	"WRITE LONG (without retries)" },
		{ ATA_CMD_RESTORE,		"RECALIBRATE" },
		{ 0,				NULL } /* terminate list */
	};

	unsigned int i;

	for (i = 0; cmd_descr[i].text; i++)
		if (cmd_descr[i].command == command)
			return cmd_descr[i].text;
#endif

	return "unknown";
}
EXPORT_SYMBOL_GPL(ata_get_cmd_name);
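/*
 * Usage sketch (illustrative): callers pass the raw taskfile command byte,
 * typically when logging a failed qc, as ata_eh_link_report() below does:
 *
 *	ata_dev_err(qc->dev, "failed command: %s\n",
 *		    ata_get_cmd_name(qc->tf.command));
 *
 * Without CONFIG_ATA_VERBOSE_ERROR the lookup table is compiled out and
 * every command decodes to "unknown".
 */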
/**
 *	ata_eh_link_report - report error handling to user
 *	@link: ATA link EH is going on
 *
 *	Report EH to user.
 *
 *	LOCKING:
 *	None.
 */
static void ata_eh_link_report(struct ata_link *link)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	struct ata_queued_cmd *qc;
	const char *frozen, *desc;
	char tries_buf[16] = "";
	int tag, nr_failed = 0;

	if (ehc->i.flags & ATA_EHI_QUIET)
		return;

	desc = NULL;
	if (ehc->i.desc[0] != '\0')
		desc = ehc->i.desc;

	ata_qc_for_each_raw(ap, qc, tag) {
		if (!(qc->flags & ATA_QCFLAG_EH) ||
		    ata_dev_phys_link(qc->dev) != link ||
		    ((qc->flags & ATA_QCFLAG_QUIET) &&
		     qc->err_mask == AC_ERR_DEV))
			continue;
		if (qc->flags & ATA_QCFLAG_SENSE_VALID && !qc->err_mask)
			continue;

		nr_failed++;
	}

	if (!nr_failed && !ehc->i.err_mask)
		return;

	frozen = "";
	if (ata_port_is_frozen(ap))
		frozen = " frozen";

	if (ap->eh_tries < ATA_EH_MAX_TRIES)
		snprintf(tries_buf, sizeof(tries_buf), " t%d",
			 ap->eh_tries);

	if (ehc->i.dev) {
		ata_dev_err(ehc->i.dev, "exception Emask 0x%x "
			    "SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
			    ehc->i.err_mask, link->sactive, ehc->i.serror,
			    ehc->i.action, frozen, tries_buf);
		if (desc)
			ata_dev_err(ehc->i.dev, "%s\n", desc);
	} else {
		ata_link_err(link, "exception Emask 0x%x "
			     "SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
			     ehc->i.err_mask, link->sactive, ehc->i.serror,
			     ehc->i.action, frozen, tries_buf);
		if (desc)
			ata_link_err(link, "%s\n", desc);
	}

#ifdef CONFIG_ATA_VERBOSE_ERROR
	if (ehc->i.serror)
		ata_link_err(link,
		  "SError: { %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s}\n",
		  ehc->i.serror & SERR_DATA_RECOVERED ? "RecovData " : "",
		  ehc->i.serror & SERR_COMM_RECOVERED ? "RecovComm " : "",
		  ehc->i.serror & SERR_DATA ? "UnrecovData " : "",
		  ehc->i.serror & SERR_PERSISTENT ? "Persist " : "",
		  ehc->i.serror & SERR_PROTOCOL ? "Proto " : "",
		  ehc->i.serror & SERR_INTERNAL ? "HostInt " : "",
		  ehc->i.serror & SERR_PHYRDY_CHG ? "PHYRdyChg " : "",
		  ehc->i.serror & SERR_PHY_INT_ERR ? "PHYInt " : "",
		  ehc->i.serror & SERR_COMM_WAKE ? "CommWake " : "",
		  ehc->i.serror & SERR_10B_8B_ERR ? "10B8B " : "",
		  ehc->i.serror & SERR_DISPARITY ? "Dispar " : "",
		  ehc->i.serror & SERR_CRC ? "BadCRC " : "",
		  ehc->i.serror & SERR_HANDSHAKE ? "Handshk " : "",
		  ehc->i.serror & SERR_LINK_SEQ_ERR ? "LinkSeq " : "",
		  ehc->i.serror & SERR_TRANS_ST_ERROR ? "TrStaTrns " : "",
		  ehc->i.serror & SERR_UNRECOG_FIS ? "UnrecFIS " : "",
		  ehc->i.serror & SERR_DEV_XCHG ? "DevExch " : "");
#endif

	ata_qc_for_each_raw(ap, qc, tag) {
		struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf;
		char data_buf[20] = "";
		char cdb_buf[70] = "";

		if (!(qc->flags & ATA_QCFLAG_EH) ||
		    ata_dev_phys_link(qc->dev) != link || !qc->err_mask)
			continue;

		if (qc->dma_dir != DMA_NONE) {
			static const char *dma_str[] = {
				[DMA_BIDIRECTIONAL]	= "bidi",
				[DMA_TO_DEVICE]		= "out",
				[DMA_FROM_DEVICE]	= "in",
			};
			const char *prot_str = NULL;

			switch (qc->tf.protocol) {
			case ATA_PROT_UNKNOWN:
				prot_str = "unknown";
				break;
			case ATA_PROT_NODATA:
				prot_str = "nodata";
				break;
			case ATA_PROT_PIO:
				prot_str = "pio";
				break;
			case ATA_PROT_DMA:
				prot_str = "dma";
				break;
			case ATA_PROT_NCQ:
				prot_str = "ncq dma";
				break;
			case ATA_PROT_NCQ_NODATA:
				prot_str = "ncq nodata";
				break;
			case ATAPI_PROT_NODATA:
				prot_str = "nodata";
				break;
			case ATAPI_PROT_PIO:
				prot_str = "pio";
				break;
			case ATAPI_PROT_DMA:
				prot_str = "dma";
				break;
			}
			snprintf(data_buf, sizeof(data_buf), " %s %u %s",
				 prot_str, qc->nbytes, dma_str[qc->dma_dir]);
		}

		if (ata_is_atapi(qc->tf.protocol)) {
			const u8 *cdb = qc->cdb;
			size_t cdb_len = qc->dev->cdb_len;

			if (qc->scsicmd) {
				cdb = qc->scsicmd->cmnd;
				cdb_len = qc->scsicmd->cmd_len;
			}
			__scsi_format_command(cdb_buf, sizeof(cdb_buf),
					      cdb, cdb_len);
		} else {
			ata_dev_err(qc->dev, "failed command: %s\n",
				    ata_get_cmd_name(cmd->command));
		}

		ata_dev_err(qc->dev,
			"cmd %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
			"tag %d%s%s\n         "
			"res %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
			"Emask 0x%x (%s)%s\n",
			cmd->command, cmd->feature, cmd->nsect,
			cmd->lbal, cmd->lbam, cmd->lbah,
			cmd->hob_feature, cmd->hob_nsect,
			cmd->hob_lbal, cmd->hob_lbam, cmd->hob_lbah,
			cmd->device, qc->tag, data_buf, cdb_buf,
			res->status, res->error, res->nsect,
			res->lbal, res->lbam, res->lbah,
			res->hob_feature, res->hob_nsect,
			res->hob_lbal, res->hob_lbam, res->hob_lbah,
			res->device, qc->err_mask, ata_err_string(qc->err_mask),
			qc->err_mask & AC_ERR_NCQ ? " <F>" : "");

#ifdef CONFIG_ATA_VERBOSE_ERROR
		if (res->status & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ |
				   ATA_SENSE | ATA_ERR)) {
			if (res->status & ATA_BUSY)
				ata_dev_err(qc->dev, "status: { Busy }\n");
			else
				ata_dev_err(qc->dev, "status: { %s%s%s%s%s}\n",
				  res->status & ATA_DRDY ? "DRDY " : "",
				  res->status & ATA_DF ? "DF " : "",
				  res->status & ATA_DRQ ? "DRQ " : "",
				  res->status & ATA_SENSE ? "SENSE " : "",
				  res->status & ATA_ERR ? "ERR " : "");
		}

		if (cmd->command != ATA_CMD_PACKET &&
		    (res->error & (ATA_ICRC | ATA_UNC | ATA_AMNF | ATA_IDNF |
				   ATA_ABORTED)))
			ata_dev_err(qc->dev, "error: { %s%s%s%s%s}\n",
				    res->error & ATA_ICRC ? "ICRC " : "",
				    res->error & ATA_UNC ? "UNC " : "",
				    res->error & ATA_AMNF ? "AMNF " : "",
				    res->error & ATA_IDNF ? "IDNF " : "",
				    res->error & ATA_ABORTED ? "ABRT " : "");
#endif
	}
}
/**
 *	ata_eh_report - report error handling to user
 *	@ap: ATA port to report EH about
 *
 *	Report EH to user.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_report(struct ata_port *ap)
{
	struct ata_link *link;

	ata_for_each_link(link, ap, HOST_FIRST)
		ata_eh_link_report(link);
}
static int ata_do_reset(struct ata_link *link, ata_reset_fn_t reset,
			unsigned int *classes, unsigned long deadline,
			bool clear_classes)
{
	struct ata_device *dev;

	if (clear_classes)
		ata_for_each_dev(dev, link, ALL)
			classes[dev->devno] = ATA_DEV_UNKNOWN;

	return reset(link, classes, deadline);
}
static int ata_eh_followup_srst_needed(struct ata_link *link, int rc)
{
	if ((link->flags & ATA_LFLAG_NO_SRST) || ata_link_offline(link))
		return 0;
	if (rc == -EAGAIN)
		return 1;
	if (sata_pmp_supported(link->ap) && ata_is_host_link(link))
		return 1;
	return 0;
}
int ata_eh_reset(struct ata_link *link, int classify,
		 ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
		 ata_reset_fn_t hardreset, ata_postreset_fn_t postreset)
{
	struct ata_port *ap = link->ap;
	struct ata_link *slave = ap->slave_link;
	struct ata_eh_context *ehc = &link->eh_context;
	struct ata_eh_context *sehc = slave ? &slave->eh_context : NULL;
	unsigned int *classes = ehc->classes;
	unsigned int lflags = link->flags;
	int verbose = !(ehc->i.flags & ATA_EHI_QUIET);
	int max_tries = 0, try = 0;
	struct ata_link *failed_link;
	struct ata_device *dev;
	unsigned long deadline, now;
	ata_reset_fn_t reset;
	unsigned long flags;
	u32 sstatus;
	int nr_unknown, rc;

	/*
	 * Prepare to reset
	 */
	while (ata_eh_reset_timeouts[max_tries] != UINT_MAX)
		max_tries++;
	if (link->flags & ATA_LFLAG_RST_ONCE)
		max_tries = 1;
	if (link->flags & ATA_LFLAG_NO_HRST)
		hardreset = NULL;
	if (link->flags & ATA_LFLAG_NO_SRST)
		softreset = NULL;

	/* make sure each reset attempt is at least COOL_DOWN apart */
	if (ehc->i.flags & ATA_EHI_DID_RESET) {
		now = jiffies;
		WARN_ON(time_after(ehc->last_reset, now));
		deadline = ata_deadline(ehc->last_reset,
					ATA_EH_RESET_COOL_DOWN);
		if (time_before(now, deadline))
			schedule_timeout_uninterruptible(deadline - now);
	}

	spin_lock_irqsave(ap->lock, flags);
	ap->pflags |= ATA_PFLAG_RESETTING;
	spin_unlock_irqrestore(ap->lock, flags);

	ata_eh_about_to_do(link, NULL, ATA_EH_RESET);

	ata_for_each_dev(dev, link, ALL) {
		/* If we issue an SRST then an ATA drive (not ATAPI)
		 * may change configuration and be in PIO0 timing. If
		 * we do a hard reset (or are coming from power on)
		 * this is true for ATA or ATAPI. Until we've set a
		 * suitable controller mode we should not touch the
		 * bus as we may be talking too fast.
		 */
		dev->pio_mode = XFER_PIO_0;
		dev->dma_mode = 0xff;

		/* If the controller has a pio mode setup function
		 * then use it to set the chipset to rights. Don't
		 * touch the DMA setup as that will be dealt with when
		 * configuring devices.
		 */
		if (ap->ops->set_piomode)
			ap->ops->set_piomode(ap, dev);
	}

	/* prefer hardreset */
	reset = NULL;
	ehc->i.action &= ~ATA_EH_RESET;
	if (hardreset) {
		reset = hardreset;
		ehc->i.action |= ATA_EH_HARDRESET;
	} else if (softreset) {
		reset = softreset;
		ehc->i.action |= ATA_EH_SOFTRESET;
	}

	if (prereset) {
		unsigned long deadline = ata_deadline(jiffies,
						      ATA_EH_PRERESET_TIMEOUT);

		if (slave) {
			sehc->i.action &= ~ATA_EH_RESET;
			sehc->i.action |= ehc->i.action;
		}

		rc = prereset(link, deadline);

		/* If present, do prereset on slave link too.  Reset
		 * is skipped iff both master and slave links report
		 * -ENOENT or clear ATA_EH_RESET.
		 */
		if (slave && (rc == 0 || rc == -ENOENT)) {
			int tmp;

			tmp = prereset(slave, deadline);
			if (tmp != -ENOENT)
				rc = tmp;

			ehc->i.action |= sehc->i.action;
		}

		if (rc) {
			if (rc == -ENOENT) {
				ata_link_dbg(link, "port disabled--ignoring\n");
				ehc->i.action &= ~ATA_EH_RESET;

				ata_for_each_dev(dev, link, ALL)
					classes[dev->devno] = ATA_DEV_NONE;

				rc = 0;
			} else
				ata_link_err(link,
					     "prereset failed (errno=%d)\n",
					     rc);
			goto out;
		}

		/* prereset() might have cleared ATA_EH_RESET.  If so,
		 * bang classes, thaw and return.
		 */
		if (reset && !(ehc->i.action & ATA_EH_RESET)) {
			ata_for_each_dev(dev, link, ALL)
				classes[dev->devno] = ATA_DEV_NONE;
			if (ata_port_is_frozen(ap) && ata_is_host_link(link))
				ata_eh_thaw_port(ap);
			rc = 0;
			goto out;
		}
	}

 retry:
	/*
	 * Perform reset
	 */
	if (ata_is_host_link(link))
		ata_eh_freeze_port(ap);

	deadline = ata_deadline(jiffies, ata_eh_reset_timeouts[try++]);

	if (reset) {
		if (verbose)
			ata_link_info(link, "%s resetting link\n",
				      reset == softreset ? "soft" : "hard");

		/* mark that this EH session started with reset */
		ehc->last_reset = jiffies;
		if (reset == hardreset) {
			ehc->i.flags |= ATA_EHI_DID_HARDRESET;
			trace_ata_link_hardreset_begin(link, classes, deadline);
		} else {
			ehc->i.flags |= ATA_EHI_DID_SOFTRESET;
			trace_ata_link_softreset_begin(link, classes, deadline);
		}

		rc = ata_do_reset(link, reset, classes, deadline, true);
		if (reset == hardreset)
			trace_ata_link_hardreset_end(link, classes, rc);
		else
			trace_ata_link_softreset_end(link, classes, rc);
		if (rc && rc != -EAGAIN) {
			failed_link = link;
			goto fail;
		}

		/* hardreset slave link if existent */
		if (slave && reset == hardreset) {
			int tmp;

			if (verbose)
				ata_link_info(slave, "hard resetting link\n");

			ata_eh_about_to_do(slave, NULL, ATA_EH_RESET);
			trace_ata_slave_hardreset_begin(slave, classes,
							deadline);
			tmp = ata_do_reset(slave, reset, classes, deadline,
					   false);
			trace_ata_slave_hardreset_end(slave, classes, tmp);
			switch (tmp) {
			case -EAGAIN:
				rc = -EAGAIN;
				break;
			case 0:
				break;
			default:
				failed_link = slave;
				rc = tmp;
				goto fail;
			}
		}

		/* perform follow-up SRST if necessary */
		if (reset == hardreset &&
		    ata_eh_followup_srst_needed(link, rc)) {
			reset = softreset;

			if (!reset) {
				ata_link_err(link,
					     "follow-up softreset required but no softreset available\n");
				failed_link = link;
				rc = -EINVAL;
				goto fail;
			}

			ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
			trace_ata_link_softreset_begin(link, classes, deadline);
			rc = ata_do_reset(link, reset, classes, deadline, true);
			trace_ata_link_softreset_end(link, classes, rc);
			if (rc) {
				failed_link = link;
				goto fail;
			}
		}
	} else {
		if (verbose)
			ata_link_info(link,
				      "no reset method available, skipping reset\n");
		if (!(lflags & ATA_LFLAG_ASSUME_CLASS))
			lflags |= ATA_LFLAG_ASSUME_ATA;
	}

	/*
	 * Post-reset processing
	 */
	ata_for_each_dev(dev, link, ALL) {
		/* After the reset, the device state is PIO 0 and the
		 * controller state is undefined.  Reset also wakes up
		 * drives from sleeping mode.
		 */
		dev->pio_mode = XFER_PIO_0;
		dev->flags &= ~ATA_DFLAG_SLEEPING;

		if (ata_phys_link_offline(ata_dev_phys_link(dev)))
			continue;

		/* apply class override */
		if (lflags & ATA_LFLAG_ASSUME_ATA)
			classes[dev->devno] = ATA_DEV_ATA;
		else if (lflags & ATA_LFLAG_ASSUME_SEMB)
			classes[dev->devno] = ATA_DEV_SEMB_UNSUP;
	}

	/* record current link speed */
	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0)
		link->sata_spd = (sstatus >> 4) & 0xf;
	if (slave && sata_scr_read(slave, SCR_STATUS, &sstatus) == 0)
		slave->sata_spd = (sstatus >> 4) & 0xf;

	/* thaw the port */
	if (ata_is_host_link(link))
		ata_eh_thaw_port(ap);

	/* postreset() should clear hardware SError.  Although SError
	 * is cleared during link resume, clearing SError here is
	 * necessary as some PHYs raise hotplug events after SRST.
	 * This introduces race condition where hotplug occurs between
	 * reset and here.  This race is mediated by cross checking
	 * link onlineness and classification result later.
	 */
	if (postreset) {
		postreset(link, classes);
		trace_ata_link_postreset(link, classes, rc);
		if (slave) {
			postreset(slave, classes);
			trace_ata_slave_postreset(slave, classes, rc);
		}
	}

	/* clear cached SError */
	spin_lock_irqsave(link->ap->lock, flags);
	link->eh_info.serror = 0;
	if (slave)
		slave->eh_info.serror = 0;
	spin_unlock_irqrestore(link->ap->lock, flags);

	/*
	 * Make sure onlineness and classification result correspond.
	 * Hotplug could have happened during reset and some
	 * controllers fail to wait while a drive is spinning up after
	 * being hotplugged causing misdetection.  By cross checking
	 * link on/offlineness and classification result, those
	 * conditions can be reliably detected and retried.
	 */
	nr_unknown = 0;
	ata_for_each_dev(dev, link, ALL) {
		if (ata_phys_link_online(ata_dev_phys_link(dev))) {
			if (classes[dev->devno] == ATA_DEV_UNKNOWN) {
				ata_dev_dbg(dev, "link online but device misclassified\n");
				classes[dev->devno] = ATA_DEV_NONE;
				nr_unknown++;
			}
		} else if (ata_phys_link_offline(ata_dev_phys_link(dev))) {
			if (ata_class_enabled(classes[dev->devno]))
				ata_dev_dbg(dev,
					    "link offline, clearing class %d to NONE\n",
					    classes[dev->devno]);
			classes[dev->devno] = ATA_DEV_NONE;
		} else if (classes[dev->devno] == ATA_DEV_UNKNOWN) {
			ata_dev_dbg(dev,
				    "link status unknown, clearing UNKNOWN to NONE\n");
			classes[dev->devno] = ATA_DEV_NONE;
		}
	}

	if (classify && nr_unknown) {
		if (try < max_tries) {
			ata_link_warn(link,
				      "link online but %d devices misclassified, retrying\n",
				      nr_unknown);
			failed_link = link;
			rc = -EAGAIN;
			goto fail;
		}
		ata_link_warn(link,
			      "link online but %d devices misclassified, "
			      "device detection might fail\n", nr_unknown);
	}

	/* reset successful, schedule revalidation */
	ata_eh_done(link, NULL, ATA_EH_RESET);
	if (slave)
		ata_eh_done(slave, NULL, ATA_EH_RESET);
	ehc->last_reset = jiffies;		/* update to completion time */
	ehc->i.action |= ATA_EH_REVALIDATE;
	link->lpm_policy = ATA_LPM_UNKNOWN;	/* reset LPM state */

	rc = 0;
 out:
	/* clear hotplug flag */
	ehc->i.flags &= ~ATA_EHI_HOTPLUGGED;
	if (slave)
		sehc->i.flags &= ~ATA_EHI_HOTPLUGGED;

	spin_lock_irqsave(ap->lock, flags);
	ap->pflags &= ~ATA_PFLAG_RESETTING;
	spin_unlock_irqrestore(ap->lock, flags);

	return rc;

 fail:
	/* if SCR isn't accessible on a fan-out port, PMP needs to be reset */
	if (!ata_is_host_link(link) &&
	    sata_scr_read(link, SCR_STATUS, &sstatus))
		rc = -ERESTART;

	if (try >= max_tries) {
		/*
		 * Thaw host port even if reset failed, so that the port
		 * can be retried on the next phy event.  This risks
		 * repeated EH runs but seems to be a better tradeoff than
		 * shutting down a port after a botched hotplug attempt.
		 */
		if (ata_is_host_link(link))
			ata_eh_thaw_port(ap);
		ata_link_warn(link, "%s failed\n",
			      reset == hardreset ? "hardreset" : "softreset");
		goto out;
	}

	now = jiffies;
	if (time_before(now, deadline)) {
		unsigned long delta = deadline - now;

		ata_link_warn(failed_link,
			"reset failed (errno=%d), retrying in %u secs\n",
			rc, DIV_ROUND_UP(jiffies_to_msecs(delta), 1000));

		ata_eh_release(ap);
		while (delta)
			delta = schedule_timeout_uninterruptible(delta);
		ata_eh_acquire(ap);
	}

	/*
	 * While disks spinup behind PMP, some controllers fail sending SRST.
	 * They need to be reset - as well as the PMP - before retrying.
	 */
	if (rc == -ERESTART) {
		if (ata_is_host_link(link))
			ata_eh_thaw_port(ap);
		goto out;
	}

	if (try == max_tries - 1) {
		sata_down_spd_limit(link, 0);
		if (slave)
			sata_down_spd_limit(slave, 0);
	} else if (rc == -EPIPE)
		sata_down_spd_limit(failed_link, 0);

	if (hardreset)
		reset = hardreset;
	goto retry;
}
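/*
 * Caller-side sketch (illustrative; this mirrors how ata_eh_recover() below
 * drives the reset state machine): @classify is the number of vacant device
 * slots, and the reset methods come straight from the EH entry point:
 *
 *	if (ehc->i.action & ATA_EH_RESET) {
 *		rc = ata_eh_reset(link, ata_link_nr_vacant(link),
 *				  prereset, softreset, hardreset, postreset);
 *		if (rc)
 *			goto out;	// reset failed, giving up
 *	}
 */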
static inline void ata_eh_pull_park_action(struct ata_port *ap)
{
	struct ata_link *link;
	struct ata_device *dev;
	unsigned long flags;

	/*
	 * This function can be thought of as an extended version of
	 * ata_eh_about_to_do() specially crafted to accommodate the
	 * requirements of ATA_EH_PARK handling. Since the EH thread
	 * does not leave the do {} while () loop in ata_eh_recover as
	 * long as the timeout for a park request to *one* device on
	 * the port has not expired, and since we still want to pick
	 * up park requests to other devices on the same port or
	 * timeout updates for the same device, we have to pull
	 * ATA_EH_PARK actions from eh_info into eh_context.i
	 * ourselves at the beginning of each pass over the loop.
	 *
	 * Additionally, all write accesses to &ap->park_req_pending
	 * through reinit_completion() (see below) or complete_all()
	 * (see ata_scsi_park_store()) are protected by the host lock.
	 * As a result we have that park_req_pending.done is zero on
	 * exit from this function, i.e. when ATA_EH_PARK actions for
	 * *all* devices on port ap have been pulled into the
	 * respective eh_context structs. If, and only if,
	 * park_req_pending.done is non-zero by the time we reach
	 * wait_for_completion_timeout(), another ATA_EH_PARK action
	 * has been scheduled for at least one of the devices on port
	 * ap and we have to cycle over the do {} while () loop in
	 * ata_eh_recover() again.
	 */

	spin_lock_irqsave(ap->lock, flags);
	reinit_completion(&ap->park_req_pending);
	ata_for_each_link(link, ap, EDGE) {
		ata_for_each_dev(dev, link, ALL) {
			struct ata_eh_info *ehi = &link->eh_info;

			link->eh_context.i.dev_action[dev->devno] |=
				ehi->dev_action[dev->devno] & ATA_EH_PARK;
			ata_eh_clear_action(link, dev, ehi, ATA_EH_PARK);
		}
	}
	spin_unlock_irqrestore(ap->lock, flags);
}
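/*
 * Park requests originate in userspace through the unload_heads sysfs
 * attribute handled by ata_scsi_park_store(), e.g. (illustrative shell
 * usage; sdX stands for the target disk):
 *
 *	# park the heads of sdX for 2 seconds
 *	echo 2000 > /sys/block/sdX/device/unload_heads
 *
 * which schedules ATA_EH_PARK and signals &ap->park_req_pending, waking
 * the wait_for_completion_timeout() in ata_eh_recover().
 */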
static void ata_eh_park_issue_cmd(struct ata_device *dev, int park)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	struct ata_taskfile tf;
	unsigned int err_mask;

	ata_tf_init(dev, &tf);
	if (park) {
		ehc->unloaded_mask |= 1 << dev->devno;
		tf.command = ATA_CMD_IDLEIMMEDIATE;
		tf.feature = 0x44;
		tf.lbal = 0x4c;
		tf.lbam = 0x4e;
		tf.lbah = 0x55;
	} else {
		ehc->unloaded_mask &= ~(1 << dev->devno);
		tf.command = ATA_CMD_CHK_POWER;
	}

	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
	tf.protocol = ATA_PROT_NODATA;
	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (park && (err_mask || tf.lbal != 0xc4)) {
		ata_dev_err(dev, "head unload failed!\n");
		ehc->unloaded_mask &= ~(1 << dev->devno);
	}
}
static int ata_eh_revalidate_and_attach(struct ata_link *link,
					struct ata_device **r_failed_dev)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	struct ata_device *dev;
	unsigned int new_mask = 0;
	unsigned long flags;
	int rc = 0;

	/* For PATA drive side cable detection to work, IDENTIFY must
	 * be done backwards such that PDIAG- is released by the slave
	 * device before the master device is identified.
	 */
	ata_for_each_dev(dev, link, ALL_REVERSE) {
		unsigned int action = ata_eh_dev_action(dev);
		unsigned int readid_flags = 0;

		if (ehc->i.flags & ATA_EHI_DID_RESET)
			readid_flags |= ATA_READID_POSTRESET;

		if ((action & ATA_EH_REVALIDATE) && ata_dev_enabled(dev)) {
			WARN_ON(dev->class == ATA_DEV_PMP);

			/*
			 * The link may be in a deep sleep, wake it up.
			 *
			 * If the link is in deep sleep, ata_phys_link_offline()
			 * will return true, causing the revalidation to fail,
			 * which leads to a (potentially) needless hard reset.
			 *
			 * ata_eh_recover() will later restore the link policy
			 * to ap->target_lpm_policy after revalidation is done.
			 */
			if (link->lpm_policy > ATA_LPM_MAX_POWER) {
				rc = ata_eh_set_lpm(link, ATA_LPM_MAX_POWER,
						    r_failed_dev);
				if (rc)
					goto err;
			}

			if (ata_phys_link_offline(ata_dev_phys_link(dev))) {
				rc = -EIO;
				goto err;
			}

			ata_eh_about_to_do(link, dev, ATA_EH_REVALIDATE);
			rc = ata_dev_revalidate(dev, ehc->classes[dev->devno],
						readid_flags);
			if (rc)
				goto err;

			ata_eh_done(link, dev, ATA_EH_REVALIDATE);

			/* Configuration may have changed, reconfigure
			 * transfer mode.
			 */
			ehc->i.flags |= ATA_EHI_SETMODE;

			/* schedule the scsi_rescan_device() here */
			schedule_delayed_work(&ap->scsi_rescan_task, 0);
		} else if (dev->class == ATA_DEV_UNKNOWN &&
			   ehc->tries[dev->devno] &&
			   ata_class_enabled(ehc->classes[dev->devno])) {
			/* Temporarily set dev->class, it will be
			 * permanently set once all configurations are
			 * complete.  This is necessary because new
			 * device configuration is done in two
			 * separate loops.
			 */
			dev->class = ehc->classes[dev->devno];

			if (dev->class == ATA_DEV_PMP)
				rc = sata_pmp_attach(dev);
			else
				rc = ata_dev_read_id(dev, &dev->class,
						     readid_flags, dev->id);

			/* read_id might have changed class, store and reset */
			ehc->classes[dev->devno] = dev->class;
			dev->class = ATA_DEV_UNKNOWN;

			switch (rc) {
			case 0:
				/* clear error info accumulated during probe */
				ata_ering_clear(&dev->ering);
				new_mask |= 1 << dev->devno;
				break;
			case -ENOENT:
				/* IDENTIFY was issued to non-existent
				 * device.  No need to reset.  Just
				 * thaw and ignore the device.
				 */
				ata_eh_thaw_port(ap);
				break;
			default:
				goto err;
			}
		}
	}

	/* PDIAG- should have been released, ask cable type if post-reset */
	if ((ehc->i.flags & ATA_EHI_DID_RESET) && ata_is_host_link(link)) {
		if (ap->ops->cable_detect)
			ap->cbl = ap->ops->cable_detect(ap);
		ata_force_cbl(ap);
	}

	/* Configure new devices forward such that user doesn't see
	 * device detection messages backwards.
	 */
	ata_for_each_dev(dev, link, ALL) {
		if (!(new_mask & (1 << dev->devno)))
			continue;

		dev->class = ehc->classes[dev->devno];

		if (dev->class == ATA_DEV_PMP)
			continue;

		ehc->i.flags |= ATA_EHI_PRINTINFO;
		rc = ata_dev_configure(dev);
		ehc->i.flags &= ~ATA_EHI_PRINTINFO;
		if (rc) {
			dev->class = ATA_DEV_UNKNOWN;
			goto err;
		}

		spin_lock_irqsave(ap->lock, flags);
		ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
		spin_unlock_irqrestore(ap->lock, flags);

		/* new device discovered, configure xfermode */
		ehc->i.flags |= ATA_EHI_SETMODE;
	}

	return 0;

 err:
	dev->flags &= ~ATA_DFLAG_RESUMING;
	*r_failed_dev = dev;
	return rc;
}
/**
 *	ata_set_mode - Program timings and issue SET FEATURES - XFER
 *	@link: link on which timings will be programmed
 *	@r_failed_dev: out parameter for failed device
 *
 *	Set ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
 *	ata_set_mode() fails, pointer to the failing device is
 *	returned in @r_failed_dev.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */
int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
{
	struct ata_port *ap = link->ap;
	struct ata_device *dev;
	int rc;

	/* if data transfer is verified, clear DUBIOUS_XFER on ering top */
	ata_for_each_dev(dev, link, ENABLED) {
		if (!(dev->flags & ATA_DFLAG_DUBIOUS_XFER)) {
			struct ata_ering_entry *ent;

			ent = ata_ering_top(&dev->ering);
			if (ent)
				ent->eflags &= ~ATA_EFLAG_DUBIOUS_XFER;
		}
	}

	/* has private set_mode? */
	if (ap->ops->set_mode)
		rc = ap->ops->set_mode(link, r_failed_dev);
	else
		rc = ata_do_set_mode(link, r_failed_dev);

	/* if transfer mode has changed, set DUBIOUS_XFER on device */
	ata_for_each_dev(dev, link, ENABLED) {
		struct ata_eh_context *ehc = &link->eh_context;
		u8 saved_xfer_mode = ehc->saved_xfer_mode[dev->devno];
		u8 saved_ncq = !!(ehc->saved_ncq_enabled & (1 << dev->devno));

		if (dev->xfer_mode != saved_xfer_mode ||
		    ata_ncq_enabled(dev) != saved_ncq)
			dev->flags |= ATA_DFLAG_DUBIOUS_XFER;
	}

	return rc;
}
/**
 *	atapi_eh_clear_ua - Clear ATAPI UNIT ATTENTION after reset
 *	@dev: ATAPI device to clear UA for
 *
 *	Resets and other operations can make an ATAPI device raise
 *	UNIT ATTENTION which causes the next operation to fail.  This
 *	function clears UA.
 *
 *	LOCKING:
 *	EH context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
static int atapi_eh_clear_ua(struct ata_device *dev)
{
	int i;

	for (i = 0; i < ATA_EH_UA_TRIES; i++) {
		u8 *sense_buffer = dev->sector_buf;
		u8 sense_key = 0;
		unsigned int err_mask;

		err_mask = atapi_eh_tur(dev, &sense_key);
		if (err_mask != 0 && err_mask != AC_ERR_DEV) {
			ata_dev_warn(dev,
				     "TEST_UNIT_READY failed (err_mask=0x%x)\n",
				     err_mask);
			return -EIO;
		}

		if (!err_mask || sense_key != UNIT_ATTENTION)
			return 0;

		err_mask = atapi_eh_request_sense(dev, sense_buffer, sense_key);
		if (err_mask) {
			ata_dev_warn(dev, "failed to clear "
				"UNIT ATTENTION (err_mask=0x%x)\n", err_mask);
			return -EIO;
		}
	}

	ata_dev_warn(dev, "UNIT ATTENTION persists after %d tries\n",
		     ATA_EH_UA_TRIES);

	return 0;
}
/**
 *	ata_eh_maybe_retry_flush - Retry FLUSH if necessary
 *	@dev: ATA device which may need FLUSH retry
 *
 *	If @dev failed FLUSH, it needs to be reported upper layer
 *	immediately as it means that @dev failed to remap and already
 *	lost at least a sector and further FLUSH retrials won't make
 *	any difference to the lost sector.  However, if FLUSH failed
 *	for other reasons, for example transmission error, FLUSH needs
 *	to be retried.
 *
 *	This function determines whether FLUSH failure retry is
 *	necessary and performs it if so.
 *
 *	RETURNS:
 *	0 if EH can continue, -errno if EH needs to be repeated.
 */
static int ata_eh_maybe_retry_flush(struct ata_device *dev)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	struct ata_queued_cmd *qc;
	struct ata_taskfile tf;
	unsigned int err_mask;
	int rc = 0;

	/* did flush fail for this device? */
	if (!ata_tag_valid(link->active_tag))
		return 0;

	qc = __ata_qc_from_tag(ap, link->active_tag);
	if (qc->dev != dev || (qc->tf.command != ATA_CMD_FLUSH_EXT &&
			       qc->tf.command != ATA_CMD_FLUSH))
		return 0;

	/* if the device failed it, it should be reported to upper layers */
	if (qc->err_mask & AC_ERR_DEV)
		return 0;

	/* flush failed for some other reason, give it another shot */
	ata_tf_init(dev, &tf);

	tf.command = qc->tf.command;
	tf.flags |= ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;

	ata_dev_warn(dev, "retrying FLUSH 0x%x Emask 0x%x\n",
		     tf.command, qc->err_mask);

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (!err_mask) {
		/*
		 * FLUSH is complete but there's no way to
		 * successfully complete a failed command from EH.
		 * Making sure retry is allowed at least once and
		 * retrying it should do the trick - whatever was in
		 * the cache is already on the platter and this won't
		 * cause infinite loop.
		 */
		qc->scsicmd->allowed = max(qc->scsicmd->allowed, 1);
	} else {
		ata_dev_warn(dev, "FLUSH failed Emask 0x%x\n",
			     err_mask);
		rc = -EIO;

		/* if device failed it, report it to upper layers */
		if (err_mask & AC_ERR_DEV) {
			qc->err_mask |= AC_ERR_DEV;
			qc->result_tf = tf;
			if (!ata_port_is_frozen(ap))
				rc = 0;
		}
	}
	return rc;
}
/**
 *	ata_eh_set_lpm - configure SATA interface power management
 *	@link: link to configure power management
 *	@policy: the link power management policy
 *	@r_failed_dev: out parameter for failed device
 *
 *	Enable SATA Interface power management.  This will enable
 *	Device Interface Power Management (DIPM) for min_power and
 *	medium_power_with_dipm policies, and then call driver specific
 *	callbacks for enabling Host Initiated Power management.
 *
 *	LOCKING:
 *	EH context.
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
			  struct ata_device **r_failed_dev)
{
	struct ata_port *ap = ata_is_host_link(link) ? link->ap : NULL;
	struct ata_eh_context *ehc = &link->eh_context;
	struct ata_device *dev, *link_dev = NULL, *lpm_dev = NULL;
	enum ata_lpm_policy old_policy = link->lpm_policy;
	bool no_dipm = link->ap->flags & ATA_FLAG_NO_DIPM;
	unsigned int hints = ATA_LPM_EMPTY | ATA_LPM_HIPM;
	unsigned int err_mask;
	int rc;

	/* if the link or host doesn't do LPM, noop */
	if (!IS_ENABLED(CONFIG_SATA_HOST) ||
	    (link->flags & ATA_LFLAG_NO_LPM) || (ap && !ap->ops->set_lpm))
		return 0;

	/*
	 * DIPM is enabled only for MIN_POWER as some devices
	 * misbehave when the host NACKs transition to SLUMBER.  Order
	 * device and link configurations such that the host always
	 * allows DIPM requests.
	 */
	ata_for_each_dev(dev, link, ENABLED) {
		bool hipm = ata_id_has_hipm(dev->id);
		bool dipm = ata_id_has_dipm(dev->id) && !no_dipm;

		/* find the first enabled and LPM enabled devices */
		if (!link_dev)
			link_dev = dev;

		if (!lpm_dev && (hipm || dipm))
			lpm_dev = dev;

		hints &= ~ATA_LPM_EMPTY;
		if (!hipm)
			hints &= ~ATA_LPM_HIPM;

		/* disable DIPM before changing link config */
		if (policy < ATA_LPM_MED_POWER_WITH_DIPM && dipm) {
			err_mask = ata_dev_set_feature(dev,
					SETFEATURES_SATA_DISABLE, SATA_DIPM);
			if (err_mask && err_mask != AC_ERR_DEV) {
				ata_dev_warn(dev,
					     "failed to disable DIPM, Emask 0x%x\n",
					     err_mask);
				rc = -EIO;
				goto fail;
			}
		}
	}

	if (ap) {
		rc = ap->ops->set_lpm(link, policy, hints);
		if (!rc && ap->slave_link)
			rc = ap->ops->set_lpm(ap->slave_link, policy, hints);
	} else
		rc = sata_pmp_set_lpm(link, policy, hints);

	/*
	 * Attribute link config failure to the first (LPM) enabled
	 * device on the link.
	 */
	if (rc) {
		if (rc == -EOPNOTSUPP) {
			link->flags |= ATA_LFLAG_NO_LPM;
			return 0;
		}
		dev = lpm_dev ? lpm_dev : link_dev;
		goto fail;
	}

	/*
	 * Low level driver acked the transition.  Issue DIPM command
	 * with the new policy set.
	 */
	link->lpm_policy = policy;
	if (ap && ap->slave_link)
		ap->slave_link->lpm_policy = policy;

	/* host config updated, enable DIPM if transitioning to MIN_POWER */
	ata_for_each_dev(dev, link, ENABLED) {
		if (policy >= ATA_LPM_MED_POWER_WITH_DIPM && !no_dipm &&
		    ata_id_has_dipm(dev->id)) {
			err_mask = ata_dev_set_feature(dev,
					SETFEATURES_SATA_ENABLE, SATA_DIPM);
			if (err_mask && err_mask != AC_ERR_DEV) {
				ata_dev_warn(dev,
					"failed to enable DIPM, Emask 0x%x\n",
					err_mask);
				rc = -EIO;
				goto fail;
			}
		}
	}

	link->last_lpm_change = jiffies;
	link->flags |= ATA_LFLAG_CHANGED;

	return 0;

fail:
	/* restore the old policy */
	link->lpm_policy = old_policy;
	if (ap && ap->slave_link)
		ap->slave_link->lpm_policy = old_policy;

	/* if no device or only one more chance is left, disable LPM */
	if (!dev || ehc->tries[dev->devno] <= 2) {
		ata_link_warn(link, "disabling LPM on the link\n");
		link->flags |= ATA_LFLAG_NO_LPM;
	}
	if (r_failed_dev)
		*r_failed_dev = dev;
	return rc;
}
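/*
 * The @policy argument normally tracks ap->target_lpm_policy, which
 * userspace selects through the SCSI host sysfs attribute (illustrative
 * shell usage; host0 stands for the port's SCSI host):
 *
 *	echo med_power_with_dipm > \
 *		/sys/class/scsi_host/host0/link_power_management_policy
 *
 * EH then notices link->lpm_policy != ap->target_lpm_policy in
 * ata_eh_recover() and calls this function to apply the transition.
 */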
int ata_link_nr_enabled(struct ata_link *link)
{
	struct ata_device *dev;
	int cnt = 0;

	ata_for_each_dev(dev, link, ENABLED)
		cnt++;
	return cnt;
}
static int ata_link_nr_vacant(struct ata_link *link)
{
	struct ata_device *dev;
	int cnt = 0;

	ata_for_each_dev(dev, link, ALL)
		if (dev->class == ATA_DEV_UNKNOWN)
			cnt++;
	return cnt;
}
static int ata_eh_skip_recovery(struct ata_link *link)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	struct ata_device *dev;

	/* skip disabled links */
	if (link->flags & ATA_LFLAG_DISABLED)
		return 1;

	/* skip if explicitly requested */
	if (ehc->i.flags & ATA_EHI_NO_RECOVERY)
		return 1;

	/* thaw frozen port and recover failed devices */
	if (ata_port_is_frozen(ap) || ata_link_nr_enabled(link))
		return 0;

	/* reset at least once if reset is requested */
	if ((ehc->i.action & ATA_EH_RESET) &&
	    !(ehc->i.flags & ATA_EHI_DID_RESET))
		return 0;

	/* skip if class codes for all vacant slots are ATA_DEV_NONE */
	ata_for_each_dev(dev, link, ALL) {
		if (dev->class == ATA_DEV_UNKNOWN &&
		    ehc->classes[dev->devno] != ATA_DEV_NONE)
			return 0;
	}

	return 1;
}
static int ata_count_probe_trials_cb(struct ata_ering_entry *ent, void *void_arg)
{
	u64 interval = msecs_to_jiffies(ATA_EH_PROBE_TRIAL_INTERVAL);
	u64 now = get_jiffies_64();
	int *trials = void_arg;

	if ((ent->eflags & ATA_EFLAG_OLD_ER) ||
	    (ent->timestamp < now - min(now, interval)))
		return -1;

	(*trials)++;
	return 0;
}
static int ata_eh_schedule_probe(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	struct ata_link *link = ata_dev_phys_link(dev);
	int trials = 0;

	if (!(ehc->i.probe_mask & (1 << dev->devno)) ||
	    (ehc->did_probe_mask & (1 << dev->devno)))
		return 0;

	ata_eh_detach_dev(dev);
	ata_dev_init(dev);
	ehc->did_probe_mask |= (1 << dev->devno);
	ehc->i.action |= ATA_EH_RESET;
	ehc->saved_xfer_mode[dev->devno] = 0;
	ehc->saved_ncq_enabled &= ~(1 << dev->devno);

	/* the link maybe in a deep sleep, wake it up */
	if (link->lpm_policy > ATA_LPM_MAX_POWER) {
		if (ata_is_host_link(link))
			link->ap->ops->set_lpm(link, ATA_LPM_MAX_POWER,
					       ATA_LPM_EMPTY);
		else
			sata_pmp_set_lpm(link, ATA_LPM_MAX_POWER,
					 ATA_LPM_EMPTY);
	}

	/* Record and count probe trials on the ering.  The specific
	 * error mask used is irrelevant.  Because a successful device
	 * detection clears the ering, this count accumulates only if
	 * there are consecutive failed probes.
	 *
	 * If the count is equal to or higher than ATA_EH_PROBE_TRIALS
	 * in the last ATA_EH_PROBE_TRIAL_INTERVAL, link speed is
	 * forced to 1.5Gbps.
	 *
	 * This is to work around cases where failed link speed
	 * negotiation results in device misdetection leading to
	 * infinite DEVXCHG or PHRDY CHG events.
	 */
	ata_ering_record(&dev->ering, 0, AC_ERR_OTHER);
	ata_ering_map(&dev->ering, ata_count_probe_trials_cb, &trials);

	if (trials > ATA_EH_PROBE_TRIALS)
		sata_down_spd_limit(link, 1);

	return 1;
}
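/*
 * Worked example: with ATA_EH_PROBE_TRIALS == 2 and
 * ATA_EH_PROBE_TRIAL_INTERVAL == 60000 (one minute), a device whose probe
 * fails three times within the last minute accumulates trials = 3 > 2, so
 * the link is forced down to 1.5Gbps via sata_down_spd_limit(link, 1),
 * breaking DEVXCHG/PHRDY CHG storms caused by failed speed negotiation.
 */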
static int ata_eh_handle_dev_fail(struct ata_device *dev, int err)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;

	/* -EAGAIN from EH routine indicates retry without prejudice.
	 * The requester is responsible for ensuring forward progress.
	 */
	if (err != -EAGAIN)
		ehc->tries[dev->devno]--;

	switch (err) {
	case -ENODEV:
		/* device missing or wrong IDENTIFY data, schedule probing */
		ehc->i.probe_mask |= (1 << dev->devno);
		fallthrough;
	case -EINVAL:
		/* give it just one more chance */
		ehc->tries[dev->devno] = min(ehc->tries[dev->devno], 1);
		fallthrough;
	case -EIO:
		if (ehc->tries[dev->devno] == 1) {
			/* This is the last chance, better to slow
			 * down than lose it.
			 */
			sata_down_spd_limit(ata_dev_phys_link(dev), 0);
			if (dev->pio_mode > XFER_PIO_0)
				ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
		}
	}

	if (ata_dev_enabled(dev) && !ehc->tries[dev->devno]) {
		/* disable device if it has used up all its chances */
		ata_dev_disable(dev);

		/* detach if offline */
		if (ata_phys_link_offline(ata_dev_phys_link(dev)))
			ata_eh_detach_dev(dev);

		/* schedule probe if necessary */
		if (ata_eh_schedule_probe(dev)) {
			ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
			memset(ehc->cmd_timeout_idx[dev->devno], 0,
			       sizeof(ehc->cmd_timeout_idx[dev->devno]));
		}

		return 1;
	} else {
		ehc->i.action |= ATA_EH_RESET;
		return 0;
	}
}
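/*
 * Retry budget example: each device starts an EH run with
 * ehc->tries[devno] = ATA_EH_DEV_TRIES.  An -ENODEV or -EINVAL failure
 * clamps the budget to a single remaining try, and on that last try the
 * link speed and transfer mode are lowered first, so a marginal device
 * gets one final chance at the most conservative settings before being
 * disabled.
 */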
/**
 *	ata_eh_recover - recover host port after error
 *	@ap: host port to recover
 *	@prereset: prereset method (can be NULL)
 *	@softreset: softreset method (can be NULL)
 *	@hardreset: hardreset method (can be NULL)
 *	@postreset: postreset method (can be NULL)
 *	@r_failed_link: out parameter for failed link
 *
 *	This is the alpha and omega, eum and yang, heart and soul of
 *	libata exception handling.  On entry, actions required to
 *	recover each link and hotplug requests are recorded in the
 *	link's eh_context.  This function executes all the operations
 *	with appropriate retrials and fallbacks to resurrect failed
 *	devices, detach goners and greet newcomers.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
		   ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
		   ata_postreset_fn_t postreset,
		   struct ata_link **r_failed_link)
{
	struct ata_link *link;
	struct ata_device *dev;
	int rc, nr_fails;
	unsigned long flags, deadline;

	/* prep for recovery */
	ata_for_each_link(link, ap, EDGE) {
		struct ata_eh_context *ehc = &link->eh_context;

		/* re-enable link? */
		if (ehc->i.action & ATA_EH_ENABLE_LINK) {
			ata_eh_about_to_do(link, NULL, ATA_EH_ENABLE_LINK);
			spin_lock_irqsave(ap->lock, flags);
			link->flags &= ~ATA_LFLAG_DISABLED;
			spin_unlock_irqrestore(ap->lock, flags);
			ata_eh_done(link, NULL, ATA_EH_ENABLE_LINK);
		}

		ata_for_each_dev(dev, link, ALL) {
			if (link->flags & ATA_LFLAG_NO_RETRY)
				ehc->tries[dev->devno] = 1;
			else
				ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;

			/* collect port action mask recorded in dev actions */
			ehc->i.action |= ehc->i.dev_action[dev->devno] &
					 ~ATA_EH_PERDEV_MASK;
			ehc->i.dev_action[dev->devno] &= ATA_EH_PERDEV_MASK;

			/* process hotplug request */
			if (dev->flags & ATA_DFLAG_DETACH)
				ata_eh_detach_dev(dev);

			/* schedule probe if necessary */
			if (!ata_dev_enabled(dev))
				ata_eh_schedule_probe(dev);
		}
	}

 retry:
	rc = 0;

	/* if UNLOADING, finish immediately */
	if (ap->pflags & ATA_PFLAG_UNLOADING)
		goto out;

	/* prep for EH */
	ata_for_each_link(link, ap, EDGE) {
		struct ata_eh_context *ehc = &link->eh_context;

		/* skip EH if possible. */
		if (ata_eh_skip_recovery(link))
			ehc->i.action = 0;

		ata_for_each_dev(dev, link, ALL)
			ehc->classes[dev->devno] = ATA_DEV_UNKNOWN;
	}

	/* reset */
	ata_for_each_link(link, ap, EDGE) {
		struct ata_eh_context *ehc = &link->eh_context;

		if (!(ehc->i.action & ATA_EH_RESET))
			continue;

		rc = ata_eh_reset(link, ata_link_nr_vacant(link),
				  prereset, softreset, hardreset, postreset);
		if (rc) {
			ata_link_err(link, "reset failed, giving up\n");
			goto out;
		}
	}

	do {
		unsigned long now;

		/*
		 * clears ATA_EH_PARK in eh_info and resets
		 * ap->park_req_pending
		 */
		ata_eh_pull_park_action(ap);

		deadline = jiffies;
		ata_for_each_link(link, ap, EDGE) {
			ata_for_each_dev(dev, link, ALL) {
				struct ata_eh_context *ehc = &link->eh_context;
				unsigned long tmp;

				if (dev->class != ATA_DEV_ATA &&
				    dev->class != ATA_DEV_ZAC)
					continue;
				if (!(ehc->i.dev_action[dev->devno] &
				      ATA_EH_PARK))
					continue;
				tmp = dev->unpark_deadline;
				if (time_before(deadline, tmp))
					deadline = tmp;
				else if (time_before_eq(tmp, jiffies))
					continue;
				if (ehc->unloaded_mask & (1 << dev->devno))
					continue;

				ata_eh_park_issue_cmd(dev, 1);
			}
		}

		now = jiffies;
		if (time_before_eq(deadline, now))
			break;

		ata_eh_release(ap);
		deadline = wait_for_completion_timeout(&ap->park_req_pending,
						       deadline - now);
		ata_eh_acquire(ap);
	} while (deadline);
	ata_for_each_link(link, ap, EDGE) {
		ata_for_each_dev(dev, link, ALL) {
			if (!(link->eh_context.unloaded_mask &
			      (1 << dev->devno)))
				continue;

			ata_eh_park_issue_cmd(dev, 0);
			ata_eh_done(link, dev, ATA_EH_PARK);
		}
	}

	/* the rest */
	nr_fails = 0;
	ata_for_each_link(link, ap, PMP_FIRST) {
		struct ata_eh_context *ehc = &link->eh_context;

		if (sata_pmp_attached(ap) && ata_is_host_link(link))
			goto config_lpm;

		/* revalidate existing devices and attach new ones */
		rc = ata_eh_revalidate_and_attach(link, &dev);
		if (rc)
			goto rest_fail;

		/* if PMP got attached, return, pmp EH will take care of it */
		if (link->device->class == ATA_DEV_PMP) {
			ehc->i.action = 0;
			return 0;
		}

		/* configure transfer mode if necessary */
		if (ehc->i.flags & ATA_EHI_SETMODE) {
			rc = ata_set_mode(link, &dev);
			if (rc)
				goto rest_fail;
			ehc->i.flags &= ~ATA_EHI_SETMODE;
		}

		/* If reset has been issued, clear UA to avoid
		 * disrupting the current users of the device.
		 */
		if (ehc->i.flags & ATA_EHI_DID_RESET) {
			ata_for_each_dev(dev, link, ALL) {
				if (dev->class != ATA_DEV_ATAPI)
					continue;
				rc = atapi_eh_clear_ua(dev);
				if (rc)
					goto rest_fail;
				if (zpodd_dev_enabled(dev))
					zpodd_post_poweron(dev);
			}
		}

		/*
		 * Make sure to transition devices to the active power mode
		 * if needed (e.g. if we were scheduled on system resume).
		 */
		ata_for_each_dev(dev, link, ENABLED) {
			if (ehc->i.dev_action[dev->devno] & ATA_EH_SET_ACTIVE) {
				ata_dev_power_set_active(dev);
				ata_eh_done(link, dev, ATA_EH_SET_ACTIVE);
			}
		}

		/* retry flush if necessary */
		ata_for_each_dev(dev, link, ALL) {
			if (dev->class != ATA_DEV_ATA &&
			    dev->class != ATA_DEV_ZAC)
				continue;
			rc = ata_eh_maybe_retry_flush(dev);
			if (rc)
				goto rest_fail;
		}

	config_lpm:
		/* configure link power saving */
		if (link->lpm_policy != ap->target_lpm_policy) {
			rc = ata_eh_set_lpm(link, ap->target_lpm_policy, &dev);
			if (rc)
				goto rest_fail;
		}

		/* this link is okay now */
		ehc->i.flags = 0;
		continue;

	rest_fail:
		nr_fails++;
		if (dev)
			ata_eh_handle_dev_fail(dev, rc);

		if (ata_port_is_frozen(ap)) {
			/* PMP reset requires working host port.
			 * Can't retry if it's frozen.
			 */
			if (sata_pmp_attached(ap))
				goto out;
			break;
		}
	}

	if (nr_fails)
		goto retry;

 out:
	if (rc && r_failed_link)
		*r_failed_link = link;

	return rc;
}
/**
 *	ata_eh_finish - finish up EH
 *	@ap: host port to finish EH for
 *
 *	Recovery is complete.  Clean up EH states and retry or finish
 *	failed qcs.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_finish(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	int tag;

	/* retry or finish qcs */
	ata_qc_for_each_raw(ap, qc, tag) {
		if (!(qc->flags & ATA_QCFLAG_EH))
			continue;

		if (qc->err_mask) {
			/* FIXME: Once EH migration is complete,
			 * generate sense data in this function,
			 * considering both err_mask and tf.
			 */
			if (qc->flags & ATA_QCFLAG_RETRY) {
				/*
				 * Since qc->err_mask is set, ata_eh_qc_retry()
				 * will not increment scmd->allowed, so upper
				 * layer will only retry the command if it has
				 * not already been retried too many times.
				 */
				ata_eh_qc_retry(qc);
			} else {
				ata_eh_qc_complete(qc);
			}
		} else {
			if (qc->flags & ATA_QCFLAG_SENSE_VALID ||
			    qc->flags & ATA_QCFLAG_EH_SUCCESS_CMD) {
				ata_eh_qc_complete(qc);
			} else {
				/* feed zero TF to sense generation */
				memset(&qc->result_tf, 0, sizeof(qc->result_tf));
				/*
				 * Since qc->err_mask is not set,
				 * ata_eh_qc_retry() will increment
				 * scmd->allowed, so upper layer is guaranteed
				 * to retry the command.
				 */
				ata_eh_qc_retry(qc);
			}
		}
	}

	/* make sure nr_active_links is zero after EH */
	WARN_ON(ap->nr_active_links);
	ap->nr_active_links = 0;
}
/**
 *	ata_do_eh - do standard error handling
 *	@ap: host port to handle error for
 *
 *	@prereset: prereset method (can be NULL)
 *	@softreset: softreset method (can be NULL)
 *	@hardreset: hardreset method (can be NULL)
 *	@postreset: postreset method (can be NULL)
 *
 *	Perform standard error handling sequence.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
	       ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
	       ata_postreset_fn_t postreset)
{
	struct ata_device *dev;
	int rc;

	ata_eh_autopsy(ap);
	ata_eh_report(ap);

	rc = ata_eh_recover(ap, prereset, softreset, hardreset, postreset,
			    NULL);
	if (rc) {
		ata_for_each_dev(dev, &ap->link, ALL)
			ata_dev_disable(dev);
	}

	ata_eh_finish(ap);
}
/**
 *	ata_std_error_handler - standard error handler
 *	@ap: host port to handle error for
 *
 *	Standard error handler
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_std_error_handler(struct ata_port *ap)
{
	struct ata_port_operations *ops = ap->ops;
	ata_reset_fn_t hardreset = ops->hardreset;

	/* ignore built-in hardreset if SCR access is not available */
	if (hardreset == sata_std_hardreset && !sata_scr_valid(&ap->link))
		hardreset = NULL;

	ata_do_eh(ap, ops->prereset, ops->softreset, hardreset, ops->postreset);
}
EXPORT_SYMBOL_GPL(ata_std_error_handler);
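/*
 * Illustrative sketch (not part of this file): low-level drivers normally
 * pick this handler up by inheriting one of the standard operation
 * templates, overriding only what their hardware needs, e.g.:
 *
 *	static struct ata_port_operations my_sata_ops = {
 *		.inherits	= &sata_port_ops,  // supplies error_handler
 *		.hardreset	= sata_std_hardreset,
 *	};
 *
 * my_sata_ops is a made-up name; sata_port_ops and sata_std_hardreset are
 * the real libata templates, and the inherited base operations already
 * point error_handler at ata_std_error_handler.
 */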
#ifdef CONFIG_PM
/**
 *	ata_eh_handle_port_suspend - perform port suspend operation
 *	@ap: port to suspend
 *
 *	Suspend @ap.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
static void ata_eh_handle_port_suspend(struct ata_port *ap)
{
	unsigned long flags;
	int rc = 0;
	struct ata_device *dev;
	struct ata_link *link;

	/* are we suspending? */
	spin_lock_irqsave(ap->lock, flags);
	if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
	    ap->pm_mesg.event & PM_EVENT_RESUME) {
		spin_unlock_irqrestore(ap->lock, flags);
		return;
	}
	spin_unlock_irqrestore(ap->lock, flags);

	WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED);

	/*
	 * We will reach this point for all of the PM events:
	 * PM_EVENT_SUSPEND (if runtime pm, PM_EVENT_AUTO will also be set)
	 * PM_EVENT_FREEZE, and PM_EVENT_HIBERNATE.
	 *
	 * We do not want to perform disk spin down for PM_EVENT_FREEZE.
	 * (Spin down will be performed by the subsequent PM_EVENT_HIBERNATE.)
	 */
	if (!(ap->pm_mesg.event & PM_EVENT_FREEZE)) {
		/* Set all devices attached to the port in standby mode */
		ata_for_each_link(link, ap, HOST_FIRST) {
			ata_for_each_dev(dev, link, ENABLED)
				ata_dev_power_set_standby(dev);
		}
	}

	/*
	 * If we have a ZPODD attached, check its zero
	 * power ready status before the port is frozen.
	 * Only needed for runtime suspend.
	 */
	if (PMSG_IS_AUTO(ap->pm_mesg)) {
		ata_for_each_dev(dev, &ap->link, ENABLED) {
			if (zpodd_dev_enabled(dev))
				zpodd_on_suspend(dev);
		}
	}

	/* suspend */
	ata_eh_freeze_port(ap);

	if (ap->ops->port_suspend)
		rc = ap->ops->port_suspend(ap, ap->pm_mesg);

	ata_acpi_set_state(ap, ap->pm_mesg);

	/* update the flags */
	spin_lock_irqsave(ap->lock, flags);

	ap->pflags &= ~ATA_PFLAG_PM_PENDING;
	if (rc == 0)
		ap->pflags |= ATA_PFLAG_SUSPENDED;
	else if (ata_port_is_frozen(ap))
		ata_port_schedule_eh(ap);

	spin_unlock_irqrestore(ap->lock, flags);
}
/**
 *	ata_eh_handle_port_resume - perform port resume operation
 *	@ap: port to resume
 *
 *	Resume @ap.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
static void ata_eh_handle_port_resume(struct ata_port *ap)
{
	struct ata_link *link;
	struct ata_device *dev;
	unsigned long flags;

	/* are we resuming? */
	spin_lock_irqsave(ap->lock, flags);
	if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
	    !(ap->pm_mesg.event & PM_EVENT_RESUME)) {
		spin_unlock_irqrestore(ap->lock, flags);
		return;
	}
	spin_unlock_irqrestore(ap->lock, flags);

	WARN_ON(!(ap->pflags & ATA_PFLAG_SUSPENDED));

	/*
	 * Error timestamps are in jiffies which doesn't run while
	 * suspended and PHY events during resume isn't too uncommon.
	 * When the two are combined, it can lead to unnecessary speed
	 * downs if the machine is suspended and resumed repeatedly.
	 * Clear error history.
	 */
	ata_for_each_link(link, ap, HOST_FIRST)
		ata_for_each_dev(dev, link, ALL)
			ata_ering_clear(&dev->ering);

	ata_acpi_set_state(ap, ap->pm_mesg);

	if (ap->ops->port_resume)
		ap->ops->port_resume(ap);

	/* tell ACPI that we're resuming */
	ata_acpi_on_resume(ap);

	/* update the flags */
	spin_lock_irqsave(ap->lock, flags);
	ap->pflags &= ~(ATA_PFLAG_PM_PENDING | ATA_PFLAG_SUSPENDED);
	ap->pflags |= ATA_PFLAG_RESUMING;
	spin_unlock_irqrestore(ap->lock, flags);
}
#endif /* CONFIG_PM */