1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * Support for SATA devices on Serial Attached SCSI (SAS) controllers
5 * Copyright (C) 2006 IBM Corporation
7 * Written by: Darrick J. Wong <djwong@us.ibm.com>, IBM Corporation
10 #include <linux/scatterlist.h>
11 #include <linux/slab.h>
12 #include <linux/async.h>
13 #include <linux/export.h>
15 #include <scsi/sas_ata.h>
16 #include "sas_internal.h"
17 #include <scsi/scsi_host.h>
18 #include <scsi/scsi_device.h>
19 #include <scsi/scsi_tcq.h>
20 #include <scsi/scsi.h>
21 #include <scsi/scsi_transport.h>
22 #include <scsi/scsi_transport_sas.h>
23 #include "scsi_sas_internal.h"
24 #include "scsi_transport_api.h"
25 #include <scsi/scsi_eh.h>
/*
 * sas_to_ata_err - translate a libsas task status into an ATA error mask.
 *
 * Undelivered tasks and unknown/no-response devices map to hard AC_ERR_*
 * values; the SAS_PROTO_RESPONSE case (per the comment below) yields 0 so
 * the caller collects the error from the ending FIS instead.
 * NOTE(review): extraction gap - the switch (ts->stat) header, braces and
 * several case bodies are missing from this fragment; confirm against the
 * full file.
 */
static enum ata_completion_errors sas_to_ata_err(struct task_status_struct *ts)
	/* Cheesy attempt to translate SAS errors into ATA. Hah! */

	/* Task never reached the device: blame the bus. */
	if (ts->resp == SAS_TASK_UNDELIVERED)
		return AC_ERR_ATA_BUS;

	/* ts->resp == SAS_TASK_COMPLETE */
	/* task delivered, what happened afterwards? */
	case SAS_DEV_NO_RESPONSE:
		return AC_ERR_TIMEOUT;
		return AC_ERR_ATA_BUS;
	case SAS_DATA_UNDERRUN:
		/*
		 * Some programs that use the taskfile interface
		 * (smartctl in particular) can cause underrun
		 * problems. Ignore these errors, perhaps at our
		 */
	case SAS_DATA_OVERRUN:
	case SAS_DEVICE_UNKNOWN:
		pr_warn("%s: Saw error %d. What to do?\n",
	case SAM_STAT_CHECK_CONDITION:
	case SAS_ABORTED_TASK:
	case SAS_PROTO_RESPONSE:
		/* This means the ending_fis has the error
		 * value; return 0 here to collect it
		 */
/*
 * sas_ata_task_done - completion callback for an STP/SATA sas_task.
 *
 * Detaches the sas_task from its scsi command, records the D2H result FIS
 * in dev->sata_dev.fis (or fabricates an error FIS on a SAS-level error),
 * and completes the qc under ap->lock - unless libata already froze the
 * port and took ownership of the qc.
 * NOTE(review): extraction gaps - local declarations of 'ap' and 'flags',
 * several braces and else branches are missing from this fragment.
 */
static void sas_ata_task_done(struct sas_task *task)
	struct ata_queued_cmd *qc = task->uldd_task;
	struct domain_device *dev = task->dev;
	struct task_status_struct *stat = &task->task_status;
	struct ata_task_resp *resp = (struct ata_task_resp *)stat->buf;
	struct sas_ha_struct *sas_ha = dev->port->ha;
	enum ata_completion_errors ac;
	struct ata_link *link;

	/* Unlink task from the scsi command unless the whole HA is frozen,
	 * in which case libsas-eh keeps ownership of the sas_task. */
	spin_lock_irqsave(&dev->done_lock, flags);
	if (test_bit(SAS_HA_FROZEN, &sas_ha->state))
	else if (qc && qc->scsicmd)
		ASSIGN_SAS_TASK(qc->scsicmd, NULL);
	spin_unlock_irqrestore(&dev->done_lock, flags);

	/* check if libsas-eh got to the task before us */

	spin_lock_irqsave(ap->lock, flags);
	/* check if we lost the race with libata/sas_ata_post_internal() */
	if (unlikely(ata_port_is_frozen(ap))) {
		spin_unlock_irqrestore(ap->lock, flags);
		goto qc_already_gone;
	/* if eh is not involved and the port is frozen then the
	 * ata internal abort process has taken responsibility
	 */

	/* Good completion (or ATAPI check-condition): keep the real FIS. */
	if (stat->stat == SAS_PROTO_RESPONSE ||
	    stat->stat == SAS_SAM_STAT_GOOD ||
	    (stat->stat == SAS_SAM_STAT_CHECK_CONDITION &&
	     dev->sata_dev.class == ATA_DEV_ATAPI)) {
		memcpy(dev->sata_dev.fis, resp->ending_fis, ATA_RESP_FIS_SIZE);

		/* fis[2] is the taskfile status byte of the copied FIS. */
		if (!link->sactive) {
			qc->err_mask |= ac_err_mask(dev->sata_dev.fis[2]);
			link->eh_info.err_mask |= ac_err_mask(dev->sata_dev.fis[2]);
			if (unlikely(link->eh_info.err_mask))
				qc->flags |= ATA_QCFLAG_EH;

		ac = sas_to_ata_err(stat);
		pr_warn("%s: SAS error 0x%x\n", __func__, stat->stat);
		/* We saw a SAS error. Send a vague error. */
		if (!link->sactive) {
			link->eh_info.err_mask |= AC_ERR_DEV;
			qc->flags |= ATA_QCFLAG_EH;
		/* Fabricate a D2H FIS that reports a device abort. */
		dev->sata_dev.fis[2] = ATA_ERR | ATA_DRDY; /* tf status */
		dev->sata_dev.fis[3] = ATA_ABORTED; /* tf error */

	qc->lldd_task = NULL;
	spin_unlock_irqrestore(ap->lock, flags);
/*
 * sas_ata_qc_issue - libata qc_issue method: wrap the qc in a sas_task
 * carrying an H2D FIS (and scatterlist for data commands) and hand it to
 * the LLDD via ->lldd_execute_task().
 *
 * Called with ap->lock held (see __must_hold); the lock is dropped across
 * task allocation and LLDD submission. Returns AC_ERR_SYSTEM on failure
 * paths (initial value of 'ret').
 * NOTE(review): extraction gaps - braces, error-path labels and the
 * function's final return are missing from this fragment.
 */
static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
	__must_hold(ap->lock)
	struct sas_task *task;
	struct scatterlist *sg;
	int ret = AC_ERR_SYSTEM;
	unsigned int si, xfer = 0;
	struct ata_port *ap = qc->ap;
	struct domain_device *dev = ap->private_data;
	struct sas_ha_struct *sas_ha = dev->port->ha;
	struct Scsi_Host *host = sas_ha->shost;
	struct sas_internal *i = to_sas_internal(host->transportt);

	/* TODO: we should try to remove that unlock */
	spin_unlock(ap->lock);

	/* If the device fell off, no sense in issuing commands */
	if (test_bit(SAS_DEV_GONE, &dev->state))

	task = sas_alloc_task(GFP_ATOMIC);

	task->task_proto = SAS_PROTOCOL_STP;
	task->task_done = sas_ata_task_done;

	/* For NCQ commands, zero out the tag libata assigned us */
	if (ata_is_ncq(qc->tf.protocol))

	/* Build the H2D command FIS from the taskfile (pmp from the link). */
	ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, (u8 *)&task->ata_task.fis);
	task->uldd_task = qc;
	if (ata_is_atapi(qc->tf.protocol)) {
		memcpy(task->ata_task.atapi_packet, qc->cdb, qc->dev->cdb_len);
		task->total_xfer_len = qc->nbytes;
		task->num_scatter = qc->n_elem;
		task->data_dir = qc->dma_dir;
	} else if (!ata_is_data(qc->tf.protocol)) {
		task->data_dir = DMA_NONE;
		/* Data command: total transfer length is the sum of the
		 * DMA-mapped scatterlist entries. */
		for_each_sg(qc->sg, sg, qc->n_elem, si)
			xfer += sg_dma_len(sg);

		task->total_xfer_len = xfer;
		task->num_scatter = si;
		task->data_dir = qc->dma_dir;
	task->scatter = qc->sg;
	qc->lldd_task = task;

	task->ata_task.use_ncq = ata_is_ncq(qc->tf.protocol);
	task->ata_task.dma_xfer = ata_is_dma(qc->tf.protocol);

	if (qc->flags & ATA_QCFLAG_RESULT_TF)
		task->ata_task.return_fis_on_success = 1;

	ASSIGN_SAS_TASK(qc->scsicmd, task);

	ret = i->dft->lldd_execute_task(task, GFP_ATOMIC);
	pr_debug("lldd_execute_task returned: %d\n", ret);
	/* Submission failed: unwind the task<->command linkage. */
	ASSIGN_SAS_TASK(qc->scsicmd, NULL);
	qc->lldd_task = NULL;
/*
 * sas_ata_qc_fill_rtf - populate qc->result_tf from the D2H FIS that
 * sas_ata_task_done() stashed in dev->sata_dev.fis.
 */
static void sas_ata_qc_fill_rtf(struct ata_queued_cmd *qc)
	struct domain_device *dev = qc->ap->private_data;

	ata_tf_from_fis(dev->sata_dev.fis, &qc->result_tf);
/* Fetch the sas_internal behind @dev's host transport template. */
static struct sas_internal *dev_to_sas_internal(struct domain_device *dev)
	return to_sas_internal(dev->port->ha->shost->transportt);
/*
 * sas_get_ata_command_set - classify the attached device (ATA vs ATAPI
 * etc.) from the initial D2H FIS recorded in dev->frame_rcvd.
 *
 * Returns ATA_DEV_UNKNOWN while the device is still SAS_SATA_PENDING,
 * i.e. before a usable FIS has been captured.
 */
static int sas_get_ata_command_set(struct domain_device *dev)
	struct ata_taskfile tf;

	if (dev->dev_type == SAS_SATA_PENDING)
		return ATA_DEV_UNKNOWN;

	ata_tf_from_fis(dev->frame_rcvd, &tf);

	return ata_dev_classify(&tf);
/*
 * sas_get_ata_info - gather SATA attachment info for an expander phy.
 *
 * Copies the target protocol bits from @phy into @dev, marks the device
 * SAS_SATA_PENDING or SAS_SATA_DEV, and for an attached device issues
 * REPORT PHY SATA to the parent expander to obtain the initial D2H FIS
 * used for command-set classification.
 * NOTE(review): extraction gaps - the else branch and error handling
 * around sas_get_report_phy_sata() are missing from this fragment.
 */
int sas_get_ata_info(struct domain_device *dev, struct ex_phy *phy)
	if (phy->attached_tproto & SAS_PROTOCOL_STP)
		dev->tproto = phy->attached_tproto;
	if (phy->attached_sata_dev)
		dev->tproto |= SAS_SATA_DEV;

	if (phy->attached_dev_type == SAS_SATA_PENDING)
		dev->dev_type = SAS_SATA_PENDING;
		dev->dev_type = SAS_SATA_DEV;

		res = sas_get_report_phy_sata(dev->parent, phy->phy_id,
					      &dev->sata_dev.rps_resp);
		pr_debug("report phy sata to %016llx:%02d returned 0x%x\n",
			 SAS_ADDR(dev->parent->sas_addr),
		/* Stash the reported FIS as the device's initial frame. */
		memcpy(dev->frame_rcvd, &dev->sata_dev.rps_resp.rps.fis,
		       sizeof(struct dev_to_host_fis));
		dev->sata_dev.class = sas_get_ata_command_set(dev);
/*
 * sas_ata_clear_pending - try to finish the reset sequence for a device
 * that was SAS_SATA_PENDING by re-reading its attachment info; returns 0
 * to request another poll when the info is not yet available.
 */
static int sas_ata_clear_pending(struct domain_device *dev, struct ex_phy *phy)
	/* we weren't pending, so successfully end the reset sequence now */
	if (dev->dev_type != SAS_SATA_PENDING)

	/* hmmm, if this succeeds do we need to repost the domain_device to the
	 * lldd so it can pick up new parameters?
	 */
	res = sas_get_ata_info(dev, phy);
		return 0; /* retry */
/*
 * smp_ata_check_ready_type - query the parent expander for the device
 * type now attached at our phy; used post-reset to detect whether the
 * SATA device is still pending.
 * NOTE(review): extraction gap - the switch on 'type' and the return
 * values are missing from this fragment.
 */
int smp_ata_check_ready_type(struct ata_link *link)
	struct domain_device *dev = link->ap->private_data;
	struct sas_phy *phy = sas_get_local_phy(dev);
	struct domain_device *ex_dev = dev->parent;
	enum sas_device_type type = SAS_PHY_UNUSED;
	u8 sas_addr[SAS_ADDR_SIZE];

	res = sas_get_phy_attached_dev(ex_dev, phy->number, sas_addr, &type);
	sas_put_local_phy(phy);

	case SAS_SATA_PENDING:
EXPORT_SYMBOL_GPL(smp_ata_check_ready_type);
/*
 * smp_ata_check_ready - readiness probe for an expander-attached SATA
 * device: rediscover the parent expander phy and report ready once a
 * SATA device (or a still-pending one) shows up there.
 */
static int smp_ata_check_ready(struct ata_link *link)
	struct ata_port *ap = link->ap;
	struct domain_device *dev = ap->private_data;
	struct domain_device *ex_dev = dev->parent;
	struct sas_phy *phy = sas_get_local_phy(dev);
	struct ex_phy *ex_phy = &ex_dev->ex_dev.ex_phy[phy->number];

	res = sas_ex_phy_discover(ex_dev, phy->number);
	sas_put_local_phy(phy);

	/* break the wait early if the expander is unreachable,
	 * otherwise keep polling
	 */
	if (res != SMP_RESP_FUNC_ACC)

	switch (ex_phy->attached_dev_type) {
	case SAS_SATA_PENDING:
		if (ex_phy->attached_sata_dev)
			return sas_ata_clear_pending(dev, ex_phy);
/*
 * local_ata_check_ready - post-reset readiness probe for a device on a
 * host-attached (local) phy; delegated to the LLDD when it provides a
 * lldd_ata_check_ready method.
 */
static int local_ata_check_ready(struct ata_link *link)
	struct ata_port *ap = link->ap;
	struct domain_device *dev = ap->private_data;
	struct sas_internal *i = dev_to_sas_internal(dev);

	if (i->dft->lldd_ata_check_ready)
		return i->dft->lldd_ata_check_ready(dev);

	/* lldd's that don't implement 'ready' checking get the
	 * old default behavior of not coordinating reset
	 * recovery with libata
	 */
/*
 * sas_ata_printk - printk wrapper that prefixes the message with the
 * ata port id and the rphy device name of @ddev.
 * NOTE(review): extraction gap - the va_start/va_end handling of the
 * variadic arguments is missing from this fragment.
 */
static int sas_ata_printk(const char *level, const struct domain_device *ddev,
			  const char *fmt, ...)
	struct ata_port *ap = ddev->sata_dev.ap;
	struct device *dev = &ddev->rphy->dev;
	struct va_format vaf;

	r = printk("%s" SAS_FMT "ata%u: %s: %pV",
		   level, ap->print_id, dev_name(dev), &vaf);
/*
 * sas_ata_wait_after_reset - wait for the link to come back up after a
 * hard reset, selecting the local or expander (SMP) readiness probe
 * depending on where the device is attached.
 */
static int sas_ata_wait_after_reset(struct domain_device *dev, unsigned long deadline)
	struct sata_device *sata_dev = &dev->sata_dev;
	int (*check_ready)(struct ata_link *link);
	struct ata_port *ap = sata_dev->ap;
	struct ata_link *link = &ap->link;

	phy = sas_get_local_phy(dev);
	if (scsi_is_sas_phy_local(phy))
		check_ready = local_ata_check_ready;
	/* NOTE(review): extraction gap - the 'else' line is missing here;
	 * the SMP probe is the non-local alternative. */
		check_ready = smp_ata_check_ready;
	sas_put_local_phy(phy);

	ret = ata_wait_after_reset(link, deadline, check_ready);
	/* -EAGAIN means "keep polling", so only log real failures. */
	if (ret && ret != -EAGAIN)
		sas_ata_printk(KERN_ERR, dev, "reset failed (errno=%d)\n", ret);
/*
 * sas_ata_hard_reset - libata hardreset method: ask the LLDD for an
 * I_T nexus reset, wait for the device to come back, then report its
 * class and mark the cable type SATA.
 */
static int sas_ata_hard_reset(struct ata_link *link, unsigned int *class,
			      unsigned long deadline)
	struct ata_port *ap = link->ap;
	struct domain_device *dev = ap->private_data;
	struct sas_internal *i = dev_to_sas_internal(dev);

	ret = i->dft->lldd_I_T_nexus_reset(dev);

	if (ret != TMF_RESP_FUNC_COMPLETE)
		sas_ata_printk(KERN_DEBUG, dev, "Unable to reset ata device?\n");

	ret = sas_ata_wait_after_reset(dev, deadline);

	*class = dev->sata_dev.class;

	ap->cbl = ATA_CBL_SATA;
/*
 * notify the lldd to forget the sas_task for this internal ata command
 * that bypasses scsi-eh
 */
static void sas_ata_internal_abort(struct sas_task *task)
	struct sas_internal *si = dev_to_sas_internal(task->dev);

	/* Nothing to do if libsas already aborted or completed the task. */
	spin_lock_irqsave(&task->task_state_lock, flags);
	if (task->task_state_flags & SAS_TASK_STATE_ABORTED ||
	    task->task_state_flags & SAS_TASK_STATE_DONE) {
		spin_unlock_irqrestore(&task->task_state_lock, flags);
		pr_debug("%s: Task %p already finished.\n", __func__, task);

	task->task_state_flags |= SAS_TASK_STATE_ABORTED;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	res = si->dft->lldd_abort_task(task);

	/* Abort succeeded (or the task completed while we asked): done. */
	spin_lock_irqsave(&task->task_state_lock, flags);
	if (task->task_state_flags & SAS_TASK_STATE_DONE ||
	    res == TMF_RESP_FUNC_COMPLETE) {
		spin_unlock_irqrestore(&task->task_state_lock, flags);

	/* XXX we are not prepared to deal with ->lldd_abort_task()
	 * failures. TODO: lldds need to unconditionally forget about
	 * aborted ata tasks, otherwise we (likely) leak the sas task
	 */
	pr_warn("%s: Task %p leaked.\n", __func__, task);

	if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
		task->task_state_flags &= ~SAS_TASK_STATE_ABORTED;
	spin_unlock_irqrestore(&task->task_state_lock, flags);
/*
 * sas_ata_post_internal - libata post_internal_cmd hook: when an internal
 * qc failed, detach its sas_task and hand it to the LLDD for abort so the
 * task is not leaked.
 */
static void sas_ata_post_internal(struct ata_queued_cmd *qc)
	if (qc->flags & ATA_QCFLAG_EH)
		qc->err_mask |= AC_ERR_OTHER;

		/*
		 * Find the sas_task and kill it. By this point, libata
		 * has decided to kill the qc and has frozen the port.
		 * In this state sas_ata_task_done() will no longer free
		 * the sas_task, so we need to notify the lldd (via
		 * ->lldd_abort_task) that the task is dead and free it
		 */
		struct sas_task *task = qc->lldd_task;

		qc->lldd_task = NULL;
		task->uldd_task = NULL;
		sas_ata_internal_abort(task);
/* libata set_dmamode hook: forward to the LLDD when it implements one. */
static void sas_ata_set_dmamode(struct ata_port *ap, struct ata_device *ata_dev)
	struct domain_device *dev = ap->private_data;
	struct sas_internal *i = dev_to_sas_internal(dev);

	if (i->dft->lldd_ata_set_dmamode)
		i->dft->lldd_ata_set_dmamode(dev);
/*
 * sas_ata_sched_eh - schedule libata EH for this port, setting the
 * device's SAS_DEV_EH_PENDING bit under ha->lock so sas_ata_end_eh()
 * can pair with it.
 */
static void sas_ata_sched_eh(struct ata_port *ap)
	struct domain_device *dev = ap->private_data;
	struct sas_ha_struct *ha = dev->port->ha;

	spin_lock_irqsave(&ha->lock, flags);
	if (!test_and_set_bit(SAS_DEV_EH_PENDING, &dev->state))
	ata_std_sched_eh(ap);
	spin_unlock_irqrestore(&ha->lock, flags);
/*
 * sas_ata_end_eh - libata end_eh hook: clear the EH-pending device state
 * that sas_ata_sched_eh() set.
 */
void sas_ata_end_eh(struct ata_port *ap)
	struct domain_device *dev = ap->private_data;
	struct sas_ha_struct *ha = dev->port->ha;

	spin_lock_irqsave(&ha->lock, flags);
	if (test_and_clear_bit(SAS_DEV_EH_PENDING, &dev->state))
	spin_unlock_irqrestore(&ha->lock, flags);
/*
 * sas_ata_prereset - skip the upcoming reset when the local phy is
 * disabled or the device has already been removed.
 */
static int sas_ata_prereset(struct ata_link *link, unsigned long deadline)
	struct ata_port *ap = link->ap;
	struct domain_device *dev = ap->private_data;
	struct sas_phy *local_phy = sas_get_local_phy(dev);

	if (!local_phy->enabled || test_bit(SAS_DEV_GONE, &dev->state))

	sas_put_local_phy(local_phy);
/* libata port operations shared by every SATA device behind libsas. */
static struct ata_port_operations sas_sata_ops = {
	.prereset = sas_ata_prereset,
	.hardreset = sas_ata_hard_reset,
	.error_handler = ata_std_error_handler,
	.post_internal_cmd = sas_ata_post_internal,
	.qc_defer = ata_std_qc_defer,
	.qc_issue = sas_ata_qc_issue,
	.qc_fill_rtf = sas_ata_qc_fill_rtf,
	.set_dmamode = sas_ata_set_dmamode,
	.sched_eh = sas_ata_sched_eh,
	.end_eh = sas_ata_end_eh,
/*
 * sas_ata_init - allocate and initialize an ata_host/ata_port pair for a
 * newly found SATA domain device, register the transport port, and link
 * both objects into found_dev->sata_dev.
 * NOTE(review): extraction gaps - allocation-failure branches and the
 * return statements are missing from this fragment; the trailing
 * ata_host_put() appears to be an error-path cleanup.
 */
int sas_ata_init(struct domain_device *found_dev)
	struct sas_ha_struct *ha = found_dev->port->ha;
	struct Scsi_Host *shost = ha->shost;
	struct ata_host *ata_host;

	ata_host = kzalloc(sizeof(*ata_host), GFP_KERNEL);
		pr_err("ata host alloc failed.\n");

	ata_host_init(ata_host, ha->dev, &sas_sata_ops);

	ap = ata_port_alloc(ata_host);
		pr_err("ata_port_alloc failed.\n");

	/* Advertise full PIO/MWDMA/UDMA timing support plus NCQ. */
	ap->pio_mask = ATA_PIO4;
	ap->mwdma_mask = ATA_MWDMA2;
	ap->udma_mask = ATA_UDMA6;
	ap->flags |= ATA_FLAG_SATA | ATA_FLAG_PIO_DMA | ATA_FLAG_NCQ |
		ATA_FLAG_SAS_HOST | ATA_FLAG_FPDMA_AUX;
	ap->ops = &sas_sata_ops;
	ap->private_data = found_dev;
	ap->cbl = ATA_CBL_SATA;
	ap->scsi_host = shost;

	rc = ata_tport_add(ata_host->dev, ap);

	found_dev->sata_dev.ata_host = ata_host;
	found_dev->sata_dev.ap = ap;

	ata_host_put(ata_host);
/*
 * sas_ata_task_abort - abort the qc behind @task: SCSI-initiated commands
 * are bounced to the SCSI EH via blk_abort_request(); internal commands
 * are failed with a fake timeout and completed via qc->private_data.
 */
void sas_ata_task_abort(struct sas_task *task)
	struct ata_queued_cmd *qc = task->uldd_task;
	struct completion *waiting;

	/* Bounce SCSI-initiated commands to the SCSI EH */
		blk_abort_request(scsi_cmd_to_rq(qc->scsicmd));

	/* Internal command, fake a timeout and complete. */
	qc->flags &= ~ATA_QCFLAG_ACTIVE;
	qc->flags |= ATA_QCFLAG_EH;
	qc->err_mask |= AC_ERR_TIMEOUT;
	waiting = qc->private_data;
/*
 * sas_probe_sata - kick libata probing for every SATA device on the
 * port's disco list (under disco_mutex), then wait for EH and fail any
 * device whose link never came up.
 */
void sas_probe_sata(struct asd_sas_port *port)
	struct domain_device *dev, *n;

	mutex_lock(&port->ha->disco_mutex);
	list_for_each_entry(dev, &port->disco_list, disco_list_node) {
		if (!dev_is_sata(dev))

		ata_port_probe(dev->sata_dev.ap);
	mutex_unlock(&port->ha->disco_mutex);

	/* _safe: sas_fail_probe() may remove entries as we walk. */
	list_for_each_entry_safe(dev, n, &port->disco_list, disco_list_node) {
		if (!dev_is_sata(dev))

		sas_ata_wait_eh(dev);

		/* if libata could not bring the link up, don't surface
		 */
		if (!ata_dev_enabled(sas_to_ata_dev(dev)))
			sas_fail_probe(dev, __func__, -ENODEV);
/*
 * sas_ata_add_dev - add a SATA device found behind an expander phy.
 *
 * If the device negotiated a rate above the parent's minimum pathway
 * rate, attempt to lower the device linkrate via SMP PHY CONTROL. Then
 * gather ATA info, initialize the ata port, allocate the end-device rphy
 * and run SATA discovery; on discovery failure the rphy and disco-list
 * linkage are rolled back.
 */
int sas_ata_add_dev(struct domain_device *parent, struct ex_phy *phy,
		    struct domain_device *child, int phy_id)
	struct sas_rphy *rphy;

	if (child->linkrate > parent->min_linkrate) {
		struct sas_phy *cphy = child->phy;
		enum sas_linkrate min_prate = cphy->minimum_linkrate,
			parent_min_lrate = parent->min_linkrate,
			min_linkrate = (min_prate > parent_min_lrate) ?
					parent_min_lrate : 0;
		struct sas_phy_linkrates rates = {
			.maximum_linkrate = parent->min_linkrate,
			.minimum_linkrate = min_linkrate,

		pr_notice("ex %016llx phy%02d SATA device linkrate > min pathway connection rate, attempting to lower device linkrate\n",
			  SAS_ADDR(child->sas_addr), phy_id);
		ret = sas_smp_phy_control(parent, phy_id,
					  PHY_FUNC_LINK_RESET, &rates);
			pr_err("ex %016llx phy%02d SATA device could not set linkrate (%d)\n",
			       SAS_ADDR(child->sas_addr), phy_id, ret);
		pr_notice("ex %016llx phy%02d SATA device set linkrate successfully\n",
			  SAS_ADDR(child->sas_addr), phy_id);
		child->linkrate = child->min_linkrate;

	ret = sas_get_ata_info(child, phy);

	ret = sas_ata_init(child);

	rphy = sas_end_device_alloc(phy->port);

	rphy->identify.phy_identifier = phy_id;

	get_device(&rphy->dev);

	list_add_tail(&child->disco_list_node, &parent->port->disco_list);

	ret = sas_discover_sata(child);
		pr_notice("sas_discover_sata() for device %16llx at %016llx:%02d returned 0x%x\n",
			  SAS_ADDR(child->sas_addr),
			  SAS_ADDR(parent->sas_addr), phy_id, ret);
		/* Roll back the rphy and disco-list linkage on failure. */
		sas_rphy_free(child->rphy);
		list_del(&child->disco_list_node);
/*
 * sas_ata_flush_pm_eh - after a suspend/resume sweep, wait for libata EH
 * on each SATA device and tear down those that failed power management.
 */
static void sas_ata_flush_pm_eh(struct asd_sas_port *port, const char *func)
	struct domain_device *dev, *n;

	list_for_each_entry_safe(dev, n, &port->dev_list, dev_list_node) {
		if (!dev_is_sata(dev))

		sas_ata_wait_eh(dev);

		/* if libata failed to power manage the device, tear it down */
		if (ata_dev_disabled(sas_to_ata_dev(dev)))
			sas_fail_probe(dev, func, -ENODEV);
/*
 * sas_suspend_sata - suspend every SATA port on @port that is not already
 * in suspend, then flush the resulting EH activity.
 */
void sas_suspend_sata(struct asd_sas_port *port)
	struct domain_device *dev;

	mutex_lock(&port->ha->disco_mutex);
	list_for_each_entry(dev, &port->dev_list, dev_list_node) {
		struct sata_device *sata;

		if (!dev_is_sata(dev))

		sata = &dev->sata_dev;
		/* Skip ports whose last PM message was already a suspend. */
		if (sata->ap->pm_mesg.event == PM_EVENT_SUSPEND)

		ata_sas_port_suspend(sata->ap);
	mutex_unlock(&port->ha->disco_mutex);

	sas_ata_flush_pm_eh(port, __func__);
/*
 * sas_resume_sata - resume every suspended SATA port on @port, then flush
 * the resulting EH activity (mirror of sas_suspend_sata()).
 */
void sas_resume_sata(struct asd_sas_port *port)
	struct domain_device *dev;

	mutex_lock(&port->ha->disco_mutex);
	list_for_each_entry(dev, &port->dev_list, dev_list_node) {
		struct sata_device *sata;

		if (!dev_is_sata(dev))

		sata = &dev->sata_dev;
		/* Skip ports that are already powered on. */
		if (sata->ap->pm_mesg.event == PM_EVENT_ON)

		ata_sas_port_resume(sata->ap);
	mutex_unlock(&port->ha->disco_mutex);

	sas_ata_flush_pm_eh(port, __func__);
/*
 * sas_discover_sata - discover an STP/SATA domain device
 * @dev: pointer to struct domain_device of interest
 *
 * Devices directly attached to a HA port, have no parents. All other
 * devices do, and should have their "parent" pointer set appropriately
 * before calling this function.
 */
int sas_discover_sata(struct domain_device *dev)
	/* Port multipliers are not handled here. */
	if (dev->dev_type == SAS_SATA_PM)

	dev->sata_dev.class = sas_get_ata_command_set(dev);
	sas_fill_in_rphy(dev, dev->rphy);

	return sas_notify_lldd_dev_found(dev);
/* Async worker: run the libata port error handler for one SATA device. */
static void async_sas_ata_eh(void *data, async_cookie_t cookie)
	struct domain_device *dev = data;
	struct ata_port *ap = dev->sata_dev.ap;
	struct sas_ha_struct *ha = dev->port->ha;

	sas_ata_printk(KERN_DEBUG, dev, "dev error handler\n");
	ata_scsi_port_error_handler(ha->shost, ap);
/*
 * sas_ata_strategy_handler - run libata EH for every SATA device on the
 * host, one async worker per device, with domain revalidation deferred
 * for the duration of the sweep.
 */
void sas_ata_strategy_handler(struct Scsi_Host *shost)
	struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
	ASYNC_DOMAIN_EXCLUSIVE(async);

	/* it's ok to defer revalidation events during ata eh, these
	 * disks are in one of three states:
	 * 1/ present for initial domain discovery, and these
	 *    resets will cause bcn flutters
	 * 2/ hot removed, we'll discover that after eh fails
	 * 3/ hot added after initial discovery, lost the race, and need
	 *    to catch the next train.
	 */
	sas_disable_revalidation(sas_ha);

	spin_lock_irq(&sas_ha->phy_port_lock);
	for (i = 0; i < sas_ha->num_phys; i++) {
		struct asd_sas_port *port = sas_ha->sas_port[i];
		struct domain_device *dev;

		spin_lock(&port->dev_list_lock);
		list_for_each_entry(dev, &port->dev_list, dev_list_node) {
			if (!dev_is_sata(dev))

			/* hold a reference over eh since we may be
			 * racing with final remove once all commands
			 */
			kref_get(&dev->kref);

			async_schedule_domain(async_sas_ata_eh, dev, &async);
		spin_unlock(&port->dev_list_lock);
	spin_unlock_irq(&sas_ha->phy_port_lock);

	/* Wait for every per-device EH worker before re-enabling. */
	async_synchronize_full_domain(&async);

	sas_enable_revalidation(sas_ha);
/*
 * sas_ata_eh - peel the commands belonging to one SATA device off the
 * SCSI EH work queue onto a local list and feed them to the libata
 * command error handler.
 * NOTE(review): extraction gap - declarations/initialization of 'sata_q'
 * and 'eh_dev', and the outer loop, are missing from this fragment.
 */
void sas_ata_eh(struct Scsi_Host *shost, struct list_head *work_q)
	struct scsi_cmnd *cmd, *n;
	struct domain_device *eh_dev;

	list_for_each_entry_safe(cmd, n, work_q, eh_entry) {
		struct domain_device *ddev = cmd_to_domain_dev(cmd);

		/* Only SATA commands without a live sas_task qualify. */
		if (!dev_is_sata(ddev) || TO_SAS_TASK(cmd))
		/* Collect commands for a single device per pass. */
		if (eh_dev && eh_dev != ddev)

		list_move(&cmd->eh_entry, &sata_q);

	if (!list_empty(&sata_q)) {
		struct ata_port *ap = eh_dev->sata_dev.ap;

		sas_ata_printk(KERN_DEBUG, eh_dev, "cmd error handler\n");
		ata_scsi_cmd_error_handler(shost, ap, &sata_q);
		/*
		 * ata's error handler may leave the cmd on the list
		 * so make sure they don't remain on a stack list
		 * about to go out of scope.
		 *
		 * This looks strange, since the commands are
		 * now part of no list, but the next error
		 * action will be ata_port_error_handler()
		 * which takes no list and sweeps them up
		 * anyway from the ata tag array.
		 */
		while (!list_empty(&sata_q))
			list_del_init(sata_q.next);
/*
 * sas_ata_schedule_reset - flag a timeout plus reset request in the
 * port's EH info and schedule libata EH for @dev (no-op for non-SATA).
 */
void sas_ata_schedule_reset(struct domain_device *dev)
	struct ata_eh_info *ehi;

	if (!dev_is_sata(dev))

	ap = dev->sata_dev.ap;
	ehi = &ap->link.eh_info;

	spin_lock_irqsave(ap->lock, flags);
	ehi->err_mask |= AC_ERR_TIMEOUT;
	ehi->action |= ATA_EH_RESET;
	ata_port_schedule_eh(ap);
	spin_unlock_irqrestore(ap->lock, flags);
EXPORT_SYMBOL_GPL(sas_ata_schedule_reset);
/* Wait for libata EH on @dev's ata port to finish (no-op for non-SATA). */
void sas_ata_wait_eh(struct domain_device *dev)
	if (!dev_is_sata(dev))

	ap = dev->sata_dev.ap;
	ata_port_wait_eh(ap);
/*
 * sas_ata_device_link_abort - fail all outstanding commands on the
 * device's link: synthesize an aborted-device D2H FIS, flag a device
 * error (and a reset request) in eh_info, and abort the link.
 */
void sas_ata_device_link_abort(struct domain_device *device, bool force_reset)
	struct ata_port *ap = device->sata_dev.ap;
	struct ata_link *link = &ap->link;

	spin_lock_irqsave(ap->lock, flags);
	device->sata_dev.fis[2] = ATA_ERR | ATA_DRDY; /* tf status */
	device->sata_dev.fis[3] = ATA_ABORTED; /* tf error */

	link->eh_info.err_mask |= AC_ERR_DEV;
	/* NOTE(review): extraction gap - the conditional guarding this on
	 * @force_reset is missing from this fragment; confirm upstream. */
	link->eh_info.action |= ATA_EH_RESET;
	ata_link_abort(link);
	spin_unlock_irqrestore(ap->lock, flags);
EXPORT_SYMBOL_GPL(sas_ata_device_link_abort);
/*
 * sas_execute_ata_cmd - send a raw host-to-device FIS to @device as a
 * TMF-style task, optionally forced out of a specific phy.
 */
int sas_execute_ata_cmd(struct domain_device *device, u8 *fis, int force_phy_id)
	struct sas_tmf_task tmf_task = {};

	return sas_execute_tmf(device, fis, sizeof(struct host_to_dev_fis),
			       force_phy_id, &tmf_task);
EXPORT_SYMBOL_GPL(sas_execute_ata_cmd);
/* sysfs show: report whether the device supports NCQ priority (0/1). */
static ssize_t sas_ncq_prio_supported_show(struct device *device,
					   struct device_attribute *attr,
	struct scsi_device *sdev = to_scsi_device(device);
	struct domain_device *ddev = sdev_to_domain_dev(sdev);

	rc = ata_ncq_prio_supported(ddev->sata_dev.ap, sdev, &supported);

	return sysfs_emit(buf, "%d\n", supported);

/* Read-only "ncq_prio_supported" attribute. */
static struct device_attribute dev_attr_sas_ncq_prio_supported =
	__ATTR(ncq_prio_supported, S_IRUGO, sas_ncq_prio_supported_show, NULL);
/* sysfs show: report whether NCQ priority is currently enabled (0/1). */
static ssize_t sas_ncq_prio_enable_show(struct device *device,
					struct device_attribute *attr,
	struct scsi_device *sdev = to_scsi_device(device);
	struct domain_device *ddev = sdev_to_domain_dev(sdev);

	rc = ata_ncq_prio_enabled(ddev->sata_dev.ap, sdev, &enabled);

	return sysfs_emit(buf, "%d\n", enabled);
/* sysfs store: parse a boolean and enable/disable NCQ priority. */
static ssize_t sas_ncq_prio_enable_store(struct device *device,
					 struct device_attribute *attr,
					 const char *buf, size_t len)
	struct scsi_device *sdev = to_scsi_device(device);
	struct domain_device *ddev = sdev_to_domain_dev(sdev);

	rc = kstrtobool(buf, &enable);

	rc = ata_ncq_prio_enable(ddev->sata_dev.ap, sdev, enable);

/* Read/write "ncq_prio_enable" attribute. */
static struct device_attribute dev_attr_sas_ncq_prio_enable =
	__ATTR(ncq_prio_enable, S_IRUGO | S_IWUSR,
	       sas_ncq_prio_enable_show, sas_ncq_prio_enable_store);
/* sysfs attributes exposed on SATA scsi_devices behind libsas. */
static struct attribute *sas_ata_sdev_attrs[] = {
	&dev_attr_sas_ncq_prio_supported.attr,
	&dev_attr_sas_ncq_prio_enable.attr,
/* Hide the NCQ-priority attributes on non-SATA devices. */
static umode_t sas_ata_attr_is_visible(struct kobject *kobj,
				       struct attribute *attr, int i)
	struct device *dev = kobj_to_dev(kobj);
	struct scsi_device *sdev = to_scsi_device(dev);
	struct domain_device *ddev = sdev_to_domain_dev(sdev);

	if (!dev_is_sata(ddev))
/* Attribute group wired into the SCSI host template by libsas LLDDs. */
const struct attribute_group sas_ata_sdev_attr_group = {
	.attrs = sas_ata_sdev_attrs,
	.is_visible = sas_ata_attr_is_visible,
EXPORT_SYMBOL_GPL(sas_ata_sdev_attr_group);