// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2015 Linaro Ltd.
 * Copyright (c) 2015 Hisilicon Limited.
 */

#include "hisi_sas.h"
#define DRV_NAME "hisi_sas"

#define DEV_IS_GONE(dev) \
	((!dev) || (dev->dev_type == SAS_PHY_UNUSED))

static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
				u8 *lun, struct hisi_sas_tmf_task *tmf);
static int
hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
			     struct domain_device *device,
			     int abort_flag, int tag);
static int hisi_sas_softreset_ata_disk(struct domain_device *device);
static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
				void *funcdata);
static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
				  struct domain_device *device);
static void hisi_sas_dev_gone(struct domain_device *device);

u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis, int direction)
{
	switch (fis->command) {
	case ATA_CMD_FPDMA_WRITE:
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_RECV:
	case ATA_CMD_FPDMA_SEND:
	case ATA_CMD_NCQ_NON_DATA:
		return HISI_SAS_SATA_PROTOCOL_FPDMA;

	case ATA_CMD_DOWNLOAD_MICRO:
	case ATA_CMD_ID_ATA:
	case ATA_CMD_PMP_READ:
	case ATA_CMD_READ_LOG_EXT:
	case ATA_CMD_PIO_READ:
	case ATA_CMD_PIO_READ_EXT:
	case ATA_CMD_PMP_WRITE:
	case ATA_CMD_WRITE_LOG_EXT:
	case ATA_CMD_PIO_WRITE:
	case ATA_CMD_PIO_WRITE_EXT:
		return HISI_SAS_SATA_PROTOCOL_PIO;

	case ATA_CMD_DOWNLOAD_MICRO_DMA:
	case ATA_CMD_PMP_READ_DMA:
	case ATA_CMD_PMP_WRITE_DMA:
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_READ_LOG_DMA_EXT:
	case ATA_CMD_READ_STREAM_DMA_EXT:
	case ATA_CMD_TRUSTED_RCV_DMA:
	case ATA_CMD_TRUSTED_SND_DMA:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
	case ATA_CMD_WRITE_QUEUED:
	case ATA_CMD_WRITE_LOG_DMA_EXT:
	case ATA_CMD_WRITE_STREAM_DMA_EXT:
	case ATA_CMD_ZAC_MGMT_IN:
		return HISI_SAS_SATA_PROTOCOL_DMA;

	case ATA_CMD_CHK_POWER:
	case ATA_CMD_DEV_RESET:
	case ATA_CMD_EDD:
	case ATA_CMD_FLUSH:
	case ATA_CMD_FLUSH_EXT:
	case ATA_CMD_VERIFY:
	case ATA_CMD_VERIFY_EXT:
	case ATA_CMD_SET_FEATURES:
	case ATA_CMD_STANDBY:
	case ATA_CMD_STANDBYNOW1:
	case ATA_CMD_ZAC_MGMT_OUT:
		return HISI_SAS_SATA_PROTOCOL_NONDATA;

	case ATA_CMD_SET_MAX:
		switch (fis->features) {
		case ATA_SET_MAX_PASSWD:
		case ATA_SET_MAX_LOCK:
			return HISI_SAS_SATA_PROTOCOL_PIO;

		case ATA_SET_MAX_PASSWD_DMA:
		case ATA_SET_MAX_UNLOCK_DMA:
			return HISI_SAS_SATA_PROTOCOL_DMA;

		default:
			return HISI_SAS_SATA_PROTOCOL_NONDATA;
		}

	default:
	{
		if (direction == DMA_NONE)
			return HISI_SAS_SATA_PROTOCOL_NONDATA;
		return HISI_SAS_SATA_PROTOCOL_PIO;
	}
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_get_ata_protocol);

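/* Copy the D2H FIS from the slot's status buffer into the libsas ATA response. */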
void hisi_sas_sata_done(struct sas_task *task,
			struct hisi_sas_slot *slot)
{
	struct task_status_struct *ts = &task->task_status;
	struct ata_task_resp *resp = (struct ata_task_resp *)ts->buf;
	struct hisi_sas_status_buffer *status_buf =
			hisi_sas_status_buf_addr_mem(slot);
	u8 *iu = &status_buf->iu[0];
	struct dev_to_host_fis *d2h = (struct dev_to_host_fis *)iu;

	resp->frame_len = sizeof(struct dev_to_host_fis);
	memcpy(&resp->ending_fis[0], d2h, sizeof(struct dev_to_host_fis));

	ts->buf_valid_size = sizeof(*resp);
}
EXPORT_SYMBOL_GPL(hisi_sas_sata_done);

/*
 * This function assumes linkrate mask fits in 8 bits, which it
 * does for all HW versions supported.
 */
u8 hisi_sas_get_prog_phy_linkrate_mask(enum sas_linkrate max)
{
	u8 rate = 0;
	int i;

	max -= SAS_LINK_RATE_1_5_GBPS;
	for (i = 0; i <= max; i++)
		rate |= 1 << (i * 2);
	return rate;
}
EXPORT_SYMBOL_GPL(hisi_sas_get_prog_phy_linkrate_mask);

static struct hisi_hba *dev_to_hisi_hba(struct domain_device *device)
{
	return device->port->ha->lldd_ha;
}

struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port)
{
	return container_of(sas_port, struct hisi_sas_port, sas_port);
}
EXPORT_SYMBOL_GPL(to_hisi_sas_port);

void hisi_sas_stop_phys(struct hisi_hba *hisi_hba)
{
	int phy_no;

	for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++)
		hisi_sas_phy_enable(hisi_hba, phy_no, 0);
}
EXPORT_SYMBOL_GPL(hisi_sas_stop_phys);

static void hisi_sas_slot_index_clear(struct hisi_hba *hisi_hba, int slot_idx)
{
	void *bitmap = hisi_hba->slot_index_tags;

	clear_bit(slot_idx, bitmap);
}

static void hisi_sas_slot_index_free(struct hisi_hba *hisi_hba, int slot_idx)
{
	if (hisi_hba->hw->slot_index_alloc ||
	    slot_idx >= HISI_SAS_UNRESERVED_IPTT) {
		spin_lock(&hisi_hba->lock);
		hisi_sas_slot_index_clear(hisi_hba, slot_idx);
		spin_unlock(&hisi_hba->lock);
	}
}

static void hisi_sas_slot_index_set(struct hisi_hba *hisi_hba, int slot_idx)
{
	void *bitmap = hisi_hba->slot_index_tags;

	set_bit(slot_idx, bitmap);
}

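/*
 * IPTT allocation: tags below HISI_SAS_UNRESERVED_IPTT reuse the block
 * layer's request tag for SCSI commands, so only internal/reserved
 * commands are taken from the driver's own bitmap above that boundary.
 */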
static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba,
				     struct scsi_cmnd *scsi_cmnd)
{
	int index;
	void *bitmap = hisi_hba->slot_index_tags;

	if (scsi_cmnd)
		return scsi_cmnd->request->tag;

	spin_lock(&hisi_hba->lock);
	index = find_next_zero_bit(bitmap, hisi_hba->slot_index_count,
				   hisi_hba->last_slot_index + 1);
	if (index >= hisi_hba->slot_index_count) {
		index = find_next_zero_bit(bitmap,
				hisi_hba->slot_index_count,
				HISI_SAS_UNRESERVED_IPTT);
		if (index >= hisi_hba->slot_index_count) {
			spin_unlock(&hisi_hba->lock);
			return -SAS_QUEUE_FULL;
		}
	}
	hisi_sas_slot_index_set(hisi_hba, index);
	hisi_hba->last_slot_index = index;
	spin_unlock(&hisi_hba->lock);

	return index;
}

static void hisi_sas_slot_index_init(struct hisi_hba *hisi_hba)
{
	int i;

	for (i = 0; i < hisi_hba->slot_index_count; ++i)
		hisi_sas_slot_index_clear(hisi_hba, i);
}

void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
			     struct hisi_sas_slot *slot)
{
	int device_id = slot->device_id;
	struct hisi_sas_device *sas_dev = &hisi_hba->devices[device_id];

	if (task) {
		struct device *dev = hisi_hba->dev;

		if (!task->lldd_task)
			return;

		task->lldd_task = NULL;

		if (!sas_protocol_ata(task->task_proto)) {
			if (slot->n_elem)
				dma_unmap_sg(dev, task->scatter,
					     task->num_scatter,
					     task->data_dir);
			if (slot->n_elem_dif) {
				struct sas_ssp_task *ssp_task = &task->ssp_task;
				struct scsi_cmnd *scsi_cmnd = ssp_task->cmd;

				dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd),
					     scsi_prot_sg_count(scsi_cmnd),
					     task->data_dir);
			}
		}
	}

	spin_lock(&sas_dev->lock);
	list_del_init(&slot->entry);
	spin_unlock(&sas_dev->lock);

	memset(slot, 0, offsetof(struct hisi_sas_slot, buf));

	hisi_sas_slot_index_free(hisi_hba, slot->idx);
}
EXPORT_SYMBOL_GPL(hisi_sas_slot_task_free);

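/* Thin wrappers that dispatch command preparation to the HW-version-specific hooks. */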
static void hisi_sas_task_prep_smp(struct hisi_hba *hisi_hba,
				   struct hisi_sas_slot *slot)
{
	hisi_hba->hw->prep_smp(hisi_hba, slot);
}

static void hisi_sas_task_prep_ssp(struct hisi_hba *hisi_hba,
				   struct hisi_sas_slot *slot)
{
	hisi_hba->hw->prep_ssp(hisi_hba, slot);
}

static void hisi_sas_task_prep_ata(struct hisi_hba *hisi_hba,
				   struct hisi_sas_slot *slot)
{
	hisi_hba->hw->prep_stp(hisi_hba, slot);
}

static void hisi_sas_task_prep_abort(struct hisi_hba *hisi_hba,
				     struct hisi_sas_slot *slot,
				     int device_id, int abort_flag,
				     int tag_to_abort)
{
	hisi_hba->hw->prep_abort(hisi_hba, slot,
				 device_id, abort_flag, tag_to_abort);
}

static void hisi_sas_dma_unmap(struct hisi_hba *hisi_hba,
			       struct sas_task *task, int n_elem,
			       int n_elem_req)
{
	struct device *dev = hisi_hba->dev;

	if (!sas_protocol_ata(task->task_proto)) {
		if (task->num_scatter) {
			if (n_elem)
				dma_unmap_sg(dev, task->scatter,
					     task->num_scatter,
					     task->data_dir);
		} else if (task->task_proto & SAS_PROTOCOL_SMP) {
			if (n_elem_req)
				dma_unmap_sg(dev, &task->smp_task.smp_req,
					     1, DMA_TO_DEVICE);
		}
	}
}

static int hisi_sas_dma_map(struct hisi_hba *hisi_hba,
			    struct sas_task *task, int *n_elem,
			    int *n_elem_req)
{
	struct device *dev = hisi_hba->dev;
	int rc = 0;

	if (sas_protocol_ata(task->task_proto)) {
		*n_elem = task->num_scatter;
	} else {
		unsigned int req_len;

		if (task->num_scatter) {
			*n_elem = dma_map_sg(dev, task->scatter,
					     task->num_scatter, task->data_dir);
			if (!*n_elem) {
				rc = -ENOMEM;
				goto prep_out;
			}
		} else if (task->task_proto & SAS_PROTOCOL_SMP) {
			*n_elem_req = dma_map_sg(dev, &task->smp_task.smp_req,
						 1, DMA_TO_DEVICE);
			if (!*n_elem_req) {
				rc = -ENOMEM;
				goto prep_out;
			}
			req_len = sg_dma_len(&task->smp_task.smp_req);
			if (req_len & 0x3) {
				rc = -EINVAL;
				goto err_out_dma_unmap;
			}
		}
	}

	if (*n_elem > HISI_SAS_SGE_PAGE_CNT) {
		dev_err(dev, "task prep: n_elem(%d) > HISI_SAS_SGE_PAGE_CNT\n",
			*n_elem);
		rc = -EINVAL;
		goto err_out_dma_unmap;
	}
	return 0;

err_out_dma_unmap:
	/* It would be better to call dma_unmap_sg() here, but it's messy */
	hisi_sas_dma_unmap(hisi_hba, task, *n_elem,
			   *n_elem_req);
prep_out:
	return rc;
}

static void hisi_sas_dif_dma_unmap(struct hisi_hba *hisi_hba,
				   struct sas_task *task, int n_elem_dif)
{
	struct device *dev = hisi_hba->dev;

	if (n_elem_dif) {
		struct sas_ssp_task *ssp_task = &task->ssp_task;
		struct scsi_cmnd *scsi_cmnd = ssp_task->cmd;

		dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd),
			     scsi_prot_sg_count(scsi_cmnd),
			     task->data_dir);
	}
}

static int hisi_sas_dif_dma_map(struct hisi_hba *hisi_hba,
				int *n_elem_dif, struct sas_task *task)
{
	struct device *dev = hisi_hba->dev;
	struct sas_ssp_task *ssp_task;
	struct scsi_cmnd *scsi_cmnd;
	int rc;

	if (task->num_scatter) {
		ssp_task = &task->ssp_task;
		scsi_cmnd = ssp_task->cmd;

		if (scsi_prot_sg_count(scsi_cmnd)) {
			*n_elem_dif = dma_map_sg(dev,
						 scsi_prot_sglist(scsi_cmnd),
						 scsi_prot_sg_count(scsi_cmnd),
						 task->data_dir);
			if (!*n_elem_dif)
				return -ENOMEM;

			if (*n_elem_dif > HISI_SAS_SGE_DIF_PAGE_CNT) {
				dev_err(dev, "task prep: n_elem_dif(%d) too large\n",
					*n_elem_dif);
				rc = -EINVAL;
				goto err_out_dif_dma_unmap;
			}
		}
	}

	return 0;

err_out_dif_dma_unmap:
	dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd),
		     scsi_prot_sg_count(scsi_cmnd), task->data_dir);
	return rc;
}

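/*
 * Build one command slot: pick a delivery queue, DMA-map the data (and
 * protection) buffers, allocate an IPTT, fill the command header and
 * table, then mark the slot ready for delivery.
 */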
static int hisi_sas_task_prep(struct sas_task *task,
			      struct hisi_sas_dq **dq_pointer,
			      bool is_tmf, struct hisi_sas_tmf_task *tmf,
			      int *pass)
{
	struct domain_device *device = task->dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_sas_port *port;
	struct hisi_sas_slot *slot;
	struct hisi_sas_cmd_hdr *cmd_hdr_base;
	struct asd_sas_port *sas_port = device->port;
	struct device *dev = hisi_hba->dev;
	int dlvry_queue_slot, dlvry_queue, rc, slot_idx;
	int n_elem = 0, n_elem_dif = 0, n_elem_req = 0;
	struct scsi_cmnd *scmd = NULL;
	struct hisi_sas_dq *dq;
	unsigned long flags;
	int wr_q_index;

	if (DEV_IS_GONE(sas_dev)) {
		if (sas_dev)
			dev_info(dev, "task prep: device %d not ready\n",
				 sas_dev->device_id);
		else
			dev_info(dev, "task prep: device %016llx not ready\n",
				 SAS_ADDR(device->sas_addr));

		return -ECOMM;
	}

	if (task->uldd_task) {
		struct ata_queued_cmd *qc;

		if (dev_is_sata(device)) {
			qc = task->uldd_task;
			scmd = qc->scsicmd;
		} else {
			scmd = task->uldd_task;
		}
	}

	if (scmd && hisi_hba->shost->nr_hw_queues) {
		unsigned int dq_index;
		u32 blk_tag;

		blk_tag = blk_mq_unique_tag(scmd->request);
		dq_index = blk_mq_unique_tag_to_hwq(blk_tag);
		*dq_pointer = dq = &hisi_hba->dq[dq_index];
	} else if (hisi_hba->shost->nr_hw_queues) {
		struct Scsi_Host *shost = hisi_hba->shost;
		struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
		int queue = qmap->mq_map[raw_smp_processor_id()];

		*dq_pointer = dq = &hisi_hba->dq[queue];
	} else {
		*dq_pointer = dq = sas_dev->dq;
	}

	port = to_hisi_sas_port(sas_port);
	if (port && !port->port_attached) {
		dev_info(dev, "task prep: %s port%d not attach device\n",
			 (dev_is_sata(device)) ?
			 "SATA/STP" : "SAS",
			 device->port->id);

		return -ECOMM;
	}

	rc = hisi_sas_dma_map(hisi_hba, task, &n_elem,
			      &n_elem_req);
	if (rc < 0)
		goto prep_out;

	if (!sas_protocol_ata(task->task_proto)) {
		rc = hisi_sas_dif_dma_map(hisi_hba, &n_elem_dif, task);
		if (rc < 0)
			goto err_out_dma_unmap;
	}

	if (hisi_hba->hw->slot_index_alloc)
		rc = hisi_hba->hw->slot_index_alloc(hisi_hba, device);
	else
		rc = hisi_sas_slot_index_alloc(hisi_hba, scmd);
	if (rc < 0)
		goto err_out_dif_dma_unmap;

	slot_idx = rc;
	slot = &hisi_hba->slot_info[slot_idx];

	spin_lock(&dq->lock);
	wr_q_index = dq->wr_point;
	dq->wr_point = (dq->wr_point + 1) % HISI_SAS_QUEUE_SLOTS;
	list_add_tail(&slot->delivery, &dq->list);
	spin_unlock(&dq->lock);
	spin_lock(&sas_dev->lock);
	list_add_tail(&slot->entry, &sas_dev->list);
	spin_unlock(&sas_dev->lock);

	dlvry_queue = dq->id;
	dlvry_queue_slot = wr_q_index;

	slot->device_id = sas_dev->device_id;
	slot->n_elem = n_elem;
	slot->n_elem_dif = n_elem_dif;
	slot->dlvry_queue = dlvry_queue;
	slot->dlvry_queue_slot = dlvry_queue_slot;
	cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
	slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
	slot->task = task;
	slot->port = port;
	slot->tmf = tmf;
	slot->is_internal = is_tmf;
	task->lldd_task = slot;

	memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
	memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
	memset(hisi_sas_status_buf_addr_mem(slot), 0,
	       sizeof(struct hisi_sas_err_record));

	switch (task->task_proto) {
	case SAS_PROTOCOL_SMP:
		hisi_sas_task_prep_smp(hisi_hba, slot);
		break;
	case SAS_PROTOCOL_SSP:
		hisi_sas_task_prep_ssp(hisi_hba, slot);
		break;
	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
		hisi_sas_task_prep_ata(hisi_hba, slot);
		break;
	default:
		dev_err(dev, "task prep: unknown/unsupported proto (0x%x)\n",
			task->task_proto);
		break;
	}

	spin_lock_irqsave(&task->task_state_lock, flags);
	task->task_state_flags |= SAS_TASK_AT_INITIATOR;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	++(*pass);
	WRITE_ONCE(slot->ready, 1);

	return 0;

err_out_dif_dma_unmap:
	if (!sas_protocol_ata(task->task_proto))
		hisi_sas_dif_dma_unmap(hisi_hba, task, n_elem_dif);
err_out_dma_unmap:
	hisi_sas_dma_unmap(hisi_hba, task, n_elem,
			   n_elem_req);
prep_out:
	dev_err(dev, "task prep: failed[%d]!\n", rc);
	return rc;
}

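/*
 * Queue a single task: prepare the slot, then kick the hardware
 * delivery queue once the slot has been marked ready.
 */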
static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags,
			      bool is_tmf, struct hisi_sas_tmf_task *tmf)
{
	u32 rc;
	u32 pass = 0;
	struct hisi_hba *hisi_hba;
	struct device *dev;
	struct domain_device *device = task->dev;
	struct asd_sas_port *sas_port = device->port;
	struct hisi_sas_dq *dq = NULL;

	if (!sas_port) {
		struct task_status_struct *ts = &task->task_status;

		ts->resp = SAS_TASK_UNDELIVERED;
		ts->stat = SAS_PHY_DOWN;
		/*
		 * libsas will use dev->port, should
		 * not call task_done for sata
		 */
		if (device->dev_type != SAS_SATA_DEV)
			task->task_done(task);
		return -ECOMM;
	}

	hisi_hba = dev_to_hisi_hba(device);
	dev = hisi_hba->dev;

	if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags))) {
		if (!gfpflags_allow_blocking(gfp_flags))
			return -EINVAL;

		down(&hisi_hba->sem);
		up(&hisi_hba->sem);
	}

	/* protect task_prep and start_delivery sequence */
	rc = hisi_sas_task_prep(task, &dq, is_tmf, tmf, &pass);
	if (rc)
		dev_err(dev, "task exec: failed[%d]!\n", rc);

	if (likely(pass)) {
		spin_lock(&dq->lock);
		hisi_hba->hw->start_delivery(dq);
		spin_unlock(&dq->lock);
	}

	return rc;
}

static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct sas_ha_struct *sas_ha;

	if (!phy->phy_attached)
		return;

	if (test_bit(HISI_SAS_PM_BIT, &hisi_hba->flags) &&
	    !sas_phy->suspended) {
		dev_warn(hisi_hba->dev, "phy%d during suspend filtered out\n", phy_no);
		return;
	}

	sas_ha = &hisi_hba->sha;
	sas_ha->notify_phy_event(sas_phy, PHYE_OOB_DONE);

	if (sas_phy->phy) {
		struct sas_phy *sphy = sas_phy->phy;

		sphy->negotiated_linkrate = sas_phy->linkrate;
		sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
		sphy->maximum_linkrate_hw =
			hisi_hba->hw->phy_get_max_linkrate();
		if (sphy->minimum_linkrate == SAS_LINK_RATE_UNKNOWN)
			sphy->minimum_linkrate = phy->minimum_linkrate;

		if (sphy->maximum_linkrate == SAS_LINK_RATE_UNKNOWN)
			sphy->maximum_linkrate = phy->maximum_linkrate;
	}

	if (phy->phy_type & PORT_TYPE_SAS) {
		struct sas_identify_frame *id;

		id = (struct sas_identify_frame *)phy->frame_rcvd;
		id->dev_type = phy->identify.device_type;
		id->initiator_bits = SAS_PROTOCOL_ALL;
		id->target_bits = phy->identify.target_port_protocols;
	} else if (phy->phy_type & PORT_TYPE_SATA) {
		/* Nothing */
	}

	sas_phy->frame_rcvd_size = phy->frame_rcvd_size;
	sas_ha->notify_port_event(sas_phy, PORTE_BYTES_DMAED);
}

static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device)
{
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct hisi_sas_device *sas_dev = NULL;
	int last = hisi_hba->last_dev_id;
	int first = (hisi_hba->last_dev_id + 1) % HISI_SAS_MAX_DEVICES;
	int i;

	spin_lock(&hisi_hba->lock);
	for (i = first; i != last; i %= HISI_SAS_MAX_DEVICES) {
		if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) {
			int queue = i % hisi_hba->queue_count;
			struct hisi_sas_dq *dq = &hisi_hba->dq[queue];

			hisi_hba->devices[i].device_id = i;
			sas_dev = &hisi_hba->devices[i];
			sas_dev->dev_status = HISI_SAS_DEV_INIT;
			sas_dev->dev_type = device->dev_type;
			sas_dev->hisi_hba = hisi_hba;
			sas_dev->sas_device = device;
			sas_dev->dq = dq;
			spin_lock_init(&sas_dev->lock);
			INIT_LIST_HEAD(&hisi_hba->devices[i].list);
			break;
		}
		i++;
	}
	hisi_hba->last_dev_id = i;
	spin_unlock(&hisi_hba->lock);

	return sas_dev;
}

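/*
 * Per-device bring-up after discovery: clear stale task sets on SSP
 * devices and any previous STP affiliation on SATA devices, so the
 * device starts from a clean state.
 */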
#define HISI_SAS_DISK_RECOVER_CNT 3
static int hisi_sas_init_device(struct domain_device *device)
{
	int rc = TMF_RESP_FUNC_COMPLETE;
	struct scsi_lun lun;
	struct hisi_sas_tmf_task tmf_task;
	int retry = HISI_SAS_DISK_RECOVER_CNT;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	struct sas_phy *local_phy;

	switch (device->dev_type) {
	case SAS_END_DEVICE:
		int_to_scsilun(0, &lun);

		tmf_task.tmf = TMF_CLEAR_TASK_SET;
		while (retry-- > 0) {
			rc = hisi_sas_debug_issue_ssp_tmf(device, lun.scsi_lun,
							  &tmf_task);
			if (rc == TMF_RESP_FUNC_COMPLETE) {
				hisi_sas_release_task(hisi_hba, device);
				break;
			}
		}
		break;
	case SAS_SATA_DEV:
	case SAS_SATA_PM:
	case SAS_SATA_PM_PORT:
	case SAS_SATA_PENDING:
		/*
		 * send HARD RESET to clear previous affiliation of
		 * STP target port
		 */
		local_phy = sas_get_local_phy(device);
		if (!scsi_is_sas_phy_local(local_phy) &&
		    !test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) {
			unsigned long deadline = ata_deadline(jiffies, 20000);
			struct sata_device *sata_dev = &device->sata_dev;
			struct ata_host *ata_host = sata_dev->ata_host;
			struct ata_port_operations *ops = ata_host->ops;
			struct ata_port *ap = sata_dev->ap;
			struct ata_link *link;
			unsigned int classes;

			ata_for_each_link(link, ap, EDGE)
				rc = ops->hardreset(link, &classes,
						    deadline);
		}
		sas_put_local_phy(local_phy);
		if (rc) {
			dev_warn(dev, "SATA disk hardreset fail: %d\n", rc);
			return rc;
		}

		while (retry-- > 0) {
			rc = hisi_sas_softreset_ata_disk(device);
			if (!rc)
				break;
		}
		break;
	default:
		break;
	}

	return rc;
}

*device
)
764 struct hisi_hba
*hisi_hba
= dev_to_hisi_hba(device
);
765 struct domain_device
*parent_dev
= device
->parent
;
766 struct hisi_sas_device
*sas_dev
;
767 struct device
*dev
= hisi_hba
->dev
;
770 if (hisi_hba
->hw
->alloc_dev
)
771 sas_dev
= hisi_hba
->hw
->alloc_dev(device
);
773 sas_dev
= hisi_sas_alloc_dev(device
);
775 dev_err(dev
, "fail alloc dev: max support %d devices\n",
776 HISI_SAS_MAX_DEVICES
);
780 device
->lldd_dev
= sas_dev
;
781 hisi_hba
->hw
->setup_itct(hisi_hba
, sas_dev
);
783 if (parent_dev
&& dev_is_expander(parent_dev
->dev_type
)) {
785 u8 phy_num
= parent_dev
->ex_dev
.num_phys
;
788 for (phy_no
= 0; phy_no
< phy_num
; phy_no
++) {
789 phy
= &parent_dev
->ex_dev
.ex_phy
[phy_no
];
790 if (SAS_ADDR(phy
->attached_sas_addr
) ==
791 SAS_ADDR(device
->sas_addr
))
795 if (phy_no
== phy_num
) {
796 dev_info(dev
, "dev found: no attached "
797 "dev:%016llx at ex:%016llx\n",
798 SAS_ADDR(device
->sas_addr
),
799 SAS_ADDR(parent_dev
->sas_addr
));
805 dev_info(dev
, "dev[%d:%x] found\n",
806 sas_dev
->device_id
, sas_dev
->dev_type
);
808 rc
= hisi_sas_init_device(device
);
811 sas_dev
->dev_status
= HISI_SAS_DEV_NORMAL
;
815 hisi_sas_dev_gone(device
);
int hisi_sas_slave_configure(struct scsi_device *sdev)
{
	struct domain_device *dev = sdev_to_domain_dev(sdev);
	int ret = sas_slave_configure(sdev);

	if (ret)
		return ret;
	if (!dev_is_sata(dev))
		sas_change_queue_depth(sdev, 64);

	return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_slave_configure);

void hisi_sas_scan_start(struct Scsi_Host *shost)
{
	struct hisi_hba *hisi_hba = shost_priv(shost);

	hisi_hba->hw->phys_init(hisi_hba);
}
EXPORT_SYMBOL_GPL(hisi_sas_scan_start);

int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	struct hisi_hba *hisi_hba = shost_priv(shost);
	struct sas_ha_struct *sha = &hisi_hba->sha;

	/* Wait for PHY up interrupt to occur */
	if (time < HZ)
		return 0;

	sas_drain_work(sha);

	return 1;
}
EXPORT_SYMBOL_GPL(hisi_sas_scan_finished);

static void hisi_sas_phyup_work(struct work_struct *work)
{
	struct hisi_sas_phy *phy =
		container_of(work, typeof(*phy), works[HISI_PHYE_PHY_UP]);
	struct hisi_hba *hisi_hba = phy->hisi_hba;
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	int phy_no = sas_phy->id;

	if (phy->identify.target_port_protocols == SAS_PROTOCOL_SSP)
		hisi_hba->hw->sl_notify_ssp(hisi_hba, phy_no);
	hisi_sas_bytes_dmaed(hisi_hba, phy_no);
}

static void hisi_sas_linkreset_work(struct work_struct *work)
{
	struct hisi_sas_phy *phy =
		container_of(work, typeof(*phy), works[HISI_PHYE_LINK_RESET]);
	struct asd_sas_phy *sas_phy = &phy->sas_phy;

	hisi_sas_control_phy(sas_phy, PHY_FUNC_LINK_RESET, NULL);
}

static const work_func_t hisi_sas_phye_fns[HISI_PHYES_NUM] = {
	[HISI_PHYE_PHY_UP] = hisi_sas_phyup_work,
	[HISI_PHYE_LINK_RESET] = hisi_sas_linkreset_work,
};

bool hisi_sas_notify_phy_event(struct hisi_sas_phy *phy,
			       enum hisi_sas_phy_event event)
{
	struct hisi_hba *hisi_hba = phy->hisi_hba;

	if (WARN_ON(event >= HISI_PHYES_NUM))
		return false;

	return queue_work(hisi_hba->wq, &phy->works[event]);
}
EXPORT_SYMBOL_GPL(hisi_sas_notify_phy_event);

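/* Fires when OOB completed but the PHY never came up; recover with a link reset. */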
static void hisi_sas_wait_phyup_timedout(struct timer_list *t)
{
	struct hisi_sas_phy *phy = from_timer(phy, t, timer);
	struct hisi_hba *hisi_hba = phy->hisi_hba;
	struct device *dev = hisi_hba->dev;
	int phy_no = phy->sas_phy.id;

	dev_warn(dev, "phy%d wait phyup timeout, issuing link reset\n", phy_no);
	hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET);
}

void hisi_sas_phy_oob_ready(struct hisi_hba *hisi_hba, int phy_no)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct device *dev = hisi_hba->dev;

	dev_dbg(dev, "phy%d OOB ready\n", phy_no);
	if (phy->phy_attached)
		return;

	if (!timer_pending(&phy->timer)) {
		phy->timer.expires = jiffies + HISI_SAS_WAIT_PHYUP_TIMEOUT * HZ;
		add_timer(&phy->timer);
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_phy_oob_ready);

static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	int i;

	phy->hisi_hba = hisi_hba;
	phy->port = NULL;
	phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS;
	phy->maximum_linkrate = hisi_hba->hw->phy_get_max_linkrate();
	sas_phy->enabled = (phy_no < hisi_hba->n_phy) ? 1 : 0;
	sas_phy->class = SAS;
	sas_phy->iproto = SAS_PROTOCOL_ALL;
	sas_phy->tproto = 0;
	sas_phy->type = PHY_TYPE_PHYSICAL;
	sas_phy->role = PHY_ROLE_INITIATOR;
	sas_phy->oob_mode = OOB_NOT_CONNECTED;
	sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
	sas_phy->id = phy_no;
	sas_phy->sas_addr = &hisi_hba->sas_addr[0];
	sas_phy->frame_rcvd = &phy->frame_rcvd[0];
	sas_phy->ha = (struct sas_ha_struct *)hisi_hba->shost->hostdata;
	sas_phy->lldd_phy = phy;

	for (i = 0; i < HISI_PHYES_NUM; i++)
		INIT_WORK(&phy->works[i], hisi_sas_phye_fns[i]);

	spin_lock_init(&phy->lock);

	timer_setup(&phy->timer, hisi_sas_wait_phyup_timedout, 0);
}

/* Wrapper to ensure we track hisi_sas_phy.enable properly */
void hisi_sas_phy_enable(struct hisi_hba *hisi_hba, int phy_no, int enable)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *aphy = &phy->sas_phy;
	struct sas_phy *sphy = aphy->phy;
	unsigned long flags;

	spin_lock_irqsave(&phy->lock, flags);

	if (enable) {
		/* We may have been enabled already; if so, don't touch */
		if (!phy->enable)
			sphy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN;
		hisi_hba->hw->phy_start(hisi_hba, phy_no);
	} else {
		sphy->negotiated_linkrate = SAS_PHY_DISABLED;
		hisi_hba->hw->phy_disable(hisi_hba, phy_no);
	}
	phy->enable = enable;
	spin_unlock_irqrestore(&phy->lock, flags);
}
EXPORT_SYMBOL_GPL(hisi_sas_phy_enable);

static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy)
{
	struct sas_ha_struct *sas_ha = sas_phy->ha;
	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
	struct hisi_sas_phy *phy = sas_phy->lldd_phy;
	struct asd_sas_port *sas_port = sas_phy->port;
	struct hisi_sas_port *port;
	unsigned long flags;

	if (!sas_port)
		return;

	port = to_hisi_sas_port(sas_port);
	spin_lock_irqsave(&hisi_hba->lock, flags);
	port->port_attached = 1;
	port->id = phy->port_id;
	phy->port = port;
	sas_port->lldd_port = port;
	spin_unlock_irqrestore(&hisi_hba->lock, flags);
}

static void hisi_sas_do_release_task(struct hisi_hba *hisi_hba, struct sas_task *task,
				     struct hisi_sas_slot *slot)
{
	if (task) {
		unsigned long flags;
		struct task_status_struct *ts;

		ts = &task->task_status;

		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_ABORTED_TASK;
		spin_lock_irqsave(&task->task_state_lock, flags);
		task->task_state_flags &=
			~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
		if (!slot->is_internal && task->task_proto != SAS_PROTOCOL_SMP)
			task->task_state_flags |= SAS_TASK_STATE_DONE;
		spin_unlock_irqrestore(&task->task_state_lock, flags);
	}

	hisi_sas_slot_task_free(hisi_hba, task, slot);
}

static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
				  struct domain_device *device)
{
	struct hisi_sas_slot *slot, *slot2;
	struct hisi_sas_device *sas_dev = device->lldd_dev;

	list_for_each_entry_safe(slot, slot2, &sas_dev->list, entry)
		hisi_sas_do_release_task(hisi_hba, slot->task, slot);
}

void hisi_sas_release_tasks(struct hisi_hba *hisi_hba)
{
	struct hisi_sas_device *sas_dev;
	struct domain_device *device;
	int i;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		sas_dev = &hisi_hba->devices[i];
		device = sas_dev->sas_device;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED) ||
		    !device)
			continue;

		hisi_sas_release_task(hisi_hba, device);
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_release_tasks);

static void hisi_sas_dereg_device(struct hisi_hba *hisi_hba,
				  struct domain_device *device)
{
	if (hisi_hba->hw->dereg_device)
		hisi_hba->hw->dereg_device(hisi_hba, device);
}

static void hisi_sas_dev_gone(struct domain_device *device)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	int ret = 0;

	dev_info(dev, "dev[%d:%x] is gone\n",
		 sas_dev->device_id, sas_dev->dev_type);

	down(&hisi_hba->sem);
	if (!test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) {
		hisi_sas_internal_task_abort(hisi_hba, device,
					     HISI_SAS_INT_ABT_DEV, 0);

		hisi_sas_dereg_device(hisi_hba, device);

		ret = hisi_hba->hw->clear_itct(hisi_hba, sas_dev);
		device->lldd_dev = NULL;
	}

	if (hisi_hba->hw->free_device)
		hisi_hba->hw->free_device(sas_dev);

	/* Don't mark it as SAS_PHY_UNUSED if failed to clear ITCT */
	if (!ret)
		sas_dev->dev_type = SAS_PHY_UNUSED;
	sas_dev->sas_device = NULL;
	up(&hisi_hba->sem);
}

static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags)
{
	return hisi_sas_task_exec(task, gfp_flags, 0, NULL);
}

static int hisi_sas_phy_set_linkrate(struct hisi_hba *hisi_hba, int phy_no,
				     struct sas_phy_linkrates *r)
{
	struct sas_phy_linkrates _r;

	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	enum sas_linkrate min, max;

	if (r->minimum_linkrate > SAS_LINK_RATE_1_5_GBPS)
		return -EINVAL;

	if (r->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) {
		max = sas_phy->phy->maximum_linkrate;
		min = r->minimum_linkrate;
	} else if (r->minimum_linkrate == SAS_LINK_RATE_UNKNOWN) {
		max = r->maximum_linkrate;
		min = sas_phy->phy->minimum_linkrate;
	} else
		return -EINVAL;

	_r.maximum_linkrate = max;
	_r.minimum_linkrate = min;

	sas_phy->phy->maximum_linkrate = max;
	sas_phy->phy->minimum_linkrate = min;

	hisi_sas_phy_enable(hisi_hba, phy_no, 0);
	msleep(100);
	hisi_hba->hw->phy_set_linkrate(hisi_hba, phy_no, &_r);
	hisi_sas_phy_enable(hisi_hba, phy_no, 1);

	return 0;
}

static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
				void *funcdata)
{
	struct sas_ha_struct *sas_ha = sas_phy->ha;
	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
	int phy_no = sas_phy->id;

	switch (func) {
	case PHY_FUNC_HARD_RESET:
		hisi_hba->hw->phy_hard_reset(hisi_hba, phy_no);
		break;

	case PHY_FUNC_LINK_RESET:
		hisi_sas_phy_enable(hisi_hba, phy_no, 0);
		msleep(100);
		hisi_sas_phy_enable(hisi_hba, phy_no, 1);
		break;

	case PHY_FUNC_DISABLE:
		hisi_sas_phy_enable(hisi_hba, phy_no, 0);
		break;

	case PHY_FUNC_SET_LINK_RATE:
		return hisi_sas_phy_set_linkrate(hisi_hba, phy_no, funcdata);
	case PHY_FUNC_GET_EVENTS:
		if (hisi_hba->hw->get_events) {
			hisi_hba->hw->get_events(hisi_hba, phy_no);
			break;
		}
		fallthrough;
	case PHY_FUNC_RELEASE_SPINUP_HOLD:
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

static void hisi_sas_task_done(struct sas_task *task)
{
	del_timer(&task->slow_task->timer);
	complete(&task->slow_task->completion);
}

static void hisi_sas_tmf_timedout(struct timer_list *t)
{
	struct sas_task_slow *slow = from_timer(slow, t, timer);
	struct sas_task *task = slow->task;
	unsigned long flags;
	bool is_completed = true;

	spin_lock_irqsave(&task->task_state_lock, flags);
	if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
		task->task_state_flags |= SAS_TASK_STATE_ABORTED;
		is_completed = false;
	}
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	if (!is_completed)
		complete(&task->slow_task->completion);
}

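/*
 * Issue a TMF (or an ATA device-reset FIS) as an internal slow task,
 * bounded by a 20s timer and retried up to three times on failure.
 */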
#define TASK_TIMEOUT 20
#define TASK_RETRY 3
#define INTERNAL_ABORT_TIMEOUT 6
static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
					   void *parameter, u32 para_len,
					   struct hisi_sas_tmf_task *tmf)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = sas_dev->hisi_hba;
	struct device *dev = hisi_hba->dev;
	struct sas_task *task;
	int res, retry;

	for (retry = 0; retry < TASK_RETRY; retry++) {
		task = sas_alloc_slow_task(GFP_KERNEL);
		if (!task)
			return -ENOMEM;

		task->dev = device;
		task->task_proto = device->tproto;

		if (dev_is_sata(device)) {
			task->ata_task.device_control_reg_update = 1;
			memcpy(&task->ata_task.fis, parameter, para_len);
		} else {
			memcpy(&task->ssp_task, parameter, para_len);
		}
		task->task_done = hisi_sas_task_done;

		task->slow_task->timer.function = hisi_sas_tmf_timedout;
		task->slow_task->timer.expires = jiffies + TASK_TIMEOUT * HZ;
		add_timer(&task->slow_task->timer);

		res = hisi_sas_task_exec(task, GFP_KERNEL, 1, tmf);
		if (res) {
			del_timer(&task->slow_task->timer);
			dev_err(dev, "abort tmf: executing internal task failed: %d\n",
				res);
			goto ex_err;
		}

		wait_for_completion(&task->slow_task->completion);
		res = TMF_RESP_FUNC_FAILED;
		/* Even TMF timed out, return direct. */
		if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
			if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
				struct hisi_sas_slot *slot = task->lldd_task;

				dev_err(dev, "abort tmf: TMF task timeout and not done\n");
				if (slot) {
					struct hisi_sas_cq *cq =
					       &hisi_hba->cq[slot->dlvry_queue];
					/*
					 * sync irq to avoid free'ing task
					 * before using task in IO completion
					 */
					synchronize_irq(cq->irq_no);
					slot->task = NULL;
				}

				goto ex_err;
			} else
				dev_err(dev, "abort tmf: TMF task timeout\n");
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
			res = TMF_RESP_FUNC_COMPLETE;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == TMF_RESP_FUNC_SUCC) {
			res = TMF_RESP_FUNC_SUCC;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAS_DATA_UNDERRUN) {
			/* no error, but return the number of bytes of
			 * underrun
			 */
			dev_warn(dev, "abort tmf: task to dev %016llx resp: 0x%x sts 0x%x underrun\n",
				 SAS_ADDR(device->sas_addr),
				 task->task_status.resp,
				 task->task_status.stat);
			res = task->task_status.residual;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAS_DATA_OVERRUN) {
			dev_warn(dev, "abort tmf: blocked task error\n");
			res = -EMSGSIZE;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAS_OPEN_REJECT) {
			dev_warn(dev, "abort tmf: open reject failed\n");
			res = -EIO;
		} else {
			dev_warn(dev, "abort tmf: task to dev %016llx resp: 0x%x status 0x%x\n",
				 SAS_ADDR(device->sas_addr),
				 task->task_status.resp,
				 task->task_status.stat);
		}
		sas_free_task(task);
		task = NULL;
	}
ex_err:
	if (retry == TASK_RETRY)
		dev_warn(dev, "abort tmf: executing internal task failed!\n");
	sas_free_task(task);
	return res;
}

static void hisi_sas_fill_ata_reset_cmd(struct ata_device *dev,
					bool reset, int pmp, u8 *fis)
{
	struct ata_taskfile tf;

	ata_tf_init(dev, &tf);
	if (reset)
		tf.ctl |= ATA_SRST;
	else
		tf.ctl &= ~ATA_SRST;
	tf.command = ATA_CMD_DEV_RESET;
	ata_tf_to_fis(&tf, pmp, 0, fis);
}

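/*
 * SATA software reset: send a device-reset FIS with SRST set, then a
 * follow-up FIS with SRST cleared, on each link of the ata_port.
 */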
static int hisi_sas_softreset_ata_disk(struct domain_device *device)
{
	u8 fis[20] = {0};
	struct ata_port *ap = device->sata_dev.ap;
	struct ata_link *link;
	int rc = TMF_RESP_FUNC_FAILED;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	int s = sizeof(struct host_to_dev_fis);

	ata_for_each_link(link, ap, EDGE) {
		int pmp = sata_srst_pmp(link);

		hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis);
		rc = hisi_sas_exec_internal_tmf_task(device, fis, s, NULL);
		if (rc != TMF_RESP_FUNC_COMPLETE)
			break;
	}

	if (rc == TMF_RESP_FUNC_COMPLETE) {
		ata_for_each_link(link, ap, EDGE) {
			int pmp = sata_srst_pmp(link);

			hisi_sas_fill_ata_reset_cmd(link->device, 0, pmp, fis);
			rc = hisi_sas_exec_internal_tmf_task(device, fis,
							     s, NULL);
			if (rc != TMF_RESP_FUNC_COMPLETE)
				dev_err(dev, "ata disk de-reset failed\n");
		}
	} else {
		dev_err(dev, "ata disk reset failed\n");
	}

	if (rc == TMF_RESP_FUNC_COMPLETE)
		hisi_sas_release_task(hisi_hba, device);

	return rc;
}

static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
					u8 *lun, struct hisi_sas_tmf_task *tmf)
{
	struct sas_ssp_task ssp_task;

	if (!(device->tproto & SAS_PROTOCOL_SSP))
		return TMF_RESP_FUNC_ESUPP;

	memcpy(ssp_task.LUN, lun, 8);

	return hisi_sas_exec_internal_tmf_task(device, &ssp_task,
					       sizeof(ssp_task), tmf);
}

static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba)
{
	u32 state = hisi_hba->hw->get_phys_state(hisi_hba);
	int i;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
		struct domain_device *device = sas_dev->sas_device;
		struct asd_sas_port *sas_port;
		struct hisi_sas_port *port;
		struct hisi_sas_phy *phy = NULL;
		struct asd_sas_phy *sas_phy;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED)
				|| !device || !device->port)
			continue;

		sas_port = device->port;
		port = to_hisi_sas_port(sas_port);

		list_for_each_entry(sas_phy, &sas_port->phy_list, port_phy_el)
			if (state & BIT(sas_phy->id)) {
				phy = sas_phy->lldd_phy;
				break;
			}

		if (phy) {
			port->id = phy->port_id;

			/* Update linkrate of directly attached device. */
			if (!device->parent)
				device->linkrate = phy->sas_phy.linkrate;

			hisi_hba->hw->setup_itct(hisi_hba, sas_dev);
		} else
			port->id = 0xff;
	}
}

static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 state)
{
	struct sas_ha_struct *sas_ha = &hisi_hba->sha;
	struct asd_sas_port *_sas_port = NULL;
	int phy_no;

	for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
		struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
		struct asd_sas_phy *sas_phy = &phy->sas_phy;
		struct asd_sas_port *sas_port = sas_phy->port;
		bool do_port_check = _sas_port != sas_port;

		if (!sas_phy->phy->enabled)
			continue;

		/* Report PHY state change to libsas */
		if (state & BIT(phy_no)) {
			if (do_port_check && sas_port && sas_port->port_dev) {
				struct domain_device *dev = sas_port->port_dev;

				_sas_port = sas_port;

				if (dev_is_expander(dev->dev_type))
					sas_ha->notify_port_event(sas_phy,
							PORTE_BROADCAST_RCVD);
			}
		} else {
			hisi_sas_phy_down(hisi_hba, phy_no, 0);
		}
	}
}

static void hisi_sas_reset_init_all_devices(struct hisi_hba *hisi_hba)
{
	struct hisi_sas_device *sas_dev;
	struct domain_device *device;
	int i;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		sas_dev = &hisi_hba->devices[i];
		device = sas_dev->sas_device;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device)
			continue;

		hisi_sas_init_device(device);
	}
}

static void hisi_sas_send_ata_reset_each_phy(struct hisi_hba *hisi_hba,
					     struct asd_sas_port *sas_port,
					     struct domain_device *device)
{
	struct hisi_sas_tmf_task tmf_task = { .force_phy = 1 };
	struct ata_port *ap = device->sata_dev.ap;
	struct device *dev = hisi_hba->dev;
	int s = sizeof(struct host_to_dev_fis);
	int rc = TMF_RESP_FUNC_FAILED;
	struct asd_sas_phy *sas_phy;
	struct ata_link *link;
	u8 fis[20] = {0};
	u32 state;

	state = hisi_hba->hw->get_phys_state(hisi_hba);
	list_for_each_entry(sas_phy, &sas_port->phy_list, port_phy_el) {
		if (!(state & BIT(sas_phy->id)))
			continue;

		ata_for_each_link(link, ap, EDGE) {
			int pmp = sata_srst_pmp(link);

			tmf_task.phy_id = sas_phy->id;
			hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis);
			rc = hisi_sas_exec_internal_tmf_task(device, fis, s,
							     &tmf_task);
			if (rc != TMF_RESP_FUNC_COMPLETE) {
				dev_err(dev, "phy%d ata reset failed rc=%d\n",
					sas_phy->id, rc);
				break;
			}
		}
	}
}

static void hisi_sas_terminate_stp_reject(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	int port_no, rc, i;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
		struct domain_device *device = sas_dev->sas_device;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device)
			continue;

		rc = hisi_sas_internal_task_abort(hisi_hba, device,
						  HISI_SAS_INT_ABT_DEV, 0);
		if (rc < 0)
			dev_err(dev, "STP reject: abort dev failed %d\n", rc);
	}

	for (port_no = 0; port_no < hisi_hba->n_phy; port_no++) {
		struct hisi_sas_port *port = &hisi_hba->port[port_no];
		struct asd_sas_port *sas_port = &port->sas_port;
		struct domain_device *port_dev = sas_port->port_dev;
		struct domain_device *device;

		if (!port_dev || !dev_is_expander(port_dev->dev_type))
			continue;

		/* Try to find a SATA device */
		list_for_each_entry(device, &sas_port->dev_list,
				    dev_list_node) {
			if (dev_is_sata(device)) {
				hisi_sas_send_ata_reset_each_phy(hisi_hba,
								 sas_port,
								 device);
				break;
			}
		}
	}
}

void hisi_sas_controller_reset_prepare(struct hisi_hba *hisi_hba)
{
	struct Scsi_Host *shost = hisi_hba->shost;

	down(&hisi_hba->sem);
	hisi_hba->phy_state = hisi_hba->hw->get_phys_state(hisi_hba);

	scsi_block_requests(shost);
	hisi_hba->hw->wait_cmds_complete_timeout(hisi_hba, 100, 5000);

	if (timer_pending(&hisi_hba->timer))
		del_timer_sync(&hisi_hba->timer);

	set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
}
EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_prepare);

void hisi_sas_controller_reset_done(struct hisi_hba *hisi_hba)
{
	struct Scsi_Host *shost = hisi_hba->shost;

	/* Init and wait for PHYs to come up and all libsas event finished. */
	hisi_hba->hw->phys_init(hisi_hba);
	msleep(1000);
	hisi_sas_refresh_port_id(hisi_hba);
	clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);

	if (hisi_hba->reject_stp_links_msk)
		hisi_sas_terminate_stp_reject(hisi_hba);
	hisi_sas_reset_init_all_devices(hisi_hba);
	up(&hisi_hba->sem);
	scsi_unblock_requests(shost);
	clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);

	hisi_sas_rescan_topology(hisi_hba, hisi_hba->phy_state);
}
EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_done);

static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	struct Scsi_Host *shost = hisi_hba->shost;
	int rc;

	if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct[0].itct)
		queue_work(hisi_hba->wq, &hisi_hba->debugfs_work);

	if (!hisi_hba->hw->soft_reset)
		return -1;

	if (test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
		return -1;

	dev_info(dev, "controller resetting...\n");
	hisi_sas_controller_reset_prepare(hisi_hba);

	rc = hisi_hba->hw->soft_reset(hisi_hba);
	if (rc) {
		dev_warn(dev, "controller reset failed (%d)\n", rc);
		clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
		up(&hisi_hba->sem);
		scsi_unblock_requests(shost);
		clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
		return rc;
	}

	hisi_sas_controller_reset_done(hisi_hba);
	dev_info(dev, "controller reset complete\n");

	return 0;
}

static int hisi_sas_abort_task(struct sas_task *task)
{
	struct scsi_lun lun;
	struct hisi_sas_tmf_task tmf_task;
	struct domain_device *device = task->dev;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba;
	struct device *dev;
	int rc = TMF_RESP_FUNC_FAILED;
	unsigned long flags;

	if (!sas_dev)
		return TMF_RESP_FUNC_FAILED;

	hisi_hba = dev_to_hisi_hba(task->dev);
	dev = hisi_hba->dev;

	spin_lock_irqsave(&task->task_state_lock, flags);
	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
		struct hisi_sas_slot *slot = task->lldd_task;
		struct hisi_sas_cq *cq;

		if (slot) {
			/*
			 * sync irq to avoid free'ing task
			 * before using task in IO completion
			 */
			cq = &hisi_hba->cq[slot->dlvry_queue];
			synchronize_irq(cq->irq_no);
		}
		spin_unlock_irqrestore(&task->task_state_lock, flags);
		rc = TMF_RESP_FUNC_COMPLETE;
		goto out;
	}
	task->task_state_flags |= SAS_TASK_STATE_ABORTED;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
		struct scsi_cmnd *cmnd = task->uldd_task;
		struct hisi_sas_slot *slot = task->lldd_task;
		u16 tag = slot->idx;
		int rc2;

		int_to_scsilun(cmnd->device->lun, &lun);
		tmf_task.tmf = TMF_ABORT_TASK;
		tmf_task.tag_of_task_to_be_managed = tag;

		rc = hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun,
						  &tmf_task);

		rc2 = hisi_sas_internal_task_abort(hisi_hba, device,
						   HISI_SAS_INT_ABT_CMD, tag);
		if (rc2 < 0) {
			dev_err(dev, "abort task: internal abort (%d)\n", rc2);
			return TMF_RESP_FUNC_FAILED;
		}

		/*
		 * If the TMF finds that the IO is not in the device and also
		 * the internal abort does not succeed, then it is safe to
		 * free the slot.
		 * Note: if the internal abort succeeds then the slot
		 * will have already been completed
		 */
		if (rc == TMF_RESP_FUNC_COMPLETE && rc2 != TMF_RESP_FUNC_SUCC) {
			if (task->lldd_task)
				hisi_sas_do_release_task(hisi_hba, task, slot);
		}
	} else if (task->task_proto & SAS_PROTOCOL_SATA ||
		   task->task_proto & SAS_PROTOCOL_STP) {
		if (task->dev->dev_type == SAS_SATA_DEV) {
			rc = hisi_sas_internal_task_abort(hisi_hba, device,
							  HISI_SAS_INT_ABT_DEV,
							  0);
			if (rc < 0) {
				dev_err(dev, "abort task: internal abort failed\n");
				goto out;
			}
			hisi_sas_dereg_device(hisi_hba, device);
			rc = hisi_sas_softreset_ata_disk(device);
		}
	} else if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SMP) {
		/* SMP */
		struct hisi_sas_slot *slot = task->lldd_task;
		u32 tag = slot->idx;
		struct hisi_sas_cq *cq = &hisi_hba->cq[slot->dlvry_queue];

		rc = hisi_sas_internal_task_abort(hisi_hba, device,
						  HISI_SAS_INT_ABT_CMD, tag);
		if (((rc < 0) || (rc == TMF_RESP_FUNC_FAILED)) &&
		    task->lldd_task) {
			/*
			 * sync irq to avoid free'ing task
			 * before using task in IO completion
			 */
			synchronize_irq(cq->irq_no);
			slot->task = NULL;
		}
	}

out:
	if (rc != TMF_RESP_FUNC_COMPLETE)
		dev_notice(dev, "abort task: rc=%d\n", rc);
	return rc;
}

static int hisi_sas_abort_task_set(struct domain_device *device, u8 *lun)
{
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	struct hisi_sas_tmf_task tmf_task;
	int rc;

	rc = hisi_sas_internal_task_abort(hisi_hba, device,
					  HISI_SAS_INT_ABT_DEV, 0);
	if (rc < 0) {
		dev_err(dev, "abort task set: internal abort rc=%d\n", rc);
		return TMF_RESP_FUNC_FAILED;
	}
	hisi_sas_dereg_device(hisi_hba, device);

	tmf_task.tmf = TMF_ABORT_TASK_SET;
	rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);

	if (rc == TMF_RESP_FUNC_COMPLETE)
		hisi_sas_release_task(hisi_hba, device);

	return rc;
}

static int hisi_sas_clear_aca(struct domain_device *device, u8 *lun)
{
	struct hisi_sas_tmf_task tmf_task;
	int rc;

	tmf_task.tmf = TMF_CLEAR_ACA;
	rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);

	return rc;
}

static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device)
{
	struct sas_phy *local_phy = sas_get_local_phy(device);
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct sas_ha_struct *sas_ha = &hisi_hba->sha;
	DECLARE_COMPLETION_ONSTACK(phyreset);
	int rc, reset_type;

	if (!local_phy->enabled) {
		sas_put_local_phy(local_phy);
		return -ENODEV;
	}

	if (scsi_is_sas_phy_local(local_phy)) {
		struct asd_sas_phy *sas_phy =
			sas_ha->sas_phy[local_phy->number];
		struct hisi_sas_phy *phy =
			container_of(sas_phy, struct hisi_sas_phy, sas_phy);
		phy->in_reset = 1;
		phy->reset_completion = &phyreset;
	}

	reset_type = (sas_dev->dev_status == HISI_SAS_DEV_INIT ||
		      !dev_is_sata(device)) ? true : false;

	rc = sas_phy_reset(local_phy, reset_type);
	sas_put_local_phy(local_phy);

	if (scsi_is_sas_phy_local(local_phy)) {
		struct asd_sas_phy *sas_phy =
			sas_ha->sas_phy[local_phy->number];
		struct hisi_sas_phy *phy =
			container_of(sas_phy, struct hisi_sas_phy, sas_phy);
		int ret = wait_for_completion_timeout(&phyreset, 2 * HZ);
		unsigned long flags;

		spin_lock_irqsave(&phy->lock, flags);
		phy->reset_completion = NULL;
		phy->in_reset = 0;
		spin_unlock_irqrestore(&phy->lock, flags);

		/* report PHY down if timed out */
		if (!ret)
			hisi_sas_phy_down(hisi_hba, sas_phy->id, 0);
	} else if (sas_dev->dev_status != HISI_SAS_DEV_INIT) {
		/*
		 * If in init state, we rely on caller to wait for link to be
		 * ready; otherwise, except phy reset is fail, delay.
		 */
		if (!rc)
			msleep(2000);
	}

	return rc;
}

static int hisi_sas_I_T_nexus_reset(struct domain_device *device)
{
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	int rc;

	rc = hisi_sas_internal_task_abort(hisi_hba, device,
					  HISI_SAS_INT_ABT_DEV, 0);
	if (rc < 0) {
		dev_err(dev, "I_T nexus reset: internal abort (%d)\n", rc);
		return TMF_RESP_FUNC_FAILED;
	}
	hisi_sas_dereg_device(hisi_hba, device);

	if (dev_is_sata(device)) {
		rc = hisi_sas_softreset_ata_disk(device);
		if (rc == TMF_RESP_FUNC_FAILED)
			return TMF_RESP_FUNC_FAILED;
	}

	rc = hisi_sas_debug_I_T_nexus_reset(device);

	if ((rc == TMF_RESP_FUNC_COMPLETE) || (rc == -ENODEV))
		hisi_sas_release_task(hisi_hba, device);

	return rc;
}

static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	int rc = TMF_RESP_FUNC_FAILED;

	/* Clear internal IO and then lu reset */
	rc = hisi_sas_internal_task_abort(hisi_hba, device,
					  HISI_SAS_INT_ABT_DEV, 0);
	if (rc < 0) {
		dev_err(dev, "lu_reset: internal abort failed\n");
		goto out;
	}
	hisi_sas_dereg_device(hisi_hba, device);

	if (dev_is_sata(device)) {
		struct sas_phy *phy;

		phy = sas_get_local_phy(device);

		rc = sas_phy_reset(phy, true);

		if (rc == 0)
			hisi_sas_release_task(hisi_hba, device);
		sas_put_local_phy(phy);
	} else {
		struct hisi_sas_tmf_task tmf_task = { .tmf = TMF_LU_RESET };

		rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
		if (rc == TMF_RESP_FUNC_COMPLETE)
			hisi_sas_release_task(hisi_hba, device);
	}
out:
	if (rc != TMF_RESP_FUNC_COMPLETE)
		dev_err(dev, "lu_reset: for device[%d]:rc= %d\n",
			sas_dev->device_id, rc);
	return rc;
}

static int hisi_sas_clear_nexus_ha(struct sas_ha_struct *sas_ha)
{
	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
	struct device *dev = hisi_hba->dev;
	HISI_SAS_DECLARE_RST_WORK_ON_STACK(r);
	int rc, i;

	queue_work(hisi_hba->wq, &r.work);
	wait_for_completion(r.completion);
	if (!r.done)
		return TMF_RESP_FUNC_FAILED;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
		struct domain_device *device = sas_dev->sas_device;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device ||
		    dev_is_expander(device->dev_type))
			continue;

		rc = hisi_sas_debug_I_T_nexus_reset(device);
		if (rc != TMF_RESP_FUNC_COMPLETE)
			dev_info(dev, "clear nexus ha: for device[%d] rc=%d\n",
				 sas_dev->device_id, rc);
	}

	hisi_sas_release_tasks(hisi_hba);

	return TMF_RESP_FUNC_COMPLETE;
}

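/* libsas query_task handler: maps TMF QUERY_TASK responses onto libsas TMF codes. */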
static int hisi_sas_query_task(struct sas_task *task)
{
	struct scsi_lun lun;
	struct hisi_sas_tmf_task tmf_task;
	int rc = TMF_RESP_FUNC_FAILED;

	if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
		struct scsi_cmnd *cmnd = task->uldd_task;
		struct domain_device *device = task->dev;
		struct hisi_sas_slot *slot = task->lldd_task;
		u32 tag = slot->idx;

		int_to_scsilun(cmnd->device->lun, &lun);
		tmf_task.tmf = TMF_QUERY_TASK;
		tmf_task.tag_of_task_to_be_managed = tag;

		rc = hisi_sas_debug_issue_ssp_tmf(device,
						  lun.scsi_lun,
						  &tmf_task);
		switch (rc) {
		/* The task is still in Lun, release it then */
		case TMF_RESP_FUNC_SUCC:
		/* The task is not in Lun or failed, reset the phy */
		case TMF_RESP_FUNC_FAILED:
		case TMF_RESP_FUNC_COMPLETE:
			break;
		default:
			rc = TMF_RESP_FUNC_FAILED;
			break;
		}
	}
	return rc;
}

static int
hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
				  struct sas_task *task, int abort_flag,
				  int task_tag, struct hisi_sas_dq *dq)
{
	struct domain_device *device = task->dev;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct device *dev = hisi_hba->dev;
	struct hisi_sas_port *port;
	struct hisi_sas_slot *slot;
	struct asd_sas_port *sas_port = device->port;
	struct hisi_sas_cmd_hdr *cmd_hdr_base;
	int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx;
	unsigned long flags;
	int wr_q_index;

	if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags)))
		return -EINVAL;

	if (!device->port)
		return -1;

	port = to_hisi_sas_port(sas_port);

	/* simply get a slot and send abort command */
	rc = hisi_sas_slot_index_alloc(hisi_hba, NULL);
	if (rc < 0)
		goto err_out;

	slot_idx = rc;
	slot = &hisi_hba->slot_info[slot_idx];

	spin_lock(&dq->lock);
	wr_q_index = dq->wr_point;
	dq->wr_point = (dq->wr_point + 1) % HISI_SAS_QUEUE_SLOTS;
	list_add_tail(&slot->delivery, &dq->list);
	spin_unlock(&dq->lock);
	spin_lock(&sas_dev->lock);
	list_add_tail(&slot->entry, &sas_dev->list);
	spin_unlock(&sas_dev->lock);

	dlvry_queue = dq->id;
	dlvry_queue_slot = wr_q_index;

	slot->device_id = sas_dev->device_id;
	slot->n_elem = n_elem;
	slot->dlvry_queue = dlvry_queue;
	slot->dlvry_queue_slot = dlvry_queue_slot;
	cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
	slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
	slot->task = task;
	slot->port = port;
	slot->is_internal = true;
	task->lldd_task = slot;

	memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
	memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
	memset(hisi_sas_status_buf_addr_mem(slot), 0,
	       sizeof(struct hisi_sas_err_record));

	hisi_sas_task_prep_abort(hisi_hba, slot, device_id,
				 abort_flag, task_tag);

	spin_lock_irqsave(&task->task_state_lock, flags);
	task->task_state_flags |= SAS_TASK_AT_INITIATOR;
	spin_unlock_irqrestore(&task->task_state_lock, flags);
	WRITE_ONCE(slot->ready, 1);
	/* send abort command to the chip */
	spin_lock(&dq->lock);
	hisi_hba->hw->start_delivery(dq);
	spin_unlock(&dq->lock);

	return 0;

err_out:
	dev_err(dev, "internal abort task prep: failed[%d]!\n", rc);

	return rc;
}

/**
 * _hisi_sas_internal_task_abort -- execute an internal
 * abort command for single IO command or a device
 * @hisi_hba: host controller struct
 * @device: domain device
 * @abort_flag: mode of operation, device or single IO
 * @tag: tag of IO to be aborted (only relevant to single
 *       IO mode)
 * @dq: delivery queue for this internal abort command
 */
static int
_hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
			      struct domain_device *device, int abort_flag,
			      int tag, struct hisi_sas_dq *dq)
{
	struct sas_task *task;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct device *dev = hisi_hba->dev;
	int res;

	/*
	 * The interface is not realized means this HW don't support internal
	 * abort, or don't need to do internal abort. Then here, we return
	 * TMF_RESP_FUNC_FAILED and let other steps go on, which depends that
	 * the internal abort has been executed and returned CQ.
	 */
	if (!hisi_hba->hw->prep_abort)
		return TMF_RESP_FUNC_FAILED;

	task = sas_alloc_slow_task(GFP_KERNEL);
	if (!task)
		return -ENOMEM;

	task->dev = device;
	task->task_proto = device->tproto;
	task->task_done = hisi_sas_task_done;
	task->slow_task->timer.function = hisi_sas_tmf_timedout;
	task->slow_task->timer.expires = jiffies + INTERNAL_ABORT_TIMEOUT * HZ;
	add_timer(&task->slow_task->timer);

	res = hisi_sas_internal_abort_task_exec(hisi_hba, sas_dev->device_id,
						task, abort_flag, tag, dq);
	if (res) {
		del_timer(&task->slow_task->timer);
		dev_err(dev, "internal task abort: executing internal task failed: %d\n",
			res);
		goto exit;
	}
	wait_for_completion(&task->slow_task->completion);
	res = TMF_RESP_FUNC_FAILED;

	/* Internal abort timed out */
	if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
		if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct[0].itct)
			queue_work(hisi_hba->wq, &hisi_hba->debugfs_work);

		if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
			struct hisi_sas_slot *slot = task->lldd_task;

			if (slot) {
				struct hisi_sas_cq *cq =
					&hisi_hba->cq[slot->dlvry_queue];
				/*
				 * sync irq to avoid free'ing task
				 * before using task in IO completion
				 */
				synchronize_irq(cq->irq_no);
				slot->task = NULL;
			}
			dev_err(dev, "internal task abort: timeout and not done.\n");

			res = -EIO;
			goto exit;
		} else
			dev_err(dev, "internal task abort: timeout.\n");
	}

	if (task->task_status.resp == SAS_TASK_COMPLETE &&
	    task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
		res = TMF_RESP_FUNC_COMPLETE;
		goto exit;
	}

	if (task->task_status.resp == SAS_TASK_COMPLETE &&
	    task->task_status.stat == TMF_RESP_FUNC_SUCC) {
		res = TMF_RESP_FUNC_SUCC;
		goto exit;
	}

exit:
	dev_dbg(dev, "internal task abort: task to dev %016llx task=%pK resp: 0x%x sts 0x%x\n",
		SAS_ADDR(device->sas_addr), task,
		task->task_status.resp, /* 0 is complete, -1 is undelivered */
		task->task_status.stat);
	sas_free_task(task);

	return res;
}

static int
hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
			     struct domain_device *device,
			     int abort_flag, int tag)
{
	struct hisi_sas_slot *slot;
	struct device *dev = hisi_hba->dev;
	struct hisi_sas_dq *dq;
	int i, rc;

	switch (abort_flag) {
	case HISI_SAS_INT_ABT_CMD:
		slot = &hisi_hba->slot_info[tag];
		dq = &hisi_hba->dq[slot->dlvry_queue];
		return _hisi_sas_internal_task_abort(hisi_hba, device,
						     abort_flag, tag, dq);
	case HISI_SAS_INT_ABT_DEV:
		for (i = 0; i < hisi_hba->cq_nvecs; i++) {
			struct hisi_sas_cq *cq = &hisi_hba->cq[i];
			const struct cpumask *mask = cq->irq_mask;

			if (mask && !cpumask_intersects(cpu_online_mask, mask))
				continue;
			dq = &hisi_hba->dq[i];
			rc = _hisi_sas_internal_task_abort(hisi_hba, device,
							   abort_flag, tag,
							   dq);
			if (rc)
				return rc;
		}
		break;
	default:
		dev_err(dev, "Unrecognised internal abort flag (%d)\n",
			abort_flag);
		return -EINVAL;
	}

	return 0;
}

*sas_phy
)
2160 hisi_sas_port_notify_formed(sas_phy
);
static int hisi_sas_write_gpio(struct sas_ha_struct *sha, u8 reg_type,
			       u8 reg_index, u8 reg_count, u8 *write_data)
{
	struct hisi_hba *hisi_hba = sha->lldd_ha;

	if (!hisi_hba->hw->write_gpio)
		return -EOPNOTSUPP;

	return hisi_hba->hw->write_gpio(hisi_hba, reg_type,
					reg_index, reg_count, write_data);
}

static void hisi_sas_phy_disconnected(struct hisi_sas_phy *phy)
{
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct sas_phy *sphy = sas_phy->phy;
	unsigned long flags;

	phy->phy_attached = 0;
	phy->phy_type = 0;
	phy->port = NULL;

	spin_lock_irqsave(&phy->lock, flags);
	if (phy->enable)
		sphy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN;
	else
		sphy->negotiated_linkrate = SAS_PHY_DISABLED;
	spin_unlock_irqrestore(&phy->lock, flags);
}
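/*
 * Note: @rdy distinguishes the two phy-down flavours. A phy that dropped
 * but is ready again just re-notifies the port, while a phy that is truly
 * gone is reported to libsas as loss of signal and the phy/port state is
 * torn down; during a controller reset the transient "flutter" down event
 * is deliberately ignored.
 */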
void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct sas_ha_struct *sas_ha = &hisi_hba->sha;
	struct device *dev = hisi_hba->dev;

	if (rdy) {
		/* Phy down but ready */
		hisi_sas_bytes_dmaed(hisi_hba, phy_no);
		hisi_sas_port_notify_formed(sas_phy);
	} else {
		struct hisi_sas_port *port = phy->port;

		if (test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags) ||
		    phy->in_reset) {
			dev_info(dev, "ignore flutter phy%d down\n", phy_no);
			return;
		}
		/* Phy down and not ready */
		sas_ha->notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL);
		sas_phy_disconnected(sas_phy);

		if (port) {
			if (phy->phy_type & PORT_TYPE_SAS) {
				int port_id = port->id;

				if (!hisi_hba->hw->get_wideport_bitmap(hisi_hba,
								       port_id))
					port->port_attached = 0;
			} else if (phy->phy_type & PORT_TYPE_SATA)
				port->port_attached = 0;
		}
		hisi_sas_phy_disconnected(phy);
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_phy_down);
void hisi_sas_sync_irqs(struct hisi_hba *hisi_hba)
{
	int i;

	for (i = 0; i < hisi_hba->cq_nvecs; i++) {
		struct hisi_sas_cq *cq = &hisi_hba->cq[i];

		synchronize_irq(cq->irq_no);
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_sync_irqs);
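/*
 * SCSI EH adapter-reset entry point. Only SCSI_ADAPTER_RESET is serviced;
 * the actual controller reset runs asynchronously from rst_work on the
 * driver workqueue rather than in the caller's context.
 */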
int hisi_sas_host_reset(struct Scsi_Host *shost, int reset_type)
{
	struct hisi_hba *hisi_hba = shost_priv(shost);

	if (reset_type != SCSI_ADAPTER_RESET)
		return -EOPNOTSUPP;

	queue_work(hisi_hba->wq, &hisi_hba->rst_work);

	return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_host_reset);
struct scsi_transport_template *hisi_sas_stt;
EXPORT_SYMBOL_GPL(hisi_sas_stt);
static struct sas_domain_function_template hisi_sas_transport_ops = {
	.lldd_dev_found		= hisi_sas_dev_found,
	.lldd_dev_gone		= hisi_sas_dev_gone,
	.lldd_execute_task	= hisi_sas_queue_command,
	.lldd_control_phy	= hisi_sas_control_phy,
	.lldd_abort_task	= hisi_sas_abort_task,
	.lldd_abort_task_set	= hisi_sas_abort_task_set,
	.lldd_clear_aca		= hisi_sas_clear_aca,
	.lldd_I_T_nexus_reset	= hisi_sas_I_T_nexus_reset,
	.lldd_lu_reset		= hisi_sas_lu_reset,
	.lldd_query_task	= hisi_sas_query_task,
	.lldd_clear_nexus_ha	= hisi_sas_clear_nexus_ha,
	.lldd_port_formed	= hisi_sas_port_formed,
	.lldd_write_gpio	= hisi_sas_write_gpio,
};
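/*
 * libsas dispatches into the LLDD through the lldd_* hooks above for
 * device discovery, task execution, task management and phy/port control;
 * broadly, everything else (expander/SMP handling, topology management)
 * stays in the libsas core.
 */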
void hisi_sas_init_mem(struct hisi_hba *hisi_hba)
{
	int i, s, j, max_command_entries = HISI_SAS_MAX_COMMANDS;
	struct hisi_sas_breakpoint *sata_breakpoint = hisi_hba->sata_breakpoint;

	for (i = 0; i < hisi_hba->queue_count; i++) {
		struct hisi_sas_cq *cq = &hisi_hba->cq[i];
		struct hisi_sas_dq *dq = &hisi_hba->dq[i];
		struct hisi_sas_cmd_hdr *cmd_hdr = hisi_hba->cmd_hdr[i];

		s = sizeof(struct hisi_sas_cmd_hdr);
		for (j = 0; j < HISI_SAS_QUEUE_SLOTS; j++)
			memset(&cmd_hdr[j], 0, s);
		dq->wr_point = 0;

		s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
		memset(hisi_hba->complete_hdr[i], 0, s);
		cq->rd_point = 0;
	}

	s = sizeof(struct hisi_sas_initial_fis) * hisi_hba->n_phy;
	memset(hisi_hba->initial_fis, 0, s);

	s = max_command_entries * sizeof(struct hisi_sas_iost);
	memset(hisi_hba->iost, 0, s);

	s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
	memset(hisi_hba->breakpoint, 0, s);

	s = sizeof(struct hisi_sas_sata_breakpoint);
	for (j = 0; j < HISI_SAS_MAX_ITCT_ENTRIES; j++)
		memset(&sata_breakpoint[j], 0, s);
}
EXPORT_SYMBOL_GPL(hisi_sas_init_mem);
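/*
 * Allocate the DMA-coherent queue memory, the ITCT/IOST/breakpoint tables
 * and the per-slot buffers for the host. Allocations use the managed
 * devm_/dmam_ variants, so they are released automatically on driver
 * detach; hisi_sas_free() only has to deal with the phy timers and the
 * workqueue.
 */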
int hisi_sas_alloc(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	int i, j, s, max_command_entries = HISI_SAS_MAX_COMMANDS;
	int max_command_entries_ru, sz_slot_buf_ru;
	int blk_cnt, slots_per_blk;

	sema_init(&hisi_hba->sem, 1);
	spin_lock_init(&hisi_hba->lock);
	for (i = 0; i < hisi_hba->n_phy; i++) {
		hisi_sas_phy_init(hisi_hba, i);
		hisi_hba->port[i].port_attached = 0;
		hisi_hba->port[i].id = -1;
	}

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		hisi_hba->devices[i].dev_type = SAS_PHY_UNUSED;
		hisi_hba->devices[i].device_id = i;
		hisi_hba->devices[i].dev_status = HISI_SAS_DEV_INIT;
	}

	for (i = 0; i < hisi_hba->queue_count; i++) {
		struct hisi_sas_cq *cq = &hisi_hba->cq[i];
		struct hisi_sas_dq *dq = &hisi_hba->dq[i];

		/* Completion queue structure */
		cq->id = i;
		cq->hisi_hba = hisi_hba;

		/* Delivery queue structure */
		spin_lock_init(&dq->lock);
		INIT_LIST_HEAD(&dq->list);
		dq->id = i;
		dq->hisi_hba = hisi_hba;

		/* Delivery queue */
		s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
		hisi_hba->cmd_hdr[i] = dmam_alloc_coherent(dev, s,
						&hisi_hba->cmd_hdr_dma[i],
						GFP_KERNEL);
		if (!hisi_hba->cmd_hdr[i])
			goto err_out;

		/* Completion queue */
		s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
		hisi_hba->complete_hdr[i] = dmam_alloc_coherent(dev, s,
						&hisi_hba->complete_hdr_dma[i],
						GFP_KERNEL);
		if (!hisi_hba->complete_hdr[i])
			goto err_out;
	}

	s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
	hisi_hba->itct = dmam_alloc_coherent(dev, s, &hisi_hba->itct_dma,
					     GFP_KERNEL);
	if (!hisi_hba->itct)
		goto err_out;

	hisi_hba->slot_info = devm_kcalloc(dev, max_command_entries,
					   sizeof(struct hisi_sas_slot),
					   GFP_KERNEL);
	if (!hisi_hba->slot_info)
		goto err_out;

	/* roundup to avoid overly large block size */
	max_command_entries_ru = roundup(max_command_entries, 64);
	if (hisi_hba->prot_mask & HISI_SAS_DIX_PROT_MASK)
		sz_slot_buf_ru = sizeof(struct hisi_sas_slot_dif_buf_table);
	else
		sz_slot_buf_ru = sizeof(struct hisi_sas_slot_buf_table);
	sz_slot_buf_ru = roundup(sz_slot_buf_ru, 64);
	s = max(lcm(max_command_entries_ru, sz_slot_buf_ru), PAGE_SIZE);
	blk_cnt = (max_command_entries_ru * sz_slot_buf_ru) / s;
	slots_per_blk = s / sz_slot_buf_ru;
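	/*
	 * Worked example with illustrative numbers (the real sizes depend
	 * on the hw variant and protection mode): max_command_entries_ru =
	 * 4096 and sz_slot_buf_ru = 576 give lcm(4096, 576) = 36864, so
	 * s = 36864, blk_cnt = (4096 * 576) / 36864 = 64 and slots_per_blk
	 * = 36864 / 576 = 64. Choosing s from the lcm keeps each block an
	 * exact multiple of the slot buffer size, so no slot buffer ever
	 * straddles a block boundary.
	 */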
	for (i = 0; i < blk_cnt; i++) {
		int slot_index = i * slots_per_blk;
		dma_addr_t buf_dma;
		void *buf;

		buf = dmam_alloc_coherent(dev, s, &buf_dma,
					  GFP_KERNEL);
		if (!buf)
			goto err_out;

		for (j = 0; j < slots_per_blk; j++, slot_index++) {
			struct hisi_sas_slot *slot;

			slot = &hisi_hba->slot_info[slot_index];
			slot->buf = buf;
			slot->buf_dma = buf_dma;
			slot->idx = slot_index;

			buf += sz_slot_buf_ru;
			buf_dma += sz_slot_buf_ru;
		}
	}

	s = max_command_entries * sizeof(struct hisi_sas_iost);
	hisi_hba->iost = dmam_alloc_coherent(dev, s, &hisi_hba->iost_dma,
					     GFP_KERNEL);
	if (!hisi_hba->iost)
		goto err_out;

	s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
	hisi_hba->breakpoint = dmam_alloc_coherent(dev, s,
						   &hisi_hba->breakpoint_dma,
						   GFP_KERNEL);
	if (!hisi_hba->breakpoint)
		goto err_out;

	hisi_hba->slot_index_count = max_command_entries;
	s = hisi_hba->slot_index_count / BITS_PER_BYTE;
	hisi_hba->slot_index_tags = devm_kzalloc(dev, s, GFP_KERNEL);
	if (!hisi_hba->slot_index_tags)
		goto err_out;

	s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS;
	hisi_hba->initial_fis = dmam_alloc_coherent(dev, s,
						    &hisi_hba->initial_fis_dma,
						    GFP_KERNEL);
	if (!hisi_hba->initial_fis)
		goto err_out;

	s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
	hisi_hba->sata_breakpoint = dmam_alloc_coherent(dev, s,
						&hisi_hba->sata_breakpoint_dma,
						GFP_KERNEL);
	if (!hisi_hba->sata_breakpoint)
		goto err_out;

	hisi_sas_slot_index_init(hisi_hba);
	hisi_hba->last_slot_index = HISI_SAS_UNRESERVED_IPTT;

	hisi_hba->wq = create_singlethread_workqueue(dev_name(dev));
	if (!hisi_hba->wq) {
		dev_err(dev, "sas_alloc: failed to create workqueue\n");
		goto err_out;
	}

	return 0;
err_out:
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(hisi_sas_alloc);
void hisi_sas_free(struct hisi_hba *hisi_hba)
{
	int i;

	for (i = 0; i < hisi_hba->n_phy; i++) {
		struct hisi_sas_phy *phy = &hisi_hba->phy[i];

		del_timer_sync(&phy->timer);
	}

	if (hisi_hba->wq)
		destroy_workqueue(hisi_hba->wq);
}
EXPORT_SYMBOL_GPL(hisi_sas_free);
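/*
 * Two reset-work flavours follow: rst_work is fire-and-forget (queued
 * e.g. from hisi_sas_host_reset), while the sync variant records success
 * in rst->done and signals a completion so the caller can wait for the
 * reset to finish.
 */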
void hisi_sas_rst_work_handler(struct work_struct *work)
{
	struct hisi_hba *hisi_hba =
		container_of(work, struct hisi_hba, rst_work);

	hisi_sas_controller_reset(hisi_hba);
}
EXPORT_SYMBOL_GPL(hisi_sas_rst_work_handler);
void hisi_sas_sync_rst_work_handler(struct work_struct *work)
{
	struct hisi_sas_rst *rst =
		container_of(work, struct hisi_sas_rst, work);

	if (!hisi_sas_controller_reset(rst->hisi_hba))
		rst->done = true;
	complete(rst->completion);
}
EXPORT_SYMBOL_GPL(hisi_sas_sync_rst_work_handler);
int hisi_sas_get_fw_info(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	struct platform_device *pdev = hisi_hba->platform_dev;
	struct device_node *np = pdev ? pdev->dev.of_node : NULL;
	struct clk *refclk;

	if (device_property_read_u8_array(dev, "sas-addr", hisi_hba->sas_addr,
					  SAS_ADDR_SIZE)) {
		dev_err(dev, "could not get property sas-addr\n");
		return -ENOENT;
	}

	if (np) {
		/*
		 * These properties are only required for platform
		 * device-based controller with DT firmware.
		 */
		hisi_hba->ctrl = syscon_regmap_lookup_by_phandle(np,
					"hisilicon,sas-syscon");
		if (IS_ERR(hisi_hba->ctrl)) {
			dev_err(dev, "could not get syscon\n");
			return -ENOENT;
		}

		if (device_property_read_u32(dev, "ctrl-reset-reg",
					     &hisi_hba->ctrl_reset_reg)) {
			dev_err(dev, "could not get property ctrl-reset-reg\n");
			return -ENOENT;
		}

		if (device_property_read_u32(dev, "ctrl-reset-sts-reg",
					     &hisi_hba->ctrl_reset_sts_reg)) {
			dev_err(dev, "could not get property ctrl-reset-sts-reg\n");
			return -ENOENT;
		}

		if (device_property_read_u32(dev, "ctrl-clock-ena-reg",
					     &hisi_hba->ctrl_clock_ena_reg)) {
			dev_err(dev, "could not get property ctrl-clock-ena-reg\n");
			return -ENOENT;
		}
	}

	refclk = devm_clk_get(dev, NULL);
	if (IS_ERR(refclk))
		dev_dbg(dev, "no ref clk property\n");
	else
		hisi_hba->refclk_frequency_mhz = clk_get_rate(refclk) / 1000000;

	if (device_property_read_u32(dev, "phy-count", &hisi_hba->n_phy)) {
		dev_err(dev, "could not get property phy-count\n");
		return -ENOENT;
	}

	if (device_property_read_u32(dev, "queue-count",
				     &hisi_hba->queue_count)) {
		dev_err(dev, "could not get property queue-count\n");
		return -ENOENT;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_get_fw_info);
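/*
 * Illustrative DT fragment for the properties parsed above. The property
 * names match what this function reads; the node name, phandle and
 * register values below are made up for illustration, so consult the
 * hisilicon SAS devicetree binding for the authoritative requirements:
 *
 *	sas@c1000000 {
 *		sas-addr = [50 01 88 20 16 00 00 00];
 *		hisilicon,sas-syscon = <&pctrl>;
 *		ctrl-reset-reg = <0xa60>;
 *		ctrl-reset-sts-reg = <0x5a30>;
 *		ctrl-clock-ena-reg = <0x338>;
 *		phy-count = <8>;
 *		queue-count = <16>;
 *	};
 */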
static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
					      const struct hisi_sas_hw *hw)
{
	struct resource *res;
	struct Scsi_Host *shost;
	struct hisi_hba *hisi_hba;
	struct device *dev = &pdev->dev;
	int error;

	shost = scsi_host_alloc(hw->sht, sizeof(*hisi_hba));
	if (!shost) {
		dev_err(dev, "scsi host alloc failed\n");
		return NULL;
	}
	hisi_hba = shost_priv(shost);

	INIT_WORK(&hisi_hba->rst_work, hisi_sas_rst_work_handler);
	hisi_hba->hw = hw;
	hisi_hba->dev = dev;
	hisi_hba->platform_dev = pdev;
	hisi_hba->shost = shost;
	SHOST_TO_SAS_HA(shost) = &hisi_hba->sha;

	timer_setup(&hisi_hba->timer, NULL, 0);

	if (hisi_sas_get_fw_info(hisi_hba) < 0)
		goto err_out;

	error = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (error)
		error = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));

	if (error) {
		dev_err(dev, "No usable DMA addressing method\n");
		goto err_out;
	}

	hisi_hba->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(hisi_hba->regs))
		goto err_out;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (res) {
		hisi_hba->sgpio_regs = devm_ioremap_resource(dev, res);
		if (IS_ERR(hisi_hba->sgpio_regs))
			goto err_out;
	}

	if (hisi_sas_alloc(hisi_hba)) {
		hisi_sas_free(hisi_hba);
		goto err_out;
	}

	return shost;
err_out:
	scsi_host_put(shost);
	dev_err(dev, "shost alloc failed\n");
	return NULL;
}
static int hisi_sas_interrupt_preinit(struct hisi_hba *hisi_hba)
{
	if (hisi_hba->hw->interrupt_preinit)
		return hisi_hba->hw->interrupt_preinit(hisi_hba);
	return 0;
}
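/*
 * Common probe path for the platform-device based hw variants. The rough
 * sequence: allocate the Scsi_Host and hba, wire up the sas_ha_struct,
 * let the hw layer pre-initialise interrupts, register with the SCSI
 * midlayer and libsas, run the hw init, then scan the host.
 */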
int hisi_sas_probe(struct platform_device *pdev,
		   const struct hisi_sas_hw *hw)
{
	struct Scsi_Host *shost;
	struct hisi_hba *hisi_hba;
	struct device *dev = &pdev->dev;
	struct asd_sas_phy **arr_phy;
	struct asd_sas_port **arr_port;
	struct sas_ha_struct *sha;
	int rc, phy_nr, port_nr, i;

	shost = hisi_sas_shost_alloc(pdev, hw);
	if (!shost)
		return -ENOMEM;

	sha = SHOST_TO_SAS_HA(shost);
	hisi_hba = shost_priv(shost);
	platform_set_drvdata(pdev, sha);

	phy_nr = port_nr = hisi_hba->n_phy;

	arr_phy = devm_kcalloc(dev, phy_nr, sizeof(void *), GFP_KERNEL);
	arr_port = devm_kcalloc(dev, port_nr, sizeof(void *), GFP_KERNEL);
	if (!arr_phy || !arr_port) {
		rc = -ENOMEM;
		goto err_out_ha;
	}

	sha->sas_phy = arr_phy;
	sha->sas_port = arr_port;
	sha->lldd_ha = hisi_hba;

	shost->transportt = hisi_sas_stt;
	shost->max_id = HISI_SAS_MAX_DEVICES;
	shost->max_lun = ~0;
	shost->max_channel = 1;
	shost->max_cmd_len = 16;
	if (hisi_hba->hw->slot_index_alloc) {
		shost->can_queue = HISI_SAS_MAX_COMMANDS;
		shost->cmd_per_lun = HISI_SAS_MAX_COMMANDS;
	} else {
		shost->can_queue = HISI_SAS_UNRESERVED_IPTT;
		shost->cmd_per_lun = HISI_SAS_UNRESERVED_IPTT;
	}

	sha->sas_ha_name = DRV_NAME;
	sha->dev = hisi_hba->dev;
	sha->lldd_module = THIS_MODULE;
	sha->sas_addr = &hisi_hba->sas_addr[0];
	sha->num_phys = hisi_hba->n_phy;
	sha->core.shost = hisi_hba->shost;

	for (i = 0; i < hisi_hba->n_phy; i++) {
		sha->sas_phy[i] = &hisi_hba->phy[i].sas_phy;
		sha->sas_port[i] = &hisi_hba->port[i].sas_port;
	}

	rc = hisi_sas_interrupt_preinit(hisi_hba);
	if (rc)
		goto err_out_ha;

	rc = scsi_add_host(shost, &pdev->dev);
	if (rc)
		goto err_out_ha;

	rc = sas_register_ha(sha);
	if (rc)
		goto err_out_register_ha;

	rc = hisi_hba->hw->hw_init(hisi_hba);
	if (rc)
		goto err_out_register_ha;

	scsi_scan_host(shost);

	return 0;

err_out_register_ha:
	scsi_remove_host(shost);
err_out_ha:
	hisi_sas_free(hisi_hba);
	scsi_host_put(shost);
	return rc;
}
EXPORT_SYMBOL_GPL(hisi_sas_probe);
int hisi_sas_remove(struct platform_device *pdev)
{
	struct sas_ha_struct *sha = platform_get_drvdata(pdev);
	struct hisi_hba *hisi_hba = sha->lldd_ha;
	struct Scsi_Host *shost = sha->core.shost;

	if (timer_pending(&hisi_hba->timer))
		del_timer(&hisi_hba->timer);

	sas_unregister_ha(sha);
	sas_remove_host(sha->core.shost);

	hisi_sas_free(hisi_hba);
	scsi_host_put(shost);
	return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_remove);
bool hisi_sas_debugfs_enable;
EXPORT_SYMBOL_GPL(hisi_sas_debugfs_enable);
module_param_named(debugfs_enable, hisi_sas_debugfs_enable, bool, 0444);
MODULE_PARM_DESC(hisi_sas_debugfs_enable, "Enable driver debugfs (default disabled)");

u32 hisi_sas_debugfs_dump_count = 1;
EXPORT_SYMBOL_GPL(hisi_sas_debugfs_dump_count);
module_param_named(debugfs_dump_count, hisi_sas_debugfs_dump_count, uint, 0444);
MODULE_PARM_DESC(hisi_sas_debugfs_dump_count, "Number of debugfs dumps to allow");

struct dentry *hisi_sas_debugfs_dir;
EXPORT_SYMBOL_GPL(hisi_sas_debugfs_dir);
static __init int hisi_sas_init(void)
{
	hisi_sas_stt = sas_domain_attach_transport(&hisi_sas_transport_ops);
	if (!hisi_sas_stt)
		return -ENOMEM;

	if (hisi_sas_debugfs_enable) {
		hisi_sas_debugfs_dir = debugfs_create_dir("hisi_sas", NULL);
		if (hisi_sas_debugfs_dump_count > HISI_SAS_MAX_DEBUGFS_DUMP) {
			pr_info("hisi_sas: Limiting debugfs dump count\n");
			hisi_sas_debugfs_dump_count = HISI_SAS_MAX_DEBUGFS_DUMP;
		}
	}

	return 0;
}
static __exit void hisi_sas_exit(void)
{
	sas_release_transport(hisi_sas_stt);

	debugfs_remove(hisi_sas_debugfs_dir);
}
module_init(hisi_sas_init);
module_exit(hisi_sas_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Garry <john.garry@huawei.com>");
MODULE_DESCRIPTION("HISILICON SAS controller driver");
MODULE_ALIAS("platform:" DRV_NAME);