/*
 * Copyright (c) 2015 Linaro Ltd.
 * Copyright (c) 2015 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include "hisi_sas.h"
#define DRV_NAME "hisi_sas"

#define DEV_IS_GONE(dev) \
	((!dev) || (dev->dev_type == SAS_PHY_UNUSED))
static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
					u8 *lun, struct hisi_sas_tmf_task *tmf);
static int
hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
			     struct domain_device *device,
			     int abort_flag, int tag);
static int hisi_sas_softreset_ata_disk(struct domain_device *device);
static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
				void *funcdata);
static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
				  struct domain_device *device);
static void hisi_sas_dev_gone(struct domain_device *device);
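/* Classify the ATA command in a host-to-device FIS so the HW command
 * header can carry the matching SATA protocol type (FPDMA/PIO/DMA/non-data).
 */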
u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis, int direction)
{
	switch (fis->command) {
	case ATA_CMD_FPDMA_WRITE:
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_RECV:
	case ATA_CMD_FPDMA_SEND:
	case ATA_CMD_NCQ_NON_DATA:
		return HISI_SAS_SATA_PROTOCOL_FPDMA;

	case ATA_CMD_DOWNLOAD_MICRO:
	case ATA_CMD_PMP_READ:
	case ATA_CMD_READ_LOG_EXT:
	case ATA_CMD_PIO_READ:
	case ATA_CMD_PIO_READ_EXT:
	case ATA_CMD_PMP_WRITE:
	case ATA_CMD_WRITE_LOG_EXT:
	case ATA_CMD_PIO_WRITE:
	case ATA_CMD_PIO_WRITE_EXT:
		return HISI_SAS_SATA_PROTOCOL_PIO;

	case ATA_CMD_DOWNLOAD_MICRO_DMA:
	case ATA_CMD_PMP_READ_DMA:
	case ATA_CMD_PMP_WRITE_DMA:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_READ_LOG_DMA_EXT:
	case ATA_CMD_READ_STREAM_DMA_EXT:
	case ATA_CMD_TRUSTED_RCV_DMA:
	case ATA_CMD_TRUSTED_SND_DMA:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
	case ATA_CMD_WRITE_QUEUED:
	case ATA_CMD_WRITE_LOG_DMA_EXT:
	case ATA_CMD_WRITE_STREAM_DMA_EXT:
	case ATA_CMD_ZAC_MGMT_IN:
		return HISI_SAS_SATA_PROTOCOL_DMA;

	case ATA_CMD_CHK_POWER:
	case ATA_CMD_DEV_RESET:
	case ATA_CMD_FLUSH_EXT:
	case ATA_CMD_VERIFY_EXT:
	case ATA_CMD_SET_FEATURES:
	case ATA_CMD_STANDBYNOW1:
	case ATA_CMD_ZAC_MGMT_OUT:
		return HISI_SAS_SATA_PROTOCOL_NONDATA;

	case ATA_CMD_SET_MAX:
		switch (fis->features) {
		case ATA_SET_MAX_PASSWD:
		case ATA_SET_MAX_LOCK:
			return HISI_SAS_SATA_PROTOCOL_PIO;

		case ATA_SET_MAX_PASSWD_DMA:
		case ATA_SET_MAX_UNLOCK_DMA:
			return HISI_SAS_SATA_PROTOCOL_DMA;

		default:
			return HISI_SAS_SATA_PROTOCOL_NONDATA;
		}

	default:
		if (direction == DMA_NONE)
			return HISI_SAS_SATA_PROTOCOL_NONDATA;
		return HISI_SAS_SATA_PROTOCOL_PIO;
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_get_ata_protocol);
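/* Copy the D2H FIS from the slot's status buffer into the libsas
 * ata_task_resp so the ATA layer can see the device's ending FIS.
 */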
void hisi_sas_sata_done(struct sas_task *task,
			struct hisi_sas_slot *slot)
{
	struct task_status_struct *ts = &task->task_status;
	struct ata_task_resp *resp = (struct ata_task_resp *)ts->buf;
	struct hisi_sas_status_buffer *status_buf =
			hisi_sas_status_buf_addr_mem(slot);
	u8 *iu = &status_buf->iu[0];
	struct dev_to_host_fis *d2h = (struct dev_to_host_fis *)iu;

	resp->frame_len = sizeof(struct dev_to_host_fis);
	memcpy(&resp->ending_fis[0], d2h, sizeof(struct dev_to_host_fis));

	ts->buf_valid_size = sizeof(*resp);
}
EXPORT_SYMBOL_GPL(hisi_sas_sata_done);
int hisi_sas_get_ncq_tag(struct sas_task *task, u32 *tag)
{
	struct ata_queued_cmd *qc = task->uldd_task;

	if (qc) {
		if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
		    qc->tf.command == ATA_CMD_FPDMA_READ) {
			*tag = qc->tag;
			return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_get_ncq_tag);
/*
 * This function assumes the linkrate mask fits in 8 bits, which it
 * does for all HW versions supported.
 */
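/* For example, max = SAS_LINK_RATE_6_0_GBPS yields 0x15: the
 * programmed-linkrate bits for 1.5, 3.0 and 6.0 Gbit are all set.
 */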
u8 hisi_sas_get_prog_phy_linkrate_mask(enum sas_linkrate max)
{
	u8 rate = 0;
	int i;

	max -= SAS_LINK_RATE_1_5_GBPS;
	for (i = 0; i <= max; i++)
		rate |= 1 << (i * 2);
	return rate;
}
EXPORT_SYMBOL_GPL(hisi_sas_get_prog_phy_linkrate_mask);
static struct hisi_hba *dev_to_hisi_hba(struct domain_device *device)
{
	return device->port->ha->lldd_ha;
}

struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port)
{
	return container_of(sas_port, struct hisi_sas_port, sas_port);
}
EXPORT_SYMBOL_GPL(to_hisi_sas_port);
void hisi_sas_stop_phys(struct hisi_hba *hisi_hba)
{
	int phy_no;

	for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++)
		hisi_hba->hw->phy_disable(hisi_hba, phy_no);
}
EXPORT_SYMBOL_GPL(hisi_sas_stop_phys);
static void hisi_sas_slot_index_clear(struct hisi_hba *hisi_hba, int slot_idx)
{
	void *bitmap = hisi_hba->slot_index_tags;

	clear_bit(slot_idx, bitmap);
}

static void hisi_sas_slot_index_free(struct hisi_hba *hisi_hba, int slot_idx)
{
	hisi_sas_slot_index_clear(hisi_hba, slot_idx);
}

static void hisi_sas_slot_index_set(struct hisi_hba *hisi_hba, int slot_idx)
{
	void *bitmap = hisi_hba->slot_index_tags;

	set_bit(slot_idx, bitmap);
}
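/* Allocate the next free command-slot tag, scanning the bitmap
 * round-robin from just past the last tag handed out so a freed tag
 * is not immediately reused.
 */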
static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba, int *slot_idx)
{
	unsigned int index;
	void *bitmap = hisi_hba->slot_index_tags;

	index = find_next_zero_bit(bitmap, hisi_hba->slot_index_count,
				   hisi_hba->last_slot_index + 1);
	if (index >= hisi_hba->slot_index_count) {
		index = find_next_zero_bit(bitmap, hisi_hba->slot_index_count,
					   0);
		if (index >= hisi_hba->slot_index_count)
			return -SAS_QUEUE_FULL;
	}
	hisi_sas_slot_index_set(hisi_hba, index);
	*slot_idx = index;
	hisi_hba->last_slot_index = index;

	return 0;
}
static void hisi_sas_slot_index_init(struct hisi_hba *hisi_hba)
{
	int i;

	for (i = 0; i < hisi_hba->slot_index_count; ++i)
		hisi_sas_slot_index_clear(hisi_hba, i);
}
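/* Tear down a completed slot: unmap its scatterlist, return the slot
 * buffer to the DMA pool, unlink it from the device list and free the tag.
 */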
void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
			     struct hisi_sas_slot *slot)
{
	struct hisi_sas_dq *dq = &hisi_hba->dq[slot->dlvry_queue];
	unsigned long flags;

	if (task) {
		struct device *dev = hisi_hba->dev;

		if (!task->lldd_task)
			return;

		task->lldd_task = NULL;

		if (!sas_protocol_ata(task->task_proto))
			dma_unmap_sg(dev, task->scatter,
				     task->num_scatter, task->data_dir);
	}

	dma_pool_free(hisi_hba->buffer_pool, slot->buf, slot->buf_dma);

	spin_lock_irqsave(&dq->lock, flags);
	list_del_init(&slot->entry);
	spin_unlock_irqrestore(&dq->lock, flags);

	spin_lock_irqsave(&hisi_hba->lock, flags);
	hisi_sas_slot_index_free(hisi_hba, slot->idx);
	spin_unlock_irqrestore(&hisi_hba->lock, flags);

	/* slot memory is fully zeroed when it is reused */
}
EXPORT_SYMBOL_GPL(hisi_sas_slot_task_free);
static void hisi_sas_task_prep_smp(struct hisi_hba *hisi_hba,
				   struct hisi_sas_slot *slot)
{
	hisi_hba->hw->prep_smp(hisi_hba, slot);
}

static void hisi_sas_task_prep_ssp(struct hisi_hba *hisi_hba,
				   struct hisi_sas_slot *slot)
{
	hisi_hba->hw->prep_ssp(hisi_hba, slot);
}

static void hisi_sas_task_prep_ata(struct hisi_hba *hisi_hba,
				   struct hisi_sas_slot *slot)
{
	hisi_hba->hw->prep_stp(hisi_hba, slot);
}

static void hisi_sas_task_prep_abort(struct hisi_hba *hisi_hba,
				     struct hisi_sas_slot *slot,
				     int device_id, int abort_flag, int tag_to_abort)
{
	hisi_hba->hw->prep_abort(hisi_hba, slot,
				 device_id, abort_flag, tag_to_abort);
}
/*
 * This function will issue an abort TMF regardless of whether the
 * task is in the sdev or not. Then it will do the task complete
 * cleanup and callbacks.
 */
static void hisi_sas_slot_abort(struct work_struct *work)
{
	struct hisi_sas_slot *abort_slot =
		container_of(work, struct hisi_sas_slot, abort_slot);
	struct sas_task *task = abort_slot->task;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
	struct scsi_cmnd *cmnd = task->uldd_task;
	struct hisi_sas_tmf_task tmf_task;
	struct scsi_lun lun;
	struct device *dev = hisi_hba->dev;
	int tag = abort_slot->idx;

	if (!(task->task_proto & SAS_PROTOCOL_SSP)) {
		dev_err(dev, "cannot abort slot for non-ssp task\n");
		goto out;
	}

	int_to_scsilun(cmnd->device->lun, &lun);
	tmf_task.tmf = TMF_ABORT_TASK;
	tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);

	hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun, &tmf_task);
out:
	/* Do cleanup for this task */
	hisi_sas_slot_task_free(hisi_hba, task, abort_slot);
	task->task_done(task);
}
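/* Build a command slot for @task: map its DMA buffers, allocate a tag
 * and a delivery-queue entry, and fill in the command header. Actual
 * delivery to the HW is done later by the caller via start_delivery().
 */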
static int hisi_sas_task_prep(struct sas_task *task,
			      struct hisi_sas_dq **dq_pointer,
			      bool is_tmf, struct hisi_sas_tmf_task *tmf,
			      int *pass)
{
	struct domain_device *device = task->dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_sas_port *port;
	struct hisi_sas_slot *slot;
	struct hisi_sas_cmd_hdr *cmd_hdr_base;
	struct asd_sas_port *sas_port = device->port;
	struct device *dev = hisi_hba->dev;
	int dlvry_queue_slot, dlvry_queue, rc, slot_idx;
	int n_elem = 0, n_elem_req = 0, n_elem_resp = 0;
	unsigned long flags, flags_dq;
	struct hisi_sas_dq *dq;
	int wr_q_index;

	if (!sas_port) {
		struct task_status_struct *ts = &task->task_status;

		ts->resp = SAS_TASK_UNDELIVERED;
		ts->stat = SAS_PHY_DOWN;
		/*
		 * libsas will use dev->port, should
		 * not call task_done for sata
		 */
		if (device->dev_type != SAS_SATA_DEV)
			task->task_done(task);
		return -ECOMM;
	}

	if (DEV_IS_GONE(sas_dev)) {
		if (sas_dev)
			dev_info(dev, "task prep: device %d not ready\n",
				 sas_dev->device_id);
		else
			dev_info(dev, "task prep: device %016llx not ready\n",
				 SAS_ADDR(device->sas_addr));

		return -ECOMM;
	}

	*dq_pointer = dq = sas_dev->dq;

	port = to_hisi_sas_port(sas_port);
	if (port && !port->port_attached) {
		dev_info(dev, "task prep: %s port%d not attach device\n",
			 (dev_is_sata(device)) ?
			 "SATA/STP" : "SAS",
			 device->port->id);

		return -ECOMM;
	}

	if (!sas_protocol_ata(task->task_proto)) {
		unsigned int req_len, resp_len;

		if (task->num_scatter) {
			n_elem = dma_map_sg(dev, task->scatter,
					    task->num_scatter, task->data_dir);
			if (!n_elem) {
				rc = -ENOMEM;
				goto prep_out;
			}
		} else if (task->task_proto & SAS_PROTOCOL_SMP) {
			n_elem_req = dma_map_sg(dev, &task->smp_task.smp_req,
						1, DMA_TO_DEVICE);
			if (!n_elem_req) {
				rc = -ENOMEM;
				goto prep_out;
			}
			req_len = sg_dma_len(&task->smp_task.smp_req);
			if (req_len & 0x3) {
				rc = -EINVAL;
				goto err_out_dma_unmap;
			}
			n_elem_resp = dma_map_sg(dev, &task->smp_task.smp_resp,
						 1, DMA_FROM_DEVICE);
			if (!n_elem_resp) {
				rc = -ENOMEM;
				goto err_out_dma_unmap;
			}
			resp_len = sg_dma_len(&task->smp_task.smp_resp);
			if (resp_len & 0x3) {
				rc = -EINVAL;
				goto err_out_dma_unmap;
			}
		}
	} else
		n_elem = task->num_scatter;

	if (n_elem > HISI_SAS_SGE_PAGE_CNT) {
		dev_err(dev, "task prep: n_elem(%d) > HISI_SAS_SGE_PAGE_CNT",
			n_elem);
		rc = -EINVAL;
		goto err_out_dma_unmap;
	}

	spin_lock_irqsave(&hisi_hba->lock, flags);
	if (hisi_hba->hw->slot_index_alloc)
		rc = hisi_hba->hw->slot_index_alloc(hisi_hba, &slot_idx,
						    device);
	else
		rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
	spin_unlock_irqrestore(&hisi_hba->lock, flags);
	if (rc)
		goto err_out_dma_unmap;

	slot = &hisi_hba->slot_info[slot_idx];
	memset(slot, 0, sizeof(struct hisi_sas_slot));

	slot->buf = dma_pool_alloc(hisi_hba->buffer_pool,
				   GFP_ATOMIC, &slot->buf_dma);
	if (!slot->buf) {
		rc = -ENOMEM;
		goto err_out_tag;
	}

	spin_lock_irqsave(&dq->lock, flags_dq);
	wr_q_index = hisi_hba->hw->get_free_slot(hisi_hba, dq);
	if (wr_q_index < 0) {
		spin_unlock_irqrestore(&dq->lock, flags_dq);
		rc = -EAGAIN;
		goto err_out_buf;
	}

	list_add_tail(&slot->delivery, &dq->list);
	spin_unlock_irqrestore(&dq->lock, flags_dq);

	dlvry_queue = dq->id;
	dlvry_queue_slot = wr_q_index;

	slot->idx = slot_idx;
	slot->n_elem = n_elem;
	slot->dlvry_queue = dlvry_queue;
	slot->dlvry_queue_slot = dlvry_queue_slot;
	cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
	slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
	slot->task = task;
	slot->port = port;
	slot->tmf = tmf;
	slot->is_internal = is_tmf;
	task->lldd_task = slot;
	INIT_WORK(&slot->abort_slot, hisi_sas_slot_abort);

	memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
	memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
	memset(hisi_sas_status_buf_addr_mem(slot), 0, HISI_SAS_STATUS_BUF_SZ);

	switch (task->task_proto) {
	case SAS_PROTOCOL_SMP:
		hisi_sas_task_prep_smp(hisi_hba, slot);
		break;
	case SAS_PROTOCOL_SSP:
		hisi_sas_task_prep_ssp(hisi_hba, slot);
		break;
	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
		hisi_sas_task_prep_ata(hisi_hba, slot);
		break;
	default:
		dev_err(dev, "task prep: unknown/unsupported proto (0x%x)\n",
			task->task_proto);
		break;
	}

	spin_lock_irqsave(&dq->lock, flags);
	list_add_tail(&slot->entry, &sas_dev->list);
	spin_unlock_irqrestore(&dq->lock, flags);
	spin_lock_irqsave(&task->task_state_lock, flags);
	task->task_state_flags |= SAS_TASK_AT_INITIATOR;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	++(*pass);
	return 0;

err_out_buf:
	dma_pool_free(hisi_hba->buffer_pool, slot->buf,
		      slot->buf_dma);
err_out_tag:
	spin_lock_irqsave(&hisi_hba->lock, flags);
	hisi_sas_slot_index_free(hisi_hba, slot_idx);
	spin_unlock_irqrestore(&hisi_hba->lock, flags);
err_out_dma_unmap:
	if (!sas_protocol_ata(task->task_proto)) {
		if (task->num_scatter) {
			dma_unmap_sg(dev, task->scatter, task->num_scatter,
				     task->data_dir);
		} else if (task->task_proto & SAS_PROTOCOL_SMP) {
			if (n_elem_req)
				dma_unmap_sg(dev, &task->smp_task.smp_req,
					     1, DMA_TO_DEVICE);
			if (n_elem_resp)
				dma_unmap_sg(dev, &task->smp_task.smp_resp,
					     1, DMA_FROM_DEVICE);
		}
	}
prep_out:
	dev_err(dev, "task prep: failed[%d]!\n", rc);
	return rc;
}
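/* Queue one task: prepare the slot under the reject-cmd check and, if
 * anything was prepared, ring the delivery-queue doorbell once.
 */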
static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags,
			      bool is_tmf, struct hisi_sas_tmf_task *tmf)
{
	u32 rc;
	u32 pass = 0;
	unsigned long flags;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
	struct device *dev = hisi_hba->dev;
	struct hisi_sas_dq *dq = NULL;

	if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags)))
		return -EINVAL;

	/* protect task_prep and start_delivery sequence */
	rc = hisi_sas_task_prep(task, &dq, is_tmf, tmf, &pass);
	if (rc)
		dev_err(dev, "task exec: failed[%d]!\n", rc);

	if (likely(pass)) {
		spin_lock_irqsave(&dq->lock, flags);
		hisi_hba->hw->start_delivery(dq);
		spin_unlock_irqrestore(&dq->lock, flags);
	}

	return rc;
}
static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct sas_ha_struct *sas_ha;

	if (!phy->phy_attached)
		return;

	sas_ha = &hisi_hba->sha;
	sas_ha->notify_phy_event(sas_phy, PHYE_OOB_DONE);

	if (sas_phy->phy) {
		struct sas_phy *sphy = sas_phy->phy;

		sphy->negotiated_linkrate = sas_phy->linkrate;
		sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
		sphy->maximum_linkrate_hw =
			hisi_hba->hw->phy_get_max_linkrate();
		if (sphy->minimum_linkrate == SAS_LINK_RATE_UNKNOWN)
			sphy->minimum_linkrate = phy->minimum_linkrate;

		if (sphy->maximum_linkrate == SAS_LINK_RATE_UNKNOWN)
			sphy->maximum_linkrate = phy->maximum_linkrate;
	}

	if (phy->phy_type & PORT_TYPE_SAS) {
		struct sas_identify_frame *id;

		id = (struct sas_identify_frame *)phy->frame_rcvd;
		id->dev_type = phy->identify.device_type;
		id->initiator_bits = SAS_PROTOCOL_ALL;
		id->target_bits = phy->identify.target_port_protocols;
	} else if (phy->phy_type & PORT_TYPE_SATA) {
		/* nothing to fill in for SATA */
	}

	sas_phy->frame_rcvd_size = phy->frame_rcvd_size;
	sas_ha->notify_port_event(sas_phy, PORTE_BYTES_DMAED);
}
*hisi_sas_alloc_dev(struct domain_device
*device
)
599 struct hisi_hba
*hisi_hba
= dev_to_hisi_hba(device
);
600 struct hisi_sas_device
*sas_dev
= NULL
;
602 int last
= hisi_hba
->last_dev_id
;
603 int first
= (hisi_hba
->last_dev_id
+ 1) % HISI_SAS_MAX_DEVICES
;
606 spin_lock_irqsave(&hisi_hba
->lock
, flags
);
607 for (i
= first
; i
!= last
; i
%= HISI_SAS_MAX_DEVICES
) {
608 if (hisi_hba
->devices
[i
].dev_type
== SAS_PHY_UNUSED
) {
609 int queue
= i
% hisi_hba
->queue_count
;
610 struct hisi_sas_dq
*dq
= &hisi_hba
->dq
[queue
];
612 hisi_hba
->devices
[i
].device_id
= i
;
613 sas_dev
= &hisi_hba
->devices
[i
];
614 sas_dev
->dev_status
= HISI_SAS_DEV_NORMAL
;
615 sas_dev
->dev_type
= device
->dev_type
;
616 sas_dev
->hisi_hba
= hisi_hba
;
617 sas_dev
->sas_device
= device
;
619 INIT_LIST_HEAD(&hisi_hba
->devices
[i
].list
);
624 hisi_hba
->last_dev_id
= i
;
625 spin_unlock_irqrestore(&hisi_hba
->lock
, flags
);
#define HISI_SAS_SRST_ATA_DISK_CNT 3
static int hisi_sas_init_device(struct domain_device *device)
{
	int rc = TMF_RESP_FUNC_COMPLETE;
	struct scsi_lun lun;
	struct hisi_sas_tmf_task tmf_task;
	int retry = HISI_SAS_SRST_ATA_DISK_CNT;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);

	switch (device->dev_type) {
	case SAS_END_DEVICE:
		int_to_scsilun(0, &lun);

		tmf_task.tmf = TMF_CLEAR_TASK_SET;
		rc = hisi_sas_debug_issue_ssp_tmf(device, lun.scsi_lun,
						  &tmf_task);
		if (rc == TMF_RESP_FUNC_COMPLETE)
			hisi_sas_release_task(hisi_hba, device);
		break;
	case SAS_SATA_DEV:
	case SAS_SATA_PM:
	case SAS_SATA_PM_PORT:
	case SAS_SATA_PENDING:
		while (retry-- > 0) {
			rc = hisi_sas_softreset_ata_disk(device);
			if (!rc)
				break;
		}
		break;
	default:
		break;
	}

	return rc;
}
static int hisi_sas_dev_found(struct domain_device *device)
{
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct domain_device *parent_dev = device->parent;
	struct hisi_sas_device *sas_dev;
	struct device *dev = hisi_hba->dev;
	int rc;

	if (hisi_hba->hw->alloc_dev)
		sas_dev = hisi_hba->hw->alloc_dev(device);
	else
		sas_dev = hisi_sas_alloc_dev(device);
	if (!sas_dev) {
		dev_err(dev, "fail alloc dev: max support %d devices\n",
			HISI_SAS_MAX_DEVICES);
		return -EINVAL;
	}

	device->lldd_dev = sas_dev;
	hisi_hba->hw->setup_itct(hisi_hba, sas_dev);

	if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) {
		int phy_no;
		u8 phy_num = parent_dev->ex_dev.num_phys;
		struct ex_phy *phy;

		for (phy_no = 0; phy_no < phy_num; phy_no++) {
			phy = &parent_dev->ex_dev.ex_phy[phy_no];
			if (SAS_ADDR(phy->attached_sas_addr) ==
			    SAS_ADDR(device->sas_addr))
				break;
		}

		if (phy_no == phy_num) {
			dev_info(dev, "dev found: no attached "
				 "dev:%016llx at ex:%016llx\n",
				 SAS_ADDR(device->sas_addr),
				 SAS_ADDR(parent_dev->sas_addr));
			rc = -EINVAL;
			goto err_out;
		}
	}

	dev_info(dev, "dev[%d:%x] found\n",
		 sas_dev->device_id, sas_dev->dev_type);

	rc = hisi_sas_init_device(device);
	if (rc)
		goto err_out;
	return 0;

err_out:
	hisi_sas_dev_gone(device);
	return rc;
}
int hisi_sas_slave_configure(struct scsi_device *sdev)
{
	struct domain_device *dev = sdev_to_domain_dev(sdev);
	int ret = sas_slave_configure(sdev);

	if (ret)
		return ret;
	if (!dev_is_sata(dev))
		sas_change_queue_depth(sdev, 64);

	return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_slave_configure);
void hisi_sas_scan_start(struct Scsi_Host *shost)
{
	struct hisi_hba *hisi_hba = shost_priv(shost);

	hisi_hba->hw->phys_init(hisi_hba);
}
EXPORT_SYMBOL_GPL(hisi_sas_scan_start);
int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	struct hisi_hba *hisi_hba = shost_priv(shost);
	struct sas_ha_struct *sha = &hisi_hba->sha;

	/* Wait for PHY up interrupt to occur */
	if (time < HZ)
		return 0;

	sas_drain_work(sha);
	return 1;
}
EXPORT_SYMBOL_GPL(hisi_sas_scan_finished);
static void hisi_sas_phyup_work(struct work_struct *work)
{
	struct hisi_sas_phy *phy =
		container_of(work, typeof(*phy), works[HISI_PHYE_PHY_UP]);
	struct hisi_hba *hisi_hba = phy->hisi_hba;
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	int phy_no = sas_phy->id;

	hisi_hba->hw->sl_notify(hisi_hba, phy_no); /* This requires a sleep */
	hisi_sas_bytes_dmaed(hisi_hba, phy_no);
}
static void hisi_sas_linkreset_work(struct work_struct *work)
{
	struct hisi_sas_phy *phy =
		container_of(work, typeof(*phy), works[HISI_PHYE_LINK_RESET]);
	struct asd_sas_phy *sas_phy = &phy->sas_phy;

	hisi_sas_control_phy(sas_phy, PHY_FUNC_LINK_RESET, NULL);
}
static const work_func_t hisi_sas_phye_fns[HISI_PHYES_NUM] = {
	[HISI_PHYE_PHY_UP] = hisi_sas_phyup_work,
	[HISI_PHYE_LINK_RESET] = hisi_sas_linkreset_work,
};
bool hisi_sas_notify_phy_event(struct hisi_sas_phy *phy,
			       enum hisi_sas_phy_event event)
{
	struct hisi_hba *hisi_hba = phy->hisi_hba;

	if (WARN_ON(event >= HISI_PHYES_NUM))
		return false;

	return queue_work(hisi_hba->wq, &phy->works[event]);
}
EXPORT_SYMBOL_GPL(hisi_sas_notify_phy_event);
static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	int i;

	phy->hisi_hba = hisi_hba;
	phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS;
	phy->maximum_linkrate = hisi_hba->hw->phy_get_max_linkrate();
	sas_phy->enabled = (phy_no < hisi_hba->n_phy) ? 1 : 0;
	sas_phy->class = SAS;
	sas_phy->iproto = SAS_PROTOCOL_ALL;
	sas_phy->type = PHY_TYPE_PHYSICAL;
	sas_phy->role = PHY_ROLE_INITIATOR;
	sas_phy->oob_mode = OOB_NOT_CONNECTED;
	sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
	sas_phy->id = phy_no;
	sas_phy->sas_addr = &hisi_hba->sas_addr[0];
	sas_phy->frame_rcvd = &phy->frame_rcvd[0];
	sas_phy->ha = (struct sas_ha_struct *)hisi_hba->shost->hostdata;
	sas_phy->lldd_phy = phy;

	for (i = 0; i < HISI_PHYES_NUM; i++)
		INIT_WORK(&phy->works[i], hisi_sas_phye_fns[i]);
}
static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy)
{
	struct sas_ha_struct *sas_ha = sas_phy->ha;
	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
	struct hisi_sas_phy *phy = sas_phy->lldd_phy;
	struct asd_sas_port *sas_port = sas_phy->port;
	struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
	unsigned long flags;

	if (!sas_port)
		return;

	spin_lock_irqsave(&hisi_hba->lock, flags);
	port->port_attached = 1;
	port->id = phy->port_id;
	phy->port = port;
	sas_port->lldd_port = port;
	spin_unlock_irqrestore(&hisi_hba->lock, flags);
}
static void hisi_sas_do_release_task(struct hisi_hba *hisi_hba, struct sas_task *task,
				     struct hisi_sas_slot *slot)
{
	if (task) {
		unsigned long flags;
		struct task_status_struct *ts;

		ts = &task->task_status;

		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_ABORTED_TASK;
		spin_lock_irqsave(&task->task_state_lock, flags);
		task->task_state_flags &=
			~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
		task->task_state_flags |= SAS_TASK_STATE_DONE;
		spin_unlock_irqrestore(&task->task_state_lock, flags);
	}

	hisi_sas_slot_task_free(hisi_hba, task, slot);
}
/* hisi_hba.lock should be locked */
static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
				  struct domain_device *device)
{
	struct hisi_sas_slot *slot, *slot2;
	struct hisi_sas_device *sas_dev = device->lldd_dev;

	list_for_each_entry_safe(slot, slot2, &sas_dev->list, entry)
		hisi_sas_do_release_task(hisi_hba, slot->task, slot);
}
void hisi_sas_release_tasks(struct hisi_hba *hisi_hba)
{
	struct hisi_sas_device *sas_dev;
	struct domain_device *device;
	int i;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		sas_dev = &hisi_hba->devices[i];
		device = sas_dev->sas_device;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED) ||
		    !device)
			continue;

		hisi_sas_release_task(hisi_hba, device);
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_release_tasks);
static void hisi_sas_dereg_device(struct hisi_hba *hisi_hba,
				  struct domain_device *device)
{
	if (hisi_hba->hw->dereg_device)
		hisi_hba->hw->dereg_device(hisi_hba, device);
}
static void hisi_sas_dev_gone(struct domain_device *device)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;

	dev_info(dev, "dev[%d:%x] is gone\n",
		 sas_dev->device_id, sas_dev->dev_type);

	if (!test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) {
		hisi_sas_internal_task_abort(hisi_hba, device,
					     HISI_SAS_INT_ABT_DEV, 0);

		hisi_sas_dereg_device(hisi_hba, device);

		hisi_hba->hw->clear_itct(hisi_hba, sas_dev);
		device->lldd_dev = NULL;
	}

	if (hisi_hba->hw->free_device)
		hisi_hba->hw->free_device(sas_dev);
	sas_dev->dev_type = SAS_PHY_UNUSED;
}
static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags)
{
	return hisi_sas_task_exec(task, gfp_flags, 0, NULL);
}
static void hisi_sas_phy_set_linkrate(struct hisi_hba *hisi_hba, int phy_no,
				      struct sas_phy_linkrates *r)
{
	struct sas_phy_linkrates _r;

	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	enum sas_linkrate min, max;

	if (r->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) {
		max = sas_phy->phy->maximum_linkrate;
		min = r->minimum_linkrate;
	} else if (r->minimum_linkrate == SAS_LINK_RATE_UNKNOWN) {
		max = r->maximum_linkrate;
		min = sas_phy->phy->minimum_linkrate;
	} else
		return;

	_r.maximum_linkrate = max;
	_r.minimum_linkrate = min;

	hisi_hba->hw->phy_disable(hisi_hba, phy_no);
	msleep(100);
	hisi_hba->hw->phy_set_linkrate(hisi_hba, phy_no, &_r);
	hisi_hba->hw->phy_start(hisi_hba, phy_no);
}
static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
				void *funcdata)
{
	struct sas_ha_struct *sas_ha = sas_phy->ha;
	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
	int phy_no = sas_phy->id;

	switch (func) {
	case PHY_FUNC_HARD_RESET:
		hisi_hba->hw->phy_hard_reset(hisi_hba, phy_no);
		break;

	case PHY_FUNC_LINK_RESET:
		hisi_hba->hw->phy_disable(hisi_hba, phy_no);
		msleep(100);
		hisi_hba->hw->phy_start(hisi_hba, phy_no);
		break;

	case PHY_FUNC_DISABLE:
		hisi_hba->hw->phy_disable(hisi_hba, phy_no);
		break;

	case PHY_FUNC_SET_LINK_RATE:
		hisi_sas_phy_set_linkrate(hisi_hba, phy_no, funcdata);
		break;

	case PHY_FUNC_GET_EVENTS:
		if (hisi_hba->hw->get_events) {
			hisi_hba->hw->get_events(hisi_hba, phy_no);
			break;
		}
		/* fallthru */
	case PHY_FUNC_RELEASE_SPINUP_HOLD:
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}
static void hisi_sas_task_done(struct sas_task *task)
{
	if (!del_timer(&task->slow_task->timer))
		return;
	complete(&task->slow_task->completion);
}
static void hisi_sas_tmf_timedout(struct timer_list *t)
{
	struct sas_task_slow *slow = from_timer(slow, t, timer);
	struct sas_task *task = slow->task;
	unsigned long flags;

	spin_lock_irqsave(&task->task_state_lock, flags);
	if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
		task->task_state_flags |= SAS_TASK_STATE_ABORTED;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	complete(&task->slow_task->completion);
}
#define TASK_TIMEOUT 20
#define TASK_RETRY 3
#define INTERNAL_ABORT_TIMEOUT 6
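/* The timeouts above are in seconds; they are multiplied by HZ when the
 * slow-task timers are armed below.
 */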
static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
					   void *parameter, u32 para_len,
					   struct hisi_sas_tmf_task *tmf)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = sas_dev->hisi_hba;
	struct device *dev = hisi_hba->dev;
	struct sas_task *task;
	int res, retry;

	for (retry = 0; retry < TASK_RETRY; retry++) {
		task = sas_alloc_slow_task(GFP_KERNEL);
		if (!task)
			return -ENOMEM;

		task->dev = device;
		task->task_proto = device->tproto;

		if (dev_is_sata(device)) {
			task->ata_task.device_control_reg_update = 1;
			memcpy(&task->ata_task.fis, parameter, para_len);
		} else {
			memcpy(&task->ssp_task, parameter, para_len);
		}
		task->task_done = hisi_sas_task_done;

		task->slow_task->timer.function = hisi_sas_tmf_timedout;
		task->slow_task->timer.expires = jiffies + TASK_TIMEOUT*HZ;
		add_timer(&task->slow_task->timer);

		res = hisi_sas_task_exec(task, GFP_KERNEL, 1, tmf);

		if (res) {
			del_timer(&task->slow_task->timer);
			dev_err(dev, "abort tmf: executing internal task failed: %d\n",
				res);
			goto ex_err;
		}

		wait_for_completion(&task->slow_task->completion);
		res = TMF_RESP_FUNC_FAILED;
		/* Even TMF timed out, return direct. */
		if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
			if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
				struct hisi_sas_slot *slot = task->lldd_task;

				dev_err(dev, "abort tmf: TMF task timeout and not done\n");
				if (slot)
					slot->task = NULL;

				goto ex_err;
			} else
				dev_err(dev, "abort tmf: TMF task timeout\n");
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
			res = TMF_RESP_FUNC_COMPLETE;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == TMF_RESP_FUNC_SUCC) {
			res = TMF_RESP_FUNC_SUCC;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAS_DATA_UNDERRUN) {
			/* no error, but return the number of bytes of
			 * underrun
			 */
			dev_warn(dev, "abort tmf: task to dev %016llx "
				 "resp: 0x%x sts 0x%x underrun\n",
				 SAS_ADDR(device->sas_addr),
				 task->task_status.resp,
				 task->task_status.stat);
			res = task->task_status.residual;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAS_DATA_OVERRUN) {
			dev_warn(dev, "abort tmf: blocked task error\n");
			break;
		}

		dev_warn(dev, "abort tmf: task to dev "
			 "%016llx resp: 0x%x status 0x%x\n",
			 SAS_ADDR(device->sas_addr), task->task_status.resp,
			 task->task_status.stat);
		sas_free_task(task);
		task = NULL;
	}
ex_err:
	if (retry == TASK_RETRY)
		dev_warn(dev, "abort tmf: executing internal task failed!\n");
	sas_free_task(task);
	return res;
}
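/* Build the host-to-device FIS for a SATA software reset: SRST is set
 * to assert the reset and cleared to de-assert it.
 */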
static void hisi_sas_fill_ata_reset_cmd(struct ata_device *dev,
					bool reset, int pmp, u8 *fis)
{
	struct ata_taskfile tf;

	ata_tf_init(dev, &tf);
	if (reset)
		tf.ctl |= ATA_SRST;
	else
		tf.ctl &= ~ATA_SRST;
	tf.command = ATA_CMD_DEV_RESET;
	ata_tf_to_fis(&tf, pmp, 0, fis);
}
static int hisi_sas_softreset_ata_disk(struct domain_device *device)
{
	u8 fis[20] = {0};
	struct ata_port *ap = device->sata_dev.ap;
	struct ata_link *link;
	int rc = TMF_RESP_FUNC_FAILED;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	int s = sizeof(struct host_to_dev_fis);

	ata_for_each_link(link, ap, EDGE) {
		int pmp = sata_srst_pmp(link);

		hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis);
		rc = hisi_sas_exec_internal_tmf_task(device, fis, s, NULL);
		if (rc != TMF_RESP_FUNC_COMPLETE)
			break;
	}

	if (rc == TMF_RESP_FUNC_COMPLETE) {
		ata_for_each_link(link, ap, EDGE) {
			int pmp = sata_srst_pmp(link);

			hisi_sas_fill_ata_reset_cmd(link->device, 0, pmp, fis);
			rc = hisi_sas_exec_internal_tmf_task(device, fis,
							     s, NULL);
			if (rc != TMF_RESP_FUNC_COMPLETE)
				dev_err(dev, "ata disk de-reset failed\n");
		}
	} else {
		dev_err(dev, "ata disk reset failed\n");
	}

	if (rc == TMF_RESP_FUNC_COMPLETE)
		hisi_sas_release_task(hisi_hba, device);

	return rc;
}
static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
					u8 *lun, struct hisi_sas_tmf_task *tmf)
{
	struct sas_ssp_task ssp_task;

	if (!(device->tproto & SAS_PROTOCOL_SSP))
		return TMF_RESP_FUNC_ESUPP;

	memcpy(ssp_task.LUN, lun, 8);

	return hisi_sas_exec_internal_tmf_task(device, &ssp_task,
					       sizeof(ssp_task), tmf);
}
static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba)
{
	u32 state = hisi_hba->hw->get_phys_state(hisi_hba);
	int i;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
		struct domain_device *device = sas_dev->sas_device;
		struct asd_sas_port *sas_port;
		struct hisi_sas_port *port;
		struct hisi_sas_phy *phy = NULL;
		struct asd_sas_phy *sas_phy;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED)
		    || !device || !device->port)
			continue;

		sas_port = device->port;
		port = to_hisi_sas_port(sas_port);

		list_for_each_entry(sas_phy, &sas_port->phy_list, port_phy_el)
			if (state & BIT(sas_phy->id)) {
				phy = sas_phy->lldd_phy;
				break;
			}

		if (phy) {
			port->id = phy->port_id;

			/* Update linkrate of directly attached device. */
			if (!device->parent)
				device->linkrate = phy->sas_phy.linkrate;

			hisi_hba->hw->setup_itct(hisi_hba, sas_dev);
		}
	}
}
static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 old_state,
				     u32 state)
{
	struct sas_ha_struct *sas_ha = &hisi_hba->sha;
	struct asd_sas_port *_sas_port = NULL;
	int phy_no;

	for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
		struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
		struct asd_sas_phy *sas_phy = &phy->sas_phy;
		struct asd_sas_port *sas_port = sas_phy->port;
		bool do_port_check = !!(_sas_port != sas_port);

		if (!sas_phy->phy->enabled)
			continue;

		/* Report PHY state change to libsas */
		if (state & BIT(phy_no)) {
			if (do_port_check && sas_port && sas_port->port_dev) {
				struct domain_device *dev = sas_port->port_dev;

				_sas_port = sas_port;

				if (DEV_IS_EXPANDER(dev->dev_type))
					sas_ha->notify_port_event(sas_phy,
							PORTE_BROADCAST_RCVD);
			}
		} else if (old_state & (1 << phy_no))
			/* PHY down but was up before */
			hisi_sas_phy_down(hisi_hba, phy_no, 0);
	}
}
static void hisi_sas_reset_init_all_devices(struct hisi_hba *hisi_hba)
{
	struct hisi_sas_device *sas_dev;
	struct domain_device *device;
	int i;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		sas_dev = &hisi_hba->devices[i];
		device = sas_dev->sas_device;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device)
			continue;

		hisi_sas_init_device(device);
	}
}
static void hisi_sas_send_ata_reset_each_phy(struct hisi_hba *hisi_hba,
					     struct asd_sas_port *sas_port,
					     struct domain_device *device)
{
	struct hisi_sas_tmf_task tmf_task = { .force_phy = 1 };
	struct ata_port *ap = device->sata_dev.ap;
	struct device *dev = hisi_hba->dev;
	int s = sizeof(struct host_to_dev_fis);
	int rc = TMF_RESP_FUNC_FAILED;
	struct asd_sas_phy *sas_phy;
	struct ata_link *link;
	u8 fis[20] = {0};
	u32 state;

	state = hisi_hba->hw->get_phys_state(hisi_hba);
	list_for_each_entry(sas_phy, &sas_port->phy_list, port_phy_el) {
		if (!(state & BIT(sas_phy->id)))
			continue;

		ata_for_each_link(link, ap, EDGE) {
			int pmp = sata_srst_pmp(link);

			tmf_task.phy_id = sas_phy->id;
			hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis);
			rc = hisi_sas_exec_internal_tmf_task(device, fis, s,
							     &tmf_task);
			if (rc != TMF_RESP_FUNC_COMPLETE) {
				dev_err(dev, "phy%d ata reset failed rc=%d\n",
					sas_phy->id, rc);
				break;
			}
		}
	}
}
static void hisi_sas_terminate_stp_reject(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	int port_no, rc, i;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
		struct domain_device *device = sas_dev->sas_device;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device)
			continue;

		rc = hisi_sas_internal_task_abort(hisi_hba, device,
						  HISI_SAS_INT_ABT_DEV, 0);
		if (rc < 0)
			dev_err(dev, "STP reject: abort dev failed %d\n", rc);
	}

	for (port_no = 0; port_no < hisi_hba->n_phy; port_no++) {
		struct hisi_sas_port *port = &hisi_hba->port[port_no];
		struct asd_sas_port *sas_port = &port->sas_port;
		struct domain_device *port_dev = sas_port->port_dev;
		struct domain_device *device;

		if (!port_dev || !DEV_IS_EXPANDER(port_dev->dev_type))
			continue;

		/* Try to find a SATA device */
		list_for_each_entry(device, &sas_port->dev_list,
				    dev_list_node) {
			if (dev_is_sata(device)) {
				hisi_sas_send_ata_reset_each_phy(hisi_hba,
								 sas_port,
								 device);
				break;
			}
		}
	}
}
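/* Soft-reset recovery sequence: block new requests, quiesce in-flight
 * commands, reset the controller, then re-init PHYs, refresh port IDs
 * and rescan the topology against the pre-reset PHY state.
 */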
static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	struct Scsi_Host *shost = hisi_hba->shost;
	u32 old_state, state;
	int rc;

	if (!hisi_hba->hw->soft_reset)
		return -1;

	if (test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
		return -1;

	dev_info(dev, "controller resetting...\n");
	old_state = hisi_hba->hw->get_phys_state(hisi_hba);

	scsi_block_requests(shost);
	hisi_hba->hw->wait_cmds_complete_timeout(hisi_hba, 100, 5000);

	if (timer_pending(&hisi_hba->timer))
		del_timer_sync(&hisi_hba->timer);

	set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
	rc = hisi_hba->hw->soft_reset(hisi_hba);
	if (rc) {
		dev_warn(dev, "controller reset failed (%d)\n", rc);
		clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
		scsi_unblock_requests(shost);
		goto out;
	}

	clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);

	/* Init and wait for PHYs to come up and all libsas event finished. */
	hisi_hba->hw->phys_init(hisi_hba);

	hisi_sas_refresh_port_id(hisi_hba);

	if (hisi_hba->reject_stp_links_msk)
		hisi_sas_terminate_stp_reject(hisi_hba);
	hisi_sas_reset_init_all_devices(hisi_hba);
	scsi_unblock_requests(shost);

	state = hisi_hba->hw->get_phys_state(hisi_hba);
	hisi_sas_rescan_topology(hisi_hba, old_state, state);
	dev_info(dev, "controller reset complete\n");

out:
	clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);

	return rc;
}
*task
)
1409 struct scsi_lun lun
;
1410 struct hisi_sas_tmf_task tmf_task
;
1411 struct domain_device
*device
= task
->dev
;
1412 struct hisi_sas_device
*sas_dev
= device
->lldd_dev
;
1413 struct hisi_hba
*hisi_hba
;
1415 int rc
= TMF_RESP_FUNC_FAILED
;
1416 unsigned long flags
;
1419 return TMF_RESP_FUNC_FAILED
;
1421 hisi_hba
= dev_to_hisi_hba(task
->dev
);
1422 dev
= hisi_hba
->dev
;
1424 spin_lock_irqsave(&task
->task_state_lock
, flags
);
1425 if (task
->task_state_flags
& SAS_TASK_STATE_DONE
) {
1426 spin_unlock_irqrestore(&task
->task_state_lock
, flags
);
1427 rc
= TMF_RESP_FUNC_COMPLETE
;
1430 task
->task_state_flags
|= SAS_TASK_STATE_ABORTED
;
1431 spin_unlock_irqrestore(&task
->task_state_lock
, flags
);
1433 sas_dev
->dev_status
= HISI_SAS_DEV_EH
;
1434 if (task
->lldd_task
&& task
->task_proto
& SAS_PROTOCOL_SSP
) {
1435 struct scsi_cmnd
*cmnd
= task
->uldd_task
;
1436 struct hisi_sas_slot
*slot
= task
->lldd_task
;
1437 u32 tag
= slot
->idx
;
1440 int_to_scsilun(cmnd
->device
->lun
, &lun
);
1441 tmf_task
.tmf
= TMF_ABORT_TASK
;
1442 tmf_task
.tag_of_task_to_be_managed
= cpu_to_le16(tag
);
1444 rc
= hisi_sas_debug_issue_ssp_tmf(task
->dev
, lun
.scsi_lun
,
1447 rc2
= hisi_sas_internal_task_abort(hisi_hba
, device
,
1448 HISI_SAS_INT_ABT_CMD
, tag
);
1450 dev_err(dev
, "abort task: internal abort (%d)\n", rc2
);
1451 return TMF_RESP_FUNC_FAILED
;
1455 * If the TMF finds that the IO is not in the device and also
1456 * the internal abort does not succeed, then it is safe to
1458 * Note: if the internal abort succeeds then the slot
1459 * will have already been completed
1461 if (rc
== TMF_RESP_FUNC_COMPLETE
&& rc2
!= TMF_RESP_FUNC_SUCC
) {
1462 if (task
->lldd_task
)
1463 hisi_sas_do_release_task(hisi_hba
, task
, slot
);
1465 } else if (task
->task_proto
& SAS_PROTOCOL_SATA
||
1466 task
->task_proto
& SAS_PROTOCOL_STP
) {
1467 if (task
->dev
->dev_type
== SAS_SATA_DEV
) {
1468 rc
= hisi_sas_internal_task_abort(hisi_hba
, device
,
1469 HISI_SAS_INT_ABT_DEV
, 0);
1471 dev_err(dev
, "abort task: internal abort failed\n");
1474 hisi_sas_dereg_device(hisi_hba
, device
);
1475 rc
= hisi_sas_softreset_ata_disk(device
);
1477 } else if (task
->lldd_task
&& task
->task_proto
& SAS_PROTOCOL_SMP
) {
1479 struct hisi_sas_slot
*slot
= task
->lldd_task
;
1480 u32 tag
= slot
->idx
;
1482 rc
= hisi_sas_internal_task_abort(hisi_hba
, device
,
1483 HISI_SAS_INT_ABT_CMD
, tag
);
1484 if (((rc
< 0) || (rc
== TMF_RESP_FUNC_FAILED
)) &&
1486 hisi_sas_do_release_task(hisi_hba
, task
, slot
);
1490 if (rc
!= TMF_RESP_FUNC_COMPLETE
)
1491 dev_notice(dev
, "abort task: rc=%d\n", rc
);
static int hisi_sas_abort_task_set(struct domain_device *device, u8 *lun)
{
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	struct hisi_sas_tmf_task tmf_task;
	int rc = TMF_RESP_FUNC_FAILED;

	rc = hisi_sas_internal_task_abort(hisi_hba, device,
					  HISI_SAS_INT_ABT_DEV, 0);
	if (rc < 0) {
		dev_err(dev, "abort task set: internal abort rc=%d\n", rc);
		return TMF_RESP_FUNC_FAILED;
	}
	hisi_sas_dereg_device(hisi_hba, device);

	tmf_task.tmf = TMF_ABORT_TASK_SET;
	rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);

	if (rc == TMF_RESP_FUNC_COMPLETE)
		hisi_sas_release_task(hisi_hba, device);

	return rc;
}
static int hisi_sas_clear_aca(struct domain_device *device, u8 *lun)
{
	int rc = TMF_RESP_FUNC_FAILED;
	struct hisi_sas_tmf_task tmf_task;

	tmf_task.tmf = TMF_CLEAR_ACA;
	rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);

	return rc;
}
static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device)
{
	struct sas_phy *local_phy = sas_get_local_phy(device);
	int rc, reset_type = (device->dev_type == SAS_SATA_DEV ||
			(device->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct sas_ha_struct *sas_ha = &hisi_hba->sha;
	struct asd_sas_phy *sas_phy = sas_ha->sas_phy[local_phy->number];
	struct hisi_sas_phy *phy = container_of(sas_phy,
			struct hisi_sas_phy, sas_phy);
	DECLARE_COMPLETION_ONSTACK(phyreset);

	if (scsi_is_sas_phy_local(local_phy)) {
		phy->in_reset = 1;
		phy->reset_completion = &phyreset;
	}

	rc = sas_phy_reset(local_phy, reset_type);
	sas_put_local_phy(local_phy);

	if (scsi_is_sas_phy_local(local_phy)) {
		int ret = wait_for_completion_timeout(&phyreset, 2 * HZ);
		unsigned long flags;

		spin_lock_irqsave(&phy->lock, flags);
		phy->reset_completion = NULL;
		phy->in_reset = 0;
		spin_unlock_irqrestore(&phy->lock, flags);

		/* report PHY down if timed out */
		if (!ret)
			hisi_sas_phy_down(hisi_hba, sas_phy->id, 0);
	}

	return rc;
}
static int hisi_sas_I_T_nexus_reset(struct domain_device *device)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	int rc = TMF_RESP_FUNC_FAILED;

	if (sas_dev->dev_status != HISI_SAS_DEV_EH)
		return TMF_RESP_FUNC_FAILED;
	sas_dev->dev_status = HISI_SAS_DEV_NORMAL;

	rc = hisi_sas_internal_task_abort(hisi_hba, device,
					  HISI_SAS_INT_ABT_DEV, 0);
	if (rc < 0) {
		dev_err(dev, "I_T nexus reset: internal abort (%d)\n", rc);
		return TMF_RESP_FUNC_FAILED;
	}
	hisi_sas_dereg_device(hisi_hba, device);

	rc = hisi_sas_debug_I_T_nexus_reset(device);

	if ((rc == TMF_RESP_FUNC_COMPLETE) || (rc == -ENODEV))
		hisi_sas_release_task(hisi_hba, device);

	return rc;
}
static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	int rc = TMF_RESP_FUNC_FAILED;

	sas_dev->dev_status = HISI_SAS_DEV_EH;
	if (dev_is_sata(device)) {
		struct sas_phy *phy;

		/* Clear internal IO and then hardreset */
		rc = hisi_sas_internal_task_abort(hisi_hba, device,
						  HISI_SAS_INT_ABT_DEV, 0);
		if (rc < 0) {
			dev_err(dev, "lu_reset: internal abort failed\n");
			goto out;
		}
		hisi_sas_dereg_device(hisi_hba, device);

		phy = sas_get_local_phy(device);

		rc = sas_phy_reset(phy, 1);

		if (rc == 0)
			hisi_sas_release_task(hisi_hba, device);
		sas_put_local_phy(phy);
	} else {
		struct hisi_sas_tmf_task tmf_task = { .tmf = TMF_LU_RESET };

		rc = hisi_sas_internal_task_abort(hisi_hba, device,
						  HISI_SAS_INT_ABT_DEV, 0);
		if (rc < 0) {
			dev_err(dev, "lu_reset: internal abort failed\n");
			goto out;
		}
		hisi_sas_dereg_device(hisi_hba, device);

		rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
		if (rc == TMF_RESP_FUNC_COMPLETE)
			hisi_sas_release_task(hisi_hba, device);
	}
out:
	if (rc != TMF_RESP_FUNC_COMPLETE)
		dev_err(dev, "lu_reset: for device[%d]:rc= %d\n",
			sas_dev->device_id, rc);
	return rc;
}
static int hisi_sas_clear_nexus_ha(struct sas_ha_struct *sas_ha)
{
	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
	HISI_SAS_DECLARE_RST_WORK_ON_STACK(r);

	queue_work(hisi_hba->wq, &r.work);
	wait_for_completion(r.completion);
	if (r.done)
		return TMF_RESP_FUNC_COMPLETE;

	return TMF_RESP_FUNC_FAILED;
}
static int hisi_sas_query_task(struct sas_task *task)
{
	struct scsi_lun lun;
	struct hisi_sas_tmf_task tmf_task;
	int rc = TMF_RESP_FUNC_FAILED;

	if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
		struct scsi_cmnd *cmnd = task->uldd_task;
		struct domain_device *device = task->dev;
		struct hisi_sas_slot *slot = task->lldd_task;
		u32 tag = slot->idx;

		int_to_scsilun(cmnd->device->lun, &lun);
		tmf_task.tmf = TMF_QUERY_TASK;
		tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);

		rc = hisi_sas_debug_issue_ssp_tmf(device,
						  lun.scsi_lun,
						  &tmf_task);
		switch (rc) {
		/* The task is still in Lun, release it then */
		case TMF_RESP_FUNC_SUCC:
		/* The task is not in Lun or failed, reset the phy */
		case TMF_RESP_FUNC_FAILED:
		case TMF_RESP_FUNC_COMPLETE:
			break;
		default:
			rc = TMF_RESP_FUNC_FAILED;
			break;
		}
	}
	return rc;
}
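/* Allocate a slot and send a HW internal-abort command, targeting either
 * a single tagged IO or every IO for the device, depending on abort_flag.
 */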
static int
hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
				  struct sas_task *task, int abort_flag,
				  int task_tag)
{
	struct domain_device *device = task->dev;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct device *dev = hisi_hba->dev;
	struct hisi_sas_port *port;
	struct hisi_sas_slot *slot;
	struct asd_sas_port *sas_port = device->port;
	struct hisi_sas_cmd_hdr *cmd_hdr_base;
	struct hisi_sas_dq *dq = sas_dev->dq;
	int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx;
	unsigned long flags, flags_dq = 0;
	int wr_q_index;

	if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags)))
		return -EINVAL;

	if (!device->port)
		return -1;

	port = to_hisi_sas_port(sas_port);

	/* simply get a slot and send abort command */
	spin_lock_irqsave(&hisi_hba->lock, flags);
	rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
	if (rc) {
		spin_unlock_irqrestore(&hisi_hba->lock, flags);
		goto err_out;
	}
	spin_unlock_irqrestore(&hisi_hba->lock, flags);

	slot = &hisi_hba->slot_info[slot_idx];
	memset(slot, 0, sizeof(struct hisi_sas_slot));

	slot->buf = dma_pool_alloc(hisi_hba->buffer_pool,
				   GFP_ATOMIC, &slot->buf_dma);
	if (!slot->buf) {
		rc = -ENOMEM;
		goto err_out_tag;
	}

	spin_lock_irqsave(&dq->lock, flags_dq);
	wr_q_index = hisi_hba->hw->get_free_slot(hisi_hba, dq);
	if (wr_q_index < 0) {
		spin_unlock_irqrestore(&dq->lock, flags_dq);
		rc = -EAGAIN;
		goto err_out_buf;
	}
	list_add_tail(&slot->delivery, &dq->list);
	spin_unlock_irqrestore(&dq->lock, flags_dq);

	dlvry_queue = dq->id;
	dlvry_queue_slot = wr_q_index;

	slot->idx = slot_idx;
	slot->n_elem = n_elem;
	slot->dlvry_queue = dlvry_queue;
	slot->dlvry_queue_slot = dlvry_queue_slot;
	cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
	slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
	slot->task = task;
	slot->port = port;
	slot->is_internal = true;
	task->lldd_task = slot;

	memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
	memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
	memset(hisi_sas_status_buf_addr_mem(slot), 0, HISI_SAS_STATUS_BUF_SZ);

	hisi_sas_task_prep_abort(hisi_hba, slot, device_id,
				 abort_flag, task_tag);

	spin_lock_irqsave(&task->task_state_lock, flags);
	task->task_state_flags |= SAS_TASK_AT_INITIATOR;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	/* send abort command to the chip */
	spin_lock_irqsave(&dq->lock, flags);
	list_add_tail(&slot->entry, &sas_dev->list);
	hisi_hba->hw->start_delivery(dq);
	spin_unlock_irqrestore(&dq->lock, flags);

	return 0;

err_out_buf:
	dma_pool_free(hisi_hba->buffer_pool, slot->buf,
		      slot->buf_dma);
err_out_tag:
	spin_lock_irqsave(&hisi_hba->lock, flags);
	hisi_sas_slot_index_free(hisi_hba, slot_idx);
	spin_unlock_irqrestore(&hisi_hba->lock, flags);
err_out:
	dev_err(dev, "internal abort task prep: failed[%d]!\n", rc);

	return rc;
}
/**
 * hisi_sas_internal_task_abort -- execute an internal
 * abort command for single IO command or a device
 * @hisi_hba: host controller struct
 * @device: domain device
 * @abort_flag: mode of operation, device or single IO
 * @tag: tag of IO to be aborted (only relevant to single
 *       IO mode)
 */
static int
hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
			     struct domain_device *device,
			     int abort_flag, int tag)
{
	struct sas_task *task;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct device *dev = hisi_hba->dev;
	int res;

	/*
	 * The interface is not realized means this HW don't support internal
	 * abort, or don't need to do internal abort. Then here, we return
	 * TMF_RESP_FUNC_FAILED and let other steps go on, which depends that
	 * the internal abort has been executed and returned CQ.
	 */
	if (!hisi_hba->hw->prep_abort)
		return TMF_RESP_FUNC_FAILED;

	task = sas_alloc_slow_task(GFP_KERNEL);
	if (!task)
		return -ENOMEM;

	task->dev = device;
	task->task_proto = device->tproto;
	task->task_done = hisi_sas_task_done;
	task->slow_task->timer.function = hisi_sas_tmf_timedout;
	task->slow_task->timer.expires = jiffies + INTERNAL_ABORT_TIMEOUT*HZ;
	add_timer(&task->slow_task->timer);

	res = hisi_sas_internal_abort_task_exec(hisi_hba, sas_dev->device_id,
						task, abort_flag, tag);
	if (res) {
		del_timer(&task->slow_task->timer);
		dev_err(dev, "internal task abort: executing internal task failed: %d\n",
			res);
		goto exit;
	}
	wait_for_completion(&task->slow_task->completion);
	res = TMF_RESP_FUNC_FAILED;

	/* Internal abort timed out */
	if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
		if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
			struct hisi_sas_slot *slot = task->lldd_task;

			if (slot)
				slot->task = NULL;
			dev_err(dev, "internal task abort: timeout and not done.\n");
			goto exit;
		} else
			dev_err(dev, "internal task abort: timeout.\n");
	}

	if (task->task_status.resp == SAS_TASK_COMPLETE &&
	    task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
		res = TMF_RESP_FUNC_COMPLETE;
		goto exit;
	}

	if (task->task_status.resp == SAS_TASK_COMPLETE &&
	    task->task_status.stat == TMF_RESP_FUNC_SUCC) {
		res = TMF_RESP_FUNC_SUCC;
		goto exit;
	}

exit:
	dev_dbg(dev, "internal task abort: task to dev %016llx task=%p "
		"resp: 0x%x sts 0x%x\n",
		SAS_ADDR(device->sas_addr),
		task,
		task->task_status.resp, /* 0 is complete, -1 is undelivered */
		task->task_status.stat);
	sas_free_task(task);

	return res;
}
static void hisi_sas_port_formed(struct asd_sas_phy *sas_phy)
{
	hisi_sas_port_notify_formed(sas_phy);
}

static void hisi_sas_port_deformed(struct asd_sas_phy *sas_phy)
{
}
static int hisi_sas_write_gpio(struct sas_ha_struct *sha, u8 reg_type,
			       u8 reg_index, u8 reg_count, u8 *write_data)
{
	struct hisi_hba *hisi_hba = sha->lldd_ha;

	if (!hisi_hba->hw->write_gpio)
		return -EOPNOTSUPP;

	return hisi_hba->hw->write_gpio(hisi_hba, reg_type,
					reg_index, reg_count, write_data);
}
static void hisi_sas_phy_disconnected(struct hisi_sas_phy *phy)
{
	phy->phy_attached = 0;
}
void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct sas_ha_struct *sas_ha = &hisi_hba->sha;
	struct device *dev = hisi_hba->dev;

	if (rdy) {
		/* Phy down but ready */
		hisi_sas_bytes_dmaed(hisi_hba, phy_no);
		hisi_sas_port_notify_formed(sas_phy);
	} else {
		struct hisi_sas_port *port = phy->port;

		if (phy->in_reset) {
			dev_info(dev, "ignore flutter phy%d down\n", phy_no);
			return;
		}
		/* Phy down and not ready */
		sas_ha->notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL);
		sas_phy_disconnected(sas_phy);

		if (port) {
			if (phy->phy_type & PORT_TYPE_SAS) {
				int port_id = port->id;

				if (!hisi_hba->hw->get_wideport_bitmap(hisi_hba,
								       port_id))
					port->port_attached = 0;
			} else if (phy->phy_type & PORT_TYPE_SATA)
				port->port_attached = 0;
		}
		hisi_sas_phy_disconnected(phy);
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_phy_down);
void hisi_sas_kill_tasklets(struct hisi_hba *hisi_hba)
{
	int i;

	for (i = 0; i < hisi_hba->queue_count; i++) {
		struct hisi_sas_cq *cq = &hisi_hba->cq[i];

		tasklet_kill(&cq->tasklet);
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_kill_tasklets);
struct scsi_transport_template *hisi_sas_stt;
EXPORT_SYMBOL_GPL(hisi_sas_stt);

struct device_attribute *host_attrs[] = {
	&dev_attr_phy_event_threshold,
	NULL,
};
EXPORT_SYMBOL_GPL(host_attrs);
static struct sas_domain_function_template hisi_sas_transport_ops = {
	.lldd_dev_found		= hisi_sas_dev_found,
	.lldd_dev_gone		= hisi_sas_dev_gone,
	.lldd_execute_task	= hisi_sas_queue_command,
	.lldd_control_phy	= hisi_sas_control_phy,
	.lldd_abort_task	= hisi_sas_abort_task,
	.lldd_abort_task_set	= hisi_sas_abort_task_set,
	.lldd_clear_aca		= hisi_sas_clear_aca,
	.lldd_I_T_nexus_reset	= hisi_sas_I_T_nexus_reset,
	.lldd_lu_reset		= hisi_sas_lu_reset,
	.lldd_query_task	= hisi_sas_query_task,
	.lldd_clear_nexus_ha	= hisi_sas_clear_nexus_ha,
	.lldd_port_formed	= hisi_sas_port_formed,
	.lldd_port_deformed	= hisi_sas_port_deformed,
	.lldd_write_gpio	= hisi_sas_write_gpio,
};
void hisi_sas_init_mem(struct hisi_hba *hisi_hba)
{
	int i, s, max_command_entries = hisi_hba->hw->max_command_entries;

	for (i = 0; i < hisi_hba->queue_count; i++) {
		struct hisi_sas_cq *cq = &hisi_hba->cq[i];
		struct hisi_sas_dq *dq = &hisi_hba->dq[i];

		s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
		memset(hisi_hba->cmd_hdr[i], 0, s);
		dq->wr_point = 0;

		s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
		memset(hisi_hba->complete_hdr[i], 0, s);
		cq->rd_point = 0;
	}

	s = sizeof(struct hisi_sas_initial_fis) * hisi_hba->n_phy;
	memset(hisi_hba->initial_fis, 0, s);

	s = max_command_entries * sizeof(struct hisi_sas_iost);
	memset(hisi_hba->iost, 0, s);

	s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
	memset(hisi_hba->breakpoint, 0, s);

	s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
	memset(hisi_hba->sata_breakpoint, 0, s);
}
EXPORT_SYMBOL_GPL(hisi_sas_init_mem);
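/* Allocate all per-HBA memory: per-queue command/completion rings, the
 * slot buffer DMA pool, ITCT/IOST/breakpoint tables, the tag bitmap and
 * the driver workqueue. Any failure unwinds to a single -ENOMEM.
 */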
int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
{
	struct device *dev = hisi_hba->dev;
	int i, s, max_command_entries = hisi_hba->hw->max_command_entries;

	spin_lock_init(&hisi_hba->lock);
	for (i = 0; i < hisi_hba->n_phy; i++) {
		hisi_sas_phy_init(hisi_hba, i);
		hisi_hba->port[i].port_attached = 0;
		hisi_hba->port[i].id = -1;
	}

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		hisi_hba->devices[i].dev_type = SAS_PHY_UNUSED;
		hisi_hba->devices[i].device_id = i;
		hisi_hba->devices[i].dev_status = HISI_SAS_DEV_NORMAL;
	}

	for (i = 0; i < hisi_hba->queue_count; i++) {
		struct hisi_sas_cq *cq = &hisi_hba->cq[i];
		struct hisi_sas_dq *dq = &hisi_hba->dq[i];

		/* Completion queue structure */
		cq->id = i;
		cq->hisi_hba = hisi_hba;

		/* Delivery queue structure */
		spin_lock_init(&dq->lock);
		INIT_LIST_HEAD(&dq->list);
		dq->id = i;
		dq->hisi_hba = hisi_hba;

		/* Delivery queue */
		s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
		hisi_hba->cmd_hdr[i] = dma_alloc_coherent(dev, s,
					&hisi_hba->cmd_hdr_dma[i], GFP_KERNEL);
		if (!hisi_hba->cmd_hdr[i])
			goto err_out;

		/* Completion queue */
		s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
		hisi_hba->complete_hdr[i] = dma_alloc_coherent(dev, s,
				&hisi_hba->complete_hdr_dma[i], GFP_KERNEL);
		if (!hisi_hba->complete_hdr[i])
			goto err_out;
	}

	s = sizeof(struct hisi_sas_slot_buf_table);
	hisi_hba->buffer_pool = dma_pool_create("dma_buffer", dev, s, 16, 0);
	if (!hisi_hba->buffer_pool)
		goto err_out;

	s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
	hisi_hba->itct = dma_zalloc_coherent(dev, s, &hisi_hba->itct_dma,
					     GFP_KERNEL);
	if (!hisi_hba->itct)
		goto err_out;

	hisi_hba->slot_info = devm_kcalloc(dev, max_command_entries,
					   sizeof(struct hisi_sas_slot),
					   GFP_KERNEL);
	if (!hisi_hba->slot_info)
		goto err_out;

	s = max_command_entries * sizeof(struct hisi_sas_iost);
	hisi_hba->iost = dma_alloc_coherent(dev, s, &hisi_hba->iost_dma,
					    GFP_KERNEL);
	if (!hisi_hba->iost)
		goto err_out;

	s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
	hisi_hba->breakpoint = dma_alloc_coherent(dev, s,
				&hisi_hba->breakpoint_dma, GFP_KERNEL);
	if (!hisi_hba->breakpoint)
		goto err_out;

	hisi_hba->slot_index_count = max_command_entries;
	s = hisi_hba->slot_index_count / BITS_PER_BYTE;
	hisi_hba->slot_index_tags = devm_kzalloc(dev, s, GFP_KERNEL);
	if (!hisi_hba->slot_index_tags)
		goto err_out;

	s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS;
	hisi_hba->initial_fis = dma_alloc_coherent(dev, s,
				&hisi_hba->initial_fis_dma, GFP_KERNEL);
	if (!hisi_hba->initial_fis)
		goto err_out;

	s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
	hisi_hba->sata_breakpoint = dma_alloc_coherent(dev, s,
				&hisi_hba->sata_breakpoint_dma, GFP_KERNEL);
	if (!hisi_hba->sata_breakpoint)
		goto err_out;

	hisi_sas_init_mem(hisi_hba);

	hisi_sas_slot_index_init(hisi_hba);

	hisi_hba->wq = create_singlethread_workqueue(dev_name(dev));
	if (!hisi_hba->wq) {
		dev_err(dev, "sas_alloc: failed to create workqueue\n");
		goto err_out;
	}

	return 0;
err_out:
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(hisi_sas_alloc);
void hisi_sas_free(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	int i, s, max_command_entries = hisi_hba->hw->max_command_entries;

	for (i = 0; i < hisi_hba->queue_count; i++) {
		s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
		if (hisi_hba->cmd_hdr[i])
			dma_free_coherent(dev, s,
					  hisi_hba->cmd_hdr[i],
					  hisi_hba->cmd_hdr_dma[i]);

		s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
		if (hisi_hba->complete_hdr[i])
			dma_free_coherent(dev, s,
					  hisi_hba->complete_hdr[i],
					  hisi_hba->complete_hdr_dma[i]);
	}

	dma_pool_destroy(hisi_hba->buffer_pool);

	s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
	if (hisi_hba->itct)
		dma_free_coherent(dev, s,
				  hisi_hba->itct, hisi_hba->itct_dma);

	s = max_command_entries * sizeof(struct hisi_sas_iost);
	if (hisi_hba->iost)
		dma_free_coherent(dev, s,
				  hisi_hba->iost, hisi_hba->iost_dma);

	s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
	if (hisi_hba->breakpoint)
		dma_free_coherent(dev, s,
				  hisi_hba->breakpoint,
				  hisi_hba->breakpoint_dma);

	s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS;
	if (hisi_hba->initial_fis)
		dma_free_coherent(dev, s,
				  hisi_hba->initial_fis,
				  hisi_hba->initial_fis_dma);

	s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
	if (hisi_hba->sata_breakpoint)
		dma_free_coherent(dev, s,
				  hisi_hba->sata_breakpoint,
				  hisi_hba->sata_breakpoint_dma);

	if (hisi_hba->wq)
		destroy_workqueue(hisi_hba->wq);
}
EXPORT_SYMBOL_GPL(hisi_sas_free);
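
/*
 * Two flavours of controller-reset work: the plain handler below simply
 * runs the reset, while the "sync" variant records the outcome in the
 * embedding struct hisi_sas_rst and signals its completion, so a caller
 * that queued the work can block until the reset has finished.
 */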
void hisi_sas_rst_work_handler(struct work_struct *work)
{
	struct hisi_hba *hisi_hba =
		container_of(work, struct hisi_hba, rst_work);

	hisi_sas_controller_reset(hisi_hba);
}
EXPORT_SYMBOL_GPL(hisi_sas_rst_work_handler);

void hisi_sas_sync_rst_work_handler(struct work_struct *work)
{
	struct hisi_sas_rst *rst =
		container_of(work, struct hisi_sas_rst, work);

	if (!hisi_sas_controller_reset(rst->hisi_hba))
		rst->done = true;
	complete(rst->completion);
}
EXPORT_SYMBOL_GPL(hisi_sas_sync_rst_work_handler);
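
/*
 * hisi_sas_get_fw_info(): read controller configuration from firmware.
 *
 * Uses the unified device-property API, so the lookups can be served by
 * either DT or ACPI firmware; the syscon and reset-register properties
 * are only fetched when an OF node is present, as the enclosing comment
 * notes they apply to DT-described platform devices only.  The reference
 * clock is optional, but sas-addr, phy-count and queue-count are
 * mandatory and their absence fails the probe with -ENOENT.
 */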
int hisi_sas_get_fw_info(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	struct platform_device *pdev = hisi_hba->platform_dev;
	struct device_node *np = pdev ? pdev->dev.of_node : NULL;
	struct clk *refclk;

	if (device_property_read_u8_array(dev, "sas-addr", hisi_hba->sas_addr,
					  SAS_ADDR_SIZE)) {
		dev_err(dev, "could not get property sas-addr\n");
		return -ENOENT;
	}

	if (np) {
		/*
		 * These properties are only required for platform device-based
		 * controller with DT firmware.
		 */
		hisi_hba->ctrl = syscon_regmap_lookup_by_phandle(np,
					"hisilicon,sas-syscon");
		if (IS_ERR(hisi_hba->ctrl)) {
			dev_err(dev, "could not get syscon\n");
			return -ENOENT;
		}

		if (device_property_read_u32(dev, "ctrl-reset-reg",
					     &hisi_hba->ctrl_reset_reg)) {
			dev_err(dev,
				"could not get property ctrl-reset-reg\n");
			return -ENOENT;
		}

		if (device_property_read_u32(dev, "ctrl-reset-sts-reg",
					     &hisi_hba->ctrl_reset_sts_reg)) {
			dev_err(dev,
				"could not get property ctrl-reset-sts-reg\n");
			return -ENOENT;
		}

		if (device_property_read_u32(dev, "ctrl-clock-ena-reg",
					     &hisi_hba->ctrl_clock_ena_reg)) {
			dev_err(dev,
				"could not get property ctrl-clock-ena-reg\n");
			return -ENOENT;
		}
	}

	refclk = devm_clk_get(dev, NULL);
	if (IS_ERR(refclk))
		dev_dbg(dev, "no ref clk property\n");
	else
		hisi_hba->refclk_frequency_mhz = clk_get_rate(refclk) / 1000000;

	if (device_property_read_u32(dev, "phy-count", &hisi_hba->n_phy)) {
		dev_err(dev, "could not get property phy-count\n");
		return -ENOENT;
	}

	if (device_property_read_u32(dev, "queue-count",
				     &hisi_hba->queue_count)) {
		dev_err(dev, "could not get property queue-count\n");
		return -ENOENT;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_get_fw_info);
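
/*
 * hisi_sas_shost_alloc(): create and partially initialise the SCSI host.
 *
 * Allocates the Scsi_Host with the hisi_hba embedded as its private data,
 * pulls the firmware properties, configures a 64-bit DMA mask (falling
 * back to 32-bit), maps the register windows (the second, SGPIO window is
 * optional) and finally calls hisi_sas_alloc().  On any failure the host
 * reference is dropped and NULL is returned.
 */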
static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
					      const struct hisi_sas_hw *hw)
{
	struct resource *res;
	struct Scsi_Host *shost;
	struct hisi_hba *hisi_hba;
	struct device *dev = &pdev->dev;

	shost = scsi_host_alloc(hw->sht, sizeof(*hisi_hba));
	if (!shost) {
		dev_err(dev, "scsi host alloc failed\n");
		return NULL;
	}
	hisi_hba = shost_priv(shost);

	INIT_WORK(&hisi_hba->rst_work, hisi_sas_rst_work_handler);
	hisi_hba->hw = hw;
	hisi_hba->dev = dev;
	hisi_hba->platform_dev = pdev;
	hisi_hba->shost = shost;
	SHOST_TO_SAS_HA(shost) = &hisi_hba->sha;

	timer_setup(&hisi_hba->timer, NULL, 0);

	if (hisi_sas_get_fw_info(hisi_hba) < 0)
		goto err_out;

	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
	    dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
		dev_err(dev, "No usable DMA addressing method\n");
		goto err_out;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	hisi_hba->regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(hisi_hba->regs))
		goto err_out;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (res) {
		hisi_hba->sgpio_regs = devm_ioremap_resource(dev, res);
		if (IS_ERR(hisi_hba->sgpio_regs))
			goto err_out;
	}

	if (hisi_sas_alloc(hisi_hba, shost)) {
		hisi_sas_free(hisi_hba);
		goto err_out;
	}

	return shost;
err_out:
	scsi_host_put(shost);
	dev_err(dev, "shost alloc failed\n");
	return NULL;
}
int hisi_sas_probe(struct platform_device *pdev,
		   const struct hisi_sas_hw *hw)
{
	struct Scsi_Host *shost;
	struct hisi_hba *hisi_hba;
	struct device *dev = &pdev->dev;
	struct asd_sas_phy **arr_phy;
	struct asd_sas_port **arr_port;
	struct sas_ha_struct *sha;
	int rc, phy_nr, port_nr, i;

	shost = hisi_sas_shost_alloc(pdev, hw);
	if (!shost)
		return -ENOMEM;

	sha = SHOST_TO_SAS_HA(shost);
	hisi_hba = shost_priv(shost);
	platform_set_drvdata(pdev, sha);

	phy_nr = port_nr = hisi_hba->n_phy;

	arr_phy = devm_kcalloc(dev, phy_nr, sizeof(void *), GFP_KERNEL);
	arr_port = devm_kcalloc(dev, port_nr, sizeof(void *), GFP_KERNEL);
	if (!arr_phy || !arr_port) {
		rc = -ENOMEM;
		goto err_out_ha;
	}

	sha->sas_phy = arr_phy;
	sha->sas_port = arr_port;
	sha->lldd_ha = hisi_hba;

	shost->transportt = hisi_sas_stt;
	shost->max_id = HISI_SAS_MAX_DEVICES;
	shost->max_lun = ~0;
	shost->max_channel = 1;
	shost->max_cmd_len = 16;
	shost->sg_tablesize = min_t(u16, SG_ALL, HISI_SAS_SGE_PAGE_CNT);
	shost->can_queue = hisi_hba->hw->max_command_entries;
	shost->cmd_per_lun = hisi_hba->hw->max_command_entries;

	sha->sas_ha_name = DRV_NAME;
	sha->dev = hisi_hba->dev;
	sha->lldd_module = THIS_MODULE;
	sha->sas_addr = &hisi_hba->sas_addr[0];
	sha->num_phys = hisi_hba->n_phy;
	sha->core.shost = hisi_hba->shost;

	for (i = 0; i < hisi_hba->n_phy; i++) {
		sha->sas_phy[i] = &hisi_hba->phy[i].sas_phy;
		sha->sas_port[i] = &hisi_hba->port[i].sas_port;
	}

	rc = scsi_add_host(shost, &pdev->dev);
	if (rc)
		goto err_out_ha;

	rc = sas_register_ha(sha);
	if (rc)
		goto err_out_register_ha;

	rc = hisi_hba->hw->hw_init(hisi_hba);
	if (rc)
		goto err_out_register_ha;

	scsi_scan_host(shost);

	return 0;

err_out_register_ha:
	scsi_remove_host(shost);
err_out_ha:
	hisi_sas_free(hisi_hba);
	scsi_host_put(shost);
	return rc;
}
EXPORT_SYMBOL_GPL(hisi_sas_probe);
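
/*
 * hisi_sas_remove(): tear-down mirror of hisi_sas_probe().  Any pending
 * controller timer is cancelled before the host is unregistered from
 * libsas and the SCSI midlayer, then the per-controller memories are
 * released and the final host reference dropped.
 */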
int hisi_sas_remove(struct platform_device *pdev)
{
	struct sas_ha_struct *sha = platform_get_drvdata(pdev);
	struct hisi_hba *hisi_hba = sha->lldd_ha;
	struct Scsi_Host *shost = sha->core.shost;

	if (timer_pending(&hisi_hba->timer))
		del_timer(&hisi_hba->timer);

	sas_unregister_ha(sha);
	sas_remove_host(sha->core.shost);

	hisi_sas_free(hisi_hba);
	scsi_host_put(shost);
	return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_remove);
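
/*
 * Module init/exit only attach and release the shared libsas transport
 * template; the actual controllers bind through the hw-specific drivers
 * that call hisi_sas_probe()/hisi_sas_remove() above.
 */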
static __init int hisi_sas_init(void)
{
	hisi_sas_stt = sas_domain_attach_transport(&hisi_sas_transport_ops);
	if (!hisi_sas_stt)
		return -ENOMEM;

	return 0;
}

static __exit void hisi_sas_exit(void)
{
	sas_release_transport(hisi_sas_stt);
}

module_init(hisi_sas_init);
module_exit(hisi_sas_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Garry <john.garry@huawei.com>");
MODULE_DESCRIPTION("HISILICON SAS controller driver");
MODULE_ALIAS("platform:" DRV_NAME);