// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2015 Linaro Ltd.
 * Copyright (c) 2015 Hisilicon Limited.
 */
#define DRV_NAME "hisi_sas"

/* True when the domain device is absent or its slot is unused. */
#define DEV_IS_GONE(dev) \
	((!dev) || (dev->dev_type == SAS_PHY_UNUSED))
13 static int hisi_sas_debug_issue_ssp_tmf(struct domain_device
*device
,
14 u8
*lun
, struct hisi_sas_tmf_task
*tmf
);
16 hisi_sas_internal_task_abort(struct hisi_hba
*hisi_hba
,
17 struct domain_device
*device
,
18 int abort_flag
, int tag
);
19 static int hisi_sas_softreset_ata_disk(struct domain_device
*device
);
20 static int hisi_sas_control_phy(struct asd_sas_phy
*sas_phy
, enum phy_func func
,
22 static void hisi_sas_release_task(struct hisi_hba
*hisi_hba
,
23 struct domain_device
*device
);
24 static void hisi_sas_dev_gone(struct domain_device
*device
);
26 u8
hisi_sas_get_ata_protocol(struct host_to_dev_fis
*fis
, int direction
)
28 switch (fis
->command
) {
29 case ATA_CMD_FPDMA_WRITE
:
30 case ATA_CMD_FPDMA_READ
:
31 case ATA_CMD_FPDMA_RECV
:
32 case ATA_CMD_FPDMA_SEND
:
33 case ATA_CMD_NCQ_NON_DATA
:
34 return HISI_SAS_SATA_PROTOCOL_FPDMA
;
36 case ATA_CMD_DOWNLOAD_MICRO
:
38 case ATA_CMD_PMP_READ
:
39 case ATA_CMD_READ_LOG_EXT
:
40 case ATA_CMD_PIO_READ
:
41 case ATA_CMD_PIO_READ_EXT
:
42 case ATA_CMD_PMP_WRITE
:
43 case ATA_CMD_WRITE_LOG_EXT
:
44 case ATA_CMD_PIO_WRITE
:
45 case ATA_CMD_PIO_WRITE_EXT
:
46 return HISI_SAS_SATA_PROTOCOL_PIO
;
49 case ATA_CMD_DOWNLOAD_MICRO_DMA
:
50 case ATA_CMD_PMP_READ_DMA
:
51 case ATA_CMD_PMP_WRITE_DMA
:
53 case ATA_CMD_READ_EXT
:
54 case ATA_CMD_READ_LOG_DMA_EXT
:
55 case ATA_CMD_READ_STREAM_DMA_EXT
:
56 case ATA_CMD_TRUSTED_RCV_DMA
:
57 case ATA_CMD_TRUSTED_SND_DMA
:
59 case ATA_CMD_WRITE_EXT
:
60 case ATA_CMD_WRITE_FUA_EXT
:
61 case ATA_CMD_WRITE_QUEUED
:
62 case ATA_CMD_WRITE_LOG_DMA_EXT
:
63 case ATA_CMD_WRITE_STREAM_DMA_EXT
:
64 case ATA_CMD_ZAC_MGMT_IN
:
65 return HISI_SAS_SATA_PROTOCOL_DMA
;
67 case ATA_CMD_CHK_POWER
:
68 case ATA_CMD_DEV_RESET
:
71 case ATA_CMD_FLUSH_EXT
:
73 case ATA_CMD_VERIFY_EXT
:
74 case ATA_CMD_SET_FEATURES
:
76 case ATA_CMD_STANDBYNOW1
:
77 case ATA_CMD_ZAC_MGMT_OUT
:
78 return HISI_SAS_SATA_PROTOCOL_NONDATA
;
81 switch (fis
->features
) {
82 case ATA_SET_MAX_PASSWD
:
83 case ATA_SET_MAX_LOCK
:
84 return HISI_SAS_SATA_PROTOCOL_PIO
;
86 case ATA_SET_MAX_PASSWD_DMA
:
87 case ATA_SET_MAX_UNLOCK_DMA
:
88 return HISI_SAS_SATA_PROTOCOL_DMA
;
91 return HISI_SAS_SATA_PROTOCOL_NONDATA
;
96 if (direction
== DMA_NONE
)
97 return HISI_SAS_SATA_PROTOCOL_NONDATA
;
98 return HISI_SAS_SATA_PROTOCOL_PIO
;
102 EXPORT_SYMBOL_GPL(hisi_sas_get_ata_protocol
);
104 void hisi_sas_sata_done(struct sas_task
*task
,
105 struct hisi_sas_slot
*slot
)
107 struct task_status_struct
*ts
= &task
->task_status
;
108 struct ata_task_resp
*resp
= (struct ata_task_resp
*)ts
->buf
;
109 struct hisi_sas_status_buffer
*status_buf
=
110 hisi_sas_status_buf_addr_mem(slot
);
111 u8
*iu
= &status_buf
->iu
[0];
112 struct dev_to_host_fis
*d2h
= (struct dev_to_host_fis
*)iu
;
114 resp
->frame_len
= sizeof(struct dev_to_host_fis
);
115 memcpy(&resp
->ending_fis
[0], d2h
, sizeof(struct dev_to_host_fis
));
117 ts
->buf_valid_size
= sizeof(*resp
);
119 EXPORT_SYMBOL_GPL(hisi_sas_sata_done
);
122 * This function assumes linkrate mask fits in 8 bits, which it
123 * does for all HW versions supported.
125 u8
hisi_sas_get_prog_phy_linkrate_mask(enum sas_linkrate max
)
130 max
-= SAS_LINK_RATE_1_5_GBPS
;
131 for (i
= 0; i
<= max
; i
++)
132 rate
|= 1 << (i
* 2);
135 EXPORT_SYMBOL_GPL(hisi_sas_get_prog_phy_linkrate_mask
);
137 static struct hisi_hba
*dev_to_hisi_hba(struct domain_device
*device
)
139 return device
->port
->ha
->lldd_ha
;
142 struct hisi_sas_port
*to_hisi_sas_port(struct asd_sas_port
*sas_port
)
144 return container_of(sas_port
, struct hisi_sas_port
, sas_port
);
146 EXPORT_SYMBOL_GPL(to_hisi_sas_port
);
148 void hisi_sas_stop_phys(struct hisi_hba
*hisi_hba
)
152 for (phy_no
= 0; phy_no
< hisi_hba
->n_phy
; phy_no
++)
153 hisi_sas_phy_enable(hisi_hba
, phy_no
, 0);
155 EXPORT_SYMBOL_GPL(hisi_sas_stop_phys
);
157 static void hisi_sas_slot_index_clear(struct hisi_hba
*hisi_hba
, int slot_idx
)
159 void *bitmap
= hisi_hba
->slot_index_tags
;
161 clear_bit(slot_idx
, bitmap
);
164 static void hisi_sas_slot_index_free(struct hisi_hba
*hisi_hba
, int slot_idx
)
166 if (hisi_hba
->hw
->slot_index_alloc
||
167 slot_idx
>= HISI_SAS_UNRESERVED_IPTT
) {
168 spin_lock(&hisi_hba
->lock
);
169 hisi_sas_slot_index_clear(hisi_hba
, slot_idx
);
170 spin_unlock(&hisi_hba
->lock
);
174 static void hisi_sas_slot_index_set(struct hisi_hba
*hisi_hba
, int slot_idx
)
176 void *bitmap
= hisi_hba
->slot_index_tags
;
178 set_bit(slot_idx
, bitmap
);
181 static int hisi_sas_slot_index_alloc(struct hisi_hba
*hisi_hba
,
182 struct scsi_cmnd
*scsi_cmnd
)
185 void *bitmap
= hisi_hba
->slot_index_tags
;
188 return scsi_cmnd
->request
->tag
;
190 spin_lock(&hisi_hba
->lock
);
191 index
= find_next_zero_bit(bitmap
, hisi_hba
->slot_index_count
,
192 hisi_hba
->last_slot_index
+ 1);
193 if (index
>= hisi_hba
->slot_index_count
) {
194 index
= find_next_zero_bit(bitmap
,
195 hisi_hba
->slot_index_count
,
196 HISI_SAS_UNRESERVED_IPTT
);
197 if (index
>= hisi_hba
->slot_index_count
) {
198 spin_unlock(&hisi_hba
->lock
);
199 return -SAS_QUEUE_FULL
;
202 hisi_sas_slot_index_set(hisi_hba
, index
);
203 hisi_hba
->last_slot_index
= index
;
204 spin_unlock(&hisi_hba
->lock
);
209 static void hisi_sas_slot_index_init(struct hisi_hba
*hisi_hba
)
213 for (i
= 0; i
< hisi_hba
->slot_index_count
; ++i
)
214 hisi_sas_slot_index_clear(hisi_hba
, i
);
217 void hisi_sas_slot_task_free(struct hisi_hba
*hisi_hba
, struct sas_task
*task
,
218 struct hisi_sas_slot
*slot
)
220 int device_id
= slot
->device_id
;
221 struct hisi_sas_device
*sas_dev
= &hisi_hba
->devices
[device_id
];
224 struct device
*dev
= hisi_hba
->dev
;
226 if (!task
->lldd_task
)
229 task
->lldd_task
= NULL
;
231 if (!sas_protocol_ata(task
->task_proto
)) {
232 struct sas_ssp_task
*ssp_task
= &task
->ssp_task
;
233 struct scsi_cmnd
*scsi_cmnd
= ssp_task
->cmd
;
236 dma_unmap_sg(dev
, task
->scatter
,
239 if (slot
->n_elem_dif
)
240 dma_unmap_sg(dev
, scsi_prot_sglist(scsi_cmnd
),
241 scsi_prot_sg_count(scsi_cmnd
),
246 spin_lock(&sas_dev
->lock
);
247 list_del_init(&slot
->entry
);
248 spin_unlock(&sas_dev
->lock
);
250 memset(slot
, 0, offsetof(struct hisi_sas_slot
, buf
));
252 hisi_sas_slot_index_free(hisi_hba
, slot
->idx
);
254 EXPORT_SYMBOL_GPL(hisi_sas_slot_task_free
);
256 static void hisi_sas_task_prep_smp(struct hisi_hba
*hisi_hba
,
257 struct hisi_sas_slot
*slot
)
259 hisi_hba
->hw
->prep_smp(hisi_hba
, slot
);
262 static void hisi_sas_task_prep_ssp(struct hisi_hba
*hisi_hba
,
263 struct hisi_sas_slot
*slot
)
265 hisi_hba
->hw
->prep_ssp(hisi_hba
, slot
);
268 static void hisi_sas_task_prep_ata(struct hisi_hba
*hisi_hba
,
269 struct hisi_sas_slot
*slot
)
271 hisi_hba
->hw
->prep_stp(hisi_hba
, slot
);
274 static void hisi_sas_task_prep_abort(struct hisi_hba
*hisi_hba
,
275 struct hisi_sas_slot
*slot
,
276 int device_id
, int abort_flag
, int tag_to_abort
)
278 hisi_hba
->hw
->prep_abort(hisi_hba
, slot
,
279 device_id
, abort_flag
, tag_to_abort
);
282 static void hisi_sas_dma_unmap(struct hisi_hba
*hisi_hba
,
283 struct sas_task
*task
, int n_elem
,
286 struct device
*dev
= hisi_hba
->dev
;
288 if (!sas_protocol_ata(task
->task_proto
)) {
289 if (task
->num_scatter
) {
291 dma_unmap_sg(dev
, task
->scatter
,
294 } else if (task
->task_proto
& SAS_PROTOCOL_SMP
) {
296 dma_unmap_sg(dev
, &task
->smp_task
.smp_req
,
302 static int hisi_sas_dma_map(struct hisi_hba
*hisi_hba
,
303 struct sas_task
*task
, int *n_elem
,
306 struct device
*dev
= hisi_hba
->dev
;
309 if (sas_protocol_ata(task
->task_proto
)) {
310 *n_elem
= task
->num_scatter
;
312 unsigned int req_len
;
314 if (task
->num_scatter
) {
315 *n_elem
= dma_map_sg(dev
, task
->scatter
,
316 task
->num_scatter
, task
->data_dir
);
321 } else if (task
->task_proto
& SAS_PROTOCOL_SMP
) {
322 *n_elem_req
= dma_map_sg(dev
, &task
->smp_task
.smp_req
,
328 req_len
= sg_dma_len(&task
->smp_task
.smp_req
);
331 goto err_out_dma_unmap
;
336 if (*n_elem
> HISI_SAS_SGE_PAGE_CNT
) {
337 dev_err(dev
, "task prep: n_elem(%d) > HISI_SAS_SGE_PAGE_CNT",
340 goto err_out_dma_unmap
;
345 /* It would be better to call dma_unmap_sg() here, but it's messy */
346 hisi_sas_dma_unmap(hisi_hba
, task
, *n_elem
,
352 static void hisi_sas_dif_dma_unmap(struct hisi_hba
*hisi_hba
,
353 struct sas_task
*task
, int n_elem_dif
)
355 struct device
*dev
= hisi_hba
->dev
;
358 struct sas_ssp_task
*ssp_task
= &task
->ssp_task
;
359 struct scsi_cmnd
*scsi_cmnd
= ssp_task
->cmd
;
361 dma_unmap_sg(dev
, scsi_prot_sglist(scsi_cmnd
),
362 scsi_prot_sg_count(scsi_cmnd
),
367 static int hisi_sas_dif_dma_map(struct hisi_hba
*hisi_hba
,
368 int *n_elem_dif
, struct sas_task
*task
)
370 struct device
*dev
= hisi_hba
->dev
;
371 struct sas_ssp_task
*ssp_task
;
372 struct scsi_cmnd
*scsi_cmnd
;
375 if (task
->num_scatter
) {
376 ssp_task
= &task
->ssp_task
;
377 scsi_cmnd
= ssp_task
->cmd
;
379 if (scsi_prot_sg_count(scsi_cmnd
)) {
380 *n_elem_dif
= dma_map_sg(dev
,
381 scsi_prot_sglist(scsi_cmnd
),
382 scsi_prot_sg_count(scsi_cmnd
),
388 if (*n_elem_dif
> HISI_SAS_SGE_DIF_PAGE_CNT
) {
389 dev_err(dev
, "task prep: n_elem_dif(%d) too large\n",
392 goto err_out_dif_dma_unmap
;
399 err_out_dif_dma_unmap
:
400 dma_unmap_sg(dev
, scsi_prot_sglist(scsi_cmnd
),
401 scsi_prot_sg_count(scsi_cmnd
), task
->data_dir
);
405 static int hisi_sas_task_prep(struct sas_task
*task
,
406 struct hisi_sas_dq
**dq_pointer
,
407 bool is_tmf
, struct hisi_sas_tmf_task
*tmf
,
410 struct domain_device
*device
= task
->dev
;
411 struct hisi_hba
*hisi_hba
= dev_to_hisi_hba(device
);
412 struct hisi_sas_device
*sas_dev
= device
->lldd_dev
;
413 struct hisi_sas_port
*port
;
414 struct hisi_sas_slot
*slot
;
415 struct hisi_sas_cmd_hdr
*cmd_hdr_base
;
416 struct asd_sas_port
*sas_port
= device
->port
;
417 struct device
*dev
= hisi_hba
->dev
;
418 int dlvry_queue_slot
, dlvry_queue
, rc
, slot_idx
;
419 int n_elem
= 0, n_elem_dif
= 0, n_elem_req
= 0;
420 struct hisi_sas_dq
*dq
;
424 if (DEV_IS_GONE(sas_dev
)) {
426 dev_info(dev
, "task prep: device %d not ready\n",
429 dev_info(dev
, "task prep: device %016llx not ready\n",
430 SAS_ADDR(device
->sas_addr
));
435 if (hisi_hba
->reply_map
) {
436 int cpu
= raw_smp_processor_id();
437 unsigned int dq_index
= hisi_hba
->reply_map
[cpu
];
439 *dq_pointer
= dq
= &hisi_hba
->dq
[dq_index
];
441 *dq_pointer
= dq
= sas_dev
->dq
;
444 port
= to_hisi_sas_port(sas_port
);
445 if (port
&& !port
->port_attached
) {
446 dev_info(dev
, "task prep: %s port%d not attach device\n",
447 (dev_is_sata(device
)) ?
454 rc
= hisi_sas_dma_map(hisi_hba
, task
, &n_elem
,
459 if (!sas_protocol_ata(task
->task_proto
)) {
460 rc
= hisi_sas_dif_dma_map(hisi_hba
, &n_elem_dif
, task
);
462 goto err_out_dma_unmap
;
465 if (hisi_hba
->hw
->slot_index_alloc
)
466 rc
= hisi_hba
->hw
->slot_index_alloc(hisi_hba
, device
);
468 struct scsi_cmnd
*scsi_cmnd
= NULL
;
470 if (task
->uldd_task
) {
471 struct ata_queued_cmd
*qc
;
473 if (dev_is_sata(device
)) {
474 qc
= task
->uldd_task
;
475 scsi_cmnd
= qc
->scsicmd
;
477 scsi_cmnd
= task
->uldd_task
;
480 rc
= hisi_sas_slot_index_alloc(hisi_hba
, scsi_cmnd
);
483 goto err_out_dif_dma_unmap
;
486 slot
= &hisi_hba
->slot_info
[slot_idx
];
488 spin_lock(&dq
->lock
);
489 wr_q_index
= dq
->wr_point
;
490 dq
->wr_point
= (dq
->wr_point
+ 1) % HISI_SAS_QUEUE_SLOTS
;
491 list_add_tail(&slot
->delivery
, &dq
->list
);
492 spin_unlock(&dq
->lock
);
493 spin_lock(&sas_dev
->lock
);
494 list_add_tail(&slot
->entry
, &sas_dev
->list
);
495 spin_unlock(&sas_dev
->lock
);
497 dlvry_queue
= dq
->id
;
498 dlvry_queue_slot
= wr_q_index
;
500 slot
->device_id
= sas_dev
->device_id
;
501 slot
->n_elem
= n_elem
;
502 slot
->n_elem_dif
= n_elem_dif
;
503 slot
->dlvry_queue
= dlvry_queue
;
504 slot
->dlvry_queue_slot
= dlvry_queue_slot
;
505 cmd_hdr_base
= hisi_hba
->cmd_hdr
[dlvry_queue
];
506 slot
->cmd_hdr
= &cmd_hdr_base
[dlvry_queue_slot
];
510 slot
->is_internal
= is_tmf
;
511 task
->lldd_task
= slot
;
513 memset(slot
->cmd_hdr
, 0, sizeof(struct hisi_sas_cmd_hdr
));
514 memset(hisi_sas_cmd_hdr_addr_mem(slot
), 0, HISI_SAS_COMMAND_TABLE_SZ
);
515 memset(hisi_sas_status_buf_addr_mem(slot
), 0,
516 sizeof(struct hisi_sas_err_record
));
518 switch (task
->task_proto
) {
519 case SAS_PROTOCOL_SMP
:
520 hisi_sas_task_prep_smp(hisi_hba
, slot
);
522 case SAS_PROTOCOL_SSP
:
523 hisi_sas_task_prep_ssp(hisi_hba
, slot
);
525 case SAS_PROTOCOL_SATA
:
526 case SAS_PROTOCOL_STP
:
527 case SAS_PROTOCOL_SATA
| SAS_PROTOCOL_STP
:
528 hisi_sas_task_prep_ata(hisi_hba
, slot
);
531 dev_err(dev
, "task prep: unknown/unsupported proto (0x%x)\n",
536 spin_lock_irqsave(&task
->task_state_lock
, flags
);
537 task
->task_state_flags
|= SAS_TASK_AT_INITIATOR
;
538 spin_unlock_irqrestore(&task
->task_state_lock
, flags
);
541 WRITE_ONCE(slot
->ready
, 1);
545 err_out_dif_dma_unmap
:
546 if (!sas_protocol_ata(task
->task_proto
))
547 hisi_sas_dif_dma_unmap(hisi_hba
, task
, n_elem_dif
);
549 hisi_sas_dma_unmap(hisi_hba
, task
, n_elem
,
552 dev_err(dev
, "task prep: failed[%d]!\n", rc
);
556 static int hisi_sas_task_exec(struct sas_task
*task
, gfp_t gfp_flags
,
557 bool is_tmf
, struct hisi_sas_tmf_task
*tmf
)
561 struct hisi_hba
*hisi_hba
;
563 struct domain_device
*device
= task
->dev
;
564 struct asd_sas_port
*sas_port
= device
->port
;
565 struct hisi_sas_dq
*dq
= NULL
;
568 struct task_status_struct
*ts
= &task
->task_status
;
570 ts
->resp
= SAS_TASK_UNDELIVERED
;
571 ts
->stat
= SAS_PHY_DOWN
;
573 * libsas will use dev->port, should
574 * not call task_done for sata
576 if (device
->dev_type
!= SAS_SATA_DEV
)
577 task
->task_done(task
);
581 hisi_hba
= dev_to_hisi_hba(device
);
584 if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT
, &hisi_hba
->flags
))) {
586 * For IOs from upper layer, it may already disable preempt
587 * in the IO path, if disable preempt again in down(),
588 * function schedule() will report schedule_bug(), so check
589 * preemptible() before goto down().
594 down(&hisi_hba
->sem
);
598 /* protect task_prep and start_delivery sequence */
599 rc
= hisi_sas_task_prep(task
, &dq
, is_tmf
, tmf
, &pass
);
601 dev_err(dev
, "task exec: failed[%d]!\n", rc
);
604 spin_lock(&dq
->lock
);
605 hisi_hba
->hw
->start_delivery(dq
);
606 spin_unlock(&dq
->lock
);
612 static void hisi_sas_bytes_dmaed(struct hisi_hba
*hisi_hba
, int phy_no
)
614 struct hisi_sas_phy
*phy
= &hisi_hba
->phy
[phy_no
];
615 struct asd_sas_phy
*sas_phy
= &phy
->sas_phy
;
616 struct sas_ha_struct
*sas_ha
;
618 if (!phy
->phy_attached
)
621 sas_ha
= &hisi_hba
->sha
;
622 sas_ha
->notify_phy_event(sas_phy
, PHYE_OOB_DONE
);
625 struct sas_phy
*sphy
= sas_phy
->phy
;
627 sphy
->negotiated_linkrate
= sas_phy
->linkrate
;
628 sphy
->minimum_linkrate_hw
= SAS_LINK_RATE_1_5_GBPS
;
629 sphy
->maximum_linkrate_hw
=
630 hisi_hba
->hw
->phy_get_max_linkrate();
631 if (sphy
->minimum_linkrate
== SAS_LINK_RATE_UNKNOWN
)
632 sphy
->minimum_linkrate
= phy
->minimum_linkrate
;
634 if (sphy
->maximum_linkrate
== SAS_LINK_RATE_UNKNOWN
)
635 sphy
->maximum_linkrate
= phy
->maximum_linkrate
;
638 if (phy
->phy_type
& PORT_TYPE_SAS
) {
639 struct sas_identify_frame
*id
;
641 id
= (struct sas_identify_frame
*)phy
->frame_rcvd
;
642 id
->dev_type
= phy
->identify
.device_type
;
643 id
->initiator_bits
= SAS_PROTOCOL_ALL
;
644 id
->target_bits
= phy
->identify
.target_port_protocols
;
645 } else if (phy
->phy_type
& PORT_TYPE_SATA
) {
649 sas_phy
->frame_rcvd_size
= phy
->frame_rcvd_size
;
650 sas_ha
->notify_port_event(sas_phy
, PORTE_BYTES_DMAED
);
653 static struct hisi_sas_device
*hisi_sas_alloc_dev(struct domain_device
*device
)
655 struct hisi_hba
*hisi_hba
= dev_to_hisi_hba(device
);
656 struct hisi_sas_device
*sas_dev
= NULL
;
657 int last
= hisi_hba
->last_dev_id
;
658 int first
= (hisi_hba
->last_dev_id
+ 1) % HISI_SAS_MAX_DEVICES
;
661 spin_lock(&hisi_hba
->lock
);
662 for (i
= first
; i
!= last
; i
%= HISI_SAS_MAX_DEVICES
) {
663 if (hisi_hba
->devices
[i
].dev_type
== SAS_PHY_UNUSED
) {
664 int queue
= i
% hisi_hba
->queue_count
;
665 struct hisi_sas_dq
*dq
= &hisi_hba
->dq
[queue
];
667 hisi_hba
->devices
[i
].device_id
= i
;
668 sas_dev
= &hisi_hba
->devices
[i
];
669 sas_dev
->dev_status
= HISI_SAS_DEV_INIT
;
670 sas_dev
->dev_type
= device
->dev_type
;
671 sas_dev
->hisi_hba
= hisi_hba
;
672 sas_dev
->sas_device
= device
;
674 spin_lock_init(&sas_dev
->lock
);
675 INIT_LIST_HEAD(&hisi_hba
->devices
[i
].list
);
680 hisi_hba
->last_dev_id
= i
;
681 spin_unlock(&hisi_hba
->lock
);
686 #define HISI_SAS_DISK_RECOVER_CNT 3
687 static int hisi_sas_init_device(struct domain_device
*device
)
689 int rc
= TMF_RESP_FUNC_COMPLETE
;
691 struct hisi_sas_tmf_task tmf_task
;
692 int retry
= HISI_SAS_DISK_RECOVER_CNT
;
693 struct hisi_hba
*hisi_hba
= dev_to_hisi_hba(device
);
694 struct device
*dev
= hisi_hba
->dev
;
695 struct sas_phy
*local_phy
;
697 switch (device
->dev_type
) {
699 int_to_scsilun(0, &lun
);
701 tmf_task
.tmf
= TMF_CLEAR_TASK_SET
;
702 while (retry
-- > 0) {
703 rc
= hisi_sas_debug_issue_ssp_tmf(device
, lun
.scsi_lun
,
705 if (rc
== TMF_RESP_FUNC_COMPLETE
) {
706 hisi_sas_release_task(hisi_hba
, device
);
713 case SAS_SATA_PM_PORT
:
714 case SAS_SATA_PENDING
:
716 * send HARD RESET to clear previous affiliation of
719 local_phy
= sas_get_local_phy(device
);
720 if (!scsi_is_sas_phy_local(local_phy
) &&
721 !test_bit(HISI_SAS_RESET_BIT
, &hisi_hba
->flags
)) {
722 unsigned long deadline
= ata_deadline(jiffies
, 20000);
723 struct sata_device
*sata_dev
= &device
->sata_dev
;
724 struct ata_host
*ata_host
= sata_dev
->ata_host
;
725 struct ata_port_operations
*ops
= ata_host
->ops
;
726 struct ata_port
*ap
= sata_dev
->ap
;
727 struct ata_link
*link
;
728 unsigned int classes
;
730 ata_for_each_link(link
, ap
, EDGE
)
731 rc
= ops
->hardreset(link
, &classes
,
734 sas_put_local_phy(local_phy
);
736 dev_warn(dev
, "SATA disk hardreset fail: %d\n", rc
);
740 while (retry
-- > 0) {
741 rc
= hisi_sas_softreset_ata_disk(device
);
753 static int hisi_sas_dev_found(struct domain_device
*device
)
755 struct hisi_hba
*hisi_hba
= dev_to_hisi_hba(device
);
756 struct domain_device
*parent_dev
= device
->parent
;
757 struct hisi_sas_device
*sas_dev
;
758 struct device
*dev
= hisi_hba
->dev
;
761 if (hisi_hba
->hw
->alloc_dev
)
762 sas_dev
= hisi_hba
->hw
->alloc_dev(device
);
764 sas_dev
= hisi_sas_alloc_dev(device
);
766 dev_err(dev
, "fail alloc dev: max support %d devices\n",
767 HISI_SAS_MAX_DEVICES
);
771 device
->lldd_dev
= sas_dev
;
772 hisi_hba
->hw
->setup_itct(hisi_hba
, sas_dev
);
774 if (parent_dev
&& dev_is_expander(parent_dev
->dev_type
)) {
776 u8 phy_num
= parent_dev
->ex_dev
.num_phys
;
779 for (phy_no
= 0; phy_no
< phy_num
; phy_no
++) {
780 phy
= &parent_dev
->ex_dev
.ex_phy
[phy_no
];
781 if (SAS_ADDR(phy
->attached_sas_addr
) ==
782 SAS_ADDR(device
->sas_addr
))
786 if (phy_no
== phy_num
) {
787 dev_info(dev
, "dev found: no attached "
788 "dev:%016llx at ex:%016llx\n",
789 SAS_ADDR(device
->sas_addr
),
790 SAS_ADDR(parent_dev
->sas_addr
));
796 dev_info(dev
, "dev[%d:%x] found\n",
797 sas_dev
->device_id
, sas_dev
->dev_type
);
799 rc
= hisi_sas_init_device(device
);
802 sas_dev
->dev_status
= HISI_SAS_DEV_NORMAL
;
806 hisi_sas_dev_gone(device
);
/* SCSI slave_configure hook: cap non-SATA devices to a queue depth of 64. */
int hisi_sas_slave_configure(struct scsi_device *sdev)
{
	struct domain_device *dev = sdev_to_domain_dev(sdev);
	int ret = sas_slave_configure(sdev);

	if (ret)
		return ret;
	if (!dev_is_sata(dev))
		sas_change_queue_depth(sdev, 64);

	return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_slave_configure);
824 void hisi_sas_scan_start(struct Scsi_Host
*shost
)
826 struct hisi_hba
*hisi_hba
= shost_priv(shost
);
828 hisi_hba
->hw
->phys_init(hisi_hba
);
830 EXPORT_SYMBOL_GPL(hisi_sas_scan_start
);
832 int hisi_sas_scan_finished(struct Scsi_Host
*shost
, unsigned long time
)
834 struct hisi_hba
*hisi_hba
= shost_priv(shost
);
835 struct sas_ha_struct
*sha
= &hisi_hba
->sha
;
837 /* Wait for PHY up interrupt to occur */
844 EXPORT_SYMBOL_GPL(hisi_sas_scan_finished
);
846 static void hisi_sas_phyup_work(struct work_struct
*work
)
848 struct hisi_sas_phy
*phy
=
849 container_of(work
, typeof(*phy
), works
[HISI_PHYE_PHY_UP
]);
850 struct hisi_hba
*hisi_hba
= phy
->hisi_hba
;
851 struct asd_sas_phy
*sas_phy
= &phy
->sas_phy
;
852 int phy_no
= sas_phy
->id
;
854 if (phy
->identify
.target_port_protocols
== SAS_PROTOCOL_SSP
)
855 hisi_hba
->hw
->sl_notify_ssp(hisi_hba
, phy_no
);
856 hisi_sas_bytes_dmaed(hisi_hba
, phy_no
);
859 static void hisi_sas_linkreset_work(struct work_struct
*work
)
861 struct hisi_sas_phy
*phy
=
862 container_of(work
, typeof(*phy
), works
[HISI_PHYE_LINK_RESET
]);
863 struct asd_sas_phy
*sas_phy
= &phy
->sas_phy
;
865 hisi_sas_control_phy(sas_phy
, PHY_FUNC_LINK_RESET
, NULL
);
868 static const work_func_t hisi_sas_phye_fns
[HISI_PHYES_NUM
] = {
869 [HISI_PHYE_PHY_UP
] = hisi_sas_phyup_work
,
870 [HISI_PHYE_LINK_RESET
] = hisi_sas_linkreset_work
,
873 bool hisi_sas_notify_phy_event(struct hisi_sas_phy
*phy
,
874 enum hisi_sas_phy_event event
)
876 struct hisi_hba
*hisi_hba
= phy
->hisi_hba
;
878 if (WARN_ON(event
>= HISI_PHYES_NUM
))
881 return queue_work(hisi_hba
->wq
, &phy
->works
[event
]);
883 EXPORT_SYMBOL_GPL(hisi_sas_notify_phy_event
);
885 static void hisi_sas_wait_phyup_timedout(struct timer_list
*t
)
887 struct hisi_sas_phy
*phy
= from_timer(phy
, t
, timer
);
888 struct hisi_hba
*hisi_hba
= phy
->hisi_hba
;
889 struct device
*dev
= hisi_hba
->dev
;
890 int phy_no
= phy
->sas_phy
.id
;
892 dev_warn(dev
, "phy%d wait phyup timeout, issuing link reset\n", phy_no
);
893 hisi_sas_notify_phy_event(phy
, HISI_PHYE_LINK_RESET
);
896 void hisi_sas_phy_oob_ready(struct hisi_hba
*hisi_hba
, int phy_no
)
898 struct hisi_sas_phy
*phy
= &hisi_hba
->phy
[phy_no
];
899 struct device
*dev
= hisi_hba
->dev
;
901 if (!timer_pending(&phy
->timer
)) {
902 dev_dbg(dev
, "phy%d OOB ready\n", phy_no
);
903 phy
->timer
.expires
= jiffies
+ HISI_SAS_WAIT_PHYUP_TIMEOUT
* HZ
;
904 add_timer(&phy
->timer
);
907 EXPORT_SYMBOL_GPL(hisi_sas_phy_oob_ready
);
909 static void hisi_sas_phy_init(struct hisi_hba
*hisi_hba
, int phy_no
)
911 struct hisi_sas_phy
*phy
= &hisi_hba
->phy
[phy_no
];
912 struct asd_sas_phy
*sas_phy
= &phy
->sas_phy
;
915 phy
->hisi_hba
= hisi_hba
;
917 phy
->minimum_linkrate
= SAS_LINK_RATE_1_5_GBPS
;
918 phy
->maximum_linkrate
= hisi_hba
->hw
->phy_get_max_linkrate();
919 sas_phy
->enabled
= (phy_no
< hisi_hba
->n_phy
) ? 1 : 0;
920 sas_phy
->class = SAS
;
921 sas_phy
->iproto
= SAS_PROTOCOL_ALL
;
923 sas_phy
->type
= PHY_TYPE_PHYSICAL
;
924 sas_phy
->role
= PHY_ROLE_INITIATOR
;
925 sas_phy
->oob_mode
= OOB_NOT_CONNECTED
;
926 sas_phy
->linkrate
= SAS_LINK_RATE_UNKNOWN
;
927 sas_phy
->id
= phy_no
;
928 sas_phy
->sas_addr
= &hisi_hba
->sas_addr
[0];
929 sas_phy
->frame_rcvd
= &phy
->frame_rcvd
[0];
930 sas_phy
->ha
= (struct sas_ha_struct
*)hisi_hba
->shost
->hostdata
;
931 sas_phy
->lldd_phy
= phy
;
933 for (i
= 0; i
< HISI_PHYES_NUM
; i
++)
934 INIT_WORK(&phy
->works
[i
], hisi_sas_phye_fns
[i
]);
936 spin_lock_init(&phy
->lock
);
938 timer_setup(&phy
->timer
, hisi_sas_wait_phyup_timedout
, 0);
941 /* Wrapper to ensure we track hisi_sas_phy.enable properly */
942 void hisi_sas_phy_enable(struct hisi_hba
*hisi_hba
, int phy_no
, int enable
)
944 struct hisi_sas_phy
*phy
= &hisi_hba
->phy
[phy_no
];
945 struct asd_sas_phy
*aphy
= &phy
->sas_phy
;
946 struct sas_phy
*sphy
= aphy
->phy
;
949 spin_lock_irqsave(&phy
->lock
, flags
);
952 /* We may have been enabled already; if so, don't touch */
954 sphy
->negotiated_linkrate
= SAS_LINK_RATE_UNKNOWN
;
955 hisi_hba
->hw
->phy_start(hisi_hba
, phy_no
);
957 sphy
->negotiated_linkrate
= SAS_PHY_DISABLED
;
958 hisi_hba
->hw
->phy_disable(hisi_hba
, phy_no
);
960 phy
->enable
= enable
;
961 spin_unlock_irqrestore(&phy
->lock
, flags
);
963 EXPORT_SYMBOL_GPL(hisi_sas_phy_enable
);
965 static void hisi_sas_port_notify_formed(struct asd_sas_phy
*sas_phy
)
967 struct sas_ha_struct
*sas_ha
= sas_phy
->ha
;
968 struct hisi_hba
*hisi_hba
= sas_ha
->lldd_ha
;
969 struct hisi_sas_phy
*phy
= sas_phy
->lldd_phy
;
970 struct asd_sas_port
*sas_port
= sas_phy
->port
;
971 struct hisi_sas_port
*port
;
977 port
= to_hisi_sas_port(sas_port
);
978 spin_lock_irqsave(&hisi_hba
->lock
, flags
);
979 port
->port_attached
= 1;
980 port
->id
= phy
->port_id
;
982 sas_port
->lldd_port
= port
;
983 spin_unlock_irqrestore(&hisi_hba
->lock
, flags
);
986 static void hisi_sas_do_release_task(struct hisi_hba
*hisi_hba
, struct sas_task
*task
,
987 struct hisi_sas_slot
*slot
)
991 struct task_status_struct
*ts
;
993 ts
= &task
->task_status
;
995 ts
->resp
= SAS_TASK_COMPLETE
;
996 ts
->stat
= SAS_ABORTED_TASK
;
997 spin_lock_irqsave(&task
->task_state_lock
, flags
);
998 task
->task_state_flags
&=
999 ~(SAS_TASK_STATE_PENDING
| SAS_TASK_AT_INITIATOR
);
1000 if (!slot
->is_internal
&& task
->task_proto
!= SAS_PROTOCOL_SMP
)
1001 task
->task_state_flags
|= SAS_TASK_STATE_DONE
;
1002 spin_unlock_irqrestore(&task
->task_state_lock
, flags
);
1005 hisi_sas_slot_task_free(hisi_hba
, task
, slot
);
1008 static void hisi_sas_release_task(struct hisi_hba
*hisi_hba
,
1009 struct domain_device
*device
)
1011 struct hisi_sas_slot
*slot
, *slot2
;
1012 struct hisi_sas_device
*sas_dev
= device
->lldd_dev
;
1014 list_for_each_entry_safe(slot
, slot2
, &sas_dev
->list
, entry
)
1015 hisi_sas_do_release_task(hisi_hba
, slot
->task
, slot
);
1018 void hisi_sas_release_tasks(struct hisi_hba
*hisi_hba
)
1020 struct hisi_sas_device
*sas_dev
;
1021 struct domain_device
*device
;
1024 for (i
= 0; i
< HISI_SAS_MAX_DEVICES
; i
++) {
1025 sas_dev
= &hisi_hba
->devices
[i
];
1026 device
= sas_dev
->sas_device
;
1028 if ((sas_dev
->dev_type
== SAS_PHY_UNUSED
) ||
1032 hisi_sas_release_task(hisi_hba
, device
);
1035 EXPORT_SYMBOL_GPL(hisi_sas_release_tasks
);
1037 static void hisi_sas_dereg_device(struct hisi_hba
*hisi_hba
,
1038 struct domain_device
*device
)
1040 if (hisi_hba
->hw
->dereg_device
)
1041 hisi_hba
->hw
->dereg_device(hisi_hba
, device
);
1044 static void hisi_sas_dev_gone(struct domain_device
*device
)
1046 struct hisi_sas_device
*sas_dev
= device
->lldd_dev
;
1047 struct hisi_hba
*hisi_hba
= dev_to_hisi_hba(device
);
1048 struct device
*dev
= hisi_hba
->dev
;
1051 dev_info(dev
, "dev[%d:%x] is gone\n",
1052 sas_dev
->device_id
, sas_dev
->dev_type
);
1054 down(&hisi_hba
->sem
);
1055 if (!test_bit(HISI_SAS_RESET_BIT
, &hisi_hba
->flags
)) {
1056 hisi_sas_internal_task_abort(hisi_hba
, device
,
1057 HISI_SAS_INT_ABT_DEV
, 0);
1059 hisi_sas_dereg_device(hisi_hba
, device
);
1061 ret
= hisi_hba
->hw
->clear_itct(hisi_hba
, sas_dev
);
1062 device
->lldd_dev
= NULL
;
1065 if (hisi_hba
->hw
->free_device
)
1066 hisi_hba
->hw
->free_device(sas_dev
);
1068 /* Don't mark it as SAS_PHY_UNUSED if failed to clear ITCT */
1070 sas_dev
->dev_type
= SAS_PHY_UNUSED
;
1071 sas_dev
->sas_device
= NULL
;
1075 static int hisi_sas_queue_command(struct sas_task
*task
, gfp_t gfp_flags
)
1077 return hisi_sas_task_exec(task
, gfp_flags
, 0, NULL
);
1080 static int hisi_sas_phy_set_linkrate(struct hisi_hba
*hisi_hba
, int phy_no
,
1081 struct sas_phy_linkrates
*r
)
1083 struct sas_phy_linkrates _r
;
1085 struct hisi_sas_phy
*phy
= &hisi_hba
->phy
[phy_no
];
1086 struct asd_sas_phy
*sas_phy
= &phy
->sas_phy
;
1087 enum sas_linkrate min
, max
;
1089 if (r
->minimum_linkrate
> SAS_LINK_RATE_1_5_GBPS
)
1092 if (r
->maximum_linkrate
== SAS_LINK_RATE_UNKNOWN
) {
1093 max
= sas_phy
->phy
->maximum_linkrate
;
1094 min
= r
->minimum_linkrate
;
1095 } else if (r
->minimum_linkrate
== SAS_LINK_RATE_UNKNOWN
) {
1096 max
= r
->maximum_linkrate
;
1097 min
= sas_phy
->phy
->minimum_linkrate
;
1101 _r
.maximum_linkrate
= max
;
1102 _r
.minimum_linkrate
= min
;
1104 sas_phy
->phy
->maximum_linkrate
= max
;
1105 sas_phy
->phy
->minimum_linkrate
= min
;
1107 hisi_sas_phy_enable(hisi_hba
, phy_no
, 0);
1109 hisi_hba
->hw
->phy_set_linkrate(hisi_hba
, phy_no
, &_r
);
1110 hisi_sas_phy_enable(hisi_hba
, phy_no
, 1);
1115 static int hisi_sas_control_phy(struct asd_sas_phy
*sas_phy
, enum phy_func func
,
1118 struct sas_ha_struct
*sas_ha
= sas_phy
->ha
;
1119 struct hisi_hba
*hisi_hba
= sas_ha
->lldd_ha
;
1120 int phy_no
= sas_phy
->id
;
1123 case PHY_FUNC_HARD_RESET
:
1124 hisi_hba
->hw
->phy_hard_reset(hisi_hba
, phy_no
);
1127 case PHY_FUNC_LINK_RESET
:
1128 hisi_sas_phy_enable(hisi_hba
, phy_no
, 0);
1130 hisi_sas_phy_enable(hisi_hba
, phy_no
, 1);
1133 case PHY_FUNC_DISABLE
:
1134 hisi_sas_phy_enable(hisi_hba
, phy_no
, 0);
1137 case PHY_FUNC_SET_LINK_RATE
:
1138 return hisi_sas_phy_set_linkrate(hisi_hba
, phy_no
, funcdata
);
1139 case PHY_FUNC_GET_EVENTS
:
1140 if (hisi_hba
->hw
->get_events
) {
1141 hisi_hba
->hw
->get_events(hisi_hba
, phy_no
);
1145 case PHY_FUNC_RELEASE_SPINUP_HOLD
:
1152 static void hisi_sas_task_done(struct sas_task
*task
)
1154 del_timer(&task
->slow_task
->timer
);
1155 complete(&task
->slow_task
->completion
);
1158 static void hisi_sas_tmf_timedout(struct timer_list
*t
)
1160 struct sas_task_slow
*slow
= from_timer(slow
, t
, timer
);
1161 struct sas_task
*task
= slow
->task
;
1162 unsigned long flags
;
1163 bool is_completed
= true;
1165 spin_lock_irqsave(&task
->task_state_lock
, flags
);
1166 if (!(task
->task_state_flags
& SAS_TASK_STATE_DONE
)) {
1167 task
->task_state_flags
|= SAS_TASK_STATE_ABORTED
;
1168 is_completed
= false;
1170 spin_unlock_irqrestore(&task
->task_state_lock
, flags
);
1173 complete(&task
->slow_task
->completion
);
1176 #define TASK_TIMEOUT 20
1177 #define TASK_RETRY 3
1178 #define INTERNAL_ABORT_TIMEOUT 6
1179 static int hisi_sas_exec_internal_tmf_task(struct domain_device
*device
,
1180 void *parameter
, u32 para_len
,
1181 struct hisi_sas_tmf_task
*tmf
)
1183 struct hisi_sas_device
*sas_dev
= device
->lldd_dev
;
1184 struct hisi_hba
*hisi_hba
= sas_dev
->hisi_hba
;
1185 struct device
*dev
= hisi_hba
->dev
;
1186 struct sas_task
*task
;
1189 for (retry
= 0; retry
< TASK_RETRY
; retry
++) {
1190 task
= sas_alloc_slow_task(GFP_KERNEL
);
1195 task
->task_proto
= device
->tproto
;
1197 if (dev_is_sata(device
)) {
1198 task
->ata_task
.device_control_reg_update
= 1;
1199 memcpy(&task
->ata_task
.fis
, parameter
, para_len
);
1201 memcpy(&task
->ssp_task
, parameter
, para_len
);
1203 task
->task_done
= hisi_sas_task_done
;
1205 task
->slow_task
->timer
.function
= hisi_sas_tmf_timedout
;
1206 task
->slow_task
->timer
.expires
= jiffies
+ TASK_TIMEOUT
* HZ
;
1207 add_timer(&task
->slow_task
->timer
);
1209 res
= hisi_sas_task_exec(task
, GFP_KERNEL
, 1, tmf
);
1212 del_timer(&task
->slow_task
->timer
);
1213 dev_err(dev
, "abort tmf: executing internal task failed: %d\n",
1218 wait_for_completion(&task
->slow_task
->completion
);
1219 res
= TMF_RESP_FUNC_FAILED
;
1220 /* Even TMF timed out, return direct. */
1221 if ((task
->task_state_flags
& SAS_TASK_STATE_ABORTED
)) {
1222 if (!(task
->task_state_flags
& SAS_TASK_STATE_DONE
)) {
1223 struct hisi_sas_slot
*slot
= task
->lldd_task
;
1225 dev_err(dev
, "abort tmf: TMF task timeout and not done\n");
1227 struct hisi_sas_cq
*cq
=
1228 &hisi_hba
->cq
[slot
->dlvry_queue
];
1230 * sync irq to avoid free'ing task
1231 * before using task in IO completion
1233 synchronize_irq(cq
->irq_no
);
1239 dev_err(dev
, "abort tmf: TMF task timeout\n");
1242 if (task
->task_status
.resp
== SAS_TASK_COMPLETE
&&
1243 task
->task_status
.stat
== TMF_RESP_FUNC_COMPLETE
) {
1244 res
= TMF_RESP_FUNC_COMPLETE
;
1248 if (task
->task_status
.resp
== SAS_TASK_COMPLETE
&&
1249 task
->task_status
.stat
== TMF_RESP_FUNC_SUCC
) {
1250 res
= TMF_RESP_FUNC_SUCC
;
1254 if (task
->task_status
.resp
== SAS_TASK_COMPLETE
&&
1255 task
->task_status
.stat
== SAS_DATA_UNDERRUN
) {
1256 /* no error, but return the number of bytes of
1259 dev_warn(dev
, "abort tmf: task to dev %016llx resp: 0x%x sts 0x%x underrun\n",
1260 SAS_ADDR(device
->sas_addr
),
1261 task
->task_status
.resp
,
1262 task
->task_status
.stat
);
1263 res
= task
->task_status
.residual
;
1267 if (task
->task_status
.resp
== SAS_TASK_COMPLETE
&&
1268 task
->task_status
.stat
== SAS_DATA_OVERRUN
) {
1269 dev_warn(dev
, "abort tmf: blocked task error\n");
1274 if (task
->task_status
.resp
== SAS_TASK_COMPLETE
&&
1275 task
->task_status
.stat
== SAS_OPEN_REJECT
) {
1276 dev_warn(dev
, "abort tmf: open reject failed\n");
1279 dev_warn(dev
, "abort tmf: task to dev %016llx resp: 0x%x status 0x%x\n",
1280 SAS_ADDR(device
->sas_addr
),
1281 task
->task_status
.resp
,
1282 task
->task_status
.stat
);
1284 sas_free_task(task
);
1288 if (retry
== TASK_RETRY
)
1289 dev_warn(dev
, "abort tmf: executing internal task failed!\n");
1290 sas_free_task(task
);
1294 static void hisi_sas_fill_ata_reset_cmd(struct ata_device
*dev
,
1295 bool reset
, int pmp
, u8
*fis
)
1297 struct ata_taskfile tf
;
1299 ata_tf_init(dev
, &tf
);
1303 tf
.ctl
&= ~ATA_SRST
;
1304 tf
.command
= ATA_CMD_DEV_RESET
;
1305 ata_tf_to_fis(&tf
, pmp
, 0, fis
);
1308 static int hisi_sas_softreset_ata_disk(struct domain_device
*device
)
1311 struct ata_port
*ap
= device
->sata_dev
.ap
;
1312 struct ata_link
*link
;
1313 int rc
= TMF_RESP_FUNC_FAILED
;
1314 struct hisi_hba
*hisi_hba
= dev_to_hisi_hba(device
);
1315 struct device
*dev
= hisi_hba
->dev
;
1316 int s
= sizeof(struct host_to_dev_fis
);
1318 ata_for_each_link(link
, ap
, EDGE
) {
1319 int pmp
= sata_srst_pmp(link
);
1321 hisi_sas_fill_ata_reset_cmd(link
->device
, 1, pmp
, fis
);
1322 rc
= hisi_sas_exec_internal_tmf_task(device
, fis
, s
, NULL
);
1323 if (rc
!= TMF_RESP_FUNC_COMPLETE
)
1327 if (rc
== TMF_RESP_FUNC_COMPLETE
) {
1328 ata_for_each_link(link
, ap
, EDGE
) {
1329 int pmp
= sata_srst_pmp(link
);
1331 hisi_sas_fill_ata_reset_cmd(link
->device
, 0, pmp
, fis
);
1332 rc
= hisi_sas_exec_internal_tmf_task(device
, fis
,
1334 if (rc
!= TMF_RESP_FUNC_COMPLETE
)
1335 dev_err(dev
, "ata disk de-reset failed\n");
1338 dev_err(dev
, "ata disk reset failed\n");
1341 if (rc
== TMF_RESP_FUNC_COMPLETE
)
1342 hisi_sas_release_task(hisi_hba
, device
);
1347 static int hisi_sas_debug_issue_ssp_tmf(struct domain_device
*device
,
1348 u8
*lun
, struct hisi_sas_tmf_task
*tmf
)
1350 struct sas_ssp_task ssp_task
;
1352 if (!(device
->tproto
& SAS_PROTOCOL_SSP
))
1353 return TMF_RESP_FUNC_ESUPP
;
1355 memcpy(ssp_task
.LUN
, lun
, 8);
1357 return hisi_sas_exec_internal_tmf_task(device
, &ssp_task
,
1358 sizeof(ssp_task
), tmf
);
1361 static void hisi_sas_refresh_port_id(struct hisi_hba
*hisi_hba
)
1363 u32 state
= hisi_hba
->hw
->get_phys_state(hisi_hba
);
1366 for (i
= 0; i
< HISI_SAS_MAX_DEVICES
; i
++) {
1367 struct hisi_sas_device
*sas_dev
= &hisi_hba
->devices
[i
];
1368 struct domain_device
*device
= sas_dev
->sas_device
;
1369 struct asd_sas_port
*sas_port
;
1370 struct hisi_sas_port
*port
;
1371 struct hisi_sas_phy
*phy
= NULL
;
1372 struct asd_sas_phy
*sas_phy
;
1374 if ((sas_dev
->dev_type
== SAS_PHY_UNUSED
)
1375 || !device
|| !device
->port
)
1378 sas_port
= device
->port
;
1379 port
= to_hisi_sas_port(sas_port
);
1381 list_for_each_entry(sas_phy
, &sas_port
->phy_list
, port_phy_el
)
1382 if (state
& BIT(sas_phy
->id
)) {
1383 phy
= sas_phy
->lldd_phy
;
1388 port
->id
= phy
->port_id
;
1390 /* Update linkrate of directly attached device. */
1391 if (!device
->parent
)
1392 device
->linkrate
= phy
->sas_phy
.linkrate
;
1394 hisi_hba
->hw
->setup_itct(hisi_hba
, sas_dev
);
1400 static void hisi_sas_rescan_topology(struct hisi_hba
*hisi_hba
, u32 state
)
1402 struct sas_ha_struct
*sas_ha
= &hisi_hba
->sha
;
1403 struct asd_sas_port
*_sas_port
= NULL
;
1406 for (phy_no
= 0; phy_no
< hisi_hba
->n_phy
; phy_no
++) {
1407 struct hisi_sas_phy
*phy
= &hisi_hba
->phy
[phy_no
];
1408 struct asd_sas_phy
*sas_phy
= &phy
->sas_phy
;
1409 struct asd_sas_port
*sas_port
= sas_phy
->port
;
1410 bool do_port_check
= _sas_port
!= sas_port
;
1412 if (!sas_phy
->phy
->enabled
)
1415 /* Report PHY state change to libsas */
1416 if (state
& BIT(phy_no
)) {
1417 if (do_port_check
&& sas_port
&& sas_port
->port_dev
) {
1418 struct domain_device
*dev
= sas_port
->port_dev
;
1420 _sas_port
= sas_port
;
1422 if (dev_is_expander(dev
->dev_type
))
1423 sas_ha
->notify_port_event(sas_phy
,
1424 PORTE_BROADCAST_RCVD
);
1427 hisi_sas_phy_down(hisi_hba
, phy_no
, 0);
1433 static void hisi_sas_reset_init_all_devices(struct hisi_hba
*hisi_hba
)
1435 struct hisi_sas_device
*sas_dev
;
1436 struct domain_device
*device
;
1439 for (i
= 0; i
< HISI_SAS_MAX_DEVICES
; i
++) {
1440 sas_dev
= &hisi_hba
->devices
[i
];
1441 device
= sas_dev
->sas_device
;
1443 if ((sas_dev
->dev_type
== SAS_PHY_UNUSED
) || !device
)
1446 hisi_sas_init_device(device
);
1450 static void hisi_sas_send_ata_reset_each_phy(struct hisi_hba
*hisi_hba
,
1451 struct asd_sas_port
*sas_port
,
1452 struct domain_device
*device
)
1454 struct hisi_sas_tmf_task tmf_task
= { .force_phy
= 1 };
1455 struct ata_port
*ap
= device
->sata_dev
.ap
;
1456 struct device
*dev
= hisi_hba
->dev
;
1457 int s
= sizeof(struct host_to_dev_fis
);
1458 int rc
= TMF_RESP_FUNC_FAILED
;
1459 struct asd_sas_phy
*sas_phy
;
1460 struct ata_link
*link
;
1464 state
= hisi_hba
->hw
->get_phys_state(hisi_hba
);
1465 list_for_each_entry(sas_phy
, &sas_port
->phy_list
, port_phy_el
) {
1466 if (!(state
& BIT(sas_phy
->id
)))
1469 ata_for_each_link(link
, ap
, EDGE
) {
1470 int pmp
= sata_srst_pmp(link
);
1472 tmf_task
.phy_id
= sas_phy
->id
;
1473 hisi_sas_fill_ata_reset_cmd(link
->device
, 1, pmp
, fis
);
1474 rc
= hisi_sas_exec_internal_tmf_task(device
, fis
, s
,
1476 if (rc
!= TMF_RESP_FUNC_COMPLETE
) {
1477 dev_err(dev
, "phy%d ata reset failed rc=%d\n",
1485 static void hisi_sas_terminate_stp_reject(struct hisi_hba
*hisi_hba
)
1487 struct device
*dev
= hisi_hba
->dev
;
1490 for (i
= 0; i
< HISI_SAS_MAX_DEVICES
; i
++) {
1491 struct hisi_sas_device
*sas_dev
= &hisi_hba
->devices
[i
];
1492 struct domain_device
*device
= sas_dev
->sas_device
;
1494 if ((sas_dev
->dev_type
== SAS_PHY_UNUSED
) || !device
)
1497 rc
= hisi_sas_internal_task_abort(hisi_hba
, device
,
1498 HISI_SAS_INT_ABT_DEV
, 0);
1500 dev_err(dev
, "STP reject: abort dev failed %d\n", rc
);
1503 for (port_no
= 0; port_no
< hisi_hba
->n_phy
; port_no
++) {
1504 struct hisi_sas_port
*port
= &hisi_hba
->port
[port_no
];
1505 struct asd_sas_port
*sas_port
= &port
->sas_port
;
1506 struct domain_device
*port_dev
= sas_port
->port_dev
;
1507 struct domain_device
*device
;
1509 if (!port_dev
|| !dev_is_expander(port_dev
->dev_type
))
1512 /* Try to find a SATA device */
1513 list_for_each_entry(device
, &sas_port
->dev_list
,
1515 if (dev_is_sata(device
)) {
1516 hisi_sas_send_ata_reset_each_phy(hisi_hba
,
1525 void hisi_sas_controller_reset_prepare(struct hisi_hba
*hisi_hba
)
1527 struct Scsi_Host
*shost
= hisi_hba
->shost
;
1529 down(&hisi_hba
->sem
);
1530 hisi_hba
->phy_state
= hisi_hba
->hw
->get_phys_state(hisi_hba
);
1532 scsi_block_requests(shost
);
1533 hisi_hba
->hw
->wait_cmds_complete_timeout(hisi_hba
, 100, 5000);
1535 if (timer_pending(&hisi_hba
->timer
))
1536 del_timer_sync(&hisi_hba
->timer
);
1538 set_bit(HISI_SAS_REJECT_CMD_BIT
, &hisi_hba
->flags
);
1540 EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_prepare
);
1542 void hisi_sas_controller_reset_done(struct hisi_hba
*hisi_hba
)
1544 struct Scsi_Host
*shost
= hisi_hba
->shost
;
1547 /* Init and wait for PHYs to come up and all libsas event finished. */
1548 hisi_hba
->hw
->phys_init(hisi_hba
);
1550 hisi_sas_refresh_port_id(hisi_hba
);
1551 clear_bit(HISI_SAS_REJECT_CMD_BIT
, &hisi_hba
->flags
);
1553 if (hisi_hba
->reject_stp_links_msk
)
1554 hisi_sas_terminate_stp_reject(hisi_hba
);
1555 hisi_sas_reset_init_all_devices(hisi_hba
);
1557 scsi_unblock_requests(shost
);
1558 clear_bit(HISI_SAS_RESET_BIT
, &hisi_hba
->flags
);
1560 state
= hisi_hba
->hw
->get_phys_state(hisi_hba
);
1561 hisi_sas_rescan_topology(hisi_hba
, state
);
1563 EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_done
);
1565 static int hisi_sas_controller_reset(struct hisi_hba
*hisi_hba
)
1567 struct device
*dev
= hisi_hba
->dev
;
1568 struct Scsi_Host
*shost
= hisi_hba
->shost
;
1571 if (hisi_sas_debugfs_enable
&& hisi_hba
->debugfs_itct
[0].itct
)
1572 queue_work(hisi_hba
->wq
, &hisi_hba
->debugfs_work
);
1574 if (!hisi_hba
->hw
->soft_reset
)
1577 if (test_and_set_bit(HISI_SAS_RESET_BIT
, &hisi_hba
->flags
))
1580 dev_info(dev
, "controller resetting...\n");
1581 hisi_sas_controller_reset_prepare(hisi_hba
);
1583 rc
= hisi_hba
->hw
->soft_reset(hisi_hba
);
1585 dev_warn(dev
, "controller reset failed (%d)\n", rc
);
1586 clear_bit(HISI_SAS_REJECT_CMD_BIT
, &hisi_hba
->flags
);
1588 scsi_unblock_requests(shost
);
1589 clear_bit(HISI_SAS_RESET_BIT
, &hisi_hba
->flags
);
1593 hisi_sas_controller_reset_done(hisi_hba
);
1594 dev_info(dev
, "controller reset complete\n");
1599 static int hisi_sas_abort_task(struct sas_task
*task
)
1601 struct scsi_lun lun
;
1602 struct hisi_sas_tmf_task tmf_task
;
1603 struct domain_device
*device
= task
->dev
;
1604 struct hisi_sas_device
*sas_dev
= device
->lldd_dev
;
1605 struct hisi_hba
*hisi_hba
;
1607 int rc
= TMF_RESP_FUNC_FAILED
;
1608 unsigned long flags
;
1611 return TMF_RESP_FUNC_FAILED
;
1613 hisi_hba
= dev_to_hisi_hba(task
->dev
);
1614 dev
= hisi_hba
->dev
;
1616 spin_lock_irqsave(&task
->task_state_lock
, flags
);
1617 if (task
->task_state_flags
& SAS_TASK_STATE_DONE
) {
1618 struct hisi_sas_slot
*slot
= task
->lldd_task
;
1619 struct hisi_sas_cq
*cq
;
1623 * sync irq to avoid free'ing task
1624 * before using task in IO completion
1626 cq
= &hisi_hba
->cq
[slot
->dlvry_queue
];
1627 synchronize_irq(cq
->irq_no
);
1629 spin_unlock_irqrestore(&task
->task_state_lock
, flags
);
1630 rc
= TMF_RESP_FUNC_COMPLETE
;
1633 task
->task_state_flags
|= SAS_TASK_STATE_ABORTED
;
1634 spin_unlock_irqrestore(&task
->task_state_lock
, flags
);
1636 if (task
->lldd_task
&& task
->task_proto
& SAS_PROTOCOL_SSP
) {
1637 struct scsi_cmnd
*cmnd
= task
->uldd_task
;
1638 struct hisi_sas_slot
*slot
= task
->lldd_task
;
1639 u16 tag
= slot
->idx
;
1642 int_to_scsilun(cmnd
->device
->lun
, &lun
);
1643 tmf_task
.tmf
= TMF_ABORT_TASK
;
1644 tmf_task
.tag_of_task_to_be_managed
= tag
;
1646 rc
= hisi_sas_debug_issue_ssp_tmf(task
->dev
, lun
.scsi_lun
,
1649 rc2
= hisi_sas_internal_task_abort(hisi_hba
, device
,
1650 HISI_SAS_INT_ABT_CMD
, tag
);
1652 dev_err(dev
, "abort task: internal abort (%d)\n", rc2
);
1653 return TMF_RESP_FUNC_FAILED
;
1657 * If the TMF finds that the IO is not in the device and also
1658 * the internal abort does not succeed, then it is safe to
1660 * Note: if the internal abort succeeds then the slot
1661 * will have already been completed
1663 if (rc
== TMF_RESP_FUNC_COMPLETE
&& rc2
!= TMF_RESP_FUNC_SUCC
) {
1664 if (task
->lldd_task
)
1665 hisi_sas_do_release_task(hisi_hba
, task
, slot
);
1667 } else if (task
->task_proto
& SAS_PROTOCOL_SATA
||
1668 task
->task_proto
& SAS_PROTOCOL_STP
) {
1669 if (task
->dev
->dev_type
== SAS_SATA_DEV
) {
1670 rc
= hisi_sas_internal_task_abort(hisi_hba
, device
,
1671 HISI_SAS_INT_ABT_DEV
,
1674 dev_err(dev
, "abort task: internal abort failed\n");
1677 hisi_sas_dereg_device(hisi_hba
, device
);
1678 rc
= hisi_sas_softreset_ata_disk(device
);
1680 } else if (task
->lldd_task
&& task
->task_proto
& SAS_PROTOCOL_SMP
) {
1682 struct hisi_sas_slot
*slot
= task
->lldd_task
;
1683 u32 tag
= slot
->idx
;
1684 struct hisi_sas_cq
*cq
= &hisi_hba
->cq
[slot
->dlvry_queue
];
1686 rc
= hisi_sas_internal_task_abort(hisi_hba
, device
,
1687 HISI_SAS_INT_ABT_CMD
, tag
);
1688 if (((rc
< 0) || (rc
== TMF_RESP_FUNC_FAILED
)) &&
1691 * sync irq to avoid free'ing task
1692 * before using task in IO completion
1694 synchronize_irq(cq
->irq_no
);
1700 if (rc
!= TMF_RESP_FUNC_COMPLETE
)
1701 dev_notice(dev
, "abort task: rc=%d\n", rc
);
1705 static int hisi_sas_abort_task_set(struct domain_device
*device
, u8
*lun
)
1707 struct hisi_hba
*hisi_hba
= dev_to_hisi_hba(device
);
1708 struct device
*dev
= hisi_hba
->dev
;
1709 struct hisi_sas_tmf_task tmf_task
;
1712 rc
= hisi_sas_internal_task_abort(hisi_hba
, device
,
1713 HISI_SAS_INT_ABT_DEV
, 0);
1715 dev_err(dev
, "abort task set: internal abort rc=%d\n", rc
);
1716 return TMF_RESP_FUNC_FAILED
;
1718 hisi_sas_dereg_device(hisi_hba
, device
);
1720 tmf_task
.tmf
= TMF_ABORT_TASK_SET
;
1721 rc
= hisi_sas_debug_issue_ssp_tmf(device
, lun
, &tmf_task
);
1723 if (rc
== TMF_RESP_FUNC_COMPLETE
)
1724 hisi_sas_release_task(hisi_hba
, device
);
1729 static int hisi_sas_clear_aca(struct domain_device
*device
, u8
*lun
)
1731 struct hisi_sas_tmf_task tmf_task
;
1734 tmf_task
.tmf
= TMF_CLEAR_ACA
;
1735 rc
= hisi_sas_debug_issue_ssp_tmf(device
, lun
, &tmf_task
);
1740 static int hisi_sas_debug_I_T_nexus_reset(struct domain_device
*device
)
1742 struct sas_phy
*local_phy
= sas_get_local_phy(device
);
1743 struct hisi_sas_device
*sas_dev
= device
->lldd_dev
;
1744 struct hisi_hba
*hisi_hba
= dev_to_hisi_hba(device
);
1745 struct sas_ha_struct
*sas_ha
= &hisi_hba
->sha
;
1746 DECLARE_COMPLETION_ONSTACK(phyreset
);
1749 if (!local_phy
->enabled
) {
1750 sas_put_local_phy(local_phy
);
1754 if (scsi_is_sas_phy_local(local_phy
)) {
1755 struct asd_sas_phy
*sas_phy
=
1756 sas_ha
->sas_phy
[local_phy
->number
];
1757 struct hisi_sas_phy
*phy
=
1758 container_of(sas_phy
, struct hisi_sas_phy
, sas_phy
);
1760 phy
->reset_completion
= &phyreset
;
1763 reset_type
= (sas_dev
->dev_status
== HISI_SAS_DEV_INIT
||
1764 !dev_is_sata(device
)) ? true : false;
1766 rc
= sas_phy_reset(local_phy
, reset_type
);
1767 sas_put_local_phy(local_phy
);
1769 if (scsi_is_sas_phy_local(local_phy
)) {
1770 struct asd_sas_phy
*sas_phy
=
1771 sas_ha
->sas_phy
[local_phy
->number
];
1772 struct hisi_sas_phy
*phy
=
1773 container_of(sas_phy
, struct hisi_sas_phy
, sas_phy
);
1774 int ret
= wait_for_completion_timeout(&phyreset
, 2 * HZ
);
1775 unsigned long flags
;
1777 spin_lock_irqsave(&phy
->lock
, flags
);
1778 phy
->reset_completion
= NULL
;
1780 spin_unlock_irqrestore(&phy
->lock
, flags
);
1782 /* report PHY down if timed out */
1784 hisi_sas_phy_down(hisi_hba
, sas_phy
->id
, 0);
1785 } else if (sas_dev
->dev_status
!= HISI_SAS_DEV_INIT
) {
1787 * If in init state, we rely on caller to wait for link to be
1788 * ready; otherwise, except phy reset is fail, delay.
1797 static int hisi_sas_I_T_nexus_reset(struct domain_device
*device
)
1799 struct hisi_hba
*hisi_hba
= dev_to_hisi_hba(device
);
1800 struct device
*dev
= hisi_hba
->dev
;
1803 rc
= hisi_sas_internal_task_abort(hisi_hba
, device
,
1804 HISI_SAS_INT_ABT_DEV
, 0);
1806 dev_err(dev
, "I_T nexus reset: internal abort (%d)\n", rc
);
1807 return TMF_RESP_FUNC_FAILED
;
1809 hisi_sas_dereg_device(hisi_hba
, device
);
1811 if (dev_is_sata(device
)) {
1812 rc
= hisi_sas_softreset_ata_disk(device
);
1813 if (rc
== TMF_RESP_FUNC_FAILED
)
1814 return TMF_RESP_FUNC_FAILED
;
1817 rc
= hisi_sas_debug_I_T_nexus_reset(device
);
1819 if ((rc
== TMF_RESP_FUNC_COMPLETE
) || (rc
== -ENODEV
))
1820 hisi_sas_release_task(hisi_hba
, device
);
1825 static int hisi_sas_lu_reset(struct domain_device
*device
, u8
*lun
)
1827 struct hisi_sas_device
*sas_dev
= device
->lldd_dev
;
1828 struct hisi_hba
*hisi_hba
= dev_to_hisi_hba(device
);
1829 struct device
*dev
= hisi_hba
->dev
;
1830 int rc
= TMF_RESP_FUNC_FAILED
;
1832 /* Clear internal IO and then lu reset */
1833 rc
= hisi_sas_internal_task_abort(hisi_hba
, device
,
1834 HISI_SAS_INT_ABT_DEV
, 0);
1836 dev_err(dev
, "lu_reset: internal abort failed\n");
1839 hisi_sas_dereg_device(hisi_hba
, device
);
1841 if (dev_is_sata(device
)) {
1842 struct sas_phy
*phy
;
1844 phy
= sas_get_local_phy(device
);
1846 rc
= sas_phy_reset(phy
, true);
1849 hisi_sas_release_task(hisi_hba
, device
);
1850 sas_put_local_phy(phy
);
1852 struct hisi_sas_tmf_task tmf_task
= { .tmf
= TMF_LU_RESET
};
1854 rc
= hisi_sas_debug_issue_ssp_tmf(device
, lun
, &tmf_task
);
1855 if (rc
== TMF_RESP_FUNC_COMPLETE
)
1856 hisi_sas_release_task(hisi_hba
, device
);
1859 if (rc
!= TMF_RESP_FUNC_COMPLETE
)
1860 dev_err(dev
, "lu_reset: for device[%d]:rc= %d\n",
1861 sas_dev
->device_id
, rc
);
1865 static int hisi_sas_clear_nexus_ha(struct sas_ha_struct
*sas_ha
)
1867 struct hisi_hba
*hisi_hba
= sas_ha
->lldd_ha
;
1868 struct device
*dev
= hisi_hba
->dev
;
1869 HISI_SAS_DECLARE_RST_WORK_ON_STACK(r
);
1872 queue_work(hisi_hba
->wq
, &r
.work
);
1873 wait_for_completion(r
.completion
);
1875 return TMF_RESP_FUNC_FAILED
;
1877 for (i
= 0; i
< HISI_SAS_MAX_DEVICES
; i
++) {
1878 struct hisi_sas_device
*sas_dev
= &hisi_hba
->devices
[i
];
1879 struct domain_device
*device
= sas_dev
->sas_device
;
1881 if ((sas_dev
->dev_type
== SAS_PHY_UNUSED
) || !device
||
1882 dev_is_expander(device
->dev_type
))
1885 rc
= hisi_sas_debug_I_T_nexus_reset(device
);
1886 if (rc
!= TMF_RESP_FUNC_COMPLETE
)
1887 dev_info(dev
, "clear nexus ha: for device[%d] rc=%d\n",
1888 sas_dev
->device_id
, rc
);
1891 hisi_sas_release_tasks(hisi_hba
);
1893 return TMF_RESP_FUNC_COMPLETE
;
1896 static int hisi_sas_query_task(struct sas_task
*task
)
1898 struct scsi_lun lun
;
1899 struct hisi_sas_tmf_task tmf_task
;
1900 int rc
= TMF_RESP_FUNC_FAILED
;
1902 if (task
->lldd_task
&& task
->task_proto
& SAS_PROTOCOL_SSP
) {
1903 struct scsi_cmnd
*cmnd
= task
->uldd_task
;
1904 struct domain_device
*device
= task
->dev
;
1905 struct hisi_sas_slot
*slot
= task
->lldd_task
;
1906 u32 tag
= slot
->idx
;
1908 int_to_scsilun(cmnd
->device
->lun
, &lun
);
1909 tmf_task
.tmf
= TMF_QUERY_TASK
;
1910 tmf_task
.tag_of_task_to_be_managed
= tag
;
1912 rc
= hisi_sas_debug_issue_ssp_tmf(device
,
1916 /* The task is still in Lun, release it then */
1917 case TMF_RESP_FUNC_SUCC
:
1918 /* The task is not in Lun or failed, reset the phy */
1919 case TMF_RESP_FUNC_FAILED
:
1920 case TMF_RESP_FUNC_COMPLETE
:
1923 rc
= TMF_RESP_FUNC_FAILED
;
1931 hisi_sas_internal_abort_task_exec(struct hisi_hba
*hisi_hba
, int device_id
,
1932 struct sas_task
*task
, int abort_flag
,
1933 int task_tag
, struct hisi_sas_dq
*dq
)
1935 struct domain_device
*device
= task
->dev
;
1936 struct hisi_sas_device
*sas_dev
= device
->lldd_dev
;
1937 struct device
*dev
= hisi_hba
->dev
;
1938 struct hisi_sas_port
*port
;
1939 struct hisi_sas_slot
*slot
;
1940 struct asd_sas_port
*sas_port
= device
->port
;
1941 struct hisi_sas_cmd_hdr
*cmd_hdr_base
;
1942 int dlvry_queue_slot
, dlvry_queue
, n_elem
= 0, rc
, slot_idx
;
1943 unsigned long flags
;
1946 if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT
, &hisi_hba
->flags
)))
1952 port
= to_hisi_sas_port(sas_port
);
1954 /* simply get a slot and send abort command */
1955 rc
= hisi_sas_slot_index_alloc(hisi_hba
, NULL
);
1960 slot
= &hisi_hba
->slot_info
[slot_idx
];
1962 spin_lock(&dq
->lock
);
1963 wr_q_index
= dq
->wr_point
;
1964 dq
->wr_point
= (dq
->wr_point
+ 1) % HISI_SAS_QUEUE_SLOTS
;
1965 list_add_tail(&slot
->delivery
, &dq
->list
);
1966 spin_unlock(&dq
->lock
);
1967 spin_lock(&sas_dev
->lock
);
1968 list_add_tail(&slot
->entry
, &sas_dev
->list
);
1969 spin_unlock(&sas_dev
->lock
);
1971 dlvry_queue
= dq
->id
;
1972 dlvry_queue_slot
= wr_q_index
;
1974 slot
->device_id
= sas_dev
->device_id
;
1975 slot
->n_elem
= n_elem
;
1976 slot
->dlvry_queue
= dlvry_queue
;
1977 slot
->dlvry_queue_slot
= dlvry_queue_slot
;
1978 cmd_hdr_base
= hisi_hba
->cmd_hdr
[dlvry_queue
];
1979 slot
->cmd_hdr
= &cmd_hdr_base
[dlvry_queue_slot
];
1982 slot
->is_internal
= true;
1983 task
->lldd_task
= slot
;
1985 memset(slot
->cmd_hdr
, 0, sizeof(struct hisi_sas_cmd_hdr
));
1986 memset(hisi_sas_cmd_hdr_addr_mem(slot
), 0, HISI_SAS_COMMAND_TABLE_SZ
);
1987 memset(hisi_sas_status_buf_addr_mem(slot
), 0,
1988 sizeof(struct hisi_sas_err_record
));
1990 hisi_sas_task_prep_abort(hisi_hba
, slot
, device_id
,
1991 abort_flag
, task_tag
);
1993 spin_lock_irqsave(&task
->task_state_lock
, flags
);
1994 task
->task_state_flags
|= SAS_TASK_AT_INITIATOR
;
1995 spin_unlock_irqrestore(&task
->task_state_lock
, flags
);
1996 WRITE_ONCE(slot
->ready
, 1);
1997 /* send abort command to the chip */
1998 spin_lock(&dq
->lock
);
1999 hisi_hba
->hw
->start_delivery(dq
);
2000 spin_unlock(&dq
->lock
);
2005 dev_err(dev
, "internal abort task prep: failed[%d]!\n", rc
);
2011 * _hisi_sas_internal_task_abort -- execute an internal
2012 * abort command for single IO command or a device
2013 * @hisi_hba: host controller struct
2014 * @device: domain device
2015 * @abort_flag: mode of operation, device or single IO
2016 * @tag: tag of IO to be aborted (only relevant to single
2018 * @dq: delivery queue for this internal abort command
2021 _hisi_sas_internal_task_abort(struct hisi_hba
*hisi_hba
,
2022 struct domain_device
*device
, int abort_flag
,
2023 int tag
, struct hisi_sas_dq
*dq
)
2025 struct sas_task
*task
;
2026 struct hisi_sas_device
*sas_dev
= device
->lldd_dev
;
2027 struct device
*dev
= hisi_hba
->dev
;
2031 * The interface is not realized means this HW don't support internal
2032 * abort, or don't need to do internal abort. Then here, we return
2033 * TMF_RESP_FUNC_FAILED and let other steps go on, which depends that
2034 * the internal abort has been executed and returned CQ.
2036 if (!hisi_hba
->hw
->prep_abort
)
2037 return TMF_RESP_FUNC_FAILED
;
2039 task
= sas_alloc_slow_task(GFP_KERNEL
);
2044 task
->task_proto
= device
->tproto
;
2045 task
->task_done
= hisi_sas_task_done
;
2046 task
->slow_task
->timer
.function
= hisi_sas_tmf_timedout
;
2047 task
->slow_task
->timer
.expires
= jiffies
+ INTERNAL_ABORT_TIMEOUT
* HZ
;
2048 add_timer(&task
->slow_task
->timer
);
2050 res
= hisi_sas_internal_abort_task_exec(hisi_hba
, sas_dev
->device_id
,
2051 task
, abort_flag
, tag
, dq
);
2053 del_timer(&task
->slow_task
->timer
);
2054 dev_err(dev
, "internal task abort: executing internal task failed: %d\n",
2058 wait_for_completion(&task
->slow_task
->completion
);
2059 res
= TMF_RESP_FUNC_FAILED
;
2061 /* Internal abort timed out */
2062 if ((task
->task_state_flags
& SAS_TASK_STATE_ABORTED
)) {
2063 if (hisi_sas_debugfs_enable
&& hisi_hba
->debugfs_itct
[0].itct
)
2064 queue_work(hisi_hba
->wq
, &hisi_hba
->debugfs_work
);
2066 if (!(task
->task_state_flags
& SAS_TASK_STATE_DONE
)) {
2067 struct hisi_sas_slot
*slot
= task
->lldd_task
;
2070 struct hisi_sas_cq
*cq
=
2071 &hisi_hba
->cq
[slot
->dlvry_queue
];
2073 * sync irq to avoid free'ing task
2074 * before using task in IO completion
2076 synchronize_irq(cq
->irq_no
);
2079 dev_err(dev
, "internal task abort: timeout and not done.\n");
2084 dev_err(dev
, "internal task abort: timeout.\n");
2087 if (task
->task_status
.resp
== SAS_TASK_COMPLETE
&&
2088 task
->task_status
.stat
== TMF_RESP_FUNC_COMPLETE
) {
2089 res
= TMF_RESP_FUNC_COMPLETE
;
2093 if (task
->task_status
.resp
== SAS_TASK_COMPLETE
&&
2094 task
->task_status
.stat
== TMF_RESP_FUNC_SUCC
) {
2095 res
= TMF_RESP_FUNC_SUCC
;
2100 dev_dbg(dev
, "internal task abort: task to dev %016llx task=%pK resp: 0x%x sts 0x%x\n",
2101 SAS_ADDR(device
->sas_addr
), task
,
2102 task
->task_status
.resp
, /* 0 is complete, -1 is undelivered */
2103 task
->task_status
.stat
);
2104 sas_free_task(task
);
2110 hisi_sas_internal_task_abort(struct hisi_hba
*hisi_hba
,
2111 struct domain_device
*device
,
2112 int abort_flag
, int tag
)
2114 struct hisi_sas_slot
*slot
;
2115 struct device
*dev
= hisi_hba
->dev
;
2116 struct hisi_sas_dq
*dq
;
2119 switch (abort_flag
) {
2120 case HISI_SAS_INT_ABT_CMD
:
2121 slot
= &hisi_hba
->slot_info
[tag
];
2122 dq
= &hisi_hba
->dq
[slot
->dlvry_queue
];
2123 return _hisi_sas_internal_task_abort(hisi_hba
, device
,
2124 abort_flag
, tag
, dq
);
2125 case HISI_SAS_INT_ABT_DEV
:
2126 for (i
= 0; i
< hisi_hba
->cq_nvecs
; i
++) {
2127 struct hisi_sas_cq
*cq
= &hisi_hba
->cq
[i
];
2128 const struct cpumask
*mask
= cq
->irq_mask
;
2130 if (mask
&& !cpumask_intersects(cpu_online_mask
, mask
))
2132 dq
= &hisi_hba
->dq
[i
];
2133 rc
= _hisi_sas_internal_task_abort(hisi_hba
, device
,
2141 dev_err(dev
, "Unrecognised internal abort flag (%d)\n",
/* libsas lldd_port_formed hook: forward to the driver's port-notify path. */
static void hisi_sas_port_formed(struct asd_sas_phy *sas_phy)
{
	hisi_sas_port_notify_formed(sas_phy);
}
2154 static int hisi_sas_write_gpio(struct sas_ha_struct
*sha
, u8 reg_type
,
2155 u8 reg_index
, u8 reg_count
, u8
*write_data
)
2157 struct hisi_hba
*hisi_hba
= sha
->lldd_ha
;
2159 if (!hisi_hba
->hw
->write_gpio
)
2162 return hisi_hba
->hw
->write_gpio(hisi_hba
, reg_type
,
2163 reg_index
, reg_count
, write_data
);
2166 static void hisi_sas_phy_disconnected(struct hisi_sas_phy
*phy
)
2168 struct asd_sas_phy
*sas_phy
= &phy
->sas_phy
;
2169 struct sas_phy
*sphy
= sas_phy
->phy
;
2170 unsigned long flags
;
2172 phy
->phy_attached
= 0;
2176 spin_lock_irqsave(&phy
->lock
, flags
);
2178 sphy
->negotiated_linkrate
= SAS_LINK_RATE_UNKNOWN
;
2180 sphy
->negotiated_linkrate
= SAS_PHY_DISABLED
;
2181 spin_unlock_irqrestore(&phy
->lock
, flags
);
2184 void hisi_sas_phy_down(struct hisi_hba
*hisi_hba
, int phy_no
, int rdy
)
2186 struct hisi_sas_phy
*phy
= &hisi_hba
->phy
[phy_no
];
2187 struct asd_sas_phy
*sas_phy
= &phy
->sas_phy
;
2188 struct sas_ha_struct
*sas_ha
= &hisi_hba
->sha
;
2189 struct device
*dev
= hisi_hba
->dev
;
2192 /* Phy down but ready */
2193 hisi_sas_bytes_dmaed(hisi_hba
, phy_no
);
2194 hisi_sas_port_notify_formed(sas_phy
);
2196 struct hisi_sas_port
*port
= phy
->port
;
2198 if (test_bit(HISI_SAS_RESET_BIT
, &hisi_hba
->flags
) ||
2200 dev_info(dev
, "ignore flutter phy%d down\n", phy_no
);
2203 /* Phy down and not ready */
2204 sas_ha
->notify_phy_event(sas_phy
, PHYE_LOSS_OF_SIGNAL
);
2205 sas_phy_disconnected(sas_phy
);
2208 if (phy
->phy_type
& PORT_TYPE_SAS
) {
2209 int port_id
= port
->id
;
2211 if (!hisi_hba
->hw
->get_wideport_bitmap(hisi_hba
,
2213 port
->port_attached
= 0;
2214 } else if (phy
->phy_type
& PORT_TYPE_SATA
)
2215 port
->port_attached
= 0;
2217 hisi_sas_phy_disconnected(phy
);
2220 EXPORT_SYMBOL_GPL(hisi_sas_phy_down
);
2222 void hisi_sas_sync_irqs(struct hisi_hba
*hisi_hba
)
2226 for (i
= 0; i
< hisi_hba
->cq_nvecs
; i
++) {
2227 struct hisi_sas_cq
*cq
= &hisi_hba
->cq
[i
];
2229 synchronize_irq(cq
->irq_no
);
2232 EXPORT_SYMBOL_GPL(hisi_sas_sync_irqs
);
2234 int hisi_sas_host_reset(struct Scsi_Host
*shost
, int reset_type
)
2236 struct hisi_hba
*hisi_hba
= shost_priv(shost
);
2238 if (reset_type
!= SCSI_ADAPTER_RESET
)
2241 queue_work(hisi_hba
->wq
, &hisi_hba
->rst_work
);
2245 EXPORT_SYMBOL_GPL(hisi_sas_host_reset
);
/* SAS transport template shared by the v1/v2/v3 hw-specific modules. */
struct scsi_transport_template *hisi_sas_stt;
EXPORT_SYMBOL_GPL(hisi_sas_stt);
2250 static struct sas_domain_function_template hisi_sas_transport_ops
= {
2251 .lldd_dev_found
= hisi_sas_dev_found
,
2252 .lldd_dev_gone
= hisi_sas_dev_gone
,
2253 .lldd_execute_task
= hisi_sas_queue_command
,
2254 .lldd_control_phy
= hisi_sas_control_phy
,
2255 .lldd_abort_task
= hisi_sas_abort_task
,
2256 .lldd_abort_task_set
= hisi_sas_abort_task_set
,
2257 .lldd_clear_aca
= hisi_sas_clear_aca
,
2258 .lldd_I_T_nexus_reset
= hisi_sas_I_T_nexus_reset
,
2259 .lldd_lu_reset
= hisi_sas_lu_reset
,
2260 .lldd_query_task
= hisi_sas_query_task
,
2261 .lldd_clear_nexus_ha
= hisi_sas_clear_nexus_ha
,
2262 .lldd_port_formed
= hisi_sas_port_formed
,
2263 .lldd_write_gpio
= hisi_sas_write_gpio
,
2266 void hisi_sas_init_mem(struct hisi_hba
*hisi_hba
)
2268 int i
, s
, j
, max_command_entries
= HISI_SAS_MAX_COMMANDS
;
2269 struct hisi_sas_breakpoint
*sata_breakpoint
= hisi_hba
->sata_breakpoint
;
2271 for (i
= 0; i
< hisi_hba
->queue_count
; i
++) {
2272 struct hisi_sas_cq
*cq
= &hisi_hba
->cq
[i
];
2273 struct hisi_sas_dq
*dq
= &hisi_hba
->dq
[i
];
2274 struct hisi_sas_cmd_hdr
*cmd_hdr
= hisi_hba
->cmd_hdr
[i
];
2276 s
= sizeof(struct hisi_sas_cmd_hdr
);
2277 for (j
= 0; j
< HISI_SAS_QUEUE_SLOTS
; j
++)
2278 memset(&cmd_hdr
[j
], 0, s
);
2282 s
= hisi_hba
->hw
->complete_hdr_size
* HISI_SAS_QUEUE_SLOTS
;
2283 memset(hisi_hba
->complete_hdr
[i
], 0, s
);
2287 s
= sizeof(struct hisi_sas_initial_fis
) * hisi_hba
->n_phy
;
2288 memset(hisi_hba
->initial_fis
, 0, s
);
2290 s
= max_command_entries
* sizeof(struct hisi_sas_iost
);
2291 memset(hisi_hba
->iost
, 0, s
);
2293 s
= max_command_entries
* sizeof(struct hisi_sas_breakpoint
);
2294 memset(hisi_hba
->breakpoint
, 0, s
);
2296 s
= sizeof(struct hisi_sas_sata_breakpoint
);
2297 for (j
= 0; j
< HISI_SAS_MAX_ITCT_ENTRIES
; j
++)
2298 memset(&sata_breakpoint
[j
], 0, s
);
2300 EXPORT_SYMBOL_GPL(hisi_sas_init_mem
);
2302 int hisi_sas_alloc(struct hisi_hba
*hisi_hba
)
2304 struct device
*dev
= hisi_hba
->dev
;
2305 int i
, j
, s
, max_command_entries
= HISI_SAS_MAX_COMMANDS
;
2306 int max_command_entries_ru
, sz_slot_buf_ru
;
2307 int blk_cnt
, slots_per_blk
;
2309 sema_init(&hisi_hba
->sem
, 1);
2310 spin_lock_init(&hisi_hba
->lock
);
2311 for (i
= 0; i
< hisi_hba
->n_phy
; i
++) {
2312 hisi_sas_phy_init(hisi_hba
, i
);
2313 hisi_hba
->port
[i
].port_attached
= 0;
2314 hisi_hba
->port
[i
].id
= -1;
2317 for (i
= 0; i
< HISI_SAS_MAX_DEVICES
; i
++) {
2318 hisi_hba
->devices
[i
].dev_type
= SAS_PHY_UNUSED
;
2319 hisi_hba
->devices
[i
].device_id
= i
;
2320 hisi_hba
->devices
[i
].dev_status
= HISI_SAS_DEV_INIT
;
2323 for (i
= 0; i
< hisi_hba
->queue_count
; i
++) {
2324 struct hisi_sas_cq
*cq
= &hisi_hba
->cq
[i
];
2325 struct hisi_sas_dq
*dq
= &hisi_hba
->dq
[i
];
2327 /* Completion queue structure */
2329 cq
->hisi_hba
= hisi_hba
;
2331 /* Delivery queue structure */
2332 spin_lock_init(&dq
->lock
);
2333 INIT_LIST_HEAD(&dq
->list
);
2335 dq
->hisi_hba
= hisi_hba
;
2337 /* Delivery queue */
2338 s
= sizeof(struct hisi_sas_cmd_hdr
) * HISI_SAS_QUEUE_SLOTS
;
2339 hisi_hba
->cmd_hdr
[i
] = dmam_alloc_coherent(dev
, s
,
2340 &hisi_hba
->cmd_hdr_dma
[i
],
2342 if (!hisi_hba
->cmd_hdr
[i
])
2345 /* Completion queue */
2346 s
= hisi_hba
->hw
->complete_hdr_size
* HISI_SAS_QUEUE_SLOTS
;
2347 hisi_hba
->complete_hdr
[i
] = dmam_alloc_coherent(dev
, s
,
2348 &hisi_hba
->complete_hdr_dma
[i
],
2350 if (!hisi_hba
->complete_hdr
[i
])
2354 s
= HISI_SAS_MAX_ITCT_ENTRIES
* sizeof(struct hisi_sas_itct
);
2355 hisi_hba
->itct
= dmam_alloc_coherent(dev
, s
, &hisi_hba
->itct_dma
,
2357 if (!hisi_hba
->itct
)
2360 hisi_hba
->slot_info
= devm_kcalloc(dev
, max_command_entries
,
2361 sizeof(struct hisi_sas_slot
),
2363 if (!hisi_hba
->slot_info
)
2366 /* roundup to avoid overly large block size */
2367 max_command_entries_ru
= roundup(max_command_entries
, 64);
2368 if (hisi_hba
->prot_mask
& HISI_SAS_DIX_PROT_MASK
)
2369 sz_slot_buf_ru
= sizeof(struct hisi_sas_slot_dif_buf_table
);
2371 sz_slot_buf_ru
= sizeof(struct hisi_sas_slot_buf_table
);
2372 sz_slot_buf_ru
= roundup(sz_slot_buf_ru
, 64);
2373 s
= max(lcm(max_command_entries_ru
, sz_slot_buf_ru
), PAGE_SIZE
);
2374 blk_cnt
= (max_command_entries_ru
* sz_slot_buf_ru
) / s
;
2375 slots_per_blk
= s
/ sz_slot_buf_ru
;
2377 for (i
= 0; i
< blk_cnt
; i
++) {
2378 int slot_index
= i
* slots_per_blk
;
2382 buf
= dmam_alloc_coherent(dev
, s
, &buf_dma
,
2387 for (j
= 0; j
< slots_per_blk
; j
++, slot_index
++) {
2388 struct hisi_sas_slot
*slot
;
2390 slot
= &hisi_hba
->slot_info
[slot_index
];
2392 slot
->buf_dma
= buf_dma
;
2393 slot
->idx
= slot_index
;
2395 buf
+= sz_slot_buf_ru
;
2396 buf_dma
+= sz_slot_buf_ru
;
2400 s
= max_command_entries
* sizeof(struct hisi_sas_iost
);
2401 hisi_hba
->iost
= dmam_alloc_coherent(dev
, s
, &hisi_hba
->iost_dma
,
2403 if (!hisi_hba
->iost
)
2406 s
= max_command_entries
* sizeof(struct hisi_sas_breakpoint
);
2407 hisi_hba
->breakpoint
= dmam_alloc_coherent(dev
, s
,
2408 &hisi_hba
->breakpoint_dma
,
2410 if (!hisi_hba
->breakpoint
)
2413 hisi_hba
->slot_index_count
= max_command_entries
;
2414 s
= hisi_hba
->slot_index_count
/ BITS_PER_BYTE
;
2415 hisi_hba
->slot_index_tags
= devm_kzalloc(dev
, s
, GFP_KERNEL
);
2416 if (!hisi_hba
->slot_index_tags
)
2419 s
= sizeof(struct hisi_sas_initial_fis
) * HISI_SAS_MAX_PHYS
;
2420 hisi_hba
->initial_fis
= dmam_alloc_coherent(dev
, s
,
2421 &hisi_hba
->initial_fis_dma
,
2423 if (!hisi_hba
->initial_fis
)
2426 s
= HISI_SAS_MAX_ITCT_ENTRIES
* sizeof(struct hisi_sas_sata_breakpoint
);
2427 hisi_hba
->sata_breakpoint
= dmam_alloc_coherent(dev
, s
,
2428 &hisi_hba
->sata_breakpoint_dma
,
2430 if (!hisi_hba
->sata_breakpoint
)
2433 hisi_sas_slot_index_init(hisi_hba
);
2434 hisi_hba
->last_slot_index
= HISI_SAS_UNRESERVED_IPTT
;
2436 hisi_hba
->wq
= create_singlethread_workqueue(dev_name(dev
));
2437 if (!hisi_hba
->wq
) {
2438 dev_err(dev
, "sas_alloc: failed to create workqueue\n");
2446 EXPORT_SYMBOL_GPL(hisi_sas_alloc
);
2448 void hisi_sas_free(struct hisi_hba
*hisi_hba
)
2452 for (i
= 0; i
< hisi_hba
->n_phy
; i
++) {
2453 struct hisi_sas_phy
*phy
= &hisi_hba
->phy
[i
];
2455 del_timer_sync(&phy
->timer
);
2459 destroy_workqueue(hisi_hba
->wq
);
2461 EXPORT_SYMBOL_GPL(hisi_sas_free
);
2463 void hisi_sas_rst_work_handler(struct work_struct
*work
)
2465 struct hisi_hba
*hisi_hba
=
2466 container_of(work
, struct hisi_hba
, rst_work
);
2468 hisi_sas_controller_reset(hisi_hba
);
2470 EXPORT_SYMBOL_GPL(hisi_sas_rst_work_handler
);
2472 void hisi_sas_sync_rst_work_handler(struct work_struct
*work
)
2474 struct hisi_sas_rst
*rst
=
2475 container_of(work
, struct hisi_sas_rst
, work
);
2477 if (!hisi_sas_controller_reset(rst
->hisi_hba
))
2479 complete(rst
->completion
);
2481 EXPORT_SYMBOL_GPL(hisi_sas_sync_rst_work_handler
);
2483 int hisi_sas_get_fw_info(struct hisi_hba
*hisi_hba
)
2485 struct device
*dev
= hisi_hba
->dev
;
2486 struct platform_device
*pdev
= hisi_hba
->platform_dev
;
2487 struct device_node
*np
= pdev
? pdev
->dev
.of_node
: NULL
;
2490 if (device_property_read_u8_array(dev
, "sas-addr", hisi_hba
->sas_addr
,
2492 dev_err(dev
, "could not get property sas-addr\n");
2498 * These properties are only required for platform device-based
2499 * controller with DT firmware.
2501 hisi_hba
->ctrl
= syscon_regmap_lookup_by_phandle(np
,
2502 "hisilicon,sas-syscon");
2503 if (IS_ERR(hisi_hba
->ctrl
)) {
2504 dev_err(dev
, "could not get syscon\n");
2508 if (device_property_read_u32(dev
, "ctrl-reset-reg",
2509 &hisi_hba
->ctrl_reset_reg
)) {
2510 dev_err(dev
, "could not get property ctrl-reset-reg\n");
2514 if (device_property_read_u32(dev
, "ctrl-reset-sts-reg",
2515 &hisi_hba
->ctrl_reset_sts_reg
)) {
2516 dev_err(dev
, "could not get property ctrl-reset-sts-reg\n");
2520 if (device_property_read_u32(dev
, "ctrl-clock-ena-reg",
2521 &hisi_hba
->ctrl_clock_ena_reg
)) {
2522 dev_err(dev
, "could not get property ctrl-clock-ena-reg\n");
2527 refclk
= devm_clk_get(dev
, NULL
);
2529 dev_dbg(dev
, "no ref clk property\n");
2531 hisi_hba
->refclk_frequency_mhz
= clk_get_rate(refclk
) / 1000000;
2533 if (device_property_read_u32(dev
, "phy-count", &hisi_hba
->n_phy
)) {
2534 dev_err(dev
, "could not get property phy-count\n");
2538 if (device_property_read_u32(dev
, "queue-count",
2539 &hisi_hba
->queue_count
)) {
2540 dev_err(dev
, "could not get property queue-count\n");
2546 EXPORT_SYMBOL_GPL(hisi_sas_get_fw_info
);
2548 static struct Scsi_Host
*hisi_sas_shost_alloc(struct platform_device
*pdev
,
2549 const struct hisi_sas_hw
*hw
)
2551 struct resource
*res
;
2552 struct Scsi_Host
*shost
;
2553 struct hisi_hba
*hisi_hba
;
2554 struct device
*dev
= &pdev
->dev
;
2557 shost
= scsi_host_alloc(hw
->sht
, sizeof(*hisi_hba
));
2559 dev_err(dev
, "scsi host alloc failed\n");
2562 hisi_hba
= shost_priv(shost
);
2564 INIT_WORK(&hisi_hba
->rst_work
, hisi_sas_rst_work_handler
);
2566 hisi_hba
->dev
= dev
;
2567 hisi_hba
->platform_dev
= pdev
;
2568 hisi_hba
->shost
= shost
;
2569 SHOST_TO_SAS_HA(shost
) = &hisi_hba
->sha
;
2571 timer_setup(&hisi_hba
->timer
, NULL
, 0);
2573 if (hisi_sas_get_fw_info(hisi_hba
) < 0)
2576 error
= dma_set_mask_and_coherent(dev
, DMA_BIT_MASK(64));
2578 error
= dma_set_mask_and_coherent(dev
, DMA_BIT_MASK(32));
2581 dev_err(dev
, "No usable DMA addressing method\n");
2585 hisi_hba
->regs
= devm_platform_ioremap_resource(pdev
, 0);
2586 if (IS_ERR(hisi_hba
->regs
))
2589 res
= platform_get_resource(pdev
, IORESOURCE_MEM
, 1);
2591 hisi_hba
->sgpio_regs
= devm_ioremap_resource(dev
, res
);
2592 if (IS_ERR(hisi_hba
->sgpio_regs
))
2596 if (hisi_sas_alloc(hisi_hba
)) {
2597 hisi_sas_free(hisi_hba
);
2603 scsi_host_put(shost
);
2604 dev_err(dev
, "shost alloc failed\n");
2608 int hisi_sas_probe(struct platform_device
*pdev
,
2609 const struct hisi_sas_hw
*hw
)
2611 struct Scsi_Host
*shost
;
2612 struct hisi_hba
*hisi_hba
;
2613 struct device
*dev
= &pdev
->dev
;
2614 struct asd_sas_phy
**arr_phy
;
2615 struct asd_sas_port
**arr_port
;
2616 struct sas_ha_struct
*sha
;
2617 int rc
, phy_nr
, port_nr
, i
;
2619 shost
= hisi_sas_shost_alloc(pdev
, hw
);
2623 sha
= SHOST_TO_SAS_HA(shost
);
2624 hisi_hba
= shost_priv(shost
);
2625 platform_set_drvdata(pdev
, sha
);
2627 phy_nr
= port_nr
= hisi_hba
->n_phy
;
2629 arr_phy
= devm_kcalloc(dev
, phy_nr
, sizeof(void *), GFP_KERNEL
);
2630 arr_port
= devm_kcalloc(dev
, port_nr
, sizeof(void *), GFP_KERNEL
);
2631 if (!arr_phy
|| !arr_port
) {
2636 sha
->sas_phy
= arr_phy
;
2637 sha
->sas_port
= arr_port
;
2638 sha
->lldd_ha
= hisi_hba
;
2640 shost
->transportt
= hisi_sas_stt
;
2641 shost
->max_id
= HISI_SAS_MAX_DEVICES
;
2642 shost
->max_lun
= ~0;
2643 shost
->max_channel
= 1;
2644 shost
->max_cmd_len
= 16;
2645 if (hisi_hba
->hw
->slot_index_alloc
) {
2646 shost
->can_queue
= HISI_SAS_MAX_COMMANDS
;
2647 shost
->cmd_per_lun
= HISI_SAS_MAX_COMMANDS
;
2649 shost
->can_queue
= HISI_SAS_UNRESERVED_IPTT
;
2650 shost
->cmd_per_lun
= HISI_SAS_UNRESERVED_IPTT
;
2653 sha
->sas_ha_name
= DRV_NAME
;
2654 sha
->dev
= hisi_hba
->dev
;
2655 sha
->lldd_module
= THIS_MODULE
;
2656 sha
->sas_addr
= &hisi_hba
->sas_addr
[0];
2657 sha
->num_phys
= hisi_hba
->n_phy
;
2658 sha
->core
.shost
= hisi_hba
->shost
;
2660 for (i
= 0; i
< hisi_hba
->n_phy
; i
++) {
2661 sha
->sas_phy
[i
] = &hisi_hba
->phy
[i
].sas_phy
;
2662 sha
->sas_port
[i
] = &hisi_hba
->port
[i
].sas_port
;
2665 rc
= scsi_add_host(shost
, &pdev
->dev
);
2669 rc
= sas_register_ha(sha
);
2671 goto err_out_register_ha
;
2673 rc
= hisi_hba
->hw
->hw_init(hisi_hba
);
2675 goto err_out_register_ha
;
2677 scsi_scan_host(shost
);
2681 err_out_register_ha
:
2682 scsi_remove_host(shost
);
2684 hisi_sas_debugfs_exit(hisi_hba
);
2685 hisi_sas_free(hisi_hba
);
2686 scsi_host_put(shost
);
2689 EXPORT_SYMBOL_GPL(hisi_sas_probe
);
2691 struct dentry
*hisi_sas_debugfs_dir
;
2693 static void hisi_sas_debugfs_snapshot_cq_reg(struct hisi_hba
*hisi_hba
)
2695 int queue_entry_size
= hisi_hba
->hw
->complete_hdr_size
;
2696 int dump_index
= hisi_hba
->debugfs_dump_index
;
2699 for (i
= 0; i
< hisi_hba
->queue_count
; i
++)
2700 memcpy(hisi_hba
->debugfs_cq
[dump_index
][i
].complete_hdr
,
2701 hisi_hba
->complete_hdr
[i
],
2702 HISI_SAS_QUEUE_SLOTS
* queue_entry_size
);
2705 static void hisi_sas_debugfs_snapshot_dq_reg(struct hisi_hba
*hisi_hba
)
2707 int queue_entry_size
= sizeof(struct hisi_sas_cmd_hdr
);
2708 int dump_index
= hisi_hba
->debugfs_dump_index
;
2711 for (i
= 0; i
< hisi_hba
->queue_count
; i
++) {
2712 struct hisi_sas_cmd_hdr
*debugfs_cmd_hdr
, *cmd_hdr
;
2715 debugfs_cmd_hdr
= hisi_hba
->debugfs_dq
[dump_index
][i
].hdr
;
2716 cmd_hdr
= hisi_hba
->cmd_hdr
[i
];
2718 for (j
= 0; j
< HISI_SAS_QUEUE_SLOTS
; j
++)
2719 memcpy(&debugfs_cmd_hdr
[j
], &cmd_hdr
[j
],
2724 static void hisi_sas_debugfs_snapshot_port_reg(struct hisi_hba
*hisi_hba
)
2726 int dump_index
= hisi_hba
->debugfs_dump_index
;
2727 const struct hisi_sas_debugfs_reg
*port
=
2728 hisi_hba
->hw
->debugfs_reg_port
;
2733 for (phy_cnt
= 0; phy_cnt
< hisi_hba
->n_phy
; phy_cnt
++) {
2734 databuf
= hisi_hba
->debugfs_port_reg
[dump_index
][phy_cnt
].data
;
2735 for (i
= 0; i
< port
->count
; i
++, databuf
++) {
2736 offset
= port
->base_off
+ 4 * i
;
2737 *databuf
= port
->read_port_reg(hisi_hba
, phy_cnt
,
2743 static void hisi_sas_debugfs_snapshot_global_reg(struct hisi_hba
*hisi_hba
)
2745 int dump_index
= hisi_hba
->debugfs_dump_index
;
2746 u32
*databuf
= hisi_hba
->debugfs_regs
[dump_index
][DEBUGFS_GLOBAL
].data
;
2747 const struct hisi_sas_hw
*hw
= hisi_hba
->hw
;
2748 const struct hisi_sas_debugfs_reg
*global
=
2749 hw
->debugfs_reg_array
[DEBUGFS_GLOBAL
];
2752 for (i
= 0; i
< global
->count
; i
++, databuf
++)
2753 *databuf
= global
->read_global_reg(hisi_hba
, 4 * i
);
2756 static void hisi_sas_debugfs_snapshot_axi_reg(struct hisi_hba
*hisi_hba
)
2758 int dump_index
= hisi_hba
->debugfs_dump_index
;
2759 u32
*databuf
= hisi_hba
->debugfs_regs
[dump_index
][DEBUGFS_AXI
].data
;
2760 const struct hisi_sas_hw
*hw
= hisi_hba
->hw
;
2761 const struct hisi_sas_debugfs_reg
*axi
=
2762 hw
->debugfs_reg_array
[DEBUGFS_AXI
];
2765 for (i
= 0; i
< axi
->count
; i
++, databuf
++)
2766 *databuf
= axi
->read_global_reg(hisi_hba
,
2767 4 * i
+ axi
->base_off
);
2770 static void hisi_sas_debugfs_snapshot_ras_reg(struct hisi_hba
*hisi_hba
)
2772 int dump_index
= hisi_hba
->debugfs_dump_index
;
2773 u32
*databuf
= hisi_hba
->debugfs_regs
[dump_index
][DEBUGFS_RAS
].data
;
2774 const struct hisi_sas_hw
*hw
= hisi_hba
->hw
;
2775 const struct hisi_sas_debugfs_reg
*ras
=
2776 hw
->debugfs_reg_array
[DEBUGFS_RAS
];
2779 for (i
= 0; i
< ras
->count
; i
++, databuf
++)
2780 *databuf
= ras
->read_global_reg(hisi_hba
,
2781 4 * i
+ ras
->base_off
);
2784 static void hisi_sas_debugfs_snapshot_itct_reg(struct hisi_hba
*hisi_hba
)
2786 int dump_index
= hisi_hba
->debugfs_dump_index
;
2787 void *cachebuf
= hisi_hba
->debugfs_itct_cache
[dump_index
].cache
;
2788 void *databuf
= hisi_hba
->debugfs_itct
[dump_index
].itct
;
2789 struct hisi_sas_itct
*itct
;
2792 hisi_hba
->hw
->read_iost_itct_cache(hisi_hba
, HISI_SAS_ITCT_CACHE
,
2795 itct
= hisi_hba
->itct
;
2797 for (i
= 0; i
< HISI_SAS_MAX_ITCT_ENTRIES
; i
++, itct
++) {
2798 memcpy(databuf
, itct
, sizeof(struct hisi_sas_itct
));
2799 databuf
+= sizeof(struct hisi_sas_itct
);
2803 static void hisi_sas_debugfs_snapshot_iost_reg(struct hisi_hba
*hisi_hba
)
2805 int dump_index
= hisi_hba
->debugfs_dump_index
;
2806 int max_command_entries
= HISI_SAS_MAX_COMMANDS
;
2807 void *cachebuf
= hisi_hba
->debugfs_iost_cache
[dump_index
].cache
;
2808 void *databuf
= hisi_hba
->debugfs_iost
[dump_index
].iost
;
2809 struct hisi_sas_iost
*iost
;
2812 hisi_hba
->hw
->read_iost_itct_cache(hisi_hba
, HISI_SAS_IOST_CACHE
,
2815 iost
= hisi_hba
->iost
;
2817 for (i
= 0; i
< max_command_entries
; i
++, iost
++) {
2818 memcpy(databuf
, iost
, sizeof(struct hisi_sas_iost
));
2819 databuf
+= sizeof(struct hisi_sas_iost
);
2824 hisi_sas_debugfs_to_reg_name(int off
, int base_off
,
2825 const struct hisi_sas_debugfs_reg_lu
*lu
)
2827 for (; lu
->name
; lu
++) {
2828 if (off
== lu
->off
- base_off
)
2835 static void hisi_sas_debugfs_print_reg(u32
*regs_val
, const void *ptr
,
2838 const struct hisi_sas_debugfs_reg
*reg
= ptr
;
2841 for (i
= 0; i
< reg
->count
; i
++) {
2845 name
= hisi_sas_debugfs_to_reg_name(off
, reg
->base_off
,
2849 seq_printf(s
, "0x%08x 0x%08x %s\n", off
,
2852 seq_printf(s
, "0x%08x 0x%08x\n", off
,
2857 static int hisi_sas_debugfs_global_show(struct seq_file
*s
, void *p
)
2859 struct hisi_sas_debugfs_regs
*global
= s
->private;
2860 struct hisi_hba
*hisi_hba
= global
->hisi_hba
;
2861 const struct hisi_sas_hw
*hw
= hisi_hba
->hw
;
2862 const void *reg_global
= hw
->debugfs_reg_array
[DEBUGFS_GLOBAL
];
2864 hisi_sas_debugfs_print_reg(global
->data
,
2870 static int hisi_sas_debugfs_global_open(struct inode
*inode
, struct file
*filp
)
2872 return single_open(filp
, hisi_sas_debugfs_global_show
,
2876 static const struct file_operations hisi_sas_debugfs_global_fops
= {
2877 .open
= hisi_sas_debugfs_global_open
,
2879 .llseek
= seq_lseek
,
2880 .release
= single_release
,
2881 .owner
= THIS_MODULE
,
2884 static int hisi_sas_debugfs_axi_show(struct seq_file
*s
, void *p
)
2886 struct hisi_sas_debugfs_regs
*axi
= s
->private;
2887 struct hisi_hba
*hisi_hba
= axi
->hisi_hba
;
2888 const struct hisi_sas_hw
*hw
= hisi_hba
->hw
;
2889 const void *reg_axi
= hw
->debugfs_reg_array
[DEBUGFS_AXI
];
2891 hisi_sas_debugfs_print_reg(axi
->data
,
2897 static int hisi_sas_debugfs_axi_open(struct inode
*inode
, struct file
*filp
)
2899 return single_open(filp
, hisi_sas_debugfs_axi_show
,
2903 static const struct file_operations hisi_sas_debugfs_axi_fops
= {
2904 .open
= hisi_sas_debugfs_axi_open
,
2906 .llseek
= seq_lseek
,
2907 .release
= single_release
,
2908 .owner
= THIS_MODULE
,
2911 static int hisi_sas_debugfs_ras_show(struct seq_file
*s
, void *p
)
2913 struct hisi_sas_debugfs_regs
*ras
= s
->private;
2914 struct hisi_hba
*hisi_hba
= ras
->hisi_hba
;
2915 const struct hisi_sas_hw
*hw
= hisi_hba
->hw
;
2916 const void *reg_ras
= hw
->debugfs_reg_array
[DEBUGFS_RAS
];
2918 hisi_sas_debugfs_print_reg(ras
->data
,
2924 static int hisi_sas_debugfs_ras_open(struct inode
*inode
, struct file
*filp
)
2926 return single_open(filp
, hisi_sas_debugfs_ras_show
,
2930 static const struct file_operations hisi_sas_debugfs_ras_fops
= {
2931 .open
= hisi_sas_debugfs_ras_open
,
2933 .llseek
= seq_lseek
,
2934 .release
= single_release
,
2935 .owner
= THIS_MODULE
,
2938 static int hisi_sas_debugfs_port_show(struct seq_file
*s
, void *p
)
2940 struct hisi_sas_debugfs_port
*port
= s
->private;
2941 struct hisi_sas_phy
*phy
= port
->phy
;
2942 struct hisi_hba
*hisi_hba
= phy
->hisi_hba
;
2943 const struct hisi_sas_hw
*hw
= hisi_hba
->hw
;
2944 const struct hisi_sas_debugfs_reg
*reg_port
= hw
->debugfs_reg_port
;
2946 hisi_sas_debugfs_print_reg(port
->data
, reg_port
, s
);
2951 static int hisi_sas_debugfs_port_open(struct inode
*inode
, struct file
*filp
)
2953 return single_open(filp
, hisi_sas_debugfs_port_show
, inode
->i_private
);
2956 static const struct file_operations hisi_sas_debugfs_port_fops
= {
2957 .open
= hisi_sas_debugfs_port_open
,
2959 .llseek
= seq_lseek
,
2960 .release
= single_release
,
2961 .owner
= THIS_MODULE
,
2964 static void hisi_sas_show_row_64(struct seq_file
*s
, int index
,
2965 int sz
, __le64
*ptr
)
2969 /* completion header size not fixed per HW version */
2970 seq_printf(s
, "index %04d:\n\t", index
);
2971 for (i
= 1; i
<= sz
/ 8; i
++, ptr
++) {
2972 seq_printf(s
, " 0x%016llx", le64_to_cpu(*ptr
));
2974 seq_puts(s
, "\n\t");
2980 static void hisi_sas_show_row_32(struct seq_file
*s
, int index
,
2981 int sz
, __le32
*ptr
)
2985 /* completion header size not fixed per HW version */
2986 seq_printf(s
, "index %04d:\n\t", index
);
2987 for (i
= 1; i
<= sz
/ 4; i
++, ptr
++) {
2988 seq_printf(s
, " 0x%08x", le32_to_cpu(*ptr
));
2990 seq_puts(s
, "\n\t");
2995 static void hisi_sas_cq_show_slot(struct seq_file
*s
, int slot
,
2996 struct hisi_sas_debugfs_cq
*debugfs_cq
)
2998 struct hisi_sas_cq
*cq
= debugfs_cq
->cq
;
2999 struct hisi_hba
*hisi_hba
= cq
->hisi_hba
;
3000 __le32
*complete_hdr
= debugfs_cq
->complete_hdr
+
3001 (hisi_hba
->hw
->complete_hdr_size
* slot
);
3003 hisi_sas_show_row_32(s
, slot
,
3004 hisi_hba
->hw
->complete_hdr_size
,
3008 static int hisi_sas_debugfs_cq_show(struct seq_file
*s
, void *p
)
3010 struct hisi_sas_debugfs_cq
*debugfs_cq
= s
->private;
3013 for (slot
= 0; slot
< HISI_SAS_QUEUE_SLOTS
; slot
++) {
3014 hisi_sas_cq_show_slot(s
, slot
, debugfs_cq
);
3019 static int hisi_sas_debugfs_cq_open(struct inode
*inode
, struct file
*filp
)
3021 return single_open(filp
, hisi_sas_debugfs_cq_show
, inode
->i_private
);
3024 static const struct file_operations hisi_sas_debugfs_cq_fops
= {
3025 .open
= hisi_sas_debugfs_cq_open
,
3027 .llseek
= seq_lseek
,
3028 .release
= single_release
,
3029 .owner
= THIS_MODULE
,
3032 static void hisi_sas_dq_show_slot(struct seq_file
*s
, int slot
, void *dq_ptr
)
3034 struct hisi_sas_debugfs_dq
*debugfs_dq
= dq_ptr
;
3035 void *cmd_queue
= debugfs_dq
->hdr
;
3036 __le32
*cmd_hdr
= cmd_queue
+
3037 sizeof(struct hisi_sas_cmd_hdr
) * slot
;
3039 hisi_sas_show_row_32(s
, slot
, sizeof(struct hisi_sas_cmd_hdr
), cmd_hdr
);
3042 static int hisi_sas_debugfs_dq_show(struct seq_file
*s
, void *p
)
3046 for (slot
= 0; slot
< HISI_SAS_QUEUE_SLOTS
; slot
++) {
3047 hisi_sas_dq_show_slot(s
, slot
, s
->private);
3052 static int hisi_sas_debugfs_dq_open(struct inode
*inode
, struct file
*filp
)
3054 return single_open(filp
, hisi_sas_debugfs_dq_show
, inode
->i_private
);
3057 static const struct file_operations hisi_sas_debugfs_dq_fops
= {
3058 .open
= hisi_sas_debugfs_dq_open
,
3060 .llseek
= seq_lseek
,
3061 .release
= single_release
,
3062 .owner
= THIS_MODULE
,
3065 static int hisi_sas_debugfs_iost_show(struct seq_file
*s
, void *p
)
3067 struct hisi_sas_debugfs_iost
*debugfs_iost
= s
->private;
3068 struct hisi_sas_iost
*iost
= debugfs_iost
->iost
;
3069 int i
, max_command_entries
= HISI_SAS_MAX_COMMANDS
;
3071 for (i
= 0; i
< max_command_entries
; i
++, iost
++) {
3072 __le64
*data
= &iost
->qw0
;
3074 hisi_sas_show_row_64(s
, i
, sizeof(*iost
), data
);
3080 static int hisi_sas_debugfs_iost_open(struct inode
*inode
, struct file
*filp
)
3082 return single_open(filp
, hisi_sas_debugfs_iost_show
, inode
->i_private
);
3085 static const struct file_operations hisi_sas_debugfs_iost_fops
= {
3086 .open
= hisi_sas_debugfs_iost_open
,
3088 .llseek
= seq_lseek
,
3089 .release
= single_release
,
3090 .owner
= THIS_MODULE
,
3093 static int hisi_sas_debugfs_iost_cache_show(struct seq_file
*s
, void *p
)
3095 struct hisi_sas_debugfs_iost_cache
*debugfs_iost_cache
= s
->private;
3096 struct hisi_sas_iost_itct_cache
*iost_cache
= debugfs_iost_cache
->cache
;
3097 u32 cache_size
= HISI_SAS_IOST_ITCT_CACHE_DW_SZ
* 4;
3101 for (i
= 0; i
< HISI_SAS_IOST_ITCT_CACHE_NUM
; i
++, iost_cache
++) {
3103 * Data struct of IOST cache:
3104 * Data[1]: BIT0~15: Table index
3106 * Data[2]~[9]: IOST table
3108 tab_idx
= (iost_cache
->data
[1] & 0xffff);
3109 iost
= (__le64
*)iost_cache
;
3111 hisi_sas_show_row_64(s
, tab_idx
, cache_size
, iost
);
3117 static int hisi_sas_debugfs_iost_cache_open(struct inode
*inode
,
3120 return single_open(filp
, hisi_sas_debugfs_iost_cache_show
,
3124 static const struct file_operations hisi_sas_debugfs_iost_cache_fops
= {
3125 .open
= hisi_sas_debugfs_iost_cache_open
,
3127 .llseek
= seq_lseek
,
3128 .release
= single_release
,
3129 .owner
= THIS_MODULE
,
3132 static int hisi_sas_debugfs_itct_show(struct seq_file
*s
, void *p
)
3135 struct hisi_sas_debugfs_itct
*debugfs_itct
= s
->private;
3136 struct hisi_sas_itct
*itct
= debugfs_itct
->itct
;
3138 for (i
= 0; i
< HISI_SAS_MAX_ITCT_ENTRIES
; i
++, itct
++) {
3139 __le64
*data
= &itct
->qw0
;
3141 hisi_sas_show_row_64(s
, i
, sizeof(*itct
), data
);
3147 static int hisi_sas_debugfs_itct_open(struct inode
*inode
, struct file
*filp
)
3149 return single_open(filp
, hisi_sas_debugfs_itct_show
, inode
->i_private
);
3152 static const struct file_operations hisi_sas_debugfs_itct_fops
= {
3153 .open
= hisi_sas_debugfs_itct_open
,
3155 .llseek
= seq_lseek
,
3156 .release
= single_release
,
3157 .owner
= THIS_MODULE
,
3160 static int hisi_sas_debugfs_itct_cache_show(struct seq_file
*s
, void *p
)
3162 struct hisi_sas_debugfs_itct_cache
*debugfs_itct_cache
= s
->private;
3163 struct hisi_sas_iost_itct_cache
*itct_cache
= debugfs_itct_cache
->cache
;
3164 u32 cache_size
= HISI_SAS_IOST_ITCT_CACHE_DW_SZ
* 4;
3168 for (i
= 0; i
< HISI_SAS_IOST_ITCT_CACHE_NUM
; i
++, itct_cache
++) {
3170 * Data struct of ITCT cache:
3171 * Data[1]: BIT0~15: Table index
3173 * Data[2]~[9]: ITCT table
3175 tab_idx
= itct_cache
->data
[1] & 0xffff;
3176 itct
= (__le64
*)itct_cache
;
3178 hisi_sas_show_row_64(s
, tab_idx
, cache_size
, itct
);
3184 static int hisi_sas_debugfs_itct_cache_open(struct inode
*inode
,
3187 return single_open(filp
, hisi_sas_debugfs_itct_cache_show
,
3191 static const struct file_operations hisi_sas_debugfs_itct_cache_fops
= {
3192 .open
= hisi_sas_debugfs_itct_cache_open
,
3194 .llseek
= seq_lseek
,
3195 .release
= single_release
,
3196 .owner
= THIS_MODULE
,
3199 static void hisi_sas_debugfs_create_files(struct hisi_hba
*hisi_hba
)
3201 u64
*debugfs_timestamp
;
3202 int dump_index
= hisi_hba
->debugfs_dump_index
;
3203 struct dentry
*dump_dentry
;
3204 struct dentry
*dentry
;
3210 snprintf(name
, 256, "%d", dump_index
);
3212 dump_dentry
= debugfs_create_dir(name
, hisi_hba
->debugfs_dump_dentry
);
3214 debugfs_timestamp
= &hisi_hba
->debugfs_timestamp
[dump_index
];
3216 debugfs_create_u64("timestamp", 0400, dump_dentry
,
3219 debugfs_create_file("global", 0400, dump_dentry
,
3220 &hisi_hba
->debugfs_regs
[dump_index
][DEBUGFS_GLOBAL
],
3221 &hisi_sas_debugfs_global_fops
);
3223 /* Create port dir and files */
3224 dentry
= debugfs_create_dir("port", dump_dentry
);
3225 for (p
= 0; p
< hisi_hba
->n_phy
; p
++) {
3226 snprintf(name
, 256, "%d", p
);
3228 debugfs_create_file(name
, 0400, dentry
,
3229 &hisi_hba
->debugfs_port_reg
[dump_index
][p
],
3230 &hisi_sas_debugfs_port_fops
);
3233 /* Create CQ dir and files */
3234 dentry
= debugfs_create_dir("cq", dump_dentry
);
3235 for (c
= 0; c
< hisi_hba
->queue_count
; c
++) {
3236 snprintf(name
, 256, "%d", c
);
3238 debugfs_create_file(name
, 0400, dentry
,
3239 &hisi_hba
->debugfs_cq
[dump_index
][c
],
3240 &hisi_sas_debugfs_cq_fops
);
3243 /* Create DQ dir and files */
3244 dentry
= debugfs_create_dir("dq", dump_dentry
);
3245 for (d
= 0; d
< hisi_hba
->queue_count
; d
++) {
3246 snprintf(name
, 256, "%d", d
);
3248 debugfs_create_file(name
, 0400, dentry
,
3249 &hisi_hba
->debugfs_dq
[dump_index
][d
],
3250 &hisi_sas_debugfs_dq_fops
);
3253 debugfs_create_file("iost", 0400, dump_dentry
,
3254 &hisi_hba
->debugfs_iost
[dump_index
],
3255 &hisi_sas_debugfs_iost_fops
);
3257 debugfs_create_file("iost_cache", 0400, dump_dentry
,
3258 &hisi_hba
->debugfs_iost_cache
[dump_index
],
3259 &hisi_sas_debugfs_iost_cache_fops
);
3261 debugfs_create_file("itct", 0400, dump_dentry
,
3262 &hisi_hba
->debugfs_itct
[dump_index
],
3263 &hisi_sas_debugfs_itct_fops
);
3265 debugfs_create_file("itct_cache", 0400, dump_dentry
,
3266 &hisi_hba
->debugfs_itct_cache
[dump_index
],
3267 &hisi_sas_debugfs_itct_cache_fops
);
3269 debugfs_create_file("axi", 0400, dump_dentry
,
3270 &hisi_hba
->debugfs_regs
[dump_index
][DEBUGFS_AXI
],
3271 &hisi_sas_debugfs_axi_fops
);
3273 debugfs_create_file("ras", 0400, dump_dentry
,
3274 &hisi_hba
->debugfs_regs
[dump_index
][DEBUGFS_RAS
],
3275 &hisi_sas_debugfs_ras_fops
);
3280 static void hisi_sas_debugfs_snapshot_regs(struct hisi_hba
*hisi_hba
)
3282 hisi_hba
->hw
->snapshot_prepare(hisi_hba
);
3284 hisi_sas_debugfs_snapshot_global_reg(hisi_hba
);
3285 hisi_sas_debugfs_snapshot_port_reg(hisi_hba
);
3286 hisi_sas_debugfs_snapshot_axi_reg(hisi_hba
);
3287 hisi_sas_debugfs_snapshot_ras_reg(hisi_hba
);
3288 hisi_sas_debugfs_snapshot_cq_reg(hisi_hba
);
3289 hisi_sas_debugfs_snapshot_dq_reg(hisi_hba
);
3290 hisi_sas_debugfs_snapshot_itct_reg(hisi_hba
);
3291 hisi_sas_debugfs_snapshot_iost_reg(hisi_hba
);
3293 hisi_sas_debugfs_create_files(hisi_hba
);
3295 hisi_hba
->hw
->snapshot_restore(hisi_hba
);
3298 static ssize_t
hisi_sas_debugfs_trigger_dump_write(struct file
*file
,
3299 const char __user
*user_buf
,
3300 size_t count
, loff_t
*ppos
)
3302 struct hisi_hba
*hisi_hba
= file
->f_inode
->i_private
;
3305 if (hisi_hba
->debugfs_dump_index
>= hisi_sas_debugfs_dump_count
)
3311 if (copy_from_user(buf
, user_buf
, count
))
3317 queue_work(hisi_hba
->wq
, &hisi_hba
->debugfs_work
);
3322 static const struct file_operations hisi_sas_debugfs_trigger_dump_fops
= {
3323 .write
= &hisi_sas_debugfs_trigger_dump_write
,
3324 .owner
= THIS_MODULE
,
3328 HISI_SAS_BIST_LOOPBACK_MODE_DIGITAL
= 0,
3329 HISI_SAS_BIST_LOOPBACK_MODE_SERDES
,
3330 HISI_SAS_BIST_LOOPBACK_MODE_REMOTE
,
3334 HISI_SAS_BIST_CODE_MODE_PRBS7
= 0,
3335 HISI_SAS_BIST_CODE_MODE_PRBS23
,
3336 HISI_SAS_BIST_CODE_MODE_PRBS31
,
3337 HISI_SAS_BIST_CODE_MODE_JTPAT
,
3338 HISI_SAS_BIST_CODE_MODE_CJTPAT
,
3339 HISI_SAS_BIST_CODE_MODE_SCRAMBED_0
,
3340 HISI_SAS_BIST_CODE_MODE_TRAIN
,
3341 HISI_SAS_BIST_CODE_MODE_TRAIN_DONE
,
3342 HISI_SAS_BIST_CODE_MODE_HFTP
,
3343 HISI_SAS_BIST_CODE_MODE_MFTP
,
3344 HISI_SAS_BIST_CODE_MODE_LFTP
,
3345 HISI_SAS_BIST_CODE_MODE_FIXED_DATA
,
3348 static const struct {
3351 } hisi_sas_debugfs_loop_linkrate
[] = {
3352 { SAS_LINK_RATE_1_5_GBPS
, "1.5 Gbit" },
3353 { SAS_LINK_RATE_3_0_GBPS
, "3.0 Gbit" },
3354 { SAS_LINK_RATE_6_0_GBPS
, "6.0 Gbit" },
3355 { SAS_LINK_RATE_12_0_GBPS
, "12.0 Gbit" },
3358 static int hisi_sas_debugfs_bist_linkrate_show(struct seq_file
*s
, void *p
)
3360 struct hisi_hba
*hisi_hba
= s
->private;
3363 for (i
= 0; i
< ARRAY_SIZE(hisi_sas_debugfs_loop_linkrate
); i
++) {
3364 int match
= (hisi_hba
->debugfs_bist_linkrate
==
3365 hisi_sas_debugfs_loop_linkrate
[i
].value
);
3367 seq_printf(s
, "%s%s%s ", match
? "[" : "",
3368 hisi_sas_debugfs_loop_linkrate
[i
].name
,
3376 static ssize_t
hisi_sas_debugfs_bist_linkrate_write(struct file
*filp
,
3377 const char __user
*buf
,
3378 size_t count
, loff_t
*ppos
)
3380 struct seq_file
*m
= filp
->private_data
;
3381 struct hisi_hba
*hisi_hba
= m
->private;
3382 char kbuf
[16] = {}, *pkbuf
;
3386 if (hisi_hba
->debugfs_bist_enable
)
3389 if (count
>= sizeof(kbuf
))
3392 if (copy_from_user(kbuf
, buf
, count
))
3395 pkbuf
= strstrip(kbuf
);
3397 for (i
= 0; i
< ARRAY_SIZE(hisi_sas_debugfs_loop_linkrate
); i
++) {
3398 if (!strncmp(hisi_sas_debugfs_loop_linkrate
[i
].name
,
3400 hisi_hba
->debugfs_bist_linkrate
=
3401 hisi_sas_debugfs_loop_linkrate
[i
].value
;
3413 static int hisi_sas_debugfs_bist_linkrate_open(struct inode
*inode
,
3416 return single_open(filp
, hisi_sas_debugfs_bist_linkrate_show
,
3420 static const struct file_operations hisi_sas_debugfs_bist_linkrate_ops
= {
3421 .open
= hisi_sas_debugfs_bist_linkrate_open
,
3423 .write
= hisi_sas_debugfs_bist_linkrate_write
,
3424 .llseek
= seq_lseek
,
3425 .release
= single_release
,
3426 .owner
= THIS_MODULE
,
3429 static const struct {
3432 } hisi_sas_debugfs_loop_code_mode
[] = {
3433 { HISI_SAS_BIST_CODE_MODE_PRBS7
, "PRBS7" },
3434 { HISI_SAS_BIST_CODE_MODE_PRBS23
, "PRBS23" },
3435 { HISI_SAS_BIST_CODE_MODE_PRBS31
, "PRBS31" },
3436 { HISI_SAS_BIST_CODE_MODE_JTPAT
, "JTPAT" },
3437 { HISI_SAS_BIST_CODE_MODE_CJTPAT
, "CJTPAT" },
3438 { HISI_SAS_BIST_CODE_MODE_SCRAMBED_0
, "SCRAMBED_0" },
3439 { HISI_SAS_BIST_CODE_MODE_TRAIN
, "TRAIN" },
3440 { HISI_SAS_BIST_CODE_MODE_TRAIN_DONE
, "TRAIN_DONE" },
3441 { HISI_SAS_BIST_CODE_MODE_HFTP
, "HFTP" },
3442 { HISI_SAS_BIST_CODE_MODE_MFTP
, "MFTP" },
3443 { HISI_SAS_BIST_CODE_MODE_LFTP
, "LFTP" },
3444 { HISI_SAS_BIST_CODE_MODE_FIXED_DATA
, "FIXED_DATA" },
3447 static int hisi_sas_debugfs_bist_code_mode_show(struct seq_file
*s
, void *p
)
3449 struct hisi_hba
*hisi_hba
= s
->private;
3452 for (i
= 0; i
< ARRAY_SIZE(hisi_sas_debugfs_loop_code_mode
); i
++) {
3453 int match
= (hisi_hba
->debugfs_bist_code_mode
==
3454 hisi_sas_debugfs_loop_code_mode
[i
].value
);
3456 seq_printf(s
, "%s%s%s ", match
? "[" : "",
3457 hisi_sas_debugfs_loop_code_mode
[i
].name
,
3465 static ssize_t
hisi_sas_debugfs_bist_code_mode_write(struct file
*filp
,
3466 const char __user
*buf
,
3470 struct seq_file
*m
= filp
->private_data
;
3471 struct hisi_hba
*hisi_hba
= m
->private;
3472 char kbuf
[16] = {}, *pkbuf
;
3476 if (hisi_hba
->debugfs_bist_enable
)
3479 if (count
>= sizeof(kbuf
))
3482 if (copy_from_user(kbuf
, buf
, count
))
3485 pkbuf
= strstrip(kbuf
);
3487 for (i
= 0; i
< ARRAY_SIZE(hisi_sas_debugfs_loop_code_mode
); i
++) {
3488 if (!strncmp(hisi_sas_debugfs_loop_code_mode
[i
].name
,
3490 hisi_hba
->debugfs_bist_code_mode
=
3491 hisi_sas_debugfs_loop_code_mode
[i
].value
;
3503 static int hisi_sas_debugfs_bist_code_mode_open(struct inode
*inode
,
3506 return single_open(filp
, hisi_sas_debugfs_bist_code_mode_show
,
3510 static const struct file_operations hisi_sas_debugfs_bist_code_mode_ops
= {
3511 .open
= hisi_sas_debugfs_bist_code_mode_open
,
3513 .write
= hisi_sas_debugfs_bist_code_mode_write
,
3514 .llseek
= seq_lseek
,
3515 .release
= single_release
,
3516 .owner
= THIS_MODULE
,
3519 static ssize_t
hisi_sas_debugfs_bist_phy_write(struct file
*filp
,
3520 const char __user
*buf
,
3521 size_t count
, loff_t
*ppos
)
3523 struct seq_file
*m
= filp
->private_data
;
3524 struct hisi_hba
*hisi_hba
= m
->private;
3525 unsigned int phy_no
;
3528 if (hisi_hba
->debugfs_bist_enable
)
3531 val
= kstrtouint_from_user(buf
, count
, 0, &phy_no
);
3535 if (phy_no
>= hisi_hba
->n_phy
)
3538 hisi_hba
->debugfs_bist_phy_no
= phy_no
;
3543 static int hisi_sas_debugfs_bist_phy_show(struct seq_file
*s
, void *p
)
3545 struct hisi_hba
*hisi_hba
= s
->private;
3547 seq_printf(s
, "%d\n", hisi_hba
->debugfs_bist_phy_no
);
3552 static int hisi_sas_debugfs_bist_phy_open(struct inode
*inode
,
3555 return single_open(filp
, hisi_sas_debugfs_bist_phy_show
,
3559 static const struct file_operations hisi_sas_debugfs_bist_phy_ops
= {
3560 .open
= hisi_sas_debugfs_bist_phy_open
,
3562 .write
= hisi_sas_debugfs_bist_phy_write
,
3563 .llseek
= seq_lseek
,
3564 .release
= single_release
,
3565 .owner
= THIS_MODULE
,
3568 static const struct {
3571 } hisi_sas_debugfs_loop_modes
[] = {
3572 { HISI_SAS_BIST_LOOPBACK_MODE_DIGITAL
, "digital" },
3573 { HISI_SAS_BIST_LOOPBACK_MODE_SERDES
, "serdes" },
3574 { HISI_SAS_BIST_LOOPBACK_MODE_REMOTE
, "remote" },
3577 static int hisi_sas_debugfs_bist_mode_show(struct seq_file
*s
, void *p
)
3579 struct hisi_hba
*hisi_hba
= s
->private;
3582 for (i
= 0; i
< ARRAY_SIZE(hisi_sas_debugfs_loop_modes
); i
++) {
3583 int match
= (hisi_hba
->debugfs_bist_mode
==
3584 hisi_sas_debugfs_loop_modes
[i
].value
);
3586 seq_printf(s
, "%s%s%s ", match
? "[" : "",
3587 hisi_sas_debugfs_loop_modes
[i
].name
,
3595 static ssize_t
hisi_sas_debugfs_bist_mode_write(struct file
*filp
,
3596 const char __user
*buf
,
3597 size_t count
, loff_t
*ppos
)
3599 struct seq_file
*m
= filp
->private_data
;
3600 struct hisi_hba
*hisi_hba
= m
->private;
3601 char kbuf
[16] = {}, *pkbuf
;
3605 if (hisi_hba
->debugfs_bist_enable
)
3608 if (count
>= sizeof(kbuf
))
3611 if (copy_from_user(kbuf
, buf
, count
))
3614 pkbuf
= strstrip(kbuf
);
3616 for (i
= 0; i
< ARRAY_SIZE(hisi_sas_debugfs_loop_modes
); i
++) {
3617 if (!strncmp(hisi_sas_debugfs_loop_modes
[i
].name
, pkbuf
, 16)) {
3618 hisi_hba
->debugfs_bist_mode
=
3619 hisi_sas_debugfs_loop_modes
[i
].value
;
3631 static int hisi_sas_debugfs_bist_mode_open(struct inode
*inode
,
3634 return single_open(filp
, hisi_sas_debugfs_bist_mode_show
,
3638 static const struct file_operations hisi_sas_debugfs_bist_mode_ops
= {
3639 .open
= hisi_sas_debugfs_bist_mode_open
,
3641 .write
= hisi_sas_debugfs_bist_mode_write
,
3642 .llseek
= seq_lseek
,
3643 .release
= single_release
,
3644 .owner
= THIS_MODULE
,
3647 static ssize_t
hisi_sas_debugfs_bist_enable_write(struct file
*filp
,
3648 const char __user
*buf
,
3649 size_t count
, loff_t
*ppos
)
3651 struct seq_file
*m
= filp
->private_data
;
3652 struct hisi_hba
*hisi_hba
= m
->private;
3653 unsigned int enable
;
3656 val
= kstrtouint_from_user(buf
, count
, 0, &enable
);
3663 if (enable
== hisi_hba
->debugfs_bist_enable
)
3666 if (!hisi_hba
->hw
->set_bist
)
3669 val
= hisi_hba
->hw
->set_bist(hisi_hba
, enable
);
3673 hisi_hba
->debugfs_bist_enable
= enable
;
3678 static int hisi_sas_debugfs_bist_enable_show(struct seq_file
*s
, void *p
)
3680 struct hisi_hba
*hisi_hba
= s
->private;
3682 seq_printf(s
, "%d\n", hisi_hba
->debugfs_bist_enable
);
3687 static int hisi_sas_debugfs_bist_enable_open(struct inode
*inode
,
3690 return single_open(filp
, hisi_sas_debugfs_bist_enable_show
,
3694 static const struct file_operations hisi_sas_debugfs_bist_enable_ops
= {
3695 .open
= hisi_sas_debugfs_bist_enable_open
,
3697 .write
= hisi_sas_debugfs_bist_enable_write
,
3698 .llseek
= seq_lseek
,
3699 .release
= single_release
,
3700 .owner
= THIS_MODULE
,
3703 static ssize_t
hisi_sas_debugfs_phy_down_cnt_write(struct file
*filp
,
3704 const char __user
*buf
,
3705 size_t count
, loff_t
*ppos
)
3707 struct seq_file
*s
= filp
->private_data
;
3708 struct hisi_sas_phy
*phy
= s
->private;
3709 unsigned int set_val
;
3712 res
= kstrtouint_from_user(buf
, count
, 0, &set_val
);
3719 atomic_set(&phy
->down_cnt
, 0);
3724 static int hisi_sas_debugfs_phy_down_cnt_show(struct seq_file
*s
, void *p
)
3726 struct hisi_sas_phy
*phy
= s
->private;
3728 seq_printf(s
, "%d\n", atomic_read(&phy
->down_cnt
));
3733 static int hisi_sas_debugfs_phy_down_cnt_open(struct inode
*inode
,
3736 return single_open(filp
, hisi_sas_debugfs_phy_down_cnt_show
,
3740 static const struct file_operations hisi_sas_debugfs_phy_down_cnt_ops
= {
3741 .open
= hisi_sas_debugfs_phy_down_cnt_open
,
3743 .write
= hisi_sas_debugfs_phy_down_cnt_write
,
3744 .llseek
= seq_lseek
,
3745 .release
= single_release
,
3746 .owner
= THIS_MODULE
,
3749 void hisi_sas_debugfs_work_handler(struct work_struct
*work
)
3751 struct hisi_hba
*hisi_hba
=
3752 container_of(work
, struct hisi_hba
, debugfs_work
);
3753 int debugfs_dump_index
= hisi_hba
->debugfs_dump_index
;
3754 struct device
*dev
= hisi_hba
->dev
;
3755 u64 timestamp
= local_clock();
3757 if (debugfs_dump_index
>= hisi_sas_debugfs_dump_count
) {
3758 dev_warn(dev
, "dump count exceeded!\n");
3762 do_div(timestamp
, NSEC_PER_MSEC
);
3763 hisi_hba
->debugfs_timestamp
[debugfs_dump_index
] = timestamp
;
3765 hisi_sas_debugfs_snapshot_regs(hisi_hba
);
3766 hisi_hba
->debugfs_dump_index
++;
3768 EXPORT_SYMBOL_GPL(hisi_sas_debugfs_work_handler
);
3770 static void hisi_sas_debugfs_release(struct hisi_hba
*hisi_hba
, int dump_index
)
3772 struct device
*dev
= hisi_hba
->dev
;
3775 devm_kfree(dev
, hisi_hba
->debugfs_iost_cache
[dump_index
].cache
);
3776 devm_kfree(dev
, hisi_hba
->debugfs_itct_cache
[dump_index
].cache
);
3777 devm_kfree(dev
, hisi_hba
->debugfs_iost
[dump_index
].iost
);
3778 devm_kfree(dev
, hisi_hba
->debugfs_itct
[dump_index
].itct
);
3780 for (i
= 0; i
< hisi_hba
->queue_count
; i
++)
3781 devm_kfree(dev
, hisi_hba
->debugfs_dq
[dump_index
][i
].hdr
);
3783 for (i
= 0; i
< hisi_hba
->queue_count
; i
++)
3785 hisi_hba
->debugfs_cq
[dump_index
][i
].complete_hdr
);
3787 for (i
= 0; i
< DEBUGFS_REGS_NUM
; i
++)
3788 devm_kfree(dev
, hisi_hba
->debugfs_regs
[dump_index
][i
].data
);
3790 for (i
= 0; i
< hisi_hba
->n_phy
; i
++)
3791 devm_kfree(dev
, hisi_hba
->debugfs_port_reg
[dump_index
][i
].data
);
3794 static int hisi_sas_debugfs_alloc(struct hisi_hba
*hisi_hba
, int dump_index
)
3796 const struct hisi_sas_hw
*hw
= hisi_hba
->hw
;
3797 struct device
*dev
= hisi_hba
->dev
;
3801 for (r
= 0; r
< DEBUGFS_REGS_NUM
; r
++) {
3802 struct hisi_sas_debugfs_regs
*regs
=
3803 &hisi_hba
->debugfs_regs
[dump_index
][r
];
3805 sz
= hw
->debugfs_reg_array
[r
]->count
* 4;
3806 regs
->data
= devm_kmalloc(dev
, sz
, GFP_KERNEL
);
3809 regs
->hisi_hba
= hisi_hba
;
3812 sz
= hw
->debugfs_reg_port
->count
* 4;
3813 for (p
= 0; p
< hisi_hba
->n_phy
; p
++) {
3814 struct hisi_sas_debugfs_port
*port
=
3815 &hisi_hba
->debugfs_port_reg
[dump_index
][p
];
3817 port
->data
= devm_kmalloc(dev
, sz
, GFP_KERNEL
);
3820 port
->phy
= &hisi_hba
->phy
[p
];
3823 sz
= hw
->complete_hdr_size
* HISI_SAS_QUEUE_SLOTS
;
3824 for (c
= 0; c
< hisi_hba
->queue_count
; c
++) {
3825 struct hisi_sas_debugfs_cq
*cq
=
3826 &hisi_hba
->debugfs_cq
[dump_index
][c
];
3828 cq
->complete_hdr
= devm_kmalloc(dev
, sz
, GFP_KERNEL
);
3829 if (!cq
->complete_hdr
)
3831 cq
->cq
= &hisi_hba
->cq
[c
];
3834 sz
= sizeof(struct hisi_sas_cmd_hdr
) * HISI_SAS_QUEUE_SLOTS
;
3835 for (d
= 0; d
< hisi_hba
->queue_count
; d
++) {
3836 struct hisi_sas_debugfs_dq
*dq
=
3837 &hisi_hba
->debugfs_dq
[dump_index
][d
];
3839 dq
->hdr
= devm_kmalloc(dev
, sz
, GFP_KERNEL
);
3842 dq
->dq
= &hisi_hba
->dq
[d
];
3845 sz
= HISI_SAS_MAX_COMMANDS
* sizeof(struct hisi_sas_iost
);
3847 hisi_hba
->debugfs_iost
[dump_index
].iost
=
3848 devm_kmalloc(dev
, sz
, GFP_KERNEL
);
3849 if (!hisi_hba
->debugfs_iost
[dump_index
].iost
)
3852 sz
= HISI_SAS_IOST_ITCT_CACHE_NUM
*
3853 sizeof(struct hisi_sas_iost_itct_cache
);
3855 hisi_hba
->debugfs_iost_cache
[dump_index
].cache
=
3856 devm_kmalloc(dev
, sz
, GFP_KERNEL
);
3857 if (!hisi_hba
->debugfs_iost_cache
[dump_index
].cache
)
3860 sz
= HISI_SAS_IOST_ITCT_CACHE_NUM
*
3861 sizeof(struct hisi_sas_iost_itct_cache
);
3863 hisi_hba
->debugfs_itct_cache
[dump_index
].cache
=
3864 devm_kmalloc(dev
, sz
, GFP_KERNEL
);
3865 if (!hisi_hba
->debugfs_itct_cache
[dump_index
].cache
)
3868 /* New memory allocation must be locate before itct */
3869 sz
= HISI_SAS_MAX_ITCT_ENTRIES
* sizeof(struct hisi_sas_itct
);
3871 hisi_hba
->debugfs_itct
[dump_index
].itct
=
3872 devm_kmalloc(dev
, sz
, GFP_KERNEL
);
3873 if (!hisi_hba
->debugfs_itct
[dump_index
].itct
)
3878 for (i
= 0; i
< hisi_sas_debugfs_dump_count
; i
++)
3879 hisi_sas_debugfs_release(hisi_hba
, i
);
3883 static void hisi_sas_debugfs_phy_down_cnt_init(struct hisi_hba
*hisi_hba
)
3885 struct dentry
*dir
= debugfs_create_dir("phy_down_cnt",
3886 hisi_hba
->debugfs_dir
);
3890 for (phy_no
= 0; phy_no
< hisi_hba
->n_phy
; phy_no
++) {
3891 snprintf(name
, 16, "%d", phy_no
);
3892 debugfs_create_file(name
, 0600, dir
,
3893 &hisi_hba
->phy
[phy_no
],
3894 &hisi_sas_debugfs_phy_down_cnt_ops
);
3898 static void hisi_sas_debugfs_bist_init(struct hisi_hba
*hisi_hba
)
3900 hisi_hba
->debugfs_bist_dentry
=
3901 debugfs_create_dir("bist", hisi_hba
->debugfs_dir
);
3902 debugfs_create_file("link_rate", 0600,
3903 hisi_hba
->debugfs_bist_dentry
, hisi_hba
,
3904 &hisi_sas_debugfs_bist_linkrate_ops
);
3906 debugfs_create_file("code_mode", 0600,
3907 hisi_hba
->debugfs_bist_dentry
, hisi_hba
,
3908 &hisi_sas_debugfs_bist_code_mode_ops
);
3910 debugfs_create_file("phy_id", 0600, hisi_hba
->debugfs_bist_dentry
,
3911 hisi_hba
, &hisi_sas_debugfs_bist_phy_ops
);
3913 debugfs_create_u32("cnt", 0600, hisi_hba
->debugfs_bist_dentry
,
3914 &hisi_hba
->debugfs_bist_cnt
);
3916 debugfs_create_file("loopback_mode", 0600,
3917 hisi_hba
->debugfs_bist_dentry
,
3918 hisi_hba
, &hisi_sas_debugfs_bist_mode_ops
);
3920 debugfs_create_file("enable", 0600, hisi_hba
->debugfs_bist_dentry
,
3921 hisi_hba
, &hisi_sas_debugfs_bist_enable_ops
);
3923 hisi_hba
->debugfs_bist_linkrate
= SAS_LINK_RATE_1_5_GBPS
;
3926 void hisi_sas_debugfs_init(struct hisi_hba
*hisi_hba
)
3928 struct device
*dev
= hisi_hba
->dev
;
3931 hisi_hba
->debugfs_dir
= debugfs_create_dir(dev_name(dev
),
3932 hisi_sas_debugfs_dir
);
3933 debugfs_create_file("trigger_dump", 0200,
3934 hisi_hba
->debugfs_dir
,
3936 &hisi_sas_debugfs_trigger_dump_fops
);
3938 /* create bist structures */
3939 hisi_sas_debugfs_bist_init(hisi_hba
);
3941 hisi_hba
->debugfs_dump_dentry
=
3942 debugfs_create_dir("dump", hisi_hba
->debugfs_dir
);
3944 hisi_sas_debugfs_phy_down_cnt_init(hisi_hba
);
3946 for (i
= 0; i
< hisi_sas_debugfs_dump_count
; i
++) {
3947 if (hisi_sas_debugfs_alloc(hisi_hba
, i
)) {
3948 debugfs_remove_recursive(hisi_hba
->debugfs_dir
);
3949 dev_dbg(dev
, "failed to init debugfs!\n");
3954 EXPORT_SYMBOL_GPL(hisi_sas_debugfs_init
);
3956 void hisi_sas_debugfs_exit(struct hisi_hba
*hisi_hba
)
3958 debugfs_remove_recursive(hisi_hba
->debugfs_dir
);
3960 EXPORT_SYMBOL_GPL(hisi_sas_debugfs_exit
);
3962 int hisi_sas_remove(struct platform_device
*pdev
)
3964 struct sas_ha_struct
*sha
= platform_get_drvdata(pdev
);
3965 struct hisi_hba
*hisi_hba
= sha
->lldd_ha
;
3966 struct Scsi_Host
*shost
= sha
->core
.shost
;
3968 if (timer_pending(&hisi_hba
->timer
))
3969 del_timer(&hisi_hba
->timer
);
3971 sas_unregister_ha(sha
);
3972 sas_remove_host(sha
->core
.shost
);
3974 hisi_sas_free(hisi_hba
);
3975 scsi_host_put(shost
);
3978 EXPORT_SYMBOL_GPL(hisi_sas_remove
);
3980 bool hisi_sas_debugfs_enable
;
3981 EXPORT_SYMBOL_GPL(hisi_sas_debugfs_enable
);
3982 module_param_named(debugfs_enable
, hisi_sas_debugfs_enable
, bool, 0444);
3983 MODULE_PARM_DESC(hisi_sas_debugfs_enable
, "Enable driver debugfs (default disabled)");
3985 u32 hisi_sas_debugfs_dump_count
= 1;
3986 EXPORT_SYMBOL_GPL(hisi_sas_debugfs_dump_count
);
3987 module_param_named(debugfs_dump_count
, hisi_sas_debugfs_dump_count
, uint
, 0444);
3988 MODULE_PARM_DESC(hisi_sas_debugfs_dump_count
, "Number of debugfs dumps to allow");
3990 static __init
int hisi_sas_init(void)
3992 hisi_sas_stt
= sas_domain_attach_transport(&hisi_sas_transport_ops
);
3996 if (hisi_sas_debugfs_enable
) {
3997 hisi_sas_debugfs_dir
= debugfs_create_dir("hisi_sas", NULL
);
3998 if (hisi_sas_debugfs_dump_count
> HISI_SAS_MAX_DEBUGFS_DUMP
) {
3999 pr_info("hisi_sas: Limiting debugfs dump count\n");
4000 hisi_sas_debugfs_dump_count
= HISI_SAS_MAX_DEBUGFS_DUMP
;
4007 static __exit
void hisi_sas_exit(void)
4009 sas_release_transport(hisi_sas_stt
);
4011 debugfs_remove(hisi_sas_debugfs_dir
);
4014 module_init(hisi_sas_init
);
4015 module_exit(hisi_sas_exit
);
4017 MODULE_LICENSE("GPL");
4018 MODULE_AUTHOR("John Garry <john.garry@huawei.com>");
4019 MODULE_DESCRIPTION("HISILICON SAS controller driver");
4020 MODULE_ALIAS("platform:" DRV_NAME
);