/*
 * Copyright (c) 2015 Linaro Ltd.
 * Copyright (c) 2015 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include "hisi_sas.h"
#include "../libsas/sas_internal.h"
#define DRV_NAME "hisi_sas"

#define DEV_IS_GONE(dev) \
	((!dev) || (dev->dev_type == SAS_PHY_UNUSED))
static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
				u8 *lun, struct hisi_sas_tmf_task *tmf);
static int
hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
			     struct domain_device *device,
			     int abort_flag, int tag);
static int hisi_sas_softreset_ata_disk(struct domain_device *device);
static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
				void *funcdata);
static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
				  struct domain_device *device);
static void hisi_sas_dev_gone(struct domain_device *device);
u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis, int direction)
{
	switch (fis->command) {
	case ATA_CMD_FPDMA_WRITE:
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_RECV:
	case ATA_CMD_FPDMA_SEND:
	case ATA_CMD_NCQ_NON_DATA:
		return HISI_SAS_SATA_PROTOCOL_FPDMA;

	case ATA_CMD_DOWNLOAD_MICRO:
	case ATA_CMD_ID_ATA:
	case ATA_CMD_PMP_READ:
	case ATA_CMD_READ_LOG_EXT:
	case ATA_CMD_PIO_READ:
	case ATA_CMD_PIO_READ_EXT:
	case ATA_CMD_PMP_WRITE:
	case ATA_CMD_WRITE_LOG_EXT:
	case ATA_CMD_PIO_WRITE:
	case ATA_CMD_PIO_WRITE_EXT:
		return HISI_SAS_SATA_PROTOCOL_PIO;

	case ATA_CMD_DSM:
	case ATA_CMD_DOWNLOAD_MICRO_DMA:
	case ATA_CMD_PMP_READ_DMA:
	case ATA_CMD_PMP_WRITE_DMA:
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_READ_LOG_DMA_EXT:
	case ATA_CMD_READ_STREAM_DMA_EXT:
	case ATA_CMD_TRUSTED_RCV_DMA:
	case ATA_CMD_TRUSTED_SND_DMA:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
	case ATA_CMD_WRITE_QUEUED:
	case ATA_CMD_WRITE_LOG_DMA_EXT:
	case ATA_CMD_WRITE_STREAM_DMA_EXT:
	case ATA_CMD_ZAC_MGMT_IN:
		return HISI_SAS_SATA_PROTOCOL_DMA;

	case ATA_CMD_CHK_POWER:
	case ATA_CMD_DEV_RESET:
	case ATA_CMD_EDD:
	case ATA_CMD_FLUSH:
	case ATA_CMD_FLUSH_EXT:
	case ATA_CMD_VERIFY:
	case ATA_CMD_VERIFY_EXT:
	case ATA_CMD_SET_FEATURES:
	case ATA_CMD_STANDBY:
	case ATA_CMD_STANDBYNOW1:
	case ATA_CMD_ZAC_MGMT_OUT:
		return HISI_SAS_SATA_PROTOCOL_NONDATA;

	case ATA_CMD_SET_MAX:
		switch (fis->features) {
		case ATA_SET_MAX_PASSWD:
		case ATA_SET_MAX_LOCK:
			return HISI_SAS_SATA_PROTOCOL_PIO;

		case ATA_SET_MAX_PASSWD_DMA:
		case ATA_SET_MAX_UNLOCK_DMA:
			return HISI_SAS_SATA_PROTOCOL_DMA;

		default:
			return HISI_SAS_SATA_PROTOCOL_NONDATA;
		}

	default:
		if (direction == DMA_NONE)
			return HISI_SAS_SATA_PROTOCOL_NONDATA;
		return HISI_SAS_SATA_PROTOCOL_PIO;
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_get_ata_protocol);
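/*
 * Example of the mapping above (derived from the switch itself, not an
 * extra code path): ATA_CMD_FPDMA_READ always classifies as FPDMA
 * regardless of the direction hint, while a command missing from every
 * case label falls through to the default branch and is classified as
 * NONDATA or PIO purely from its DMA direction.
 */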
void hisi_sas_sata_done(struct sas_task *task,
			struct hisi_sas_slot *slot)
{
	struct task_status_struct *ts = &task->task_status;
	struct ata_task_resp *resp = (struct ata_task_resp *)ts->buf;
	struct hisi_sas_status_buffer *status_buf =
			hisi_sas_status_buf_addr_mem(slot);
	u8 *iu = &status_buf->iu[0];
	struct dev_to_host_fis *d2h = (struct dev_to_host_fis *)iu;

	resp->frame_len = sizeof(struct dev_to_host_fis);
	memcpy(&resp->ending_fis[0], d2h, sizeof(struct dev_to_host_fis));

	ts->buf_valid_size = sizeof(*resp);
}
EXPORT_SYMBOL_GPL(hisi_sas_sata_done);
int hisi_sas_get_ncq_tag(struct sas_task *task, u32 *tag)
{
	struct ata_queued_cmd *qc = task->uldd_task;

	if (qc) {
		if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
		    qc->tf.command == ATA_CMD_FPDMA_READ) {
			*tag = qc->tag;
			return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_get_ncq_tag);
/*
 * This function assumes linkrate mask fits in 8 bits, which it
 * does for all HW versions supported.
 */
u8 hisi_sas_get_prog_phy_linkrate_mask(enum sas_linkrate max)
{
	u8 rate = 0;
	int i;

	max -= SAS_LINK_RATE_1_5_GBPS;
	for (i = 0; i <= max; i++)
		rate |= 1 << (i * 2);
	return rate;
}
EXPORT_SYMBOL_GPL(hisi_sas_get_prog_phy_linkrate_mask);
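/*
 * Worked example for the mask above: max = SAS_LINK_RATE_6_0_GBPS is
 * two steps above SAS_LINK_RATE_1_5_GBPS, so the loop runs for
 * i = 0..2 and sets bits 0, 2 and 4, giving a mask of 0x15
 * (1.5, 3.0 and 6.0 Gbit/s all enabled).
 */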
static struct hisi_hba *dev_to_hisi_hba(struct domain_device *device)
{
	return device->port->ha->lldd_ha;
}

struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port)
{
	return container_of(sas_port, struct hisi_sas_port, sas_port);
}
EXPORT_SYMBOL_GPL(to_hisi_sas_port);
void hisi_sas_stop_phys(struct hisi_hba *hisi_hba)
{
	int phy_no;

	for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++)
		hisi_hba->hw->phy_disable(hisi_hba, phy_no);
}
EXPORT_SYMBOL_GPL(hisi_sas_stop_phys);
static void hisi_sas_slot_index_clear(struct hisi_hba *hisi_hba, int slot_idx)
{
	void *bitmap = hisi_hba->slot_index_tags;

	clear_bit(slot_idx, bitmap);
}

static void hisi_sas_slot_index_free(struct hisi_hba *hisi_hba, int slot_idx)
{
	hisi_sas_slot_index_clear(hisi_hba, slot_idx);
}

static void hisi_sas_slot_index_set(struct hisi_hba *hisi_hba, int slot_idx)
{
	void *bitmap = hisi_hba->slot_index_tags;

	set_bit(slot_idx, bitmap);
}

static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba, int *slot_idx)
{
	unsigned int index;
	void *bitmap = hisi_hba->slot_index_tags;

	index = find_next_zero_bit(bitmap, hisi_hba->slot_index_count,
				   hisi_hba->last_slot_index + 1);
	if (index >= hisi_hba->slot_index_count) {
		index = find_next_zero_bit(bitmap, hisi_hba->slot_index_count,
					   0);
		if (index >= hisi_hba->slot_index_count)
			return -SAS_QUEUE_FULL;
	}
	hisi_sas_slot_index_set(hisi_hba, index);
	*slot_idx = index;
	hisi_hba->last_slot_index = index;

	return 0;
}
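/*
 * Note on the allocator above: the search starts at last_slot_index + 1
 * and only wraps to bit 0 when the upper range is exhausted, so tags are
 * handed out round-robin rather than lowest-free-first, which spreads
 * slot reuse across the whole bitmap.
 */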
static void hisi_sas_slot_index_init(struct hisi_hba *hisi_hba)
{
	int i;

	for (i = 0; i < hisi_hba->slot_index_count; ++i)
		hisi_sas_slot_index_clear(hisi_hba, i);
}
void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
			     struct hisi_sas_slot *slot)
{
	struct hisi_sas_dq *dq = &hisi_hba->dq[slot->dlvry_queue];
	unsigned long flags;

	if (task) {
		struct device *dev = hisi_hba->dev;

		if (!task->lldd_task)
			return;

		task->lldd_task = NULL;

		if (!sas_protocol_ata(task->task_proto))
			if (slot->n_elem)
				dma_unmap_sg(dev, task->scatter,
					     task->num_scatter,
					     task->data_dir);
	}

	spin_lock_irqsave(&dq->lock, flags);
	list_del_init(&slot->entry);
	spin_unlock_irqrestore(&dq->lock, flags);

	memset(slot, 0, offsetof(struct hisi_sas_slot, buf));

	spin_lock_irqsave(&hisi_hba->lock, flags);
	hisi_sas_slot_index_free(hisi_hba, slot->idx);
	spin_unlock_irqrestore(&hisi_hba->lock, flags);
}
EXPORT_SYMBOL_GPL(hisi_sas_slot_task_free);
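/*
 * Locking note for hisi_sas_slot_task_free(): the delivery-queue lock is
 * taken to unlink the slot and then the HBA lock to return the tag; both
 * are acquired here with irqsave, so callers must hold neither.
 */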
static void hisi_sas_task_prep_smp(struct hisi_hba *hisi_hba,
				   struct hisi_sas_slot *slot)
{
	hisi_hba->hw->prep_smp(hisi_hba, slot);
}

static void hisi_sas_task_prep_ssp(struct hisi_hba *hisi_hba,
				   struct hisi_sas_slot *slot)
{
	hisi_hba->hw->prep_ssp(hisi_hba, slot);
}

static void hisi_sas_task_prep_ata(struct hisi_hba *hisi_hba,
				   struct hisi_sas_slot *slot)
{
	hisi_hba->hw->prep_stp(hisi_hba, slot);
}

static void hisi_sas_task_prep_abort(struct hisi_hba *hisi_hba,
				     struct hisi_sas_slot *slot,
				     int device_id, int abort_flag,
				     int tag_to_abort)
{
	hisi_hba->hw->prep_abort(hisi_hba, slot,
				 device_id, abort_flag, tag_to_abort);
}
static int hisi_sas_task_prep(struct sas_task *task,
			      struct hisi_sas_dq **dq_pointer,
			      bool is_tmf, struct hisi_sas_tmf_task *tmf,
			      int *pass)
{
	struct domain_device *device = task->dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_sas_port *port;
	struct hisi_sas_slot *slot;
	struct hisi_sas_cmd_hdr *cmd_hdr_base;
	struct asd_sas_port *sas_port = device->port;
	struct device *dev = hisi_hba->dev;
	int dlvry_queue_slot, dlvry_queue, rc, slot_idx;
	int n_elem = 0, n_elem_req = 0, n_elem_resp = 0;
	struct hisi_sas_dq *dq;
	unsigned long flags;
	int wr_q_index;

	if (!sas_port) {
		struct task_status_struct *ts = &task->task_status;

		ts->resp = SAS_TASK_UNDELIVERED;
		ts->stat = SAS_PHY_DOWN;
		/*
		 * libsas will use dev->port, should
		 * not call task_done for sata
		 */
		if (device->dev_type != SAS_SATA_DEV)
			task->task_done(task);
		return -ECOMM;
	}

	if (DEV_IS_GONE(sas_dev)) {
		if (sas_dev)
			dev_info(dev, "task prep: device %d not ready\n",
				 sas_dev->device_id);
		else
			dev_info(dev, "task prep: device %016llx not ready\n",
				 SAS_ADDR(device->sas_addr));

		return -ECOMM;
	}

	*dq_pointer = dq = sas_dev->dq;

	port = to_hisi_sas_port(sas_port);
	if (port && !port->port_attached) {
		dev_info(dev, "task prep: %s port%d not attach device\n",
			 (dev_is_sata(device)) ?
			 "SATA/STP" : "SAS",
			 device->port->id);

		return -ECOMM;
	}

	if (!sas_protocol_ata(task->task_proto)) {
		unsigned int req_len, resp_len;

		if (task->num_scatter) {
			n_elem = dma_map_sg(dev, task->scatter,
					    task->num_scatter, task->data_dir);
			if (!n_elem) {
				rc = -ENOMEM;
				goto prep_out;
			}
		} else if (task->task_proto & SAS_PROTOCOL_SMP) {
			n_elem_req = dma_map_sg(dev, &task->smp_task.smp_req,
						1, DMA_TO_DEVICE);
			if (!n_elem_req) {
				rc = -ENOMEM;
				goto prep_out;
			}
			req_len = sg_dma_len(&task->smp_task.smp_req);
			if (req_len & 0x3) {
				rc = -EINVAL;
				goto err_out_dma_unmap;
			}
			n_elem_resp = dma_map_sg(dev, &task->smp_task.smp_resp,
						 1, DMA_FROM_DEVICE);
			if (!n_elem_resp) {
				rc = -ENOMEM;
				goto err_out_dma_unmap;
			}
			resp_len = sg_dma_len(&task->smp_task.smp_resp);
			if (resp_len & 0x3) {
				rc = -EINVAL;
				goto err_out_dma_unmap;
			}
		}
	} else
		n_elem = task->num_scatter;

	if (n_elem > HISI_SAS_SGE_PAGE_CNT) {
		dev_err(dev, "task prep: n_elem(%d) > HISI_SAS_SGE_PAGE_CNT",
			n_elem);
		rc = -EINVAL;
		goto err_out_dma_unmap;
	}

	spin_lock_irqsave(&hisi_hba->lock, flags);
	if (hisi_hba->hw->slot_index_alloc)
		rc = hisi_hba->hw->slot_index_alloc(hisi_hba, &slot_idx,
						    device);
	else
		rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
	spin_unlock_irqrestore(&hisi_hba->lock, flags);
	if (rc)
		goto err_out_dma_unmap;

	slot = &hisi_hba->slot_info[slot_idx];

	spin_lock_irqsave(&dq->lock, flags);
	wr_q_index = hisi_hba->hw->get_free_slot(hisi_hba, dq);
	if (wr_q_index < 0) {
		spin_unlock_irqrestore(&dq->lock, flags);
		rc = -EAGAIN;
		goto err_out_tag;
	}

	list_add_tail(&slot->delivery, &dq->list);
	list_add_tail(&slot->entry, &sas_dev->list);
	spin_unlock_irqrestore(&dq->lock, flags);

	dlvry_queue = dq->id;
	dlvry_queue_slot = wr_q_index;

	slot->n_elem = n_elem;
	slot->dlvry_queue = dlvry_queue;
	slot->dlvry_queue_slot = dlvry_queue_slot;
	cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
	slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
	slot->task = task;
	slot->port = port;
	slot->tmf = tmf;
	slot->is_internal = is_tmf;
	task->lldd_task = slot;

	memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
	memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
	memset(hisi_sas_status_buf_addr_mem(slot), 0, HISI_SAS_STATUS_BUF_SZ);

	switch (task->task_proto) {
	case SAS_PROTOCOL_SMP:
		hisi_sas_task_prep_smp(hisi_hba, slot);
		break;
	case SAS_PROTOCOL_SSP:
		hisi_sas_task_prep_ssp(hisi_hba, slot);
		break;
	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
		hisi_sas_task_prep_ata(hisi_hba, slot);
		break;
	default:
		dev_err(dev, "task prep: unknown/unsupported proto (0x%x)\n",
			task->task_proto);
		break;
	}

	spin_lock_irqsave(&task->task_state_lock, flags);
	task->task_state_flags |= SAS_TASK_AT_INITIATOR;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	++(*pass);
	WRITE_ONCE(slot->ready, 1);

	return 0;

err_out_tag:
	spin_lock_irqsave(&hisi_hba->lock, flags);
	hisi_sas_slot_index_free(hisi_hba, slot_idx);
	spin_unlock_irqrestore(&hisi_hba->lock, flags);
err_out_dma_unmap:
	if (!sas_protocol_ata(task->task_proto)) {
		if (task->num_scatter) {
			dma_unmap_sg(dev, task->scatter, task->num_scatter,
				     task->data_dir);
		} else if (task->task_proto & SAS_PROTOCOL_SMP) {
			if (n_elem_req)
				dma_unmap_sg(dev, &task->smp_task.smp_req,
					     1, DMA_TO_DEVICE);
			if (n_elem_resp)
				dma_unmap_sg(dev, &task->smp_task.smp_resp,
					     1, DMA_FROM_DEVICE);
		}
	}
prep_out:
	dev_err(dev, "task prep: failed[%d]!\n", rc);
	return rc;
}
static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags,
			      bool is_tmf, struct hisi_sas_tmf_task *tmf)
{
	u32 rc;
	u32 pass = 0;
	unsigned long flags;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
	struct device *dev = hisi_hba->dev;
	struct hisi_sas_dq *dq = NULL;

	if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags))) {
		/*
		 * For IOs from upper layer, it may already disable preempt
		 * in the IO path, if disable preempt again in down(),
		 * function schedule() will report schedule_bug(), so check
		 * preemptible() before goto down().
		 */
		if (!preemptible())
			return -EINVAL;

		down(&hisi_hba->sem);
		up(&hisi_hba->sem);
	}

	/* protect task_prep and start_delivery sequence */
	rc = hisi_sas_task_prep(task, &dq, is_tmf, tmf, &pass);
	if (rc)
		dev_err(dev, "task exec: failed[%d]!\n", rc);

	if (likely(pass)) {
		spin_lock_irqsave(&dq->lock, flags);
		hisi_hba->hw->start_delivery(dq);
		spin_unlock_irqrestore(&dq->lock, flags);
	}

	return rc;
}
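/*
 * Delivery is intentionally two-phase: hisi_sas_task_prep() builds the
 * slot, queues it on dq->list and marks it with WRITE_ONCE(slot->ready, 1),
 * and only then is the hardware doorbell rung via start_delivery() under
 * dq->lock, so the completion path should never observe a half-built
 * command header.
 */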
static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct sas_ha_struct *sas_ha;

	if (!phy->phy_attached)
		return;

	sas_ha = &hisi_hba->sha;
	sas_ha->notify_phy_event(sas_phy, PHYE_OOB_DONE);

	if (sas_phy->phy) {
		struct sas_phy *sphy = sas_phy->phy;

		sphy->negotiated_linkrate = sas_phy->linkrate;
		sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
		sphy->maximum_linkrate_hw =
			hisi_hba->hw->phy_get_max_linkrate();
		if (sphy->minimum_linkrate == SAS_LINK_RATE_UNKNOWN)
			sphy->minimum_linkrate = phy->minimum_linkrate;

		if (sphy->maximum_linkrate == SAS_LINK_RATE_UNKNOWN)
			sphy->maximum_linkrate = phy->maximum_linkrate;
	}

	if (phy->phy_type & PORT_TYPE_SAS) {
		struct sas_identify_frame *id;

		id = (struct sas_identify_frame *)phy->frame_rcvd;
		id->dev_type = phy->identify.device_type;
		id->initiator_bits = SAS_PROTOCOL_ALL;
		id->target_bits = phy->identify.target_port_protocols;
	} else if (phy->phy_type & PORT_TYPE_SATA) {
		/* Nothing */
	}

	sas_phy->frame_rcvd_size = phy->frame_rcvd_size;
	sas_ha->notify_port_event(sas_phy, PORTE_BYTES_DMAED);
}
static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device)
{
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct hisi_sas_device *sas_dev = NULL;
	unsigned long flags;
	int last = hisi_hba->last_dev_id;
	int first = (hisi_hba->last_dev_id + 1) % HISI_SAS_MAX_DEVICES;
	int i;

	spin_lock_irqsave(&hisi_hba->lock, flags);
	for (i = first; i != last; i %= HISI_SAS_MAX_DEVICES) {
		if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) {
			int queue = i % hisi_hba->queue_count;
			struct hisi_sas_dq *dq = &hisi_hba->dq[queue];

			hisi_hba->devices[i].device_id = i;
			sas_dev = &hisi_hba->devices[i];
			sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
			sas_dev->dev_type = device->dev_type;
			sas_dev->hisi_hba = hisi_hba;
			sas_dev->sas_device = device;
			sas_dev->dq = dq;
			INIT_LIST_HEAD(&hisi_hba->devices[i].list);
			break;
		}
		i++;
	}
	hisi_hba->last_dev_id = i;
	spin_unlock_irqrestore(&hisi_hba->lock, flags);

	return sas_dev;
}
#define HISI_SAS_SRST_ATA_DISK_CNT 3
static int hisi_sas_init_device(struct domain_device *device)
{
	int rc = TMF_RESP_FUNC_COMPLETE;
	struct scsi_lun lun;
	struct hisi_sas_tmf_task tmf_task;
	int retry = HISI_SAS_SRST_ATA_DISK_CNT;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);

	switch (device->dev_type) {
	case SAS_END_DEVICE:
		int_to_scsilun(0, &lun);

		tmf_task.tmf = TMF_CLEAR_TASK_SET;
		rc = hisi_sas_debug_issue_ssp_tmf(device, lun.scsi_lun,
						  &tmf_task);
		if (rc == TMF_RESP_FUNC_COMPLETE)
			hisi_sas_release_task(hisi_hba, device);
		break;
	case SAS_SATA_DEV:
	case SAS_SATA_PM:
	case SAS_SATA_PM_PORT:
	case SAS_SATA_PENDING:
		while (retry-- > 0) {
			rc = hisi_sas_softreset_ata_disk(device);
			if (!rc)
				break;
		}
		break;
	default:
		break;
	}

	return rc;
}
static int hisi_sas_dev_found(struct domain_device *device)
{
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct domain_device *parent_dev = device->parent;
	struct hisi_sas_device *sas_dev;
	struct device *dev = hisi_hba->dev;
	int rc;

	if (hisi_hba->hw->alloc_dev)
		sas_dev = hisi_hba->hw->alloc_dev(device);
	else
		sas_dev = hisi_sas_alloc_dev(device);
	if (!sas_dev) {
		dev_err(dev, "fail alloc dev: max support %d devices\n",
			HISI_SAS_MAX_DEVICES);
		return -EINVAL;
	}

	device->lldd_dev = sas_dev;
	hisi_hba->hw->setup_itct(hisi_hba, sas_dev);

	if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) {
		int phy_no;
		u8 phy_num = parent_dev->ex_dev.num_phys;
		struct ex_phy *phy;

		for (phy_no = 0; phy_no < phy_num; phy_no++) {
			phy = &parent_dev->ex_dev.ex_phy[phy_no];
			if (SAS_ADDR(phy->attached_sas_addr) ==
			    SAS_ADDR(device->sas_addr))
				break;
		}

		if (phy_no == phy_num) {
			dev_info(dev, "dev found: no attached "
				 "dev:%016llx at ex:%016llx\n",
				 SAS_ADDR(device->sas_addr),
				 SAS_ADDR(parent_dev->sas_addr));
			rc = -EINVAL;
			goto err_out;
		}
	}

	dev_info(dev, "dev[%d:%x] found\n",
		 sas_dev->device_id, sas_dev->dev_type);

	rc = hisi_sas_init_device(device);
	if (rc)
		goto err_out;
	return 0;

err_out:
	hisi_sas_dev_gone(device);
	return rc;
}
int hisi_sas_slave_configure(struct scsi_device *sdev)
{
	struct domain_device *dev = sdev_to_domain_dev(sdev);
	int ret = sas_slave_configure(sdev);

	if (ret)
		return ret;
	if (!dev_is_sata(dev))
		sas_change_queue_depth(sdev, 64);

	return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_slave_configure);
void hisi_sas_scan_start(struct Scsi_Host *shost)
{
	struct hisi_hba *hisi_hba = shost_priv(shost);

	hisi_hba->hw->phys_init(hisi_hba);
}
EXPORT_SYMBOL_GPL(hisi_sas_scan_start);

int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	struct hisi_hba *hisi_hba = shost_priv(shost);
	struct sas_ha_struct *sha = &hisi_hba->sha;

	/* Wait for PHY up interrupt to occur */
	if (time < HZ)
		return 0;

	sas_drain_work(sha);
	return 1;
}
EXPORT_SYMBOL_GPL(hisi_sas_scan_finished);
static void hisi_sas_phyup_work(struct work_struct *work)
{
	struct hisi_sas_phy *phy =
		container_of(work, typeof(*phy), works[HISI_PHYE_PHY_UP]);
	struct hisi_hba *hisi_hba = phy->hisi_hba;
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	int phy_no = sas_phy->id;

	if (phy->identify.target_port_protocols == SAS_PROTOCOL_SSP)
		hisi_hba->hw->sl_notify_ssp(hisi_hba, phy_no);
	hisi_sas_bytes_dmaed(hisi_hba, phy_no);
}

static void hisi_sas_linkreset_work(struct work_struct *work)
{
	struct hisi_sas_phy *phy =
		container_of(work, typeof(*phy), works[HISI_PHYE_LINK_RESET]);
	struct asd_sas_phy *sas_phy = &phy->sas_phy;

	hisi_sas_control_phy(sas_phy, PHY_FUNC_LINK_RESET, NULL);
}

static const work_func_t hisi_sas_phye_fns[HISI_PHYES_NUM] = {
	[HISI_PHYE_PHY_UP] = hisi_sas_phyup_work,
	[HISI_PHYE_LINK_RESET] = hisi_sas_linkreset_work,
};
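/*
 * The table above is indexed by enum hisi_sas_phy_event;
 * hisi_sas_notify_phy_event() below queues phy->works[event] directly,
 * so adding a new event means extending both HISI_PHYES_NUM and this
 * table together.
 */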
bool hisi_sas_notify_phy_event(struct hisi_sas_phy *phy,
			       enum hisi_sas_phy_event event)
{
	struct hisi_hba *hisi_hba = phy->hisi_hba;

	if (WARN_ON(event >= HISI_PHYES_NUM))
		return false;

	return queue_work(hisi_hba->wq, &phy->works[event]);
}
EXPORT_SYMBOL_GPL(hisi_sas_notify_phy_event);
static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	int i;

	phy->hisi_hba = hisi_hba;
	phy->port = NULL;
	phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS;
	phy->maximum_linkrate = hisi_hba->hw->phy_get_max_linkrate();
	sas_phy->enabled = (phy_no < hisi_hba->n_phy) ? 1 : 0;
	sas_phy->class = SAS;
	sas_phy->iproto = SAS_PROTOCOL_ALL;
	sas_phy->tproto = 0;
	sas_phy->type = PHY_TYPE_PHYSICAL;
	sas_phy->role = PHY_ROLE_INITIATOR;
	sas_phy->oob_mode = OOB_NOT_CONNECTED;
	sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
	sas_phy->id = phy_no;
	sas_phy->sas_addr = &hisi_hba->sas_addr[0];
	sas_phy->frame_rcvd = &phy->frame_rcvd[0];
	sas_phy->ha = (struct sas_ha_struct *)hisi_hba->shost->hostdata;
	sas_phy->lldd_phy = phy;

	for (i = 0; i < HISI_PHYES_NUM; i++)
		INIT_WORK(&phy->works[i], hisi_sas_phye_fns[i]);

	spin_lock_init(&phy->lock);
}
static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy)
{
	struct sas_ha_struct *sas_ha = sas_phy->ha;
	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
	struct hisi_sas_phy *phy = sas_phy->lldd_phy;
	struct asd_sas_port *sas_port = sas_phy->port;
	struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
	unsigned long flags;

	if (!sas_port)
		return;

	spin_lock_irqsave(&hisi_hba->lock, flags);
	port->port_attached = 1;
	port->id = phy->port_id;
	phy->port = port;
	sas_port->lldd_port = port;
	spin_unlock_irqrestore(&hisi_hba->lock, flags);
}
static void hisi_sas_do_release_task(struct hisi_hba *hisi_hba, struct sas_task *task,
				     struct hisi_sas_slot *slot)
{
	if (task) {
		unsigned long flags;
		struct task_status_struct *ts;

		ts = &task->task_status;

		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_ABORTED_TASK;
		spin_lock_irqsave(&task->task_state_lock, flags);
		task->task_state_flags &=
			~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
		if (!slot->is_internal && task->task_proto != SAS_PROTOCOL_SMP)
			task->task_state_flags |= SAS_TASK_STATE_DONE;
		spin_unlock_irqrestore(&task->task_state_lock, flags);
	}

	hisi_sas_slot_task_free(hisi_hba, task, slot);
}
static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
				  struct domain_device *device)
{
	struct hisi_sas_slot *slot, *slot2;
	struct hisi_sas_device *sas_dev = device->lldd_dev;

	list_for_each_entry_safe(slot, slot2, &sas_dev->list, entry)
		hisi_sas_do_release_task(hisi_hba, slot->task, slot);
}
void hisi_sas_release_tasks(struct hisi_hba *hisi_hba)
{
	struct hisi_sas_device *sas_dev;
	struct domain_device *device;
	int i;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		sas_dev = &hisi_hba->devices[i];
		device = sas_dev->sas_device;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED) ||
		    !device)
			continue;

		hisi_sas_release_task(hisi_hba, device);
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_release_tasks);
static void hisi_sas_dereg_device(struct hisi_hba *hisi_hba,
				  struct domain_device *device)
{
	if (hisi_hba->hw->dereg_device)
		hisi_hba->hw->dereg_device(hisi_hba, device);
}
static void hisi_sas_dev_gone(struct domain_device *device)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;

	dev_info(dev, "dev[%d:%x] is gone\n",
		 sas_dev->device_id, sas_dev->dev_type);

	if (!test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) {
		hisi_sas_internal_task_abort(hisi_hba, device,
					     HISI_SAS_INT_ABT_DEV, 0);

		hisi_sas_dereg_device(hisi_hba, device);

		down(&hisi_hba->sem);
		hisi_hba->hw->clear_itct(hisi_hba, sas_dev);
		up(&hisi_hba->sem);
		device->lldd_dev = NULL;
	}

	if (hisi_hba->hw->free_device)
		hisi_hba->hw->free_device(sas_dev);
	sas_dev->dev_type = SAS_PHY_UNUSED;
}
static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags)
{
	return hisi_sas_task_exec(task, gfp_flags, 0, NULL);
}
static int hisi_sas_phy_set_linkrate(struct hisi_hba *hisi_hba, int phy_no,
				     struct sas_phy_linkrates *r)
{
	struct sas_phy_linkrates _r;

	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	enum sas_linkrate min, max;

	if (r->minimum_linkrate > SAS_LINK_RATE_1_5_GBPS)
		return -EINVAL;

	if (r->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) {
		max = sas_phy->phy->maximum_linkrate;
		min = r->minimum_linkrate;
	} else if (r->minimum_linkrate == SAS_LINK_RATE_UNKNOWN) {
		max = r->maximum_linkrate;
		min = sas_phy->phy->minimum_linkrate;
	} else
		return -EINVAL;

	_r.maximum_linkrate = max;
	_r.minimum_linkrate = min;

	sas_phy->phy->maximum_linkrate = max;
	sas_phy->phy->minimum_linkrate = min;

	hisi_hba->hw->phy_disable(hisi_hba, phy_no);
	msleep(100);
	hisi_hba->hw->phy_set_linkrate(hisi_hba, phy_no, &_r);
	hisi_hba->hw->phy_start(hisi_hba, phy_no);

	return 0;
}
static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
				void *funcdata)
{
	struct sas_ha_struct *sas_ha = sas_phy->ha;
	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
	int phy_no = sas_phy->id;

	switch (func) {
	case PHY_FUNC_HARD_RESET:
		hisi_hba->hw->phy_hard_reset(hisi_hba, phy_no);
		break;

	case PHY_FUNC_LINK_RESET:
		hisi_hba->hw->phy_disable(hisi_hba, phy_no);
		msleep(100);
		hisi_hba->hw->phy_start(hisi_hba, phy_no);
		break;

	case PHY_FUNC_DISABLE:
		hisi_hba->hw->phy_disable(hisi_hba, phy_no);
		break;

	case PHY_FUNC_SET_LINK_RATE:
		return hisi_sas_phy_set_linkrate(hisi_hba, phy_no, funcdata);
	case PHY_FUNC_GET_EVENTS:
		if (hisi_hba->hw->get_events) {
			hisi_hba->hw->get_events(hisi_hba, phy_no);
			break;
		}
		/* fallthru */
	case PHY_FUNC_RELEASE_SPINUP_HOLD:
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}
static void hisi_sas_task_done(struct sas_task *task)
{
	del_timer(&task->slow_task->timer);
	complete(&task->slow_task->completion);
}

static void hisi_sas_tmf_timedout(struct timer_list *t)
{
	struct sas_task_slow *slow = from_timer(slow, t, timer);
	struct sas_task *task = slow->task;
	unsigned long flags;
	bool is_completed = true;

	spin_lock_irqsave(&task->task_state_lock, flags);
	if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
		task->task_state_flags |= SAS_TASK_STATE_ABORTED;
		is_completed = false;
	}
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	if (!is_completed)
		complete(&task->slow_task->completion);
}
#define TASK_TIMEOUT 20
#define TASK_RETRY 3
#define INTERNAL_ABORT_TIMEOUT 6
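/*
 * All three values are in seconds (TASK_RETRY is a retry count); the
 * timeouts are multiplied by HZ when arming the slow-task timer below,
 * giving a 20 s deadline for TMFs and a 6 s deadline for internal aborts.
 */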
static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
					   void *parameter, u32 para_len,
					   struct hisi_sas_tmf_task *tmf)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = sas_dev->hisi_hba;
	struct device *dev = hisi_hba->dev;
	struct sas_task *task;
	int res, retry;

	for (retry = 0; retry < TASK_RETRY; retry++) {
		task = sas_alloc_slow_task(GFP_KERNEL);
		if (!task)
			return -ENOMEM;

		task->dev = device;
		task->task_proto = device->tproto;

		if (dev_is_sata(device)) {
			task->ata_task.device_control_reg_update = 1;
			memcpy(&task->ata_task.fis, parameter, para_len);
		} else {
			memcpy(&task->ssp_task, parameter, para_len);
		}
		task->task_done = hisi_sas_task_done;

		task->slow_task->timer.function = hisi_sas_tmf_timedout;
		task->slow_task->timer.expires = jiffies + TASK_TIMEOUT*HZ;
		add_timer(&task->slow_task->timer);

		res = hisi_sas_task_exec(task, GFP_KERNEL, 1, tmf);
		if (res) {
			del_timer(&task->slow_task->timer);
			dev_err(dev, "abort tmf: executing internal task failed: %d\n",
				res);
			goto ex_err;
		}

		wait_for_completion(&task->slow_task->completion);
		res = TMF_RESP_FUNC_FAILED;
		/* Even TMF timed out, return direct. */
		if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
			if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
				struct hisi_sas_slot *slot = task->lldd_task;

				dev_err(dev, "abort tmf: TMF task timeout and not done\n");
				if (slot) {
					struct hisi_sas_cq *cq =
					       &hisi_hba->cq[slot->dlvry_queue];
					/*
					 * flush tasklet to avoid free'ing task
					 * before using task in IO completion
					 */
					tasklet_kill(&cq->tasklet);
					slot->task = NULL;
				}

				goto ex_err;
			} else
				dev_err(dev, "abort tmf: TMF task timeout\n");
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
			res = TMF_RESP_FUNC_COMPLETE;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == TMF_RESP_FUNC_SUCC) {
			res = TMF_RESP_FUNC_SUCC;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAS_DATA_UNDERRUN) {
			/* no error, but return the number of bytes of
			 * underrun
			 */
			dev_warn(dev, "abort tmf: task to dev %016llx "
				 "resp: 0x%x sts 0x%x underrun\n",
				 SAS_ADDR(device->sas_addr),
				 task->task_status.resp,
				 task->task_status.stat);
			res = task->task_status.residual;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAS_DATA_OVERRUN) {
			dev_warn(dev, "abort tmf: blocked task error\n");
			res = -EMSGSIZE;
			break;
		}

		dev_warn(dev, "abort tmf: task to dev "
			 "%016llx resp: 0x%x status 0x%x\n",
			 SAS_ADDR(device->sas_addr), task->task_status.resp,
			 task->task_status.stat);
		sas_free_task(task);
		task = NULL;
	}
ex_err:
	if (retry == TASK_RETRY)
		dev_warn(dev, "abort tmf: executing internal task failed!\n");
	sas_free_task(task);
	return res;
}
static void hisi_sas_fill_ata_reset_cmd(struct ata_device *dev,
					bool reset, int pmp, u8 *fis)
{
	struct ata_taskfile tf;

	ata_tf_init(dev, &tf);
	if (reset)
		tf.ctl |= ATA_SRST;
	else
		tf.ctl &= ~ATA_SRST;
	tf.command = ATA_CMD_DEV_RESET;
	ata_tf_to_fis(&tf, pmp, 0, fis);
}
static int hisi_sas_softreset_ata_disk(struct domain_device *device)
{
	u8 fis[20] = {0};
	struct ata_port *ap = device->sata_dev.ap;
	struct ata_link *link;
	int rc = TMF_RESP_FUNC_FAILED;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	int s = sizeof(struct host_to_dev_fis);

	ata_for_each_link(link, ap, EDGE) {
		int pmp = sata_srst_pmp(link);

		hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis);
		rc = hisi_sas_exec_internal_tmf_task(device, fis, s, NULL);
		if (rc != TMF_RESP_FUNC_COMPLETE)
			break;
	}

	if (rc == TMF_RESP_FUNC_COMPLETE) {
		ata_for_each_link(link, ap, EDGE) {
			int pmp = sata_srst_pmp(link);

			hisi_sas_fill_ata_reset_cmd(link->device, 0, pmp, fis);
			rc = hisi_sas_exec_internal_tmf_task(device, fis,
							     s, NULL);
			if (rc != TMF_RESP_FUNC_COMPLETE)
				dev_err(dev, "ata disk de-reset failed\n");
		}
	} else {
		dev_err(dev, "ata disk reset failed\n");
	}

	if (rc == TMF_RESP_FUNC_COMPLETE)
		hisi_sas_release_task(hisi_hba, device);

	return rc;
}
static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
				u8 *lun, struct hisi_sas_tmf_task *tmf)
{
	struct sas_ssp_task ssp_task;

	if (!(device->tproto & SAS_PROTOCOL_SSP))
		return TMF_RESP_FUNC_ESUPP;

	memcpy(ssp_task.LUN, lun, 8);

	return hisi_sas_exec_internal_tmf_task(device, &ssp_task,
					       sizeof(ssp_task), tmf);
}
static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba)
{
	u32 state = hisi_hba->hw->get_phys_state(hisi_hba);
	int i;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
		struct domain_device *device = sas_dev->sas_device;
		struct asd_sas_port *sas_port;
		struct hisi_sas_port *port;
		struct hisi_sas_phy *phy = NULL;
		struct asd_sas_phy *sas_phy;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED)
				|| !device || !device->port)
			continue;

		sas_port = device->port;
		port = to_hisi_sas_port(sas_port);

		list_for_each_entry(sas_phy, &sas_port->phy_list, port_phy_el)
			if (state & BIT(sas_phy->id)) {
				phy = sas_phy->lldd_phy;
				break;
			}

		if (phy) {
			port->id = phy->port_id;

			/* Update linkrate of directly attached device. */
			if (!device->parent)
				device->linkrate = phy->sas_phy.linkrate;

			hisi_hba->hw->setup_itct(hisi_hba, sas_dev);
		} else
			port->id = 0xff;
	}
}
static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 old_state,
				     u32 state)
{
	struct sas_ha_struct *sas_ha = &hisi_hba->sha;
	struct asd_sas_port *_sas_port = NULL;
	int phy_no;

	for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
		struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
		struct asd_sas_phy *sas_phy = &phy->sas_phy;
		struct asd_sas_port *sas_port = sas_phy->port;
		bool do_port_check = !!(_sas_port != sas_port);

		if (!sas_phy->phy->enabled)
			continue;

		/* Report PHY state change to libsas */
		if (state & BIT(phy_no)) {
			if (do_port_check && sas_port && sas_port->port_dev) {
				struct domain_device *dev = sas_port->port_dev;

				_sas_port = sas_port;

				if (DEV_IS_EXPANDER(dev->dev_type))
					sas_ha->notify_port_event(sas_phy,
							PORTE_BROADCAST_RCVD);
			}
		} else if (old_state & (1 << phy_no))
			/* PHY down but was up before */
			hisi_sas_phy_down(hisi_hba, phy_no, 0);
	}
}
static void hisi_sas_reset_init_all_devices(struct hisi_hba *hisi_hba)
{
	struct hisi_sas_device *sas_dev;
	struct domain_device *device;
	int i;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		sas_dev = &hisi_hba->devices[i];
		device = sas_dev->sas_device;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device)
			continue;

		hisi_sas_init_device(device);
	}
}
static void hisi_sas_send_ata_reset_each_phy(struct hisi_hba *hisi_hba,
					     struct asd_sas_port *sas_port,
					     struct domain_device *device)
{
	struct hisi_sas_tmf_task tmf_task = { .force_phy = 1 };
	struct ata_port *ap = device->sata_dev.ap;
	struct device *dev = hisi_hba->dev;
	int s = sizeof(struct host_to_dev_fis);
	int rc = TMF_RESP_FUNC_FAILED;
	struct asd_sas_phy *sas_phy;
	struct ata_link *link;
	u8 fis[20] = {0};
	u32 state;

	state = hisi_hba->hw->get_phys_state(hisi_hba);
	list_for_each_entry(sas_phy, &sas_port->phy_list, port_phy_el) {
		if (!(state & BIT(sas_phy->id)))
			continue;

		ata_for_each_link(link, ap, EDGE) {
			int pmp = sata_srst_pmp(link);

			tmf_task.phy_id = sas_phy->id;
			hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis);
			rc = hisi_sas_exec_internal_tmf_task(device, fis, s,
							     &tmf_task);
			if (rc != TMF_RESP_FUNC_COMPLETE) {
				dev_err(dev, "phy%d ata reset failed rc=%d\n",
					sas_phy->id, rc);
				break;
			}
		}
	}
}
static void hisi_sas_terminate_stp_reject(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	int port_no, rc, i;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
		struct domain_device *device = sas_dev->sas_device;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device)
			continue;

		rc = hisi_sas_internal_task_abort(hisi_hba, device,
						  HISI_SAS_INT_ABT_DEV, 0);
		if (rc < 0)
			dev_err(dev, "STP reject: abort dev failed %d\n", rc);
	}

	for (port_no = 0; port_no < hisi_hba->n_phy; port_no++) {
		struct hisi_sas_port *port = &hisi_hba->port[port_no];
		struct asd_sas_port *sas_port = &port->sas_port;
		struct domain_device *port_dev = sas_port->port_dev;
		struct domain_device *device;

		if (!port_dev || !DEV_IS_EXPANDER(port_dev->dev_type))
			continue;

		/* Try to find a SATA device */
		list_for_each_entry(device, &sas_port->dev_list,
				    dev_list_node) {
			if (dev_is_sata(device)) {
				hisi_sas_send_ata_reset_each_phy(hisi_hba,
								 sas_port,
								 device);
				break;
			}
		}
	}
}
void hisi_sas_controller_reset_prepare(struct hisi_hba *hisi_hba)
{
	struct Scsi_Host *shost = hisi_hba->shost;

	down(&hisi_hba->sem);
	hisi_hba->phy_state = hisi_hba->hw->get_phys_state(hisi_hba);

	scsi_block_requests(shost);
	hisi_hba->hw->wait_cmds_complete_timeout(hisi_hba, 100, 5000);

	if (timer_pending(&hisi_hba->timer))
		del_timer_sync(&hisi_hba->timer);

	set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
}
EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_prepare);
void hisi_sas_controller_reset_done(struct hisi_hba *hisi_hba)
{
	struct Scsi_Host *shost = hisi_hba->shost;
	u32 state;

	/* Init and wait for PHYs to come up and all libsas event finished. */
	hisi_hba->hw->phys_init(hisi_hba);
	msleep(1000);
	hisi_sas_refresh_port_id(hisi_hba);
	clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
	up(&hisi_hba->sem);

	if (hisi_hba->reject_stp_links_msk)
		hisi_sas_terminate_stp_reject(hisi_hba);
	hisi_sas_reset_init_all_devices(hisi_hba);
	scsi_unblock_requests(shost);
	clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);

	state = hisi_hba->hw->get_phys_state(hisi_hba);
	hisi_sas_rescan_topology(hisi_hba, hisi_hba->phy_state, state);
}
EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_done);
static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	struct Scsi_Host *shost = hisi_hba->shost;
	int rc;

	if (!hisi_hba->hw->soft_reset)
		return -1;

	if (test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
		return -1;

	dev_info(dev, "controller resetting...\n");
	hisi_sas_controller_reset_prepare(hisi_hba);

	rc = hisi_hba->hw->soft_reset(hisi_hba);
	if (rc) {
		dev_warn(dev, "controller reset failed (%d)\n", rc);
		clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
		up(&hisi_hba->sem);
		scsi_unblock_requests(shost);
		clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
		return rc;
	}

	hisi_sas_controller_reset_done(hisi_hba);
	dev_info(dev, "controller reset complete\n");

	return 0;
}
static int hisi_sas_abort_task(struct sas_task *task)
{
	struct scsi_lun lun;
	struct hisi_sas_tmf_task tmf_task;
	struct domain_device *device = task->dev;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba;
	struct device *dev;
	int rc = TMF_RESP_FUNC_FAILED;
	unsigned long flags;

	if (!sas_dev)
		return TMF_RESP_FUNC_FAILED;

	hisi_hba = dev_to_hisi_hba(task->dev);
	dev = hisi_hba->dev;

	spin_lock_irqsave(&task->task_state_lock, flags);
	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
		struct hisi_sas_slot *slot = task->lldd_task;
		struct hisi_sas_cq *cq;

		if (slot) {
			/*
			 * flush tasklet to avoid free'ing task
			 * before using task in IO completion
			 */
			cq = &hisi_hba->cq[slot->dlvry_queue];
			tasklet_kill(&cq->tasklet);
		}
		spin_unlock_irqrestore(&task->task_state_lock, flags);
		rc = TMF_RESP_FUNC_COMPLETE;
		goto out;
	}
	task->task_state_flags |= SAS_TASK_STATE_ABORTED;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	sas_dev->dev_status = HISI_SAS_DEV_EH;
	if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
		struct scsi_cmnd *cmnd = task->uldd_task;
		struct hisi_sas_slot *slot = task->lldd_task;
		u32 tag = slot->idx;
		int rc2;

		int_to_scsilun(cmnd->device->lun, &lun);
		tmf_task.tmf = TMF_ABORT_TASK;
		tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);

		rc = hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun,
						  &tmf_task);

		rc2 = hisi_sas_internal_task_abort(hisi_hba, device,
						   HISI_SAS_INT_ABT_CMD, tag);
		if (rc2 < 0) {
			dev_err(dev, "abort task: internal abort (%d)\n", rc2);
			return TMF_RESP_FUNC_FAILED;
		}

		/*
		 * If the TMF finds that the IO is not in the device and also
		 * the internal abort does not succeed, then it is safe to
		 * free the slot.
		 * Note: if the internal abort succeeds then the slot
		 * will have already been completed
		 */
		if (rc == TMF_RESP_FUNC_COMPLETE && rc2 != TMF_RESP_FUNC_SUCC) {
			if (task->lldd_task)
				hisi_sas_do_release_task(hisi_hba, task, slot);
		}
	} else if (task->task_proto & SAS_PROTOCOL_SATA ||
		   task->task_proto & SAS_PROTOCOL_STP) {
		if (task->dev->dev_type == SAS_SATA_DEV) {
			rc = hisi_sas_internal_task_abort(hisi_hba, device,
							  HISI_SAS_INT_ABT_DEV,
							  0);
			if (rc < 0) {
				dev_err(dev, "abort task: internal abort failed\n");
				goto out;
			}
			hisi_sas_dereg_device(hisi_hba, device);
			rc = hisi_sas_softreset_ata_disk(device);
		}
	} else if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SMP) {
		/* SMP */
		struct hisi_sas_slot *slot = task->lldd_task;
		u32 tag = slot->idx;
		struct hisi_sas_cq *cq = &hisi_hba->cq[slot->dlvry_queue];

		rc = hisi_sas_internal_task_abort(hisi_hba, device,
						  HISI_SAS_INT_ABT_CMD, tag);
		if (((rc < 0) || (rc == TMF_RESP_FUNC_FAILED)) &&
					task->lldd_task) {
			/*
			 * flush tasklet to avoid free'ing task
			 * before using task in IO completion
			 */
			tasklet_kill(&cq->tasklet);
			slot->task = NULL;
		}
	}

out:
	if (rc != TMF_RESP_FUNC_COMPLETE)
		dev_notice(dev, "abort task: rc=%d\n", rc);
	return rc;
}
static int hisi_sas_abort_task_set(struct domain_device *device, u8 *lun)
{
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	struct hisi_sas_tmf_task tmf_task;
	int rc = TMF_RESP_FUNC_FAILED;

	rc = hisi_sas_internal_task_abort(hisi_hba, device,
					  HISI_SAS_INT_ABT_DEV, 0);
	if (rc < 0) {
		dev_err(dev, "abort task set: internal abort rc=%d\n", rc);
		return TMF_RESP_FUNC_FAILED;
	}
	hisi_sas_dereg_device(hisi_hba, device);

	tmf_task.tmf = TMF_ABORT_TASK_SET;
	rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);

	if (rc == TMF_RESP_FUNC_COMPLETE)
		hisi_sas_release_task(hisi_hba, device);

	return rc;
}
static int hisi_sas_clear_aca(struct domain_device *device, u8 *lun)
{
	int rc = TMF_RESP_FUNC_FAILED;
	struct hisi_sas_tmf_task tmf_task;

	tmf_task.tmf = TMF_CLEAR_ACA;
	rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);

	return rc;
}
static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device)
{
	struct sas_phy *local_phy = sas_get_local_phy(device);
	int rc, reset_type = (device->dev_type == SAS_SATA_DEV ||
			(device->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct sas_ha_struct *sas_ha = &hisi_hba->sha;
	struct asd_sas_phy *sas_phy = sas_ha->sas_phy[local_phy->number];
	struct hisi_sas_phy *phy = container_of(sas_phy,
			struct hisi_sas_phy, sas_phy);
	DECLARE_COMPLETION_ONSTACK(phyreset);

	if (scsi_is_sas_phy_local(local_phy)) {
		phy->in_reset = 1;
		phy->reset_completion = &phyreset;
	}

	rc = sas_phy_reset(local_phy, reset_type);
	sas_put_local_phy(local_phy);

	if (scsi_is_sas_phy_local(local_phy)) {
		int ret = wait_for_completion_timeout(&phyreset, 2 * HZ);
		unsigned long flags;

		spin_lock_irqsave(&phy->lock, flags);
		phy->reset_completion = NULL;
		phy->in_reset = 0;
		spin_unlock_irqrestore(&phy->lock, flags);

		/* report PHY down if timed out */
		if (!ret)
			hisi_sas_phy_down(hisi_hba, sas_phy->id, 0);
	}

	return rc;
}
static int hisi_sas_I_T_nexus_reset(struct domain_device *device)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	int rc = TMF_RESP_FUNC_FAILED;

	if (sas_dev->dev_status != HISI_SAS_DEV_EH)
		return TMF_RESP_FUNC_FAILED;
	sas_dev->dev_status = HISI_SAS_DEV_NORMAL;

	rc = hisi_sas_internal_task_abort(hisi_hba, device,
					  HISI_SAS_INT_ABT_DEV, 0);
	if (rc < 0) {
		dev_err(dev, "I_T nexus reset: internal abort (%d)\n", rc);
		return TMF_RESP_FUNC_FAILED;
	}
	hisi_sas_dereg_device(hisi_hba, device);

	rc = hisi_sas_debug_I_T_nexus_reset(device);

	if ((rc == TMF_RESP_FUNC_COMPLETE) || (rc == -ENODEV))
		hisi_sas_release_task(hisi_hba, device);

	return rc;
}
static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	int rc = TMF_RESP_FUNC_FAILED;

	sas_dev->dev_status = HISI_SAS_DEV_EH;
	if (dev_is_sata(device)) {
		struct sas_phy *phy;

		/* Clear internal IO and then hardreset */
		rc = hisi_sas_internal_task_abort(hisi_hba, device,
						  HISI_SAS_INT_ABT_DEV, 0);
		if (rc < 0) {
			dev_err(dev, "lu_reset: internal abort failed\n");
			goto out;
		}
		hisi_sas_dereg_device(hisi_hba, device);

		phy = sas_get_local_phy(device);

		rc = sas_phy_reset(phy, 1);

		if (rc == 0)
			hisi_sas_release_task(hisi_hba, device);
		sas_put_local_phy(phy);
	} else {
		struct hisi_sas_tmf_task tmf_task = { .tmf = TMF_LU_RESET };

		rc = hisi_sas_internal_task_abort(hisi_hba, device,
						  HISI_SAS_INT_ABT_DEV, 0);
		if (rc < 0) {
			dev_err(dev, "lu_reset: internal abort failed\n");
			goto out;
		}
		hisi_sas_dereg_device(hisi_hba, device);

		rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
		if (rc == TMF_RESP_FUNC_COMPLETE)
			hisi_sas_release_task(hisi_hba, device);
	}
out:
	if (rc != TMF_RESP_FUNC_COMPLETE)
		dev_err(dev, "lu_reset: for device[%d]:rc= %d\n",
			sas_dev->device_id, rc);
	return rc;
}
static int hisi_sas_clear_nexus_ha(struct sas_ha_struct *sas_ha)
{
	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
	struct device *dev = hisi_hba->dev;
	HISI_SAS_DECLARE_RST_WORK_ON_STACK(r);
	int rc, i;

	queue_work(hisi_hba->wq, &r.work);
	wait_for_completion(r.completion);
	if (!r.done)
		return TMF_RESP_FUNC_FAILED;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
		struct domain_device *device = sas_dev->sas_device;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device ||
		    DEV_IS_EXPANDER(device->dev_type))
			continue;

		rc = hisi_sas_debug_I_T_nexus_reset(device);
		if (rc != TMF_RESP_FUNC_COMPLETE)
			dev_info(dev, "clear nexus ha: for device[%d] rc=%d\n",
				 sas_dev->device_id, rc);
	}

	hisi_sas_release_tasks(hisi_hba);

	return TMF_RESP_FUNC_COMPLETE;
}
static int hisi_sas_query_task(struct sas_task *task)
{
	struct scsi_lun lun;
	struct hisi_sas_tmf_task tmf_task;
	int rc = TMF_RESP_FUNC_FAILED;

	if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
		struct scsi_cmnd *cmnd = task->uldd_task;
		struct domain_device *device = task->dev;
		struct hisi_sas_slot *slot = task->lldd_task;
		u32 tag = slot->idx;

		int_to_scsilun(cmnd->device->lun, &lun);
		tmf_task.tmf = TMF_QUERY_TASK;
		tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);

		rc = hisi_sas_debug_issue_ssp_tmf(device,
						  lun.scsi_lun,
						  &tmf_task);
		switch (rc) {
		/* The task is still in Lun, release it then */
		case TMF_RESP_FUNC_SUCC:
		/* The task is not in Lun or failed, reset the phy */
		case TMF_RESP_FUNC_FAILED:
		case TMF_RESP_FUNC_COMPLETE:
			break;
		default:
			rc = TMF_RESP_FUNC_FAILED;
			break;
		}
	}
	return rc;
}
static int
hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
				  struct sas_task *task, int abort_flag,
				  int task_tag)
{
	struct domain_device *device = task->dev;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct device *dev = hisi_hba->dev;
	struct hisi_sas_port *port;
	struct hisi_sas_slot *slot;
	struct asd_sas_port *sas_port = device->port;
	struct hisi_sas_cmd_hdr *cmd_hdr_base;
	struct hisi_sas_dq *dq = sas_dev->dq;
	int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx;
	unsigned long flags, flags_dq = 0;
	int wr_q_index;

	if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags)))
		return -EINVAL;

	if (!device->port)
		return -1;

	port = to_hisi_sas_port(sas_port);

	/* simply get a slot and send abort command */
	spin_lock_irqsave(&hisi_hba->lock, flags);
	rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
	if (rc) {
		spin_unlock_irqrestore(&hisi_hba->lock, flags);
		goto err_out;
	}
	spin_unlock_irqrestore(&hisi_hba->lock, flags);

	slot = &hisi_hba->slot_info[slot_idx];

	spin_lock_irqsave(&dq->lock, flags_dq);
	wr_q_index = hisi_hba->hw->get_free_slot(hisi_hba, dq);
	if (wr_q_index < 0) {
		spin_unlock_irqrestore(&dq->lock, flags_dq);
		rc = -EAGAIN;
		goto err_out_tag;
	}
	list_add_tail(&slot->delivery, &dq->list);
	spin_unlock_irqrestore(&dq->lock, flags_dq);

	dlvry_queue = dq->id;
	dlvry_queue_slot = wr_q_index;

	slot->n_elem = n_elem;
	slot->dlvry_queue = dlvry_queue;
	slot->dlvry_queue_slot = dlvry_queue_slot;
	cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
	slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
	slot->task = task;
	slot->port = port;
	slot->is_internal = true;
	task->lldd_task = slot;

	memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
	memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
	memset(hisi_sas_status_buf_addr_mem(slot), 0, HISI_SAS_STATUS_BUF_SZ);

	hisi_sas_task_prep_abort(hisi_hba, slot, device_id,
				 abort_flag, task_tag);

	spin_lock_irqsave(&task->task_state_lock, flags);
	task->task_state_flags |= SAS_TASK_AT_INITIATOR;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	WRITE_ONCE(slot->ready, 1);
	/* send abort command to the chip */
	spin_lock_irqsave(&dq->lock, flags);
	list_add_tail(&slot->entry, &sas_dev->list);
	hisi_hba->hw->start_delivery(dq);
	spin_unlock_irqrestore(&dq->lock, flags);

	return 0;

err_out_tag:
	spin_lock_irqsave(&hisi_hba->lock, flags);
	hisi_sas_slot_index_free(hisi_hba, slot_idx);
	spin_unlock_irqrestore(&hisi_hba->lock, flags);
err_out:
	dev_err(dev, "internal abort task prep: failed[%d]!\n", rc);

	return rc;
}
/**
 * hisi_sas_internal_task_abort -- execute an internal
 * abort command for single IO command or a device
 * @hisi_hba: host controller struct
 * @device: domain device
 * @abort_flag: mode of operation, device or single IO
 * @tag: tag of IO to be aborted (only relevant to single
 *       IO mode)
 */
static int
hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
			     struct domain_device *device,
			     int abort_flag, int tag)
{
	struct sas_task *task;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct device *dev = hisi_hba->dev;
	int res;

	/*
	 * The interface is not realized means this HW don't support internal
	 * abort, or don't need to do internal abort. Then here, we return
	 * TMF_RESP_FUNC_FAILED and let other steps go on, which depends that
	 * the internal abort has been executed and returned CQ.
	 */
	if (!hisi_hba->hw->prep_abort)
		return TMF_RESP_FUNC_FAILED;

	task = sas_alloc_slow_task(GFP_KERNEL);
	if (!task)
		return -ENOMEM;

	task->dev = device;
	task->task_proto = device->tproto;
	task->task_done = hisi_sas_task_done;
	task->slow_task->timer.function = hisi_sas_tmf_timedout;
	task->slow_task->timer.expires = jiffies + INTERNAL_ABORT_TIMEOUT*HZ;
	add_timer(&task->slow_task->timer);

	res = hisi_sas_internal_abort_task_exec(hisi_hba, sas_dev->device_id,
						task, abort_flag, tag);
	if (res) {
		del_timer(&task->slow_task->timer);
		dev_err(dev, "internal task abort: executing internal task failed: %d\n",
			res);
		goto exit;
	}
	wait_for_completion(&task->slow_task->completion);
	res = TMF_RESP_FUNC_FAILED;

	/* Internal abort timed out */
	if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
		if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
			struct hisi_sas_slot *slot = task->lldd_task;

			if (slot) {
				struct hisi_sas_cq *cq =
					&hisi_hba->cq[slot->dlvry_queue];
				/*
				 * flush tasklet to avoid free'ing task
				 * before using task in IO completion
				 */
				tasklet_kill(&cq->tasklet);
				slot->task = NULL;
			}
			dev_err(dev, "internal task abort: timeout and not done.\n");
			res = -EIO;
			goto exit;
		} else
			dev_err(dev, "internal task abort: timeout.\n");
	}

	if (task->task_status.resp == SAS_TASK_COMPLETE &&
	    task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
		res = TMF_RESP_FUNC_COMPLETE;
		goto exit;
	}

	if (task->task_status.resp == SAS_TASK_COMPLETE &&
	    task->task_status.stat == TMF_RESP_FUNC_SUCC) {
		res = TMF_RESP_FUNC_SUCC;
		goto exit;
	}

exit:
	dev_dbg(dev, "internal task abort: task to dev %016llx task=%p "
		"resp: 0x%x sts 0x%x\n",
		SAS_ADDR(device->sas_addr),
		task,
		task->task_status.resp, /* 0 is complete, -1 is undelivered */
		task->task_status.stat);
	sas_free_task(task);

	return res;
}
static void hisi_sas_port_formed(struct asd_sas_phy *sas_phy)
{
	hisi_sas_port_notify_formed(sas_phy);
}

static void hisi_sas_port_deformed(struct asd_sas_phy *sas_phy)
{
}
static int hisi_sas_write_gpio(struct sas_ha_struct *sha, u8 reg_type,
			       u8 reg_index, u8 reg_count, u8 *write_data)
{
	struct hisi_hba *hisi_hba = sha->lldd_ha;

	if (!hisi_hba->hw->write_gpio)
		return -EOPNOTSUPP;

	return hisi_hba->hw->write_gpio(hisi_hba, reg_type,
					reg_index, reg_count, write_data);
}
static void hisi_sas_phy_disconnected(struct hisi_sas_phy *phy)
{
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct sas_phy *sphy = sas_phy->phy;
	struct sas_phy_data *d = sphy->hostdata;

	phy->phy_attached = 0;
	phy->phy_type = 0;
	phy->port = NULL;

	if (d->enable)
		sphy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN;
	else
		sphy->negotiated_linkrate = SAS_PHY_DISABLED;
}
void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct sas_ha_struct *sas_ha = &hisi_hba->sha;
	struct device *dev = hisi_hba->dev;

	if (rdy) {
		/* Phy down but ready */
		hisi_sas_bytes_dmaed(hisi_hba, phy_no);
		hisi_sas_port_notify_formed(sas_phy);
	} else {
		struct hisi_sas_port *port = phy->port;

		if (test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags) ||
		    phy->in_reset) {
			dev_info(dev, "ignore flutter phy%d down\n", phy_no);
			return;
		}
		/* Phy down and not ready */
		sas_ha->notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL);
		sas_phy_disconnected(sas_phy);

		if (port) {
			if (phy->phy_type & PORT_TYPE_SAS) {
				int port_id = port->id;

				if (!hisi_hba->hw->get_wideport_bitmap(hisi_hba,
								       port_id))
					port->port_attached = 0;
			} else if (phy->phy_type & PORT_TYPE_SATA)
				port->port_attached = 0;
		}
		hisi_sas_phy_disconnected(phy);
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_phy_down);
void hisi_sas_kill_tasklets(struct hisi_hba *hisi_hba)
{
	int i;

	for (i = 0; i < hisi_hba->queue_count; i++) {
		struct hisi_sas_cq *cq = &hisi_hba->cq[i];

		tasklet_kill(&cq->tasklet);
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_kill_tasklets);
struct scsi_transport_template *hisi_sas_stt;
EXPORT_SYMBOL_GPL(hisi_sas_stt);

struct device_attribute *host_attrs[] = {
	&dev_attr_phy_event_threshold,
	NULL,
};
EXPORT_SYMBOL_GPL(host_attrs);
static struct sas_domain_function_template hisi_sas_transport_ops = {
	.lldd_dev_found		= hisi_sas_dev_found,
	.lldd_dev_gone		= hisi_sas_dev_gone,
	.lldd_execute_task	= hisi_sas_queue_command,
	.lldd_control_phy	= hisi_sas_control_phy,
	.lldd_abort_task	= hisi_sas_abort_task,
	.lldd_abort_task_set	= hisi_sas_abort_task_set,
	.lldd_clear_aca		= hisi_sas_clear_aca,
	.lldd_I_T_nexus_reset	= hisi_sas_I_T_nexus_reset,
	.lldd_lu_reset		= hisi_sas_lu_reset,
	.lldd_query_task	= hisi_sas_query_task,
	.lldd_clear_nexus_ha	= hisi_sas_clear_nexus_ha,
	.lldd_port_formed	= hisi_sas_port_formed,
	.lldd_port_deformed	= hisi_sas_port_deformed,
	.lldd_write_gpio	= hisi_sas_write_gpio,
};
void hisi_sas_init_mem(struct hisi_hba *hisi_hba)
{
	int i, s, max_command_entries = hisi_hba->hw->max_command_entries;

	for (i = 0; i < hisi_hba->queue_count; i++) {
		struct hisi_sas_cq *cq = &hisi_hba->cq[i];
		struct hisi_sas_dq *dq = &hisi_hba->dq[i];

		s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
		memset(hisi_hba->cmd_hdr[i], 0, s);
		dq->wr_point = 0;

		s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
		memset(hisi_hba->complete_hdr[i], 0, s);
		cq->rd_point = 0;
	}

	s = sizeof(struct hisi_sas_initial_fis) * hisi_hba->n_phy;
	memset(hisi_hba->initial_fis, 0, s);

	s = max_command_entries * sizeof(struct hisi_sas_iost);
	memset(hisi_hba->iost, 0, s);

	s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
	memset(hisi_hba->breakpoint, 0, s);

	s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
	memset(hisi_hba->sata_breakpoint, 0, s);
}
EXPORT_SYMBOL_GPL(hisi_sas_init_mem);
int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
{
	struct device *dev = hisi_hba->dev;
	int i, j, s, max_command_entries = hisi_hba->hw->max_command_entries;
	int max_command_entries_ru, sz_slot_buf_ru;
	int blk_cnt, slots_per_blk;

	sema_init(&hisi_hba->sem, 1);
	spin_lock_init(&hisi_hba->lock);
	for (i = 0; i < hisi_hba->n_phy; i++) {
		hisi_sas_phy_init(hisi_hba, i);
		hisi_hba->port[i].port_attached = 0;
		hisi_hba->port[i].id = -1;
	}

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		hisi_hba->devices[i].dev_type = SAS_PHY_UNUSED;
		hisi_hba->devices[i].device_id = i;
		hisi_hba->devices[i].dev_status = HISI_SAS_DEV_NORMAL;
	}

	for (i = 0; i < hisi_hba->queue_count; i++) {
		struct hisi_sas_cq *cq = &hisi_hba->cq[i];
		struct hisi_sas_dq *dq = &hisi_hba->dq[i];

		/* Completion queue structure */
		cq->id = i;
		cq->hisi_hba = hisi_hba;

		/* Delivery queue structure */
		spin_lock_init(&dq->lock);
		INIT_LIST_HEAD(&dq->list);
		dq->id = i;
		dq->hisi_hba = hisi_hba;

		/* Delivery queue */
		s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
		hisi_hba->cmd_hdr[i] = dmam_alloc_coherent(dev, s,
						&hisi_hba->cmd_hdr_dma[i],
						GFP_KERNEL);
		if (!hisi_hba->cmd_hdr[i])
			goto err_out;

		/* Completion queue */
		s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
		hisi_hba->complete_hdr[i] = dmam_alloc_coherent(dev, s,
						&hisi_hba->complete_hdr_dma[i],
						GFP_KERNEL);
		if (!hisi_hba->complete_hdr[i])
			goto err_out;
	}

	s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
	hisi_hba->itct = dmam_alloc_coherent(dev, s, &hisi_hba->itct_dma,
					     GFP_KERNEL);
	if (!hisi_hba->itct)
		goto err_out;
	memset(hisi_hba->itct, 0, s);

	hisi_hba->slot_info = devm_kcalloc(dev, max_command_entries,
					   sizeof(struct hisi_sas_slot),
					   GFP_KERNEL);
	if (!hisi_hba->slot_info)
		goto err_out;
2122 max_command_entries_ru
= roundup(max_command_entries
, 64);
2123 sz_slot_buf_ru
= roundup(sizeof(struct hisi_sas_slot_buf_table
), 64);
2124 s
= lcm(max_command_entries_ru
, sz_slot_buf_ru
);
2125 blk_cnt
= (max_command_entries_ru
* sz_slot_buf_ru
) / s
;
2126 slots_per_blk
= s
/ sz_slot_buf_ru
;
2127 for (i
= 0; i
< blk_cnt
; i
++) {
2128 struct hisi_sas_slot_buf_table
*buf
;
2130 int slot_index
= i
* slots_per_blk
;
2132 buf
= dmam_alloc_coherent(dev
, s
, &buf_dma
, GFP_KERNEL
);
2137 for (j
= 0; j
< slots_per_blk
; j
++, slot_index
++) {
2138 struct hisi_sas_slot
*slot
;
2140 slot
= &hisi_hba
->slot_info
[slot_index
];
2142 slot
->buf_dma
= buf_dma
;
2143 slot
->idx
= slot_index
;
2146 buf_dma
+= sizeof(*buf
);
	s = max_command_entries * sizeof(struct hisi_sas_iost);
	hisi_hba->iost = dmam_alloc_coherent(dev, s, &hisi_hba->iost_dma,
					     GFP_KERNEL);
	if (!hisi_hba->iost)
		goto err_out;

	s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
	hisi_hba->breakpoint = dmam_alloc_coherent(dev, s,
						   &hisi_hba->breakpoint_dma,
						   GFP_KERNEL);
	if (!hisi_hba->breakpoint)
		goto err_out;

	hisi_hba->slot_index_count = max_command_entries;
	s = hisi_hba->slot_index_count / BITS_PER_BYTE;
	hisi_hba->slot_index_tags = devm_kzalloc(dev, s, GFP_KERNEL);
	if (!hisi_hba->slot_index_tags)
		goto err_out;

	s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS;
	hisi_hba->initial_fis = dmam_alloc_coherent(dev, s,
						    &hisi_hba->initial_fis_dma,
						    GFP_KERNEL);
	if (!hisi_hba->initial_fis)
		goto err_out;

	s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
	hisi_hba->sata_breakpoint = dmam_alloc_coherent(dev, s,
						&hisi_hba->sata_breakpoint_dma,
						GFP_KERNEL);
	if (!hisi_hba->sata_breakpoint)
		goto err_out;

	hisi_sas_init_mem(hisi_hba);

	hisi_sas_slot_index_init(hisi_hba);

	hisi_hba->wq = create_singlethread_workqueue(dev_name(dev));
	if (!hisi_hba->wq) {
		dev_err(dev, "sas_alloc: failed to create workqueue\n");
		goto err_out;
	}

	return 0;
err_out:
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(hisi_sas_alloc);
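/*
 * Counterpart of hisi_sas_alloc(): the managed allocations made there
 * are freed by the driver core, so only the workqueue has to be
 * destroyed by hand.
 */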
void hisi_sas_free(struct hisi_hba *hisi_hba)
{
	if (hisi_hba->wq)
		destroy_workqueue(hisi_hba->wq);
}
EXPORT_SYMBOL_GPL(hisi_sas_free);
void hisi_sas_rst_work_handler(struct work_struct *work)
{
	struct hisi_hba *hisi_hba =
		container_of(work, struct hisi_hba, rst_work);

	hisi_sas_controller_reset(hisi_hba);
}
EXPORT_SYMBOL_GPL(hisi_sas_rst_work_handler);
void hisi_sas_sync_rst_work_handler(struct work_struct *work)
{
	struct hisi_sas_rst *rst =
		container_of(work, struct hisi_sas_rst, work);

	if (!hisi_sas_controller_reset(rst->hisi_hba))
		rst->done = true;
	complete(rst->completion);
}
EXPORT_SYMBOL_GPL(hisi_sas_sync_rst_work_handler);
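/*
 * Typical caller pattern for the synchronous variant (a sketch only;
 * the hw-layer modules wrap this in their own helpers). The field
 * names come from the handler above:
 *
 *	DECLARE_COMPLETION_ONSTACK(c);
 *	struct hisi_sas_rst rst = {
 *		.hisi_hba	= hisi_hba,
 *		.completion	= &c,
 *		.done		= false,
 *	};
 *
 *	INIT_WORK_ONSTACK(&rst.work, hisi_sas_sync_rst_work_handler);
 *	queue_work(hisi_hba->wq, &rst.work);
 *	wait_for_completion(&c);
 *	if (!rst.done)
 *		// the controller reset failed
 */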
int hisi_sas_get_fw_info(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	struct platform_device *pdev = hisi_hba->platform_dev;
	struct device_node *np = pdev ? pdev->dev.of_node : NULL;
	struct clk *refclk;

	if (device_property_read_u8_array(dev, "sas-addr", hisi_hba->sas_addr,
					  SAS_ADDR_SIZE)) {
		dev_err(dev, "could not get property sas-addr\n");
		return -ENOENT;
	}

	if (np) {
		/*
		 * These properties are only required for platform
		 * device-based controllers with DT firmware.
		 */
		hisi_hba->ctrl = syscon_regmap_lookup_by_phandle(np,
					"hisilicon,sas-syscon");
		if (IS_ERR(hisi_hba->ctrl)) {
			dev_err(dev, "could not get syscon\n");
			return -ENOENT;
		}

		if (device_property_read_u32(dev, "ctrl-reset-reg",
					     &hisi_hba->ctrl_reset_reg)) {
			dev_err(dev,
				"could not get property ctrl-reset-reg\n");
			return -ENOENT;
		}

		if (device_property_read_u32(dev, "ctrl-reset-sts-reg",
					     &hisi_hba->ctrl_reset_sts_reg)) {
			dev_err(dev,
				"could not get property ctrl-reset-sts-reg\n");
			return -ENOENT;
		}

		if (device_property_read_u32(dev, "ctrl-clock-ena-reg",
					     &hisi_hba->ctrl_clock_ena_reg)) {
			dev_err(dev,
				"could not get property ctrl-clock-ena-reg\n");
			return -ENOENT;
		}
	}

	refclk = devm_clk_get(dev, NULL);
	if (IS_ERR(refclk))
		dev_dbg(dev, "no ref clk property\n");
	else
		hisi_hba->refclk_frequency_mhz = clk_get_rate(refclk) / 1000000;

	if (device_property_read_u32(dev, "phy-count", &hisi_hba->n_phy)) {
		dev_err(dev, "could not get property phy-count\n");
		return -ENOENT;
	}

	if (device_property_read_u32(dev, "queue-count",
				     &hisi_hba->queue_count)) {
		dev_err(dev, "could not get property queue-count\n");
		return -ENOENT;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_get_fw_info);
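/*
 * Illustrative DT fragment consumed above (the values are made up; the
 * property names match the reads in hisi_sas_get_fw_info()):
 *
 *	sas0: sas@c1000000 {
 *		...
 *		sas-addr = [50 01 88 20 16 00 00 00];
 *		hisilicon,sas-syscon = <&pctrl>;
 *		ctrl-reset-reg = <0xa60>;
 *		ctrl-reset-sts-reg = <0x5a30>;
 *		ctrl-clock-ena-reg = <0x338>;
 *		phy-count = <8>;
 *		queue-count = <32>;
 *	};
 */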
static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
					      const struct hisi_sas_hw *hw)
{
	struct resource *res;
	struct Scsi_Host *shost;
	struct hisi_hba *hisi_hba;
	struct device *dev = &pdev->dev;

	shost = scsi_host_alloc(hw->sht, sizeof(*hisi_hba));
	if (!shost) {
		dev_err(dev, "scsi host alloc failed\n");
		return NULL;
	}
	hisi_hba = shost_priv(shost);

	INIT_WORK(&hisi_hba->rst_work, hisi_sas_rst_work_handler);
	hisi_hba->hw = hw;
	hisi_hba->dev = dev;
	hisi_hba->platform_dev = pdev;
	hisi_hba->shost = shost;
	SHOST_TO_SAS_HA(shost) = &hisi_hba->sha;

	timer_setup(&hisi_hba->timer, NULL, 0);

	if (hisi_sas_get_fw_info(hisi_hba) < 0)
		goto err_out;

	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
	    dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
		dev_err(dev, "No usable DMA addressing method\n");
		goto err_out;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	hisi_hba->regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(hisi_hba->regs))
		goto err_out;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (res) {
		hisi_hba->sgpio_regs = devm_ioremap_resource(dev, res);
		if (IS_ERR(hisi_hba->sgpio_regs))
			goto err_out;
	}

	if (hisi_sas_alloc(hisi_hba, shost)) {
		hisi_sas_free(hisi_hba);
		goto err_out;
	}

	return shost;
err_out:
	scsi_host_put(shost);
	dev_err(dev, "shost alloc failed\n");
	return NULL;
}
int hisi_sas_probe(struct platform_device *pdev,
		   const struct hisi_sas_hw *hw)
{
	struct Scsi_Host *shost;
	struct hisi_hba *hisi_hba;
	struct device *dev = &pdev->dev;
	struct asd_sas_phy **arr_phy;
	struct asd_sas_port **arr_port;
	struct sas_ha_struct *sha;
	int rc, phy_nr, port_nr, i;

	shost = hisi_sas_shost_alloc(pdev, hw);
	if (!shost)
		return -ENOMEM;

	sha = SHOST_TO_SAS_HA(shost);
	hisi_hba = shost_priv(shost);
	platform_set_drvdata(pdev, sha);

	phy_nr = port_nr = hisi_hba->n_phy;

	arr_phy = devm_kcalloc(dev, phy_nr, sizeof(void *), GFP_KERNEL);
	arr_port = devm_kcalloc(dev, port_nr, sizeof(void *), GFP_KERNEL);
	if (!arr_phy || !arr_port) {
		rc = -ENOMEM;
		goto err_out_ha;
	}

	sha->sas_phy = arr_phy;
	sha->sas_port = arr_port;
	sha->lldd_ha = hisi_hba;

	shost->transportt = hisi_sas_stt;
	shost->max_id = HISI_SAS_MAX_DEVICES;
	shost->max_lun = ~0;
	shost->max_channel = 1;
	shost->max_cmd_len = 16;
	shost->sg_tablesize = min_t(u16, SG_ALL, HISI_SAS_SGE_PAGE_CNT);
	shost->can_queue = hisi_hba->hw->max_command_entries;
	shost->cmd_per_lun = hisi_hba->hw->max_command_entries;

	sha->sas_ha_name = DRV_NAME;
	sha->dev = hisi_hba->dev;
	sha->lldd_module = THIS_MODULE;
	sha->sas_addr = &hisi_hba->sas_addr[0];
	sha->num_phys = hisi_hba->n_phy;
	sha->core.shost = hisi_hba->shost;

	for (i = 0; i < hisi_hba->n_phy; i++) {
		sha->sas_phy[i] = &hisi_hba->phy[i].sas_phy;
		sha->sas_port[i] = &hisi_hba->port[i].sas_port;
	}

	rc = scsi_add_host(shost, &pdev->dev);
	if (rc)
		goto err_out_ha;

	rc = sas_register_ha(sha);
	if (rc)
		goto err_out_register_ha;

	rc = hisi_hba->hw->hw_init(hisi_hba);
	if (rc)
		goto err_out_register_ha;

	scsi_scan_host(shost);

	return 0;

err_out_register_ha:
	scsi_remove_host(shost);
err_out_ha:
	hisi_sas_free(hisi_hba);
	scsi_host_put(shost);
	return rc;
}
EXPORT_SYMBOL_GPL(hisi_sas_probe);
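/*
 * A hw-layer module passes its hisi_sas_hw ops table in from its own
 * platform probe; a sketch of such a wrapper:
 *
 *	static int hisi_sas_v1_probe(struct platform_device *pdev)
 *	{
 *		return hisi_sas_probe(pdev, &hisi_sas_v1_hw);
 *	}
 */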
int hisi_sas_remove(struct platform_device *pdev)
{
	struct sas_ha_struct *sha = platform_get_drvdata(pdev);
	struct hisi_hba *hisi_hba = sha->lldd_ha;
	struct Scsi_Host *shost = sha->core.shost;

	if (timer_pending(&hisi_hba->timer))
		del_timer(&hisi_hba->timer);

	sas_unregister_ha(sha);
	sas_remove_host(sha->core.shost);

	hisi_sas_free(hisi_hba);
	scsi_host_put(shost);
	return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_remove);
static __init int hisi_sas_init(void)
{
	hisi_sas_stt = sas_domain_attach_transport(&hisi_sas_transport_ops);
	if (!hisi_sas_stt)
		return -ENOMEM;

	return 0;
}

static __exit void hisi_sas_exit(void)
{
	sas_release_transport(hisi_sas_stt);
}

module_init(hisi_sas_init);
module_exit(hisi_sas_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Garry <john.garry@huawei.com>");
MODULE_DESCRIPTION("HISILICON SAS controller driver");
MODULE_ALIAS("platform:" DRV_NAME);